Merge tag 'dma-mapping-5.20-2022-08-06' of git://git.infradead.org/users/hch/dma-mapping

Pull dma-mapping updates from Christoph Hellwig:

 - convert arm32 to the common dma-direct code (Arnd Bergmann, Robin Murphy, Christoph Hellwig)

 - restructure the PCIe peer to peer mapping support (Logan Gunthorpe)

 - allow the IOMMU code to communicate an optional DMA mapping length and use that in scsi and libata (John Garry)

 - split the global swiotlb lock (Tianyu Lan)

 - various fixes and cleanup (Chao Gao, Dan Carpenter, Dongli Zhang, Lukas Bulwahn, Robin Murphy)

* tag 'dma-mapping-5.20-2022-08-06' of git://git.infradead.org/users/hch/dma-mapping: (45 commits)
  swiotlb: fix passing local variable to debugfs_create_ulong()
  dma-mapping: reformat comment to suppress htmldoc warning
  PCI/P2PDMA: Remove pci_p2pdma_[un]map_sg()
  RDMA/rw: drop pci_p2pdma_[un]map_sg()
  RDMA/core: introduce ib_dma_pci_p2p_dma_supported()
  nvme-pci: convert to using dma_map_sgtable()
  nvme-pci: check DMA ops when indicating support for PCI P2PDMA
  iommu/dma: support PCI P2PDMA pages in dma-iommu map_sg
  iommu: Explicitly skip bus address marked segments in __iommu_map_sg()
  dma-mapping: add flags to dma_map_ops to indicate PCI P2PDMA support
  dma-direct: support PCI P2PDMA pages in dma-direct map_sg
  dma-mapping: allow EREMOTEIO return code for P2PDMA transfers
  PCI/P2PDMA: Introduce helpers for dma_map_sg implementations
  PCI/P2PDMA: Attempt to set map_type if it has not been set
  lib/scatterlist: add flag for indicating P2PDMA segments in an SGL
  swiotlb: clean up some coding style and minor issues
  dma-mapping: update comment after dmabounce removal
  scsi: sd: Add a comment about limiting max_sectors to shost optimal limit
  ata: libata-scsi: cap ata_device->max_sectors according to shost->max_sectors
  scsi: scsi_transport_sas: cap shost opt_sectors according to DMA optimal limit
  ...
Commit: c993e07be0
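The P2PDMA rework in this pull routes peer-to-peer pages through the regular
scatterlist mapping path, and dma_map_sgtable() may now fail with -EREMOTEIO
when a peer-to-peer transfer cannot be routed to the device. The sketch below
is illustrative only (the function and variable names are hypothetical, not
taken from this series) and shows how a consumer might handle that error:

	#include <linux/dma-mapping.h>
	#include <linux/scatterlist.h>

	static int my_map_request(struct device *dev, struct sg_table *sgt)
	{
		int ret;

		ret = dma_map_sgtable(dev, sgt, DMA_BIDIRECTIONAL, 0);
		if (ret == -EREMOTEIO) {
			/* P2PDMA pages in this sg_table cannot reach the
			 * device; fall back to host memory or fail. */
			return ret;
		}
		return ret;	/* 0 on success, other -errno on failure */
	}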
@@ -5999,8 +5999,11 @@
			it if 0 is given (See Documentation/admin-guide/cgroup-v1/memory.rst)

	swiotlb=	[ARM,IA-64,PPC,MIPS,X86]
			Format: { <int> | force | noforce }
			Format: { <int> [,<int>] | force | noforce }
			<int> -- Number of I/O TLB slabs
			<int> -- Second integer after comma. Number of swiotlb
				 areas with their own lock. Will be rounded up
				 to a power of 2.
			force -- force using of bounce buffers even if they
				 wouldn't be automatically used by the kernel
			noforce -- Never use bounce buffers (for debugging)
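As a worked example of the two-integer form documented above (the numbers are
illustrative, not from this patch): booting with

	swiotlb=65536,4

asks for 65536 I/O TLB slabs and splits the bounce-buffer pool into 4 areas,
each with its own lock; a non-power-of-2 area count would be rounded up as
described.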
@@ -204,6 +204,20 @@ Returns the maximum size of a mapping for the device. The size parameter
of the mapping functions like dma_map_single(), dma_map_page() and
others should not be larger than the returned value.

::

	size_t
	dma_opt_mapping_size(struct device *dev);

Returns the maximum optimal size of a mapping for the device.

Mapping larger buffers may take much longer in certain scenarios. In
addition, for high-rate short-lived streaming mappings, the upfront time
spent on the mapping may account for an appreciable part of the total
request lifetime. As such, if splitting larger requests incurs no
significant performance penalty, then device drivers are advised to
limit total DMA streaming mappings length to the returned value.

::

	bool
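To make the advice above concrete, here is a minimal sketch (not part of this
diff) of a driver clamping its streaming-DMA transfer size; the helper name
and the hw_max_bytes parameter are hypothetical:

	#include <linux/dma-mapping.h>
	#include <linux/minmax.h>

	/*
	 * Limit a driver's maximum streaming mapping to the optimal size
	 * reported by the DMA layer.  dma_opt_mapping_size() may return a
	 * very large value when nothing imposes a limit, so only the
	 * smaller of the two bounds is used.
	 */
	static size_t my_max_xfer_bytes(struct device *dev, size_t hw_max_bytes)
	{
		return min(dma_opt_mapping_size(dev), hw_max_bytes);
	}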
@@ -287,11 +287,13 @@ iommu options only relevant to the AMD GART hardware IOMMU:
iommu options only relevant to the software bounce buffering (SWIOTLB) IOMMU
implementation:

  swiotlb=<pages>[,force]
  <pages>
    Prereserve that many 128K pages for the software IO bounce buffering.
  swiotlb=<slots>[,force,noforce]
  <slots>
    Prereserve that many 2K slots for the software IO bounce buffering.
  force
    Force all IO through the software TLB.
  noforce
    Do not initialize the software TLB.


Miscellaneous
@@ -15,13 +15,12 @@ config ARM
	select ARCH_HAS_MEMBARRIER_SYNC_CORE
	select ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
	select ARCH_HAS_PTE_SPECIAL if ARM_LPAE
	select ARCH_HAS_PHYS_TO_DMA
	select ARCH_HAS_SETUP_DMA_OPS
	select ARCH_HAS_SET_MEMORY
	select ARCH_HAS_STRICT_KERNEL_RWX if MMU && !XIP_KERNEL
	select ARCH_HAS_STRICT_MODULE_RWX if MMU
	select ARCH_HAS_SYNC_DMA_FOR_DEVICE if SWIOTLB || !MMU
	select ARCH_HAS_SYNC_DMA_FOR_CPU if SWIOTLB || !MMU
	select ARCH_HAS_SYNC_DMA_FOR_DEVICE
	select ARCH_HAS_SYNC_DMA_FOR_CPU
	select ARCH_HAS_TEARDOWN_DMA_OPS if MMU
	select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
	select ARCH_HAVE_CUSTOM_GPIO_H
@@ -1,11 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
config SA1111
	bool
	select DMABOUNCE if !ARCH_PXA

config DMABOUNCE
	bool
	select ZONE_DMA
	select ZONE_DMA if ARCH_SA1100

config KRAIT_L2_ACCESSORS
	bool
@@ -6,7 +6,6 @@
obj-y += firmware.o

obj-$(CONFIG_SA1111) += sa1111.o
obj-$(CONFIG_DMABOUNCE) += dmabounce.o
obj-$(CONFIG_KRAIT_L2_ACCESSORS) += krait-l2-accessors.o
obj-$(CONFIG_SHARP_LOCOMO) += locomo.o
obj-$(CONFIG_SHARP_PARAM) += sharpsl_param.o
@ -1,582 +0,0 @@
|
|||
// SPDX-License-Identifier: GPL-2.0-only
|
||||
/*
|
||||
* arch/arm/common/dmabounce.c
|
||||
*
|
||||
* Special dma_{map/unmap/dma_sync}_* routines for systems that have
|
||||
* limited DMA windows. These functions utilize bounce buffers to
|
||||
* copy data to/from buffers located outside the DMA region. This
|
||||
* only works for systems in which DMA memory is at the bottom of
|
||||
* RAM, the remainder of memory is at the top and the DMA memory
|
||||
* can be marked as ZONE_DMA. Anything beyond that such as discontiguous
|
||||
* DMA windows will require custom implementations that reserve memory
|
||||
* areas at early bootup.
|
||||
*
|
||||
* Original version by Brad Parker (brad@heeltoe.com)
|
||||
* Re-written by Christopher Hoover <ch@murgatroid.com>
|
||||
* Made generic by Deepak Saxena <dsaxena@plexity.net>
|
||||
*
|
||||
* Copyright (C) 2002 Hewlett Packard Company.
|
||||
* Copyright (C) 2004 MontaVista Software, Inc.
|
||||
*/
|
||||
|
||||
#include <linux/module.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/page-flags.h>
|
||||
#include <linux/device.h>
|
||||
#include <linux/dma-direct.h>
|
||||
#include <linux/dma-map-ops.h>
|
||||
#include <linux/dmapool.h>
|
||||
#include <linux/list.h>
|
||||
#include <linux/scatterlist.h>
|
||||
|
||||
#include <asm/cacheflush.h>
|
||||
#include <asm/dma-iommu.h>
|
||||
|
||||
#undef STATS
|
||||
|
||||
#ifdef STATS
|
||||
#define DO_STATS(X) do { X ; } while (0)
|
||||
#else
|
||||
#define DO_STATS(X) do { } while (0)
|
||||
#endif
|
||||
|
||||
/* ************************************************** */
|
||||
|
||||
struct safe_buffer {
|
||||
struct list_head node;
|
||||
|
||||
/* original request */
|
||||
void *ptr;
|
||||
size_t size;
|
||||
int direction;
|
||||
|
||||
/* safe buffer info */
|
||||
struct dmabounce_pool *pool;
|
||||
void *safe;
|
||||
dma_addr_t safe_dma_addr;
|
||||
};
|
||||
|
||||
struct dmabounce_pool {
|
||||
unsigned long size;
|
||||
struct dma_pool *pool;
|
||||
#ifdef STATS
|
||||
unsigned long allocs;
|
||||
#endif
|
||||
};
|
||||
|
||||
struct dmabounce_device_info {
|
||||
struct device *dev;
|
||||
struct list_head safe_buffers;
|
||||
#ifdef STATS
|
||||
unsigned long total_allocs;
|
||||
unsigned long map_op_count;
|
||||
unsigned long bounce_count;
|
||||
int attr_res;
|
||||
#endif
|
||||
struct dmabounce_pool small;
|
||||
struct dmabounce_pool large;
|
||||
|
||||
rwlock_t lock;
|
||||
|
||||
int (*needs_bounce)(struct device *, dma_addr_t, size_t);
|
||||
};
|
||||
|
||||
#ifdef STATS
|
||||
static ssize_t dmabounce_show(struct device *dev, struct device_attribute *attr,
|
||||
char *buf)
|
||||
{
|
||||
struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
|
||||
return sprintf(buf, "%lu %lu %lu %lu %lu %lu\n",
|
||||
device_info->small.allocs,
|
||||
device_info->large.allocs,
|
||||
device_info->total_allocs - device_info->small.allocs -
|
||||
device_info->large.allocs,
|
||||
device_info->total_allocs,
|
||||
device_info->map_op_count,
|
||||
device_info->bounce_count);
|
||||
}
|
||||
|
||||
static DEVICE_ATTR(dmabounce_stats, 0400, dmabounce_show, NULL);
|
||||
#endif
|
||||
|
||||
|
||||
/* allocate a 'safe' buffer and keep track of it */
|
||||
static inline struct safe_buffer *
|
||||
alloc_safe_buffer(struct dmabounce_device_info *device_info, void *ptr,
|
||||
size_t size, enum dma_data_direction dir)
|
||||
{
|
||||
struct safe_buffer *buf;
|
||||
struct dmabounce_pool *pool;
|
||||
struct device *dev = device_info->dev;
|
||||
unsigned long flags;
|
||||
|
||||
dev_dbg(dev, "%s(ptr=%p, size=%d, dir=%d)\n",
|
||||
__func__, ptr, size, dir);
|
||||
|
||||
if (size <= device_info->small.size) {
|
||||
pool = &device_info->small;
|
||||
} else if (size <= device_info->large.size) {
|
||||
pool = &device_info->large;
|
||||
} else {
|
||||
pool = NULL;
|
||||
}
|
||||
|
||||
buf = kmalloc(sizeof(struct safe_buffer), GFP_ATOMIC);
|
||||
if (buf == NULL) {
|
||||
dev_warn(dev, "%s: kmalloc failed\n", __func__);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
buf->ptr = ptr;
|
||||
buf->size = size;
|
||||
buf->direction = dir;
|
||||
buf->pool = pool;
|
||||
|
||||
if (pool) {
|
||||
buf->safe = dma_pool_alloc(pool->pool, GFP_ATOMIC,
|
||||
&buf->safe_dma_addr);
|
||||
} else {
|
||||
buf->safe = dma_alloc_coherent(dev, size, &buf->safe_dma_addr,
|
||||
GFP_ATOMIC);
|
||||
}
|
||||
|
||||
if (buf->safe == NULL) {
|
||||
dev_warn(dev,
|
||||
"%s: could not alloc dma memory (size=%d)\n",
|
||||
__func__, size);
|
||||
kfree(buf);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
#ifdef STATS
|
||||
if (pool)
|
||||
pool->allocs++;
|
||||
device_info->total_allocs++;
|
||||
#endif
|
||||
|
||||
write_lock_irqsave(&device_info->lock, flags);
|
||||
list_add(&buf->node, &device_info->safe_buffers);
|
||||
write_unlock_irqrestore(&device_info->lock, flags);
|
||||
|
||||
return buf;
|
||||
}
|
||||
|
||||
/* determine if a buffer is from our "safe" pool */
|
||||
static inline struct safe_buffer *
|
||||
find_safe_buffer(struct dmabounce_device_info *device_info, dma_addr_t safe_dma_addr)
|
||||
{
|
||||
struct safe_buffer *b, *rb = NULL;
|
||||
unsigned long flags;
|
||||
|
||||
read_lock_irqsave(&device_info->lock, flags);
|
||||
|
||||
list_for_each_entry(b, &device_info->safe_buffers, node)
|
||||
if (b->safe_dma_addr <= safe_dma_addr &&
|
||||
b->safe_dma_addr + b->size > safe_dma_addr) {
|
||||
rb = b;
|
||||
break;
|
||||
}
|
||||
|
||||
read_unlock_irqrestore(&device_info->lock, flags);
|
||||
return rb;
|
||||
}
|
||||
|
||||
static inline void
|
||||
free_safe_buffer(struct dmabounce_device_info *device_info, struct safe_buffer *buf)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
dev_dbg(device_info->dev, "%s(buf=%p)\n", __func__, buf);
|
||||
|
||||
write_lock_irqsave(&device_info->lock, flags);
|
||||
|
||||
list_del(&buf->node);
|
||||
|
||||
write_unlock_irqrestore(&device_info->lock, flags);
|
||||
|
||||
if (buf->pool)
|
||||
dma_pool_free(buf->pool->pool, buf->safe, buf->safe_dma_addr);
|
||||
else
|
||||
dma_free_coherent(device_info->dev, buf->size, buf->safe,
|
||||
buf->safe_dma_addr);
|
||||
|
||||
kfree(buf);
|
||||
}
|
||||
|
||||
/* ************************************************** */
|
||||
|
||||
static struct safe_buffer *find_safe_buffer_dev(struct device *dev,
|
||||
dma_addr_t dma_addr, const char *where)
|
||||
{
|
||||
if (!dev || !dev->archdata.dmabounce)
|
||||
return NULL;
|
||||
if (dma_mapping_error(dev, dma_addr)) {
|
||||
dev_err(dev, "Trying to %s invalid mapping\n", where);
|
||||
return NULL;
|
||||
}
|
||||
return find_safe_buffer(dev->archdata.dmabounce, dma_addr);
|
||||
}
|
||||
|
||||
static int needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t size)
|
||||
{
|
||||
if (!dev || !dev->archdata.dmabounce)
|
||||
return 0;
|
||||
|
||||
if (dev->dma_mask) {
|
||||
unsigned long limit, mask = *dev->dma_mask;
|
||||
|
||||
limit = (mask + 1) & ~mask;
|
||||
if (limit && size > limit) {
|
||||
dev_err(dev, "DMA mapping too big (requested %#x "
|
||||
"mask %#Lx)\n", size, *dev->dma_mask);
|
||||
return -E2BIG;
|
||||
}
|
||||
|
||||
/* Figure out if we need to bounce from the DMA mask. */
|
||||
if ((dma_addr | (dma_addr + size - 1)) & ~mask)
|
||||
return 1;
|
||||
}
|
||||
|
||||
return !!dev->archdata.dmabounce->needs_bounce(dev, dma_addr, size);
|
||||
}
|
||||
|
||||
static inline dma_addr_t map_single(struct device *dev, void *ptr, size_t size,
|
||||
enum dma_data_direction dir,
|
||||
unsigned long attrs)
|
||||
{
|
||||
struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
|
||||
struct safe_buffer *buf;
|
||||
|
||||
if (device_info)
|
||||
DO_STATS ( device_info->map_op_count++ );
|
||||
|
||||
buf = alloc_safe_buffer(device_info, ptr, size, dir);
|
||||
if (buf == NULL) {
|
||||
dev_err(dev, "%s: unable to map unsafe buffer %p!\n",
|
||||
__func__, ptr);
|
||||
return DMA_MAPPING_ERROR;
|
||||
}
|
||||
|
||||
dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
|
||||
__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
|
||||
buf->safe, buf->safe_dma_addr);
|
||||
|
||||
if ((dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL) &&
|
||||
!(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
|
||||
dev_dbg(dev, "%s: copy unsafe %p to safe %p, size %d\n",
|
||||
__func__, ptr, buf->safe, size);
|
||||
memcpy(buf->safe, ptr, size);
|
||||
}
|
||||
|
||||
return buf->safe_dma_addr;
|
||||
}
|
||||
|
||||
static inline void unmap_single(struct device *dev, struct safe_buffer *buf,
|
||||
size_t size, enum dma_data_direction dir,
|
||||
unsigned long attrs)
|
||||
{
|
||||
BUG_ON(buf->size != size);
|
||||
BUG_ON(buf->direction != dir);
|
||||
|
||||
dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
|
||||
__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
|
||||
buf->safe, buf->safe_dma_addr);
|
||||
|
||||
DO_STATS(dev->archdata.dmabounce->bounce_count++);
|
||||
|
||||
if ((dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) &&
|
||||
!(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
|
||||
void *ptr = buf->ptr;
|
||||
|
||||
dev_dbg(dev, "%s: copy back safe %p to unsafe %p size %d\n",
|
||||
__func__, buf->safe, ptr, size);
|
||||
memcpy(ptr, buf->safe, size);
|
||||
|
||||
/*
|
||||
* Since we may have written to a page cache page,
|
||||
* we need to ensure that the data will be coherent
|
||||
* with user mappings.
|
||||
*/
|
||||
__cpuc_flush_dcache_area(ptr, size);
|
||||
}
|
||||
free_safe_buffer(dev->archdata.dmabounce, buf);
|
||||
}
|
||||
|
||||
/* ************************************************** */
|
||||
|
||||
/*
|
||||
* see if a buffer address is in an 'unsafe' range. if it is
|
||||
* allocate a 'safe' buffer and copy the unsafe buffer into it.
|
||||
* substitute the safe buffer for the unsafe one.
|
||||
* (basically move the buffer from an unsafe area to a safe one)
|
||||
*/
|
||||
static dma_addr_t dmabounce_map_page(struct device *dev, struct page *page,
|
||||
unsigned long offset, size_t size, enum dma_data_direction dir,
|
||||
unsigned long attrs)
|
||||
{
|
||||
dma_addr_t dma_addr;
|
||||
int ret;
|
||||
|
||||
dev_dbg(dev, "%s(page=%p,off=%#lx,size=%zx,dir=%x)\n",
|
||||
__func__, page, offset, size, dir);
|
||||
|
||||
dma_addr = pfn_to_dma(dev, page_to_pfn(page)) + offset;
|
||||
|
||||
ret = needs_bounce(dev, dma_addr, size);
|
||||
if (ret < 0)
|
||||
return DMA_MAPPING_ERROR;
|
||||
|
||||
if (ret == 0) {
|
||||
arm_dma_ops.sync_single_for_device(dev, dma_addr, size, dir);
|
||||
return dma_addr;
|
||||
}
|
||||
|
||||
if (PageHighMem(page)) {
|
||||
dev_err(dev, "DMA buffer bouncing of HIGHMEM pages is not supported\n");
|
||||
return DMA_MAPPING_ERROR;
|
||||
}
|
||||
|
||||
return map_single(dev, page_address(page) + offset, size, dir, attrs);
|
||||
}
|
||||
|
||||
/*
|
||||
* see if a mapped address was really a "safe" buffer and if so, copy
|
||||
* the data from the safe buffer back to the unsafe buffer and free up
|
||||
* the safe buffer. (basically return things back to the way they
|
||||
* should be)
|
||||
*/
|
||||
static void dmabounce_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
|
||||
enum dma_data_direction dir, unsigned long attrs)
|
||||
{
|
||||
struct safe_buffer *buf;
|
||||
|
||||
dev_dbg(dev, "%s(dma=%#x,size=%d,dir=%x)\n",
|
||||
__func__, dma_addr, size, dir);
|
||||
|
||||
buf = find_safe_buffer_dev(dev, dma_addr, __func__);
|
||||
if (!buf) {
|
||||
arm_dma_ops.sync_single_for_cpu(dev, dma_addr, size, dir);
|
||||
return;
|
||||
}
|
||||
|
||||
unmap_single(dev, buf, size, dir, attrs);
|
||||
}
|
||||
|
||||
static int __dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
|
||||
size_t sz, enum dma_data_direction dir)
|
||||
{
|
||||
struct safe_buffer *buf;
|
||||
unsigned long off;
|
||||
|
||||
dev_dbg(dev, "%s(dma=%#x,sz=%zx,dir=%x)\n",
|
||||
__func__, addr, sz, dir);
|
||||
|
||||
buf = find_safe_buffer_dev(dev, addr, __func__);
|
||||
if (!buf)
|
||||
return 1;
|
||||
|
||||
off = addr - buf->safe_dma_addr;
|
||||
|
||||
BUG_ON(buf->direction != dir);
|
||||
|
||||
dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x off=%#lx) mapped to %p (dma=%#x)\n",
|
||||
__func__, buf->ptr, virt_to_dma(dev, buf->ptr), off,
|
||||
buf->safe, buf->safe_dma_addr);
|
||||
|
||||
DO_STATS(dev->archdata.dmabounce->bounce_count++);
|
||||
|
||||
if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) {
|
||||
dev_dbg(dev, "%s: copy back safe %p to unsafe %p size %d\n",
|
||||
__func__, buf->safe + off, buf->ptr + off, sz);
|
||||
memcpy(buf->ptr + off, buf->safe + off, sz);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void dmabounce_sync_for_cpu(struct device *dev,
|
||||
dma_addr_t handle, size_t size, enum dma_data_direction dir)
|
||||
{
|
||||
if (!__dmabounce_sync_for_cpu(dev, handle, size, dir))
|
||||
return;
|
||||
|
||||
arm_dma_ops.sync_single_for_cpu(dev, handle, size, dir);
|
||||
}
|
||||
|
||||
static int __dmabounce_sync_for_device(struct device *dev, dma_addr_t addr,
|
||||
size_t sz, enum dma_data_direction dir)
|
||||
{
|
||||
struct safe_buffer *buf;
|
||||
unsigned long off;
|
||||
|
||||
dev_dbg(dev, "%s(dma=%#x,sz=%zx,dir=%x)\n",
|
||||
__func__, addr, sz, dir);
|
||||
|
||||
buf = find_safe_buffer_dev(dev, addr, __func__);
|
||||
if (!buf)
|
||||
return 1;
|
||||
|
||||
off = addr - buf->safe_dma_addr;
|
||||
|
||||
BUG_ON(buf->direction != dir);
|
||||
|
||||
dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x off=%#lx) mapped to %p (dma=%#x)\n",
|
||||
__func__, buf->ptr, virt_to_dma(dev, buf->ptr), off,
|
||||
buf->safe, buf->safe_dma_addr);
|
||||
|
||||
DO_STATS(dev->archdata.dmabounce->bounce_count++);
|
||||
|
||||
if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL) {
|
||||
dev_dbg(dev, "%s: copy out unsafe %p to safe %p, size %d\n",
|
||||
__func__,buf->ptr + off, buf->safe + off, sz);
|
||||
memcpy(buf->safe + off, buf->ptr + off, sz);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void dmabounce_sync_for_device(struct device *dev,
|
||||
dma_addr_t handle, size_t size, enum dma_data_direction dir)
|
||||
{
|
||||
if (!__dmabounce_sync_for_device(dev, handle, size, dir))
|
||||
return;
|
||||
|
||||
arm_dma_ops.sync_single_for_device(dev, handle, size, dir);
|
||||
}
|
||||
|
||||
static int dmabounce_dma_supported(struct device *dev, u64 dma_mask)
|
||||
{
|
||||
if (dev->archdata.dmabounce)
|
||||
return 0;
|
||||
|
||||
return arm_dma_ops.dma_supported(dev, dma_mask);
|
||||
}
|
||||
|
||||
static const struct dma_map_ops dmabounce_ops = {
|
||||
.alloc = arm_dma_alloc,
|
||||
.free = arm_dma_free,
|
||||
.mmap = arm_dma_mmap,
|
||||
.get_sgtable = arm_dma_get_sgtable,
|
||||
.map_page = dmabounce_map_page,
|
||||
.unmap_page = dmabounce_unmap_page,
|
||||
.sync_single_for_cpu = dmabounce_sync_for_cpu,
|
||||
.sync_single_for_device = dmabounce_sync_for_device,
|
||||
.map_sg = arm_dma_map_sg,
|
||||
.unmap_sg = arm_dma_unmap_sg,
|
||||
.sync_sg_for_cpu = arm_dma_sync_sg_for_cpu,
|
||||
.sync_sg_for_device = arm_dma_sync_sg_for_device,
|
||||
.dma_supported = dmabounce_dma_supported,
|
||||
};
|
||||
|
||||
static int dmabounce_init_pool(struct dmabounce_pool *pool, struct device *dev,
|
||||
const char *name, unsigned long size)
|
||||
{
|
||||
pool->size = size;
|
||||
DO_STATS(pool->allocs = 0);
|
||||
pool->pool = dma_pool_create(name, dev, size,
|
||||
0 /* byte alignment */,
|
||||
0 /* no page-crossing issues */);
|
||||
|
||||
return pool->pool ? 0 : -ENOMEM;
|
||||
}
|
||||
|
||||
int dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size,
|
||||
unsigned long large_buffer_size,
|
||||
int (*needs_bounce_fn)(struct device *, dma_addr_t, size_t))
|
||||
{
|
||||
struct dmabounce_device_info *device_info;
|
||||
int ret;
|
||||
|
||||
device_info = kmalloc(sizeof(struct dmabounce_device_info), GFP_ATOMIC);
|
||||
if (!device_info) {
|
||||
dev_err(dev,
|
||||
"Could not allocated dmabounce_device_info\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
ret = dmabounce_init_pool(&device_info->small, dev,
|
||||
"small_dmabounce_pool", small_buffer_size);
|
||||
if (ret) {
|
||||
dev_err(dev,
|
||||
"dmabounce: could not allocate DMA pool for %ld byte objects\n",
|
||||
small_buffer_size);
|
||||
goto err_free;
|
||||
}
|
||||
|
||||
if (large_buffer_size) {
|
||||
ret = dmabounce_init_pool(&device_info->large, dev,
|
||||
"large_dmabounce_pool",
|
||||
large_buffer_size);
|
||||
if (ret) {
|
||||
dev_err(dev,
|
||||
"dmabounce: could not allocate DMA pool for %ld byte objects\n",
|
||||
large_buffer_size);
|
||||
goto err_destroy;
|
||||
}
|
||||
}
|
||||
|
||||
device_info->dev = dev;
|
||||
INIT_LIST_HEAD(&device_info->safe_buffers);
|
||||
rwlock_init(&device_info->lock);
|
||||
device_info->needs_bounce = needs_bounce_fn;
|
||||
|
||||
#ifdef STATS
|
||||
device_info->total_allocs = 0;
|
||||
device_info->map_op_count = 0;
|
||||
device_info->bounce_count = 0;
|
||||
device_info->attr_res = device_create_file(dev, &dev_attr_dmabounce_stats);
|
||||
#endif
|
||||
|
||||
dev->archdata.dmabounce = device_info;
|
||||
set_dma_ops(dev, &dmabounce_ops);
|
||||
|
||||
dev_info(dev, "dmabounce: registered device\n");
|
||||
|
||||
return 0;
|
||||
|
||||
err_destroy:
|
||||
dma_pool_destroy(device_info->small.pool);
|
||||
err_free:
|
||||
kfree(device_info);
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL(dmabounce_register_dev);
|
||||
|
||||
void dmabounce_unregister_dev(struct device *dev)
|
||||
{
|
||||
struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
|
||||
|
||||
dev->archdata.dmabounce = NULL;
|
||||
set_dma_ops(dev, NULL);
|
||||
|
||||
if (!device_info) {
|
||||
dev_warn(dev,
|
||||
"Never registered with dmabounce but attempting"
|
||||
"to unregister!\n");
|
||||
return;
|
||||
}
|
||||
|
||||
if (!list_empty(&device_info->safe_buffers)) {
|
||||
dev_err(dev,
|
||||
"Removing from dmabounce with pending buffers!\n");
|
||||
BUG();
|
||||
}
|
||||
|
||||
if (device_info->small.pool)
|
||||
dma_pool_destroy(device_info->small.pool);
|
||||
if (device_info->large.pool)
|
||||
dma_pool_destroy(device_info->large.pool);
|
||||
|
||||
#ifdef STATS
|
||||
if (device_info->attr_res == 0)
|
||||
device_remove_file(dev, &dev_attr_dmabounce_stats);
|
||||
#endif
|
||||
|
||||
kfree(device_info);
|
||||
|
||||
dev_info(dev, "dmabounce: device unregistered\n");
|
||||
}
|
||||
EXPORT_SYMBOL(dmabounce_unregister_dev);
|
||||
|
||||
MODULE_AUTHOR("Christopher Hoover <ch@hpl.hp.com>, Deepak Saxena <dsaxena@plexity.net>");
|
||||
MODULE_DESCRIPTION("Special dma_{map/unmap/dma_sync}_* routines for systems with limited DMA windows");
|
||||
MODULE_LICENSE("GPL");
|
|
@ -1389,70 +1389,9 @@ void sa1111_driver_unregister(struct sa1111_driver *driver)
|
|||
}
|
||||
EXPORT_SYMBOL(sa1111_driver_unregister);
|
||||
|
||||
#ifdef CONFIG_DMABOUNCE
|
||||
/*
|
||||
* According to the "Intel StrongARM SA-1111 Microprocessor Companion
|
||||
* Chip Specification Update" (June 2000), erratum #7, there is a
|
||||
* significant bug in the SA1111 SDRAM shared memory controller. If
|
||||
* an access to a region of memory above 1MB relative to the bank base,
|
||||
* it is important that address bit 10 _NOT_ be asserted. Depending
|
||||
* on the configuration of the RAM, bit 10 may correspond to one
|
||||
* of several different (processor-relative) address bits.
|
||||
*
|
||||
* This routine only identifies whether or not a given DMA address
|
||||
* is susceptible to the bug.
|
||||
*
|
||||
* This should only get called for sa1111_device types due to the
|
||||
* way we configure our device dma_masks.
|
||||
*/
|
||||
static int sa1111_needs_bounce(struct device *dev, dma_addr_t addr, size_t size)
|
||||
{
|
||||
/*
|
||||
* Section 4.6 of the "Intel StrongARM SA-1111 Development Module
|
||||
* User's Guide" mentions that jumpers R51 and R52 control the
|
||||
* target of SA-1111 DMA (either SDRAM bank 0 on Assabet, or
|
||||
* SDRAM bank 1 on Neponset). The default configuration selects
|
||||
* Assabet, so any address in bank 1 is necessarily invalid.
|
||||
*/
|
||||
return (machine_is_assabet() || machine_is_pfs168()) &&
|
||||
(addr >= 0xc8000000 || (addr + size) >= 0xc8000000);
|
||||
}
|
||||
|
||||
static int sa1111_notifier_call(struct notifier_block *n, unsigned long action,
|
||||
void *data)
|
||||
{
|
||||
struct sa1111_dev *dev = to_sa1111_device(data);
|
||||
|
||||
switch (action) {
|
||||
case BUS_NOTIFY_ADD_DEVICE:
|
||||
if (dev->dev.dma_mask && dev->dma_mask < 0xffffffffUL) {
|
||||
int ret = dmabounce_register_dev(&dev->dev, 1024, 4096,
|
||||
sa1111_needs_bounce);
|
||||
if (ret)
|
||||
dev_err(&dev->dev, "failed to register with dmabounce: %d\n", ret);
|
||||
}
|
||||
break;
|
||||
|
||||
case BUS_NOTIFY_DEL_DEVICE:
|
||||
if (dev->dev.dma_mask && dev->dma_mask < 0xffffffffUL)
|
||||
dmabounce_unregister_dev(&dev->dev);
|
||||
break;
|
||||
}
|
||||
return NOTIFY_OK;
|
||||
}
|
||||
|
||||
static struct notifier_block sa1111_bus_notifier = {
|
||||
.notifier_call = sa1111_notifier_call,
|
||||
};
|
||||
#endif
|
||||
|
||||
static int __init sa1111_init(void)
|
||||
{
|
||||
int ret = bus_register(&sa1111_bus_type);
|
||||
#ifdef CONFIG_DMABOUNCE
|
||||
if (ret == 0)
|
||||
bus_register_notifier(&sa1111_bus_type, &sa1111_bus_notifier);
|
||||
#endif
|
||||
if (ret == 0)
|
||||
platform_driver_register(&sa1111_device_driver);
|
||||
return ret;
|
||||
|
@ -1461,9 +1400,6 @@ static int __init sa1111_init(void)
|
|||
static void __exit sa1111_exit(void)
|
||||
{
|
||||
platform_driver_unregister(&sa1111_device_driver);
|
||||
#ifdef CONFIG_DMABOUNCE
|
||||
bus_unregister_notifier(&sa1111_bus_type, &sa1111_bus_notifier);
|
||||
#endif
|
||||
bus_unregister(&sa1111_bus_type);
|
||||
}
|
||||
|
||||
|
|
|
@@ -6,9 +6,6 @@
#define ASMARM_DEVICE_H

struct dev_archdata {
#ifdef CONFIG_DMABOUNCE
	struct dmabounce_device_info *dmabounce;
#endif
#ifdef CONFIG_ARM_DMA_USE_IOMMU
	struct dma_iommu_mapping *mapping;
#endif
@ -1,48 +1 @@
|
|||
/* SPDX-License-Identifier: GPL-2.0 */
|
||||
#ifndef ASM_ARM_DMA_DIRECT_H
|
||||
#define ASM_ARM_DMA_DIRECT_H 1
|
||||
|
||||
#include <asm/memory.h>
|
||||
|
||||
/*
|
||||
* dma_to_pfn/pfn_to_dma/virt_to_dma are architecture private
|
||||
* functions used internally by the DMA-mapping API to provide DMA
|
||||
* addresses. They must not be used by drivers.
|
||||
*/
|
||||
static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn)
|
||||
{
|
||||
if (dev && dev->dma_range_map)
|
||||
pfn = PFN_DOWN(translate_phys_to_dma(dev, PFN_PHYS(pfn)));
|
||||
return (dma_addr_t)__pfn_to_bus(pfn);
|
||||
}
|
||||
|
||||
static inline unsigned long dma_to_pfn(struct device *dev, dma_addr_t addr)
|
||||
{
|
||||
unsigned long pfn = __bus_to_pfn(addr);
|
||||
|
||||
if (dev && dev->dma_range_map)
|
||||
pfn = PFN_DOWN(translate_dma_to_phys(dev, PFN_PHYS(pfn)));
|
||||
return pfn;
|
||||
}
|
||||
|
||||
static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
|
||||
{
|
||||
if (dev)
|
||||
return pfn_to_dma(dev, virt_to_pfn(addr));
|
||||
|
||||
return (dma_addr_t)__virt_to_bus((unsigned long)(addr));
|
||||
}
|
||||
|
||||
static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
|
||||
{
|
||||
unsigned int offset = paddr & ~PAGE_MASK;
|
||||
return pfn_to_dma(dev, __phys_to_pfn(paddr)) + offset;
|
||||
}
|
||||
|
||||
static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dev_addr)
|
||||
{
|
||||
unsigned int offset = dev_addr & ~PAGE_MASK;
|
||||
return __pfn_to_phys(dma_to_pfn(dev, dev_addr)) + offset;
|
||||
}
|
||||
|
||||
#endif /* ASM_ARM_DMA_DIRECT_H */
|
||||
#include <mach/dma-direct.h>
|
||||
|
|
|
@ -1,128 +0,0 @@
|
|||
/* SPDX-License-Identifier: GPL-2.0 */
|
||||
#ifndef ASMARM_DMA_MAPPING_H
|
||||
#define ASMARM_DMA_MAPPING_H
|
||||
|
||||
#ifdef __KERNEL__
|
||||
|
||||
#include <linux/mm_types.h>
|
||||
#include <linux/scatterlist.h>
|
||||
|
||||
#include <xen/xen.h>
|
||||
#include <asm/xen/hypervisor.h>
|
||||
|
||||
extern const struct dma_map_ops arm_dma_ops;
|
||||
extern const struct dma_map_ops arm_coherent_dma_ops;
|
||||
|
||||
static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
|
||||
{
|
||||
if (IS_ENABLED(CONFIG_MMU) && !IS_ENABLED(CONFIG_ARM_LPAE))
|
||||
return &arm_dma_ops;
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/**
|
||||
* arm_dma_alloc - allocate consistent memory for DMA
|
||||
* @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
|
||||
* @size: required memory size
|
||||
* @handle: bus-specific DMA address
|
||||
* @attrs: optinal attributes that specific mapping properties
|
||||
*
|
||||
* Allocate some memory for a device for performing DMA. This function
|
||||
* allocates pages, and will return the CPU-viewed address, and sets @handle
|
||||
* to be the device-viewed address.
|
||||
*/
|
||||
extern void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
|
||||
gfp_t gfp, unsigned long attrs);
|
||||
|
||||
/**
|
||||
* arm_dma_free - free memory allocated by arm_dma_alloc
|
||||
* @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
|
||||
* @size: size of memory originally requested in dma_alloc_coherent
|
||||
* @cpu_addr: CPU-view address returned from dma_alloc_coherent
|
||||
* @handle: device-view address returned from dma_alloc_coherent
|
||||
* @attrs: optinal attributes that specific mapping properties
|
||||
*
|
||||
* Free (and unmap) a DMA buffer previously allocated by
|
||||
* arm_dma_alloc().
|
||||
*
|
||||
* References to memory and mappings associated with cpu_addr/handle
|
||||
* during and after this call executing are illegal.
|
||||
*/
|
||||
extern void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
|
||||
dma_addr_t handle, unsigned long attrs);
|
||||
|
||||
/**
|
||||
* arm_dma_mmap - map a coherent DMA allocation into user space
|
||||
* @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
|
||||
* @vma: vm_area_struct describing requested user mapping
|
||||
* @cpu_addr: kernel CPU-view address returned from dma_alloc_coherent
|
||||
* @handle: device-view address returned from dma_alloc_coherent
|
||||
* @size: size of memory originally requested in dma_alloc_coherent
|
||||
* @attrs: optinal attributes that specific mapping properties
|
||||
*
|
||||
* Map a coherent DMA buffer previously allocated by dma_alloc_coherent
|
||||
* into user space. The coherent DMA buffer must not be freed by the
|
||||
* driver until the user space mapping has been released.
|
||||
*/
|
||||
extern int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
|
||||
void *cpu_addr, dma_addr_t dma_addr, size_t size,
|
||||
unsigned long attrs);
|
||||
|
||||
/*
|
||||
* For SA-1111, IXP425, and ADI systems the dma-mapping functions are "magic"
|
||||
* and utilize bounce buffers as needed to work around limited DMA windows.
|
||||
*
|
||||
* On the SA-1111, a bug limits DMA to only certain regions of RAM.
|
||||
* On the IXP425, the PCI inbound window is 64MB (256MB total RAM)
|
||||
* On some ADI engineering systems, PCI inbound window is 32MB (12MB total RAM)
|
||||
*
|
||||
* The following are helper functions used by the dmabounce subystem
|
||||
*
|
||||
*/
|
||||
|
||||
/**
|
||||
* dmabounce_register_dev
|
||||
*
|
||||
* @dev: valid struct device pointer
|
||||
* @small_buf_size: size of buffers to use with small buffer pool
|
||||
* @large_buf_size: size of buffers to use with large buffer pool (can be 0)
|
||||
* @needs_bounce_fn: called to determine whether buffer needs bouncing
|
||||
*
|
||||
* This function should be called by low-level platform code to register
|
||||
* a device as requireing DMA buffer bouncing. The function will allocate
|
||||
* appropriate DMA pools for the device.
|
||||
*/
|
||||
extern int dmabounce_register_dev(struct device *, unsigned long,
|
||||
unsigned long, int (*)(struct device *, dma_addr_t, size_t));
|
||||
|
||||
/**
|
||||
* dmabounce_unregister_dev
|
||||
*
|
||||
* @dev: valid struct device pointer
|
||||
*
|
||||
* This function should be called by low-level platform code when device
|
||||
* that was previously registered with dmabounce_register_dev is removed
|
||||
* from the system.
|
||||
*
|
||||
*/
|
||||
extern void dmabounce_unregister_dev(struct device *);
|
||||
|
||||
|
||||
|
||||
/*
|
||||
* The scatter list versions of the above methods.
|
||||
*/
|
||||
extern int arm_dma_map_sg(struct device *, struct scatterlist *, int,
|
||||
enum dma_data_direction, unsigned long attrs);
|
||||
extern void arm_dma_unmap_sg(struct device *, struct scatterlist *, int,
|
||||
enum dma_data_direction, unsigned long attrs);
|
||||
extern void arm_dma_sync_sg_for_cpu(struct device *, struct scatterlist *, int,
|
||||
enum dma_data_direction);
|
||||
extern void arm_dma_sync_sg_for_device(struct device *, struct scatterlist *, int,
|
||||
enum dma_data_direction);
|
||||
extern int arm_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
|
||||
void *cpu_addr, dma_addr_t dma_addr, size_t size,
|
||||
unsigned long attrs);
|
||||
|
||||
#endif /* __KERNEL__ */
|
||||
#endif
|
|
@@ -378,8 +378,6 @@ static inline unsigned long __virt_to_idmap(unsigned long x)
#ifndef __virt_to_bus
#define __virt_to_bus __virt_to_phys
#define __bus_to_virt __phys_to_virt
#define __pfn_to_bus(x) __pfn_to_phys(x)
#define __bus_to_pfn(x) __phys_to_pfn(x)
#endif

/*
@@ -61,6 +61,7 @@ endmenu

# Footbridge support
config FOOTBRIDGE
	select ARCH_HAS_PHYS_TO_DMA
	bool

# Footbridge in host mode
@@ -12,6 +12,7 @@
#include <linux/init.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/dma-direct.h>
#include <video/vga.h>

#include <asm/page.h>
@@ -335,17 +336,19 @@ unsigned long __bus_to_virt(unsigned long res)
	return res;
}
EXPORT_SYMBOL(__bus_to_virt);

unsigned long __pfn_to_bus(unsigned long pfn)
#else
static inline unsigned long fb_bus_sdram_offset(void)
{
	return __pfn_to_phys(pfn) + (fb_bus_sdram_offset() - PHYS_OFFSET);
	return BUS_OFFSET;
}
EXPORT_SYMBOL(__pfn_to_bus);
#endif /* CONFIG_FOOTBRIDGE_ADDIN */

unsigned long __bus_to_pfn(unsigned long bus)
dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
{
	return __phys_to_pfn(bus - (fb_bus_sdram_offset() - PHYS_OFFSET));
	return paddr + (fb_bus_sdram_offset() - PHYS_OFFSET);
}
EXPORT_SYMBOL(__bus_to_pfn);

#endif
phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dev_addr)
{
	return dev_addr - (fb_bus_sdram_offset() - PHYS_OFFSET);
}
@@ -0,0 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef MACH_FOOTBRIDGE_DMA_DIRECT_H
#define MACH_FOOTBRIDGE_DMA_DIRECT_H 1

dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr);
phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dev_addr);

#endif /* MACH_FOOTBRIDGE_DMA_DIRECT_H */
@@ -26,8 +26,6 @@
#ifndef __ASSEMBLY__
extern unsigned long __virt_to_bus(unsigned long);
extern unsigned long __bus_to_virt(unsigned long);
extern unsigned long __pfn_to_bus(unsigned long);
extern unsigned long __bus_to_pfn(unsigned long);
#endif
#define __virt_to_bus __virt_to_bus
#define __bus_to_virt __bus_to_virt

@@ -42,8 +40,6 @@ extern unsigned long __bus_to_pfn(unsigned long);
#define BUS_OFFSET 0xe0000000
#define __virt_to_bus(x) ((x) + (BUS_OFFSET - PAGE_OFFSET))
#define __bus_to_virt(x) ((x) - (BUS_OFFSET - PAGE_OFFSET))
#define __pfn_to_bus(x) (__pfn_to_phys(x) + (BUS_OFFSET - PHYS_OFFSET))
#define __bus_to_pfn(x) __phys_to_pfn((x) - (BUS_OFFSET - PHYS_OFFSET))

#else
@@ -98,7 +98,7 @@ static int highbank_platform_notifier(struct notifier_block *nb,
	if (of_property_read_bool(dev->of_node, "dma-coherent")) {
		val = readl(sregs_base + reg);
		writel(val | 0xff01, sregs_base + reg);
		set_dma_ops(dev, &arm_coherent_dma_ops);
		dev->dma_coherent = true;
	}

	return NOTIFY_OK;
@@ -95,7 +95,7 @@ static int mvebu_hwcc_notifier(struct notifier_block *nb,

	if (event != BUS_NOTIFY_ADD_DEVICE)
		return NOTIFY_DONE;
	set_dma_ops(dev, &arm_coherent_dma_ops);
	dev->dma_coherent = true;

	return NOTIFY_OK;
}
@ -103,139 +103,6 @@ static struct arm_dma_buffer *arm_dma_buffer_find(void *virt)
|
|||
* before transfers and delay cache invalidation until transfer completion.
|
||||
*
|
||||
*/
|
||||
static void __dma_page_cpu_to_dev(struct page *, unsigned long,
|
||||
size_t, enum dma_data_direction);
|
||||
static void __dma_page_dev_to_cpu(struct page *, unsigned long,
|
||||
size_t, enum dma_data_direction);
|
||||
|
||||
/**
|
||||
* arm_dma_map_page - map a portion of a page for streaming DMA
|
||||
* @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
|
||||
* @page: page that buffer resides in
|
||||
* @offset: offset into page for start of buffer
|
||||
* @size: size of buffer to map
|
||||
* @dir: DMA transfer direction
|
||||
*
|
||||
* Ensure that any data held in the cache is appropriately discarded
|
||||
* or written back.
|
||||
*
|
||||
* The device owns this memory once this call has completed. The CPU
|
||||
* can regain ownership by calling dma_unmap_page().
|
||||
*/
|
||||
static dma_addr_t arm_dma_map_page(struct device *dev, struct page *page,
|
||||
unsigned long offset, size_t size, enum dma_data_direction dir,
|
||||
unsigned long attrs)
|
||||
{
|
||||
if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
|
||||
__dma_page_cpu_to_dev(page, offset, size, dir);
|
||||
return pfn_to_dma(dev, page_to_pfn(page)) + offset;
|
||||
}
|
||||
|
||||
static dma_addr_t arm_coherent_dma_map_page(struct device *dev, struct page *page,
|
||||
unsigned long offset, size_t size, enum dma_data_direction dir,
|
||||
unsigned long attrs)
|
||||
{
|
||||
return pfn_to_dma(dev, page_to_pfn(page)) + offset;
|
||||
}
|
||||
|
||||
/**
|
||||
* arm_dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
|
||||
* @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
|
||||
* @handle: DMA address of buffer
|
||||
* @size: size of buffer (same as passed to dma_map_page)
|
||||
* @dir: DMA transfer direction (same as passed to dma_map_page)
|
||||
*
|
||||
* Unmap a page streaming mode DMA translation. The handle and size
|
||||
* must match what was provided in the previous dma_map_page() call.
|
||||
* All other usages are undefined.
|
||||
*
|
||||
* After this call, reads by the CPU to the buffer are guaranteed to see
|
||||
* whatever the device wrote there.
|
||||
*/
|
||||
static void arm_dma_unmap_page(struct device *dev, dma_addr_t handle,
|
||||
size_t size, enum dma_data_direction dir, unsigned long attrs)
|
||||
{
|
||||
if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
|
||||
__dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
|
||||
handle & ~PAGE_MASK, size, dir);
|
||||
}
|
||||
|
||||
static void arm_dma_sync_single_for_cpu(struct device *dev,
|
||||
dma_addr_t handle, size_t size, enum dma_data_direction dir)
|
||||
{
|
||||
unsigned int offset = handle & (PAGE_SIZE - 1);
|
||||
struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
|
||||
__dma_page_dev_to_cpu(page, offset, size, dir);
|
||||
}
|
||||
|
||||
static void arm_dma_sync_single_for_device(struct device *dev,
|
||||
dma_addr_t handle, size_t size, enum dma_data_direction dir)
|
||||
{
|
||||
unsigned int offset = handle & (PAGE_SIZE - 1);
|
||||
struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
|
||||
__dma_page_cpu_to_dev(page, offset, size, dir);
|
||||
}
|
||||
|
||||
/*
|
||||
* Return whether the given device DMA address mask can be supported
|
||||
* properly. For example, if your device can only drive the low 24-bits
|
||||
* during bus mastering, then you would pass 0x00ffffff as the mask
|
||||
* to this function.
|
||||
*/
|
||||
static int arm_dma_supported(struct device *dev, u64 mask)
|
||||
{
|
||||
unsigned long max_dma_pfn = min(max_pfn - 1, arm_dma_pfn_limit);
|
||||
|
||||
/*
|
||||
* Translate the device's DMA mask to a PFN limit. This
|
||||
* PFN number includes the page which we can DMA to.
|
||||
*/
|
||||
return dma_to_pfn(dev, mask) >= max_dma_pfn;
|
||||
}
|
||||
|
||||
const struct dma_map_ops arm_dma_ops = {
|
||||
.alloc = arm_dma_alloc,
|
||||
.free = arm_dma_free,
|
||||
.alloc_pages = dma_direct_alloc_pages,
|
||||
.free_pages = dma_direct_free_pages,
|
||||
.mmap = arm_dma_mmap,
|
||||
.get_sgtable = arm_dma_get_sgtable,
|
||||
.map_page = arm_dma_map_page,
|
||||
.unmap_page = arm_dma_unmap_page,
|
||||
.map_sg = arm_dma_map_sg,
|
||||
.unmap_sg = arm_dma_unmap_sg,
|
||||
.map_resource = dma_direct_map_resource,
|
||||
.sync_single_for_cpu = arm_dma_sync_single_for_cpu,
|
||||
.sync_single_for_device = arm_dma_sync_single_for_device,
|
||||
.sync_sg_for_cpu = arm_dma_sync_sg_for_cpu,
|
||||
.sync_sg_for_device = arm_dma_sync_sg_for_device,
|
||||
.dma_supported = arm_dma_supported,
|
||||
.get_required_mask = dma_direct_get_required_mask,
|
||||
};
|
||||
EXPORT_SYMBOL(arm_dma_ops);
|
||||
|
||||
static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
|
||||
dma_addr_t *handle, gfp_t gfp, unsigned long attrs);
|
||||
static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_addr,
|
||||
dma_addr_t handle, unsigned long attrs);
|
||||
static int arm_coherent_dma_mmap(struct device *dev, struct vm_area_struct *vma,
|
||||
void *cpu_addr, dma_addr_t dma_addr, size_t size,
|
||||
unsigned long attrs);
|
||||
|
||||
const struct dma_map_ops arm_coherent_dma_ops = {
|
||||
.alloc = arm_coherent_dma_alloc,
|
||||
.free = arm_coherent_dma_free,
|
||||
.alloc_pages = dma_direct_alloc_pages,
|
||||
.free_pages = dma_direct_free_pages,
|
||||
.mmap = arm_coherent_dma_mmap,
|
||||
.get_sgtable = arm_dma_get_sgtable,
|
||||
.map_page = arm_coherent_dma_map_page,
|
||||
.map_sg = arm_dma_map_sg,
|
||||
.map_resource = dma_direct_map_resource,
|
||||
.dma_supported = arm_dma_supported,
|
||||
.get_required_mask = dma_direct_get_required_mask,
|
||||
};
|
||||
EXPORT_SYMBOL(arm_coherent_dma_ops);
|
||||
|
||||
static void __dma_clear_buffer(struct page *page, size_t size, int coherent_flag)
|
||||
{
|
||||
|
@ -725,7 +592,7 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
|
|||
if (page) {
|
||||
unsigned long flags;
|
||||
|
||||
*handle = pfn_to_dma(dev, page_to_pfn(page));
|
||||
*handle = phys_to_dma(dev, page_to_phys(page));
|
||||
buf->virt = args.want_vaddr ? addr : page;
|
||||
|
||||
spin_lock_irqsave(&arm_dma_bufs_lock, flags);
|
||||
|
@ -738,67 +605,6 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
|
|||
return args.want_vaddr ? addr : page;
|
||||
}
|
||||
|
||||
/*
|
||||
* Allocate DMA-coherent memory space and return both the kernel remapped
|
||||
* virtual and bus address for that space.
|
||||
*/
|
||||
void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
|
||||
gfp_t gfp, unsigned long attrs)
|
||||
{
|
||||
pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);
|
||||
|
||||
return __dma_alloc(dev, size, handle, gfp, prot, false,
|
||||
attrs, __builtin_return_address(0));
|
||||
}
|
||||
|
||||
static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
|
||||
dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
|
||||
{
|
||||
return __dma_alloc(dev, size, handle, gfp, PAGE_KERNEL, true,
|
||||
attrs, __builtin_return_address(0));
|
||||
}
|
||||
|
||||
static int __arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
|
||||
void *cpu_addr, dma_addr_t dma_addr, size_t size,
|
||||
unsigned long attrs)
|
||||
{
|
||||
int ret = -ENXIO;
|
||||
unsigned long nr_vma_pages = vma_pages(vma);
|
||||
unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
|
||||
unsigned long pfn = dma_to_pfn(dev, dma_addr);
|
||||
unsigned long off = vma->vm_pgoff;
|
||||
|
||||
if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
|
||||
return ret;
|
||||
|
||||
if (off < nr_pages && nr_vma_pages <= (nr_pages - off)) {
|
||||
ret = remap_pfn_range(vma, vma->vm_start,
|
||||
pfn + off,
|
||||
vma->vm_end - vma->vm_start,
|
||||
vma->vm_page_prot);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* Create userspace mapping for the DMA-coherent memory.
|
||||
*/
|
||||
static int arm_coherent_dma_mmap(struct device *dev, struct vm_area_struct *vma,
|
||||
void *cpu_addr, dma_addr_t dma_addr, size_t size,
|
||||
unsigned long attrs)
|
||||
{
|
||||
return __arm_dma_mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
|
||||
}
|
||||
|
||||
int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
|
||||
void *cpu_addr, dma_addr_t dma_addr, size_t size,
|
||||
unsigned long attrs)
|
||||
{
|
||||
vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
|
||||
return __arm_dma_mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
|
||||
}
|
||||
|
||||
/*
|
||||
* Free a buffer as defined by the above mapping.
|
||||
*/
|
||||
|
@ -806,7 +612,7 @@ static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
|
|||
dma_addr_t handle, unsigned long attrs,
|
||||
bool is_coherent)
|
||||
{
|
||||
struct page *page = pfn_to_page(dma_to_pfn(dev, handle));
|
||||
struct page *page = phys_to_page(dma_to_phys(dev, handle));
|
||||
struct arm_dma_buffer *buf;
|
||||
struct arm_dma_free_args args = {
|
||||
.dev = dev,
|
||||
|
@ -824,40 +630,6 @@ static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
|
|||
kfree(buf);
|
||||
}
|
||||
|
||||
void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
|
||||
dma_addr_t handle, unsigned long attrs)
|
||||
{
|
||||
__arm_dma_free(dev, size, cpu_addr, handle, attrs, false);
|
||||
}
|
||||
|
||||
static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_addr,
|
||||
dma_addr_t handle, unsigned long attrs)
|
||||
{
|
||||
__arm_dma_free(dev, size, cpu_addr, handle, attrs, true);
|
||||
}
|
||||
|
||||
int arm_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
|
||||
void *cpu_addr, dma_addr_t handle, size_t size,
|
||||
unsigned long attrs)
|
||||
{
|
||||
unsigned long pfn = dma_to_pfn(dev, handle);
|
||||
struct page *page;
|
||||
int ret;
|
||||
|
||||
/* If the PFN is not valid, we do not have a struct page */
|
||||
if (!pfn_valid(pfn))
|
||||
return -ENXIO;
|
||||
|
||||
page = pfn_to_page(pfn);
|
||||
|
||||
ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
|
||||
if (unlikely(ret))
|
||||
return ret;
|
||||
|
||||
sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void dma_cache_maint_page(struct page *page, unsigned long offset,
|
||||
size_t size, enum dma_data_direction dir,
|
||||
void (*op)(const void *, size_t, int))
|
||||
|
@ -907,8 +679,7 @@ static void dma_cache_maint_page(struct page *page, unsigned long offset,
|
|||
|
||||
/*
|
||||
* Make an area consistent for devices.
|
||||
* Note: Drivers should NOT use this function directly, as it will break
|
||||
* platforms with CONFIG_DMABOUNCE.
|
||||
* Note: Drivers should NOT use this function directly.
|
||||
* Use the driver DMA support - see dma-mapping.h (dma_sync_*)
|
||||
*/
|
||||
static void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
|
||||
|
@ -961,122 +732,6 @@ static void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
|
|||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* arm_dma_map_sg - map a set of SG buffers for streaming mode DMA
|
||||
* @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
|
||||
* @sg: list of buffers
|
||||
* @nents: number of buffers to map
|
||||
* @dir: DMA transfer direction
|
||||
*
|
||||
* Map a set of buffers described by scatterlist in streaming mode for DMA.
|
||||
* This is the scatter-gather version of the dma_map_single interface.
|
||||
* Here the scatter gather list elements are each tagged with the
|
||||
* appropriate dma address and length. They are obtained via
|
||||
* sg_dma_{address,length}.
|
||||
*
|
||||
* Device ownership issues as mentioned for dma_map_single are the same
|
||||
* here.
|
||||
*/
|
||||
int arm_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
|
||||
enum dma_data_direction dir, unsigned long attrs)
|
||||
{
|
||||
const struct dma_map_ops *ops = get_dma_ops(dev);
|
||||
struct scatterlist *s;
|
||||
int i, j, ret;
|
||||
|
||||
for_each_sg(sg, s, nents, i) {
|
||||
#ifdef CONFIG_NEED_SG_DMA_LENGTH
|
||||
s->dma_length = s->length;
|
||||
#endif
|
||||
s->dma_address = ops->map_page(dev, sg_page(s), s->offset,
|
||||
s->length, dir, attrs);
|
||||
if (dma_mapping_error(dev, s->dma_address)) {
|
||||
ret = -EIO;
|
||||
goto bad_mapping;
|
||||
}
|
||||
}
|
||||
return nents;
|
||||
|
||||
bad_mapping:
|
||||
for_each_sg(sg, s, i, j)
|
||||
ops->unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, attrs);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/**
|
||||
* arm_dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
|
||||
* @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
|
||||
* @sg: list of buffers
|
||||
* @nents: number of buffers to unmap (same as was passed to dma_map_sg)
|
||||
* @dir: DMA transfer direction (same as was passed to dma_map_sg)
|
||||
*
|
||||
* Unmap a set of streaming mode DMA translations. Again, CPU access
|
||||
* rules concerning calls here are the same as for dma_unmap_single().
|
||||
*/
|
||||
void arm_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
|
||||
enum dma_data_direction dir, unsigned long attrs)
|
||||
{
|
||||
const struct dma_map_ops *ops = get_dma_ops(dev);
|
||||
struct scatterlist *s;
|
||||
|
||||
int i;
|
||||
|
||||
for_each_sg(sg, s, nents, i)
|
||||
ops->unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, attrs);
|
||||
}
|
||||
|
||||
/**
|
||||
* arm_dma_sync_sg_for_cpu
|
||||
* @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
|
||||
* @sg: list of buffers
|
||||
* @nents: number of buffers to map (returned from dma_map_sg)
|
||||
* @dir: DMA transfer direction (same as was passed to dma_map_sg)
|
||||
*/
|
||||
void arm_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
|
||||
int nents, enum dma_data_direction dir)
|
||||
{
|
||||
const struct dma_map_ops *ops = get_dma_ops(dev);
|
||||
struct scatterlist *s;
|
||||
int i;
|
||||
|
||||
for_each_sg(sg, s, nents, i)
|
||||
ops->sync_single_for_cpu(dev, sg_dma_address(s), s->length,
|
||||
dir);
|
||||
}
|
||||
|
||||
/**
|
||||
* arm_dma_sync_sg_for_device
|
||||
* @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
|
||||
* @sg: list of buffers
|
||||
* @nents: number of buffers to map (returned from dma_map_sg)
|
||||
* @dir: DMA transfer direction (same as was passed to dma_map_sg)
|
||||
*/
|
||||
void arm_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
|
||||
int nents, enum dma_data_direction dir)
|
||||
{
|
||||
const struct dma_map_ops *ops = get_dma_ops(dev);
|
||||
struct scatterlist *s;
|
||||
int i;
|
||||
|
||||
for_each_sg(sg, s, nents, i)
|
||||
ops->sync_single_for_device(dev, sg_dma_address(s), s->length,
|
||||
dir);
|
||||
}
|
||||
|
||||
static const struct dma_map_ops *arm_get_dma_map_ops(bool coherent)
|
||||
{
|
||||
/*
|
||||
* When CONFIG_ARM_LPAE is set, physical address can extend above
|
||||
* 32-bits, which then can't be addressed by devices that only support
|
||||
* 32-bit DMA.
|
||||
* Use the generic dma-direct / swiotlb ops code in that case, as that
|
||||
* handles bounce buffering for us.
|
||||
*/
|
||||
if (IS_ENABLED(CONFIG_ARM_LPAE))
|
||||
return NULL;
|
||||
return coherent ? &arm_coherent_dma_ops : &arm_dma_ops;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_ARM_DMA_USE_IOMMU
|
||||
|
||||
static int __dma_info_to_prot(enum dma_data_direction dir, unsigned long attrs)
|
||||
|
@ -1423,13 +1078,13 @@ static void __iommu_free_atomic(struct device *dev, void *cpu_addr,
|
|||
__free_from_pool(cpu_addr, size);
|
||||
}
|
||||
|
||||
static void *__arm_iommu_alloc_attrs(struct device *dev, size_t size,
|
||||
dma_addr_t *handle, gfp_t gfp, unsigned long attrs,
|
||||
int coherent_flag)
|
||||
static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
|
||||
dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
|
||||
{
|
||||
pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);
|
||||
struct page **pages;
|
||||
void *addr = NULL;
|
||||
int coherent_flag = dev->dma_coherent ? COHERENT : NORMAL;
|
||||
|
||||
*handle = DMA_MAPPING_ERROR;
|
||||
size = PAGE_ALIGN(size);
|
||||
|
@ -1472,19 +1127,7 @@ err_buffer:
|
|||
return NULL;
|
||||
}
|
||||
|
||||
static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
|
||||
dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
|
||||
{
|
||||
return __arm_iommu_alloc_attrs(dev, size, handle, gfp, attrs, NORMAL);
|
||||
}
|
||||
|
||||
static void *arm_coherent_iommu_alloc_attrs(struct device *dev, size_t size,
|
||||
dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
|
||||
{
|
||||
return __arm_iommu_alloc_attrs(dev, size, handle, gfp, attrs, COHERENT);
|
||||
}
|
||||
|
||||
static int __arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
|
||||
static int arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
|
||||
void *cpu_addr, dma_addr_t dma_addr, size_t size,
|
||||
unsigned long attrs)
|
||||
{
|
||||
|
@ -1498,35 +1141,24 @@ static int __arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma
|
|||
if (vma->vm_pgoff >= nr_pages)
|
||||
return -ENXIO;
|
||||
|
||||
if (!dev->dma_coherent)
|
||||
vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
|
||||
|
||||
err = vm_map_pages(vma, pages, nr_pages);
|
||||
if (err)
|
||||
pr_err("Remapping memory failed: %d\n", err);
|
||||
|
||||
return err;
|
||||
}
|
||||
static int arm_iommu_mmap_attrs(struct device *dev,
|
||||
struct vm_area_struct *vma, void *cpu_addr,
|
||||
dma_addr_t dma_addr, size_t size, unsigned long attrs)
|
||||
{
|
||||
vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
|
||||
|
||||
return __arm_iommu_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, attrs);
|
||||
}
|
||||
|
||||
static int arm_coherent_iommu_mmap_attrs(struct device *dev,
|
||||
struct vm_area_struct *vma, void *cpu_addr,
|
||||
dma_addr_t dma_addr, size_t size, unsigned long attrs)
|
||||
{
|
||||
return __arm_iommu_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, attrs);
|
||||
}
|
||||
|
||||
/*
|
||||
* free a page as defined by the above mapping.
|
||||
* Must not be called with IRQs disabled.
|
||||
*/
|
||||
static void __arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
|
||||
dma_addr_t handle, unsigned long attrs, int coherent_flag)
|
||||
static void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
|
||||
dma_addr_t handle, unsigned long attrs)
|
||||
{
|
||||
int coherent_flag = dev->dma_coherent ? COHERENT : NORMAL;
|
||||
struct page **pages;
|
||||
size = PAGE_ALIGN(size);
|
||||
|
||||
|
@ -1548,19 +1180,6 @@ static void __arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_ad
|
|||
__iommu_free_buffer(dev, pages, size, attrs);
|
||||
}
|
||||
|
||||
static void arm_iommu_free_attrs(struct device *dev, size_t size,
|
||||
void *cpu_addr, dma_addr_t handle,
|
||||
unsigned long attrs)
|
||||
{
|
||||
__arm_iommu_free_attrs(dev, size, cpu_addr, handle, attrs, NORMAL);
|
||||
}
|
||||
|
||||
static void arm_coherent_iommu_free_attrs(struct device *dev, size_t size,
|
||||
void *cpu_addr, dma_addr_t handle, unsigned long attrs)
|
||||
{
|
||||
__arm_iommu_free_attrs(dev, size, cpu_addr, handle, attrs, COHERENT);
|
||||
}
|
||||
|
||||
static int arm_iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
|
||||
void *cpu_addr, dma_addr_t dma_addr,
|
||||
size_t size, unsigned long attrs)
|
||||
|
@ -1580,8 +1199,7 @@ static int arm_iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
|
|||
*/
|
||||
static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
|
||||
size_t size, dma_addr_t *handle,
|
||||
enum dma_data_direction dir, unsigned long attrs,
|
||||
bool is_coherent)
|
||||
enum dma_data_direction dir, unsigned long attrs)
|
||||
{
|
||||
struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
|
||||
dma_addr_t iova, iova_base;
|
||||
|
@ -1601,7 +1219,7 @@ static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
|
|||
phys_addr_t phys = page_to_phys(sg_page(s));
|
||||
unsigned int len = PAGE_ALIGN(s->offset + s->length);
|
||||
|
||||
if (!is_coherent && (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
|
||||
if (!dev->dma_coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
|
||||
__dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
|
||||
|
||||
prot = __dma_info_to_prot(dir, attrs);
|
||||
|
@ -1621,9 +1239,20 @@ fail:
|
|||
return ret;
|
||||
}
|
||||
|
||||
static int __iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents,
|
||||
enum dma_data_direction dir, unsigned long attrs,
|
||||
bool is_coherent)
|
||||
/**
|
||||
* arm_iommu_map_sg - map a set of SG buffers for streaming mode DMA
|
||||
* @dev: valid struct device pointer
|
||||
* @sg: list of buffers
|
||||
* @nents: number of buffers to map
|
||||
* @dir: DMA transfer direction
|
||||
*
|
||||
* Map a set of buffers described by scatterlist in streaming mode for DMA.
|
||||
* The scatter gather list elements are merged together (if possible) and
|
||||
* tagged with the appropriate dma address and length. They are obtained via
|
||||
* sg_dma_{address,length}.
|
||||
*/
|
||||
static int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg,
|
||||
int nents, enum dma_data_direction dir, unsigned long attrs)
|
||||
{
|
||||
struct scatterlist *s = sg, *dma = sg, *start = sg;
|
||||
int i, count = 0, ret;
|
||||
|
@ -1638,8 +1267,7 @@ static int __iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents,
|
|||
|
||||
if (s->offset || (size & ~PAGE_MASK) || size + s->length > max) {
|
||||
ret = __map_sg_chunk(dev, start, size,
|
||||
&dma->dma_address, dir, attrs,
|
||||
is_coherent);
|
||||
&dma->dma_address, dir, attrs);
|
||||
if (ret < 0)
|
||||
goto bad_mapping;
|
||||
|
||||
|
@ -1653,8 +1281,7 @@ static int __iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents,
|
|||
}
|
||||
size += s->length;
|
||||
}
|
||||
ret = __map_sg_chunk(dev, start, size, &dma->dma_address, dir, attrs,
|
||||
is_coherent);
|
||||
ret = __map_sg_chunk(dev, start, size, &dma->dma_address, dir, attrs);
|
||||
if (ret < 0)
|
||||
goto bad_mapping;
|
||||
|
||||
|
@ -1671,76 +1298,6 @@ bad_mapping:
|
|||
return -EINVAL;
|
||||
}
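/*
 * Illustrative sketch (not part of this patch): how a caller consumes the
 * merged segments described in the kernel-doc above.  dma_map_sg() may return
 * fewer DMA segments than the nents passed in; only sg_dma_address() and
 * sg_dma_len() of those returned entries are valid, while dma_unmap_sg()
 * still takes the original nents.  The function below is hypothetical.
 */
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

static int example_do_one_transfer(struct device *dev, struct scatterlist *sgl,
				   int nents)
{
	struct scatterlist *s;
	int count, i;

	count = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
	if (!count)
		return -ENOMEM;

	for_each_sg(sgl, s, count, i) {
		/* program sg_dma_address(s) / sg_dma_len(s) into the device */
	}

	dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
	return 0;
}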
|
||||
|
||||
/**
|
||||
* arm_coherent_iommu_map_sg - map a set of SG buffers for streaming mode DMA
|
||||
* @dev: valid struct device pointer
|
||||
* @sg: list of buffers
|
||||
* @nents: number of buffers to map
|
||||
* @dir: DMA transfer direction
|
||||
*
|
||||
* Map a set of i/o coherent buffers described by scatterlist in streaming
|
||||
* mode for DMA. The scatter gather list elements are merged together (if
|
||||
* possible) and tagged with the appropriate dma address and length. They are
|
||||
* obtained via sg_dma_{address,length}.
|
||||
*/
|
||||
static int arm_coherent_iommu_map_sg(struct device *dev, struct scatterlist *sg,
|
||||
int nents, enum dma_data_direction dir, unsigned long attrs)
|
||||
{
|
||||
return __iommu_map_sg(dev, sg, nents, dir, attrs, true);
|
||||
}
|
||||
|
||||
/**
|
||||
* arm_iommu_map_sg - map a set of SG buffers for streaming mode DMA
|
||||
* @dev: valid struct device pointer
|
||||
* @sg: list of buffers
|
||||
* @nents: number of buffers to map
|
||||
* @dir: DMA transfer direction
|
||||
*
|
||||
* Map a set of buffers described by scatterlist in streaming mode for DMA.
|
||||
* The scatter gather list elements are merged together (if possible) and
|
||||
* tagged with the appropriate dma address and length. They are obtained via
|
||||
* sg_dma_{address,length}.
|
||||
*/
|
||||
static int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg,
|
||||
int nents, enum dma_data_direction dir, unsigned long attrs)
|
||||
{
|
||||
return __iommu_map_sg(dev, sg, nents, dir, attrs, false);
|
||||
}
|
||||
|
||||
static void __iommu_unmap_sg(struct device *dev, struct scatterlist *sg,
|
||||
int nents, enum dma_data_direction dir,
|
||||
unsigned long attrs, bool is_coherent)
|
||||
{
|
||||
struct scatterlist *s;
|
||||
int i;
|
||||
|
||||
for_each_sg(sg, s, nents, i) {
|
||||
if (sg_dma_len(s))
|
||||
__iommu_remove_mapping(dev, sg_dma_address(s),
|
||||
sg_dma_len(s));
|
||||
if (!is_coherent && (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
|
||||
__dma_page_dev_to_cpu(sg_page(s), s->offset,
|
||||
s->length, dir);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* arm_coherent_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
|
||||
* @dev: valid struct device pointer
|
||||
* @sg: list of buffers
|
||||
* @nents: number of buffers to unmap (same as was passed to dma_map_sg)
|
||||
* @dir: DMA transfer direction (same as was passed to dma_map_sg)
|
||||
*
|
||||
* Unmap a set of streaming mode DMA translations. Again, CPU access
|
||||
* rules concerning calls here are the same as for dma_unmap_single().
|
||||
*/
|
||||
static void arm_coherent_iommu_unmap_sg(struct device *dev,
|
||||
struct scatterlist *sg, int nents, enum dma_data_direction dir,
|
||||
unsigned long attrs)
|
||||
{
|
||||
__iommu_unmap_sg(dev, sg, nents, dir, attrs, true);
|
||||
}
|
||||
|
||||
/**
|
||||
* arm_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
|
||||
* @dev: valid struct device pointer
|
||||
|
@ -1756,7 +1313,17 @@ static void arm_iommu_unmap_sg(struct device *dev,
|
|||
enum dma_data_direction dir,
|
||||
unsigned long attrs)
|
||||
{
|
||||
__iommu_unmap_sg(dev, sg, nents, dir, attrs, false);
|
||||
struct scatterlist *s;
|
||||
int i;
|
||||
|
||||
for_each_sg(sg, s, nents, i) {
|
||||
if (sg_dma_len(s))
|
||||
__iommu_remove_mapping(dev, sg_dma_address(s),
|
||||
sg_dma_len(s));
|
||||
if (!dev->dma_coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
|
||||
__dma_page_dev_to_cpu(sg_page(s), s->offset,
|
||||
s->length, dir);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -1773,6 +1340,9 @@ static void arm_iommu_sync_sg_for_cpu(struct device *dev,
|
|||
struct scatterlist *s;
|
||||
int i;
|
||||
|
||||
if (dev->dma_coherent)
|
||||
return;
|
||||
|
||||
for_each_sg(sg, s, nents, i)
|
||||
__dma_page_dev_to_cpu(sg_page(s), s->offset, s->length, dir);
|
||||
|
||||
|
@ -1792,22 +1362,24 @@ static void arm_iommu_sync_sg_for_device(struct device *dev,
|
|||
struct scatterlist *s;
|
||||
int i;
|
||||
|
||||
if (dev->dma_coherent)
|
||||
return;
|
||||
|
||||
for_each_sg(sg, s, nents, i)
|
||||
__dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* arm_coherent_iommu_map_page
|
||||
* arm_iommu_map_page
|
||||
* @dev: valid struct device pointer
|
||||
* @page: page that buffer resides in
|
||||
* @offset: offset into page for start of buffer
|
||||
* @size: size of buffer to map
|
||||
* @dir: DMA transfer direction
|
||||
*
|
||||
* Coherent IOMMU aware version of arm_dma_map_page()
|
||||
* IOMMU aware version of arm_dma_map_page()
|
||||
*/
|
||||
static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *page,
|
||||
static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page,
|
||||
unsigned long offset, size_t size, enum dma_data_direction dir,
|
||||
unsigned long attrs)
|
||||
{
|
||||
|
@ -1815,6 +1387,9 @@ static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *p
|
|||
dma_addr_t dma_addr;
|
||||
int ret, prot, len = PAGE_ALIGN(size + offset);
|
||||
|
||||
if (!dev->dma_coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
|
||||
__dma_page_cpu_to_dev(page, offset, size, dir);
|
||||
|
||||
dma_addr = __alloc_iova(mapping, len);
|
||||
if (dma_addr == DMA_MAPPING_ERROR)
|
||||
return dma_addr;
|
||||
|
@ -1831,50 +1406,6 @@ fail:
|
|||
return DMA_MAPPING_ERROR;
|
||||
}
|
||||
|
||||
/**
|
||||
* arm_iommu_map_page
|
||||
* @dev: valid struct device pointer
|
||||
* @page: page that buffer resides in
|
||||
* @offset: offset into page for start of buffer
|
||||
* @size: size of buffer to map
|
||||
* @dir: DMA transfer direction
|
||||
*
|
||||
* IOMMU aware version of arm_dma_map_page()
|
||||
*/
|
||||
static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page,
|
||||
unsigned long offset, size_t size, enum dma_data_direction dir,
|
||||
unsigned long attrs)
|
||||
{
|
||||
if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
|
||||
__dma_page_cpu_to_dev(page, offset, size, dir);
|
||||
|
||||
return arm_coherent_iommu_map_page(dev, page, offset, size, dir, attrs);
|
||||
}
|
||||
|
||||
/**
|
||||
* arm_coherent_iommu_unmap_page
|
||||
* @dev: valid struct device pointer
|
||||
* @handle: DMA address of buffer
|
||||
* @size: size of buffer (same as passed to dma_map_page)
|
||||
* @dir: DMA transfer direction (same as passed to dma_map_page)
|
||||
*
|
||||
* Coherent IOMMU aware version of arm_dma_unmap_page()
|
||||
*/
|
||||
static void arm_coherent_iommu_unmap_page(struct device *dev, dma_addr_t handle,
|
||||
size_t size, enum dma_data_direction dir, unsigned long attrs)
|
||||
{
|
||||
struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
|
||||
dma_addr_t iova = handle & PAGE_MASK;
|
||||
int offset = handle & ~PAGE_MASK;
|
||||
int len = PAGE_ALIGN(size + offset);
|
||||
|
||||
if (!iova)
|
||||
return;
|
||||
|
||||
iommu_unmap(mapping->domain, iova, len);
|
||||
__free_iova(mapping, iova, len);
|
||||
}
|
||||
|
||||
/**
|
||||
* arm_iommu_unmap_page
|
||||
* @dev: valid struct device pointer
|
||||
|
@ -1889,15 +1420,17 @@ static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle,
|
|||
{
|
||||
struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
|
||||
dma_addr_t iova = handle & PAGE_MASK;
|
||||
struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
|
||||
struct page *page;
|
||||
int offset = handle & ~PAGE_MASK;
|
||||
int len = PAGE_ALIGN(size + offset);
|
||||
|
||||
if (!iova)
|
||||
return;
|
||||
|
||||
if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
|
||||
if (!dev->dma_coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
|
||||
page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
|
||||
__dma_page_dev_to_cpu(page, offset, size, dir);
|
||||
}
|
||||
|
||||
iommu_unmap(mapping->domain, iova, len);
|
||||
__free_iova(mapping, iova, len);
|
||||
|
@ -1965,12 +1498,13 @@ static void arm_iommu_sync_single_for_cpu(struct device *dev,
|
|||
{
|
||||
struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
|
||||
dma_addr_t iova = handle & PAGE_MASK;
|
||||
struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
|
||||
struct page *page;
|
||||
unsigned int offset = handle & ~PAGE_MASK;
|
||||
|
||||
if (!iova)
|
||||
if (dev->dma_coherent || !iova)
|
||||
return;
|
||||
|
||||
page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
|
||||
__dma_page_dev_to_cpu(page, offset, size, dir);
|
||||
}
|
||||
|
||||
|
@ -1979,12 +1513,13 @@ static void arm_iommu_sync_single_for_device(struct device *dev,
|
|||
{
|
||||
struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
|
||||
dma_addr_t iova = handle & PAGE_MASK;
|
||||
struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
|
||||
struct page *page;
|
||||
unsigned int offset = handle & ~PAGE_MASK;
|
||||
|
||||
if (!iova)
|
||||
if (dev->dma_coherent || !iova)
|
||||
return;
|
||||
|
||||
page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
|
||||
__dma_page_cpu_to_dev(page, offset, size, dir);
|
||||
}
|
||||
|
||||
|
@ -2006,26 +1541,6 @@ static const struct dma_map_ops iommu_ops = {
|
|||
|
||||
.map_resource = arm_iommu_map_resource,
|
||||
.unmap_resource = arm_iommu_unmap_resource,
|
||||
|
||||
.dma_supported = arm_dma_supported,
|
||||
};
|
||||
|
||||
static const struct dma_map_ops iommu_coherent_ops = {
|
||||
.alloc = arm_coherent_iommu_alloc_attrs,
|
||||
.free = arm_coherent_iommu_free_attrs,
|
||||
.mmap = arm_coherent_iommu_mmap_attrs,
|
||||
.get_sgtable = arm_iommu_get_sgtable,
|
||||
|
||||
.map_page = arm_coherent_iommu_map_page,
|
||||
.unmap_page = arm_coherent_iommu_unmap_page,
|
||||
|
||||
.map_sg = arm_coherent_iommu_map_sg,
|
||||
.unmap_sg = arm_coherent_iommu_unmap_sg,
|
||||
|
||||
.map_resource = arm_iommu_map_resource,
|
||||
.unmap_resource = arm_iommu_unmap_resource,
|
||||
|
||||
.dma_supported = arm_dma_supported,
|
||||
};
|
||||
|
||||
/**
|
||||
|
@ -2201,40 +1716,32 @@ void arm_iommu_detach_device(struct device *dev)
|
|||
iommu_detach_device(mapping->domain, dev);
|
||||
kref_put(&mapping->kref, release_iommu_mapping);
|
||||
to_dma_iommu_mapping(dev) = NULL;
|
||||
set_dma_ops(dev, arm_get_dma_map_ops(dev->archdata.dma_coherent));
|
||||
set_dma_ops(dev, NULL);
|
||||
|
||||
pr_debug("Detached IOMMU controller from %s device.\n", dev_name(dev));
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(arm_iommu_detach_device);
|
||||
|
||||
static const struct dma_map_ops *arm_get_iommu_dma_map_ops(bool coherent)
|
||||
{
|
||||
return coherent ? &iommu_coherent_ops : &iommu_ops;
|
||||
}
|
||||
|
||||
static bool arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size,
|
||||
const struct iommu_ops *iommu)
|
||||
static void arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size,
|
||||
const struct iommu_ops *iommu, bool coherent)
|
||||
{
|
||||
struct dma_iommu_mapping *mapping;
|
||||
|
||||
if (!iommu)
|
||||
return false;
|
||||
|
||||
mapping = arm_iommu_create_mapping(dev->bus, dma_base, size);
|
||||
if (IS_ERR(mapping)) {
|
||||
pr_warn("Failed to create %llu-byte IOMMU mapping for device %s\n",
|
||||
size, dev_name(dev));
|
||||
return false;
|
||||
return;
|
||||
}
|
||||
|
||||
if (__arm_iommu_attach_device(dev, mapping)) {
|
||||
pr_warn("Failed to attached device %s to IOMMU_mapping\n",
|
||||
dev_name(dev));
|
||||
arm_iommu_release_mapping(mapping);
|
||||
return false;
|
||||
return;
|
||||
}
|
||||
|
||||
return true;
|
||||
set_dma_ops(dev, &iommu_ops);
|
||||
}
|
||||
|
||||
static void arm_teardown_iommu_dma_ops(struct device *dev)
|
||||
|
@ -2250,27 +1757,20 @@ static void arm_teardown_iommu_dma_ops(struct device *dev)
|
|||
|
||||
#else
|
||||
|
||||
static bool arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size,
|
||||
const struct iommu_ops *iommu)
|
||||
static void arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size,
|
||||
const struct iommu_ops *iommu, bool coherent)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
static void arm_teardown_iommu_dma_ops(struct device *dev) { }
|
||||
|
||||
#define arm_get_iommu_dma_map_ops arm_get_dma_map_ops
|
||||
|
||||
#endif /* CONFIG_ARM_DMA_USE_IOMMU */
|
||||
|
||||
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
|
||||
const struct iommu_ops *iommu, bool coherent)
|
||||
{
|
||||
const struct dma_map_ops *dma_ops;
|
||||
|
||||
dev->archdata.dma_coherent = coherent;
|
||||
#ifdef CONFIG_SWIOTLB
|
||||
dev->dma_coherent = coherent;
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Don't override the dma_ops if they have already been set. Ideally
|
||||
|
@ -2280,12 +1780,8 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
|
|||
if (dev->dma_ops)
|
||||
return;
|
||||
|
||||
if (arm_setup_iommu_dma_ops(dev, dma_base, size, iommu))
|
||||
dma_ops = arm_get_iommu_dma_map_ops(coherent);
|
||||
else
|
||||
dma_ops = arm_get_dma_map_ops(coherent);
|
||||
|
||||
set_dma_ops(dev, dma_ops);
|
||||
if (iommu)
|
||||
arm_setup_iommu_dma_ops(dev, dma_base, size, iommu, coherent);
|
||||
|
||||
xen_setup_dma_ops(dev);
|
||||
dev->archdata.dma_ops_setup = true;
|
||||
|
@ -2301,7 +1797,6 @@ void arch_teardown_dma_ops(struct device *dev)
|
|||
set_dma_ops(dev, NULL);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_SWIOTLB
|
||||
void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
|
||||
enum dma_data_direction dir)
|
||||
{
|
||||
|
@ -2329,4 +1824,3 @@ void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
|
|||
{
|
||||
__arm_dma_free(dev, size, cpu_addr, dma_handle, attrs, false);
|
||||
}
|
||||
#endif /* CONFIG_SWIOTLB */
|
||||
|
|
|
@ -1060,6 +1060,7 @@ int ata_scsi_dev_config(struct scsi_device *sdev, struct ata_device *dev)
|
|||
dev->flags |= ATA_DFLAG_NO_UNLOAD;
|
||||
|
||||
/* configure max sectors */
|
||||
dev->max_sectors = min(dev->max_sectors, sdev->host->max_sectors);
|
||||
blk_queue_max_hw_sectors(q, dev->max_sectors);
|
||||
|
||||
if (dev->class == ATA_DEV_ATAPI) {
|
||||
|
|
|
@ -274,33 +274,6 @@ static int rdma_rw_init_single_wr(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
|
|||
return 1;
|
||||
}
|
||||
|
||||
static void rdma_rw_unmap_sg(struct ib_device *dev, struct scatterlist *sg,
|
||||
u32 sg_cnt, enum dma_data_direction dir)
|
||||
{
|
||||
if (is_pci_p2pdma_page(sg_page(sg)))
|
||||
pci_p2pdma_unmap_sg(dev->dma_device, sg, sg_cnt, dir);
|
||||
else
|
||||
ib_dma_unmap_sg(dev, sg, sg_cnt, dir);
|
||||
}
|
||||
|
||||
static int rdma_rw_map_sgtable(struct ib_device *dev, struct sg_table *sgt,
|
||||
enum dma_data_direction dir)
|
||||
{
|
||||
int nents;
|
||||
|
||||
if (is_pci_p2pdma_page(sg_page(sgt->sgl))) {
|
||||
if (WARN_ON_ONCE(ib_uses_virt_dma(dev)))
|
||||
return 0;
|
||||
nents = pci_p2pdma_map_sg(dev->dma_device, sgt->sgl,
|
||||
sgt->orig_nents, dir);
|
||||
if (!nents)
|
||||
return -EIO;
|
||||
sgt->nents = nents;
|
||||
return 0;
|
||||
}
|
||||
return ib_dma_map_sgtable_attrs(dev, sgt, dir, 0);
|
||||
}
|
||||
|
||||
/**
|
||||
* rdma_rw_ctx_init - initialize a RDMA READ/WRITE context
|
||||
* @ctx: context to initialize
|
||||
|
@ -327,7 +300,7 @@ int rdma_rw_ctx_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u32 port_num,
|
|||
};
|
||||
int ret;
|
||||
|
||||
ret = rdma_rw_map_sgtable(dev, &sgt, dir);
|
||||
ret = ib_dma_map_sgtable_attrs(dev, &sgt, dir, 0);
|
||||
if (ret)
|
||||
return ret;
|
||||
sg_cnt = sgt.nents;
|
||||
|
@ -366,7 +339,7 @@ int rdma_rw_ctx_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u32 port_num,
|
|||
return ret;
|
||||
|
||||
out_unmap_sg:
|
||||
rdma_rw_unmap_sg(dev, sgt.sgl, sgt.orig_nents, dir);
|
||||
ib_dma_unmap_sgtable_attrs(dev, &sgt, dir, 0);
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL(rdma_rw_ctx_init);
|
||||
|
@ -414,12 +387,12 @@ int rdma_rw_ctx_signature_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
|
|||
return -EINVAL;
|
||||
}
|
||||
|
||||
ret = rdma_rw_map_sgtable(dev, &sgt, dir);
|
||||
ret = ib_dma_map_sgtable_attrs(dev, &sgt, dir, 0);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
if (prot_sg_cnt) {
|
||||
ret = rdma_rw_map_sgtable(dev, &prot_sgt, dir);
|
||||
ret = ib_dma_map_sgtable_attrs(dev, &prot_sgt, dir, 0);
|
||||
if (ret)
|
||||
goto out_unmap_sg;
|
||||
}
|
||||
|
@ -486,9 +459,9 @@ out_free_ctx:
|
|||
kfree(ctx->reg);
|
||||
out_unmap_prot_sg:
|
||||
if (prot_sgt.nents)
|
||||
rdma_rw_unmap_sg(dev, prot_sgt.sgl, prot_sgt.orig_nents, dir);
|
||||
ib_dma_unmap_sgtable_attrs(dev, &prot_sgt, dir, 0);
|
||||
out_unmap_sg:
|
||||
rdma_rw_unmap_sg(dev, sgt.sgl, sgt.orig_nents, dir);
|
||||
ib_dma_unmap_sgtable_attrs(dev, &sgt, dir, 0);
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL(rdma_rw_ctx_signature_init);
|
||||
|
@ -621,7 +594,7 @@ void rdma_rw_ctx_destroy(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
|
|||
break;
|
||||
}
|
||||
|
||||
rdma_rw_unmap_sg(qp->pd->device, sg, sg_cnt, dir);
|
||||
ib_dma_unmap_sg(qp->pd->device, sg, sg_cnt, dir);
|
||||
}
|
||||
EXPORT_SYMBOL(rdma_rw_ctx_destroy);
|
||||
|
||||
|
@ -649,8 +622,8 @@ void rdma_rw_ctx_destroy_signature(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
|
|||
kfree(ctx->reg);
|
||||
|
||||
if (prot_sg_cnt)
|
||||
rdma_rw_unmap_sg(qp->pd->device, prot_sg, prot_sg_cnt, dir);
|
||||
rdma_rw_unmap_sg(qp->pd->device, sg, sg_cnt, dir);
|
||||
ib_dma_unmap_sg(qp->pd->device, prot_sg, prot_sg_cnt, dir);
|
||||
ib_dma_unmap_sg(qp->pd->device, sg, sg_cnt, dir);
|
||||
}
|
||||
EXPORT_SYMBOL(rdma_rw_ctx_destroy_signature);
|
||||
|
||||
|
|
|
@ -21,6 +21,7 @@
|
|||
#include <linux/iova.h>
|
||||
#include <linux/irq.h>
|
||||
#include <linux/list_sort.h>
|
||||
#include <linux/memremap.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/mutex.h>
|
||||
#include <linux/pci.h>
|
||||
|
@ -1062,15 +1063,30 @@ static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
|
|||
|
||||
for_each_sg(sg, s, nents, i) {
|
||||
/* Restore this segment's original unaligned fields first */
|
||||
dma_addr_t s_dma_addr = sg_dma_address(s);
|
||||
unsigned int s_iova_off = sg_dma_address(s);
|
||||
unsigned int s_length = sg_dma_len(s);
|
||||
unsigned int s_iova_len = s->length;
|
||||
|
||||
s->offset += s_iova_off;
|
||||
s->length = s_length;
|
||||
sg_dma_address(s) = DMA_MAPPING_ERROR;
|
||||
sg_dma_len(s) = 0;
|
||||
|
||||
if (sg_is_dma_bus_address(s)) {
|
||||
if (i > 0)
|
||||
cur = sg_next(cur);
|
||||
|
||||
sg_dma_unmark_bus_address(s);
|
||||
sg_dma_address(cur) = s_dma_addr;
|
||||
sg_dma_len(cur) = s_length;
|
||||
sg_dma_mark_bus_address(cur);
|
||||
count++;
|
||||
cur_len = 0;
|
||||
continue;
|
||||
}
|
||||
|
||||
s->offset += s_iova_off;
|
||||
s->length = s_length;
|
||||
|
||||
/*
|
||||
* Now fill in the real DMA data. If...
|
||||
* - there is a valid output segment to append to
|
||||
|
@ -1111,10 +1127,14 @@ static void __invalidate_sg(struct scatterlist *sg, int nents)
|
|||
int i;
|
||||
|
||||
for_each_sg(sg, s, nents, i) {
|
||||
if (sg_dma_address(s) != DMA_MAPPING_ERROR)
|
||||
s->offset += sg_dma_address(s);
|
||||
if (sg_dma_len(s))
|
||||
s->length = sg_dma_len(s);
|
||||
if (sg_is_dma_bus_address(s)) {
|
||||
sg_dma_unmark_bus_address(s);
|
||||
} else {
|
||||
if (sg_dma_address(s) != DMA_MAPPING_ERROR)
|
||||
s->offset += sg_dma_address(s);
|
||||
if (sg_dma_len(s))
|
||||
s->length = sg_dma_len(s);
|
||||
}
|
||||
sg_dma_address(s) = DMA_MAPPING_ERROR;
|
||||
sg_dma_len(s) = 0;
|
||||
}
|
||||
|
@ -1167,6 +1187,8 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
|
|||
struct iova_domain *iovad = &cookie->iovad;
|
||||
struct scatterlist *s, *prev = NULL;
|
||||
int prot = dma_info_to_prot(dir, dev_is_dma_coherent(dev), attrs);
|
||||
struct pci_p2pdma_map_state p2pdma_state = {};
|
||||
enum pci_p2pdma_map_type map;
|
||||
dma_addr_t iova;
|
||||
size_t iova_len = 0;
|
||||
unsigned long mask = dma_get_seg_boundary(dev);
|
||||
|
@ -1196,6 +1218,30 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
|
|||
size_t s_length = s->length;
|
||||
size_t pad_len = (mask - iova_len + 1) & mask;
|
||||
|
||||
if (is_pci_p2pdma_page(sg_page(s))) {
|
||||
map = pci_p2pdma_map_segment(&p2pdma_state, dev, s);
|
||||
switch (map) {
|
||||
case PCI_P2PDMA_MAP_BUS_ADDR:
|
||||
/*
|
||||
* iommu_map_sg() will skip this segment as
|
||||
* it is marked as a bus address,
|
||||
* __finalise_sg() will copy the dma address
|
||||
* into the output segment.
|
||||
*/
|
||||
continue;
|
||||
case PCI_P2PDMA_MAP_THRU_HOST_BRIDGE:
|
||||
/*
|
||||
* Mapping through host bridge should be
|
||||
* mapped with regular IOVAs, thus we
|
||||
* do nothing here and continue below.
|
||||
*/
|
||||
break;
|
||||
default:
|
||||
ret = -EREMOTEIO;
|
||||
goto out_restore_sg;
|
||||
}
|
||||
}
|
||||
|
||||
sg_dma_address(s) = s_iova_off;
|
||||
sg_dma_len(s) = s_length;
|
||||
s->offset -= s_iova_off;
|
||||
|
@ -1224,6 +1270,9 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
|
|||
prev = s;
|
||||
}
|
||||
|
||||
if (!iova_len)
|
||||
return __finalise_sg(dev, sg, nents, 0);
|
||||
|
||||
iova = iommu_dma_alloc_iova(domain, iova_len, dma_get_mask(dev), dev);
|
||||
if (!iova) {
|
||||
ret = -ENOMEM;
|
||||
|
@ -1245,7 +1294,7 @@ out_free_iova:
|
|||
out_restore_sg:
|
||||
__invalidate_sg(sg, nents);
|
||||
out:
|
||||
if (ret != -ENOMEM)
|
||||
if (ret != -ENOMEM && ret != -EREMOTEIO)
|
||||
return -EINVAL;
|
||||
return ret;
|
||||
}
|
||||
|
@ -1253,7 +1302,7 @@ out:
|
|||
static void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
|
||||
int nents, enum dma_data_direction dir, unsigned long attrs)
|
||||
{
|
||||
dma_addr_t start, end;
|
||||
dma_addr_t end = 0, start;
|
||||
struct scatterlist *tmp;
|
||||
int i;
|
||||
|
||||
|
@ -1267,16 +1316,37 @@ static void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
|
|||
|
||||
/*
|
||||
* The scatterlist segments are mapped into a single
|
||||
* contiguous IOVA allocation, so this is incredibly easy.
|
||||
* contiguous IOVA allocation, the start and end points
|
||||
* just have to be determined.
|
||||
*/
|
||||
start = sg_dma_address(sg);
|
||||
for_each_sg(sg_next(sg), tmp, nents - 1, i) {
|
||||
for_each_sg(sg, tmp, nents, i) {
|
||||
if (sg_is_dma_bus_address(tmp)) {
|
||||
sg_dma_unmark_bus_address(tmp);
|
||||
continue;
|
||||
}
|
||||
|
||||
if (sg_dma_len(tmp) == 0)
|
||||
break;
|
||||
sg = tmp;
|
||||
|
||||
start = sg_dma_address(tmp);
|
||||
break;
|
||||
}
|
||||
end = sg_dma_address(sg) + sg_dma_len(sg);
|
||||
__iommu_dma_unmap(dev, start, end - start);
|
||||
|
||||
nents -= i;
|
||||
for_each_sg(tmp, tmp, nents, i) {
|
||||
if (sg_is_dma_bus_address(tmp)) {
|
||||
sg_dma_unmark_bus_address(tmp);
|
||||
continue;
|
||||
}
|
||||
|
||||
if (sg_dma_len(tmp) == 0)
|
||||
break;
|
||||
|
||||
end = sg_dma_address(tmp) + sg_dma_len(tmp);
|
||||
}
|
||||
|
||||
if (end)
|
||||
__iommu_dma_unmap(dev, start, end - start);
|
||||
}
|
||||
|
||||
static dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
|
||||
|
@ -1468,7 +1538,13 @@ static unsigned long iommu_dma_get_merge_boundary(struct device *dev)
|
|||
return (1UL << __ffs(domain->pgsize_bitmap)) - 1;
|
||||
}
|
||||
|
||||
static size_t iommu_dma_opt_mapping_size(void)
|
||||
{
|
||||
return iova_rcache_range();
|
||||
}
|
||||
|
||||
static const struct dma_map_ops iommu_dma_ops = {
|
||||
.flags = DMA_F_PCI_P2PDMA_SUPPORTED,
|
||||
.alloc = iommu_dma_alloc,
|
||||
.free = iommu_dma_free,
|
||||
.alloc_pages = dma_common_alloc_pages,
|
||||
|
@ -1488,6 +1564,7 @@ static const struct dma_map_ops iommu_dma_ops = {
|
|||
.map_resource = iommu_dma_map_resource,
|
||||
.unmap_resource = iommu_dma_unmap_resource,
|
||||
.get_merge_boundary = iommu_dma_get_merge_boundary,
|
||||
.opt_mapping_size = iommu_dma_opt_mapping_size,
|
||||
};
|
||||
|
||||
/*
|
||||
|
|
|
@ -2460,6 +2460,9 @@ static ssize_t __iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
|
|||
len = 0;
|
||||
}
|
||||
|
||||
if (sg_is_dma_bus_address(sg))
|
||||
goto next;
|
||||
|
||||
if (len) {
|
||||
len += sg->length;
|
||||
} else {
|
||||
|
@ -2467,6 +2470,7 @@ static ssize_t __iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
|
|||
start = s_phys;
|
||||
}
|
||||
|
||||
next:
|
||||
if (++i < nents)
|
||||
sg = sg_next(sg);
|
||||
}
|
||||
|
|
|
@ -26,6 +26,11 @@ static unsigned long iova_rcache_get(struct iova_domain *iovad,
|
|||
static void free_cpu_cached_iovas(unsigned int cpu, struct iova_domain *iovad);
|
||||
static void free_iova_rcaches(struct iova_domain *iovad);
|
||||
|
||||
unsigned long iova_rcache_range(void)
|
||||
{
|
||||
return PAGE_SIZE << (IOVA_RANGE_CACHE_MAX_SIZE - 1);
|
||||
}
|
||||
|
||||
static int iova_cpuhp_dead(unsigned int cpu, struct hlist_node *node)
|
||||
{
|
||||
struct iova_domain *iovad;
|
||||
|
|
|
@ -4198,7 +4198,8 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, struct nvme_ns_info *info)
|
|||
blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, ns->queue);
|
||||
|
||||
blk_queue_flag_set(QUEUE_FLAG_NONROT, ns->queue);
|
||||
if (ctrl->ops->flags & NVME_F_PCI_P2PDMA)
|
||||
if (ctrl->ops->supports_pci_p2pdma &&
|
||||
ctrl->ops->supports_pci_p2pdma(ctrl))
|
||||
blk_queue_flag_set(QUEUE_FLAG_PCI_P2PDMA, ns->queue);
|
||||
|
||||
ns->ctrl = ctrl;
|
||||
|
|
|
@ -504,7 +504,6 @@ struct nvme_ctrl_ops {
|
|||
unsigned int flags;
|
||||
#define NVME_F_FABRICS (1 << 0)
|
||||
#define NVME_F_METADATA_SUPPORTED (1 << 1)
|
||||
#define NVME_F_PCI_P2PDMA (1 << 2)
|
||||
int (*reg_read32)(struct nvme_ctrl *ctrl, u32 off, u32 *val);
|
||||
int (*reg_write32)(struct nvme_ctrl *ctrl, u32 off, u32 val);
|
||||
int (*reg_read64)(struct nvme_ctrl *ctrl, u32 off, u64 *val);
|
||||
|
@ -514,6 +513,7 @@ struct nvme_ctrl_ops {
|
|||
void (*stop_ctrl)(struct nvme_ctrl *ctrl);
|
||||
int (*get_address)(struct nvme_ctrl *ctrl, char *buf, int size);
|
||||
void (*print_device_info)(struct nvme_ctrl *ctrl);
|
||||
bool (*supports_pci_p2pdma)(struct nvme_ctrl *ctrl);
|
||||
};
|
||||
|
||||
/*
|
||||
|
|
|
@ -230,11 +230,10 @@ struct nvme_iod {
|
|||
bool use_sgl;
|
||||
int aborted;
|
||||
int npages; /* In the PRP list. 0 means small pool in use */
|
||||
int nents; /* Used in scatterlist */
|
||||
dma_addr_t first_dma;
|
||||
unsigned int dma_len; /* length of single DMA segment mapping */
|
||||
dma_addr_t meta_dma;
|
||||
struct scatterlist *sg;
|
||||
struct sg_table sgt;
|
||||
};
|
||||
|
||||
static inline unsigned int nvme_dbbuf_size(struct nvme_dev *dev)
|
||||
|
@ -524,7 +523,7 @@ static void nvme_commit_rqs(struct blk_mq_hw_ctx *hctx)
|
|||
static void **nvme_pci_iod_list(struct request *req)
|
||||
{
|
||||
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
|
||||
return (void **)(iod->sg + blk_rq_nr_phys_segments(req));
|
||||
return (void **)(iod->sgt.sgl + blk_rq_nr_phys_segments(req));
|
||||
}
|
||||
|
||||
static inline bool nvme_pci_use_sgls(struct nvme_dev *dev, struct request *req)
|
||||
|
@ -576,17 +575,6 @@ static void nvme_free_sgls(struct nvme_dev *dev, struct request *req)
|
|||
}
|
||||
}
|
||||
|
||||
static void nvme_unmap_sg(struct nvme_dev *dev, struct request *req)
|
||||
{
|
||||
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
|
||||
|
||||
if (is_pci_p2pdma_page(sg_page(iod->sg)))
|
||||
pci_p2pdma_unmap_sg(dev->dev, iod->sg, iod->nents,
|
||||
rq_dma_dir(req));
|
||||
else
|
||||
dma_unmap_sg(dev->dev, iod->sg, iod->nents, rq_dma_dir(req));
|
||||
}
|
||||
|
||||
static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
|
||||
{
|
||||
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
|
||||
|
@ -597,9 +585,10 @@ static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
|
|||
return;
|
||||
}
|
||||
|
||||
WARN_ON_ONCE(!iod->nents);
|
||||
WARN_ON_ONCE(!iod->sgt.nents);
|
||||
|
||||
dma_unmap_sgtable(dev->dev, &iod->sgt, rq_dma_dir(req), 0);
|
||||
|
||||
nvme_unmap_sg(dev, req);
|
||||
if (iod->npages == 0)
|
||||
dma_pool_free(dev->prp_small_pool, nvme_pci_iod_list(req)[0],
|
||||
iod->first_dma);
|
||||
|
@ -607,7 +596,7 @@ static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
|
|||
nvme_free_sgls(dev, req);
|
||||
else
|
||||
nvme_free_prps(dev, req);
|
||||
mempool_free(iod->sg, dev->iod_mempool);
|
||||
mempool_free(iod->sgt.sgl, dev->iod_mempool);
|
||||
}
|
||||
|
||||
static void nvme_print_sgl(struct scatterlist *sgl, int nents)
|
||||
|
@ -630,7 +619,7 @@ static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev,
|
|||
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
|
||||
struct dma_pool *pool;
|
||||
int length = blk_rq_payload_bytes(req);
|
||||
struct scatterlist *sg = iod->sg;
|
||||
struct scatterlist *sg = iod->sgt.sgl;
|
||||
int dma_len = sg_dma_len(sg);
|
||||
u64 dma_addr = sg_dma_address(sg);
|
||||
int offset = dma_addr & (NVME_CTRL_PAGE_SIZE - 1);
|
||||
|
@ -702,16 +691,16 @@ static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev,
|
|||
dma_len = sg_dma_len(sg);
|
||||
}
|
||||
done:
|
||||
cmnd->dptr.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
|
||||
cmnd->dptr.prp1 = cpu_to_le64(sg_dma_address(iod->sgt.sgl));
|
||||
cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma);
|
||||
return BLK_STS_OK;
|
||||
free_prps:
|
||||
nvme_free_prps(dev, req);
|
||||
return BLK_STS_RESOURCE;
|
||||
bad_sgl:
|
||||
WARN(DO_ONCE(nvme_print_sgl, iod->sg, iod->nents),
|
||||
WARN(DO_ONCE(nvme_print_sgl, iod->sgt.sgl, iod->sgt.nents),
|
||||
"Invalid SGL for payload:%d nents:%d\n",
|
||||
blk_rq_payload_bytes(req), iod->nents);
|
||||
blk_rq_payload_bytes(req), iod->sgt.nents);
|
||||
return BLK_STS_IOERR;
|
||||
}
|
||||
|
||||
|
@ -737,12 +726,13 @@ static void nvme_pci_sgl_set_seg(struct nvme_sgl_desc *sge,
|
|||
}
|
||||
|
||||
static blk_status_t nvme_pci_setup_sgls(struct nvme_dev *dev,
|
||||
struct request *req, struct nvme_rw_command *cmd, int entries)
|
||||
struct request *req, struct nvme_rw_command *cmd)
|
||||
{
|
||||
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
|
||||
struct dma_pool *pool;
|
||||
struct nvme_sgl_desc *sg_list;
|
||||
struct scatterlist *sg = iod->sg;
|
||||
struct scatterlist *sg = iod->sgt.sgl;
|
||||
unsigned int entries = iod->sgt.nents;
|
||||
dma_addr_t sgl_dma;
|
||||
int i = 0;
|
||||
|
||||
|
@ -840,7 +830,7 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
|
|||
{
|
||||
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
|
||||
blk_status_t ret = BLK_STS_RESOURCE;
|
||||
int nr_mapped;
|
||||
int rc;
|
||||
|
||||
if (blk_rq_nr_phys_segments(req) == 1) {
|
||||
struct bio_vec bv = req_bvec(req);
|
||||
|
@ -858,26 +848,25 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
|
|||
}
|
||||
|
||||
iod->dma_len = 0;
|
||||
iod->sg = mempool_alloc(dev->iod_mempool, GFP_ATOMIC);
|
||||
if (!iod->sg)
|
||||
iod->sgt.sgl = mempool_alloc(dev->iod_mempool, GFP_ATOMIC);
|
||||
if (!iod->sgt.sgl)
|
||||
return BLK_STS_RESOURCE;
|
||||
sg_init_table(iod->sg, blk_rq_nr_phys_segments(req));
|
||||
iod->nents = blk_rq_map_sg(req->q, req, iod->sg);
|
||||
if (!iod->nents)
|
||||
sg_init_table(iod->sgt.sgl, blk_rq_nr_phys_segments(req));
|
||||
iod->sgt.orig_nents = blk_rq_map_sg(req->q, req, iod->sgt.sgl);
|
||||
if (!iod->sgt.orig_nents)
|
||||
goto out_free_sg;
|
||||
|
||||
if (is_pci_p2pdma_page(sg_page(iod->sg)))
|
||||
nr_mapped = pci_p2pdma_map_sg_attrs(dev->dev, iod->sg,
|
||||
iod->nents, rq_dma_dir(req), DMA_ATTR_NO_WARN);
|
||||
else
|
||||
nr_mapped = dma_map_sg_attrs(dev->dev, iod->sg, iod->nents,
|
||||
rq_dma_dir(req), DMA_ATTR_NO_WARN);
|
||||
if (!nr_mapped)
|
||||
rc = dma_map_sgtable(dev->dev, &iod->sgt, rq_dma_dir(req),
|
||||
DMA_ATTR_NO_WARN);
|
||||
if (rc) {
|
||||
if (rc == -EREMOTEIO)
|
||||
ret = BLK_STS_TARGET;
|
||||
goto out_free_sg;
|
||||
}
|
||||
|
||||
iod->use_sgl = nvme_pci_use_sgls(dev, req);
|
||||
if (iod->use_sgl)
|
||||
ret = nvme_pci_setup_sgls(dev, req, &cmnd->rw, nr_mapped);
|
||||
ret = nvme_pci_setup_sgls(dev, req, &cmnd->rw);
|
||||
else
|
||||
ret = nvme_pci_setup_prps(dev, req, &cmnd->rw);
|
||||
if (ret != BLK_STS_OK)
|
||||
|
@ -885,9 +874,9 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
|
|||
return BLK_STS_OK;
|
||||
|
||||
out_unmap_sg:
|
||||
nvme_unmap_sg(dev, req);
|
||||
dma_unmap_sgtable(dev->dev, &iod->sgt, rq_dma_dir(req), 0);
|
||||
out_free_sg:
|
||||
mempool_free(iod->sg, dev->iod_mempool);
|
||||
mempool_free(iod->sgt.sgl, dev->iod_mempool);
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
@ -911,7 +900,7 @@ static blk_status_t nvme_prep_rq(struct nvme_dev *dev, struct request *req)
|
|||
|
||||
iod->aborted = 0;
|
||||
iod->npages = -1;
|
||||
iod->nents = 0;
|
||||
iod->sgt.nents = 0;
|
||||
|
||||
ret = nvme_setup_cmd(req->q->queuedata, req);
|
||||
if (ret)
|
||||
|
@ -2992,7 +2981,6 @@ static int nvme_pci_get_address(struct nvme_ctrl *ctrl, char *buf, int size)
|
|||
return snprintf(buf, size, "%s\n", dev_name(&pdev->dev));
|
||||
}
|
||||
|
||||
|
||||
static void nvme_pci_print_device_info(struct nvme_ctrl *ctrl)
|
||||
{
|
||||
struct pci_dev *pdev = to_pci_dev(to_nvme_dev(ctrl)->dev);
|
||||
|
@ -3007,11 +2995,17 @@ static void nvme_pci_print_device_info(struct nvme_ctrl *ctrl)
|
|||
subsys->firmware_rev);
|
||||
}
|
||||
|
||||
static bool nvme_pci_supports_pci_p2pdma(struct nvme_ctrl *ctrl)
|
||||
{
|
||||
struct nvme_dev *dev = to_nvme_dev(ctrl);
|
||||
|
||||
return dma_pci_p2pdma_supported(dev->dev);
|
||||
}
|
||||
|
||||
static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = {
|
||||
.name = "pcie",
|
||||
.module = THIS_MODULE,
|
||||
.flags = NVME_F_METADATA_SUPPORTED |
|
||||
NVME_F_PCI_P2PDMA,
|
||||
.flags = NVME_F_METADATA_SUPPORTED,
|
||||
.reg_read32 = nvme_pci_reg_read32,
|
||||
.reg_write32 = nvme_pci_reg_write32,
|
||||
.reg_read64 = nvme_pci_reg_read64,
|
||||
|
@ -3019,6 +3013,7 @@ static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = {
|
|||
.submit_async_event = nvme_pci_submit_async_event,
|
||||
.get_address = nvme_pci_get_address,
|
||||
.print_device_info = nvme_pci_print_device_info,
|
||||
.supports_pci_p2pdma = nvme_pci_supports_pci_p2pdma,
|
||||
};
|
||||
|
||||
static int nvme_dev_map(struct nvme_dev *dev)
|
||||
|
|
|
@ -415,7 +415,7 @@ static int nvmet_rdma_alloc_rsp(struct nvmet_rdma_device *ndev,
|
|||
if (ib_dma_mapping_error(ndev->device, r->send_sge.addr))
|
||||
goto out_free_rsp;
|
||||
|
||||
if (!ib_uses_virt_dma(ndev->device))
|
||||
if (ib_dma_pci_p2p_dma_supported(ndev->device))
|
||||
r->req.p2p_client = &ndev->device->dev;
|
||||
r->send_sge.length = sizeof(*r->req.cqe);
|
||||
r->send_sge.lkey = ndev->pd->local_dma_lkey;
|
||||
|
|
|
@@ -164,6 +164,11 @@ config PCI_PASID
 config PCI_P2PDMA
	bool "PCI peer-to-peer transfer support"
	depends on ZONE_DEVICE
	#
	# The need for the scatterlist DMA bus address flag means PCI P2PDMA
	# requires 64bit
	#
	depends on 64BIT
	select GENERIC_ALLOCATOR
	help
	  Enables drivers to do PCI peer-to-peer transactions to and from
|
||||
|
|
|
@ -10,6 +10,7 @@
|
|||
|
||||
#define pr_fmt(fmt) "pci-p2pdma: " fmt
|
||||
#include <linux/ctype.h>
|
||||
#include <linux/dma-map-ops.h>
|
||||
#include <linux/pci-p2pdma.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/slab.h>
|
||||
|
@ -20,13 +21,6 @@
|
|||
#include <linux/seq_buf.h>
|
||||
#include <linux/xarray.h>
|
||||
|
||||
enum pci_p2pdma_map_type {
|
||||
PCI_P2PDMA_MAP_UNKNOWN = 0,
|
||||
PCI_P2PDMA_MAP_NOT_SUPPORTED,
|
||||
PCI_P2PDMA_MAP_BUS_ADDR,
|
||||
PCI_P2PDMA_MAP_THRU_HOST_BRIDGE,
|
||||
};
|
||||
|
||||
struct pci_p2pdma {
|
||||
struct gen_pool *pool;
|
||||
bool p2pmem_published;
|
||||
|
@ -854,6 +848,7 @@ static enum pci_p2pdma_map_type pci_p2pdma_map_type(struct dev_pagemap *pgmap,
|
|||
struct pci_dev *provider = to_p2p_pgmap(pgmap)->provider;
|
||||
struct pci_dev *client;
|
||||
struct pci_p2pdma *p2pdma;
|
||||
int dist;
|
||||
|
||||
if (!provider->p2pdma)
|
||||
return PCI_P2PDMA_MAP_NOT_SUPPORTED;
|
||||
|
@ -870,74 +865,48 @@ static enum pci_p2pdma_map_type pci_p2pdma_map_type(struct dev_pagemap *pgmap,
|
|||
type = xa_to_value(xa_load(&p2pdma->map_types,
|
||||
map_types_idx(client)));
|
||||
rcu_read_unlock();
|
||||
|
||||
if (type == PCI_P2PDMA_MAP_UNKNOWN)
|
||||
return calc_map_type_and_dist(provider, client, &dist, true);
|
||||
|
||||
return type;
|
||||
}
|
||||
|
||||
static int __pci_p2pdma_map_sg(struct pci_p2pdma_pagemap *p2p_pgmap,
|
||||
struct device *dev, struct scatterlist *sg, int nents)
|
||||
/**
|
||||
* pci_p2pdma_map_segment - map an sg segment determining the mapping type
|
||||
* @state: State structure that should be declared outside of the for_each_sg()
|
||||
* loop and initialized to zero.
|
||||
* @dev: DMA device that's doing the mapping operation
|
||||
* @sg: scatterlist segment to map
|
||||
*
|
||||
* This is a helper to be used by non-IOMMU dma_map_sg() implementations where
|
||||
* the sg segment is the same for the page_link and the dma_address.
|
||||
*
|
||||
* Attempt to map a single segment in an SGL with the PCI bus address.
|
||||
* The segment must point to a PCI P2PDMA page and thus must be
|
||||
* wrapped in a is_pci_p2pdma_page(sg_page(sg)) check.
|
||||
*
|
||||
* Returns the type of mapping used and maps the page if the type is
|
||||
* PCI_P2PDMA_MAP_BUS_ADDR.
|
||||
*/
|
||||
enum pci_p2pdma_map_type
|
||||
pci_p2pdma_map_segment(struct pci_p2pdma_map_state *state, struct device *dev,
|
||||
struct scatterlist *sg)
|
||||
{
|
||||
struct scatterlist *s;
|
||||
int i;
|
||||
|
||||
for_each_sg(sg, s, nents, i) {
|
||||
s->dma_address = sg_phys(s) + p2p_pgmap->bus_offset;
|
||||
sg_dma_len(s) = s->length;
|
||||
if (state->pgmap != sg_page(sg)->pgmap) {
|
||||
state->pgmap = sg_page(sg)->pgmap;
|
||||
state->map = pci_p2pdma_map_type(state->pgmap, dev);
|
||||
state->bus_off = to_p2p_pgmap(state->pgmap)->bus_offset;
|
||||
}
|
||||
|
||||
return nents;
|
||||
}
|
||||
|
||||
/**
|
||||
* pci_p2pdma_map_sg_attrs - map a PCI peer-to-peer scatterlist for DMA
|
||||
* @dev: device doing the DMA request
|
||||
* @sg: scatter list to map
|
||||
* @nents: elements in the scatterlist
|
||||
* @dir: DMA direction
|
||||
* @attrs: DMA attributes passed to dma_map_sg() (if called)
|
||||
*
|
||||
* Scatterlists mapped with this function should be unmapped using
|
||||
* pci_p2pdma_unmap_sg_attrs().
|
||||
*
|
||||
* Returns the number of SG entries mapped or 0 on error.
|
||||
*/
|
||||
int pci_p2pdma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
|
||||
int nents, enum dma_data_direction dir, unsigned long attrs)
|
||||
{
|
||||
struct pci_p2pdma_pagemap *p2p_pgmap =
|
||||
to_p2p_pgmap(sg_page(sg)->pgmap);
|
||||
|
||||
switch (pci_p2pdma_map_type(sg_page(sg)->pgmap, dev)) {
|
||||
case PCI_P2PDMA_MAP_THRU_HOST_BRIDGE:
|
||||
return dma_map_sg_attrs(dev, sg, nents, dir, attrs);
|
||||
case PCI_P2PDMA_MAP_BUS_ADDR:
|
||||
return __pci_p2pdma_map_sg(p2p_pgmap, dev, sg, nents);
|
||||
default:
|
||||
WARN_ON_ONCE(1);
|
||||
return 0;
|
||||
if (state->map == PCI_P2PDMA_MAP_BUS_ADDR) {
|
||||
sg->dma_address = sg_phys(sg) + state->bus_off;
|
||||
sg_dma_len(sg) = sg->length;
|
||||
sg_dma_mark_bus_address(sg);
|
||||
}
|
||||
|
||||
return state->map;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(pci_p2pdma_map_sg_attrs);
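/*
 * Illustrative sketch (not part of this patch): the intended use of
 * pci_p2pdma_map_segment() from a dma_map_sg() implementation, modelled on
 * the dma-direct and dma-iommu callers elsewhere in this series.  The state
 * is declared once outside the loop so the mapping type is only recomputed
 * when the pagemap changes.  The surrounding function is hypothetical and
 * error unwinding is omitted for brevity.
 */
static int example_map_sg(struct device *dev, struct scatterlist *sgl,
			  int nents, enum dma_data_direction dir)
{
	struct pci_p2pdma_map_state p2pdma_state = {};
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		if (is_pci_p2pdma_page(sg_page(sg))) {
			switch (pci_p2pdma_map_segment(&p2pdma_state, dev, sg)) {
			case PCI_P2PDMA_MAP_BUS_ADDR:
				/* mapped and marked as a bus address already */
				continue;
			case PCI_P2PDMA_MAP_THRU_HOST_BRIDGE:
				/* needs a regular mapping like any other page */
				break;
			default:
				return -EREMOTEIO;
			}
		}
		/* ... set up the regular (direct or IOVA) mapping here ... */
	}
	return nents;
}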
|
||||
|
||||
/**
|
||||
* pci_p2pdma_unmap_sg_attrs - unmap a PCI peer-to-peer scatterlist that was
|
||||
* mapped with pci_p2pdma_map_sg()
|
||||
* @dev: device doing the DMA request
|
||||
* @sg: scatter list to map
|
||||
* @nents: number of elements returned by pci_p2pdma_map_sg()
|
||||
* @dir: DMA direction
|
||||
* @attrs: DMA attributes passed to dma_unmap_sg() (if called)
|
||||
*/
|
||||
void pci_p2pdma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
|
||||
int nents, enum dma_data_direction dir, unsigned long attrs)
|
||||
{
|
||||
enum pci_p2pdma_map_type map_type;
|
||||
|
||||
map_type = pci_p2pdma_map_type(sg_page(sg)->pgmap, dev);
|
||||
|
||||
if (map_type == PCI_P2PDMA_MAP_THRU_HOST_BRIDGE)
|
||||
dma_unmap_sg_attrs(dev, sg, nents, dir, attrs);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(pci_p2pdma_unmap_sg_attrs);
|
||||
|
||||
/**
|
||||
* pci_p2pdma_enable_store - parse a configfs/sysfs attribute store
|
||||
|
|
|
@ -236,6 +236,11 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
|
|||
|
||||
shost->dma_dev = dma_dev;
|
||||
|
||||
if (dma_dev->dma_mask) {
|
||||
shost->max_sectors = min_t(unsigned int, shost->max_sectors,
|
||||
dma_max_mapping_size(dma_dev) >> SECTOR_SHIFT);
|
||||
}
|
||||
|
||||
error = scsi_mq_setup_tags(shost);
|
||||
if (error)
|
||||
goto fail;
|
||||
|
|
|
@ -1876,10 +1876,6 @@ void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q)
|
|||
blk_queue_max_integrity_segments(q, shost->sg_prot_tablesize);
|
||||
}
|
||||
|
||||
if (dev->dma_mask) {
|
||||
shost->max_sectors = min_t(unsigned int, shost->max_sectors,
|
||||
dma_max_mapping_size(dev) >> SECTOR_SHIFT);
|
||||
}
|
||||
blk_queue_max_hw_sectors(q, shost->max_sectors);
|
||||
blk_queue_segment_boundary(q, shost->dma_boundary);
|
||||
dma_set_seg_boundary(dev, shost->dma_boundary);
|
||||
|
|
|
@ -225,6 +225,7 @@ static int sas_host_setup(struct transport_container *tc, struct device *dev,
|
|||
{
|
||||
struct Scsi_Host *shost = dev_to_shost(dev);
|
||||
struct sas_host_attrs *sas_host = to_sas_host_attrs(shost);
|
||||
struct device *dma_dev = shost->dma_dev;
|
||||
|
||||
INIT_LIST_HEAD(&sas_host->rphy_list);
|
||||
mutex_init(&sas_host->lock);
|
||||
|
@ -236,6 +237,11 @@ static int sas_host_setup(struct transport_container *tc, struct device *dev,
|
|||
dev_printk(KERN_ERR, dev, "fail to a bsg device %d\n",
|
||||
shost->host_no);
|
||||
|
||||
if (dma_dev->dma_mask) {
|
||||
shost->opt_sectors = min_t(unsigned int, shost->max_sectors,
|
||||
dma_opt_mapping_size(dma_dev) >> SECTOR_SHIFT);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
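/*
 * Illustrative sketch (not part of this patch): roughly what the
 * dma_opt_mapping_size() call above resolves to - the optimum advertised by
 * the device's dma_map_ops (for dma-iommu this is the IOVA rcache upper
 * bound), clamped by the hard dma_max_mapping_size() limit.  This is an
 * approximation of the core helper for illustration, not a verbatim copy.
 */
size_t example_dma_opt_mapping_size(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	size_t size = SIZE_MAX;

	if (ops && ops->opt_mapping_size)
		size = ops->opt_mapping_size();

	return min(dma_max_mapping_size(dev), size);
}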
|
||||
|
||||
|
|
|
@ -3297,6 +3297,13 @@ static int sd_revalidate_disk(struct gendisk *disk)
|
|||
(sector_t)BLK_DEF_MAX_SECTORS);
|
||||
}
|
||||
|
||||
/*
|
||||
* Limit default to SCSI host optimal sector limit if set. There may be
|
||||
* an impact on performance for when the size of a request exceeds this
|
||||
* host limit.
|
||||
*/
|
||||
rw_max = min_not_zero(rw_max, sdp->host->opt_sectors);
|
||||
|
||||
/* Do not exceed controller limit */
|
||||
rw_max = min(rw_max, queue_max_hw_sectors(q));
|
||||
|
||||
|
|
|
@ -1251,7 +1251,8 @@ void usb_hcd_unlink_urb_from_ep(struct usb_hcd *hcd, struct urb *urb)
|
|||
EXPORT_SYMBOL_GPL(usb_hcd_unlink_urb_from_ep);
|
||||
|
||||
/*
|
||||
* Some usb host controllers can only perform dma using a small SRAM area.
|
||||
* Some usb host controllers can only perform dma using a small SRAM area,
|
||||
* or have restrictions on addressable DRAM.
|
||||
* The usb core itself is however optimized for host controllers that can dma
|
||||
* using regular system memory - like pci devices doing bus mastering.
|
||||
*
|
||||
|
@ -3127,8 +3128,18 @@ int usb_hcd_setup_local_mem(struct usb_hcd *hcd, phys_addr_t phys_addr,
|
|||
if (IS_ERR(hcd->localmem_pool))
|
||||
return PTR_ERR(hcd->localmem_pool);
|
||||
|
||||
local_mem = devm_memremap(hcd->self.sysdev, phys_addr,
|
||||
size, MEMREMAP_WC);
|
||||
/*
|
||||
* if a physical SRAM address was passed, map it, otherwise
|
||||
* allocate system memory as a buffer.
|
||||
*/
|
||||
if (phys_addr)
|
||||
local_mem = devm_memremap(hcd->self.sysdev, phys_addr,
|
||||
size, MEMREMAP_WC);
|
||||
else
|
||||
local_mem = dmam_alloc_attrs(hcd->self.sysdev, size, &dma,
|
||||
GFP_KERNEL,
|
||||
DMA_ATTR_WRITE_COMBINE);
|
||||
|
||||
if (IS_ERR(local_mem))
|
||||
return PTR_ERR(local_mem);
|
||||
|
||||
|
|
|
@ -203,6 +203,31 @@ static int ohci_hcd_sa1111_probe(struct sa1111_dev *dev)
|
|||
goto err1;
|
||||
}
|
||||
|
||||
/*
|
||||
* According to the "Intel StrongARM SA-1111 Microprocessor Companion
|
||||
* Chip Specification Update" (June 2000), erratum #7, there is a
|
||||
* significant bug in the SA1111 SDRAM shared memory controller. If
|
||||
* an access to a region of memory above 1MB relative to the bank base,
|
||||
* it is important that address bit 10 _NOT_ be asserted. Depending
|
||||
* on the configuration of the RAM, bit 10 may correspond to one
|
||||
* of several different (processor-relative) address bits.
|
||||
*
|
||||
* Section 4.6 of the "Intel StrongARM SA-1111 Development Module
|
||||
* User's Guide" mentions that jumpers R51 and R52 control the
|
||||
* target of SA-1111 DMA (either SDRAM bank 0 on Assabet, or
|
||||
* SDRAM bank 1 on Neponset). The default configuration selects
|
||||
* Assabet, so any address in bank 1 is necessarily invalid.
|
||||
*
|
||||
* As a workaround, use a bounce buffer in addressable memory
|
||||
* as local_mem, relying on ZONE_DMA to provide an area that
|
||||
* fits within the above constraints.
|
||||
*
|
||||
* SZ_64K is an estimate for what size this might need.
|
||||
*/
|
||||
ret = usb_hcd_setup_local_mem(hcd, 0, 0, SZ_64K);
|
||||
if (ret)
|
||||
goto err1;
|
||||
|
||||
if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
|
||||
dev_dbg(&dev->dev, "request_mem_region failed\n");
|
||||
ret = -EBUSY;
|
||||
|
|
|
@ -11,7 +11,17 @@
|
|||
|
||||
struct cma;
|
||||
|
||||
/*
|
||||
* Values for struct dma_map_ops.flags:
|
||||
*
|
||||
* DMA_F_PCI_P2PDMA_SUPPORTED: Indicates the dma_map_ops implementation can
|
||||
* handle PCI P2PDMA pages in the map_sg/unmap_sg operation.
|
||||
*/
|
||||
#define DMA_F_PCI_P2PDMA_SUPPORTED (1 << 0)
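/*
 * Illustrative sketch (not part of this patch): how this flag surfaces to
 * drivers.  dma_pci_p2pdma_supported(), added to linux/dma-mapping.h in this
 * series, reports support when the device uses dma-direct (no ops) or when
 * its dma_map_ops set DMA_F_PCI_P2PDMA_SUPPORTED.  This is an approximation
 * of the core helper, shown for illustration only.
 */
static bool example_pci_p2pdma_supported(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	/* no ops means dma-direct, which handles P2PDMA pages natively */
	if (!ops)
		return true;

	return ops->flags & DMA_F_PCI_P2PDMA_SUPPORTED;
}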
|
||||
|
||||
struct dma_map_ops {
|
||||
unsigned int flags;
|
||||
|
||||
void *(*alloc)(struct device *dev, size_t size,
|
||||
dma_addr_t *dma_handle, gfp_t gfp,
|
||||
unsigned long attrs);
|
||||
|
@ -69,6 +79,7 @@ struct dma_map_ops {
|
|||
int (*dma_supported)(struct device *dev, u64 mask);
|
||||
u64 (*get_required_mask)(struct device *dev);
|
||||
size_t (*max_mapping_size)(struct device *dev);
|
||||
size_t (*opt_mapping_size)(void);
|
||||
unsigned long (*get_merge_boundary)(struct device *dev);
|
||||
};
|
||||
|
||||
|
@ -379,4 +390,57 @@ static inline void debug_dma_dump_mappings(struct device *dev)
|
|||
|
||||
extern const struct dma_map_ops dma_dummy_ops;
|
||||
|
||||
enum pci_p2pdma_map_type {
|
||||
/*
|
||||
* PCI_P2PDMA_MAP_UNKNOWN: Used internally for indicating the mapping
|
||||
* type hasn't been calculated yet. Functions that return this enum
|
||||
* never return this value.
|
||||
*/
|
||||
PCI_P2PDMA_MAP_UNKNOWN = 0,
|
||||
|
||||
/*
|
||||
* PCI_P2PDMA_MAP_NOT_SUPPORTED: Indicates the transaction will
|
||||
* traverse the host bridge and the host bridge is not in the
|
||||
* allowlist. DMA Mapping routines should return an error when
|
||||
* this is returned.
|
||||
*/
|
||||
PCI_P2PDMA_MAP_NOT_SUPPORTED,
|
||||
|
||||
/*
|
||||
* PCI_P2PDMA_MAP_BUS_ADDR: Indicates that two devices can talk to
|
||||
* each other directly through a PCI switch and the transaction will
|
||||
* not traverse the host bridge. Such a mapping should program
|
||||
* the DMA engine with PCI bus addresses.
|
||||
*/
|
||||
PCI_P2PDMA_MAP_BUS_ADDR,
|
||||
|
||||
/*
|
||||
* PCI_P2PDMA_MAP_THRU_HOST_BRIDGE: Indicates two devices can talk
|
||||
* to each other, but the transaction traverses a host bridge on the
|
||||
* allowlist. In this case, a normal mapping either with CPU physical
|
||||
* addresses (in the case of dma-direct) or IOVA addresses (in the
|
||||
* case of IOMMUs) should be used to program the DMA engine.
|
||||
*/
|
||||
PCI_P2PDMA_MAP_THRU_HOST_BRIDGE,
|
||||
};
|
||||
|
||||
struct pci_p2pdma_map_state {
|
||||
struct dev_pagemap *pgmap;
|
||||
int map;
|
||||
u64 bus_off;
|
||||
};
|
||||
|
||||
#ifdef CONFIG_PCI_P2PDMA
|
||||
enum pci_p2pdma_map_type
|
||||
pci_p2pdma_map_segment(struct pci_p2pdma_map_state *state, struct device *dev,
|
||||
struct scatterlist *sg);
|
||||
#else /* CONFIG_PCI_P2PDMA */
|
||||
static inline enum pci_p2pdma_map_type
|
||||
pci_p2pdma_map_segment(struct pci_p2pdma_map_state *state, struct device *dev,
|
||||
struct scatterlist *sg)
|
||||
{
|
||||
return PCI_P2PDMA_MAP_NOT_SUPPORTED;
|
||||
}
|
||||
#endif /* CONFIG_PCI_P2PDMA */
|
||||
|
||||
#endif /* _LINUX_DMA_MAP_OPS_H */
|
||||
|
|
|
@ -140,10 +140,12 @@ int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
|
|||
unsigned long attrs);
|
||||
bool dma_can_mmap(struct device *dev);
|
||||
int dma_supported(struct device *dev, u64 mask);
|
||||
bool dma_pci_p2pdma_supported(struct device *dev);
|
||||
int dma_set_mask(struct device *dev, u64 mask);
|
||||
int dma_set_coherent_mask(struct device *dev, u64 mask);
|
||||
u64 dma_get_required_mask(struct device *dev);
|
||||
size_t dma_max_mapping_size(struct device *dev);
|
||||
size_t dma_opt_mapping_size(struct device *dev);
|
||||
bool dma_need_sync(struct device *dev, dma_addr_t dma_addr);
|
||||
unsigned long dma_get_merge_boundary(struct device *dev);
|
||||
struct sg_table *dma_alloc_noncontiguous(struct device *dev, size_t size,
|
||||
|
@ -250,6 +252,10 @@ static inline int dma_supported(struct device *dev, u64 mask)
|
|||
{
|
||||
return 0;
|
||||
}
|
||||
static inline bool dma_pci_p2pdma_supported(struct device *dev)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
static inline int dma_set_mask(struct device *dev, u64 mask)
|
||||
{
|
||||
return -EIO;
|
||||
|
@ -266,6 +272,10 @@ static inline size_t dma_max_mapping_size(struct device *dev)
|
|||
{
|
||||
return 0;
|
||||
}
|
||||
static inline size_t dma_opt_mapping_size(struct device *dev)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
static inline bool dma_need_sync(struct device *dev, dma_addr_t dma_addr)
|
||||
{
|
||||
return false;
|
||||
|
|
|
@ -79,6 +79,8 @@ static inline unsigned long iova_pfn(struct iova_domain *iovad, dma_addr_t iova)
|
|||
int iova_cache_get(void);
|
||||
void iova_cache_put(void);
|
||||
|
||||
unsigned long iova_rcache_range(void);
|
||||
|
||||
void free_iova(struct iova_domain *iovad, unsigned long pfn);
|
||||
void __free_iova(struct iova_domain *iovad, struct iova *iova);
|
||||
struct iova *alloc_iova(struct iova_domain *iovad, unsigned long size,
|
||||
|
|
|
@@ -30,10 +30,6 @@ struct scatterlist *pci_p2pmem_alloc_sgl(struct pci_dev *pdev,
		unsigned int *nents, u32 length);
void pci_p2pmem_free_sgl(struct pci_dev *pdev, struct scatterlist *sgl);
void pci_p2pmem_publish(struct pci_dev *pdev, bool publish);
int pci_p2pdma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs);
void pci_p2pdma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs);
int pci_p2pdma_enable_store(const char *page, struct pci_dev **p2p_dev,
		bool *use_p2pdma);
ssize_t pci_p2pdma_enable_show(char *page, struct pci_dev *p2p_dev,
@@ -83,17 +79,6 @@ static inline void pci_p2pmem_free_sgl(struct pci_dev *pdev,
static inline void pci_p2pmem_publish(struct pci_dev *pdev, bool publish)
{
}
static inline int pci_p2pdma_map_sg_attrs(struct device *dev,
		struct scatterlist *sg, int nents, enum dma_data_direction dir,
		unsigned long attrs)
{
	return 0;
}
static inline void pci_p2pdma_unmap_sg_attrs(struct device *dev,
		struct scatterlist *sg, int nents, enum dma_data_direction dir,
		unsigned long attrs)
{
}
static inline int pci_p2pdma_enable_store(const char *page,
		struct pci_dev **p2p_dev, bool *use_p2pdma)
{
@@ -119,16 +104,4 @@ static inline struct pci_dev *pci_p2pmem_find(struct device *client)
	return pci_p2pmem_find_many(&client, 1);
}

static inline int pci_p2pdma_map_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir)
{
	return pci_p2pdma_map_sg_attrs(dev, sg, nents, dir, 0);
}

static inline void pci_p2pdma_unmap_sg(struct device *dev,
		struct scatterlist *sg, int nents, enum dma_data_direction dir)
{
	pci_p2pdma_unmap_sg_attrs(dev, sg, nents, dir, 0);
}

#endif /* _LINUX_PCI_P2P_H */
@@ -16,6 +16,9 @@ struct scatterlist {
#ifdef CONFIG_NEED_SG_DMA_LENGTH
	unsigned int	dma_length;
#endif
#ifdef CONFIG_PCI_P2PDMA
	unsigned int	dma_flags;
#endif
};

/*
@@ -245,6 +248,72 @@ static inline void sg_unmark_end(struct scatterlist *sg)
	sg->page_link &= ~SG_END;
}

/*
 * CONFIG_PCI_P2PDMA depends on CONFIG_64BIT, which means there are 4 bytes
 * of padding in struct scatterlist (assuming CONFIG_NEED_SG_DMA_LENGTH is
 * also set). Use this padding for DMA flags bits to indicate when a specific
 * dma address is a bus address.
 */
#ifdef CONFIG_PCI_P2PDMA

#define SG_DMA_BUS_ADDRESS (1 << 0)

/**
 * sg_is_dma_bus_address - Return whether a given segment was marked
 *			   as a bus address
 * @sg: SG entry
 *
 * Description:
 *   Returns true if sg_dma_mark_bus_address() has been called on
 *   this segment.
 **/
static inline bool sg_is_dma_bus_address(struct scatterlist *sg)
{
	return sg->dma_flags & SG_DMA_BUS_ADDRESS;
}

/**
 * sg_dma_mark_bus_address - Mark the scatterlist entry as a bus address
 * @sg: SG entry
 *
 * Description:
 *   Marks the passed in sg entry to indicate that the dma_address is
 *   a bus address and doesn't need to be unmapped. This should only be
 *   used by dma_map_sg() implementations to mark bus addresses
 *   so they can be properly cleaned up in dma_unmap_sg().
 **/
static inline void sg_dma_mark_bus_address(struct scatterlist *sg)
{
	sg->dma_flags |= SG_DMA_BUS_ADDRESS;
}

/**
 * sg_dma_unmark_bus_address - Unmark the scatterlist entry as a bus address
 * @sg: SG entry
 *
 * Description:
 *   Clears the bus address mark.
 **/
static inline void sg_dma_unmark_bus_address(struct scatterlist *sg)
{
	sg->dma_flags &= ~SG_DMA_BUS_ADDRESS;
}

#else

static inline bool sg_is_dma_bus_address(struct scatterlist *sg)
{
	return false;
}
static inline void sg_dma_mark_bus_address(struct scatterlist *sg)
{
}
static inline void sg_dma_unmark_bus_address(struct scatterlist *sg)
{
}

#endif
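
On the map side these flags are set for bus-address segments (the dma-direct hunks further below show that path); the unmap side then only has to clear the mark. A minimal, hypothetical sketch of an unmap loop using the helpers (unmap_sg_sketch and the hardware-unmap comment are illustrative, not part of any kernel API):

	static void unmap_sg_sketch(struct device *dev, struct scatterlist *sgl,
				    int nents, enum dma_data_direction dir)
	{
		struct scatterlist *sg;
		int i;

		for_each_sg(sgl, sg, nents, i) {
			if (sg_is_dma_bus_address(sg)) {
				/* bus addresses were never mapped, only marked */
				sg_dma_unmark_bus_address(sg);
				continue;
			}
			/* ...undo the regular hardware mapping of this segment... */
		}
	}
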
/**
 * sg_phys - Return physical address of an sg entry
 * @sg: SG entry
@@ -60,7 +60,6 @@ dma_addr_t swiotlb_map(struct device *dev, phys_addr_t phys,
		size_t size, enum dma_data_direction dir, unsigned long attrs);

#ifdef CONFIG_SWIOTLB
extern enum swiotlb_force swiotlb_force;

/**
 * struct io_tlb_mem - IO TLB Memory Pool Descriptor
@@ -80,15 +79,14 @@ extern enum swiotlb_force swiotlb_force;
 * @used:	The number of used IO TLB blocks.
 * @list:	The free list describing the number of free entries available
 *		from each index.
 * @index:	The index to start searching in the next round.
 * @orig_addr:	The original address corresponding to a mapped entry.
 * @alloc_size:	Size of the allocated buffer.
 * @lock:	The lock to protect the above data structures in the map and
 *		unmap calls.
 * @debugfs:	The dentry to debugfs.
 * @late_alloc:	%true if allocated using the page allocator
 * @force_bounce: %true if swiotlb bouncing is forced
 * @for_alloc:	%true if the pool is used for memory allocation
 * @nareas:	The number of areas in the pool.
 * @area_nslabs: The number of slots in each area.
 */
struct io_tlb_mem {
	phys_addr_t start;
@@ -96,17 +94,14 @@ struct io_tlb_mem {
	void *vaddr;
	unsigned long nslabs;
	unsigned long used;
	unsigned int index;
	spinlock_t lock;
	struct dentry *debugfs;
	bool late_alloc;
	bool force_bounce;
	bool for_alloc;
	struct io_tlb_slot {
		phys_addr_t orig_addr;
		size_t alloc_size;
		unsigned int list;
	} *slots;
	unsigned int nareas;
	unsigned int area_nslabs;
	struct io_tlb_area *areas;
	struct io_tlb_slot *slots;
};
extern struct io_tlb_mem io_tlb_default_mem;

@@ -4013,6 +4013,17 @@ static inline bool ib_uses_virt_dma(struct ib_device *dev)
	return IS_ENABLED(CONFIG_INFINIBAND_VIRT_DMA) && !dev->dma_device;
}

/*
 * Check if an IB device's underlying DMA mapping supports P2PDMA transfers.
 */
static inline bool ib_dma_pci_p2p_dma_supported(struct ib_device *dev)
{
	if (ib_uses_virt_dma(dev))
		return false;

	return dma_pci_p2pdma_supported(dev->dma_device);
}

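ULPs are expected to gate peer-to-peer memory on this check before building requests out of P2PDMA pages; a minimal sketch (the function name is illustrative, not an existing RDMA API):

	static bool sketch_can_use_p2p(struct ib_device *ibdev)
	{
		/* fall back to host memory when the mapping layer lacks P2PDMA */
		return ib_dma_pci_p2p_dma_supported(ibdev);
	}
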
/**
 * ib_dma_mapping_error - check a DMA addr for error
 * @dev: The device for which the dma_addr was created
@@ -607,6 +607,7 @@ struct Scsi_Host {
	short unsigned int sg_tablesize;
	short unsigned int sg_prot_tablesize;
	unsigned int max_sectors;
	unsigned int opt_sectors;
	unsigned int max_segment_size;
	unsigned long dma_boundary;
	unsigned long virt_boundary_mask;
@@ -453,29 +453,60 @@ void dma_direct_sync_sg_for_cpu(struct device *dev,
		arch_sync_dma_for_cpu_all();
}

/*
 * Unmaps segments, except for ones marked as pci_p2pdma which do not
 * require any further action as they contain a bus address.
 */
void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i)
		dma_direct_unmap_page(dev, sg->dma_address, sg_dma_len(sg), dir,
			     attrs);
	for_each_sg(sgl, sg, nents, i) {
		if (sg_is_dma_bus_address(sg))
			sg_dma_unmark_bus_address(sg);
		else
			dma_direct_unmap_page(dev, sg->dma_address,
					      sg_dma_len(sg), dir, attrs);
	}
}
#endif

int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
		enum dma_data_direction dir, unsigned long attrs)
{
	int i;
	struct pci_p2pdma_map_state p2pdma_state = {};
	enum pci_p2pdma_map_type map;
	struct scatterlist *sg;
	int i, ret;

	for_each_sg(sgl, sg, nents, i) {
		if (is_pci_p2pdma_page(sg_page(sg))) {
			map = pci_p2pdma_map_segment(&p2pdma_state, dev, sg);
			switch (map) {
			case PCI_P2PDMA_MAP_BUS_ADDR:
				continue;
			case PCI_P2PDMA_MAP_THRU_HOST_BRIDGE:
				/*
				 * Any P2P mapping that traverses the PCI
				 * host bridge must be mapped with a CPU
				 * physical address and not a PCI bus address.
				 * This is done with dma_direct_map_page()
				 * below.
				 */
				break;
			default:
				ret = -EREMOTEIO;
				goto out_unmap;
			}
		}

		sg->dma_address = dma_direct_map_page(dev, sg_page(sg),
				sg->offset, sg->length, dir, attrs);
		if (sg->dma_address == DMA_MAPPING_ERROR)
		if (sg->dma_address == DMA_MAPPING_ERROR) {
			ret = -EIO;
			goto out_unmap;
		}
		sg_dma_len(sg) = sg->length;
	}

@@ -483,7 +514,7 @@ int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,

out_unmap:
	dma_direct_unmap_sg(dev, sgl, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
	return -EIO;
	return ret;
}

dma_addr_t dma_direct_map_resource(struct device *dev, phys_addr_t paddr,
@@ -8,6 +8,7 @@
#define _KERNEL_DMA_DIRECT_H

#include <linux/dma-direct.h>
#include <linux/memremap.h>

int dma_direct_get_sgtable(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
@@ -87,10 +88,15 @@ static inline dma_addr_t dma_direct_map_page(struct device *dev,
	phys_addr_t phys = page_to_phys(page) + offset;
	dma_addr_t dma_addr = phys_to_dma(dev, phys);

	if (is_swiotlb_force_bounce(dev))
	if (is_swiotlb_force_bounce(dev)) {
		if (is_pci_p2pdma_page(page))
			return DMA_MAPPING_ERROR;
		return swiotlb_map(dev, phys, size, dir, attrs);
	}

	if (unlikely(!dma_capable(dev, dma_addr, size, true))) {
		if (is_pci_p2pdma_page(page))
			return DMA_MAPPING_ERROR;
		if (is_swiotlb_active(dev))
			return swiotlb_map(dev, phys, size, dir, attrs);

@@ -197,7 +197,7 @@ static int __dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
	if (ents > 0)
		debug_dma_map_sg(dev, sg, nents, ents, dir, attrs);
	else if (WARN_ON_ONCE(ents != -EINVAL && ents != -ENOMEM &&
			      ents != -EIO))
			      ents != -EIO && ents != -EREMOTEIO))
		return -EIO;

	return ents;
@@ -249,12 +249,15 @@ EXPORT_SYMBOL(dma_map_sg_attrs);
 * Returns 0 on success or a negative error code on error. The following
 * error codes are supported with the given meaning:
 *
 *   -EINVAL	An invalid argument, unaligned access or other error
 *		in usage. Will not succeed if retried.
 *   -ENOMEM	Insufficient resources (like memory or IOVA space) to
 *		complete the mapping. Should succeed if retried later.
 *   -EIO	Legacy error code with an unknown meaning, e.g. this is
 *		returned if a lower level call returned DMA_MAPPING_ERROR.
 *   -EINVAL	An invalid argument, unaligned access or other error
 *		in usage. Will not succeed if retried.
 *   -ENOMEM	Insufficient resources (like memory or IOVA space) to
 *		complete the mapping. Should succeed if retried later.
 *   -EIO	Legacy error code with an unknown meaning, e.g. this is
 *		returned if a lower level call returned
 *		DMA_MAPPING_ERROR.
 *   -EREMOTEIO	The DMA device cannot access P2PDMA memory specified
 *		in the sg_table. This will not succeed if retried.
 */
int dma_map_sgtable(struct device *dev, struct sg_table *sgt,
		enum dma_data_direction dir, unsigned long attrs)
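
A caller that submits P2PDMA pages can key its fallback on the new error code; a minimal, hypothetical sketch of such a caller (the fallback convention shown here is illustrative):

	static int sketch_map_request(struct device *dev, struct sg_table *sgt,
				      bool *retry_with_host_memory)
	{
		int rc = dma_map_sgtable(dev, sgt, DMA_TO_DEVICE, 0);

		if (rc == -EREMOTEIO) {
			/*
			 * The device cannot reach the P2PDMA memory in this
			 * sg_table; retrying will not help, so rebuild the
			 * request from regular host memory instead.
			 */
			*retry_with_host_memory = true;
		}
		return rc;	/* 0, or -EINVAL/-ENOMEM/-EIO as documented above */
	}
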
@@ -720,6 +723,24 @@ int dma_supported(struct device *dev, u64 mask)
}
EXPORT_SYMBOL(dma_supported);

bool dma_pci_p2pdma_supported(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	/* if ops is not set, dma direct will be used which supports P2PDMA */
	if (!ops)
		return true;

	/*
	 * Note: dma_ops_bypass is not checked here because P2PDMA should
	 * not be used with dma mapping ops that do not have support even
	 * if the specific device is bypassing them.
	 */

	return ops->flags & DMA_F_PCI_P2PDMA_SUPPORTED;
}
EXPORT_SYMBOL_GPL(dma_pci_p2pdma_supported);

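Drivers that export peer-to-peer memory are expected to call this before advertising P2PDMA support, roughly as nvme-pci now does when checking its DMA ops; a minimal sketch (the helper name is illustrative):

	static bool sketch_queue_supports_p2pdma(struct device *dma_dev)
	{
		/*
		 * True for dma-direct, or for dma_map_ops that set
		 * DMA_F_PCI_P2PDMA_SUPPORTED; false otherwise.
		 */
		return dma_pci_p2pdma_supported(dma_dev);
	}
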
#ifdef CONFIG_ARCH_HAS_DMA_SET_MASK
void arch_dma_set_mask(struct device *dev, u64 mask);
#else
@@ -773,6 +794,18 @@ size_t dma_max_mapping_size(struct device *dev)
}
EXPORT_SYMBOL_GPL(dma_max_mapping_size);

size_t dma_opt_mapping_size(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	size_t size = SIZE_MAX;

	if (ops && ops->opt_mapping_size)
		size = ops->opt_mapping_size();

	return min(dma_max_mapping_size(dev), size);
}
EXPORT_SYMBOL_GPL(dma_opt_mapping_size);

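The intended consumers are storage hosts that cap their optimal transfer size by the DMA layer's optimal mapping length, along the lines of the scsi and libata changes in this series; a minimal sketch, not the exact scsi_transport_sas code:

	static void sketch_cap_opt_sectors(struct Scsi_Host *shost,
					   struct device *dma_dev)
	{
		/* convert the optimal DMA mapping size into 512-byte sectors */
		shost->opt_sectors = min_t(unsigned int, shost->max_sectors,
				dma_opt_mapping_size(dma_dev) >> SECTOR_SHIFT);
	}
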
bool dma_need_sync(struct device *dev, dma_addr_t dma_addr)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
@@ -62,6 +62,12 @@

#define INVALID_PHYS_ADDR (~(phys_addr_t)0)

struct io_tlb_slot {
	phys_addr_t orig_addr;
	size_t alloc_size;
	unsigned int list;
};

static bool swiotlb_force_bounce;
static bool swiotlb_force_disable;

@@ -70,6 +76,62 @@ struct io_tlb_mem io_tlb_default_mem;
phys_addr_t swiotlb_unencrypted_base;

static unsigned long default_nslabs = IO_TLB_DEFAULT_SIZE >> IO_TLB_SHIFT;
static unsigned long default_nareas;

/**
 * struct io_tlb_area - IO TLB memory area descriptor
 *
 * This is a single area with a single lock.
 *
 * @used:	The number of used IO TLB blocks.
 * @index:	The slot index to start searching in this area for the next round.
 * @lock:	The lock to protect the above data structures in the map and
 *		unmap calls.
 */
struct io_tlb_area {
	unsigned long used;
	unsigned int index;
	spinlock_t lock;
};

/*
 * Round up the number of slabs to the next power of 2. The last area is going
 * to be smaller than the rest if default_nslabs is not a power of two.
 * The number of slots in an area should be a multiple of IO_TLB_SEGSIZE,
 * otherwise a segment may span two or more areas. That would conflict with
 * the tracking of free contiguous slots: free slots are treated as contiguous
 * no matter whether they cross an area boundary.
 *
 * Return true if default_nslabs is rounded up.
 */
static bool round_up_default_nslabs(void)
{
	if (!default_nareas)
		return false;

	if (default_nslabs < IO_TLB_SEGSIZE * default_nareas)
		default_nslabs = IO_TLB_SEGSIZE * default_nareas;
	else if (is_power_of_2(default_nslabs))
		return false;
	default_nslabs = roundup_pow_of_two(default_nslabs);
	return true;
}

static void swiotlb_adjust_nareas(unsigned int nareas)
{
	/* use a single area when none is specified */
	if (!nareas)
		nareas = 1;
	else if (!is_power_of_2(nareas))
		nareas = roundup_pow_of_two(nareas);

	default_nareas = nareas;

	pr_info("area num %d.\n", nareas);
	if (round_up_default_nslabs())
		pr_info("SWIOTLB bounce buffer size roundup to %luMB",
			(default_nslabs << IO_TLB_SHIFT) >> 20);
}
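
As a worked example (assuming the usual 2 KB IO TLB slot size and IO_TLB_SEGSIZE of 128): the default 64 MB pool is 32768 slabs, so on a machine whose CPU count rounds up to 8 areas nothing changes and each area gets 4096 slots; a user-requested pool of 3000 slabs with 8 areas is first aligned to 3072 slabs and then, not being a power of two, rounded up to 4096 slabs (8 MB), leaving 512 slots per area.
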
static int __init
setup_io_tlb_npages(char *str)
@@ -79,6 +141,10 @@ setup_io_tlb_npages(char *str)
		default_nslabs =
			ALIGN(simple_strtoul(str, &str, 0), IO_TLB_SEGSIZE);
	}
	if (*str == ',')
		++str;
	if (isdigit(*str))
		swiotlb_adjust_nareas(simple_strtoul(str, &str, 0));
	if (*str == ',')
		++str;
	if (!strcmp(str, "force"))
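
The second comma-separated value parsed above selects the number of swiotlb areas via swiotlb_adjust_nareas(); an illustrative boot-time setting:

	swiotlb=65536,8
		65536 I/O TLB slabs (128 MB of bounce buffer at 2 KB per slab)
		split into 8 areas, each protected by its own lock.
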
@@ -112,8 +178,11 @@ void __init swiotlb_adjust_size(unsigned long size)
	 */
	if (default_nslabs != IO_TLB_DEFAULT_SIZE >> IO_TLB_SHIFT)
		return;

	size = ALIGN(size, IO_TLB_SIZE);
	default_nslabs = ALIGN(size >> IO_TLB_SHIFT, IO_TLB_SEGSIZE);
	if (round_up_default_nslabs())
		size = default_nslabs << IO_TLB_SHIFT;
	pr_info("SWIOTLB bounce buffer size adjusted to %luMB", size >> 20);
}

@@ -192,7 +261,8 @@ void __init swiotlb_update_mem_attributes(void)
}

static void swiotlb_init_io_tlb_mem(struct io_tlb_mem *mem, phys_addr_t start,
		unsigned long nslabs, unsigned int flags, bool late_alloc)
		unsigned long nslabs, unsigned int flags,
		bool late_alloc, unsigned int nareas)
{
	void *vaddr = phys_to_virt(start);
	unsigned long bytes = nslabs << IO_TLB_SHIFT, i;
@@ -200,12 +270,18 @@ static void swiotlb_init_io_tlb_mem(struct io_tlb_mem *mem, phys_addr_t start,
	mem->nslabs = nslabs;
	mem->start = start;
	mem->end = mem->start + bytes;
	mem->index = 0;
	mem->late_alloc = late_alloc;
	mem->nareas = nareas;
	mem->area_nslabs = nslabs / mem->nareas;

	mem->force_bounce = swiotlb_force_bounce || (flags & SWIOTLB_FORCE);

	spin_lock_init(&mem->lock);
	for (i = 0; i < mem->nareas; i++) {
		spin_lock_init(&mem->areas[i].lock);
		mem->areas[i].index = 0;
		mem->areas[i].used = 0;
	}

	for (i = 0; i < mem->nslabs; i++) {
		mem->slots[i].list = IO_TLB_SEGSIZE - io_tlb_offset(i);
		mem->slots[i].orig_addr = INVALID_PHYS_ADDR;
@@ -232,7 +308,7 @@ void __init swiotlb_init_remap(bool addressing_limit, unsigned int flags,
		int (*remap)(void *tlb, unsigned long nslabs))
{
	struct io_tlb_mem *mem = &io_tlb_default_mem;
	unsigned long nslabs = default_nslabs;
	unsigned long nslabs;
	size_t alloc_size;
	size_t bytes;
	void *tlb;
@@ -242,6 +318,17 @@ void __init swiotlb_init_remap(bool addressing_limit, unsigned int flags,
	if (swiotlb_force_disable)
		return;

	/*
	 * default_nslabs may be changed when the area number is adjusted,
	 * so allocate the bounce buffer after adjusting the area number.
	 */
	if (!default_nareas)
		swiotlb_adjust_nareas(num_possible_cpus());

	nslabs = default_nslabs;
	if (nslabs < IO_TLB_MIN_SLABS)
		panic("%s: nslabs = %lu too small\n", __func__, nslabs);

	/*
	 * By default allocate the bounce buffer memory from low memory, but
	 * allow to pick a location everywhere for hypervisors with guest
@@ -254,7 +341,8 @@ retry:
	else
		tlb = memblock_alloc_low(bytes, PAGE_SIZE);
	if (!tlb) {
		pr_warn("%s: failed to allocate tlb structure\n", __func__);
		pr_warn("%s: Failed to allocate %zu bytes tlb structure\n",
			__func__, bytes);
		return;
	}

@@ -274,7 +362,13 @@ retry:
		panic("%s: Failed to allocate %zu bytes align=0x%lx\n",
		      __func__, alloc_size, PAGE_SIZE);

	swiotlb_init_io_tlb_mem(mem, __pa(tlb), nslabs, flags, false);
	mem->areas = memblock_alloc(array_size(sizeof(struct io_tlb_area),
		default_nareas), SMP_CACHE_BYTES);
	if (!mem->areas)
		panic("%s: Failed to allocate mem->areas.\n", __func__);

	swiotlb_init_io_tlb_mem(mem, __pa(tlb), nslabs, flags, false,
				default_nareas);

	if (flags & SWIOTLB_VERBOSE)
		swiotlb_print_info();
@@ -282,7 +376,7 @@ retry:

void __init swiotlb_init(bool addressing_limit, unsigned int flags)
{
	return swiotlb_init_remap(addressing_limit, flags, NULL);
	swiotlb_init_remap(addressing_limit, flags, NULL);
}

/*
@@ -296,7 +390,7 @@ int swiotlb_init_late(size_t size, gfp_t gfp_mask,
	struct io_tlb_mem *mem = &io_tlb_default_mem;
	unsigned long nslabs = ALIGN(size >> IO_TLB_SHIFT, IO_TLB_SEGSIZE);
	unsigned char *vstart = NULL;
	unsigned int order;
	unsigned int order, area_order;
	bool retried = false;
	int rc = 0;

@@ -337,19 +431,34 @@ retry:
			(PAGE_SIZE << order) >> 20);
	}

	if (!default_nareas)
		swiotlb_adjust_nareas(num_possible_cpus());

	area_order = get_order(array_size(sizeof(*mem->areas),
		default_nareas));
	mem->areas = (struct io_tlb_area *)
		__get_free_pages(GFP_KERNEL | __GFP_ZERO, area_order);
	if (!mem->areas)
		goto error_area;

	mem->slots = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
		get_order(array_size(sizeof(*mem->slots), nslabs)));
	if (!mem->slots) {
		free_pages((unsigned long)vstart, order);
		return -ENOMEM;
	}
	if (!mem->slots)
		goto error_slots;

	set_memory_decrypted((unsigned long)vstart,
			     (nslabs << IO_TLB_SHIFT) >> PAGE_SHIFT);
	swiotlb_init_io_tlb_mem(mem, virt_to_phys(vstart), nslabs, 0, true);
	swiotlb_init_io_tlb_mem(mem, virt_to_phys(vstart), nslabs, 0, true,
				default_nareas);

	swiotlb_print_info();
	return 0;

error_slots:
	free_pages((unsigned long)mem->areas, area_order);
error_area:
	free_pages((unsigned long)vstart, order);
	return -ENOMEM;
}

void __init swiotlb_exit(void)
@@ -357,6 +466,7 @@ void __init swiotlb_exit(void)
	struct io_tlb_mem *mem = &io_tlb_default_mem;
	unsigned long tbl_vaddr;
	size_t tbl_size, slots_size;
	unsigned int area_order;

	if (swiotlb_force_bounce)
		return;
@@ -371,9 +481,14 @@ void __init swiotlb_exit(void)

	set_memory_encrypted(tbl_vaddr, tbl_size >> PAGE_SHIFT);
	if (mem->late_alloc) {
		area_order = get_order(array_size(sizeof(*mem->areas),
			mem->nareas));
		free_pages((unsigned long)mem->areas, area_order);
		free_pages(tbl_vaddr, get_order(tbl_size));
		free_pages((unsigned long)mem->slots, get_order(slots_size));
	} else {
		memblock_free_late(__pa(mem->areas),
			array_size(sizeof(*mem->areas), mem->nareas));
		memblock_free_late(mem->start, tbl_size);
		memblock_free_late(__pa(mem->slots), slots_size);
	}

@@ -476,9 +591,9 @@ static inline unsigned long get_max_slots(unsigned long boundary_mask)
	return nr_slots(boundary_mask + 1);
}

static unsigned int wrap_index(struct io_tlb_mem *mem, unsigned int index)
static unsigned int wrap_area_index(struct io_tlb_mem *mem, unsigned int index)
{
	if (index >= mem->nslabs)
	if (index >= mem->area_nslabs)
		return 0;
	return index;
}
@@ -487,10 +602,12 @@ static unsigned int wrap_index(struct io_tlb_mem *mem, unsigned int index)
 * Find a suitable number of IO TLB entries size that will fit this request and
 * allocate a buffer from that IO TLB pool.
 */
static int swiotlb_find_slots(struct device *dev, phys_addr_t orig_addr,
		size_t alloc_size, unsigned int alloc_align_mask)
static int swiotlb_do_find_slots(struct device *dev, int area_index,
		phys_addr_t orig_addr, size_t alloc_size,
		unsigned int alloc_align_mask)
{
	struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
	struct io_tlb_area *area = mem->areas + area_index;
	unsigned long boundary_mask = dma_get_seg_boundary(dev);
	dma_addr_t tbl_dma_addr =
		phys_to_dma_unencrypted(dev, mem->start) & boundary_mask;
@@ -501,8 +618,11 @@ static int swiotlb_find_slots(struct device *dev, phys_addr_t orig_addr,
	unsigned int index, wrap, count = 0, i;
	unsigned int offset = swiotlb_align_offset(dev, orig_addr);
	unsigned long flags;
	unsigned int slot_base;
	unsigned int slot_index;

	BUG_ON(!nslots);
	BUG_ON(area_index >= mem->nareas);

	/*
	 * For mappings with an alignment requirement don't bother looping to
@@ -514,16 +634,20 @@ static int swiotlb_find_slots(struct device *dev, phys_addr_t orig_addr,
	stride = max(stride, stride << (PAGE_SHIFT - IO_TLB_SHIFT));
	stride = max(stride, (alloc_align_mask >> IO_TLB_SHIFT) + 1);

	spin_lock_irqsave(&mem->lock, flags);
	if (unlikely(nslots > mem->nslabs - mem->used))
	spin_lock_irqsave(&area->lock, flags);
	if (unlikely(nslots > mem->area_nslabs - area->used))
		goto not_found;

	index = wrap = wrap_index(mem, ALIGN(mem->index, stride));
	slot_base = area_index * mem->area_nslabs;
	index = wrap = wrap_area_index(mem, ALIGN(area->index, stride));

	do {
		slot_index = slot_base + index;

		if (orig_addr &&
		    (slot_addr(tbl_dma_addr, index) & iotlb_align_mask) !=
			    (orig_addr & iotlb_align_mask)) {
			index = wrap_index(mem, index + 1);
		    (slot_addr(tbl_dma_addr, slot_index) &
		     iotlb_align_mask) != (orig_addr & iotlb_align_mask)) {
			index = wrap_area_index(mem, index + 1);
			continue;
		}

@@ -532,26 +656,26 @@ static int swiotlb_find_slots(struct device *dev, phys_addr_t orig_addr,
		 * contiguous buffers, we allocate the buffers from that slot
		 * and mark the entries as '0' indicating unavailable.
		 */
		if (!iommu_is_span_boundary(index, nslots,
		if (!iommu_is_span_boundary(slot_index, nslots,
					    nr_slots(tbl_dma_addr),
					    max_slots)) {
			if (mem->slots[index].list >= nslots)
			if (mem->slots[slot_index].list >= nslots)
				goto found;
		}
		index = wrap_index(mem, index + stride);
		index = wrap_area_index(mem, index + stride);
	} while (index != wrap);

not_found:
	spin_unlock_irqrestore(&mem->lock, flags);
	spin_unlock_irqrestore(&area->lock, flags);
	return -1;

found:
	for (i = index; i < index + nslots; i++) {
	for (i = slot_index; i < slot_index + nslots; i++) {
		mem->slots[i].list = 0;
		mem->slots[i].alloc_size =
			alloc_size - (offset + ((i - index) << IO_TLB_SHIFT));
		mem->slots[i].alloc_size = alloc_size - (offset +
			((i - slot_index) << IO_TLB_SHIFT));
	}
	for (i = index - 1;
	for (i = slot_index - 1;
	     io_tlb_offset(i) != IO_TLB_SEGSIZE - 1 &&
	     mem->slots[i].list; i--)
		mem->slots[i].list = ++count;
@@ -559,14 +683,42 @@ found:
	/*
	 * Update the indices to avoid searching in the next round.
	 */
	if (index + nslots < mem->nslabs)
		mem->index = index + nslots;
	if (index + nslots < mem->area_nslabs)
		area->index = index + nslots;
	else
		mem->index = 0;
	mem->used += nslots;
		area->index = 0;
	area->used += nslots;
	spin_unlock_irqrestore(&area->lock, flags);
	return slot_index;
}

	spin_unlock_irqrestore(&mem->lock, flags);
	return index;
static int swiotlb_find_slots(struct device *dev, phys_addr_t orig_addr,
		size_t alloc_size, unsigned int alloc_align_mask)
{
	struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
	int start = raw_smp_processor_id() & (mem->nareas - 1);
	int i = start, index;

	do {
		index = swiotlb_do_find_slots(dev, i, orig_addr, alloc_size,
					      alloc_align_mask);
		if (index >= 0)
			return index;
		if (++i >= mem->nareas)
			i = 0;
	} while (i != start);

	return -1;
}

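For instance, with nareas = 8 the mask picks area 13 & 7 = 5 for a request issued on CPU 13, and on contention the search walks through areas 6, 7, 0, 1, ... before giving up; the power-of-two rounding in swiotlb_adjust_nareas() is what makes the mask in swiotlb_find_slots() safe.
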
static unsigned long mem_used(struct io_tlb_mem *mem)
{
	int i;
	unsigned long used = 0;

	for (i = 0; i < mem->nareas; i++)
		used += mem->areas[i].used;
	return used;
}

phys_addr_t swiotlb_tbl_map_single(struct device *dev, phys_addr_t orig_addr,
@@ -580,7 +732,7 @@ phys_addr_t swiotlb_tbl_map_single(struct device *dev, phys_addr_t orig_addr,
	int index;
	phys_addr_t tlb_addr;

	if (!mem)
	if (!mem || !mem->nslabs)
		panic("Can not allocate SWIOTLB buffer earlier and can't now provide you with the DMA bounce buffer");

	if (cc_platform_has(CC_ATTR_MEM_ENCRYPT))
@@ -598,7 +750,7 @@ phys_addr_t swiotlb_tbl_map_single(struct device *dev, phys_addr_t orig_addr,
		if (!(attrs & DMA_ATTR_NO_WARN))
			dev_warn_ratelimited(dev,
	"swiotlb buffer is full (sz: %zd bytes), total %lu (slots), used %lu (slots)\n",
				 alloc_size, mem->nslabs, mem->used);
				 alloc_size, mem->nslabs, mem_used(mem));
		return (phys_addr_t)DMA_MAPPING_ERROR;
	}

@@ -628,6 +780,8 @@ static void swiotlb_release_slots(struct device *dev, phys_addr_t tlb_addr)
	unsigned int offset = swiotlb_align_offset(dev, tlb_addr);
	int index = (tlb_addr - offset - mem->start) >> IO_TLB_SHIFT;
	int nslots = nr_slots(mem->slots[index].alloc_size + offset);
	int aindex = index / mem->area_nslabs;
	struct io_tlb_area *area = &mem->areas[aindex];
	int count, i;

	/*
@@ -636,7 +790,9 @@ static void swiotlb_release_slots(struct device *dev, phys_addr_t tlb_addr)
	 * While returning the entries to the free list, we merge the entries
	 * with slots below and above the pool being returned.
	 */
	spin_lock_irqsave(&mem->lock, flags);
	BUG_ON(aindex >= mem->nareas);

	spin_lock_irqsave(&area->lock, flags);
	if (index + nslots < ALIGN(index + 1, IO_TLB_SEGSIZE))
		count = mem->slots[index + nslots].list;
	else
@@ -660,8 +816,8 @@ static void swiotlb_release_slots(struct device *dev, phys_addr_t tlb_addr)
	     io_tlb_offset(i) != IO_TLB_SEGSIZE - 1 && mem->slots[i].list;
	     i--)
		mem->slots[i].list = ++count;
	mem->used -= nslots;
	spin_unlock_irqrestore(&mem->lock, flags);
	area->used -= nslots;
	spin_unlock_irqrestore(&area->lock, flags);
}

/*
@@ -756,6 +912,13 @@ bool is_swiotlb_active(struct device *dev)
}
EXPORT_SYMBOL_GPL(is_swiotlb_active);

static int io_tlb_used_get(void *data, u64 *val)
{
	*val = mem_used(&io_tlb_default_mem);
	return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(fops_io_tlb_used, io_tlb_used_get, NULL, "%llu\n");

static void swiotlb_create_debugfs_files(struct io_tlb_mem *mem,
					 const char *dirname)
{
@@ -764,7 +927,8 @@ static void swiotlb_create_debugfs_files(struct io_tlb_mem *mem,
		return;

	debugfs_create_ulong("io_tlb_nslabs", 0400, mem->debugfs, &mem->nslabs);
	debugfs_create_ulong("io_tlb_used", 0400, mem->debugfs, &mem->used);
	debugfs_create_file("io_tlb_used", 0400, mem->debugfs, NULL,
			    &fops_io_tlb_used);
}

static int __init __maybe_unused swiotlb_create_default_debugfs(void)
@@ -815,6 +979,9 @@ static int rmem_swiotlb_device_init(struct reserved_mem *rmem,
	struct io_tlb_mem *mem = rmem->priv;
	unsigned long nslabs = rmem->size >> IO_TLB_SHIFT;

	/* Set the per-device IO TLB area count to one */
	unsigned int nareas = 1;

	/*
	 * Since multiple devices can share the same pool, the private data,
	 * io_tlb_mem struct, will be initialized by the first device attached
@@ -831,10 +998,18 @@ static int rmem_swiotlb_device_init(struct reserved_mem *rmem,
			return -ENOMEM;
		}

		mem->areas = kcalloc(nareas, sizeof(*mem->areas),
			GFP_KERNEL);
		if (!mem->areas) {
			kfree(mem->slots);
			kfree(mem);
			return -ENOMEM;
		}

		set_memory_decrypted((unsigned long)phys_to_virt(rmem->base),
				     rmem->size >> PAGE_SHIFT);
		swiotlb_init_io_tlb_mem(mem, rmem->base, nslabs, SWIOTLB_FORCE,
					false);
					false, nareas);
		mem->for_alloc = true;

		rmem->priv = mem;