lib/dma-virt: Add dma_virt_ops

Several RDMA drivers (hfi1, qib and rxe) expect that ib_sge.addr
is a virtual address. Provide DMA mapping operations that are
suitable for these drivers.
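
For illustration only (no driver is converted by this patch): since
DMA_VIRT_OPS is a silent bool with "default n", a consumer is expected to
enable it with "select DMA_VIRT_OPS" from its own Kconfig entry and then
point the struct device it maps against at the new ops. A hypothetical
sketch, assuming the dma_ops pointer in struct device introduced elsewhere
in this patch series:

#include <linux/dma-mapping.h>

/*
 * Hypothetical example, not part of this patch: install dma_virt_ops on a
 * device so that DMA mapping calls on it return kernel virtual addresses.
 */
static void example_install_dma_virt_ops(struct device *dev)
{
	dev->dma_ops = &dma_virt_ops;
}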

Signed-off-by: Bart Van Assche <bart.vanassche@sandisk.com>
Cc: Christian Borntraeger <borntraeger@de.ibm.com>
Cc: Joerg Roedel <jroedel@suse.de>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
Bart Van Assche 2017-01-20 13:04:07 -08:00, committed by Doug Ledford
Parent 7844572c63
Commit 551199aca1
4 changed files with 79 additions and 0 deletions

include/linux/dma-mapping.h

@@ -128,6 +128,7 @@ struct dma_map_ops {
 };
 
 extern const struct dma_map_ops dma_noop_ops;
+extern const struct dma_map_ops dma_virt_ops;
 
 #define DMA_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))

lib/Kconfig

@@ -400,6 +400,11 @@ config DMA_NOOP_OPS
 	depends on HAS_DMA && (!64BIT || ARCH_DMA_ADDR_T_64BIT)
 	default n
 
+config DMA_VIRT_OPS
+	bool
+	depends on HAS_DMA && (!64BIT || ARCH_DMA_ADDR_T_64BIT)
+	default n
+
 config CHECK_SIGNATURE
 	bool

lib/Makefile

@@ -27,6 +27,7 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \
 lib-$(CONFIG_MMU) += ioremap.o
 lib-$(CONFIG_SMP) += cpumask.o
 lib-$(CONFIG_DMA_NOOP_OPS) += dma-noop.o
+lib-$(CONFIG_DMA_VIRT_OPS) += dma-virt.o
 
 lib-y	+= kobject.o klist.o
 obj-y	+= lockref.o

lib/dma-virt.c (new file, 72 lines)

@@ -0,0 +1,72 @@
+/*
+ * lib/dma-virt.c
+ *
+ * DMA operations that map to virtual addresses without flushing memory.
+ */
+#include <linux/export.h>
+#include <linux/mm.h>
+#include <linux/dma-mapping.h>
+#include <linux/scatterlist.h>
+
+static void *dma_virt_alloc(struct device *dev, size_t size,
+			    dma_addr_t *dma_handle, gfp_t gfp,
+			    unsigned long attrs)
+{
+	void *ret;
+
+	ret = (void *)__get_free_pages(gfp, get_order(size));
+	if (ret)
+		*dma_handle = (uintptr_t)ret;
+	return ret;
+}
+
+static void dma_virt_free(struct device *dev, size_t size,
+			  void *cpu_addr, dma_addr_t dma_addr,
+			  unsigned long attrs)
+{
+	free_pages((unsigned long)cpu_addr, get_order(size));
+}
+
+static dma_addr_t dma_virt_map_page(struct device *dev, struct page *page,
+				    unsigned long offset, size_t size,
+				    enum dma_data_direction dir,
+				    unsigned long attrs)
+{
+	return (uintptr_t)(page_address(page) + offset);
+}
+
+static int dma_virt_map_sg(struct device *dev, struct scatterlist *sgl,
+			   int nents, enum dma_data_direction dir,
+			   unsigned long attrs)
+{
+	int i;
+	struct scatterlist *sg;
+
+	for_each_sg(sgl, sg, nents, i) {
+		BUG_ON(!sg_page(sg));
+		sg_dma_address(sg) = (uintptr_t)sg_virt(sg);
+		sg_dma_len(sg) = sg->length;
+	}
+
+	return nents;
+}
+
+static int dma_virt_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+	return false;
+}
+
+static int dma_virt_supported(struct device *dev, u64 mask)
+{
+	return true;
+}
+
+const struct dma_map_ops dma_virt_ops = {
+	.alloc			= dma_virt_alloc,
+	.free			= dma_virt_free,
+	.map_page		= dma_virt_map_page,
+	.map_sg			= dma_virt_map_sg,
+	.mapping_error		= dma_virt_mapping_error,
+	.dma_supported		= dma_virt_supported,
+};
+EXPORT_SYMBOL(dma_virt_ops);
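
As a rough illustration of the behaviour (hypothetical, not part of this
patch): once a device uses dma_virt_ops, a mapping handle is simply the
buffer's kernel virtual address, which is what hfi1, qib and rxe expect to
find in ib_sge.addr. Here "dev" stands in for a device whose dma_ops point
at dma_virt_ops:

#include <linux/dma-mapping.h>
#include <linux/slab.h>

static void example_virt_mapping(struct device *dev)
{
	void *buf = kmalloc(64, GFP_KERNEL);
	dma_addr_t handle;

	if (!buf)
		return;
	handle = dma_map_single(dev, buf, 64, DMA_TO_DEVICE);
	/* With dma_virt_ops, handle == (uintptr_t)buf. */
	dma_unmap_single(dev, handle, 64, DMA_TO_DEVICE); /* effectively a no-op */
	kfree(buf);
}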