Merge branch 'stable/swiotlb-0.8.3' of git://git.kernel.org/pub/scm/linux/kernel/git/konrad/swiotlb-2.6
* 'stable/swiotlb-0.8.3' of git://git.kernel.org/pub/scm/linux/kernel/git/konrad/swiotlb-2.6:
  swiotlb: Make swiotlb bookkeeping functions visible in the header file.
  swiotlb: search and replace "int dir" with "enum dma_data_direction dir"
  swiotlb: Make internal bookkeeping functions have 'swiotlb_tbl' prefix.
  swiotlb: add the swiotlb initialization function with iotlb memory
  swiotlb: add swiotlb_tbl_map_single library function
Commit: 4a35cee066
include/linux/swiotlb.h

@@ -23,6 +23,29 @@ extern int swiotlb_force;
 #define IO_TLB_SHIFT 11
 
 extern void swiotlb_init(int verbose);
+extern void swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose);
+
+/*
+ * Enumeration for sync targets
+ */
+enum dma_sync_target {
+        SYNC_FOR_CPU = 0,
+        SYNC_FOR_DEVICE = 1,
+};
+extern void *swiotlb_tbl_map_single(struct device *hwdev, dma_addr_t tbl_dma_addr,
+                                    phys_addr_t phys, size_t size,
+                                    enum dma_data_direction dir);
+
+extern void swiotlb_tbl_unmap_single(struct device *hwdev, char *dma_addr,
+                                     size_t size, enum dma_data_direction dir);
+
+extern void swiotlb_tbl_sync_single(struct device *hwdev, char *dma_addr,
+                                    size_t size, enum dma_data_direction dir,
+                                    enum dma_sync_target target);
+
+/* Accessory functions. */
+extern void swiotlb_bounce(phys_addr_t phys, char *dma_addr, size_t size,
+                           enum dma_data_direction dir);
 
 extern void
 *swiotlb_alloc_coherent(struct device *hwdev, size_t size,
@@ -42,11 +65,11 @@ extern void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
 
 extern int
 swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, int nents,
-               int direction);
+               enum dma_data_direction dir);
 
 extern void
 swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
-                 int direction);
+                 enum dma_data_direction dir);
 
 extern int
 swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
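The declarations above export what used to be swiotlb-internal bookkeeping, so an alternate bounce-buffer backend can drive the slot allocator against its own IO TLB. A minimal sketch of such a caller, assuming a hypothetical backend that owns its table my_tlb and knows its bus address; none of these names are part of the patch:

/* Hypothetical backend code, for illustration only -- not in this patch. */
#include <linux/swiotlb.h>
#include <linux/dma-mapping.h>

static char *my_tlb;            /* bounce pool owned by this backend */
static dma_addr_t my_tlb_dma;   /* bus address of my_tlb */

static void *my_map(struct device *dev, phys_addr_t phys, size_t size,
                    enum dma_data_direction dir)
{
        /* Reserve slots in my_tlb; copies phys -> bounce for DMA_TO_DEVICE. */
        return swiotlb_tbl_map_single(dev, my_tlb_dma, phys, size, dir);
}

static void my_unmap(struct device *dev, char *bounce, size_t size,
                     enum dma_data_direction dir)
{
        /* Copies bounce -> original for DMA_FROM_DEVICE, then frees the slots. */
        swiotlb_tbl_unmap_single(dev, bounce, size, dir);
}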
lib/swiotlb.c (137 changed lines)

@@ -50,19 +50,11 @@
  */
 #define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)
 
-/*
- * Enumeration for sync targets
- */
-enum dma_sync_target {
-        SYNC_FOR_CPU = 0,
-        SYNC_FOR_DEVICE = 1,
-};
-
 int swiotlb_force;
 
 /*
- * Used to do a quick range check in unmap_single and
- * sync_single_*, to see if the memory was in fact allocated by this
+ * Used to do a quick range check in swiotlb_tbl_unmap_single and
+ * swiotlb_tbl_sync_single_*, to see if the memory was in fact allocated by this
  * API.
  */
 static char *io_tlb_start, *io_tlb_end;
@@ -140,28 +132,14 @@ void swiotlb_print_info(void)
                       (unsigned long long)pend);
 }
 
-/*
- * Statically reserve bounce buffer space and initialize bounce buffer data
- * structures for the software IO TLB used to implement the DMA API.
- */
-void __init
-swiotlb_init_with_default_size(size_t default_size, int verbose)
+void __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
 {
         unsigned long i, bytes;
 
-        if (!io_tlb_nslabs) {
-                io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
-                io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
-        }
-
-        bytes = io_tlb_nslabs << IO_TLB_SHIFT;
+        bytes = nslabs << IO_TLB_SHIFT;
 
-        /*
-         * Get IO TLB memory from the low pages
-         */
-        io_tlb_start = alloc_bootmem_low_pages(bytes);
-        if (!io_tlb_start)
-                panic("Cannot allocate SWIOTLB buffer");
+        io_tlb_nslabs = nslabs;
+        io_tlb_start = tlb;
         io_tlb_end = io_tlb_start + bytes;
 
         /*
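Splitting the bootmem allocation out of the bookkeeping setup is what lets early platform code hand swiotlb a buffer it already owns. A sketch of a hypothetical boot-time caller, assuming bootmem is still available (the function name and the 64 MB size are illustrative):

/* Hypothetical early-boot caller, for illustration only. */
#include <linux/swiotlb.h>
#include <linux/bootmem.h>

/* 64 MB worth of IO TLB slabs; each slab is 1 << IO_TLB_SHIFT (2 KB). */
#define MY_NSLABS       ((64UL << 20) >> IO_TLB_SHIFT)

void __init my_platform_swiotlb_init(void)
{
        char *tlb = alloc_bootmem_low_pages(MY_NSLABS << IO_TLB_SHIFT);

        if (!tlb)
                panic("my_platform: cannot allocate SWIOTLB buffer");

        swiotlb_init_with_tbl(tlb, MY_NSLABS, 1 /* verbose */);
}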
@@ -185,6 +163,32 @@ swiotlb_init_with_default_size(size_t default_size, int verbose)
         swiotlb_print_info();
 }
 
+/*
+ * Statically reserve bounce buffer space and initialize bounce buffer data
+ * structures for the software IO TLB used to implement the DMA API.
+ */
+void __init
+swiotlb_init_with_default_size(size_t default_size, int verbose)
+{
+        unsigned long bytes;
+
+        if (!io_tlb_nslabs) {
+                io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
+                io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
+        }
+
+        bytes = io_tlb_nslabs << IO_TLB_SHIFT;
+
+        /*
+         * Get IO TLB memory from the low pages
+         */
+        io_tlb_start = alloc_bootmem_low_pages(bytes);
+        if (!io_tlb_start)
+                panic("Cannot allocate SWIOTLB buffer");
+
+        swiotlb_init_with_tbl(io_tlb_start, io_tlb_nslabs, verbose);
+}
+
 void __init
 swiotlb_init(int verbose)
 {
@@ -323,8 +327,8 @@ static int is_swiotlb_buffer(phys_addr_t paddr)
 /*
  * Bounce: copy the swiotlb buffer back to the original dma location
  */
-static void swiotlb_bounce(phys_addr_t phys, char *dma_addr, size_t size,
+void swiotlb_bounce(phys_addr_t phys, char *dma_addr, size_t size,
                            enum dma_data_direction dir)
 {
         unsigned long pfn = PFN_DOWN(phys);
 
@@ -360,26 +364,25 @@ static void swiotlb_bounce(phys_addr_t phys, char *dma_addr, size_t size,
                 memcpy(phys_to_virt(phys), dma_addr, size);
         }
 }
+EXPORT_SYMBOL_GPL(swiotlb_bounce);
 
-/*
- * Allocates bounce buffer and returns its kernel virtual address.
- */
-static void *
-map_single(struct device *hwdev, phys_addr_t phys, size_t size, int dir)
+void *swiotlb_tbl_map_single(struct device *hwdev, dma_addr_t tbl_dma_addr,
+                             phys_addr_t phys, size_t size,
+                             enum dma_data_direction dir)
 {
         unsigned long flags;
         char *dma_addr;
         unsigned int nslots, stride, index, wrap;
         int i;
-        unsigned long start_dma_addr;
         unsigned long mask;
         unsigned long offset_slots;
         unsigned long max_slots;
 
         mask = dma_get_seg_boundary(hwdev);
-        start_dma_addr = swiotlb_virt_to_bus(hwdev, io_tlb_start) & mask;
 
-        offset_slots = ALIGN(start_dma_addr, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
+        tbl_dma_addr &= mask;
+
+        offset_slots = ALIGN(tbl_dma_addr, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
 
         /*
          * Carefully handle integer overflow which can occur when mask == ~0UL.
@@ -466,12 +469,27 @@ found:
 
         return dma_addr;
 }
+EXPORT_SYMBOL_GPL(swiotlb_tbl_map_single);
 
+/*
+ * Allocates bounce buffer and returns its kernel virtual address.
+ */
+
+static void *
+map_single(struct device *hwdev, phys_addr_t phys, size_t size,
+           enum dma_data_direction dir)
+{
+        dma_addr_t start_dma_addr = swiotlb_virt_to_bus(hwdev, io_tlb_start);
+
+        return swiotlb_tbl_map_single(hwdev, start_dma_addr, phys, size, dir);
+}
+
 /*
  * dma_addr is the kernel virtual address of the bounce buffer to unmap.
  */
-static void
-do_unmap_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
+void
+swiotlb_tbl_unmap_single(struct device *hwdev, char *dma_addr, size_t size,
+                         enum dma_data_direction dir)
 {
         unsigned long flags;
         int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
@@ -509,10 +527,12 @@ do_unmap_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
         }
         spin_unlock_irqrestore(&io_tlb_lock, flags);
 }
+EXPORT_SYMBOL_GPL(swiotlb_tbl_unmap_single);
 
-static void
-sync_single(struct device *hwdev, char *dma_addr, size_t size,
-            int dir, int target)
+void
+swiotlb_tbl_sync_single(struct device *hwdev, char *dma_addr, size_t size,
+                        enum dma_data_direction dir,
+                        enum dma_sync_target target)
 {
         int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
         phys_addr_t phys = io_tlb_orig_addr[index];
@@ -536,6 +556,7 @@ sync_single(struct device *hwdev, char *dma_addr, size_t size,
                 BUG();
         }
 }
+EXPORT_SYMBOL_GPL(swiotlb_tbl_sync_single);
 
 void *
 swiotlb_alloc_coherent(struct device *hwdev, size_t size,
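With the sync helper exported, a partial buffer can be bounced in either direction without a full unmap; the dma_sync_target picks which way the copy runs. Two illustrative wrappers (the names and the hardcoded direction pairs are mine, not the patch's):

/* Illustrative wrappers, for context only -- not in this patch. */
#include <linux/swiotlb.h>
#include <linux/dma-mapping.h>

/* The device wrote into the bounce buffer: copy it back for the CPU. */
static void my_sync_for_cpu(struct device *dev, char *bounce, size_t len)
{
        swiotlb_tbl_sync_single(dev, bounce, len, DMA_FROM_DEVICE,
                                SYNC_FOR_CPU);
}

/* The CPU rewrote the original buffer: recopy it before the device reads. */
static void my_sync_for_device(struct device *dev, char *bounce, size_t len)
{
        swiotlb_tbl_sync_single(dev, bounce, len, DMA_TO_DEVICE,
                                SYNC_FOR_DEVICE);
}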
@@ -559,8 +580,8 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
         }
         if (!ret) {
                 /*
-                 * We are either out of memory or the device can't DMA
-                 * to GFP_DMA memory; fall back on map_single(), which
+                 * We are either out of memory or the device can't DMA to
+                 * GFP_DMA memory; fall back on map_single(), which
                  * will grab memory from the lowest available address range.
                  */
                 ret = map_single(hwdev, 0, size, DMA_FROM_DEVICE);
@@ -578,7 +599,7 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
                        (unsigned long long)dev_addr);
 
                 /* DMA_TO_DEVICE to avoid memcpy in unmap_single */
-                do_unmap_single(hwdev, ret, size, DMA_TO_DEVICE);
+                swiotlb_tbl_unmap_single(hwdev, ret, size, DMA_TO_DEVICE);
                 return NULL;
         }
         *dma_handle = dev_addr;
@@ -596,13 +617,14 @@ swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
         if (!is_swiotlb_buffer(paddr))
                 free_pages((unsigned long)vaddr, get_order(size));
         else
-                /* DMA_TO_DEVICE to avoid memcpy in unmap_single */
-                do_unmap_single(hwdev, vaddr, size, DMA_TO_DEVICE);
+                /* DMA_TO_DEVICE to avoid memcpy in swiotlb_tbl_unmap_single */
+                swiotlb_tbl_unmap_single(hwdev, vaddr, size, DMA_TO_DEVICE);
 }
 EXPORT_SYMBOL(swiotlb_free_coherent);
 
 static void
-swiotlb_full(struct device *dev, size_t size, int dir, int do_panic)
+swiotlb_full(struct device *dev, size_t size, enum dma_data_direction dir,
+             int do_panic)
 {
         /*
          * Ran out of IOMMU space for this operation. This is very bad.
@@ -680,14 +702,14 @@ EXPORT_SYMBOL_GPL(swiotlb_map_page);
  * whatever the device wrote there.
  */
 static void unmap_single(struct device *hwdev, dma_addr_t dev_addr,
-                         size_t size, int dir)
+                         size_t size, enum dma_data_direction dir)
 {
         phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);
 
         BUG_ON(dir == DMA_NONE);
 
         if (is_swiotlb_buffer(paddr)) {
-                do_unmap_single(hwdev, phys_to_virt(paddr), size, dir);
+                swiotlb_tbl_unmap_single(hwdev, phys_to_virt(paddr), size, dir);
                 return;
         }
 
@@ -723,14 +745,16 @@ EXPORT_SYMBOL_GPL(swiotlb_unmap_page);
  */
 static void
 swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
-                    size_t size, int dir, int target)
+                    size_t size, enum dma_data_direction dir,
+                    enum dma_sync_target target)
 {
         phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);
 
         BUG_ON(dir == DMA_NONE);
 
         if (is_swiotlb_buffer(paddr)) {
-                sync_single(hwdev, phys_to_virt(paddr), size, dir, target);
+                swiotlb_tbl_sync_single(hwdev, phys_to_virt(paddr), size, dir,
+                                        target);
                 return;
         }
 
@@ -809,7 +833,7 @@ EXPORT_SYMBOL(swiotlb_map_sg_attrs);
 
 int
 swiotlb_map_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
-               int dir)
+               enum dma_data_direction dir)
 {
         return swiotlb_map_sg_attrs(hwdev, sgl, nelems, dir, NULL);
 }
@@ -836,7 +860,7 @@ EXPORT_SYMBOL(swiotlb_unmap_sg_attrs);
 
 void
 swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
-                 int dir)
+                 enum dma_data_direction dir)
 {
         return swiotlb_unmap_sg_attrs(hwdev, sgl, nelems, dir, NULL);
 }
@@ -851,7 +875,8 @@ EXPORT_SYMBOL(swiotlb_unmap_sg);
  */
 static void
 swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sgl,
-                int nelems, int dir, int target)
+                int nelems, enum dma_data_direction dir,
+                enum dma_sync_target target)
 {
         struct scatterlist *sg;
         int i;