iommu sg merging: ppc: make iommu respect the segment size limits

This patch makes iommu respect segment size limits when merging sg
lists.

Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Cc: Jeff Garzik <jeff@garzik.org>
Cc: James Bottomley <James.Bottomley@steeleye.com>
Acked-by: Jens Axboe <jens.axboe@oracle.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
FUJITA Tomonori 2008-02-04 22:27:57 -08:00 committed by Linus Torvalds
Parent 42d00284e1
Commit 740c3ce667
3 changed files: 8 additions and 4 deletions

View file

@ -68,7 +68,7 @@ static void dma_iommu_unmap_single(struct device *dev, dma_addr_t dma_handle,
static int dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist, static int dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
int nelems, enum dma_data_direction direction) int nelems, enum dma_data_direction direction)
{ {
return iommu_map_sg(dev->archdata.dma_data, sglist, nelems, return iommu_map_sg(dev, sglist, nelems,
device_to_mask(dev), direction); device_to_mask(dev), direction);
} }

View file

@ -270,16 +270,18 @@ static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
spin_unlock_irqrestore(&(tbl->it_lock), flags); spin_unlock_irqrestore(&(tbl->it_lock), flags);
} }
int iommu_map_sg(struct iommu_table *tbl, struct scatterlist *sglist, int iommu_map_sg(struct device *dev, struct scatterlist *sglist,
int nelems, unsigned long mask, int nelems, unsigned long mask,
enum dma_data_direction direction) enum dma_data_direction direction)
{ {
struct iommu_table *tbl = dev->archdata.dma_data;
dma_addr_t dma_next = 0, dma_addr; dma_addr_t dma_next = 0, dma_addr;
unsigned long flags; unsigned long flags;
struct scatterlist *s, *outs, *segstart; struct scatterlist *s, *outs, *segstart;
int outcount, incount, i; int outcount, incount, i;
unsigned int align; unsigned int align;
unsigned long handle; unsigned long handle;
unsigned int max_seg_size;
BUG_ON(direction == DMA_NONE); BUG_ON(direction == DMA_NONE);
@ -298,6 +300,7 @@ int iommu_map_sg(struct iommu_table *tbl, struct scatterlist *sglist,
spin_lock_irqsave(&(tbl->it_lock), flags); spin_lock_irqsave(&(tbl->it_lock), flags);
max_seg_size = dma_get_max_seg_size(dev);
for_each_sg(sglist, s, nelems, i) { for_each_sg(sglist, s, nelems, i) {
unsigned long vaddr, npages, entry, slen; unsigned long vaddr, npages, entry, slen;
@ -344,7 +347,8 @@ int iommu_map_sg(struct iommu_table *tbl, struct scatterlist *sglist,
/* We cannot merge if: /* We cannot merge if:
* - allocated dma_addr isn't contiguous to previous allocation * - allocated dma_addr isn't contiguous to previous allocation
*/ */
if (novmerge || (dma_addr != dma_next)) { if (novmerge || (dma_addr != dma_next) ||
(outs->dma_length + s->length > max_seg_size)) {
/* Can't merge: create a new segment */ /* Can't merge: create a new segment */
segstart = s; segstart = s;
outcount++; outcount++;

View file

@ -79,7 +79,7 @@ extern void iommu_free_table(struct iommu_table *tbl, const char *node_name);
extern struct iommu_table *iommu_init_table(struct iommu_table * tbl, extern struct iommu_table *iommu_init_table(struct iommu_table * tbl,
int nid); int nid);
extern int iommu_map_sg(struct iommu_table *tbl, struct scatterlist *sglist, extern int iommu_map_sg(struct device *dev, struct scatterlist *sglist,
int nelems, unsigned long mask, int nelems, unsigned long mask,
enum dma_data_direction direction); enum dma_data_direction direction);
extern void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist, extern void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,