Update arch/ to use sg helpers
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
Parent: f9527f121b
Commit: 58b053e4ce
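Note (added for context, not part of the original commit): the sg helpers applied throughout the diff below are thin accessors over struct scatterlist, so the arch code no longer pokes at sg->page and sg->offset directly. A minimal sketch of the typical before/after pattern, using a hypothetical function name and assuming only <linux/scatterlist.h>:

    #include <linux/bug.h>
    #include <linux/scatterlist.h>

    /*
     * Hypothetical example only: shows what the helpers expand to.
     *   sg_page(sg) replaces sg->page
     *   sg_virt(sg) replaces page_address(sg->page) + sg->offset
     *   sg_phys(sg) replaces page_to_phys(sg->page) + sg->offset
     */
    static void *example_sg_kernel_addr(struct scatterlist *sg)
    {
            BUG_ON(!sg_page(sg));   /* old style: BUG_ON(!sg->page); */

            /* old style: return page_address(sg->page) + sg->offset; */
            return sg_virt(sg);
    }

Going through the accessors lets the scatterlist implementation change underneath (for example for chained scatterlists) without touching every architecture again.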
@@ -465,7 +465,7 @@ EXPORT_SYMBOL(pci_free_consistent);
    Write dma_length of each leader with the combined lengths of
    the mergable followers. */
 
-#define SG_ENT_VIRT_ADDRESS(SG) (page_address((SG)->page) + (SG)->offset)
+#define SG_ENT_VIRT_ADDRESS(SG) (sg_virt((SG)))
 #define SG_ENT_PHYS_ADDRESS(SG) __pa(SG_ENT_VIRT_ADDRESS(SG))
 
 static void
@@ -442,7 +442,7 @@ dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
 	BUG_ON(dir == DMA_NONE);
 
 	for (i = 0; i < nents; i++, sg++) {
-		struct page *page = sg->page;
+		struct page *page = sg_page(sg);
 		unsigned int offset = sg->offset;
 		unsigned int length = sg->length;
 		void *ptr = page_address(page) + offset;
@@ -160,8 +160,7 @@ dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
 	BUG_ON(direction == DMA_NONE);
 
 	for (i = 0; i < nents; i++, sg++) {
-		sg->dma_address = (dma_addr_t)(page_address(sg->page) +
-					       sg->offset);
+		sg->dma_address = (dma_addr_t) sg_virt(sg);
 
 		invalidate_dcache_range(sg_dma_address(sg),
 					sg_dma_address(sg) +
@@ -246,7 +246,7 @@ static int reserve_sba_gart = 1;
 static SBA_INLINE void sba_mark_invalid(struct ioc *, dma_addr_t, size_t);
 static SBA_INLINE void sba_free_range(struct ioc *, dma_addr_t, size_t);
 
-#define sba_sg_address(sg) (page_address((sg)->page) + (sg)->offset)
+#define sba_sg_address(sg) sg_virt((sg))
 
 #ifdef FULL_VALID_PDIR
 static u64 prefetch_spill_page;
@@ -131,7 +131,7 @@ simscsi_sg_readwrite (struct scsi_cmnd *sc, int mode, unsigned long offset)
 	stat.fd = desc[sc->device->id];
 
 	scsi_for_each_sg(sc, sl, scsi_sg_count(sc), i) {
-		req.addr = __pa(page_address(sl->page) + sl->offset);
+		req.addr = __pa(sg_virt(sl));
 		req.len = sl->length;
 		if (DBG)
 			printk("simscsi_sg_%s @ %lx (off %lx) use_sg=%d len=%d\n",
@@ -212,7 +212,7 @@ static void simscsi_fillresult(struct scsi_cmnd *sc, char *buf, unsigned len)
 		if (!len)
 			break;
 		thislen = min(len, slp->length);
-		memcpy(page_address(slp->page) + slp->offset, buf, thislen);
+		memcpy(sg_virt(slp), buf, thislen);
 		len -= thislen;
 	}
 }
@@ -16,7 +16,7 @@
 #include <asm/sn/pcidev.h>
 #include <asm/sn/sn_sal.h>
 
-#define SG_ENT_VIRT_ADDRESS(sg) (page_address((sg)->page) + (sg)->offset)
+#define SG_ENT_VIRT_ADDRESS(sg) (sg_virt((sg)))
 #define SG_ENT_PHYS_ADDRESS(SG) virt_to_phys(SG_ENT_VIRT_ADDRESS(SG))
 
 /**
@@ -121,7 +121,7 @@ int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
 	int i;
 
 	for (i = 0; i < nents; sg++, i++) {
-		sg->dma_address = page_to_phys(sg->page) + sg->offset;
+		sg->dma_address = sg_phys(sg);
 		dma_sync_single_for_device(dev, sg->dma_address, sg->length, dir);
 	}
 	return nents;
@@ -165,12 +165,11 @@ int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
 	for (i = 0; i < nents; i++, sg++) {
 		unsigned long addr;
 
-		addr = (unsigned long) page_address(sg->page);
+		addr = (unsigned long) sg_virt(sg);
 		if (!plat_device_is_coherent(dev) && addr)
-			__dma_sync(addr + sg->offset, sg->length, direction);
+			__dma_sync(addr, sg->length, direction);
 		sg->dma_address = plat_map_dma_mem(dev,
-						   (void *)(addr + sg->offset),
-						   sg->length);
+						   (void *)addr, sg->length);
 	}
 
 	return nents;
@@ -223,10 +222,9 @@ void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
 	for (i = 0; i < nhwentries; i++, sg++) {
 		if (!plat_device_is_coherent(dev) &&
 		    direction != DMA_TO_DEVICE) {
-			addr = (unsigned long) page_address(sg->page);
+			addr = (unsigned long) sg_virt(sg);
 			if (addr)
-				__dma_sync(addr + sg->offset, sg->length,
-					   direction);
+				__dma_sync(addr, sg->length, direction);
 		}
 		plat_unmap_dma_mem(sg->dma_address);
 	}
@@ -304,7 +302,7 @@ void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
 	/* Make sure that gcc doesn't leave the empty loop body. */
 	for (i = 0; i < nelems; i++, sg++) {
 		if (cpu_is_noncoherent_r10000(dev))
-			__dma_sync((unsigned long)page_address(sg->page),
+			__dma_sync((unsigned long)page_address(sg_page(sg)),
 				   sg->length, direction);
 		plat_unmap_dma_mem(sg->dma_address);
 	}
@@ -322,7 +320,7 @@ void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nele
 	/* Make sure that gcc doesn't leave the empty loop body. */
 	for (i = 0; i < nelems; i++, sg++) {
 		if (!plat_device_is_coherent(dev))
-			__dma_sync((unsigned long)page_address(sg->page),
+			__dma_sync((unsigned long)page_address(sg_page(sg)),
 				   sg->length, direction);
 		plat_unmap_dma_mem(sg->dma_address);
 	}
@@ -161,8 +161,7 @@ static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
 	int i;
 
 	for_each_sg(sgl, sg, nents, i) {
-		sg->dma_address = (page_to_phys(sg->page) + sg->offset) |
-			dma_direct_offset;
+		sg->dma_address = sg_phys(sg) | dma_direct_offset;
 		sg->dma_length = sg->length;
 	}
 
@@ -102,8 +102,7 @@ static int ibmebus_map_sg(struct device *dev,
 	int i;
 
 	for_each_sg(sgl, sg, nents, i) {
-		sg->dma_address = (dma_addr_t)page_address(sg->page)
-			+ sg->offset;
+		sg->dma_address = (dma_addr_t) sg_virt(sg);
 		sg->dma_length = sg->length;
 	}
 
@@ -307,7 +307,7 @@ int iommu_map_sg(struct iommu_table *tbl, struct scatterlist *sglist,
 			continue;
 		}
 		/* Allocate iommu entries for that segment */
-		vaddr = (unsigned long)page_address(s->page) + s->offset;
+		vaddr = (unsigned long) sg_virt(s);
 		npages = iommu_num_pages(vaddr, slen);
 		entry = iommu_range_alloc(tbl, npages, &handle, mask >> IOMMU_PAGE_SHIFT, 0);
 
@@ -628,9 +628,8 @@ static int ps3_sb_map_sg(struct device *_dev, struct scatterlist *sgl,
 	int i;
 
 	for_each_sg(sgl, sg, nents, i) {
-		int result = ps3_dma_map(dev->d_region,
-			page_to_phys(sg->page) + sg->offset, sg->length,
-			&sg->dma_address, 0);
+		int result = ps3_dma_map(dev->d_region, sg_phys(sg),
+					 sg->length, &sg->dma_address, 0);
 
 		if (result) {
 			pr_debug("%s:%d: ps3_dma_map failed (%d)\n",
@@ -727,9 +727,8 @@ int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sgl, int nents,
 	BUG_ON(direction == PCI_DMA_NONE);
 	/* IIep is write-through, not flushing. */
 	for_each_sg(sgl, sg, nents, n) {
-		BUG_ON(page_address(sg->page) == NULL);
-		sg->dvma_address =
-			virt_to_phys(page_address(sg->page)) + sg->offset;
+		BUG_ON(page_address(sg_page(sg)) == NULL);
+		sg->dvma_address = virt_to_phys(sg_virt(sg));
 		sg->dvma_length = sg->length;
 	}
 	return nents;
@@ -748,9 +747,9 @@ void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sgl, int nents,
 	BUG_ON(direction == PCI_DMA_NONE);
 	if (direction != PCI_DMA_TODEVICE) {
 		for_each_sg(sgl, sg, nents, n) {
-			BUG_ON(page_address(sg->page) == NULL);
+			BUG_ON(page_address(sg_page(sg)) == NULL);
 			mmu_inval_dma_area(
-			    (unsigned long) page_address(sg->page),
+			    (unsigned long) page_address(sg_page(sg)),
 			    (sg->length + PAGE_SIZE-1) & PAGE_MASK);
 		}
 	}
@@ -798,9 +797,9 @@ void pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sgl, int
 	BUG_ON(direction == PCI_DMA_NONE);
 	if (direction != PCI_DMA_TODEVICE) {
 		for_each_sg(sgl, sg, nents, n) {
-			BUG_ON(page_address(sg->page) == NULL);
+			BUG_ON(page_address(sg_page(sg)) == NULL);
 			mmu_inval_dma_area(
-			    (unsigned long) page_address(sg->page),
+			    (unsigned long) page_address(sg_page(sg)),
 			    (sg->length + PAGE_SIZE-1) & PAGE_MASK);
 		}
 	}
@@ -814,9 +813,9 @@ void pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sgl,
 	BUG_ON(direction == PCI_DMA_NONE);
 	if (direction != PCI_DMA_TODEVICE) {
 		for_each_sg(sgl, sg, nents, n) {
-			BUG_ON(page_address(sg->page) == NULL);
+			BUG_ON(page_address(sg_page(sg)) == NULL);
 			mmu_inval_dma_area(
-			    (unsigned long) page_address(sg->page),
+			    (unsigned long) page_address(sg_page(sg)),
 			    (sg->length + PAGE_SIZE-1) & PAGE_MASK);
 		}
 	}
@@ -144,7 +144,7 @@ static void iounit_get_scsi_sgl(struct scatterlist *sg, int sz, struct sbus_bus
 	spin_lock_irqsave(&iounit->lock, flags);
 	while (sz != 0) {
 		--sz;
-		sg->dvma_address = iounit_get_area(iounit, (unsigned long)page_address(sg->page) + sg->offset, sg->length);
+		sg->dvma_address = iounit_get_area(iounit, sg_virt(sg), sg->length);
 		sg->dvma_length = sg->length;
 		sg = sg_next(sg);
 	}
@@ -238,7 +238,7 @@ static void iommu_get_scsi_sgl_noflush(struct scatterlist *sg, int sz, struct sb
 	while (sz != 0) {
 		--sz;
 		n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;
-		sg->dvma_address = iommu_get_one(sg->page, n, sbus) + sg->offset;
+		sg->dvma_address = iommu_get_one(sg_page(sg), n, sbus) + sg->offset;
 		sg->dvma_length = (__u32) sg->length;
 		sg = sg_next(sg);
 	}
@@ -252,7 +252,7 @@ static void iommu_get_scsi_sgl_gflush(struct scatterlist *sg, int sz, struct sbu
 	while (sz != 0) {
 		--sz;
 		n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;
-		sg->dvma_address = iommu_get_one(sg->page, n, sbus) + sg->offset;
+		sg->dvma_address = iommu_get_one(sg_page(sg), n, sbus) + sg->offset;
 		sg->dvma_length = (__u32) sg->length;
 		sg = sg_next(sg);
 	}
@@ -273,7 +273,7 @@ static void iommu_get_scsi_sgl_pflush(struct scatterlist *sg, int sz, struct sbu
 		 * XXX Is this a good assumption?
 		 * XXX What if someone else unmaps it here and races us?
 		 */
-		if ((page = (unsigned long) page_address(sg->page)) != 0) {
+		if ((page = (unsigned long) page_address(sg_page(sg))) != 0) {
 			for (i = 0; i < n; i++) {
 				if (page != oldpage) { /* Already flushed? */
 					flush_page_for_dma(page);
@@ -283,7 +283,7 @@ static void iommu_get_scsi_sgl_pflush(struct scatterlist *sg, int sz, struct sbu
 				}
 			}
 
-		sg->dvma_address = iommu_get_one(sg->page, n, sbus) + sg->offset;
+		sg->dvma_address = iommu_get_one(sg_page(sg), n, sbus) + sg->offset;
 		sg->dvma_length = (__u32) sg->length;
 		sg = sg_next(sg);
 	}
@@ -1228,7 +1228,7 @@ static void sun4c_get_scsi_sgl(struct scatterlist *sg, int sz, struct sbus_bus *
 {
 	while (sz != 0) {
 		--sz;
-		sg->dvma_address = (__u32)sun4c_lockarea(page_address(sg->page) + sg->offset, sg->length);
+		sg->dvma_address = (__u32)sun4c_lockarea(sg_virt(sg), sg->length);
 		sg->dvma_length = sg->length;
 		sg = sg_next(sg);
 	}
@@ -472,8 +472,7 @@ static void dma_4u_unmap_single(struct device *dev, dma_addr_t bus_addr,
 	spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
-#define SG_ENT_PHYS_ADDRESS(SG) \
-	(__pa(page_address((SG)->page)) + (SG)->offset)
+#define SG_ENT_PHYS_ADDRESS(SG) (__pa(sg_virt((SG))))
 
 static void fill_sg(iopte_t *iopte, struct scatterlist *sg,
 		    int nused, int nelems,
@@ -565,9 +564,7 @@ static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
 	/* Fast path single entry scatterlists. */
 	if (nelems == 1) {
 		sglist->dma_address =
-			dma_4u_map_single(dev,
-					  (page_address(sglist->page) +
-					   sglist->offset),
+			dma_4u_map_single(dev, sg_virt(sglist),
 					  sglist->length, direction);
 		if (unlikely(sglist->dma_address == DMA_ERROR_CODE))
 			return 0;
@@ -73,7 +73,7 @@ static int verify_one_map(struct scatterlist *dma_sg, struct scatterlist **__sg,
 
 	daddr = dma_sg->dma_address;
 	sglen = sg->length;
-	sgaddr = (unsigned long) (page_address(sg->page) + sg->offset);
+	sgaddr = (unsigned long) sg_virt(sg);
 	while (dlen > 0) {
 		unsigned long paddr;
 
@@ -123,7 +123,7 @@ static int verify_one_map(struct scatterlist *dma_sg, struct scatterlist **__sg,
 			sg = sg_next(sg);
 			if (--nents <= 0)
 				break;
-			sgaddr = (unsigned long) (page_address(sg->page) + sg->offset);
+			sgaddr = (unsigned long) sg_virt(sg);
 			sglen = sg->length;
 		}
 		if (dlen < 0) {
@@ -191,7 +191,7 @@ void verify_sglist(struct scatterlist *sglist, int nents, iopte_t *iopte, int np
 		printk("sg(%d): page_addr(%p) off(%x) length(%x) "
 		       "dma_address[%016x] dma_length[%016x]\n",
 		       i,
-		       page_address(sg->page), sg->offset,
+		       page_address(sg_page(sg)), sg->offset,
 		       sg->length,
 		       sg->dma_address, sg->dma_length);
 	}
@@ -207,15 +207,14 @@ unsigned long prepare_sg(struct scatterlist *sg, int nents)
 	unsigned long prev;
 	u32 dent_addr, dent_len;
 
-	prev = (unsigned long) (page_address(sg->page) + sg->offset);
+	prev = (unsigned long) sg_virt(sg);
 	prev += (unsigned long) (dent_len = sg->length);
-	dent_addr = (u32) ((unsigned long)(page_address(sg->page) + sg->offset)
-			   & (IO_PAGE_SIZE - 1UL));
+	dent_addr = (u32) ((unsigned long)(sg_virt(sg)) & (IO_PAGE_SIZE - 1UL));
 	while (--nents) {
 		unsigned long addr;
 
 		sg = sg_next(sg);
-		addr = (unsigned long) (page_address(sg->page) + sg->offset);
+		addr = (unsigned long) sg_virt(sg);
 		if (! VCONTIG(prev, addr)) {
 			dma_sg->dma_address = dent_addr;
 			dma_sg->dma_length = dent_len;
@@ -2057,7 +2057,7 @@ static void fill_cookies(struct cookie_state *sp, unsigned long pa,
 
 static int sg_count_one(struct scatterlist *sg)
 {
-	unsigned long base = page_to_pfn(sg->page) << PAGE_SHIFT;
+	unsigned long base = page_to_pfn(sg_page(sg)) << PAGE_SHIFT;
 	long len = sg->length;
 
 	if ((sg->offset | len) & (8UL - 1))
@@ -365,8 +365,7 @@ static void dma_4v_unmap_single(struct device *dev, dma_addr_t bus_addr,
 	spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
-#define SG_ENT_PHYS_ADDRESS(SG) \
-	(__pa(page_address((SG)->page)) + (SG)->offset)
+#define SG_ENT_PHYS_ADDRESS(SG) (__pa(sg_virt((SG))))
 
 static long fill_sg(long entry, struct device *dev,
 		    struct scatterlist *sg,
@@ -477,9 +476,7 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
 	/* Fast path single entry scatterlists. */
 	if (nelems == 1) {
 		sglist->dma_address =
-			dma_4v_map_single(dev,
-					  (page_address(sglist->page) +
-					   sglist->offset),
+			dma_4v_map_single(dev, sg_virt(sglist),
 					  sglist->length, direction);
 		if (unlikely(sglist->dma_address == DMA_ERROR_CODE))
 			return 0;
@@ -411,8 +411,10 @@ static int calgary_nontranslate_map_sg(struct device* dev,
 	int i;
 
 	for_each_sg(sg, s, nelems, i) {
-		BUG_ON(!s->page);
-		s->dma_address = virt_to_bus(page_address(s->page) +s->offset);
+		struct page *p = sg_page(s);
+
+		BUG_ON(!p);
+		s->dma_address = virt_to_bus(sg_virt(s));
 		s->dma_length = s->length;
 	}
 	return nelems;
@@ -432,9 +434,9 @@ static int calgary_map_sg(struct device *dev, struct scatterlist *sg,
 		return calgary_nontranslate_map_sg(dev, sg, nelems, direction);
 
 	for_each_sg(sg, s, nelems, i) {
-		BUG_ON(!s->page);
+		BUG_ON(!sg_page(s));
 
-		vaddr = (unsigned long)page_address(s->page) + s->offset;
+		vaddr = (unsigned long) sg_virt(s);
 		npages = num_dma_pages(vaddr, s->length);
 
 		entry = iommu_range_alloc(tbl, npages);
@@ -302,7 +302,7 @@ static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg,
 #endif
 
 	for_each_sg(sg, s, nents, i) {
-		unsigned long addr = page_to_phys(s->page) + s->offset;
+		unsigned long addr = sg_phys(s);
 		if (nonforced_iommu(dev, addr, s->length)) {
 			addr = dma_map_area(dev, addr, s->length, dir);
 			if (addr == bad_dma_address) {
@@ -397,7 +397,7 @@ static int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents,
 	start_sg = sgmap = sg;
 	ps = NULL; /* shut up gcc */
 	for_each_sg(sg, s, nents, i) {
-		dma_addr_t addr = page_to_phys(s->page) + s->offset;
+		dma_addr_t addr = sg_phys(s);
 		s->dma_address = addr;
 		BUG_ON(s->length == 0);
 
@@ -62,8 +62,8 @@ static int nommu_map_sg(struct device *hwdev, struct scatterlist *sg,
 	int i;
 
 	for_each_sg(sg, s, nents, i) {
-		BUG_ON(!s->page);
-		s->dma_address = virt_to_bus(page_address(s->page) +s->offset);
+		BUG_ON(!sg_page(s));
+		s->dma_address = virt_to_bus(sg_virt(s));
 		if (!check_addr("map_sg", hwdev, s->dma_address, s->length))
 			return 0;
 		s->dma_length = s->length;