powerpc: Update remaining dma_mapping_ops to use map/unmap_page

After the merge of the 32-bit and 64-bit DMA code, dma_direct_ops lost
its map/unmap_single() functions but gained map/unmap_page().  This
caused a problem for Cell because Cell's dma_iommu_fixed_ops called
into dma_direct_ops if the fixed linear mapping was to be used, or
into the iommu ops if the dynamic window was to be used.  So to fix
this problem we need to update the 64-bit DMA code to use
map/unmap_page.

First, we update the generic IOMMU code so that iommu_map_single()
becomes iommu_map_page() and iommu_unmap_single() becomes
iommu_unmap_page().  Then we propagate these changes up through all
the callers of these two functions and in the process update all the
dma_mapping_ops so that they provide map/unmap_page rather than
map/unmap_single.  We can do this because 64-bit powerpc has no
HIGHMEM, so map/unmap_page ends up doing exactly the same work as
map/unmap_single, just taking different arguments.
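
To see why the two hooks do the same work on 64-bit, here is a minimal
sketch (example_map_page() and do_map_buffer() are hypothetical names,
not code from the patch): the old vaddr can always be recovered from
the page/offset pair because every page has a permanent kernel mapping.

	static dma_addr_t example_map_page(struct device *dev, struct page *page,
					   unsigned long offset, size_t size,
					   enum dma_data_direction direction,
					   struct dma_attrs *attrs)
	{
		/* On 64-bit powerpc there is no HIGHMEM, so every page has a
		 * permanent kernel mapping and page_address() cannot fail.
		 */
		void *vaddr = page_address(page) + offset;

		/* From here the body is identical to what the old
		 * map_single(dev, vaddr, size, ...) hook did; do_map_buffer()
		 * is a hypothetical stand-in for that body.
		 */
		return do_map_buffer(dev, vaddr, size, direction, attrs);
	}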

This has no effect on drivers because dma_map_single_attrs() just
ends up calling the map_page() function of the appropriate
dma_mapping_ops, and similarly dma_unmap_single_attrs() calls
unmap_page().
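
As a rough, from-memory sketch of that wrapper (the exact ops-lookup
helper may differ from the get_dma_ops() shown here), the conversion
from a kernel virtual address to a page/offset pair looks roughly like
this:

	static inline dma_addr_t dma_map_single_attrs(struct device *dev,
						      void *cpu_addr, size_t size,
						      enum dma_data_direction direction,
						      struct dma_attrs *attrs)
	{
		struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

		/* Split the kernel virtual address into a page and an offset
		 * within that page, then call the ops' map_page() hook.
		 */
		return dma_ops->map_page(dev, virt_to_page(cpu_addr),
					 (unsigned long)cpu_addr & ~PAGE_MASK,
					 size, direction, attrs);
	}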

This fixes an oops on Cell blades: without this change they oops on
boot because they call dma_direct_ops.map_single, which is NULL.

Signed-off-by: Mark Nelson <markn@au1.ibm.com>
Signed-off-by: Paul Mackerras <paulus@samba.org>
Mark Nelson 2008-10-27 20:38:08 +00:00 committed by Paul Mackerras
Parent b30115ea8f
Commit f9226d572d
8 changed files with 104 additions and 99 deletions


@@ -92,13 +92,14 @@ extern void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
unsigned long mask, gfp_t flag, int node);
extern void iommu_free_coherent(struct iommu_table *tbl, size_t size,
void *vaddr, dma_addr_t dma_handle);
extern dma_addr_t iommu_map_single(struct device *dev, struct iommu_table *tbl,
void *vaddr, size_t size, unsigned long mask,
enum dma_data_direction direction,
struct dma_attrs *attrs);
extern void iommu_unmap_single(struct iommu_table *tbl, dma_addr_t dma_handle,
size_t size, enum dma_data_direction direction,
struct dma_attrs *attrs);
extern dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl,
struct page *page, unsigned long offset,
size_t size, unsigned long mask,
enum dma_data_direction direction,
struct dma_attrs *attrs);
extern void iommu_unmap_page(struct iommu_table *tbl, dma_addr_t dma_handle,
size_t size, enum dma_data_direction direction,
struct dma_attrs *attrs);
extern void iommu_init_early_pSeries(void);
extern void iommu_init_early_iSeries(void);


@@ -30,28 +30,26 @@ static void dma_iommu_free_coherent(struct device *dev, size_t size,
}
/* Creates TCEs for a user provided buffer. The user buffer must be
* contiguous real kernel storage (not vmalloc). The address of the buffer
* passed here is the kernel (virtual) address of the buffer. The buffer
* need not be page aligned, the dma_addr_t returned will point to the same
* byte within the page as vaddr.
* contiguous real kernel storage (not vmalloc). The address passed here
* comprises a page address and offset into that page. The dma_addr_t
* returned will point to the same byte within the page as was passed in.
*/
static dma_addr_t dma_iommu_map_single(struct device *dev, void *vaddr,
size_t size,
enum dma_data_direction direction,
struct dma_attrs *attrs)
static dma_addr_t dma_iommu_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction direction,
struct dma_attrs *attrs)
{
return iommu_map_single(dev, dev->archdata.dma_data, vaddr, size,
device_to_mask(dev), direction, attrs);
return iommu_map_page(dev, dev->archdata.dma_data, page, offset, size,
device_to_mask(dev), direction, attrs);
}
static void dma_iommu_unmap_single(struct device *dev, dma_addr_t dma_handle,
size_t size,
enum dma_data_direction direction,
struct dma_attrs *attrs)
static void dma_iommu_unmap_page(struct device *dev, dma_addr_t dma_handle,
size_t size, enum dma_data_direction direction,
struct dma_attrs *attrs)
{
iommu_unmap_single(dev->archdata.dma_data, dma_handle, size, direction,
attrs);
iommu_unmap_page(dev->archdata.dma_data, dma_handle, size, direction,
attrs);
}
@@ -94,10 +92,10 @@ static int dma_iommu_dma_supported(struct device *dev, u64 mask)
struct dma_mapping_ops dma_iommu_ops = {
.alloc_coherent = dma_iommu_alloc_coherent,
.free_coherent = dma_iommu_free_coherent,
.map_single = dma_iommu_map_single,
.unmap_single = dma_iommu_unmap_single,
.map_sg = dma_iommu_map_sg,
.unmap_sg = dma_iommu_unmap_sg,
.dma_supported = dma_iommu_dma_supported,
.map_page = dma_iommu_map_page,
.unmap_page = dma_iommu_unmap_page,
};
EXPORT_SYMBOL(dma_iommu_ops);


@@ -79,20 +79,21 @@ static void ibmebus_free_coherent(struct device *dev,
kfree(vaddr);
}
static dma_addr_t ibmebus_map_single(struct device *dev,
void *ptr,
size_t size,
enum dma_data_direction direction,
struct dma_attrs *attrs)
static dma_addr_t ibmebus_map_page(struct device *dev,
struct page *page,
unsigned long offset,
size_t size,
enum dma_data_direction direction,
struct dma_attrs *attrs)
{
return (dma_addr_t)(ptr);
return (dma_addr_t)(page_address(page) + offset);
}
static void ibmebus_unmap_single(struct device *dev,
dma_addr_t dma_addr,
size_t size,
enum dma_data_direction direction,
struct dma_attrs *attrs)
static void ibmebus_unmap_page(struct device *dev,
dma_addr_t dma_addr,
size_t size,
enum dma_data_direction direction,
struct dma_attrs *attrs)
{
return;
}
@@ -129,11 +130,11 @@ static int ibmebus_dma_supported(struct device *dev, u64 mask)
static struct dma_mapping_ops ibmebus_dma_ops = {
.alloc_coherent = ibmebus_alloc_coherent,
.free_coherent = ibmebus_free_coherent,
.map_single = ibmebus_map_single,
.unmap_single = ibmebus_unmap_single,
.map_sg = ibmebus_map_sg,
.unmap_sg = ibmebus_unmap_sg,
.dma_supported = ibmebus_dma_supported,
.map_page = ibmebus_map_page,
.unmap_page = ibmebus_unmap_page,
};
static int ibmebus_match_path(struct device *dev, void *data)


@@ -565,21 +565,23 @@ void iommu_free_table(struct iommu_table *tbl, const char *node_name)
}
/* Creates TCEs for a user provided buffer. The user buffer must be
* contiguous real kernel storage (not vmalloc). The address of the buffer
* passed here is the kernel (virtual) address of the buffer. The buffer
* need not be page aligned, the dma_addr_t returned will point to the same
* byte within the page as vaddr.
* contiguous real kernel storage (not vmalloc). The address passed here
* comprises a page address and offset into that page. The dma_addr_t
* returned will point to the same byte within the page as was passed in.
*/
dma_addr_t iommu_map_single(struct device *dev, struct iommu_table *tbl,
void *vaddr, size_t size, unsigned long mask,
enum dma_data_direction direction, struct dma_attrs *attrs)
dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl,
struct page *page, unsigned long offset, size_t size,
unsigned long mask, enum dma_data_direction direction,
struct dma_attrs *attrs)
{
dma_addr_t dma_handle = DMA_ERROR_CODE;
void *vaddr;
unsigned long uaddr;
unsigned int npages, align;
BUG_ON(direction == DMA_NONE);
vaddr = page_address(page) + offset;
uaddr = (unsigned long)vaddr;
npages = iommu_num_pages(uaddr, size, IOMMU_PAGE_SIZE);
@@ -605,9 +607,9 @@ dma_addr_t iommu_map_single(struct device *dev, struct iommu_table *tbl,
return dma_handle;
}
void iommu_unmap_single(struct iommu_table *tbl, dma_addr_t dma_handle,
size_t size, enum dma_data_direction direction,
struct dma_attrs *attrs)
void iommu_unmap_page(struct iommu_table *tbl, dma_addr_t dma_handle,
size_t size, enum dma_data_direction direction,
struct dma_attrs *attrs)
{
unsigned int npages;


@@ -516,10 +516,10 @@ static void vio_dma_iommu_free_coherent(struct device *dev, size_t size,
vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE));
}
static dma_addr_t vio_dma_iommu_map_single(struct device *dev, void *vaddr,
size_t size,
enum dma_data_direction direction,
struct dma_attrs *attrs)
static dma_addr_t vio_dma_iommu_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction direction,
struct dma_attrs *attrs)
{
struct vio_dev *viodev = to_vio_dev(dev);
dma_addr_t ret = DMA_ERROR_CODE;
@@ -529,7 +529,7 @@ static dma_addr_t vio_dma_iommu_map_single(struct device *dev, void *vaddr,
return ret;
}
ret = dma_iommu_ops.map_single(dev, vaddr, size, direction, attrs);
ret = dma_iommu_ops.map_page(dev, page, offset, size, direction, attrs);
if (unlikely(dma_mapping_error(dev, ret))) {
vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE));
atomic_inc(&viodev->cmo.allocs_failed);
@@ -538,14 +538,14 @@ static dma_addr_t vio_dma_iommu_map_single(struct device *dev, void *vaddr,
return ret;
}
static void vio_dma_iommu_unmap_single(struct device *dev,
dma_addr_t dma_handle, size_t size,
enum dma_data_direction direction,
struct dma_attrs *attrs)
static void vio_dma_iommu_unmap_page(struct device *dev, dma_addr_t dma_handle,
size_t size,
enum dma_data_direction direction,
struct dma_attrs *attrs)
{
struct vio_dev *viodev = to_vio_dev(dev);
dma_iommu_ops.unmap_single(dev, dma_handle, size, direction, attrs);
dma_iommu_ops.unmap_page(dev, dma_handle, size, direction, attrs);
vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE));
}
@@ -603,10 +603,11 @@ static void vio_dma_iommu_unmap_sg(struct device *dev,
struct dma_mapping_ops vio_dma_mapping_ops = {
.alloc_coherent = vio_dma_iommu_alloc_coherent,
.free_coherent = vio_dma_iommu_free_coherent,
.map_single = vio_dma_iommu_map_single,
.unmap_single = vio_dma_iommu_unmap_single,
.map_sg = vio_dma_iommu_map_sg,
.unmap_sg = vio_dma_iommu_unmap_sg,
.map_page = vio_dma_iommu_map_page,
.unmap_page = vio_dma_iommu_unmap_page,
};
/**


@@ -593,31 +593,30 @@ static void dma_fixed_free_coherent(struct device *dev, size_t size,
dma_direct_ops.free_coherent(dev, size, vaddr, dma_handle);
}
static dma_addr_t dma_fixed_map_single(struct device *dev, void *ptr,
size_t size,
enum dma_data_direction direction,
struct dma_attrs *attrs)
static dma_addr_t dma_fixed_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction direction,
struct dma_attrs *attrs)
{
if (iommu_fixed_is_weak == dma_get_attr(DMA_ATTR_WEAK_ORDERING, attrs))
return dma_direct_ops.map_single(dev, ptr, size, direction,
attrs);
return dma_direct_ops.map_page(dev, page, offset, size,
direction, attrs);
else
return iommu_map_single(dev, cell_get_iommu_table(dev), ptr,
size, device_to_mask(dev), direction,
attrs);
return iommu_map_page(dev, cell_get_iommu_table(dev), page,
offset, size, device_to_mask(dev),
direction, attrs);
}
static void dma_fixed_unmap_single(struct device *dev, dma_addr_t dma_addr,
size_t size,
enum dma_data_direction direction,
struct dma_attrs *attrs)
static void dma_fixed_unmap_page(struct device *dev, dma_addr_t dma_addr,
size_t size, enum dma_data_direction direction,
struct dma_attrs *attrs)
{
if (iommu_fixed_is_weak == dma_get_attr(DMA_ATTR_WEAK_ORDERING, attrs))
dma_direct_ops.unmap_single(dev, dma_addr, size, direction,
attrs);
dma_direct_ops.unmap_page(dev, dma_addr, size, direction,
attrs);
else
iommu_unmap_single(cell_get_iommu_table(dev), dma_addr, size,
direction, attrs);
iommu_unmap_page(cell_get_iommu_table(dev), dma_addr, size,
direction, attrs);
}
static int dma_fixed_map_sg(struct device *dev, struct scatterlist *sg,
@@ -652,12 +651,12 @@ static int dma_set_mask_and_switch(struct device *dev, u64 dma_mask);
struct dma_mapping_ops dma_iommu_fixed_ops = {
.alloc_coherent = dma_fixed_alloc_coherent,
.free_coherent = dma_fixed_free_coherent,
.map_single = dma_fixed_map_single,
.unmap_single = dma_fixed_unmap_single,
.map_sg = dma_fixed_map_sg,
.unmap_sg = dma_fixed_unmap_sg,
.dma_supported = dma_fixed_dma_supported,
.set_dma_mask = dma_set_mask_and_switch,
.map_page = dma_fixed_map_page,
.unmap_page = dma_fixed_unmap_page,
};
static void cell_dma_dev_setup_fixed(struct device *dev);


@@ -215,14 +215,15 @@ EXPORT_SYMBOL_GPL(iseries_hv_free);
dma_addr_t iseries_hv_map(void *vaddr, size_t size,
enum dma_data_direction direction)
{
return iommu_map_single(NULL, &vio_iommu_table, vaddr, size,
DMA_32BIT_MASK, direction, NULL);
return iommu_map_page(NULL, &vio_iommu_table, virt_to_page(vaddr),
(unsigned long)vaddr % PAGE_SIZE, size,
DMA_32BIT_MASK, direction, NULL);
}
void iseries_hv_unmap(dma_addr_t dma_handle, size_t size,
enum dma_data_direction direction)
{
iommu_unmap_single(&vio_iommu_table, dma_handle, size, direction, NULL);
iommu_unmap_page(&vio_iommu_table, dma_handle, size, direction, NULL);
}
void __init iommu_vio_init(void)


@@ -555,18 +555,19 @@ static void ps3_free_coherent(struct device *_dev, size_t size, void *vaddr,
}
/* Creates TCEs for a user provided buffer. The user buffer must be
* contiguous real kernel storage (not vmalloc). The address of the buffer
* passed here is the kernel (virtual) address of the buffer. The buffer
* need not be page aligned, the dma_addr_t returned will point to the same
* byte within the page as vaddr.
* contiguous real kernel storage (not vmalloc). The address passed here
* comprises a page address and offset into that page. The dma_addr_t
* returned will point to the same byte within the page as was passed in.
*/
static dma_addr_t ps3_sb_map_single(struct device *_dev, void *ptr, size_t size,
enum dma_data_direction direction, struct dma_attrs *attrs)
static dma_addr_t ps3_sb_map_page(struct device *_dev, struct page *page,
unsigned long offset, size_t size, enum dma_data_direction direction,
struct dma_attrs *attrs)
{
struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev);
int result;
unsigned long bus_addr;
void *ptr = page_address(page) + offset;
result = ps3_dma_map(dev->d_region, (unsigned long)ptr, size,
&bus_addr,
@@ -580,15 +581,16 @@ static dma_addr_t ps3_sb_map_single(struct device *_dev, void *ptr, size_t size,
return bus_addr;
}
static dma_addr_t ps3_ioc0_map_single(struct device *_dev, void *ptr,
size_t size,
enum dma_data_direction direction,
struct dma_attrs *attrs)
static dma_addr_t ps3_ioc0_map_page(struct device *_dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction direction,
struct dma_attrs *attrs)
{
struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev);
int result;
unsigned long bus_addr;
u64 iopte_flag;
void *ptr = page_address(page) + offset;
iopte_flag = IOPTE_M;
switch (direction) {
@@ -615,7 +617,7 @@ static dma_addr_t ps3_ioc0_map_single(struct device *_dev, void *ptr,
return bus_addr;
}
static void ps3_unmap_single(struct device *_dev, dma_addr_t dma_addr,
static void ps3_unmap_page(struct device *_dev, dma_addr_t dma_addr,
size_t size, enum dma_data_direction direction, struct dma_attrs *attrs)
{
struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev);
@@ -689,21 +691,21 @@ static int ps3_dma_supported(struct device *_dev, u64 mask)
static struct dma_mapping_ops ps3_sb_dma_ops = {
.alloc_coherent = ps3_alloc_coherent,
.free_coherent = ps3_free_coherent,
.map_single = ps3_sb_map_single,
.unmap_single = ps3_unmap_single,
.map_sg = ps3_sb_map_sg,
.unmap_sg = ps3_sb_unmap_sg,
.dma_supported = ps3_dma_supported
.dma_supported = ps3_dma_supported,
.map_page = ps3_sb_map_page,
.unmap_page = ps3_unmap_page,
};
static struct dma_mapping_ops ps3_ioc0_dma_ops = {
.alloc_coherent = ps3_alloc_coherent,
.free_coherent = ps3_free_coherent,
.map_single = ps3_ioc0_map_single,
.unmap_single = ps3_unmap_single,
.map_sg = ps3_ioc0_map_sg,
.unmap_sg = ps3_ioc0_unmap_sg,
.dma_supported = ps3_dma_supported
.dma_supported = ps3_dma_supported,
.map_page = ps3_ioc0_map_page,
.unmap_page = ps3_unmap_page,
};
/**