2005-04-17 02:20:36 +04:00
|
|
|
#ifndef _ASM_DMA_MAPPING_H
|
|
|
|
#define _ASM_DMA_MAPPING_H
|
|
|
|
|
|
|
|
#include <asm/scatterlist.h>
|
2013-03-25 22:47:29 +04:00
|
|
|
#include <asm/dma-coherence.h>
|
2005-04-17 02:20:36 +04:00
|
|
|
#include <asm/cache.h>
|
2009-06-03 19:16:04 +04:00
|
|
|
#include <asm-generic/dma-coherent.h>
|
2005-04-17 02:20:36 +04:00
|
|
|
|
2013-01-22 15:59:30 +04:00
|
|
|
#ifndef CONFIG_SGI_IP27 /* Kludge to fix 2.6.39 build for IP27 */
|
2010-10-02 00:27:32 +04:00
|
|
|
#include <dma-coherence.h>
|
2011-05-18 16:14:36 +04:00
|
|
|
#endif
|
2005-04-17 02:20:36 +04:00
|
|
|
|
2010-10-02 00:27:32 +04:00
|
|
|
extern struct dma_map_ops *mips_dma_map_ops;
|
2005-04-17 02:20:36 +04:00
|
|
|
|
2010-10-02 00:27:32 +04:00
|
|
|
static inline struct dma_map_ops *get_dma_ops(struct device *dev)
|
|
|
|
{
|
|
|
|
if (dev && dev->archdata.dma_ops)
|
|
|
|
return dev->archdata.dma_ops;
|
|
|
|
else
|
|
|
|
return mips_dma_map_ops;
|
|
|
|
}
|
2005-04-17 02:20:36 +04:00
|
|
|
|
2010-10-02 00:27:32 +04:00
|
|
|
/*
 * Check whether the range [addr, addr + size) fits under the device's
 * DMA mask.  A device with no dma_mask pointer cannot do DMA at all.
 */
static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
{
	if (!dev->dma_mask)
		return false;

	return addr + size <= *dev->dma_mask;
}
|
|
|
|
|
|
|
|
/* Intentionally empty: nothing to do here when a buffer is marked clean. */
static inline void dma_mark_clean(void *addr, size_t size) {}
|
|
|
|
|
|
|
|
#include <asm-generic/dma-mapping-common.h>
|
|
|
|
|
|
|
|
/*
 * Report whether @dev can handle DMA addressing described by @mask,
 * delegating the decision to the active dma_map_ops implementation.
 */
static inline int dma_supported(struct device *dev, u64 mask)
{
	return get_dma_ops(dev)->dma_supported(dev, mask);
}
|
|
|
|
|
2010-10-02 00:27:32 +04:00
|
|
|
/*
 * Check whether a previously returned DMA address encodes an error,
 * notifying the DMA-debug infrastructure before asking the ops.
 */
static inline int dma_mapping_error(struct device *dev, u64 mask)
{
	struct dma_map_ops *ops;

	debug_dma_mapping_error(dev, mask);
	ops = get_dma_ops(dev);
	return ops->mapping_error(dev, mask);
}
|
2005-04-17 02:20:36 +04:00
|
|
|
|
|
|
|
static inline int
|
|
|
|
dma_set_mask(struct device *dev, u64 mask)
|
|
|
|
{
|
MIPS: Loongson: Add swiotlb to support All-Memory DMA
Loongson doesn't support DMA address above 4GB traditionally. If memory
is more than 4GB, CONFIG_SWIOTLB and ZONE_DMA32 should be selected. In
this way, DMA pages are allocated below 4GB preferably. However, if low
memory is not enough, high pages are allocated and swiotlb is used for
bouncing.
Moreover, we provide a platform-specific dma_map_ops::set_dma_mask() to
set a device's dma_mask and coherent_dma_mask. We use these masks to
distinguishes an allocated page can be used for DMA directly, or need
swiotlb to bounce.
Recently, we found that 32-bit DMA isn't a hardware bug, but a hardware
configuration issue. So, latest firmware has enable the DMA support as
high as 40-bit. To support all-memory DMA for all devices (besides the
Loongson platform limit, there are still some devices have their own
DMA32 limit), and also to be compatible with old firmware, we keep use
swiotlb.
Signed-off-by: Huacai Chen <chenhc@lemote.com>
Signed-off-by: Hongliang Tao <taohl@lemote.com>
Signed-off-by: Hua Yan <yanh@lemote.com>
Tested-by: Alex Smith <alex.smith@imgtec.com>
Reviewed-by: Alex Smith <alex.smith@imgtec.com>
Cc: John Crispin <john@phrozen.org>
Cc: Steven J. Hill <Steven.Hill@imgtec.com>
Cc: Aurelien Jarno <aurelien@aurel32.net>
Cc: linux-mips@linux-mips.org
Cc: Fuxin Zhang <zhangfx@lemote.com>
Cc: Zhangjin Wu <wuzhangjin@gmail.com>
Patchwork: https://patchwork.linux-mips.org/patch/6636
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
2014-03-21 14:44:06 +04:00
|
|
|
struct dma_map_ops *ops = get_dma_ops(dev);
|
|
|
|
|
2005-04-17 02:20:36 +04:00
|
|
|
if(!dev->dma_mask || !dma_supported(dev, mask))
|
|
|
|
return -EIO;
|
|
|
|
|
MIPS: Loongson: Add swiotlb to support All-Memory DMA
Loongson doesn't support DMA address above 4GB traditionally. If memory
is more than 4GB, CONFIG_SWIOTLB and ZONE_DMA32 should be selected. In
this way, DMA pages are allocated below 4GB preferably. However, if low
memory is not enough, high pages are allocated and swiotlb is used for
bouncing.
Moreover, we provide a platform-specific dma_map_ops::set_dma_mask() to
set a device's dma_mask and coherent_dma_mask. We use these masks to
distinguishes an allocated page can be used for DMA directly, or need
swiotlb to bounce.
Recently, we found that 32-bit DMA isn't a hardware bug, but a hardware
configuration issue. So, latest firmware has enable the DMA support as
high as 40-bit. To support all-memory DMA for all devices (besides the
Loongson platform limit, there are still some devices have their own
DMA32 limit), and also to be compatible with old firmware, we keep use
swiotlb.
Signed-off-by: Huacai Chen <chenhc@lemote.com>
Signed-off-by: Hongliang Tao <taohl@lemote.com>
Signed-off-by: Hua Yan <yanh@lemote.com>
Tested-by: Alex Smith <alex.smith@imgtec.com>
Reviewed-by: Alex Smith <alex.smith@imgtec.com>
Cc: John Crispin <john@phrozen.org>
Cc: Steven J. Hill <Steven.Hill@imgtec.com>
Cc: Aurelien Jarno <aurelien@aurel32.net>
Cc: linux-mips@linux-mips.org
Cc: Fuxin Zhang <zhangfx@lemote.com>
Cc: Zhangjin Wu <wuzhangjin@gmail.com>
Patchwork: https://patchwork.linux-mips.org/patch/6636
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
2014-03-21 14:44:06 +04:00
|
|
|
if (ops->set_dma_mask)
|
|
|
|
return ops->set_dma_mask(dev, mask);
|
|
|
|
|
2005-04-17 02:20:36 +04:00
|
|
|
*dev->dma_mask = mask;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2006-12-07 07:38:56 +03:00
|
|
|
extern void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
|
2005-04-17 02:20:36 +04:00
|
|
|
enum dma_data_direction direction);
|
|
|
|
|
2012-03-27 16:32:21 +04:00
|
|
|
#define dma_alloc_coherent(d,s,h,f) dma_alloc_attrs(d,s,h,f,NULL)
|
|
|
|
|
|
|
|
static inline void *dma_alloc_attrs(struct device *dev, size_t size,
|
|
|
|
dma_addr_t *dma_handle, gfp_t gfp,
|
|
|
|
struct dma_attrs *attrs)
|
2010-10-02 00:27:32 +04:00
|
|
|
{
|
|
|
|
void *ret;
|
|
|
|
struct dma_map_ops *ops = get_dma_ops(dev);
|
|
|
|
|
2012-03-27 16:32:21 +04:00
|
|
|
ret = ops->alloc(dev, size, dma_handle, gfp, attrs);
|
2010-10-02 00:27:32 +04:00
|
|
|
|
|
|
|
debug_dma_alloc_coherent(dev, size, *dma_handle, ret);
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2012-03-27 16:32:21 +04:00
|
|
|
#define dma_free_coherent(d,s,c,h) dma_free_attrs(d,s,c,h,NULL)
|
|
|
|
|
|
|
|
static inline void dma_free_attrs(struct device *dev, size_t size,
|
|
|
|
void *vaddr, dma_addr_t dma_handle,
|
|
|
|
struct dma_attrs *attrs)
|
2010-10-02 00:27:32 +04:00
|
|
|
{
|
|
|
|
struct dma_map_ops *ops = get_dma_ops(dev);
|
|
|
|
|
2012-03-27 16:32:21 +04:00
|
|
|
ops->free(dev, size, vaddr, dma_handle, attrs);
|
2010-10-02 00:27:32 +04:00
|
|
|
|
|
|
|
debug_dma_free_coherent(dev, size, vaddr, dma_handle);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
void *dma_alloc_noncoherent(struct device *dev, size_t size,
|
|
|
|
dma_addr_t *dma_handle, gfp_t flag);
|
|
|
|
|
|
|
|
void dma_free_noncoherent(struct device *dev, size_t size,
|
|
|
|
void *vaddr, dma_addr_t dma_handle);
|
|
|
|
|
2005-04-17 02:20:36 +04:00
|
|
|
#endif /* _ASM_DMA_MAPPING_H */
|