dma-mapping: add the device argument to dma_mapping_error()
Add per-device dma_mapping_ops support for CONFIG_X86_64 as POWER architecture does: This enables us to cleanly fix the Calgary IOMMU issue that some devices are not behind the IOMMU (http://lkml.org/lkml/2008/5/8/423). I think that per-device dma_mapping_ops support would be also helpful for KVM people to support PCI passthrough but Andi thinks that this makes it difficult to support the PCI passthrough (see the above thread). So I CC'ed this to KVM camp. Comments are appreciated. A pointer to dma_mapping_ops to struct dev_archdata is added. If the pointer is non NULL, DMA operations in asm/dma-mapping.h use it. If it's NULL, the system-wide dma_ops pointer is used as before. If it's useful for KVM people, I plan to implement a mechanism to register a hook called when a new pci (or dma capable) device is created (it works with hot plugging). It enables IOMMUs to set up an appropriate dma_mapping_ops per device. The major obstacle is that dma_mapping_error doesn't take a pointer to the device unlike other DMA operations. So x86 can't have dma_mapping_ops per device. Note all the POWER IOMMUs use the same dma_mapping_error function so this is not a problem for POWER but x86 IOMMUs use different dma_mapping_error functions. The first patch adds the device argument to dma_mapping_error. The patch is trivial but large since it touches lots of drivers and dma-mapping.h in all the architecture. This patch: dma_mapping_error() doesn't take a pointer to the device unlike other DMA operations. So we can't have dma_mapping_ops per device. Note that POWER already has dma_mapping_ops per device but all the POWER IOMMUs use the same dma_mapping_error function. x86 IOMMUs use different dma_mapping_error functions, so the device argument is needed. 
[akpm@linux-foundation.org: fix sge] [akpm@linux-foundation.org: fix svc_rdma] [akpm@linux-foundation.org: build fix] [akpm@linux-foundation.org: fix bnx2x] [akpm@linux-foundation.org: fix s2io] [akpm@linux-foundation.org: fix pasemi_mac] [akpm@linux-foundation.org: fix sdhci] [akpm@linux-foundation.org: build fix] [akpm@linux-foundation.org: fix sparc] [akpm@linux-foundation.org: fix ibmvscsi] Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp> Cc: Muli Ben-Yehuda <muli@il.ibm.com> Cc: Andi Kleen <andi@firstfloor.org> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: Ingo Molnar <mingo@elte.hu> Cc: Avi Kivity <avi@qumranet.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
Parent
c485b465a0
Commit
8d8bb39b9e
|
@ -298,10 +298,10 @@ recommended that you never use these unless you really know what the
|
||||||
cache width is.
|
cache width is.
|
||||||
|
|
||||||
int
|
int
|
||||||
dma_mapping_error(dma_addr_t dma_addr)
|
dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
|
||||||
|
|
||||||
int
|
int
|
||||||
pci_dma_mapping_error(dma_addr_t dma_addr)
|
pci_dma_mapping_error(struct pci_dev *hwdev, dma_addr_t dma_addr)
|
||||||
|
|
||||||
In some circumstances dma_map_single and dma_map_page will fail to create
|
In some circumstances dma_map_single and dma_map_page will fail to create
|
||||||
a mapping. A driver can check for these errors by testing the returned
|
a mapping. A driver can check for these errors by testing the returned
|
||||||
|
|
|
@ -280,7 +280,7 @@ unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
|
||||||
/*
|
/*
|
||||||
* Trying to unmap an invalid mapping
|
* Trying to unmap an invalid mapping
|
||||||
*/
|
*/
|
||||||
if (dma_mapping_error(dma_addr)) {
|
if (dma_mapping_error(dev, dma_addr)) {
|
||||||
dev_err(dev, "Trying to unmap invalid mapping\n");
|
dev_err(dev, "Trying to unmap invalid mapping\n");
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
|
@ -186,9 +186,10 @@ hwsw_dma_supported (struct device *dev, u64 mask)
|
||||||
}
|
}
|
||||||
|
|
||||||
int
|
int
|
||||||
hwsw_dma_mapping_error (dma_addr_t dma_addr)
|
hwsw_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
|
||||||
{
|
{
|
||||||
return hwiommu_dma_mapping_error (dma_addr) || swiotlb_dma_mapping_error(dma_addr);
|
return hwiommu_dma_mapping_error(dev, dma_addr) ||
|
||||||
|
swiotlb_dma_mapping_error(dev, dma_addr);
|
||||||
}
|
}
|
||||||
|
|
||||||
EXPORT_SYMBOL(hwsw_dma_mapping_error);
|
EXPORT_SYMBOL(hwsw_dma_mapping_error);
|
||||||
|
|
|
@ -2147,7 +2147,7 @@ sba_dma_supported (struct device *dev, u64 mask)
|
||||||
}
|
}
|
||||||
|
|
||||||
int
|
int
|
||||||
sba_dma_mapping_error (dma_addr_t dma_addr)
|
sba_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
|
||||||
{
|
{
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
|
@ -350,7 +350,7 @@ void sn_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
|
||||||
}
|
}
|
||||||
EXPORT_SYMBOL(sn_dma_sync_sg_for_device);
|
EXPORT_SYMBOL(sn_dma_sync_sg_for_device);
|
||||||
|
|
||||||
int sn_dma_mapping_error(dma_addr_t dma_addr)
|
int sn_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
|
||||||
{
|
{
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
|
@ -348,7 +348,7 @@ void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nele
|
||||||
|
|
||||||
EXPORT_SYMBOL(dma_sync_sg_for_device);
|
EXPORT_SYMBOL(dma_sync_sg_for_device);
|
||||||
|
|
||||||
int dma_mapping_error(dma_addr_t dma_addr)
|
int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
|
||||||
{
|
{
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
|
@ -281,7 +281,7 @@ static int __init scc_pciex_iowa_init(struct iowa_bus *bus, void *data)
|
||||||
|
|
||||||
dummy_page_da = dma_map_single(bus->phb->parent, dummy_page_va,
|
dummy_page_da = dma_map_single(bus->phb->parent, dummy_page_va,
|
||||||
PAGE_SIZE, DMA_FROM_DEVICE);
|
PAGE_SIZE, DMA_FROM_DEVICE);
|
||||||
if (dma_mapping_error(dummy_page_da)) {
|
if (dma_mapping_error(bus->phb->parent, dummy_page_da)) {
|
||||||
pr_err("PCIEX:Map dummy page failed.\n");
|
pr_err("PCIEX:Map dummy page failed.\n");
|
||||||
kfree(dummy_page_va);
|
kfree(dummy_page_va);
|
||||||
return -1;
|
return -1;
|
||||||
|
|
|
@ -111,7 +111,7 @@ static int __init spiderpci_pci_setup_chip(struct pci_controller *phb,
|
||||||
|
|
||||||
dummy_page_da = dma_map_single(phb->parent, dummy_page_va,
|
dummy_page_da = dma_map_single(phb->parent, dummy_page_va,
|
||||||
PAGE_SIZE, DMA_FROM_DEVICE);
|
PAGE_SIZE, DMA_FROM_DEVICE);
|
||||||
if (dma_mapping_error(dummy_page_da)) {
|
if (dma_mapping_error(phb->parent, dummy_page_da)) {
|
||||||
pr_err("SPIDER-IOWA:Map dummy page filed.\n");
|
pr_err("SPIDER-IOWA:Map dummy page filed.\n");
|
||||||
kfree(dummy_page_va);
|
kfree(dummy_page_va);
|
||||||
return -1;
|
return -1;
|
||||||
|
|
|
@ -871,7 +871,7 @@ static int proc_mf_dump_cmdline(char *page, char **start, off_t off,
|
||||||
count = 256 - off;
|
count = 256 - off;
|
||||||
|
|
||||||
dma_addr = iseries_hv_map(page, off + count, DMA_FROM_DEVICE);
|
dma_addr = iseries_hv_map(page, off + count, DMA_FROM_DEVICE);
|
||||||
if (dma_mapping_error(dma_addr))
|
if (dma_mapping_error(NULL, dma_addr))
|
||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
memset(page, 0, off + count);
|
memset(page, 0, off + count);
|
||||||
memset(&vsp_cmd, 0, sizeof(vsp_cmd));
|
memset(&vsp_cmd, 0, sizeof(vsp_cmd));
|
||||||
|
|
|
@ -544,7 +544,7 @@ error:
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
static const struct dma_mapping_ops calgary_dma_ops = {
|
static struct dma_mapping_ops calgary_dma_ops = {
|
||||||
.alloc_coherent = calgary_alloc_coherent,
|
.alloc_coherent = calgary_alloc_coherent,
|
||||||
.map_single = calgary_map_single,
|
.map_single = calgary_map_single,
|
||||||
.unmap_single = calgary_unmap_single,
|
.unmap_single = calgary_unmap_single,
|
||||||
|
|
|
@ -11,7 +11,7 @@
|
||||||
|
|
||||||
static int forbid_dac __read_mostly;
|
static int forbid_dac __read_mostly;
|
||||||
|
|
||||||
const struct dma_mapping_ops *dma_ops;
|
struct dma_mapping_ops *dma_ops;
|
||||||
EXPORT_SYMBOL(dma_ops);
|
EXPORT_SYMBOL(dma_ops);
|
||||||
|
|
||||||
static int iommu_sac_force __read_mostly;
|
static int iommu_sac_force __read_mostly;
|
||||||
|
@ -312,6 +312,8 @@ static int dma_release_coherent(struct device *dev, int order, void *vaddr)
|
||||||
|
|
||||||
int dma_supported(struct device *dev, u64 mask)
|
int dma_supported(struct device *dev, u64 mask)
|
||||||
{
|
{
|
||||||
|
struct dma_mapping_ops *ops = get_dma_ops(dev);
|
||||||
|
|
||||||
#ifdef CONFIG_PCI
|
#ifdef CONFIG_PCI
|
||||||
if (mask > 0xffffffff && forbid_dac > 0) {
|
if (mask > 0xffffffff && forbid_dac > 0) {
|
||||||
dev_info(dev, "PCI: Disallowing DAC for device\n");
|
dev_info(dev, "PCI: Disallowing DAC for device\n");
|
||||||
|
@ -319,8 +321,8 @@ int dma_supported(struct device *dev, u64 mask)
|
||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
if (dma_ops->dma_supported)
|
if (ops->dma_supported)
|
||||||
return dma_ops->dma_supported(dev, mask);
|
return ops->dma_supported(dev, mask);
|
||||||
|
|
||||||
/* Copied from i386. Doesn't make much sense, because it will
|
/* Copied from i386. Doesn't make much sense, because it will
|
||||||
only work for pci_alloc_coherent.
|
only work for pci_alloc_coherent.
|
||||||
|
@ -367,6 +369,7 @@ void *
|
||||||
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
|
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
|
||||||
gfp_t gfp)
|
gfp_t gfp)
|
||||||
{
|
{
|
||||||
|
struct dma_mapping_ops *ops = get_dma_ops(dev);
|
||||||
void *memory = NULL;
|
void *memory = NULL;
|
||||||
struct page *page;
|
struct page *page;
|
||||||
unsigned long dma_mask = 0;
|
unsigned long dma_mask = 0;
|
||||||
|
@ -435,8 +438,8 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
|
||||||
/* Let low level make its own zone decisions */
|
/* Let low level make its own zone decisions */
|
||||||
gfp &= ~(GFP_DMA32|GFP_DMA);
|
gfp &= ~(GFP_DMA32|GFP_DMA);
|
||||||
|
|
||||||
if (dma_ops->alloc_coherent)
|
if (ops->alloc_coherent)
|
||||||
return dma_ops->alloc_coherent(dev, size,
|
return ops->alloc_coherent(dev, size,
|
||||||
dma_handle, gfp);
|
dma_handle, gfp);
|
||||||
return NULL;
|
return NULL;
|
||||||
}
|
}
|
||||||
|
@ -448,14 +451,14 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if (dma_ops->alloc_coherent) {
|
if (ops->alloc_coherent) {
|
||||||
free_pages((unsigned long)memory, get_order(size));
|
free_pages((unsigned long)memory, get_order(size));
|
||||||
gfp &= ~(GFP_DMA|GFP_DMA32);
|
gfp &= ~(GFP_DMA|GFP_DMA32);
|
||||||
return dma_ops->alloc_coherent(dev, size, dma_handle, gfp);
|
return ops->alloc_coherent(dev, size, dma_handle, gfp);
|
||||||
}
|
}
|
||||||
|
|
||||||
if (dma_ops->map_simple) {
|
if (ops->map_simple) {
|
||||||
*dma_handle = dma_ops->map_simple(dev, virt_to_phys(memory),
|
*dma_handle = ops->map_simple(dev, virt_to_phys(memory),
|
||||||
size,
|
size,
|
||||||
PCI_DMA_BIDIRECTIONAL);
|
PCI_DMA_BIDIRECTIONAL);
|
||||||
if (*dma_handle != bad_dma_address)
|
if (*dma_handle != bad_dma_address)
|
||||||
|
@ -477,12 +480,14 @@ EXPORT_SYMBOL(dma_alloc_coherent);
|
||||||
void dma_free_coherent(struct device *dev, size_t size,
|
void dma_free_coherent(struct device *dev, size_t size,
|
||||||
void *vaddr, dma_addr_t bus)
|
void *vaddr, dma_addr_t bus)
|
||||||
{
|
{
|
||||||
|
struct dma_mapping_ops *ops = get_dma_ops(dev);
|
||||||
|
|
||||||
int order = get_order(size);
|
int order = get_order(size);
|
||||||
WARN_ON(irqs_disabled()); /* for portability */
|
WARN_ON(irqs_disabled()); /* for portability */
|
||||||
if (dma_release_coherent(dev, order, vaddr))
|
if (dma_release_coherent(dev, order, vaddr))
|
||||||
return;
|
return;
|
||||||
if (dma_ops->unmap_single)
|
if (ops->unmap_single)
|
||||||
dma_ops->unmap_single(dev, bus, size, 0);
|
ops->unmap_single(dev, bus, size, 0);
|
||||||
free_pages((unsigned long)vaddr, order);
|
free_pages((unsigned long)vaddr, order);
|
||||||
}
|
}
|
||||||
EXPORT_SYMBOL(dma_free_coherent);
|
EXPORT_SYMBOL(dma_free_coherent);
|
||||||
|
|
|
@ -692,8 +692,7 @@ static __init int init_k8_gatt(struct agp_kern_info *info)
|
||||||
|
|
||||||
extern int agp_amd64_init(void);
|
extern int agp_amd64_init(void);
|
||||||
|
|
||||||
static const struct dma_mapping_ops gart_dma_ops = {
|
static struct dma_mapping_ops gart_dma_ops = {
|
||||||
.mapping_error = NULL,
|
|
||||||
.map_single = gart_map_single,
|
.map_single = gart_map_single,
|
||||||
.map_simple = gart_map_simple,
|
.map_simple = gart_map_simple,
|
||||||
.unmap_single = gart_unmap_single,
|
.unmap_single = gart_unmap_single,
|
||||||
|
|
|
@ -72,21 +72,9 @@ static int nommu_map_sg(struct device *hwdev, struct scatterlist *sg,
|
||||||
return nents;
|
return nents;
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Make sure we keep the same behaviour */
|
struct dma_mapping_ops nommu_dma_ops = {
|
||||||
static int nommu_mapping_error(dma_addr_t dma_addr)
|
|
||||||
{
|
|
||||||
#ifdef CONFIG_X86_32
|
|
||||||
return 0;
|
|
||||||
#else
|
|
||||||
return (dma_addr == bad_dma_address);
|
|
||||||
#endif
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
const struct dma_mapping_ops nommu_dma_ops = {
|
|
||||||
.map_single = nommu_map_single,
|
.map_single = nommu_map_single,
|
||||||
.map_sg = nommu_map_sg,
|
.map_sg = nommu_map_sg,
|
||||||
.mapping_error = nommu_mapping_error,
|
|
||||||
.is_phys = 1,
|
.is_phys = 1,
|
||||||
};
|
};
|
||||||
|
|
||||||
|
|
|
@ -18,7 +18,7 @@ swiotlb_map_single_phys(struct device *hwdev, phys_addr_t paddr, size_t size,
|
||||||
return swiotlb_map_single(hwdev, phys_to_virt(paddr), size, direction);
|
return swiotlb_map_single(hwdev, phys_to_virt(paddr), size, direction);
|
||||||
}
|
}
|
||||||
|
|
||||||
const struct dma_mapping_ops swiotlb_dma_ops = {
|
struct dma_mapping_ops swiotlb_dma_ops = {
|
||||||
.mapping_error = swiotlb_dma_mapping_error,
|
.mapping_error = swiotlb_dma_mapping_error,
|
||||||
.alloc_coherent = swiotlb_alloc_coherent,
|
.alloc_coherent = swiotlb_alloc_coherent,
|
||||||
.free_coherent = swiotlb_free_coherent,
|
.free_coherent = swiotlb_free_coherent,
|
||||||
|
|
|
@ -50,7 +50,7 @@ fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card,
|
||||||
|
|
||||||
address = dma_map_page(card->device, buffer->pages[i],
|
address = dma_map_page(card->device, buffer->pages[i],
|
||||||
0, PAGE_SIZE, direction);
|
0, PAGE_SIZE, direction);
|
||||||
if (dma_mapping_error(address)) {
|
if (dma_mapping_error(card->device, address)) {
|
||||||
__free_page(buffer->pages[i]);
|
__free_page(buffer->pages[i]);
|
||||||
goto out_pages;
|
goto out_pages;
|
||||||
}
|
}
|
||||||
|
|
|
@ -953,7 +953,7 @@ at_context_queue_packet(struct context *ctx, struct fw_packet *packet)
|
||||||
payload_bus =
|
payload_bus =
|
||||||
dma_map_single(ohci->card.device, packet->payload,
|
dma_map_single(ohci->card.device, packet->payload,
|
||||||
packet->payload_length, DMA_TO_DEVICE);
|
packet->payload_length, DMA_TO_DEVICE);
|
||||||
if (dma_mapping_error(payload_bus)) {
|
if (dma_mapping_error(ohci->card.device, payload_bus)) {
|
||||||
packet->ack = RCODE_SEND_ERROR;
|
packet->ack = RCODE_SEND_ERROR;
|
||||||
return -1;
|
return -1;
|
||||||
}
|
}
|
||||||
|
|
|
@ -543,7 +543,7 @@ sbp2_send_management_orb(struct sbp2_logical_unit *lu, int node_id,
|
||||||
orb->response_bus =
|
orb->response_bus =
|
||||||
dma_map_single(device->card->device, &orb->response,
|
dma_map_single(device->card->device, &orb->response,
|
||||||
sizeof(orb->response), DMA_FROM_DEVICE);
|
sizeof(orb->response), DMA_FROM_DEVICE);
|
||||||
if (dma_mapping_error(orb->response_bus))
|
if (dma_mapping_error(device->card->device, orb->response_bus))
|
||||||
goto fail_mapping_response;
|
goto fail_mapping_response;
|
||||||
|
|
||||||
orb->request.response.high = 0;
|
orb->request.response.high = 0;
|
||||||
|
@ -577,7 +577,7 @@ sbp2_send_management_orb(struct sbp2_logical_unit *lu, int node_id,
|
||||||
orb->base.request_bus =
|
orb->base.request_bus =
|
||||||
dma_map_single(device->card->device, &orb->request,
|
dma_map_single(device->card->device, &orb->request,
|
||||||
sizeof(orb->request), DMA_TO_DEVICE);
|
sizeof(orb->request), DMA_TO_DEVICE);
|
||||||
if (dma_mapping_error(orb->base.request_bus))
|
if (dma_mapping_error(device->card->device, orb->base.request_bus))
|
||||||
goto fail_mapping_request;
|
goto fail_mapping_request;
|
||||||
|
|
||||||
sbp2_send_orb(&orb->base, lu, node_id, generation,
|
sbp2_send_orb(&orb->base, lu, node_id, generation,
|
||||||
|
@ -1424,7 +1424,7 @@ sbp2_map_scatterlist(struct sbp2_command_orb *orb, struct fw_device *device,
|
||||||
orb->page_table_bus =
|
orb->page_table_bus =
|
||||||
dma_map_single(device->card->device, orb->page_table,
|
dma_map_single(device->card->device, orb->page_table,
|
||||||
sizeof(orb->page_table), DMA_TO_DEVICE);
|
sizeof(orb->page_table), DMA_TO_DEVICE);
|
||||||
if (dma_mapping_error(orb->page_table_bus))
|
if (dma_mapping_error(device->card->device, orb->page_table_bus))
|
||||||
goto fail_page_table;
|
goto fail_page_table;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
@ -1509,7 +1509,7 @@ static int sbp2_scsi_queuecommand(struct scsi_cmnd *cmd, scsi_done_fn_t done)
|
||||||
orb->base.request_bus =
|
orb->base.request_bus =
|
||||||
dma_map_single(device->card->device, &orb->request,
|
dma_map_single(device->card->device, &orb->request,
|
||||||
sizeof(orb->request), DMA_TO_DEVICE);
|
sizeof(orb->request), DMA_TO_DEVICE);
|
||||||
if (dma_mapping_error(orb->base.request_bus))
|
if (dma_mapping_error(device->card->device, orb->base.request_bus))
|
||||||
goto out;
|
goto out;
|
||||||
|
|
||||||
sbp2_send_orb(&orb->base, lu, lu->tgt->node_id, lu->generation,
|
sbp2_send_orb(&orb->base, lu, lu->tgt->node_id, lu->generation,
|
||||||
|
|
|
@ -698,7 +698,7 @@ retry:
|
||||||
|
|
||||||
addr = dma_map_single(&dd->pcidev->dev, tx->txreq.map_addr,
|
addr = dma_map_single(&dd->pcidev->dev, tx->txreq.map_addr,
|
||||||
tx->map_len, DMA_TO_DEVICE);
|
tx->map_len, DMA_TO_DEVICE);
|
||||||
if (dma_mapping_error(addr)) {
|
if (dma_mapping_error(&dd->pcidev->dev, addr)) {
|
||||||
ret = -EIO;
|
ret = -EIO;
|
||||||
goto unlock;
|
goto unlock;
|
||||||
}
|
}
|
||||||
|
|
|
@ -206,7 +206,7 @@ static int ipath_user_sdma_coalesce(const struct ipath_devdata *dd,
|
||||||
|
|
||||||
dma_addr = dma_map_page(&dd->pcidev->dev, page, 0, len,
|
dma_addr = dma_map_page(&dd->pcidev->dev, page, 0, len,
|
||||||
DMA_TO_DEVICE);
|
DMA_TO_DEVICE);
|
||||||
if (dma_mapping_error(dma_addr)) {
|
if (dma_mapping_error(&dd->pcidev->dev, dma_addr)) {
|
||||||
ret = -ENOMEM;
|
ret = -ENOMEM;
|
||||||
goto free_unmap;
|
goto free_unmap;
|
||||||
}
|
}
|
||||||
|
@ -301,7 +301,7 @@ static int ipath_user_sdma_pin_pages(const struct ipath_devdata *dd,
|
||||||
pages[j], 0, flen, DMA_TO_DEVICE);
|
pages[j], 0, flen, DMA_TO_DEVICE);
|
||||||
unsigned long fofs = addr & ~PAGE_MASK;
|
unsigned long fofs = addr & ~PAGE_MASK;
|
||||||
|
|
||||||
if (dma_mapping_error(dma_addr)) {
|
if (dma_mapping_error(&dd->pcidev->dev, dma_addr)) {
|
||||||
ret = -ENOMEM;
|
ret = -ENOMEM;
|
||||||
goto done;
|
goto done;
|
||||||
}
|
}
|
||||||
|
@ -508,7 +508,7 @@ static int ipath_user_sdma_queue_pkts(const struct ipath_devdata *dd,
|
||||||
if (page) {
|
if (page) {
|
||||||
dma_addr = dma_map_page(&dd->pcidev->dev,
|
dma_addr = dma_map_page(&dd->pcidev->dev,
|
||||||
page, 0, len, DMA_TO_DEVICE);
|
page, 0, len, DMA_TO_DEVICE);
|
||||||
if (dma_mapping_error(dma_addr)) {
|
if (dma_mapping_error(&dd->pcidev->dev, dma_addr)) {
|
||||||
ret = -ENOMEM;
|
ret = -ENOMEM;
|
||||||
goto free_pbc;
|
goto free_pbc;
|
||||||
}
|
}
|
||||||
|
|
|
@ -780,7 +780,7 @@ int mthca_map_eq_icm(struct mthca_dev *dev, u64 icm_virt)
|
||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
dev->eq_table.icm_dma = pci_map_page(dev->pdev, dev->eq_table.icm_page, 0,
|
dev->eq_table.icm_dma = pci_map_page(dev->pdev, dev->eq_table.icm_page, 0,
|
||||||
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
|
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
|
||||||
if (pci_dma_mapping_error(dev->eq_table.icm_dma)) {
|
if (pci_dma_mapping_error(dev->pdev, dev->eq_table.icm_dma)) {
|
||||||
__free_page(dev->eq_table.icm_page);
|
__free_page(dev->eq_table.icm_page);
|
||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
}
|
}
|
||||||
|
|
|
@ -242,7 +242,7 @@ static int __devinit pluto_dma_map(struct pluto *pluto)
|
||||||
pluto->dma_addr = pci_map_single(pluto->pdev, pluto->dma_buf,
|
pluto->dma_addr = pci_map_single(pluto->pdev, pluto->dma_buf,
|
||||||
TS_DMA_BYTES, PCI_DMA_FROMDEVICE);
|
TS_DMA_BYTES, PCI_DMA_FROMDEVICE);
|
||||||
|
|
||||||
return pci_dma_mapping_error(pluto->dma_addr);
|
return pci_dma_mapping_error(pluto->pdev, pluto->dma_addr);
|
||||||
}
|
}
|
||||||
|
|
||||||
static void pluto_dma_unmap(struct pluto *pluto)
|
static void pluto_dma_unmap(struct pluto *pluto)
|
||||||
|
|
|
@ -337,7 +337,7 @@ static int sdhci_adma_table_pre(struct sdhci_host *host,
|
||||||
|
|
||||||
host->align_addr = dma_map_single(mmc_dev(host->mmc),
|
host->align_addr = dma_map_single(mmc_dev(host->mmc),
|
||||||
host->align_buffer, 128 * 4, direction);
|
host->align_buffer, 128 * 4, direction);
|
||||||
if (dma_mapping_error(host->align_addr))
|
if (dma_mapping_error(mmc_dev(host->mmc), host->align_addr))
|
||||||
goto fail;
|
goto fail;
|
||||||
BUG_ON(host->align_addr & 0x3);
|
BUG_ON(host->align_addr & 0x3);
|
||||||
|
|
||||||
|
@ -439,7 +439,7 @@ static int sdhci_adma_table_pre(struct sdhci_host *host,
|
||||||
|
|
||||||
host->adma_addr = dma_map_single(mmc_dev(host->mmc),
|
host->adma_addr = dma_map_single(mmc_dev(host->mmc),
|
||||||
host->adma_desc, (128 * 2 + 1) * 4, DMA_TO_DEVICE);
|
host->adma_desc, (128 * 2 + 1) * 4, DMA_TO_DEVICE);
|
||||||
if (dma_mapping_error(host->align_addr))
|
if (dma_mapping_error(mmc_dev(host->mmc), host->align_addr))
|
||||||
goto unmap_entries;
|
goto unmap_entries;
|
||||||
BUG_ON(host->adma_addr & 0x3);
|
BUG_ON(host->adma_addr & 0x3);
|
||||||
|
|
||||||
|
|
|
@ -482,7 +482,7 @@ static int ep93xx_alloc_buffers(struct ep93xx_priv *ep)
|
||||||
goto err;
|
goto err;
|
||||||
|
|
||||||
d = dma_map_single(NULL, page, PAGE_SIZE, DMA_FROM_DEVICE);
|
d = dma_map_single(NULL, page, PAGE_SIZE, DMA_FROM_DEVICE);
|
||||||
if (dma_mapping_error(d)) {
|
if (dma_mapping_error(NULL, d)) {
|
||||||
free_page((unsigned long)page);
|
free_page((unsigned long)page);
|
||||||
goto err;
|
goto err;
|
||||||
}
|
}
|
||||||
|
@ -505,7 +505,7 @@ static int ep93xx_alloc_buffers(struct ep93xx_priv *ep)
|
||||||
goto err;
|
goto err;
|
||||||
|
|
||||||
d = dma_map_single(NULL, page, PAGE_SIZE, DMA_TO_DEVICE);
|
d = dma_map_single(NULL, page, PAGE_SIZE, DMA_TO_DEVICE);
|
||||||
if (dma_mapping_error(d)) {
|
if (dma_mapping_error(NULL, d)) {
|
||||||
free_page((unsigned long)page);
|
free_page((unsigned long)page);
|
||||||
goto err;
|
goto err;
|
||||||
}
|
}
|
||||||
|
|
|
@ -1020,7 +1020,7 @@ static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
|
||||||
|
|
||||||
mapping = pci_map_page(bp->pdev, page, 0, BCM_PAGE_SIZE*PAGES_PER_SGE,
|
mapping = pci_map_page(bp->pdev, page, 0, BCM_PAGE_SIZE*PAGES_PER_SGE,
|
||||||
PCI_DMA_FROMDEVICE);
|
PCI_DMA_FROMDEVICE);
|
||||||
if (unlikely(dma_mapping_error(mapping))) {
|
if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
|
||||||
__free_pages(page, PAGES_PER_SGE_SHIFT);
|
__free_pages(page, PAGES_PER_SGE_SHIFT);
|
||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
}
|
}
|
||||||
|
@ -1048,7 +1048,7 @@ static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
|
||||||
|
|
||||||
mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
|
mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
|
||||||
PCI_DMA_FROMDEVICE);
|
PCI_DMA_FROMDEVICE);
|
||||||
if (unlikely(dma_mapping_error(mapping))) {
|
if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
|
||||||
dev_kfree_skb(skb);
|
dev_kfree_skb(skb);
|
||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
}
|
}
|
||||||
|
|
|
@ -386,7 +386,7 @@ static inline int add_one_rx_buf(void *va, unsigned int len,
|
||||||
dma_addr_t mapping;
|
dma_addr_t mapping;
|
||||||
|
|
||||||
mapping = pci_map_single(pdev, va, len, PCI_DMA_FROMDEVICE);
|
mapping = pci_map_single(pdev, va, len, PCI_DMA_FROMDEVICE);
|
||||||
if (unlikely(pci_dma_mapping_error(mapping)))
|
if (unlikely(pci_dma_mapping_error(pdev, mapping)))
|
||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
|
|
||||||
pci_unmap_addr_set(sd, dma_addr, mapping);
|
pci_unmap_addr_set(sd, dma_addr, mapping);
|
||||||
|
|
|
@ -1790,7 +1790,7 @@ static int e100_rx_alloc_skb(struct nic *nic, struct rx *rx)
|
||||||
rx->dma_addr = pci_map_single(nic->pdev, rx->skb->data,
|
rx->dma_addr = pci_map_single(nic->pdev, rx->skb->data,
|
||||||
RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
|
RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
|
||||||
|
|
||||||
if (pci_dma_mapping_error(rx->dma_addr)) {
|
if (pci_dma_mapping_error(nic->pdev, rx->dma_addr)) {
|
||||||
dev_kfree_skb_any(rx->skb);
|
dev_kfree_skb_any(rx->skb);
|
||||||
rx->skb = NULL;
|
rx->skb = NULL;
|
||||||
rx->dma_addr = 0;
|
rx->dma_addr = 0;
|
||||||
|
|
|
@ -1090,7 +1090,7 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
|
||||||
tx_ring->buffer_info[i].dma =
|
tx_ring->buffer_info[i].dma =
|
||||||
pci_map_single(pdev, skb->data, skb->len,
|
pci_map_single(pdev, skb->data, skb->len,
|
||||||
PCI_DMA_TODEVICE);
|
PCI_DMA_TODEVICE);
|
||||||
if (pci_dma_mapping_error(tx_ring->buffer_info[i].dma)) {
|
if (pci_dma_mapping_error(pdev, tx_ring->buffer_info[i].dma)) {
|
||||||
ret_val = 4;
|
ret_val = 4;
|
||||||
goto err_nomem;
|
goto err_nomem;
|
||||||
}
|
}
|
||||||
|
@ -1153,7 +1153,7 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
|
||||||
rx_ring->buffer_info[i].dma =
|
rx_ring->buffer_info[i].dma =
|
||||||
pci_map_single(pdev, skb->data, 2048,
|
pci_map_single(pdev, skb->data, 2048,
|
||||||
PCI_DMA_FROMDEVICE);
|
PCI_DMA_FROMDEVICE);
|
||||||
if (pci_dma_mapping_error(rx_ring->buffer_info[i].dma)) {
|
if (pci_dma_mapping_error(pdev, rx_ring->buffer_info[i].dma)) {
|
||||||
ret_val = 8;
|
ret_val = 8;
|
||||||
goto err_nomem;
|
goto err_nomem;
|
||||||
}
|
}
|
||||||
|
|
|
@ -195,7 +195,7 @@ map_skb:
|
||||||
buffer_info->dma = pci_map_single(pdev, skb->data,
|
buffer_info->dma = pci_map_single(pdev, skb->data,
|
||||||
adapter->rx_buffer_len,
|
adapter->rx_buffer_len,
|
||||||
PCI_DMA_FROMDEVICE);
|
PCI_DMA_FROMDEVICE);
|
||||||
if (pci_dma_mapping_error(buffer_info->dma)) {
|
if (pci_dma_mapping_error(pdev, buffer_info->dma)) {
|
||||||
dev_err(&pdev->dev, "RX DMA map failed\n");
|
dev_err(&pdev->dev, "RX DMA map failed\n");
|
||||||
adapter->rx_dma_failed++;
|
adapter->rx_dma_failed++;
|
||||||
break;
|
break;
|
||||||
|
@ -265,7 +265,7 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
|
||||||
ps_page->page,
|
ps_page->page,
|
||||||
0, PAGE_SIZE,
|
0, PAGE_SIZE,
|
||||||
PCI_DMA_FROMDEVICE);
|
PCI_DMA_FROMDEVICE);
|
||||||
if (pci_dma_mapping_error(ps_page->dma)) {
|
if (pci_dma_mapping_error(pdev, ps_page->dma)) {
|
||||||
dev_err(&adapter->pdev->dev,
|
dev_err(&adapter->pdev->dev,
|
||||||
"RX DMA page map failed\n");
|
"RX DMA page map failed\n");
|
||||||
adapter->rx_dma_failed++;
|
adapter->rx_dma_failed++;
|
||||||
|
@ -300,7 +300,7 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
|
||||||
buffer_info->dma = pci_map_single(pdev, skb->data,
|
buffer_info->dma = pci_map_single(pdev, skb->data,
|
||||||
adapter->rx_ps_bsize0,
|
adapter->rx_ps_bsize0,
|
||||||
PCI_DMA_FROMDEVICE);
|
PCI_DMA_FROMDEVICE);
|
||||||
if (pci_dma_mapping_error(buffer_info->dma)) {
|
if (pci_dma_mapping_error(pdev, buffer_info->dma)) {
|
||||||
dev_err(&pdev->dev, "RX DMA map failed\n");
|
dev_err(&pdev->dev, "RX DMA map failed\n");
|
||||||
adapter->rx_dma_failed++;
|
adapter->rx_dma_failed++;
|
||||||
/* cleanup skb */
|
/* cleanup skb */
|
||||||
|
@ -3344,7 +3344,7 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
|
||||||
skb->data + offset,
|
skb->data + offset,
|
||||||
size,
|
size,
|
||||||
PCI_DMA_TODEVICE);
|
PCI_DMA_TODEVICE);
|
||||||
if (pci_dma_mapping_error(buffer_info->dma)) {
|
if (pci_dma_mapping_error(adapter->pdev, buffer_info->dma)) {
|
||||||
dev_err(&adapter->pdev->dev, "TX DMA map failed\n");
|
dev_err(&adapter->pdev->dev, "TX DMA map failed\n");
|
||||||
adapter->tx_dma_failed++;
|
adapter->tx_dma_failed++;
|
||||||
return -1;
|
return -1;
|
||||||
|
@ -3382,7 +3382,8 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
|
||||||
offset,
|
offset,
|
||||||
size,
|
size,
|
||||||
PCI_DMA_TODEVICE);
|
PCI_DMA_TODEVICE);
|
||||||
if (pci_dma_mapping_error(buffer_info->dma)) {
|
if (pci_dma_mapping_error(adapter->pdev,
|
||||||
|
buffer_info->dma)) {
|
||||||
dev_err(&adapter->pdev->dev,
|
dev_err(&adapter->pdev->dev,
|
||||||
"TX DMA page map failed\n");
|
"TX DMA page map failed\n");
|
||||||
adapter->tx_dma_failed++;
|
adapter->tx_dma_failed++;
|
||||||
|
|
|
@ -260,7 +260,7 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struc
|
||||||
dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
|
dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
|
||||||
pool->buff_size, DMA_FROM_DEVICE);
|
pool->buff_size, DMA_FROM_DEVICE);
|
||||||
|
|
||||||
if (dma_mapping_error(dma_addr))
|
if (dma_mapping_error((&adapter->vdev->dev, dma_addr))
|
||||||
goto failure;
|
goto failure;
|
||||||
|
|
||||||
pool->free_map[free_index] = IBM_VETH_INVALID_MAP;
|
pool->free_map[free_index] = IBM_VETH_INVALID_MAP;
|
||||||
|
@ -294,7 +294,7 @@ failure:
|
||||||
pool->consumer_index = pool->size - 1;
|
pool->consumer_index = pool->size - 1;
|
||||||
else
|
else
|
||||||
pool->consumer_index--;
|
pool->consumer_index--;
|
||||||
if (!dma_mapping_error(dma_addr))
|
if (!dma_mapping_error((&adapter->vdev->dev, dma_addr))
|
||||||
dma_unmap_single(&adapter->vdev->dev,
|
dma_unmap_single(&adapter->vdev->dev,
|
||||||
pool->dma_addr[index], pool->buff_size,
|
pool->dma_addr[index], pool->buff_size,
|
||||||
DMA_FROM_DEVICE);
|
DMA_FROM_DEVICE);
|
||||||
|
@ -448,11 +448,11 @@ static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter)
|
||||||
static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
|
static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
|
||||||
{
|
{
|
||||||
int i;
|
int i;
|
||||||
|
struct device *dev = &adapter->vdev->dev;
|
||||||
|
|
||||||
if(adapter->buffer_list_addr != NULL) {
|
if(adapter->buffer_list_addr != NULL) {
|
||||||
if(!dma_mapping_error(adapter->buffer_list_dma)) {
|
if (!dma_mapping_error(dev, adapter->buffer_list_dma)) {
|
||||||
dma_unmap_single(&adapter->vdev->dev,
|
dma_unmap_single(dev, adapter->buffer_list_dma, 4096,
|
||||||
adapter->buffer_list_dma, 4096,
|
|
||||||
DMA_BIDIRECTIONAL);
|
DMA_BIDIRECTIONAL);
|
||||||
adapter->buffer_list_dma = DMA_ERROR_CODE;
|
adapter->buffer_list_dma = DMA_ERROR_CODE;
|
||||||
}
|
}
|
||||||
|
@ -461,9 +461,8 @@ static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
|
||||||
}
|
}
|
||||||
|
|
||||||
if(adapter->filter_list_addr != NULL) {
|
if(adapter->filter_list_addr != NULL) {
|
||||||
if(!dma_mapping_error(adapter->filter_list_dma)) {
|
if (!dma_mapping_error(dev, adapter->filter_list_dma)) {
|
||||||
dma_unmap_single(&adapter->vdev->dev,
|
dma_unmap_single(dev, adapter->filter_list_dma, 4096,
|
||||||
adapter->filter_list_dma, 4096,
|
|
||||||
DMA_BIDIRECTIONAL);
|
DMA_BIDIRECTIONAL);
|
||||||
adapter->filter_list_dma = DMA_ERROR_CODE;
|
adapter->filter_list_dma = DMA_ERROR_CODE;
|
||||||
}
|
}
|
||||||
|
@ -472,8 +471,8 @@ static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
|
||||||
}
|
}
|
||||||
|
|
||||||
if(adapter->rx_queue.queue_addr != NULL) {
|
if(adapter->rx_queue.queue_addr != NULL) {
|
||||||
if(!dma_mapping_error(adapter->rx_queue.queue_dma)) {
|
if (!dma_mapping_error(dev, adapter->rx_queue.queue_dma)) {
|
||||||
dma_unmap_single(&adapter->vdev->dev,
|
dma_unmap_single(dev,
|
||||||
adapter->rx_queue.queue_dma,
|
adapter->rx_queue.queue_dma,
|
||||||
adapter->rx_queue.queue_len,
|
adapter->rx_queue.queue_len,
|
||||||
DMA_BIDIRECTIONAL);
|
DMA_BIDIRECTIONAL);
|
||||||
|
@ -535,6 +534,7 @@ static int ibmveth_open(struct net_device *netdev)
|
||||||
int rc;
|
int rc;
|
||||||
union ibmveth_buf_desc rxq_desc;
|
union ibmveth_buf_desc rxq_desc;
|
||||||
int i;
|
int i;
|
||||||
|
struct device *dev;
|
||||||
|
|
||||||
ibmveth_debug_printk("open starting\n");
|
ibmveth_debug_printk("open starting\n");
|
||||||
|
|
||||||
|
@ -563,17 +563,19 @@ static int ibmveth_open(struct net_device *netdev)
|
||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
}
|
}
|
||||||
|
|
||||||
adapter->buffer_list_dma = dma_map_single(&adapter->vdev->dev,
|
dev = &adapter->vdev->dev;
|
||||||
|
|
||||||
|
adapter->buffer_list_dma = dma_map_single(dev,
|
||||||
adapter->buffer_list_addr, 4096, DMA_BIDIRECTIONAL);
|
adapter->buffer_list_addr, 4096, DMA_BIDIRECTIONAL);
|
||||||
adapter->filter_list_dma = dma_map_single(&adapter->vdev->dev,
|
adapter->filter_list_dma = dma_map_single(dev,
|
||||||
adapter->filter_list_addr, 4096, DMA_BIDIRECTIONAL);
|
adapter->filter_list_addr, 4096, DMA_BIDIRECTIONAL);
|
||||||
adapter->rx_queue.queue_dma = dma_map_single(&adapter->vdev->dev,
|
adapter->rx_queue.queue_dma = dma_map_single(dev,
|
||||||
adapter->rx_queue.queue_addr,
|
adapter->rx_queue.queue_addr,
|
||||||
adapter->rx_queue.queue_len, DMA_BIDIRECTIONAL);
|
adapter->rx_queue.queue_len, DMA_BIDIRECTIONAL);
|
||||||
|
|
||||||
if((dma_mapping_error(adapter->buffer_list_dma) ) ||
|
if ((dma_mapping_error(dev, adapter->buffer_list_dma)) ||
|
||||||
(dma_mapping_error(adapter->filter_list_dma)) ||
|
(dma_mapping_error(dev, adapter->filter_list_dma)) ||
|
||||||
(dma_mapping_error(adapter->rx_queue.queue_dma))) {
|
(dma_mapping_error(dev, adapter->rx_queue.queue_dma))) {
|
||||||
ibmveth_error_printk("unable to map filter or buffer list pages\n");
|
ibmveth_error_printk("unable to map filter or buffer list pages\n");
|
||||||
ibmveth_cleanup(adapter);
|
ibmveth_cleanup(adapter);
|
||||||
napi_disable(&adapter->napi);
|
napi_disable(&adapter->napi);
|
||||||
|
@ -645,7 +647,7 @@ static int ibmveth_open(struct net_device *netdev)
|
||||||
adapter->bounce_buffer_dma =
|
adapter->bounce_buffer_dma =
|
||||||
dma_map_single(&adapter->vdev->dev, adapter->bounce_buffer,
|
dma_map_single(&adapter->vdev->dev, adapter->bounce_buffer,
|
||||||
netdev->mtu + IBMVETH_BUFF_OH, DMA_BIDIRECTIONAL);
|
netdev->mtu + IBMVETH_BUFF_OH, DMA_BIDIRECTIONAL);
|
||||||
if (dma_mapping_error(adapter->bounce_buffer_dma)) {
|
if (dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
|
||||||
ibmveth_error_printk("unable to map bounce buffer\n");
|
ibmveth_error_printk("unable to map bounce buffer\n");
|
||||||
ibmveth_cleanup(adapter);
|
ibmveth_cleanup(adapter);
|
||||||
napi_disable(&adapter->napi);
|
napi_disable(&adapter->napi);
|
||||||
|
@ -922,7 +924,7 @@ static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev)
|
||||||
buf[1] = 0;
|
buf[1] = 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (dma_mapping_error(data_dma_addr)) {
|
if (dma_mapping_error((&adapter->vdev->dev, data_dma_addr)) {
|
||||||
if (!firmware_has_feature(FW_FEATURE_CMO))
|
if (!firmware_has_feature(FW_FEATURE_CMO))
|
||||||
ibmveth_error_printk("tx: unable to map xmit buffer\n");
|
ibmveth_error_printk("tx: unable to map xmit buffer\n");
|
||||||
skb_copy_from_linear_data(skb, adapter->bounce_buffer,
|
skb_copy_from_linear_data(skb, adapter->bounce_buffer,
|
||||||
|
|
|
@ -1128,7 +1128,7 @@ static int veth_transmit_to_one(struct sk_buff *skb, HvLpIndex rlp,
|
||||||
msg->data.addr[0] = dma_map_single(port->dev, skb->data,
|
msg->data.addr[0] = dma_map_single(port->dev, skb->data,
|
||||||
skb->len, DMA_TO_DEVICE);
|
skb->len, DMA_TO_DEVICE);
|
||||||
|
|
||||||
if (dma_mapping_error(msg->data.addr[0]))
|
if (dma_mapping_error(port->dev, msg->data.addr[0]))
|
||||||
goto recycle_and_drop;
|
goto recycle_and_drop;
|
||||||
|
|
||||||
msg->dev = port->dev;
|
msg->dev = port->dev;
|
||||||
|
@ -1226,7 +1226,7 @@ static void veth_recycle_msg(struct veth_lpar_connection *cnx,
|
||||||
dma_address = msg->data.addr[0];
|
dma_address = msg->data.addr[0];
|
||||||
dma_length = msg->data.len[0];
|
dma_length = msg->data.len[0];
|
||||||
|
|
||||||
if (!dma_mapping_error(dma_address))
|
if (!dma_mapping_error(msg->dev, dma_address))
|
||||||
dma_unmap_single(msg->dev, dma_address, dma_length,
|
dma_unmap_single(msg->dev, dma_address, dma_length,
|
||||||
DMA_TO_DEVICE);
|
DMA_TO_DEVICE);
|
||||||
|
|
||||||
|
|
|
@ -526,7 +526,7 @@ int mlx4_map_eq_icm(struct mlx4_dev *dev, u64 icm_virt)
|
||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
priv->eq_table.icm_dma = pci_map_page(dev->pdev, priv->eq_table.icm_page, 0,
|
priv->eq_table.icm_dma = pci_map_page(dev->pdev, priv->eq_table.icm_page, 0,
|
||||||
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
|
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
|
||||||
if (pci_dma_mapping_error(priv->eq_table.icm_dma)) {
|
if (pci_dma_mapping_error(dev->pdev, priv->eq_table.icm_dma)) {
|
||||||
__free_page(priv->eq_table.icm_page);
|
__free_page(priv->eq_table.icm_page);
|
||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
}
|
}
|
||||||
|
|
|
@ -650,7 +650,7 @@ static void pasemi_mac_replenish_rx_ring(const struct net_device *dev,
|
||||||
mac->bufsz - LOCAL_SKB_ALIGN,
|
mac->bufsz - LOCAL_SKB_ALIGN,
|
||||||
PCI_DMA_FROMDEVICE);
|
PCI_DMA_FROMDEVICE);
|
||||||
|
|
||||||
if (unlikely(dma_mapping_error(dma))) {
|
if (unlikely(pci_dma_mapping_error(mac->dma_pdev, dma))) {
|
||||||
dev_kfree_skb_irq(info->skb);
|
dev_kfree_skb_irq(info->skb);
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
@ -1519,7 +1519,7 @@ static int pasemi_mac_start_tx(struct sk_buff *skb, struct net_device *dev)
|
||||||
map[0] = pci_map_single(mac->dma_pdev, skb->data, skb_headlen(skb),
|
map[0] = pci_map_single(mac->dma_pdev, skb->data, skb_headlen(skb),
|
||||||
PCI_DMA_TODEVICE);
|
PCI_DMA_TODEVICE);
|
||||||
map_size[0] = skb_headlen(skb);
|
map_size[0] = skb_headlen(skb);
|
||||||
if (dma_mapping_error(map[0]))
|
if (pci_dma_mapping_error(mac->dma_pdev, map[0]))
|
||||||
goto out_err_nolock;
|
goto out_err_nolock;
|
||||||
|
|
||||||
for (i = 0; i < nfrags; i++) {
|
for (i = 0; i < nfrags; i++) {
|
||||||
|
@ -1529,7 +1529,7 @@ static int pasemi_mac_start_tx(struct sk_buff *skb, struct net_device *dev)
|
||||||
frag->page_offset, frag->size,
|
frag->page_offset, frag->size,
|
||||||
PCI_DMA_TODEVICE);
|
PCI_DMA_TODEVICE);
|
||||||
map_size[i+1] = frag->size;
|
map_size[i+1] = frag->size;
|
||||||
if (dma_mapping_error(map[i+1])) {
|
if (pci_dma_mapping_error(mac->dma_pdev, map[i+1])) {
|
||||||
nfrags = i;
|
nfrags = i;
|
||||||
goto out_err_nolock;
|
goto out_err_nolock;
|
||||||
}
|
}
|
||||||
|
|
|
@ -328,7 +328,7 @@ static void ql_release_to_lrg_buf_free_list(struct ql3_adapter *qdev,
|
||||||
qdev->lrg_buffer_len -
|
qdev->lrg_buffer_len -
|
||||||
QL_HEADER_SPACE,
|
QL_HEADER_SPACE,
|
||||||
PCI_DMA_FROMDEVICE);
|
PCI_DMA_FROMDEVICE);
|
||||||
err = pci_dma_mapping_error(map);
|
err = pci_dma_mapping_error(qdev->pdev, map);
|
||||||
if(err) {
|
if(err) {
|
||||||
printk(KERN_ERR "%s: PCI mapping failed with error: %d\n",
|
printk(KERN_ERR "%s: PCI mapping failed with error: %d\n",
|
||||||
qdev->ndev->name, err);
|
qdev->ndev->name, err);
|
||||||
|
@ -1919,7 +1919,7 @@ static int ql_populate_free_queue(struct ql3_adapter *qdev)
|
||||||
QL_HEADER_SPACE,
|
QL_HEADER_SPACE,
|
||||||
PCI_DMA_FROMDEVICE);
|
PCI_DMA_FROMDEVICE);
|
||||||
|
|
||||||
err = pci_dma_mapping_error(map);
|
err = pci_dma_mapping_error(qdev->pdev, map);
|
||||||
if(err) {
|
if(err) {
|
||||||
printk(KERN_ERR "%s: PCI mapping failed with error: %d\n",
|
printk(KERN_ERR "%s: PCI mapping failed with error: %d\n",
|
||||||
qdev->ndev->name, err);
|
qdev->ndev->name, err);
|
||||||
|
@ -2454,7 +2454,7 @@ static int ql_send_map(struct ql3_adapter *qdev,
|
||||||
*/
|
*/
|
||||||
map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
|
map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
|
||||||
|
|
||||||
err = pci_dma_mapping_error(map);
|
err = pci_dma_mapping_error(qdev->pdev, map);
|
||||||
if(err) {
|
if(err) {
|
||||||
printk(KERN_ERR "%s: PCI mapping failed with error: %d\n",
|
printk(KERN_ERR "%s: PCI mapping failed with error: %d\n",
|
||||||
qdev->ndev->name, err);
|
qdev->ndev->name, err);
|
||||||
|
@ -2487,7 +2487,7 @@ static int ql_send_map(struct ql3_adapter *qdev,
|
||||||
sizeof(struct oal),
|
sizeof(struct oal),
|
||||||
PCI_DMA_TODEVICE);
|
PCI_DMA_TODEVICE);
|
||||||
|
|
||||||
err = pci_dma_mapping_error(map);
|
err = pci_dma_mapping_error(qdev->pdev, map);
|
||||||
if(err) {
|
if(err) {
|
||||||
|
|
||||||
printk(KERN_ERR "%s: PCI mapping outbound address list with error: %d\n",
|
printk(KERN_ERR "%s: PCI mapping outbound address list with error: %d\n",
|
||||||
|
@ -2514,7 +2514,7 @@ static int ql_send_map(struct ql3_adapter *qdev,
|
||||||
frag->page_offset, frag->size,
|
frag->page_offset, frag->size,
|
||||||
PCI_DMA_TODEVICE);
|
PCI_DMA_TODEVICE);
|
||||||
|
|
||||||
err = pci_dma_mapping_error(map);
|
err = pci_dma_mapping_error(qdev->pdev, map);
|
||||||
if(err) {
|
if(err) {
|
||||||
printk(KERN_ERR "%s: PCI mapping frags failed with error: %d\n",
|
printk(KERN_ERR "%s: PCI mapping frags failed with error: %d\n",
|
||||||
qdev->ndev->name, err);
|
qdev->ndev->name, err);
|
||||||
|
@ -2916,7 +2916,7 @@ static int ql_alloc_large_buffers(struct ql3_adapter *qdev)
|
||||||
QL_HEADER_SPACE,
|
QL_HEADER_SPACE,
|
||||||
PCI_DMA_FROMDEVICE);
|
PCI_DMA_FROMDEVICE);
|
||||||
|
|
||||||
err = pci_dma_mapping_error(map);
|
err = pci_dma_mapping_error(qdev->pdev, map);
|
||||||
if(err) {
|
if(err) {
|
||||||
printk(KERN_ERR "%s: PCI mapping failed with error: %d\n",
|
printk(KERN_ERR "%s: PCI mapping failed with error: %d\n",
|
||||||
qdev->ndev->name, err);
|
qdev->ndev->name, err);
|
||||||
|
|
|
@ -2512,8 +2512,8 @@ static void stop_nic(struct s2io_nic *nic)
|
||||||
* Return Value:
|
* Return Value:
|
||||||
* SUCCESS on success or an appropriate -ve value on failure.
|
* SUCCESS on success or an appropriate -ve value on failure.
|
||||||
*/
|
*/
|
||||||
|
static int fill_rx_buffers(struct s2io_nic *nic, struct ring_info *ring,
|
||||||
static int fill_rx_buffers(struct ring_info *ring, int from_card_up)
|
int from_card_up)
|
||||||
{
|
{
|
||||||
struct sk_buff *skb;
|
struct sk_buff *skb;
|
||||||
struct RxD_t *rxdp;
|
struct RxD_t *rxdp;
|
||||||
|
@ -2602,7 +2602,8 @@ static int fill_rx_buffers(struct ring_info *ring, int from_card_up)
|
||||||
rxdp1->Buffer0_ptr = pci_map_single
|
rxdp1->Buffer0_ptr = pci_map_single
|
||||||
(ring->pdev, skb->data, size - NET_IP_ALIGN,
|
(ring->pdev, skb->data, size - NET_IP_ALIGN,
|
||||||
PCI_DMA_FROMDEVICE);
|
PCI_DMA_FROMDEVICE);
|
||||||
if(pci_dma_mapping_error(rxdp1->Buffer0_ptr))
|
if (pci_dma_mapping_error(nic->pdev,
|
||||||
|
rxdp1->Buffer0_ptr))
|
||||||
goto pci_map_failed;
|
goto pci_map_failed;
|
||||||
|
|
||||||
rxdp->Control_2 =
|
rxdp->Control_2 =
|
||||||
|
@ -2636,7 +2637,8 @@ static int fill_rx_buffers(struct ring_info *ring, int from_card_up)
|
||||||
rxdp3->Buffer0_ptr =
|
rxdp3->Buffer0_ptr =
|
||||||
pci_map_single(ring->pdev, ba->ba_0,
|
pci_map_single(ring->pdev, ba->ba_0,
|
||||||
BUF0_LEN, PCI_DMA_FROMDEVICE);
|
BUF0_LEN, PCI_DMA_FROMDEVICE);
|
||||||
if (pci_dma_mapping_error(rxdp3->Buffer0_ptr))
|
if (pci_dma_mapping_error(nic->pdev,
|
||||||
|
rxdp3->Buffer0_ptr))
|
||||||
goto pci_map_failed;
|
goto pci_map_failed;
|
||||||
} else
|
} else
|
||||||
pci_dma_sync_single_for_device(ring->pdev,
|
pci_dma_sync_single_for_device(ring->pdev,
|
||||||
|
@ -2655,7 +2657,8 @@ static int fill_rx_buffers(struct ring_info *ring, int from_card_up)
|
||||||
(ring->pdev, skb->data, ring->mtu + 4,
|
(ring->pdev, skb->data, ring->mtu + 4,
|
||||||
PCI_DMA_FROMDEVICE);
|
PCI_DMA_FROMDEVICE);
|
||||||
|
|
||||||
if (pci_dma_mapping_error(rxdp3->Buffer2_ptr))
|
if (pci_dma_mapping_error(nic->pdev,
|
||||||
|
rxdp3->Buffer2_ptr))
|
||||||
goto pci_map_failed;
|
goto pci_map_failed;
|
||||||
|
|
||||||
if (from_card_up) {
|
if (from_card_up) {
|
||||||
|
@ -2664,8 +2667,8 @@ static int fill_rx_buffers(struct ring_info *ring, int from_card_up)
|
||||||
ba->ba_1, BUF1_LEN,
|
ba->ba_1, BUF1_LEN,
|
||||||
PCI_DMA_FROMDEVICE);
|
PCI_DMA_FROMDEVICE);
|
||||||
|
|
||||||
if (pci_dma_mapping_error
|
if (pci_dma_mapping_error(nic->pdev,
|
||||||
(rxdp3->Buffer1_ptr)) {
|
rxdp3->Buffer1_ptr)) {
|
||||||
pci_unmap_single
|
pci_unmap_single
|
||||||
(ring->pdev,
|
(ring->pdev,
|
||||||
(dma_addr_t)(unsigned long)
|
(dma_addr_t)(unsigned long)
|
||||||
|
@ -2806,9 +2809,9 @@ static void free_rx_buffers(struct s2io_nic *sp)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
static int s2io_chk_rx_buffers(struct ring_info *ring)
|
static int s2io_chk_rx_buffers(struct s2io_nic *nic, struct ring_info *ring)
|
||||||
{
|
{
|
||||||
if (fill_rx_buffers(ring, 0) == -ENOMEM) {
|
if (fill_rx_buffers(nic, ring, 0) == -ENOMEM) {
|
||||||
DBG_PRINT(INFO_DBG, "%s:Out of memory", ring->dev->name);
|
DBG_PRINT(INFO_DBG, "%s:Out of memory", ring->dev->name);
|
||||||
DBG_PRINT(INFO_DBG, " in Rx Intr!!\n");
|
DBG_PRINT(INFO_DBG, " in Rx Intr!!\n");
|
||||||
}
|
}
|
||||||
|
@ -2848,7 +2851,7 @@ static int s2io_poll_msix(struct napi_struct *napi, int budget)
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
pkts_processed = rx_intr_handler(ring, budget);
|
pkts_processed = rx_intr_handler(ring, budget);
|
||||||
s2io_chk_rx_buffers(ring);
|
s2io_chk_rx_buffers(nic, ring);
|
||||||
|
|
||||||
if (pkts_processed < budget_org) {
|
if (pkts_processed < budget_org) {
|
||||||
netif_rx_complete(dev, napi);
|
netif_rx_complete(dev, napi);
|
||||||
|
@ -2882,7 +2885,7 @@ static int s2io_poll_inta(struct napi_struct *napi, int budget)
|
||||||
for (i = 0; i < config->rx_ring_num; i++) {
|
for (i = 0; i < config->rx_ring_num; i++) {
|
||||||
ring = &mac_control->rings[i];
|
ring = &mac_control->rings[i];
|
||||||
ring_pkts_processed = rx_intr_handler(ring, budget);
|
ring_pkts_processed = rx_intr_handler(ring, budget);
|
||||||
s2io_chk_rx_buffers(ring);
|
s2io_chk_rx_buffers(nic, ring);
|
||||||
pkts_processed += ring_pkts_processed;
|
pkts_processed += ring_pkts_processed;
|
||||||
budget -= ring_pkts_processed;
|
budget -= ring_pkts_processed;
|
||||||
if (budget <= 0)
|
if (budget <= 0)
|
||||||
|
@ -2939,7 +2942,8 @@ static void s2io_netpoll(struct net_device *dev)
|
||||||
rx_intr_handler(&mac_control->rings[i], 0);
|
rx_intr_handler(&mac_control->rings[i], 0);
|
||||||
|
|
||||||
for (i = 0; i < config->rx_ring_num; i++) {
|
for (i = 0; i < config->rx_ring_num; i++) {
|
||||||
if (fill_rx_buffers(&mac_control->rings[i], 0) == -ENOMEM) {
|
if (fill_rx_buffers(nic, &mac_control->rings[i], 0) ==
|
||||||
|
-ENOMEM) {
|
||||||
DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name);
|
DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name);
|
||||||
DBG_PRINT(INFO_DBG, " in Rx Netpoll!!\n");
|
DBG_PRINT(INFO_DBG, " in Rx Netpoll!!\n");
|
||||||
break;
|
break;
|
||||||
|
@ -4235,14 +4239,14 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
|
||||||
txdp->Buffer_Pointer = pci_map_single(sp->pdev,
|
txdp->Buffer_Pointer = pci_map_single(sp->pdev,
|
||||||
fifo->ufo_in_band_v,
|
fifo->ufo_in_band_v,
|
||||||
sizeof(u64), PCI_DMA_TODEVICE);
|
sizeof(u64), PCI_DMA_TODEVICE);
|
||||||
if (pci_dma_mapping_error(txdp->Buffer_Pointer))
|
if (pci_dma_mapping_error(sp->pdev, txdp->Buffer_Pointer))
|
||||||
goto pci_map_failed;
|
goto pci_map_failed;
|
||||||
txdp++;
|
txdp++;
|
||||||
}
|
}
|
||||||
|
|
||||||
txdp->Buffer_Pointer = pci_map_single
|
txdp->Buffer_Pointer = pci_map_single
|
||||||
(sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE);
|
(sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE);
|
||||||
if (pci_dma_mapping_error(txdp->Buffer_Pointer))
|
if (pci_dma_mapping_error(sp->pdev, txdp->Buffer_Pointer))
|
||||||
goto pci_map_failed;
|
goto pci_map_failed;
|
||||||
|
|
||||||
txdp->Host_Control = (unsigned long) skb;
|
txdp->Host_Control = (unsigned long) skb;
|
||||||
|
@ -4345,7 +4349,7 @@ static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id)
|
||||||
netif_rx_schedule(dev, &ring->napi);
|
netif_rx_schedule(dev, &ring->napi);
|
||||||
} else {
|
} else {
|
||||||
rx_intr_handler(ring, 0);
|
rx_intr_handler(ring, 0);
|
||||||
s2io_chk_rx_buffers(ring);
|
s2io_chk_rx_buffers(sp, ring);
|
||||||
}
|
}
|
||||||
|
|
||||||
return IRQ_HANDLED;
|
return IRQ_HANDLED;
|
||||||
|
@ -4826,7 +4830,7 @@ static irqreturn_t s2io_isr(int irq, void *dev_id)
|
||||||
*/
|
*/
|
||||||
if (!config->napi) {
|
if (!config->napi) {
|
||||||
for (i = 0; i < config->rx_ring_num; i++)
|
for (i = 0; i < config->rx_ring_num; i++)
|
||||||
s2io_chk_rx_buffers(&mac_control->rings[i]);
|
s2io_chk_rx_buffers(sp, &mac_control->rings[i]);
|
||||||
}
|
}
|
||||||
writeq(sp->general_int_mask, &bar0->general_int_mask);
|
writeq(sp->general_int_mask, &bar0->general_int_mask);
|
||||||
readl(&bar0->general_int_status);
|
readl(&bar0->general_int_status);
|
||||||
|
@ -6859,7 +6863,7 @@ static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp,
|
||||||
pci_map_single( sp->pdev, (*skb)->data,
|
pci_map_single( sp->pdev, (*skb)->data,
|
||||||
size - NET_IP_ALIGN,
|
size - NET_IP_ALIGN,
|
||||||
PCI_DMA_FROMDEVICE);
|
PCI_DMA_FROMDEVICE);
|
||||||
if (pci_dma_mapping_error(rxdp1->Buffer0_ptr))
|
if (pci_dma_mapping_error(sp->pdev, rxdp1->Buffer0_ptr))
|
||||||
goto memalloc_failed;
|
goto memalloc_failed;
|
||||||
rxdp->Host_Control = (unsigned long) (*skb);
|
rxdp->Host_Control = (unsigned long) (*skb);
|
||||||
}
|
}
|
||||||
|
@ -6886,12 +6890,13 @@ static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp,
|
||||||
pci_map_single(sp->pdev, (*skb)->data,
|
pci_map_single(sp->pdev, (*skb)->data,
|
||||||
dev->mtu + 4,
|
dev->mtu + 4,
|
||||||
PCI_DMA_FROMDEVICE);
|
PCI_DMA_FROMDEVICE);
|
||||||
if (pci_dma_mapping_error(rxdp3->Buffer2_ptr))
|
if (pci_dma_mapping_error(sp->pdev, rxdp3->Buffer2_ptr))
|
||||||
goto memalloc_failed;
|
goto memalloc_failed;
|
||||||
rxdp3->Buffer0_ptr = *temp0 =
|
rxdp3->Buffer0_ptr = *temp0 =
|
||||||
pci_map_single( sp->pdev, ba->ba_0, BUF0_LEN,
|
pci_map_single( sp->pdev, ba->ba_0, BUF0_LEN,
|
||||||
PCI_DMA_FROMDEVICE);
|
PCI_DMA_FROMDEVICE);
|
||||||
if (pci_dma_mapping_error(rxdp3->Buffer0_ptr)) {
|
if (pci_dma_mapping_error(sp->pdev,
|
||||||
|
rxdp3->Buffer0_ptr)) {
|
||||||
pci_unmap_single (sp->pdev,
|
pci_unmap_single (sp->pdev,
|
||||||
(dma_addr_t)rxdp3->Buffer2_ptr,
|
(dma_addr_t)rxdp3->Buffer2_ptr,
|
||||||
dev->mtu + 4, PCI_DMA_FROMDEVICE);
|
dev->mtu + 4, PCI_DMA_FROMDEVICE);
|
||||||
|
@ -6903,7 +6908,8 @@ static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp,
|
||||||
rxdp3->Buffer1_ptr = *temp1 =
|
rxdp3->Buffer1_ptr = *temp1 =
|
||||||
pci_map_single(sp->pdev, ba->ba_1, BUF1_LEN,
|
pci_map_single(sp->pdev, ba->ba_1, BUF1_LEN,
|
||||||
PCI_DMA_FROMDEVICE);
|
PCI_DMA_FROMDEVICE);
|
||||||
if (pci_dma_mapping_error(rxdp3->Buffer1_ptr)) {
|
if (pci_dma_mapping_error(sp->pdev,
|
||||||
|
rxdp3->Buffer1_ptr)) {
|
||||||
pci_unmap_single (sp->pdev,
|
pci_unmap_single (sp->pdev,
|
||||||
(dma_addr_t)rxdp3->Buffer0_ptr,
|
(dma_addr_t)rxdp3->Buffer0_ptr,
|
||||||
BUF0_LEN, PCI_DMA_FROMDEVICE);
|
BUF0_LEN, PCI_DMA_FROMDEVICE);
|
||||||
|
@ -7187,7 +7193,7 @@ static int s2io_card_up(struct s2io_nic * sp)
|
||||||
|
|
||||||
for (i = 0; i < config->rx_ring_num; i++) {
|
for (i = 0; i < config->rx_ring_num; i++) {
|
||||||
mac_control->rings[i].mtu = dev->mtu;
|
mac_control->rings[i].mtu = dev->mtu;
|
||||||
ret = fill_rx_buffers(&mac_control->rings[i], 1);
|
ret = fill_rx_buffers(sp, &mac_control->rings[i], 1);
|
||||||
if (ret) {
|
if (ret) {
|
||||||
DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
|
DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
|
||||||
dev->name);
|
dev->name);
|
||||||
|
|
|
@ -233,7 +233,7 @@ static inline int efx_init_rx_buffer_skb(struct efx_rx_queue *rx_queue,
|
||||||
rx_buf->data, rx_buf->len,
|
rx_buf->data, rx_buf->len,
|
||||||
PCI_DMA_FROMDEVICE);
|
PCI_DMA_FROMDEVICE);
|
||||||
|
|
||||||
if (unlikely(pci_dma_mapping_error(rx_buf->dma_addr))) {
|
if (unlikely(pci_dma_mapping_error(efx->pci_dev, rx_buf->dma_addr))) {
|
||||||
dev_kfree_skb_any(rx_buf->skb);
|
dev_kfree_skb_any(rx_buf->skb);
|
||||||
rx_buf->skb = NULL;
|
rx_buf->skb = NULL;
|
||||||
return -EIO;
|
return -EIO;
|
||||||
|
@ -275,7 +275,7 @@ static inline int efx_init_rx_buffer_page(struct efx_rx_queue *rx_queue,
|
||||||
0, efx_rx_buf_size(efx),
|
0, efx_rx_buf_size(efx),
|
||||||
PCI_DMA_FROMDEVICE);
|
PCI_DMA_FROMDEVICE);
|
||||||
|
|
||||||
if (unlikely(pci_dma_mapping_error(dma_addr))) {
|
if (unlikely(pci_dma_mapping_error(efx->pci_dev, dma_addr))) {
|
||||||
__free_pages(rx_buf->page, efx->rx_buffer_order);
|
__free_pages(rx_buf->page, efx->rx_buffer_order);
|
||||||
rx_buf->page = NULL;
|
rx_buf->page = NULL;
|
||||||
return -EIO;
|
return -EIO;
|
||||||
|
|
|
@ -172,7 +172,7 @@ static inline int efx_enqueue_skb(struct efx_tx_queue *tx_queue,
|
||||||
|
|
||||||
/* Process all fragments */
|
/* Process all fragments */
|
||||||
while (1) {
|
while (1) {
|
||||||
if (unlikely(pci_dma_mapping_error(dma_addr)))
|
if (unlikely(pci_dma_mapping_error(pci_dev, dma_addr)))
|
||||||
goto pci_err;
|
goto pci_err;
|
||||||
|
|
||||||
/* Store fields for marking in the per-fragment final
|
/* Store fields for marking in the per-fragment final
|
||||||
|
@ -661,7 +661,8 @@ efx_tsoh_heap_alloc(struct efx_tx_queue *tx_queue, size_t header_len)
|
||||||
tsoh->dma_addr = pci_map_single(tx_queue->efx->pci_dev,
|
tsoh->dma_addr = pci_map_single(tx_queue->efx->pci_dev,
|
||||||
TSOH_BUFFER(tsoh), header_len,
|
TSOH_BUFFER(tsoh), header_len,
|
||||||
PCI_DMA_TODEVICE);
|
PCI_DMA_TODEVICE);
|
||||||
if (unlikely(pci_dma_mapping_error(tsoh->dma_addr))) {
|
if (unlikely(pci_dma_mapping_error(tx_queue->efx->pci_dev,
|
||||||
|
tsoh->dma_addr))) {
|
||||||
kfree(tsoh);
|
kfree(tsoh);
|
||||||
return NULL;
|
return NULL;
|
||||||
}
|
}
|
||||||
|
@ -863,7 +864,7 @@ static inline int tso_get_fragment(struct tso_state *st, struct efx_nic *efx,
|
||||||
|
|
||||||
st->ifc.unmap_addr = pci_map_page(efx->pci_dev, page, page_off,
|
st->ifc.unmap_addr = pci_map_page(efx->pci_dev, page, page_off,
|
||||||
len, PCI_DMA_TODEVICE);
|
len, PCI_DMA_TODEVICE);
|
||||||
if (likely(!pci_dma_mapping_error(st->ifc.unmap_addr))) {
|
if (likely(!pci_dma_mapping_error(efx->pci_dev, st->ifc.unmap_addr))) {
|
||||||
st->ifc.unmap_len = len;
|
st->ifc.unmap_len = len;
|
||||||
st->ifc.len = len;
|
st->ifc.len = len;
|
||||||
st->ifc.dma_addr = st->ifc.unmap_addr;
|
st->ifc.dma_addr = st->ifc.unmap_addr;
|
||||||
|
|
|
@ -452,7 +452,7 @@ spider_net_prepare_rx_descr(struct spider_net_card *card,
|
||||||
/* iommu-map the skb */
|
/* iommu-map the skb */
|
||||||
buf = pci_map_single(card->pdev, descr->skb->data,
|
buf = pci_map_single(card->pdev, descr->skb->data,
|
||||||
SPIDER_NET_MAX_FRAME, PCI_DMA_FROMDEVICE);
|
SPIDER_NET_MAX_FRAME, PCI_DMA_FROMDEVICE);
|
||||||
if (pci_dma_mapping_error(buf)) {
|
if (pci_dma_mapping_error(card->pdev, buf)) {
|
||||||
dev_kfree_skb_any(descr->skb);
|
dev_kfree_skb_any(descr->skb);
|
||||||
descr->skb = NULL;
|
descr->skb = NULL;
|
||||||
if (netif_msg_rx_err(card) && net_ratelimit())
|
if (netif_msg_rx_err(card) && net_ratelimit())
|
||||||
|
@ -691,7 +691,7 @@ spider_net_prepare_tx_descr(struct spider_net_card *card,
|
||||||
unsigned long flags;
|
unsigned long flags;
|
||||||
|
|
||||||
buf = pci_map_single(card->pdev, skb->data, skb->len, PCI_DMA_TODEVICE);
|
buf = pci_map_single(card->pdev, skb->data, skb->len, PCI_DMA_TODEVICE);
|
||||||
if (pci_dma_mapping_error(buf)) {
|
if (pci_dma_mapping_error(card->pdev, buf)) {
|
||||||
if (netif_msg_tx_err(card) && net_ratelimit())
|
if (netif_msg_tx_err(card) && net_ratelimit())
|
||||||
dev_err(&card->netdev->dev, "could not iommu-map packet (%p, %i). "
|
dev_err(&card->netdev->dev, "could not iommu-map packet (%p, %i). "
|
||||||
"Dropping packet\n", skb->data, skb->len);
|
"Dropping packet\n", skb->data, skb->len);
|
||||||
|
|
|
@ -506,7 +506,7 @@ static void *alloc_rxbuf_page(struct pci_dev *hwdev, dma_addr_t *dma_handle)
|
||||||
return NULL;
|
return NULL;
|
||||||
*dma_handle = pci_map_single(hwdev, buf, PAGE_SIZE,
|
*dma_handle = pci_map_single(hwdev, buf, PAGE_SIZE,
|
||||||
PCI_DMA_FROMDEVICE);
|
PCI_DMA_FROMDEVICE);
|
||||||
if (pci_dma_mapping_error(*dma_handle)) {
|
if (pci_dma_mapping_error(hwdev, *dma_handle)) {
|
||||||
free_page((unsigned long)buf);
|
free_page((unsigned long)buf);
|
||||||
return NULL;
|
return NULL;
|
||||||
}
|
}
|
||||||
|
@ -536,7 +536,7 @@ static struct sk_buff *alloc_rxbuf_skb(struct net_device *dev,
|
||||||
return NULL;
|
return NULL;
|
||||||
*dma_handle = pci_map_single(hwdev, skb->data, RX_BUF_SIZE,
|
*dma_handle = pci_map_single(hwdev, skb->data, RX_BUF_SIZE,
|
||||||
PCI_DMA_FROMDEVICE);
|
PCI_DMA_FROMDEVICE);
|
||||||
if (pci_dma_mapping_error(*dma_handle)) {
|
if (pci_dma_mapping_error(hwdev, *dma_handle)) {
|
||||||
dev_kfree_skb_any(skb);
|
dev_kfree_skb_any(skb);
|
||||||
return NULL;
|
return NULL;
|
||||||
}
|
}
|
||||||
|
|
|
@ -1166,7 +1166,7 @@ ath5k_rxbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf)
|
||||||
bf->skb = skb;
|
bf->skb = skb;
|
||||||
bf->skbaddr = pci_map_single(sc->pdev,
|
bf->skbaddr = pci_map_single(sc->pdev,
|
||||||
skb->data, sc->rxbufsize, PCI_DMA_FROMDEVICE);
|
skb->data, sc->rxbufsize, PCI_DMA_FROMDEVICE);
|
||||||
if (unlikely(pci_dma_mapping_error(bf->skbaddr))) {
|
if (unlikely(pci_dma_mapping_error(sc->pdev, bf->skbaddr))) {
|
||||||
ATH5K_ERR(sc, "%s: DMA mapping failed\n", __func__);
|
ATH5K_ERR(sc, "%s: DMA mapping failed\n", __func__);
|
||||||
dev_kfree_skb(skb);
|
dev_kfree_skb(skb);
|
||||||
bf->skb = NULL;
|
bf->skb = NULL;
|
||||||
|
@ -1918,7 +1918,7 @@ ath5k_beacon_setup(struct ath5k_softc *sc, struct ath5k_buf *bf)
|
||||||
ATH5K_DBG(sc, ATH5K_DEBUG_BEACON, "skb %p [data %p len %u] "
|
ATH5K_DBG(sc, ATH5K_DEBUG_BEACON, "skb %p [data %p len %u] "
|
||||||
"skbaddr %llx\n", skb, skb->data, skb->len,
|
"skbaddr %llx\n", skb, skb->data, skb->len,
|
||||||
(unsigned long long)bf->skbaddr);
|
(unsigned long long)bf->skbaddr);
|
||||||
if (pci_dma_mapping_error(bf->skbaddr)) {
|
if (pci_dma_mapping_error(sc->pdev, bf->skbaddr)) {
|
||||||
ATH5K_ERR(sc, "beacon DMA mapping failed\n");
|
ATH5K_ERR(sc, "beacon DMA mapping failed\n");
|
||||||
return -EIO;
|
return -EIO;
|
||||||
}
|
}
|
||||||
|
|
|
@ -3525,7 +3525,7 @@ static int ibmvfc_init_crq(struct ibmvfc_host *vhost)
|
||||||
crq->msg_token = dma_map_single(dev, crq->msgs,
|
crq->msg_token = dma_map_single(dev, crq->msgs,
|
||||||
PAGE_SIZE, DMA_BIDIRECTIONAL);
|
PAGE_SIZE, DMA_BIDIRECTIONAL);
|
||||||
|
|
||||||
if (dma_mapping_error(crq->msg_token))
|
if (dma_mapping_error(dev, crq->msg_token))
|
||||||
goto map_failed;
|
goto map_failed;
|
||||||
|
|
||||||
retrc = rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
|
retrc = rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
|
||||||
|
@ -3618,7 +3618,7 @@ static int ibmvfc_alloc_mem(struct ibmvfc_host *vhost)
|
||||||
async_q->size * sizeof(*async_q->msgs),
|
async_q->size * sizeof(*async_q->msgs),
|
||||||
DMA_BIDIRECTIONAL);
|
DMA_BIDIRECTIONAL);
|
||||||
|
|
||||||
if (dma_mapping_error(async_q->msg_token)) {
|
if (dma_mapping_error(dev, async_q->msg_token)) {
|
||||||
dev_err(dev, "Failed to map async queue\n");
|
dev_err(dev, "Failed to map async queue\n");
|
||||||
goto free_async_crq;
|
goto free_async_crq;
|
||||||
}
|
}
|
||||||
|
|
|
@ -859,7 +859,7 @@ static void send_mad_adapter_info(struct ibmvscsi_host_data *hostdata)
|
||||||
sizeof(hostdata->madapter_info),
|
sizeof(hostdata->madapter_info),
|
||||||
DMA_BIDIRECTIONAL);
|
DMA_BIDIRECTIONAL);
|
||||||
|
|
||||||
if (dma_mapping_error(req->buffer)) {
|
if (dma_mapping_error(hostdata->dev, req->buffer)) {
|
||||||
if (!firmware_has_feature(FW_FEATURE_CMO))
|
if (!firmware_has_feature(FW_FEATURE_CMO))
|
||||||
dev_err(hostdata->dev,
|
dev_err(hostdata->dev,
|
||||||
"Unable to map request_buffer for "
|
"Unable to map request_buffer for "
|
||||||
|
@ -1407,7 +1407,7 @@ static int ibmvscsi_do_host_config(struct ibmvscsi_host_data *hostdata,
|
||||||
length,
|
length,
|
||||||
DMA_BIDIRECTIONAL);
|
DMA_BIDIRECTIONAL);
|
||||||
|
|
||||||
if (dma_mapping_error(host_config->buffer)) {
|
if (dma_mapping_error(hostdata->dev, host_config->buffer)) {
|
||||||
if (!firmware_has_feature(FW_FEATURE_CMO))
|
if (!firmware_has_feature(FW_FEATURE_CMO))
|
||||||
dev_err(hostdata->dev,
|
dev_err(hostdata->dev,
|
||||||
"dma_mapping error getting host config\n");
|
"dma_mapping error getting host config\n");
|
||||||
|
|
|
@ -564,7 +564,7 @@ static int crq_queue_create(struct crq_queue *queue, struct srp_target *target)
|
||||||
queue->size * sizeof(*queue->msgs),
|
queue->size * sizeof(*queue->msgs),
|
||||||
DMA_BIDIRECTIONAL);
|
DMA_BIDIRECTIONAL);
|
||||||
|
|
||||||
if (dma_mapping_error(queue->msg_token))
|
if (dma_mapping_error(target->dev, queue->msg_token))
|
||||||
goto map_failed;
|
goto map_failed;
|
||||||
|
|
||||||
err = h_reg_crq(vport->dma_dev->unit_address, queue->msg_token,
|
err = h_reg_crq(vport->dma_dev->unit_address, queue->msg_token,
|
||||||
|
|
|
@ -253,7 +253,7 @@ static int rpavscsi_init_crq_queue(struct crq_queue *queue,
|
||||||
queue->size * sizeof(*queue->msgs),
|
queue->size * sizeof(*queue->msgs),
|
||||||
DMA_BIDIRECTIONAL);
|
DMA_BIDIRECTIONAL);
|
||||||
|
|
||||||
if (dma_mapping_error(queue->msg_token))
|
if (dma_mapping_error(hostdata->dev, queue->msg_token))
|
||||||
goto map_failed;
|
goto map_failed;
|
||||||
|
|
||||||
gather_partition_info();
|
gather_partition_info();
|
||||||
|
|
|
@ -313,14 +313,14 @@ atmel_spi_dma_map_xfer(struct atmel_spi *as, struct spi_transfer *xfer)
|
||||||
xfer->tx_dma = dma_map_single(dev,
|
xfer->tx_dma = dma_map_single(dev,
|
||||||
(void *) xfer->tx_buf, xfer->len,
|
(void *) xfer->tx_buf, xfer->len,
|
||||||
DMA_TO_DEVICE);
|
DMA_TO_DEVICE);
|
||||||
if (dma_mapping_error(xfer->tx_dma))
|
if (dma_mapping_error(dev, xfer->tx_dma))
|
||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
}
|
}
|
||||||
if (xfer->rx_buf) {
|
if (xfer->rx_buf) {
|
||||||
xfer->rx_dma = dma_map_single(dev,
|
xfer->rx_dma = dma_map_single(dev,
|
||||||
xfer->rx_buf, xfer->len,
|
xfer->rx_buf, xfer->len,
|
||||||
DMA_FROM_DEVICE);
|
DMA_FROM_DEVICE);
|
||||||
if (dma_mapping_error(xfer->rx_dma)) {
|
if (dma_mapping_error(dev, xfer->rx_dma)) {
|
||||||
if (xfer->tx_buf)
|
if (xfer->tx_buf)
|
||||||
dma_unmap_single(dev,
|
dma_unmap_single(dev,
|
||||||
xfer->tx_dma, xfer->len,
|
xfer->tx_dma, xfer->len,
|
||||||
|
|
|
@ -334,7 +334,7 @@ static int au1550_spi_dma_rxtmp_alloc(struct au1550_spi *hw, unsigned size)
|
||||||
hw->dma_rx_tmpbuf_size = size;
|
hw->dma_rx_tmpbuf_size = size;
|
||||||
hw->dma_rx_tmpbuf_addr = dma_map_single(hw->dev, hw->dma_rx_tmpbuf,
|
hw->dma_rx_tmpbuf_addr = dma_map_single(hw->dev, hw->dma_rx_tmpbuf,
|
||||||
size, DMA_FROM_DEVICE);
|
size, DMA_FROM_DEVICE);
|
||||||
if (dma_mapping_error(hw->dma_rx_tmpbuf_addr)) {
|
if (dma_mapping_error(hw->dev, hw->dma_rx_tmpbuf_addr)) {
|
||||||
kfree(hw->dma_rx_tmpbuf);
|
kfree(hw->dma_rx_tmpbuf);
|
||||||
hw->dma_rx_tmpbuf = 0;
|
hw->dma_rx_tmpbuf = 0;
|
||||||
hw->dma_rx_tmpbuf_size = 0;
|
hw->dma_rx_tmpbuf_size = 0;
|
||||||
|
@ -378,7 +378,7 @@ static int au1550_spi_dma_txrxb(struct spi_device *spi, struct spi_transfer *t)
|
||||||
dma_rx_addr = dma_map_single(hw->dev,
|
dma_rx_addr = dma_map_single(hw->dev,
|
||||||
(void *)t->rx_buf,
|
(void *)t->rx_buf,
|
||||||
t->len, DMA_FROM_DEVICE);
|
t->len, DMA_FROM_DEVICE);
|
||||||
if (dma_mapping_error(dma_rx_addr))
|
if (dma_mapping_error(hw->dev, dma_rx_addr))
|
||||||
dev_err(hw->dev, "rx dma map error\n");
|
dev_err(hw->dev, "rx dma map error\n");
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
|
@ -401,7 +401,7 @@ static int au1550_spi_dma_txrxb(struct spi_device *spi, struct spi_transfer *t)
|
||||||
dma_tx_addr = dma_map_single(hw->dev,
|
dma_tx_addr = dma_map_single(hw->dev,
|
||||||
(void *)t->tx_buf,
|
(void *)t->tx_buf,
|
||||||
t->len, DMA_TO_DEVICE);
|
t->len, DMA_TO_DEVICE);
|
||||||
if (dma_mapping_error(dma_tx_addr))
|
if (dma_mapping_error(hw->dev, dma_tx_addr))
|
||||||
dev_err(hw->dev, "tx dma map error\n");
|
dev_err(hw->dev, "tx dma map error\n");
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
|
|
|
@ -836,7 +836,7 @@ static int omap2_mcspi_transfer(struct spi_device *spi, struct spi_message *m)
|
||||||
if (tx_buf != NULL) {
|
if (tx_buf != NULL) {
|
||||||
t->tx_dma = dma_map_single(&spi->dev, (void *) tx_buf,
|
t->tx_dma = dma_map_single(&spi->dev, (void *) tx_buf,
|
||||||
len, DMA_TO_DEVICE);
|
len, DMA_TO_DEVICE);
|
||||||
if (dma_mapping_error(t->tx_dma)) {
|
if (dma_mapping_error(&spi->dev, t->tx_dma)) {
|
||||||
dev_dbg(&spi->dev, "dma %cX %d bytes error\n",
|
dev_dbg(&spi->dev, "dma %cX %d bytes error\n",
|
||||||
'T', len);
|
'T', len);
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
|
@ -845,7 +845,7 @@ static int omap2_mcspi_transfer(struct spi_device *spi, struct spi_message *m)
|
||||||
if (rx_buf != NULL) {
|
if (rx_buf != NULL) {
|
||||||
t->rx_dma = dma_map_single(&spi->dev, rx_buf, t->len,
|
t->rx_dma = dma_map_single(&spi->dev, rx_buf, t->len,
|
||||||
DMA_FROM_DEVICE);
|
DMA_FROM_DEVICE);
|
||||||
if (dma_mapping_error(t->rx_dma)) {
|
if (dma_mapping_error(&spi->dev, t->rx_dma)) {
|
||||||
dev_dbg(&spi->dev, "dma %cX %d bytes error\n",
|
dev_dbg(&spi->dev, "dma %cX %d bytes error\n",
|
||||||
'R', len);
|
'R', len);
|
||||||
if (tx_buf != NULL)
|
if (tx_buf != NULL)
|
||||||
|
|
|
@ -353,7 +353,7 @@ static int map_dma_buffers(struct driver_data *drv_data)
|
||||||
drv_data->rx_dma = dma_map_single(dev, drv_data->rx,
|
drv_data->rx_dma = dma_map_single(dev, drv_data->rx,
|
||||||
drv_data->rx_map_len,
|
drv_data->rx_map_len,
|
||||||
DMA_FROM_DEVICE);
|
DMA_FROM_DEVICE);
|
||||||
if (dma_mapping_error(drv_data->rx_dma))
|
if (dma_mapping_error(dev, drv_data->rx_dma))
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
/* Stream map the tx buffer */
|
/* Stream map the tx buffer */
|
||||||
|
@ -361,7 +361,7 @@ static int map_dma_buffers(struct driver_data *drv_data)
|
||||||
drv_data->tx_map_len,
|
drv_data->tx_map_len,
|
||||||
DMA_TO_DEVICE);
|
DMA_TO_DEVICE);
|
||||||
|
|
||||||
if (dma_mapping_error(drv_data->tx_dma)) {
|
if (dma_mapping_error(dev, drv_data->tx_dma)) {
|
||||||
dma_unmap_single(dev, drv_data->rx_dma,
|
dma_unmap_single(dev, drv_data->rx_dma,
|
||||||
drv_data->rx_map_len, DMA_FROM_DEVICE);
|
drv_data->rx_map_len, DMA_FROM_DEVICE);
|
||||||
return 0;
|
return 0;
|
||||||
|
|
|
@ -491,7 +491,7 @@ static int map_dma_buffers(struct driver_data *drv_data)
|
||||||
buf,
|
buf,
|
||||||
drv_data->tx_map_len,
|
drv_data->tx_map_len,
|
||||||
DMA_TO_DEVICE);
|
DMA_TO_DEVICE);
|
||||||
if (dma_mapping_error(drv_data->tx_dma))
|
if (dma_mapping_error(dev, drv_data->tx_dma))
|
||||||
return -1;
|
return -1;
|
||||||
|
|
||||||
drv_data->tx_dma_needs_unmap = 1;
|
drv_data->tx_dma_needs_unmap = 1;
|
||||||
|
@ -516,7 +516,7 @@ static int map_dma_buffers(struct driver_data *drv_data)
|
||||||
buf,
|
buf,
|
||||||
drv_data->len,
|
drv_data->len,
|
||||||
DMA_FROM_DEVICE);
|
DMA_FROM_DEVICE);
|
||||||
if (dma_mapping_error(drv_data->rx_dma))
|
if (dma_mapping_error(dev, drv_data->rx_dma))
|
||||||
return -1;
|
return -1;
|
||||||
drv_data->rx_dma_needs_unmap = 1;
|
drv_data->rx_dma_needs_unmap = 1;
|
||||||
}
|
}
|
||||||
|
@ -534,7 +534,7 @@ static int map_dma_buffers(struct driver_data *drv_data)
|
||||||
buf,
|
buf,
|
||||||
drv_data->tx_map_len,
|
drv_data->tx_map_len,
|
||||||
DMA_TO_DEVICE);
|
DMA_TO_DEVICE);
|
||||||
if (dma_mapping_error(drv_data->tx_dma)) {
|
if (dma_mapping_error(dev, drv_data->tx_dma)) {
|
||||||
if (drv_data->rx_dma) {
|
if (drv_data->rx_dma) {
|
||||||
dma_unmap_single(dev,
|
dma_unmap_single(dev,
|
||||||
drv_data->rx_dma,
|
drv_data->rx_dma,
|
||||||
|
|
|
@ -24,8 +24,8 @@
|
||||||
pci_unmap_sg(alpha_gendev_to_pci(dev), sg, nents, dir)
|
pci_unmap_sg(alpha_gendev_to_pci(dev), sg, nents, dir)
|
||||||
#define dma_supported(dev, mask) \
|
#define dma_supported(dev, mask) \
|
||||||
pci_dma_supported(alpha_gendev_to_pci(dev), mask)
|
pci_dma_supported(alpha_gendev_to_pci(dev), mask)
|
||||||
#define dma_mapping_error(addr) \
|
#define dma_mapping_error(dev, addr) \
|
||||||
pci_dma_mapping_error(addr)
|
pci_dma_mapping_error(alpha_gendev_to_pci(dev), addr)
|
||||||
|
|
||||||
#else /* no PCI - no IOMMU. */
|
#else /* no PCI - no IOMMU. */
|
||||||
|
|
||||||
|
@ -45,7 +45,7 @@ int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
|
||||||
#define dma_unmap_page(dev, addr, size, dir) ((void)0)
|
#define dma_unmap_page(dev, addr, size, dir) ((void)0)
|
||||||
#define dma_unmap_sg(dev, sg, nents, dir) ((void)0)
|
#define dma_unmap_sg(dev, sg, nents, dir) ((void)0)
|
||||||
|
|
||||||
#define dma_mapping_error(addr) (0)
|
#define dma_mapping_error(dev, addr) (0)
|
||||||
|
|
||||||
#endif /* !CONFIG_PCI */
|
#endif /* !CONFIG_PCI */
|
||||||
|
|
||||||
|
|
|
@ -106,7 +106,7 @@ extern dma_addr_t pci_map_page(struct pci_dev *, struct page *,
|
||||||
/* Test for pci_map_single or pci_map_page having generated an error. */
|
/* Test for pci_map_single or pci_map_page having generated an error. */
|
||||||
|
|
||||||
static inline int
|
static inline int
|
||||||
pci_dma_mapping_error(dma_addr_t dma_addr)
|
pci_dma_mapping_error(struct pci_dev *pdev, dma_addr_t dma_addr)
|
||||||
{
|
{
|
||||||
return dma_addr == 0;
|
return dma_addr == 0;
|
||||||
}
|
}
|
||||||
|
|
|
@ -56,7 +56,7 @@ static inline int dma_is_consistent(struct device *dev, dma_addr_t handle)
|
||||||
/*
|
/*
|
||||||
* DMA errors are defined by all-bits-set in the DMA address.
|
* DMA errors are defined by all-bits-set in the DMA address.
|
||||||
*/
|
*/
|
||||||
static inline int dma_mapping_error(dma_addr_t dma_addr)
|
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
|
||||||
{
|
{
|
||||||
return dma_addr == ~0;
|
return dma_addr == ~0;
|
||||||
}
|
}
|
||||||
|
|
|
@ -35,7 +35,7 @@ static inline int dma_set_mask(struct device *dev, u64 dma_mask)
|
||||||
/*
|
/*
|
||||||
* dma_map_single can't fail as it is implemented now.
|
* dma_map_single can't fail as it is implemented now.
|
||||||
*/
|
*/
|
||||||
static inline int dma_mapping_error(dma_addr_t addr)
|
static inline int dma_mapping_error(struct device *dev, dma_addr_t addr)
|
||||||
{
|
{
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
|
@ -120,7 +120,7 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline int
|
static inline int
|
||||||
dma_mapping_error(dma_addr_t dma_addr)
|
dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
|
||||||
{
|
{
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
|
@ -126,7 +126,7 @@ void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nele
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline
|
static inline
|
||||||
int dma_mapping_error(dma_addr_t dma_addr)
|
int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
|
||||||
{
|
{
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
|
@ -61,7 +61,7 @@ dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
|
||||||
#define dma_sync_sg_for_device dma_sync_sg_for_cpu
|
#define dma_sync_sg_for_device dma_sync_sg_for_cpu
|
||||||
|
|
||||||
extern int
|
extern int
|
||||||
dma_mapping_error(dma_addr_t dma_addr);
|
dma_mapping_error(struct device *dev, dma_addr_t dma_addr);
|
||||||
|
|
||||||
extern int
|
extern int
|
||||||
dma_supported(struct device *dev, u64 mask);
|
dma_supported(struct device *dev, u64 mask);
|
||||||
|
|
|
@ -144,9 +144,9 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline int
|
static inline int
|
||||||
dma_mapping_error(dma_addr_t dma_addr)
|
dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
|
||||||
{
|
{
|
||||||
return pci_dma_mapping_error(dma_addr);
|
return pci_dma_mapping_error(to_pci_dev(dev), dma_addr);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
|
|
@ -99,9 +99,9 @@ pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sg,
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline int
|
static inline int
|
||||||
pci_dma_mapping_error(dma_addr_t dma_addr)
|
pci_dma_mapping_error(struct pci_dev *pdev, dma_addr_t dma_addr)
|
||||||
{
|
{
|
||||||
return dma_mapping_error(dma_addr);
|
return dma_mapping_error(&pdev->dev, dma_addr);
|
||||||
}
|
}
|
||||||
|
|
||||||
#endif
|
#endif
|
||||||
|
|
|
@ -54,7 +54,7 @@ typedef void ia64_mv_dma_sync_single_for_cpu (struct device *, dma_addr_t, size_
|
||||||
typedef void ia64_mv_dma_sync_sg_for_cpu (struct device *, struct scatterlist *, int, int);
|
typedef void ia64_mv_dma_sync_sg_for_cpu (struct device *, struct scatterlist *, int, int);
|
||||||
typedef void ia64_mv_dma_sync_single_for_device (struct device *, dma_addr_t, size_t, int);
|
typedef void ia64_mv_dma_sync_single_for_device (struct device *, dma_addr_t, size_t, int);
|
||||||
typedef void ia64_mv_dma_sync_sg_for_device (struct device *, struct scatterlist *, int, int);
|
typedef void ia64_mv_dma_sync_sg_for_device (struct device *, struct scatterlist *, int, int);
|
||||||
typedef int ia64_mv_dma_mapping_error (dma_addr_t dma_addr);
|
typedef int ia64_mv_dma_mapping_error(struct device *, dma_addr_t dma_addr);
|
||||||
typedef int ia64_mv_dma_supported (struct device *, u64);
|
typedef int ia64_mv_dma_supported (struct device *, u64);
|
||||||
|
|
||||||
typedef dma_addr_t ia64_mv_dma_map_single_attrs (struct device *, void *, size_t, int, struct dma_attrs *);
|
typedef dma_addr_t ia64_mv_dma_map_single_attrs (struct device *, void *, size_t, int, struct dma_attrs *);
|
||||||
|
|
|
@ -84,7 +84,7 @@ static inline void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *s
|
||||||
{
|
{
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline int dma_mapping_error(dma_addr_t handle)
|
static inline int dma_mapping_error(struct device *dev, dma_addr_t handle)
|
||||||
{
|
{
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
|
@ -42,7 +42,7 @@ extern void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
|
||||||
int nelems, enum dma_data_direction direction);
|
int nelems, enum dma_data_direction direction);
|
||||||
extern void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
|
extern void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
|
||||||
int nelems, enum dma_data_direction direction);
|
int nelems, enum dma_data_direction direction);
|
||||||
extern int dma_mapping_error(dma_addr_t dma_addr);
|
extern int dma_mapping_error(struct device *dev, dma_addr_t dma_addr);
|
||||||
extern int dma_supported(struct device *dev, u64 mask);
|
extern int dma_supported(struct device *dev, u64 mask);
|
||||||
|
|
||||||
static inline int
|
static inline int
|
||||||
|
|
|
@ -182,7 +182,7 @@ void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline
|
static inline
|
||||||
int dma_mapping_error(dma_addr_t dma_addr)
|
int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
|
||||||
{
|
{
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
|
@ -248,6 +248,6 @@ void * sba_get_iommu(struct parisc_device *dev);
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
/* At the moment, we panic on error for IOMMU resource exaustion */
|
/* At the moment, we panic on error for IOMMU resource exaustion */
|
||||||
#define dma_mapping_error(x) 0
|
#define dma_mapping_error(dev, x) 0
|
||||||
|
|
||||||
#endif
|
#endif
|
||||||
|
|
|
@ -415,7 +415,7 @@ static inline void dma_sync_sg_for_device(struct device *dev,
|
||||||
__dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
|
__dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline int dma_mapping_error(dma_addr_t dma_addr)
|
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
|
||||||
{
|
{
|
||||||
#ifdef CONFIG_PPC64
|
#ifdef CONFIG_PPC64
|
||||||
return (dma_addr == DMA_ERROR_CODE);
|
return (dma_addr == DMA_ERROR_CODE);
|
||||||
|
|
|
@ -171,7 +171,7 @@ static inline int dma_get_cache_alignment(void)
|
||||||
return L1_CACHE_BYTES;
|
return L1_CACHE_BYTES;
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline int dma_mapping_error(dma_addr_t dma_addr)
|
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
|
||||||
{
|
{
|
||||||
return dma_addr == 0;
|
return dma_addr == 0;
|
||||||
}
|
}
|
||||||
|
|
|
@ -135,7 +135,7 @@ static inline void dma_sync_sg_for_device(struct device *dev,
|
||||||
/* No flushing needed to sync cpu writes to the device. */
|
/* No flushing needed to sync cpu writes to the device. */
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline int dma_mapping_error(dma_addr_t dma_addr)
|
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
|
||||||
{
|
{
|
||||||
return (dma_addr == DMA_ERROR_CODE);
|
return (dma_addr == DMA_ERROR_CODE);
|
||||||
}
|
}
|
||||||
|
|
|
@ -154,7 +154,8 @@ static inline void pci_dma_burst_advice(struct pci_dev *pdev,
|
||||||
|
|
||||||
#define PCI_DMA_ERROR_CODE (~(dma_addr_t)0x0)
|
#define PCI_DMA_ERROR_CODE (~(dma_addr_t)0x0)
|
||||||
|
|
||||||
static inline int pci_dma_mapping_error(dma_addr_t dma_addr)
|
static inline int pci_dma_mapping_error(struct pci_dev *pdev,
|
||||||
|
dma_addr_t dma_addr)
|
||||||
{
|
{
|
||||||
return (dma_addr == PCI_DMA_ERROR_CODE);
|
return (dma_addr == PCI_DMA_ERROR_CODE);
|
||||||
}
|
}
|
||||||
|
|
|
@ -140,9 +140,10 @@ extern int pci_dma_supported(struct pci_dev *hwdev, u64 mask);
|
||||||
#define PCI64_REQUIRED_MASK (~(dma64_addr_t)0)
|
#define PCI64_REQUIRED_MASK (~(dma64_addr_t)0)
|
||||||
#define PCI64_ADDR_BASE 0xfffc000000000000UL
|
#define PCI64_ADDR_BASE 0xfffc000000000000UL
|
||||||
|
|
||||||
static inline int pci_dma_mapping_error(dma_addr_t dma_addr)
|
static inline int pci_dma_mapping_error(struct pci_dev *pdev,
|
||||||
|
dma_addr_t dma_addr)
|
||||||
{
|
{
|
||||||
return dma_mapping_error(dma_addr);
|
return dma_mapping_error(&pdev->dev, dma_addr);
|
||||||
}
|
}
|
||||||
|
|
||||||
#ifdef CONFIG_PCI
|
#ifdef CONFIG_PCI
|
||||||
|
|
|
@ -5,6 +5,9 @@ struct dev_archdata {
|
||||||
#ifdef CONFIG_ACPI
|
#ifdef CONFIG_ACPI
|
||||||
void *acpi_handle;
|
void *acpi_handle;
|
||||||
#endif
|
#endif
|
||||||
|
#ifdef CONFIG_X86_64
|
||||||
|
struct dma_mapping_ops *dma_ops;
|
||||||
|
#endif
|
||||||
#ifdef CONFIG_DMAR
|
#ifdef CONFIG_DMAR
|
||||||
void *iommu; /* hook for IOMMU specific extension */
|
void *iommu; /* hook for IOMMU specific extension */
|
||||||
#endif
|
#endif
|
||||||
|
|
|
@ -17,7 +17,8 @@ extern int panic_on_overflow;
|
||||||
extern int force_iommu;
|
extern int force_iommu;
|
||||||
|
|
||||||
struct dma_mapping_ops {
|
struct dma_mapping_ops {
|
||||||
int (*mapping_error)(dma_addr_t dma_addr);
|
int (*mapping_error)(struct device *dev,
|
||||||
|
dma_addr_t dma_addr);
|
||||||
void* (*alloc_coherent)(struct device *dev, size_t size,
|
void* (*alloc_coherent)(struct device *dev, size_t size,
|
||||||
dma_addr_t *dma_handle, gfp_t gfp);
|
dma_addr_t *dma_handle, gfp_t gfp);
|
||||||
void (*free_coherent)(struct device *dev, size_t size,
|
void (*free_coherent)(struct device *dev, size_t size,
|
||||||
|
@ -56,14 +57,32 @@ struct dma_mapping_ops {
|
||||||
int is_phys;
|
int is_phys;
|
||||||
};
|
};
|
||||||
|
|
||||||
extern const struct dma_mapping_ops *dma_ops;
|
extern struct dma_mapping_ops *dma_ops;
|
||||||
|
|
||||||
static inline int dma_mapping_error(dma_addr_t dma_addr)
|
static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
|
||||||
{
|
{
|
||||||
if (dma_ops->mapping_error)
|
#ifdef CONFIG_X86_32
|
||||||
return dma_ops->mapping_error(dma_addr);
|
return dma_ops;
|
||||||
|
#else
|
||||||
|
if (unlikely(!dev) || !dev->archdata.dma_ops)
|
||||||
|
return dma_ops;
|
||||||
|
else
|
||||||
|
return dev->archdata.dma_ops;
|
||||||
|
#endif
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Make sure we keep the same behaviour */
|
||||||
|
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
|
||||||
|
{
|
||||||
|
#ifdef CONFIG_X86_32
|
||||||
|
return 0;
|
||||||
|
#else
|
||||||
|
struct dma_mapping_ops *ops = get_dma_ops(dev);
|
||||||
|
if (ops->mapping_error)
|
||||||
|
return ops->mapping_error(dev, dma_addr);
|
||||||
|
|
||||||
return (dma_addr == bad_dma_address);
|
return (dma_addr == bad_dma_address);
|
||||||
|
#endif
|
||||||
}
|
}
|
||||||
|
|
||||||
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
|
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
|
||||||
|
@ -83,44 +102,53 @@ static inline dma_addr_t
|
||||||
dma_map_single(struct device *hwdev, void *ptr, size_t size,
|
dma_map_single(struct device *hwdev, void *ptr, size_t size,
|
||||||
int direction)
|
int direction)
|
||||||
{
|
{
|
||||||
|
struct dma_mapping_ops *ops = get_dma_ops(hwdev);
|
||||||
|
|
||||||
BUG_ON(!valid_dma_direction(direction));
|
BUG_ON(!valid_dma_direction(direction));
|
||||||
return dma_ops->map_single(hwdev, virt_to_phys(ptr), size, direction);
|
return ops->map_single(hwdev, virt_to_phys(ptr), size, direction);
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline void
|
static inline void
|
||||||
dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
|
dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
|
||||||
int direction)
|
int direction)
|
||||||
{
|
{
|
||||||
|
struct dma_mapping_ops *ops = get_dma_ops(dev);
|
||||||
|
|
||||||
BUG_ON(!valid_dma_direction(direction));
|
BUG_ON(!valid_dma_direction(direction));
|
||||||
if (dma_ops->unmap_single)
|
if (ops->unmap_single)
|
||||||
dma_ops->unmap_single(dev, addr, size, direction);
|
ops->unmap_single(dev, addr, size, direction);
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline int
|
static inline int
|
||||||
dma_map_sg(struct device *hwdev, struct scatterlist *sg,
|
dma_map_sg(struct device *hwdev, struct scatterlist *sg,
|
||||||
int nents, int direction)
|
int nents, int direction)
|
||||||
{
|
{
|
||||||
|
struct dma_mapping_ops *ops = get_dma_ops(hwdev);
|
||||||
|
|
||||||
BUG_ON(!valid_dma_direction(direction));
|
BUG_ON(!valid_dma_direction(direction));
|
||||||
return dma_ops->map_sg(hwdev, sg, nents, direction);
|
return ops->map_sg(hwdev, sg, nents, direction);
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline void
|
static inline void
|
||||||
dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
|
dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
|
||||||
int direction)
|
int direction)
|
||||||
{
|
{
|
||||||
|
struct dma_mapping_ops *ops = get_dma_ops(hwdev);
|
||||||
|
|
||||||
BUG_ON(!valid_dma_direction(direction));
|
BUG_ON(!valid_dma_direction(direction));
|
||||||
if (dma_ops->unmap_sg)
|
if (ops->unmap_sg)
|
||||||
dma_ops->unmap_sg(hwdev, sg, nents, direction);
|
ops->unmap_sg(hwdev, sg, nents, direction);
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline void
|
static inline void
|
||||||
dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
|
dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
|
||||||
size_t size, int direction)
|
size_t size, int direction)
|
||||||
{
|
{
|
||||||
|
struct dma_mapping_ops *ops = get_dma_ops(hwdev);
|
||||||
|
|
||||||
BUG_ON(!valid_dma_direction(direction));
|
BUG_ON(!valid_dma_direction(direction));
|
||||||
if (dma_ops->sync_single_for_cpu)
|
if (ops->sync_single_for_cpu)
|
||||||
dma_ops->sync_single_for_cpu(hwdev, dma_handle, size,
|
ops->sync_single_for_cpu(hwdev, dma_handle, size, direction);
|
||||||
direction);
|
|
||||||
flush_write_buffers();
|
flush_write_buffers();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -128,10 +156,11 @@ static inline void
|
||||||
dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle,
|
dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle,
|
||||||
size_t size, int direction)
|
size_t size, int direction)
|
||||||
{
|
{
|
||||||
|
struct dma_mapping_ops *ops = get_dma_ops(hwdev);
|
||||||
|
|
||||||
BUG_ON(!valid_dma_direction(direction));
|
BUG_ON(!valid_dma_direction(direction));
|
||||||
if (dma_ops->sync_single_for_device)
|
if (ops->sync_single_for_device)
|
||||||
dma_ops->sync_single_for_device(hwdev, dma_handle, size,
|
ops->sync_single_for_device(hwdev, dma_handle, size, direction);
|
||||||
direction);
|
|
||||||
flush_write_buffers();
|
flush_write_buffers();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -139,11 +168,12 @@ static inline void
|
||||||
dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
|
dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
|
||||||
unsigned long offset, size_t size, int direction)
|
unsigned long offset, size_t size, int direction)
|
||||||
{
|
{
|
||||||
BUG_ON(!valid_dma_direction(direction));
|
struct dma_mapping_ops *ops = get_dma_ops(hwdev);
|
||||||
if (dma_ops->sync_single_range_for_cpu)
|
|
||||||
dma_ops->sync_single_range_for_cpu(hwdev, dma_handle, offset,
|
|
||||||
size, direction);
|
|
||||||
|
|
||||||
|
BUG_ON(!valid_dma_direction(direction));
|
||||||
|
if (ops->sync_single_range_for_cpu)
|
||||||
|
ops->sync_single_range_for_cpu(hwdev, dma_handle, offset,
|
||||||
|
size, direction);
|
||||||
flush_write_buffers();
|
flush_write_buffers();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -152,11 +182,12 @@ dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle,
|
||||||
unsigned long offset, size_t size,
|
unsigned long offset, size_t size,
|
||||||
int direction)
|
int direction)
|
||||||
{
|
{
|
||||||
BUG_ON(!valid_dma_direction(direction));
|
struct dma_mapping_ops *ops = get_dma_ops(hwdev);
|
||||||
if (dma_ops->sync_single_range_for_device)
|
|
||||||
dma_ops->sync_single_range_for_device(hwdev, dma_handle,
|
|
||||||
offset, size, direction);
|
|
||||||
|
|
||||||
|
BUG_ON(!valid_dma_direction(direction));
|
||||||
|
if (ops->sync_single_range_for_device)
|
||||||
|
ops->sync_single_range_for_device(hwdev, dma_handle,
|
||||||
|
offset, size, direction);
|
||||||
flush_write_buffers();
|
flush_write_buffers();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -164,9 +195,11 @@ static inline void
|
||||||
dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
|
dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
|
||||||
int nelems, int direction)
|
int nelems, int direction)
|
||||||
{
|
{
|
||||||
|
struct dma_mapping_ops *ops = get_dma_ops(hwdev);
|
||||||
|
|
||||||
BUG_ON(!valid_dma_direction(direction));
|
BUG_ON(!valid_dma_direction(direction));
|
||||||
if (dma_ops->sync_sg_for_cpu)
|
if (ops->sync_sg_for_cpu)
|
||||||
dma_ops->sync_sg_for_cpu(hwdev, sg, nelems, direction);
|
ops->sync_sg_for_cpu(hwdev, sg, nelems, direction);
|
||||||
flush_write_buffers();
|
flush_write_buffers();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -174,9 +207,11 @@ static inline void
|
||||||
dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
|
dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
|
||||||
int nelems, int direction)
|
int nelems, int direction)
|
||||||
{
|
{
|
||||||
|
struct dma_mapping_ops *ops = get_dma_ops(hwdev);
|
||||||
|
|
||||||
BUG_ON(!valid_dma_direction(direction));
|
BUG_ON(!valid_dma_direction(direction));
|
||||||
if (dma_ops->sync_sg_for_device)
|
if (ops->sync_sg_for_device)
|
||||||
dma_ops->sync_sg_for_device(hwdev, sg, nelems, direction);
|
ops->sync_sg_for_device(hwdev, sg, nelems, direction);
|
||||||
|
|
||||||
flush_write_buffers();
|
flush_write_buffers();
|
||||||
}
|
}
|
||||||
|
@ -185,9 +220,11 @@ static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
|
||||||
size_t offset, size_t size,
|
size_t offset, size_t size,
|
||||||
int direction)
|
int direction)
|
||||||
{
|
{
|
||||||
|
struct dma_mapping_ops *ops = get_dma_ops(dev);
|
||||||
|
|
||||||
BUG_ON(!valid_dma_direction(direction));
|
BUG_ON(!valid_dma_direction(direction));
|
||||||
return dma_ops->map_single(dev, page_to_phys(page)+offset,
|
return ops->map_single(dev, page_to_phys(page) + offset,
|
||||||
size, direction);
|
size, direction);
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
|
static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
|
||||||
|
|
|
@ -35,7 +35,7 @@ extern int swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg,
|
||||||
int nents, int direction);
|
int nents, int direction);
|
||||||
extern void swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg,
|
extern void swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg,
|
||||||
int nents, int direction);
|
int nents, int direction);
|
||||||
extern int swiotlb_dma_mapping_error(dma_addr_t dma_addr);
|
extern int swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr);
|
||||||
extern void swiotlb_free_coherent(struct device *hwdev, size_t size,
|
extern void swiotlb_free_coherent(struct device *hwdev, size_t size,
|
||||||
void *vaddr, dma_addr_t dma_handle);
|
void *vaddr, dma_addr_t dma_handle);
|
||||||
extern int swiotlb_dma_supported(struct device *hwdev, u64 mask);
|
extern int swiotlb_dma_supported(struct device *hwdev, u64 mask);
|
||||||
|
|
|
@ -139,7 +139,7 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
|
||||||
consistent_sync(sg_virt(sg), sg->length, dir);
|
consistent_sync(sg_virt(sg), sg->length, dir);
|
||||||
}
|
}
|
||||||
static inline int
|
static inline int
|
||||||
dma_mapping_error(dma_addr_t dma_addr)
|
dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
|
||||||
{
|
{
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
|
@ -758,7 +758,7 @@ static inline dma_addr_t i2o_dma_map_single(struct i2o_controller *c, void *ptr,
|
||||||
}
|
}
|
||||||
|
|
||||||
dma_addr = dma_map_single(&c->pdev->dev, ptr, size, direction);
|
dma_addr = dma_map_single(&c->pdev->dev, ptr, size, direction);
|
||||||
if (!dma_mapping_error(dma_addr)) {
|
if (!dma_mapping_error(&c->pdev->dev, dma_addr)) {
|
||||||
#ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64
|
#ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64
|
||||||
if ((sizeof(dma_addr_t) > 4) && c->pae_support) {
|
if ((sizeof(dma_addr_t) > 4) && c->pae_support) {
|
||||||
*mptr++ = cpu_to_le32(0x7C020002);
|
*mptr++ = cpu_to_le32(0x7C020002);
|
||||||
|
|
|
@ -427,9 +427,9 @@ static inline int ssb_dma_mapping_error(struct ssb_device *dev, dma_addr_t addr)
|
||||||
{
|
{
|
||||||
switch (dev->bus->bustype) {
|
switch (dev->bus->bustype) {
|
||||||
case SSB_BUSTYPE_PCI:
|
case SSB_BUSTYPE_PCI:
|
||||||
return pci_dma_mapping_error(addr);
|
return pci_dma_mapping_error(dev->bus->host_pci, addr);
|
||||||
case SSB_BUSTYPE_SSB:
|
case SSB_BUSTYPE_SSB:
|
||||||
return dma_mapping_error(addr);
|
return dma_mapping_error(dev->dev, addr);
|
||||||
default:
|
default:
|
||||||
__ssb_dma_not_implemented(dev);
|
__ssb_dma_not_implemented(dev);
|
||||||
}
|
}
|
||||||
|
|
|
@ -1590,7 +1590,7 @@ static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
|
||||||
{
|
{
|
||||||
if (dev->dma_ops)
|
if (dev->dma_ops)
|
||||||
return dev->dma_ops->mapping_error(dev, dma_addr);
|
return dev->dma_ops->mapping_error(dev, dma_addr);
|
||||||
return dma_mapping_error(dma_addr);
|
return dma_mapping_error(dev->dma_device, dma_addr);
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
|
|
@ -492,7 +492,7 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
|
||||||
*/
|
*/
|
||||||
dma_addr_t handle;
|
dma_addr_t handle;
|
||||||
handle = swiotlb_map_single(NULL, NULL, size, DMA_FROM_DEVICE);
|
handle = swiotlb_map_single(NULL, NULL, size, DMA_FROM_DEVICE);
|
||||||
if (swiotlb_dma_mapping_error(handle))
|
if (swiotlb_dma_mapping_error(hwdev, handle))
|
||||||
return NULL;
|
return NULL;
|
||||||
|
|
||||||
ret = bus_to_virt(handle);
|
ret = bus_to_virt(handle);
|
||||||
|
@ -824,7 +824,7 @@ swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
|
||||||
}
|
}
|
||||||
|
|
||||||
int
|
int
|
||||||
swiotlb_dma_mapping_error(dma_addr_t dma_addr)
|
swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
|
||||||
{
|
{
|
||||||
return (dma_addr == virt_to_bus(io_tlb_overflow_buffer));
|
return (dma_addr == virt_to_bus(io_tlb_overflow_buffer));
|
||||||
}
|
}
|
||||||
|
|
|
@ -169,7 +169,8 @@ static int send_write(struct svcxprt_rdma *xprt, struct svc_rqst *rqstp,
|
||||||
(void *)
|
(void *)
|
||||||
vec->sge[xdr_sge_no].iov_base + sge_off,
|
vec->sge[xdr_sge_no].iov_base + sge_off,
|
||||||
sge_bytes, DMA_TO_DEVICE);
|
sge_bytes, DMA_TO_DEVICE);
|
||||||
if (dma_mapping_error(sge[sge_no].addr))
|
if (dma_mapping_error(xprt->sc_cm_id->device->dma_device,
|
||||||
|
sge[sge_no].addr))
|
||||||
goto err;
|
goto err;
|
||||||
sge_off = 0;
|
sge_off = 0;
|
||||||
sge_no++;
|
sge_no++;
|
||||||
|
|
Загрузка…
Ссылка в новой задаче