RDMA: switch from 'pci_' to 'dma_' API
The wrappers in include/linux/pci-dma-compat.h should go away.

The patch has been generated with the coccinelle script below.

It has been hand modified to use 'dma_set_mask_and_coherent()' instead of
'pci_set_dma_mask()/pci_set_consistent_dma_mask()' when applicable. This
is less verbose.

It has been compile tested.

@@
@@
-    PCI_DMA_BIDIRECTIONAL
+    DMA_BIDIRECTIONAL

@@
@@
-    PCI_DMA_TODEVICE
+    DMA_TO_DEVICE

@@
@@
-    PCI_DMA_FROMDEVICE
+    DMA_FROM_DEVICE

@@
@@
-    PCI_DMA_NONE
+    DMA_NONE

@@
expression e1, e2, e3;
@@
-    pci_alloc_consistent(e1, e2, e3)
+    dma_alloc_coherent(&e1->dev, e2, e3, GFP_)

@@
expression e1, e2, e3;
@@
-    pci_zalloc_consistent(e1, e2, e3)
+    dma_alloc_coherent(&e1->dev, e2, e3, GFP_)

@@
expression e1, e2, e3, e4;
@@
-    pci_free_consistent(e1, e2, e3, e4)
+    dma_free_coherent(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_map_single(e1, e2, e3, e4)
+    dma_map_single(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_unmap_single(e1, e2, e3, e4)
+    dma_unmap_single(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4, e5;
@@
-    pci_map_page(e1, e2, e3, e4, e5)
+    dma_map_page(&e1->dev, e2, e3, e4, e5)

@@
expression e1, e2, e3, e4;
@@
-    pci_unmap_page(e1, e2, e3, e4)
+    dma_unmap_page(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_map_sg(e1, e2, e3, e4)
+    dma_map_sg(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_unmap_sg(e1, e2, e3, e4)
+    dma_unmap_sg(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_dma_sync_single_for_cpu(e1, e2, e3, e4)
+    dma_sync_single_for_cpu(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_dma_sync_single_for_device(e1, e2, e3, e4)
+    dma_sync_single_for_device(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_dma_sync_sg_for_cpu(e1, e2, e3, e4)
+    dma_sync_sg_for_cpu(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_dma_sync_sg_for_device(e1, e2, e3, e4)
+    dma_sync_sg_for_device(&e1->dev, e2, e3, e4)

@@
expression e1, e2;
@@
-    pci_dma_mapping_error(e1, e2)
+    dma_mapping_error(&e1->dev, e2)

@@
expression e1, e2;
@@
-    pci_set_dma_mask(e1, e2)
+    dma_set_mask(&e1->dev, e2)

@@
expression e1, e2;
@@
-    pci_set_consistent_dma_mask(e1, e2)
+    dma_set_coherent_mask(&e1->dev, e2)

Link: https://lore.kernel.org/r/259e53b7a00f64bf081d41da8761b171b2ad8f5c.1629634798.git.christophe.jaillet@wanadoo.fr
Signed-off-by: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
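Two details of the conversion are worth spelling out. First, the 'GFP_' emitted by the alloc rules is not valid C; the script cannot know the right allocation flag for each call site, so the token is presumably left incomplete on purpose, forcing whoever applies the patch to pick GFP_KERNEL, GFP_ATOMIC, etc. by hand before the result compiles. Second, the hand modification collapses the old two-call mask setup into a single call. Below is a minimal before/after sketch of that pattern; the helper functions are hypothetical and not taken from any driver in this patch.

#include <linux/pci.h>
#include <linux/dma-mapping.h>

/* Before: the pci-dma-compat.h wrappers needed one call for the
 * streaming DMA mask and a second one for the coherent (consistent)
 * mask, typically with fallback logic duplicated for each.
 */
static int example_dma_setup_old(struct pci_dev *pdev)
{
    int err;

    err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
    if (err)
        return err;
    return pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
}

/* After: dma_set_mask_and_coherent() sets both masks in one call on the
 * generic struct device embedded in the pci_dev. This is why the
 * mechanical dma_set_mask()/dma_set_coherent_mask() pairs produced by
 * the script could be collapsed by hand wherever both masks were being
 * set to the same value.
 */
static int example_dma_setup_new(struct pci_dev *pdev)
{
    return dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
}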
Parent: 03da1b26fa
Commit: 3f69f4e0d6
--- a/drivers/infiniband/hw/hfi1/pcie.c
+++ b/drivers/infiniband/hw/hfi1/pcie.c
@@ -92,25 +92,18 @@ int hfi1_pcie_init(struct hfi1_devdata *dd)
         goto bail;
     }
 
-    ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+    ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
     if (ret) {
         /*
          * If the 64 bit setup fails, try 32 bit. Some systems
          * do not setup 64 bit maps on systems with 2GB or less
          * memory installed.
          */
-        ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+        ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
         if (ret) {
             dd_dev_err(dd, "Unable to set DMA mask: %d\n", ret);
             goto bail;
         }
-        ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
-    } else {
-        ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
-    }
-    if (ret) {
-        dd_dev_err(dd, "Unable to set DMA consistent mask: %d\n", ret);
-        goto bail;
-    }
 
     pci_set_master(pdev);
--- a/drivers/infiniband/hw/hfi1/user_exp_rcv.c
+++ b/drivers/infiniband/hw/hfi1/user_exp_rcv.c
@@ -177,8 +177,8 @@ static void unpin_rcv_pages(struct hfi1_filedata *fd,
     struct mm_struct *mm;
 
     if (mapped) {
-        pci_unmap_single(dd->pcidev, node->dma_addr,
-                         node->npages * PAGE_SIZE, PCI_DMA_FROMDEVICE);
+        dma_unmap_single(&dd->pcidev->dev, node->dma_addr,
+                         node->npages * PAGE_SIZE, DMA_FROM_DEVICE);
         pages = &node->pages[idx];
         mm = mm_from_tid_node(node);
     } else {
@@ -739,9 +739,8 @@ static int set_rcvarray_entry(struct hfi1_filedata *fd,
     if (!node)
         return -ENOMEM;
 
-    phys = pci_map_single(dd->pcidev,
-                          __va(page_to_phys(pages[0])),
-                          npages * PAGE_SIZE, PCI_DMA_FROMDEVICE);
+    phys = dma_map_single(&dd->pcidev->dev, __va(page_to_phys(pages[0])),
+                          npages * PAGE_SIZE, DMA_FROM_DEVICE);
     if (dma_mapping_error(&dd->pcidev->dev, phys)) {
         dd_dev_err(dd, "Failed to DMA map Exp Rcv pages 0x%llx\n",
                    phys);
@@ -783,8 +782,8 @@ out_unmap:
     hfi1_cdbg(TID, "Failed to insert RB node %u 0x%lx, 0x%lx %d",
               node->rcventry, node->notifier.interval_tree.start,
               node->phys, ret);
-    pci_unmap_single(dd->pcidev, phys, npages * PAGE_SIZE,
-                     PCI_DMA_FROMDEVICE);
+    dma_unmap_single(&dd->pcidev->dev, phys, npages * PAGE_SIZE,
+                     DMA_FROM_DEVICE);
     kfree(node);
     return -EFAULT;
 }
--- a/drivers/infiniband/hw/mthca/mthca_eq.c
+++ b/drivers/infiniband/hw/mthca/mthca_eq.c
@@ -617,9 +617,9 @@ static void mthca_free_eq(struct mthca_dev *dev,
 
     mthca_free_mr(dev, &eq->mr);
     for (i = 0; i < npages; ++i)
-        pci_free_consistent(dev->pdev, PAGE_SIZE,
-                            eq->page_list[i].buf,
-                            dma_unmap_addr(&eq->page_list[i], mapping));
+        dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
+                          eq->page_list[i].buf,
+                          dma_unmap_addr(&eq->page_list[i], mapping));
 
     kfree(eq->page_list);
     mthca_free_mailbox(dev, mailbox);
@@ -739,17 +739,18 @@ int mthca_map_eq_icm(struct mthca_dev *dev, u64 icm_virt)
     dev->eq_table.icm_page = alloc_page(GFP_HIGHUSER);
     if (!dev->eq_table.icm_page)
         return -ENOMEM;
-    dev->eq_table.icm_dma = pci_map_page(dev->pdev, dev->eq_table.icm_page, 0,
-                                         PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
-    if (pci_dma_mapping_error(dev->pdev, dev->eq_table.icm_dma)) {
+    dev->eq_table.icm_dma =
+        dma_map_page(&dev->pdev->dev, dev->eq_table.icm_page, 0,
+                     PAGE_SIZE, DMA_BIDIRECTIONAL);
+    if (dma_mapping_error(&dev->pdev->dev, dev->eq_table.icm_dma)) {
         __free_page(dev->eq_table.icm_page);
         return -ENOMEM;
     }
 
     ret = mthca_MAP_ICM_page(dev, dev->eq_table.icm_dma, icm_virt);
     if (ret) {
-        pci_unmap_page(dev->pdev, dev->eq_table.icm_dma, PAGE_SIZE,
-                       PCI_DMA_BIDIRECTIONAL);
+        dma_unmap_page(&dev->pdev->dev, dev->eq_table.icm_dma,
+                       PAGE_SIZE, DMA_BIDIRECTIONAL);
         __free_page(dev->eq_table.icm_page);
     }
 
@@ -759,8 +760,8 @@ int mthca_map_eq_icm(struct mthca_dev *dev, u64 icm_virt)
 void mthca_unmap_eq_icm(struct mthca_dev *dev)
 {
     mthca_UNMAP_ICM(dev, dev->eq_table.icm_virt, 1);
-    pci_unmap_page(dev->pdev, dev->eq_table.icm_dma, PAGE_SIZE,
-                   PCI_DMA_BIDIRECTIONAL);
+    dma_unmap_page(&dev->pdev->dev, dev->eq_table.icm_dma, PAGE_SIZE,
+                   DMA_BIDIRECTIONAL);
     __free_page(dev->eq_table.icm_page);
 }
 
--- a/drivers/infiniband/hw/mthca/mthca_main.c
+++ b/drivers/infiniband/hw/mthca/mthca_main.c
@@ -937,26 +937,15 @@ static int __mthca_init_one(struct pci_dev *pdev, int hca_type)
 
     pci_set_master(pdev);
 
-    err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+    err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
     if (err) {
         dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask.\n");
-        err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+        err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
         if (err) {
             dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting.\n");
             goto err_free_res;
         }
     }
-    err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
-    if (err) {
-        dev_warn(&pdev->dev, "Warning: couldn't set 64-bit "
-                 "consistent PCI DMA mask.\n");
-        err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
-        if (err) {
-            dev_err(&pdev->dev, "Can't set consistent PCI DMA mask, "
-                    "aborting.\n");
-            goto err_free_res;
-        }
-    }
 
     /* We can handle large RDMA requests, so allow larger segments. */
     dma_set_max_seg_size(&pdev->dev, 1024 * 1024 * 1024);
--- a/drivers/infiniband/hw/mthca/mthca_memfree.c
+++ b/drivers/infiniband/hw/mthca/mthca_memfree.c
@@ -66,8 +66,8 @@ static void mthca_free_icm_pages(struct mthca_dev *dev, struct mthca_icm_chunk *
     int i;
 
     if (chunk->nsg > 0)
-        pci_unmap_sg(dev->pdev, chunk->mem, chunk->npages,
-                     PCI_DMA_BIDIRECTIONAL);
+        dma_unmap_sg(&dev->pdev->dev, chunk->mem, chunk->npages,
+                     DMA_BIDIRECTIONAL);
 
     for (i = 0; i < chunk->npages; ++i)
         __free_pages(sg_page(&chunk->mem[i]),
@@ -184,9 +184,10 @@ struct mthca_icm *mthca_alloc_icm(struct mthca_dev *dev, int npages,
             if (coherent)
                 ++chunk->nsg;
             else if (chunk->npages == MTHCA_ICM_CHUNK_LEN) {
-                chunk->nsg = pci_map_sg(dev->pdev, chunk->mem,
-                                        chunk->npages,
-                                        PCI_DMA_BIDIRECTIONAL);
+                chunk->nsg =
+                    dma_map_sg(&dev->pdev->dev, chunk->mem,
+                               chunk->npages,
+                               DMA_BIDIRECTIONAL);
 
                 if (chunk->nsg <= 0)
                     goto fail;
@@ -204,9 +205,8 @@ struct mthca_icm *mthca_alloc_icm(struct mthca_dev *dev, int npages,
     }
 
     if (!coherent && chunk) {
-        chunk->nsg = pci_map_sg(dev->pdev, chunk->mem,
-                                chunk->npages,
-                                PCI_DMA_BIDIRECTIONAL);
+        chunk->nsg = dma_map_sg(&dev->pdev->dev, chunk->mem,
+                                chunk->npages, DMA_BIDIRECTIONAL);
 
         if (chunk->nsg <= 0)
             goto fail;
@@ -480,7 +480,8 @@ int mthca_map_user_db(struct mthca_dev *dev, struct mthca_uar *uar,
     sg_set_page(&db_tab->page[i].mem, pages[0], MTHCA_ICM_PAGE_SIZE,
                 uaddr & ~PAGE_MASK);
 
-    ret = pci_map_sg(dev->pdev, &db_tab->page[i].mem, 1, PCI_DMA_TODEVICE);
+    ret = dma_map_sg(&dev->pdev->dev, &db_tab->page[i].mem, 1,
+                     DMA_TO_DEVICE);
     if (ret < 0) {
         unpin_user_page(pages[0]);
         goto out;
@@ -489,7 +490,8 @@ int mthca_map_user_db(struct mthca_dev *dev, struct mthca_uar *uar,
     ret = mthca_MAP_ICM_page(dev, sg_dma_address(&db_tab->page[i].mem),
                              mthca_uarc_virt(dev, uar, i));
     if (ret) {
-        pci_unmap_sg(dev->pdev, &db_tab->page[i].mem, 1, PCI_DMA_TODEVICE);
+        dma_unmap_sg(&dev->pdev->dev, &db_tab->page[i].mem, 1,
+                     DMA_TO_DEVICE);
         unpin_user_page(sg_page(&db_tab->page[i].mem));
         goto out;
     }
@@ -555,7 +557,8 @@ void mthca_cleanup_user_db_tab(struct mthca_dev *dev, struct mthca_uar *uar,
     for (i = 0; i < dev->uar_table.uarc_size / MTHCA_ICM_PAGE_SIZE; ++i) {
         if (db_tab->page[i].uvirt) {
             mthca_UNMAP_ICM(dev, mthca_uarc_virt(dev, uar, i), 1);
-            pci_unmap_sg(dev->pdev, &db_tab->page[i].mem, 1, PCI_DMA_TODEVICE);
+            dma_unmap_sg(&dev->pdev->dev, &db_tab->page[i].mem, 1,
+                         DMA_TO_DEVICE);
             unpin_user_page(sg_page(&db_tab->page[i].mem));
         }
     }
--- a/drivers/infiniband/hw/qib/qib_file_ops.c
+++ b/drivers/infiniband/hw/qib/qib_file_ops.c
@@ -429,8 +429,8 @@ cleanup:
             dd->f_put_tid(dd, &tidbase[tid],
                           RCVHQ_RCV_TYPE_EXPECTED,
                           dd->tidinvalid);
-            pci_unmap_page(dd->pcidev, phys, PAGE_SIZE,
-                           PCI_DMA_FROMDEVICE);
+            dma_unmap_page(&dd->pcidev->dev, phys,
+                           PAGE_SIZE, DMA_FROM_DEVICE);
             dd->pageshadow[ctxttid + tid] = NULL;
         }
     }
@@ -544,8 +544,8 @@ static int qib_tid_free(struct qib_ctxtdata *rcd, unsigned subctxt,
              */
            dd->f_put_tid(dd, &tidbase[tid],
                          RCVHQ_RCV_TYPE_EXPECTED, dd->tidinvalid);
-           pci_unmap_page(dd->pcidev, phys, PAGE_SIZE,
-                          PCI_DMA_FROMDEVICE);
+           dma_unmap_page(&dd->pcidev->dev, phys, PAGE_SIZE,
+                          DMA_FROM_DEVICE);
            qib_release_user_pages(&p, 1);
         }
     }
@@ -1781,8 +1781,8 @@ static void unlock_expected_tids(struct qib_ctxtdata *rcd)
         phys = dd->physshadow[i];
         dd->physshadow[i] = dd->tidinvalid;
         dd->pageshadow[i] = NULL;
-        pci_unmap_page(dd->pcidev, phys, PAGE_SIZE,
-                       PCI_DMA_FROMDEVICE);
+        dma_unmap_page(&dd->pcidev->dev, phys, PAGE_SIZE,
+                       DMA_FROM_DEVICE);
         qib_release_user_pages(&p, 1);
         cnt++;
     }
--- a/drivers/infiniband/hw/qib/qib_init.c
+++ b/drivers/infiniband/hw/qib/qib_init.c
@@ -1335,8 +1335,8 @@ static void cleanup_device_data(struct qib_devdata *dd)
         for (i = ctxt_tidbase; i < maxtid; i++) {
             if (!tmpp[i])
                 continue;
-            pci_unmap_page(dd->pcidev, tmpd[i],
-                           PAGE_SIZE, PCI_DMA_FROMDEVICE);
+            dma_unmap_page(&dd->pcidev->dev, tmpd[i],
+                           PAGE_SIZE, DMA_FROM_DEVICE);
             qib_release_user_pages(&tmpp[i], 1);
             tmpp[i] = NULL;
         }
--- a/drivers/infiniband/hw/qib/qib_user_pages.c
+++ b/drivers/infiniband/hw/qib/qib_user_pages.c
@@ -60,15 +60,15 @@ int qib_map_page(struct pci_dev *hwdev, struct page *page, dma_addr_t *daddr)
 {
     dma_addr_t phys;
 
-    phys = pci_map_page(hwdev, page, 0, PAGE_SIZE, PCI_DMA_FROMDEVICE);
-    if (pci_dma_mapping_error(hwdev, phys))
+    phys = dma_map_page(&hwdev->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
+    if (dma_mapping_error(&hwdev->dev, phys))
         return -ENOMEM;
 
     if (!phys) {
-        pci_unmap_page(hwdev, phys, PAGE_SIZE, PCI_DMA_FROMDEVICE);
-        phys = pci_map_page(hwdev, page, 0, PAGE_SIZE,
-                            PCI_DMA_FROMDEVICE);
-        if (pci_dma_mapping_error(hwdev, phys))
+        dma_unmap_page(&hwdev->dev, phys, PAGE_SIZE, DMA_FROM_DEVICE);
+        phys = dma_map_page(&hwdev->dev, page, 0, PAGE_SIZE,
+                            DMA_FROM_DEVICE);
+        if (dma_mapping_error(&hwdev->dev, phys))
             return -ENOMEM;
         /*
          * FIXME: If we get 0 again, we should keep this page,
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
@@ -811,18 +811,10 @@ static int pvrdma_pci_probe(struct pci_dev *pdev,
     }
 
     /* Enable 64-Bit DMA */
-    if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
-        ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+    if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) != 0) {
+        ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
         if (ret != 0) {
-            dev_err(&pdev->dev,
-                    "pci_set_consistent_dma_mask failed\n");
-            goto err_free_resource;
-        }
-    } else {
-        ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
-        if (ret != 0) {
-            dev_err(&pdev->dev,
-                    "pci_set_dma_mask failed\n");
+            dev_err(&pdev->dev, "dma_set_mask failed\n");
             goto err_free_resource;
         }
     }