scsi: efct: Don't pass GFP_DMA to dma_alloc_coherent()
dma_alloc_coherent() ignores the zone specifiers, so passing GFP_DMA is pointless and confusing.

Link: https://lore.kernel.org/r/20211214163605.416288-1-hch@lst.de
Reviewed-by: James Smart <jsmart2021@gmail.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
This commit is contained in:
Parent
99c66a8868
Commit
efac162a4e
|
@ -261,7 +261,7 @@ efct_firmware_write(struct efct *efct, const u8 *buf, size_t buf_len,
|
|||
|
||||
dma.size = FW_WRITE_BUFSIZE;
|
||||
dma.virt = dma_alloc_coherent(&efct->pci->dev,
|
||||
dma.size, &dma.phys, GFP_DMA);
|
||||
dma.size, &dma.phys, GFP_KERNEL);
|
||||
if (!dma.virt)
|
||||
return -ENOMEM;
|
||||
|
||||
|
|
|
@ -516,7 +516,7 @@ efct_hw_setup_io(struct efct_hw *hw)
|
|||
dma = &hw->xfer_rdy;
|
||||
dma->size = sizeof(struct fcp_txrdy) * hw->config.n_io;
|
||||
dma->virt = dma_alloc_coherent(&efct->pci->dev,
|
||||
dma->size, &dma->phys, GFP_DMA);
|
||||
dma->size, &dma->phys, GFP_KERNEL);
|
||||
if (!dma->virt)
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
@ -562,7 +562,7 @@ efct_hw_setup_io(struct efct_hw *hw)
|
|||
sizeof(struct sli4_sge);
|
||||
dma->virt = dma_alloc_coherent(&efct->pci->dev,
|
||||
dma->size, &dma->phys,
|
||||
GFP_DMA);
|
||||
GFP_KERNEL);
|
||||
if (!dma->virt) {
|
||||
efc_log_err(hw->os, "dma_alloc fail %d\n", i);
|
||||
memset(&io->def_sgl, 0,
|
||||
|
@ -618,7 +618,7 @@ efct_hw_init_prereg_io(struct efct_hw *hw)
|
|||
memset(&req, 0, sizeof(struct efc_dma));
|
||||
req.size = 32 + sgls_per_request * 16;
|
||||
req.virt = dma_alloc_coherent(&efct->pci->dev, req.size, &req.phys,
|
||||
GFP_DMA);
|
||||
GFP_KERNEL);
|
||||
if (!req.virt) {
|
||||
kfree(sgls);
|
||||
return -ENOMEM;
|
||||
|
@ -1063,7 +1063,7 @@ efct_hw_init(struct efct_hw *hw)
|
|||
dma = &hw->loop_map;
|
||||
dma->size = SLI4_MIN_LOOP_MAP_BYTES;
|
||||
dma->virt = dma_alloc_coherent(&hw->os->pci->dev, dma->size, &dma->phys,
|
||||
GFP_DMA);
|
||||
GFP_KERNEL);
|
||||
if (!dma->virt)
|
||||
return -EIO;
|
||||
|
||||
|
@ -1192,7 +1192,7 @@ efct_hw_rx_buffer_alloc(struct efct_hw *hw, u32 rqindex, u32 count,
|
|||
prq->dma.virt = dma_alloc_coherent(&efct->pci->dev,
|
||||
prq->dma.size,
|
||||
&prq->dma.phys,
|
||||
GFP_DMA);
|
||||
GFP_KERNEL);
|
||||
if (!prq->dma.virt) {
|
||||
efc_log_err(hw->os, "DMA allocation failed\n");
|
||||
kfree(rq_buf);
|
||||
|
|
|
@ -48,7 +48,7 @@ efct_io_pool_create(struct efct *efct, u32 num_sgl)
|
|||
io->rspbuf.size = SCSI_RSP_BUF_LENGTH;
|
||||
io->rspbuf.virt = dma_alloc_coherent(&efct->pci->dev,
|
||||
io->rspbuf.size,
|
||||
&io->rspbuf.phys, GFP_DMA);
|
||||
&io->rspbuf.phys, GFP_KERNEL);
|
||||
if (!io->rspbuf.virt) {
|
||||
efc_log_err(efct, "dma_alloc rspbuf failed\n");
|
||||
efct_io_pool_free(io_pool);
|
||||
|
|
|
@ -179,7 +179,7 @@ efc_nport_alloc_read_sparm64(struct efc *efc, struct efc_nport *nport)
|
|||
nport->dma.size = EFC_SPARAM_DMA_SZ;
|
||||
nport->dma.virt = dma_alloc_coherent(&efc->pci->dev,
|
||||
nport->dma.size, &nport->dma.phys,
|
||||
GFP_DMA);
|
||||
GFP_KERNEL);
|
||||
if (!nport->dma.virt) {
|
||||
efc_log_err(efc, "Failed to allocate DMA memory\n");
|
||||
efc_nport_free_resources(nport, EFC_EVT_NPORT_ALLOC_FAIL, data);
|
||||
|
@ -466,7 +466,7 @@ efc_cmd_domain_alloc(struct efc *efc, struct efc_domain *domain, u32 fcf)
|
|||
domain->dma.size = EFC_SPARAM_DMA_SZ;
|
||||
domain->dma.virt = dma_alloc_coherent(&efc->pci->dev,
|
||||
domain->dma.size,
|
||||
&domain->dma.phys, GFP_DMA);
|
||||
&domain->dma.phys, GFP_KERNEL);
|
||||
if (!domain->dma.virt) {
|
||||
efc_log_err(efc, "Failed to allocate DMA memory\n");
|
||||
return -EIO;
|
||||
|
|
|
@ -71,7 +71,7 @@ efc_els_io_alloc_size(struct efc_node *node, u32 reqlen, u32 rsplen)
|
|||
/* now allocate DMA for request and response */
|
||||
els->io.req.size = reqlen;
|
||||
els->io.req.virt = dma_alloc_coherent(&efc->pci->dev, els->io.req.size,
|
||||
&els->io.req.phys, GFP_DMA);
|
||||
&els->io.req.phys, GFP_KERNEL);
|
||||
if (!els->io.req.virt) {
|
||||
mempool_free(els, efc->els_io_pool);
|
||||
spin_unlock_irqrestore(&node->els_ios_lock, flags);
|
||||
|
@ -80,7 +80,7 @@ efc_els_io_alloc_size(struct efc_node *node, u32 reqlen, u32 rsplen)
|
|||
|
||||
els->io.rsp.size = rsplen;
|
||||
els->io.rsp.virt = dma_alloc_coherent(&efc->pci->dev, els->io.rsp.size,
|
||||
&els->io.rsp.phys, GFP_DMA);
|
||||
&els->io.rsp.phys, GFP_KERNEL);
|
||||
if (!els->io.rsp.virt) {
|
||||
dma_free_coherent(&efc->pci->dev, els->io.req.size,
|
||||
els->io.req.virt, els->io.req.phys);
|
||||
|
|
|
@ -445,7 +445,7 @@ sli_cmd_rq_create_v2(struct sli4 *sli4, u32 num_rqs,
|
|||
|
||||
dma->size = payload_size;
|
||||
dma->virt = dma_alloc_coherent(&sli4->pci->dev, dma->size,
|
||||
&dma->phys, GFP_DMA);
|
||||
&dma->phys, GFP_KERNEL);
|
||||
if (!dma->virt)
|
||||
return -EIO;
|
||||
|
||||
|
@ -508,7 +508,7 @@ __sli_queue_init(struct sli4 *sli4, struct sli4_queue *q, u32 qtype,
|
|||
|
||||
q->dma.size = size * n_entries;
|
||||
q->dma.virt = dma_alloc_coherent(&sli4->pci->dev, q->dma.size,
|
||||
&q->dma.phys, GFP_DMA);
|
||||
&q->dma.phys, GFP_KERNEL);
|
||||
if (!q->dma.virt) {
|
||||
memset(&q->dma, 0, sizeof(struct efc_dma));
|
||||
efc_log_err(sli4, "%s allocation failed\n", SLI4_QNAME[qtype]);
|
||||
|
@ -849,7 +849,7 @@ static int sli_cmd_cq_set_create(struct sli4 *sli4,
|
|||
|
||||
dma->size = payload_size;
|
||||
dma->virt = dma_alloc_coherent(&sli4->pci->dev, dma->size,
|
||||
&dma->phys, GFP_DMA);
|
||||
&dma->phys, GFP_KERNEL);
|
||||
if (!dma->virt)
|
||||
return -EIO;
|
||||
|
||||
|
@ -4413,7 +4413,7 @@ sli_get_ctrl_attributes(struct sli4 *sli4)
|
|||
psize = sizeof(struct sli4_rsp_cmn_get_cntl_addl_attributes);
|
||||
data.size = psize;
|
||||
data.virt = dma_alloc_coherent(&sli4->pci->dev, data.size,
|
||||
&data.phys, GFP_DMA);
|
||||
&data.phys, GFP_KERNEL);
|
||||
if (!data.virt) {
|
||||
memset(&data, 0, sizeof(struct efc_dma));
|
||||
efc_log_err(sli4, "Failed to allocate memory for GET_CNTL_ADDL_ATTR\n");
|
||||
|
@ -4653,7 +4653,7 @@ sli_setup(struct sli4 *sli4, void *os, struct pci_dev *pdev,
|
|||
*/
|
||||
sli4->bmbx.size = SLI4_BMBX_SIZE + sizeof(struct sli4_mcqe);
|
||||
sli4->bmbx.virt = dma_alloc_coherent(&pdev->dev, sli4->bmbx.size,
|
||||
&sli4->bmbx.phys, GFP_DMA);
|
||||
&sli4->bmbx.phys, GFP_KERNEL);
|
||||
if (!sli4->bmbx.virt) {
|
||||
memset(&sli4->bmbx, 0, sizeof(struct efc_dma));
|
||||
efc_log_err(sli4, "bootstrap mailbox allocation failed\n");
|
||||
|
@ -4674,7 +4674,7 @@ sli_setup(struct sli4 *sli4, void *os, struct pci_dev *pdev,
|
|||
sli4->vpd_data.virt = dma_alloc_coherent(&pdev->dev,
|
||||
sli4->vpd_data.size,
|
||||
&sli4->vpd_data.phys,
|
||||
GFP_DMA);
|
||||
GFP_KERNEL);
|
||||
if (!sli4->vpd_data.virt) {
|
||||
memset(&sli4->vpd_data, 0, sizeof(struct efc_dma));
|
||||
/* Note that failure isn't fatal in this specific case */
|
||||
|
@ -5070,7 +5070,7 @@ sli_cmd_post_hdr_templates(struct sli4 *sli4, void *buf, struct efc_dma *dma,
|
|||
payload_dma->size = payload_size;
|
||||
payload_dma->virt = dma_alloc_coherent(&sli4->pci->dev,
|
||||
payload_dma->size,
|
||||
&payload_dma->phys, GFP_DMA);
|
||||
&payload_dma->phys, GFP_KERNEL);
|
||||
if (!payload_dma->virt) {
|
||||
memset(payload_dma, 0, sizeof(struct efc_dma));
|
||||
efc_log_err(sli4, "mbox payload memory allocation fail\n");
|
||||
|
|
Loading…
Reference in a new issue