Christoph Hellwig says:

====================
net: don't pass a NULL struct device to DMA API functions v2

We still have a few drivers that pass a NULL struct device pointer
to DMA API functions, which is generally a bad idea: the API
implementations rely on the device not only for ops selection, but
also for the DMA mask and various other attributes.

This series contains all the easy conversions to pass a struct device.
Beyond these there is also some arch code that needs separate handling,
a driver that should not use the DMA API at all, and one that is a
complete basket case, to be dealt with separately.

Changes since v1:
 - fix an inverted ifdef in CAIF
 - update the smc911x changelog
 - split the series, this only contains the networking patches
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller 2019-02-12 12:09:24 -05:00
Parents: 99e1311475 0eb1645a8d
Commit: 5e9c51b301
9 changed files with 59 additions and 43 deletions
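
Every conversion below follows the same shape: stash a pointer to the probing device in the driver's private state, then hand its struct device to each DMA call in place of NULL (smc911x, which already caches a bare struct device * as lp->dev, is the one variation). A minimal sketch of the pattern, assuming a platform driver; the foo_* names and the ring size are illustrative, not taken from any driver in this series:

	/*
	 * Hypothetical sketch of the conversion pattern; foo_* names and
	 * the ring size are illustrative, not from any driver here.
	 */
	#include <linux/platform_device.h>
	#include <linux/dma-mapping.h>

	#define FOO_RING_BYTES	4096

	struct foo_priv {
		struct platform_device *pdev;	/* saved at probe time for DMA calls */
		void *ring;
		dma_addr_t ring_dma;
	};

	static int foo_probe(struct platform_device *pdev)
	{
		struct foo_priv *priv;

		priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
		if (!priv)
			return -ENOMEM;
		priv->pdev = pdev;

		/*
		 * Pass the real device, never NULL: the DMA layer uses it
		 * for ops selection, the dma mask and other attributes.
		 */
		priv->ring = dma_alloc_coherent(&pdev->dev, FOO_RING_BYTES,
						&priv->ring_dma, GFP_KERNEL);
		if (!priv->ring)
			return -ENOMEM;

		platform_set_drvdata(pdev, priv);
		return 0;
	}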

View file: drivers/net/caif/caif_spi.c

@@ -73,35 +73,37 @@ MODULE_PARM_DESC(spi_down_tail_align, "SPI downlink tail alignment.");
 #define LOW_WATER_MARK   100
 #define HIGH_WATER_MARK  (LOW_WATER_MARK*5)
 
-#ifdef CONFIG_UML
+#ifndef CONFIG_HAS_DMA
 
 /*
  * We sometimes use UML for debugging, but it cannot handle
  * dma_alloc_coherent so we have to wrap it.
  */
-static inline void *dma_alloc(dma_addr_t *daddr)
+static inline void *dma_alloc(struct cfspi *cfspi, dma_addr_t *daddr)
 {
 	return kmalloc(SPI_DMA_BUF_LEN, GFP_KERNEL);
 }
 
-static inline void dma_free(void *cpu_addr, dma_addr_t handle)
+static inline void dma_free(struct cfspi *cfspi, void *cpu_addr,
+			    dma_addr_t handle)
 {
 	kfree(cpu_addr);
 }
 
 #else
 
-static inline void *dma_alloc(dma_addr_t *daddr)
+static inline void *dma_alloc(struct cfspi *cfspi, dma_addr_t *daddr)
 {
-	return dma_alloc_coherent(NULL, SPI_DMA_BUF_LEN, daddr,
+	return dma_alloc_coherent(&cfspi->pdev->dev, SPI_DMA_BUF_LEN, daddr,
 				  GFP_KERNEL);
 }
 
-static inline void dma_free(void *cpu_addr, dma_addr_t handle)
+static inline void dma_free(struct cfspi *cfspi, void *cpu_addr,
+			    dma_addr_t handle)
 {
-	dma_free_coherent(NULL, SPI_DMA_BUF_LEN, cpu_addr, handle);
+	dma_free_coherent(&cfspi->pdev->dev, SPI_DMA_BUF_LEN, cpu_addr, handle);
 }
-#endif	/* CONFIG_UML */
+#endif	/* CONFIG_HAS_DMA */
 
 #ifdef CONFIG_DEBUG_FS
@@ -610,13 +612,13 @@ static int cfspi_init(struct net_device *dev)
 	}
 
 	/* Allocate DMA buffers. */
-	cfspi->xfer.va_tx[0] = dma_alloc(&cfspi->xfer.pa_tx[0]);
+	cfspi->xfer.va_tx[0] = dma_alloc(cfspi, &cfspi->xfer.pa_tx[0]);
 	if (!cfspi->xfer.va_tx[0]) {
 		res = -ENODEV;
 		goto err_dma_alloc_tx_0;
 	}
 
-	cfspi->xfer.va_rx = dma_alloc(&cfspi->xfer.pa_rx);
+	cfspi->xfer.va_rx = dma_alloc(cfspi, &cfspi->xfer.pa_rx);
 	if (!cfspi->xfer.va_rx) {
 		res = -ENODEV;
@@ -665,9 +667,9 @@ static int cfspi_init(struct net_device *dev)
 	return 0;
 
  err_create_wq:
-	dma_free(cfspi->xfer.va_rx, cfspi->xfer.pa_rx);
+	dma_free(cfspi, cfspi->xfer.va_rx, cfspi->xfer.pa_rx);
 err_dma_alloc_rx:
-	dma_free(cfspi->xfer.va_tx[0], cfspi->xfer.pa_tx[0]);
+	dma_free(cfspi, cfspi->xfer.va_tx[0], cfspi->xfer.pa_tx[0]);
 err_dma_alloc_tx_0:
 	return res;
 }
@@ -683,8 +685,8 @@ static void cfspi_uninit(struct net_device *dev)
 	cfspi->ndev = NULL;
 
 	/* Free DMA buffers. */
-	dma_free(cfspi->xfer.va_rx, cfspi->xfer.pa_rx);
-	dma_free(cfspi->xfer.va_tx[0], cfspi->xfer.pa_tx[0]);
+	dma_free(cfspi, cfspi->xfer.va_rx, cfspi->xfer.pa_rx);
+	dma_free(cfspi, cfspi->xfer.va_tx[0], cfspi->xfer.pa_tx[0]);
 	set_bit(SPI_TERMINATE, &cfspi->state);
 	wake_up_interruptible(&cfspi->wait);
 	destroy_workqueue(cfspi->wq);
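
Note the guard in the first hunk above: v1 of this patch keyed the kmalloc() fallback on CONFIG_UML with the sense inverted (hence the changelog entry), while v2 keys it on CONFIG_HAS_DMA, the property that actually matters, since UML is just one platform without DMA support. The same wrapper shape, sketched outside this driver with hypothetical bar_* names:

	/*
	 * Hypothetical wrapper in the same shape as the CAIF one above:
	 * keep building on !CONFIG_HAS_DMA platforms (such as UML) by
	 * degrading to plain kmalloc(). The bar_* names are illustrative.
	 */
	#include <linux/slab.h>
	#include <linux/dma-mapping.h>

	#ifndef CONFIG_HAS_DMA
	static inline void *bar_buf_alloc(struct device *dev, size_t len,
					  dma_addr_t *daddr)
	{
		*daddr = 0;	/* no device DMA exists on this platform */
		return kmalloc(len, GFP_KERNEL);
	}

	static inline void bar_buf_free(struct device *dev, size_t len,
					void *cpu_addr, dma_addr_t daddr)
	{
		kfree(cpu_addr);
	}
	#else
	static inline void *bar_buf_alloc(struct device *dev, size_t len,
					  dma_addr_t *daddr)
	{
		return dma_alloc_coherent(dev, len, daddr, GFP_KERNEL);
	}

	static inline void bar_buf_free(struct device *dev, size_t len,
					void *cpu_addr, dma_addr_t daddr)
	{
		dma_free_coherent(dev, len, cpu_addr, daddr);
	}
	#endif /* CONFIG_HAS_DMA */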

View file: drivers/net/ethernet/amd/au1000_eth.c

@@ -1167,7 +1167,7 @@ static int au1000_probe(struct platform_device *pdev)
 	/* Allocate the data buffers
 	 * Snooping works fine with eth on all au1xxx
 	 */
-	aup->vaddr = (u32)dma_alloc_attrs(NULL, MAX_BUF_SIZE *
+	aup->vaddr = (u32)dma_alloc_attrs(&pdev->dev, MAX_BUF_SIZE *
 					  (NUM_TX_BUFFS + NUM_RX_BUFFS),
 					  &aup->dma_addr, 0,
 					  DMA_ATTR_NON_CONSISTENT);
@@ -1349,7 +1349,7 @@ err_remap3:
 err_remap2:
 	iounmap(aup->mac);
 err_remap1:
-	dma_free_attrs(NULL, MAX_BUF_SIZE * (NUM_TX_BUFFS + NUM_RX_BUFFS),
+	dma_free_attrs(&pdev->dev, MAX_BUF_SIZE * (NUM_TX_BUFFS + NUM_RX_BUFFS),
 		       (void *)aup->vaddr, aup->dma_addr,
 		       DMA_ATTR_NON_CONSISTENT);
 err_vaddr:
@@ -1383,7 +1383,7 @@ static int au1000_remove(struct platform_device *pdev)
 		if (aup->tx_db_inuse[i])
 			au1000_ReleaseDB(aup, aup->tx_db_inuse[i]);
 
-	dma_free_attrs(NULL, MAX_BUF_SIZE * (NUM_TX_BUFFS + NUM_RX_BUFFS),
+	dma_free_attrs(&pdev->dev, MAX_BUF_SIZE * (NUM_TX_BUFFS + NUM_RX_BUFFS),
 		       (void *)aup->vaddr, aup->dma_addr,
 		       DMA_ATTR_NON_CONSISTENT);

View file: drivers/net/ethernet/cadence/macb_main.c

@@ -3673,9 +3673,9 @@ static netdev_tx_t at91ether_start_xmit(struct sk_buff *skb,
 		/* Store packet information (to free when Tx completed) */
 		lp->skb = skb;
 		lp->skb_length = skb->len;
-		lp->skb_physaddr = dma_map_single(NULL, skb->data, skb->len,
-						  DMA_TO_DEVICE);
-		if (dma_mapping_error(NULL, lp->skb_physaddr)) {
+		lp->skb_physaddr = dma_map_single(&lp->pdev->dev, skb->data,
+						  skb->len, DMA_TO_DEVICE);
+		if (dma_mapping_error(&lp->pdev->dev, lp->skb_physaddr)) {
 			dev_kfree_skb_any(skb);
 			dev->stats.tx_dropped++;
 			netdev_err(dev, "%s: DMA mapping error\n", __func__);
@@ -3765,7 +3765,7 @@ static irqreturn_t at91ether_interrupt(int irq, void *dev_id)
 		if (lp->skb) {
 			dev_kfree_skb_irq(lp->skb);
 			lp->skb = NULL;
-			dma_unmap_single(NULL, lp->skb_physaddr,
+			dma_unmap_single(&lp->pdev->dev, lp->skb_physaddr,
 					 lp->skb_length, DMA_TO_DEVICE);
 			dev->stats.tx_packets++;
 			dev->stats.tx_bytes += lp->skb_length;
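
With a real device in hand, the dma_mapping_error() check above can consult that device's DMA ops instead of a NULL pointer. The streaming-DMA lifecycle the driver now follows, as a hedged sketch with hypothetical baz_* names:

	/*
	 * Hypothetical sketch of the streaming-DMA lifecycle against a
	 * real struct device; the error handling mirrors the hunk above.
	 */
	#include <linux/dma-mapping.h>
	#include <linux/skbuff.h>

	static int baz_map_tx(struct device *dev, struct sk_buff *skb,
			      dma_addr_t *mapping)
	{
		*mapping = dma_map_single(dev, skb->data, skb->len,
					  DMA_TO_DEVICE);
		if (dma_mapping_error(dev, *mapping))
			return -ENOMEM;		/* caller drops the skb */
		return 0;
	}

	static void baz_tx_done(struct device *dev, dma_addr_t mapping,
				size_t len)
	{
		/* unmap with the same device, address, size and direction */
		dma_unmap_single(dev, mapping, len, DMA_TO_DEVICE);
	}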

View file: drivers/net/ethernet/lantiq_etop.c

@@ -112,10 +112,12 @@ struct ltq_etop_priv {
 static int
 ltq_etop_alloc_skb(struct ltq_etop_chan *ch)
 {
+	struct ltq_etop_priv *priv = netdev_priv(ch->netdev);
+
 	ch->skb[ch->dma.desc] = netdev_alloc_skb(ch->netdev, MAX_DMA_DATA_LEN);
 	if (!ch->skb[ch->dma.desc])
 		return -ENOMEM;
-	ch->dma.desc_base[ch->dma.desc].addr = dma_map_single(NULL,
+	ch->dma.desc_base[ch->dma.desc].addr = dma_map_single(&priv->pdev->dev,
 		ch->skb[ch->dma.desc]->data, MAX_DMA_DATA_LEN,
 		DMA_FROM_DEVICE);
 	ch->dma.desc_base[ch->dma.desc].addr =
@@ -487,7 +489,7 @@ ltq_etop_tx(struct sk_buff *skb, struct net_device *dev)
 	netif_trans_update(dev);
 
 	spin_lock_irqsave(&priv->lock, flags);
-	desc->addr = ((unsigned int) dma_map_single(NULL, skb->data, len,
+	desc->addr = ((unsigned int) dma_map_single(&priv->pdev->dev, skb->data, len,
 						DMA_TO_DEVICE)) - byte_offset;
 	wmb();
 	desc->ctl = LTQ_DMA_OWN | LTQ_DMA_SOP | LTQ_DMA_EOP |

View file: drivers/net/ethernet/marvell/pxa168_eth.c

@@ -201,6 +201,7 @@ struct tx_desc {
 };
 
 struct pxa168_eth_private {
+	struct platform_device *pdev;
 	int port_num;		/* User Ethernet port number */
 	int phy_addr;
 	int phy_speed;
@@ -331,7 +332,7 @@ static void rxq_refill(struct net_device *dev)
 		used_rx_desc = pep->rx_used_desc_q;
 		p_used_rx_desc = &pep->p_rx_desc_area[used_rx_desc];
 		size = skb_end_pointer(skb) - skb->data;
-		p_used_rx_desc->buf_ptr = dma_map_single(NULL,
+		p_used_rx_desc->buf_ptr = dma_map_single(&pep->pdev->dev,
 							 skb->data,
 							 size,
 							 DMA_FROM_DEVICE);
@@ -743,7 +744,7 @@ static int txq_reclaim(struct net_device *dev, int force)
 				netdev_err(dev, "Error in TX\n");
 			dev->stats.tx_errors++;
 		}
-		dma_unmap_single(NULL, addr, count, DMA_TO_DEVICE);
+		dma_unmap_single(&pep->pdev->dev, addr, count, DMA_TO_DEVICE);
 		if (skb)
 			dev_kfree_skb_irq(skb);
 		released++;
@@ -805,7 +806,7 @@ static int rxq_process(struct net_device *dev, int budget)
 		if (rx_next_curr_desc == rx_used_desc)
 			pep->rx_resource_err = 1;
 		pep->rx_desc_count--;
-		dma_unmap_single(NULL, rx_desc->buf_ptr,
+		dma_unmap_single(&pep->pdev->dev, rx_desc->buf_ptr,
 				 rx_desc->buf_size,
 				 DMA_FROM_DEVICE);
 		received_packets++;
@@ -1274,7 +1275,8 @@ pxa168_eth_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	length = skb->len;
 	pep->tx_skb[tx_index] = skb;
 	desc->byte_cnt = length;
-	desc->buf_ptr = dma_map_single(NULL, skb->data, length, DMA_TO_DEVICE);
+	desc->buf_ptr = dma_map_single(&pep->pdev->dev, skb->data, length,
+				       DMA_TO_DEVICE);
 
 	skb_tx_timestamp(skb);
@@ -1528,6 +1530,7 @@ static int pxa168_eth_probe(struct platform_device *pdev)
 	if (err)
 		goto err_free_mdio;
 
+	pep->pdev = pdev;
 	SET_NETDEV_DEV(dev, &pdev->dev);
 	pxa168_init_hw(pep);
 	err = register_netdev(dev);

View file: drivers/net/ethernet/moxa/moxart_ether.c

@@ -81,11 +81,13 @@ static void moxart_mac_free_memory(struct net_device *ndev)
 				 priv->rx_buf_size, DMA_FROM_DEVICE);
 
 	if (priv->tx_desc_base)
-		dma_free_coherent(NULL, TX_REG_DESC_SIZE * TX_DESC_NUM,
+		dma_free_coherent(&priv->pdev->dev,
+				  TX_REG_DESC_SIZE * TX_DESC_NUM,
 				  priv->tx_desc_base, priv->tx_base);
 
 	if (priv->rx_desc_base)
-		dma_free_coherent(NULL, RX_REG_DESC_SIZE * RX_DESC_NUM,
+		dma_free_coherent(&priv->pdev->dev,
+				  RX_REG_DESC_SIZE * RX_DESC_NUM,
 				  priv->rx_desc_base, priv->rx_base);
 
 	kfree(priv->tx_buf_base);
@@ -476,6 +478,7 @@ static int moxart_mac_probe(struct platform_device *pdev)
 	priv = netdev_priv(ndev);
 	priv->ndev = ndev;
+	priv->pdev = pdev;
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	ndev->base_addr = res->start;
@@ -491,7 +494,7 @@ static int moxart_mac_probe(struct platform_device *pdev)
 	priv->tx_buf_size = TX_BUF_SIZE;
 	priv->rx_buf_size = RX_BUF_SIZE;
 
-	priv->tx_desc_base = dma_alloc_coherent(NULL, TX_REG_DESC_SIZE *
+	priv->tx_desc_base = dma_alloc_coherent(&pdev->dev, TX_REG_DESC_SIZE *
 						TX_DESC_NUM, &priv->tx_base,
 						GFP_DMA | GFP_KERNEL);
 	if (!priv->tx_desc_base) {
@@ -499,7 +502,7 @@ static int moxart_mac_probe(struct platform_device *pdev)
 		goto init_fail;
 	}
 
-	priv->rx_desc_base = dma_alloc_coherent(NULL, RX_REG_DESC_SIZE *
+	priv->rx_desc_base = dma_alloc_coherent(&pdev->dev, RX_REG_DESC_SIZE *
 						RX_DESC_NUM, &priv->rx_base,
 						GFP_DMA | GFP_KERNEL);
 	if (!priv->rx_desc_base) {
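
These hunks also keep the DMA API's pairing rule intact: a coherent buffer must be freed against the same device and size it was allocated with, along with both addresses returned at allocation time. Now that allocation and free both use the platform device, the pairing is explicit. A small sketch, with hypothetical qux_* names and sizes:

	/*
	 * Hypothetical sketch of the alloc/free pairing; qux_* names and
	 * sizes are illustrative.
	 */
	#include <linux/dma-mapping.h>

	#define QUX_TX_BYTES	1024
	#define QUX_RX_BYTES	1024

	struct qux_rings {
		void *tx, *rx;
		dma_addr_t tx_dma, rx_dma;
	};

	static int qux_rings_init(struct device *dev, struct qux_rings *r)
	{
		r->tx = dma_alloc_coherent(dev, QUX_TX_BYTES, &r->tx_dma,
					   GFP_KERNEL);
		if (!r->tx)
			return -ENOMEM;

		r->rx = dma_alloc_coherent(dev, QUX_RX_BYTES, &r->rx_dma,
					   GFP_KERNEL);
		if (!r->rx) {
			/* free with the same device, size and both addresses */
			dma_free_coherent(dev, QUX_TX_BYTES, r->tx, r->tx_dma);
			return -ENOMEM;
		}
		return 0;
	}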

View file: drivers/net/ethernet/moxa/moxart_ether.h

@@ -292,6 +292,7 @@
 #define LINK_STATUS	0x4
 
 struct moxart_mac_priv_t {
+	struct platform_device *pdev;
 	void __iomem *base;
 	unsigned int reg_maccr;
 	unsigned int reg_imr;

View file: drivers/net/ethernet/sgi/meth.c

@@ -68,6 +68,8 @@ module_param(timeout, int, 0);
  * packets in and out, so there is place for a packet
  */
 struct meth_private {
+	struct platform_device *pdev;
+
 	/* in-memory copy of MAC Control register */
 	u64 mac_ctrl;
@@ -211,8 +213,8 @@ static void meth_check_link(struct net_device *dev)
 static int meth_init_tx_ring(struct meth_private *priv)
 {
 	/* Init TX ring */
-	priv->tx_ring = dma_alloc_coherent(NULL, TX_RING_BUFFER_SIZE,
-					   &priv->tx_ring_dma, GFP_ATOMIC);
+	priv->tx_ring = dma_alloc_coherent(&priv->pdev->dev,
+			TX_RING_BUFFER_SIZE, &priv->tx_ring_dma, GFP_KERNEL);
 	if (!priv->tx_ring)
 		return -ENOMEM;
@@ -236,7 +238,7 @@ static int meth_init_rx_ring(struct meth_private *priv)
 		priv->rx_ring[i]=(rx_packet*)(priv->rx_skbs[i]->head);
 		/* I'll need to re-sync it after each RX */
 		priv->rx_ring_dmas[i] =
-			dma_map_single(NULL, priv->rx_ring[i],
+			dma_map_single(&priv->pdev->dev, priv->rx_ring[i],
 				       METH_RX_BUFF_SIZE, DMA_FROM_DEVICE);
 		mace->eth.rx_fifo = priv->rx_ring_dmas[i];
 	}
@@ -253,7 +255,7 @@ static void meth_free_tx_ring(struct meth_private *priv)
 			dev_kfree_skb(priv->tx_skbs[i]);
 		priv->tx_skbs[i] = NULL;
 	}
-	dma_free_coherent(NULL, TX_RING_BUFFER_SIZE, priv->tx_ring,
+	dma_free_coherent(&priv->pdev->dev, TX_RING_BUFFER_SIZE, priv->tx_ring,
 			  priv->tx_ring_dma);
 }
@@ -263,7 +265,7 @@ static void meth_free_rx_ring(struct meth_private *priv)
 	int i;
 
 	for (i = 0; i < RX_RING_ENTRIES; i++) {
-		dma_unmap_single(NULL, priv->rx_ring_dmas[i],
+		dma_unmap_single(&priv->pdev->dev, priv->rx_ring_dmas[i],
 				 METH_RX_BUFF_SIZE, DMA_FROM_DEVICE);
 		priv->rx_ring[i] = 0;
 		priv->rx_ring_dmas[i] = 0;
@@ -393,7 +395,8 @@ static void meth_rx(struct net_device* dev, unsigned long int_status)
 		fifo_rptr = (fifo_rptr - 1) & 0x0f;
 	}
 	while (priv->rx_write != fifo_rptr) {
-		dma_unmap_single(NULL, priv->rx_ring_dmas[priv->rx_write],
+		dma_unmap_single(&priv->pdev->dev,
+				 priv->rx_ring_dmas[priv->rx_write],
 				 METH_RX_BUFF_SIZE, DMA_FROM_DEVICE);
 		status = priv->rx_ring[priv->rx_write]->status.raw;
 #if MFE_DEBUG
@@ -454,7 +457,8 @@ static void meth_rx(struct net_device* dev, unsigned long int_status)
 		priv->rx_ring[priv->rx_write] = (rx_packet*)skb->head;
 		priv->rx_ring[priv->rx_write]->status.raw = 0;
 		priv->rx_ring_dmas[priv->rx_write] =
-			dma_map_single(NULL, priv->rx_ring[priv->rx_write],
+			dma_map_single(&priv->pdev->dev,
+				       priv->rx_ring[priv->rx_write],
 				       METH_RX_BUFF_SIZE, DMA_FROM_DEVICE);
 		mace->eth.rx_fifo = priv->rx_ring_dmas[priv->rx_write];
 		ADVANCE_RX_PTR(priv->rx_write);
@@ -637,7 +641,7 @@ static void meth_tx_1page_prepare(struct meth_private *priv,
 	}
 
 	/* first page */
-	catbuf = dma_map_single(NULL, buffer_data, buffer_len,
+	catbuf = dma_map_single(&priv->pdev->dev, buffer_data, buffer_len,
 				DMA_TO_DEVICE);
 	desc->data.cat_buf[0].form.start_addr = catbuf >> 3;
 	desc->data.cat_buf[0].form.len = buffer_len - 1;
@@ -663,12 +667,12 @@ static void meth_tx_2page_prepare(struct meth_private *priv,
 	}
 
 	/* first page */
-	catbuf1 = dma_map_single(NULL, buffer1_data, buffer1_len,
+	catbuf1 = dma_map_single(&priv->pdev->dev, buffer1_data, buffer1_len,
 				 DMA_TO_DEVICE);
 	desc->data.cat_buf[0].form.start_addr = catbuf1 >> 3;
 	desc->data.cat_buf[0].form.len = buffer1_len - 1;
 	/* second page */
-	catbuf2 = dma_map_single(NULL, buffer2_data, buffer2_len,
+	catbuf2 = dma_map_single(&priv->pdev->dev, buffer2_data, buffer2_len,
 				 DMA_TO_DEVICE);
 	desc->data.cat_buf[1].form.start_addr = catbuf2 >> 3;
 	desc->data.cat_buf[1].form.len = buffer2_len - 1;
@@ -840,6 +844,7 @@ static int meth_probe(struct platform_device *pdev)
 	memcpy(dev->dev_addr, o2meth_eaddr, ETH_ALEN);
 
 	priv = netdev_priv(dev);
+	priv->pdev = pdev;
 	spin_lock_init(&priv->meth_lock);
 	SET_NETDEV_DEV(dev, &pdev->dev);
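
Besides the device pointer, the meth_init_tx_ring() hunk above also relaxes GFP_ATOMIC to GFP_KERNEL; ring setup appears to run from the open path in process context, where a sleeping allocation is permitted and more likely to succeed. A hedged sketch of that idea, with hypothetical quux_* names:

	/*
	 * Hypothetical .ndo_open path: process context, so a sleeping
	 * GFP_KERNEL allocation is fine (and more robust than GFP_ATOMIC).
	 * The quux_* names are illustrative.
	 */
	#include <linux/netdevice.h>
	#include <linux/platform_device.h>
	#include <linux/dma-mapping.h>

	struct quux_priv {
		struct platform_device *pdev;
		void *tx_ring;
		dma_addr_t tx_ring_dma;
	};

	static int quux_open(struct net_device *dev)
	{
		struct quux_priv *priv = netdev_priv(dev);

		priv->tx_ring = dma_alloc_coherent(&priv->pdev->dev, PAGE_SIZE,
						   &priv->tx_ring_dma,
						   GFP_KERNEL);
		if (!priv->tx_ring)
			return -ENOMEM;
		return 0;
	}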

View file: drivers/net/ethernet/smsc/smc911x.c

@@ -1188,7 +1188,7 @@ smc911x_tx_dma_irq(void *data)
 	DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, dev, "TX DMA irq handler\n");
 	BUG_ON(skb == NULL);
-	dma_unmap_single(NULL, tx_dmabuf, tx_dmalen, DMA_TO_DEVICE);
+	dma_unmap_single(lp->dev, tx_dmabuf, tx_dmalen, DMA_TO_DEVICE);
 	netif_trans_update(dev);
 	dev_kfree_skb_irq(skb);
 	lp->current_tx_skb = NULL;
@@ -1219,7 +1219,7 @@ smc911x_rx_dma_irq(void *data)
 	DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);
 	DBG(SMC_DEBUG_RX | SMC_DEBUG_DMA, dev, "RX DMA irq handler\n");
-	dma_unmap_single(NULL, rx_dmabuf, rx_dmalen, DMA_FROM_DEVICE);
+	dma_unmap_single(lp->dev, rx_dmabuf, rx_dmalen, DMA_FROM_DEVICE);
 	BUG_ON(skb == NULL);
 	lp->current_rx_skb = NULL;
 	PRINT_PKT(skb->data, skb->len);