net: bcmgenet: rework Rx queue init

In preparation for supporting multiple Rx queues:
1. Move the initialization of priv->num_rx_bds, priv->rx_bds, and
   priv->rx_cbs from bcmgenet_init_rx_ring() to bcmgenet_init_dma()
   since they are not specific to a single Rx queue. Mimics the Tx
   init model where priv->num_tx_bds, priv->tx_bds, and priv->tx_cbs
   are initialized in bcmgenet_init_dma().
2. Program DMA_MBUF_DONE_THRESH = 1 so that future Rx queues Q0-Q15
   will get per-packet Rx interrupts.
3. Group DMA_START_ADDR, RDMA_READ_PTR, RDMA_WRITE_PTR, and DMA_END_ADDR
   initialization together. Mimics the Tx init model.
4. There is a 1-to-1 mapping between RxCBs and RxBDs.
   Precalculate RxCB->bd_addr so that it can be used in the future
   (see the sketch after this list).
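
As a sketch of item 4's future use (illustration only, not part of this
patch): bcmgenet_rx_refill_one() below is a hypothetical helper name,
while dmadesc_set_addr() is the driver's existing descriptor-address
accessor. With cb->bd_addr precalculated, per-packet paths can look the
descriptor address up instead of recomputing it from ring base + index.

    /* Hypothetical refill step using the precalculated address */
    static void bcmgenet_rx_refill_one(struct bcmgenet_priv *priv,
                                       struct enet_cb *cb,
                                       dma_addr_t mapping)
    {
            /* point the hardware descriptor at the new buffer */
            dmadesc_set_addr(priv, cb->bd_addr, mapping);
    }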

Signed-off-by: Petri Gynther <pgynther@google.com>
Reviewed-by: Florian Fainelli <f.fainelli@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Petri Gynther 2015-03-06 13:45:00 -08:00, committed by David S. Miller
Parent: ae00ec9548
Commit: 6f5a272c99
1 changed file with 27 additions and 16 deletions


@@ -1783,37 +1783,33 @@ static int bcmgenet_init_rx_ring(struct bcmgenet_priv *priv,
 	u32 words_per_bd = WORDS_PER_BD(priv);
 	int ret;
 
-	priv->num_rx_bds = TOTAL_DESC;
-	priv->rx_bds = priv->base + priv->hw_params->rdma_offset;
 	priv->rx_bd_assign_ptr = priv->rx_bds;
 	priv->rx_bd_assign_index = 0;
 	priv->rx_c_index = 0;
 	priv->rx_read_ptr = 0;
-	priv->rx_cbs = kcalloc(priv->num_rx_bds, sizeof(struct enet_cb),
-			       GFP_KERNEL);
-	if (!priv->rx_cbs)
-		return -ENOMEM;
 
 	ret = bcmgenet_alloc_rx_buffers(priv);
 	if (ret) {
-		kfree(priv->rx_cbs);
 		return ret;
 	}
 
-	bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_WRITE_PTR);
 	bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_PROD_INDEX);
 	bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_CONS_INDEX);
+	bcmgenet_rdma_ring_writel(priv, index, 1, DMA_MBUF_DONE_THRESH);
 	bcmgenet_rdma_ring_writel(priv, index,
 				  ((size << DMA_RING_SIZE_SHIFT) |
 				   RX_BUF_LENGTH), DMA_RING_BUF_SIZE);
-	bcmgenet_rdma_ring_writel(priv, index, 0, DMA_START_ADDR);
-	bcmgenet_rdma_ring_writel(priv, index,
-				  words_per_bd * size - 1, DMA_END_ADDR);
 	bcmgenet_rdma_ring_writel(priv, index,
 				  (DMA_FC_THRESH_LO <<
 				   DMA_XOFF_THRESHOLD_SHIFT) |
 				   DMA_FC_THRESH_HI, RDMA_XON_XOFF_THRESH);
-	bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_READ_PTR);
+
+	/* Set start and end address, read and write pointers */
+	bcmgenet_rdma_ring_writel(priv, index, 0, DMA_START_ADDR);
+	bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_READ_PTR);
+	bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_WRITE_PTR);
+	bcmgenet_rdma_ring_writel(priv, index, words_per_bd * size - 1,
+				  DMA_END_ADDR);
 
 	return ret;
 }
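
A note on units in the grouped writes above (an illustrative calculation,
not from the patch): the start/end addresses and read/write pointers are
expressed in 32-bit words. For the default ring, assuming TOTAL_DESC is
256 descriptors and, say, 2 words per descriptor, DMA_END_ADDR would be
2 * 256 - 1 = 511, while DMA_START_ADDR, RDMA_READ_PTR, and RDMA_WRITE_PTR
all start at word 0. The real per-descriptor word count comes from
WORDS_PER_BD(priv).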
@@ -1976,18 +1972,33 @@ static int bcmgenet_init_dma(struct bcmgenet_priv *priv)
 	unsigned int i;
 	struct enet_cb *cb;
 
-	netif_dbg(priv, hw, priv->dev, "bcmgenet: init_edma\n");
+	netif_dbg(priv, hw, priv->dev, "%s\n", __func__);
 
-	/* by default, enable ring 16 (descriptor based) */
+	/* Init rDma */
+	bcmgenet_rdma_writel(priv, DMA_MAX_BURST_LENGTH, DMA_SCB_BURST_SIZE);
+
+	/* Initialize common Rx ring structures */
+	priv->rx_bds = priv->base + priv->hw_params->rdma_offset;
+	priv->num_rx_bds = TOTAL_DESC;
+	priv->rx_cbs = kcalloc(priv->num_rx_bds, sizeof(struct enet_cb),
+			       GFP_KERNEL);
+	if (!priv->rx_cbs)
+		return -ENOMEM;
+
+	for (i = 0; i < priv->num_rx_bds; i++) {
+		cb = priv->rx_cbs + i;
+		cb->bd_addr = priv->rx_bds + i * DMA_DESC_SIZE;
+	}
+
+	/* Initialize Rx default queue 16 */
 	ret = bcmgenet_init_rx_ring(priv, DESC_INDEX, TOTAL_DESC);
 	if (ret) {
 		netdev_err(priv->dev, "failed to initialize RX ring\n");
+		bcmgenet_free_rx_buffers(priv);
+		kfree(priv->rx_cbs);
 		return ret;
 	}
 
-	/* init rDma */
-	bcmgenet_rdma_writel(priv, DMA_MAX_BURST_LENGTH, DMA_SCB_BURST_SIZE);
-
-	/* init tDma */
+	/* Init tDma */
 	bcmgenet_tdma_writel(priv, DMA_MAX_BURST_LENGTH, DMA_SCB_BURST_SIZE);
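
Since this is preparation for multiple Rx queues, a rough sketch of the
direction it enables (rx_queues and rx_bds_per_q are assumed field names
invented for this sketch, and bcmgenet_init_rx_ring() would likely also
need to learn a per-queue start offset):

    /* Hypothetical follow-up, not part of this patch: the common
     * rx_cbs/rx_bds setup runs once in bcmgenet_init_dma(), then each
     * hardware queue initializes its own slice of the descriptor ring.
     */
    for (i = 0; i < priv->hw_params->rx_queues; i++) {
            ret = bcmgenet_init_rx_ring(priv, i,
                                        priv->hw_params->rx_bds_per_q);
            if (ret)
                    return ret; /* real code would unwind buffers/cbs */
    }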