@@ -296,6 +296,12 @@
 /* descriptor aligned size */
 #define MVNETA_DESC_ALIGNED_SIZE	32

+/* Number of bytes to be taken into account by HW when putting incoming data
+ * to the buffers. It is needed in case NET_SKB_PAD exceeds maximum packet
+ * offset supported in MVNETA_RXQ_CONFIG_REG(q) registers.
+ */
+#define MVNETA_RX_PKT_OFFSET_CORRECTION	64
+
 #define MVNETA_RX_PKT_SIZE(mtu) \
 	ALIGN((mtu) + MVNETA_MH_SIZE + MVNETA_VLAN_TAG_LEN + \
 	      ETH_HLEN + ETH_FCS_LEN,			     \
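Note on the new constant: the correction is added to the buffer's DMA address at refill time, so the offset that still has to be programmed into MVNETA_RXQ_CONFIG_REG(q) stays within the register field's range while the CPU keeps its full NET_SKB_PAD headroom. A standalone C sketch of that arithmetic (not driver code; the NET_SKB_PAD values below are assumptions for illustration — the kernel defines it as max(32, L1_CACHE_BYTES)):

#include <stdio.h>

#define MVNETA_RX_PKT_OFFSET_CORRECTION	64

static int rx_offset_correction(int net_skb_pad)
{
	int corr = net_skb_pad - MVNETA_RX_PKT_OFFSET_CORRECTION;

	return corr > 0 ? corr : 0;	/* the driver's max(0, ...) */
}

int main(void)
{
	int pads[] = { 32, 64, 128 };	/* assumed NET_SKB_PAD candidates */
	int i;

	for (i = 0; i < 3; i++) {
		int corr = rx_offset_correction(pads[i]);

		printf("NET_SKB_PAD=%3d -> correction=%3d, hw offset=%3d\n",
		       pads[i], corr, pads[i] - corr);
	}
	return 0;
}

On 32-bit platforms the subtraction goes negative and clamps to zero, so behaviour there is unchanged.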
@@ -391,6 +397,9 @@ struct mvneta_port {
 	spinlock_t lock;
 	bool is_stopped;

+	u32 cause_rx_tx;
+	struct napi_struct napi;
+
 	/* Core clock */
 	struct clk *clk;
 	/* AXI clock */
@@ -416,6 +425,10 @@ struct mvneta_port {
 	u64 ethtool_stats[ARRAY_SIZE(mvneta_statistics)];

 	u32 indir[MVNETA_RSS_LU_TABLE_SIZE];
+
+	/* Flags for special SoC configurations */
+	bool neta_armada3700;
+	u16 rx_offset_correction;
 };

 /* The mvneta_tx_desc and mvneta_rx_desc structures describe the
@@ -561,6 +574,9 @@ struct mvneta_rx_queue {
 	u32 pkts_coal;
 	u32 time_coal;

+	/* Virtual address of the RX buffer */
+	void **buf_virt_addr;
+
 	/* Virtual address of the RX DMA descriptors array */
 	struct mvneta_rx_desc *descs;

@@ -955,14 +971,9 @@ static int mvneta_mbus_io_win_set(struct mvneta_port *pp, u32 base, u32 wsize,
 	return 0;
 }

-/* Assign and initialize pools for port. In case of fail
- * buffer manager will remain disabled for current port.
- */
-static int mvneta_bm_port_init(struct platform_device *pdev,
-			       struct mvneta_port *pp)
+static int mvneta_bm_port_mbus_init(struct mvneta_port *pp)
 {
-	struct device_node *dn = pdev->dev.of_node;
-	u32 long_pool_id, short_pool_id, wsize;
+	u32 wsize;
 	u8 target, attr;
 	int err;

@@ -981,6 +992,25 @@ static int mvneta_bm_port_init(struct platform_device *pdev,
 		netdev_info(pp->dev, "fail to configure mbus window to BM\n");
 		return err;
 	}
+	return 0;
+}
+
+/* Assign and initialize pools for port. In case of fail
+ * buffer manager will remain disabled for current port.
+ */
+static int mvneta_bm_port_init(struct platform_device *pdev,
+			       struct mvneta_port *pp)
+{
+	struct device_node *dn = pdev->dev.of_node;
+	u32 long_pool_id, short_pool_id;
+
+	if (!pp->neta_armada3700) {
+		int ret;
+
+		ret = mvneta_bm_port_mbus_init(pp);
+		if (ret)
+			return ret;
+	}

 	if (of_property_read_u32(dn, "bm,pool-long", &long_pool_id)) {
 		netdev_info(pp->dev, "missing long pool id\n");
@@ -1349,22 +1379,27 @@ static void mvneta_defaults_set(struct mvneta_port *pp)
 	for_each_present_cpu(cpu) {
 		int rxq_map = 0, txq_map = 0;
 		int rxq, txq;
+		if (!pp->neta_armada3700) {
+			for (rxq = 0; rxq < rxq_number; rxq++)
+				if ((rxq % max_cpu) == cpu)
+					rxq_map |= MVNETA_CPU_RXQ_ACCESS(rxq);

-		for (rxq = 0; rxq < rxq_number; rxq++)
-			if ((rxq % max_cpu) == cpu)
-				rxq_map |= MVNETA_CPU_RXQ_ACCESS(rxq);
+			for (txq = 0; txq < txq_number; txq++)
+				if ((txq % max_cpu) == cpu)
+					txq_map |= MVNETA_CPU_TXQ_ACCESS(txq);

-		for (txq = 0; txq < txq_number; txq++)
-			if ((txq % max_cpu) == cpu)
-				txq_map |= MVNETA_CPU_TXQ_ACCESS(txq);
+			/* With only one TX queue we configure a special case
+			 * which will allow to get all the irq on a single
+			 * CPU
+			 */
+			if (txq_number == 1)
+				txq_map = (cpu == pp->rxq_def) ?
+					MVNETA_CPU_TXQ_ACCESS(1) : 0;

-		/* With only one TX queue we configure a special case
-		 * which will allow to get all the irq on a single
-		 * CPU
-		 */
-		if (txq_number == 1)
-			txq_map = (cpu == pp->rxq_def) ?
-				MVNETA_CPU_TXQ_ACCESS(1) : 0;
+		} else {
+			txq_map = MVNETA_CPU_TXQ_ACCESS_ALL_MASK;
+			rxq_map = MVNETA_CPU_RXQ_ACCESS_ALL_MASK;
+		}

 		mvreg_write(pp, MVNETA_CPU_MAP(cpu), rxq_map | txq_map);
 	}
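The per-CPU queue distribution kept for the non-3700 path is plain modulo arithmetic: queue q lands on CPU (q % max_cpu). A standalone sketch (MVNETA_CPU_RXQ_ACCESS is modelled here as a simple bit shift, which is what the driver's mask amounts to):

#include <stdio.h>

#define MVNETA_CPU_RXQ_ACCESS(rxq)	(1u << (rxq))	/* modelled, see above */

int main(void)
{
	int max_cpu = 2, rxq_number = 8;
	int cpu, rxq;

	for (cpu = 0; cpu < max_cpu; cpu++) {
		unsigned int rxq_map = 0;

		for (rxq = 0; rxq < rxq_number; rxq++)
			if ((rxq % max_cpu) == cpu)
				rxq_map |= MVNETA_CPU_RXQ_ACCESS(rxq);
		printf("cpu%d rxq_map = 0x%02x\n", cpu, rxq_map);
	}
	return 0;
}

With 2 CPUs and 8 queues this prints 0x55 and 0xaa — even queues on CPU 0, odd queues on CPU 1. On Armada 3700 all queues map to every CPU via the ACCESS_ALL masks instead.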
@@ -1573,10 +1608,14 @@ static void mvneta_tx_done_pkts_coal_set(struct mvneta_port *pp,

 /* Handle rx descriptor fill by setting buf_cookie and buf_phys_addr */
 static void mvneta_rx_desc_fill(struct mvneta_rx_desc *rx_desc,
-				u32 phys_addr, u32 cookie)
+				u32 phys_addr, void *virt_addr,
+				struct mvneta_rx_queue *rxq)
 {
-	rx_desc->buf_cookie = cookie;
+	int i;
+
 	rx_desc->buf_phys_addr = phys_addr;
+	i = rx_desc - rxq->descs;
+	rxq->buf_virt_addr[i] = virt_addr;
 }

 /* Decrement sent descriptors counter */
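The index computation above relies on C pointer subtraction yielding an element count, not a byte offset. That is what lets the buffer's virtual address move out of the 32-bit buf_cookie field — too small for a pointer on 64-bit systems — into the parallel rxq->buf_virt_addr array. A minimal standalone model:

#include <stddef.h>
#include <stdio.h>

/* Simplified stand-in for the hardware descriptor layout. */
struct rx_desc {
	unsigned int buf_phys_addr;
	unsigned int buf_cookie;	/* 32 bits: cannot hold a 64-bit pointer */
};

int main(void)
{
	struct rx_desc descs[128];
	void *buf_virt_addr[128];
	char buffer[2048];

	struct rx_desc *rx_desc = &descs[42];
	ptrdiff_t i = rx_desc - descs;	/* element index, not byte offset */

	buf_virt_addr[i] = buffer;	/* side array replaces buf_cookie */
	printf("index = %td\n", i);	/* prints 42 */
	return 0;
}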
@@ -1781,7 +1820,8 @@ EXPORT_SYMBOL_GPL(mvneta_frag_free);

 /* Refill processing for SW buffer management */
 static int mvneta_rx_refill(struct mvneta_port *pp,
-			    struct mvneta_rx_desc *rx_desc)
+			    struct mvneta_rx_desc *rx_desc,
+			    struct mvneta_rx_queue *rxq)

 {
 	dma_addr_t phys_addr;
@@ -1799,7 +1839,8 @@ static int mvneta_rx_refill(struct mvneta_port *pp,
 		return -ENOMEM;
 	}

-	mvneta_rx_desc_fill(rx_desc, phys_addr, (u32)data);
+	phys_addr += pp->rx_offset_correction;
+	mvneta_rx_desc_fill(rx_desc, phys_addr, data, rxq);
 	return 0;
 }

@@ -1861,7 +1902,7 @@ static void mvneta_rxq_drop_pkts(struct mvneta_port *pp,

 	for (i = 0; i < rxq->size; i++) {
 		struct mvneta_rx_desc *rx_desc = rxq->descs + i;
-		void *data = (void *)rx_desc->buf_cookie;
+		void *data = rxq->buf_virt_addr[i];

 		dma_unmap_single(pp->dev->dev.parent, rx_desc->buf_phys_addr,
 				 MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE);
@@ -1894,12 +1935,13 @@ static int mvneta_rx_swbm(struct mvneta_port *pp, int rx_todo,
 		unsigned char *data;
 		dma_addr_t phys_addr;
 		u32 rx_status, frag_size;
-		int rx_bytes, err;
+		int rx_bytes, err, index;

 		rx_done++;
 		rx_status = rx_desc->status;
 		rx_bytes = rx_desc->data_size - (ETH_FCS_LEN + MVNETA_MH_SIZE);
-		data = (unsigned char *)rx_desc->buf_cookie;
+		index = rx_desc - rxq->descs;
+		data = rxq->buf_virt_addr[index];
 		phys_addr = rx_desc->buf_phys_addr;

 		if (!mvneta_rxq_desc_is_first_last(rx_status) ||
@@ -1918,7 +1960,7 @@ err_drop_frame:
 				goto err_drop_frame;

 			dma_sync_single_range_for_cpu(dev->dev.parent,
-						      rx_desc->buf_phys_addr,
+						      phys_addr,
 						      MVNETA_MH_SIZE + NET_SKB_PAD,
 						      rx_bytes,
 						      DMA_FROM_DEVICE);
@@ -1938,7 +1980,7 @@ err_drop_frame:
 		}

 		/* Refill processing */
-		err = mvneta_rx_refill(pp, rx_desc);
+		err = mvneta_rx_refill(pp, rx_desc, rxq);
 		if (err) {
 			netdev_err(dev, "Linux processing - Can't refill\n");
 			rxq->missed++;
@@ -2020,7 +2062,7 @@ static int mvneta_rx_hwbm(struct mvneta_port *pp, int rx_todo,
 		rx_done++;
 		rx_status = rx_desc->status;
 		rx_bytes = rx_desc->data_size - (ETH_FCS_LEN + MVNETA_MH_SIZE);
-		data = (unsigned char *)rx_desc->buf_cookie;
+		data = (u8 *)(uintptr_t)rx_desc->buf_cookie;
 		phys_addr = rx_desc->buf_phys_addr;

 		pool_id = MVNETA_RX_GET_BM_POOL_ID(rx_desc);
 		bm_pool = &pp->bm_priv->bm_pools[pool_id];
@@ -2609,6 +2651,17 @@ static void mvneta_set_rx_mode(struct net_device *dev)

 /* Interrupt handling - the callback for request_irq() */
 static irqreturn_t mvneta_isr(int irq, void *dev_id)
 {
+	struct mvneta_port *pp = (struct mvneta_port *)dev_id;
+
+	mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
+	napi_schedule(&pp->napi);
+
+	return IRQ_HANDLED;
+}
+
+/* Interrupt handling - the callback for request_percpu_irq() */
+static irqreturn_t mvneta_percpu_isr(int irq, void *dev_id)
+{
 	struct mvneta_pcpu_port *port = (struct mvneta_pcpu_port *)dev_id;

@@ -2657,7 +2710,7 @@ static int mvneta_poll(struct napi_struct *napi, int budget)
 	struct mvneta_pcpu_port *port = this_cpu_ptr(pp->ports);

 	if (!netif_running(pp->dev)) {
-		napi_complete(&port->napi);
+		napi_complete(napi);
 		return rx_done;
 	}

@@ -2686,7 +2739,8 @@ static int mvneta_poll(struct napi_struct *napi, int budget)
 	 */
 	rx_queue = fls(((cause_rx_tx >> 8) & 0xff));

-	cause_rx_tx |= port->cause_rx_tx;
+	cause_rx_tx |= pp->neta_armada3700 ? pp->cause_rx_tx :
+		port->cause_rx_tx;

 	if (rx_queue) {
 		rx_queue = rx_queue - 1;
@@ -2700,11 +2754,27 @@ static int mvneta_poll(struct napi_struct *napi, int budget)

 	if (budget > 0) {
 		cause_rx_tx = 0;
-		napi_complete(&port->napi);
-		enable_percpu_irq(pp->dev->irq, 0);
+		napi_complete(napi);
+
+		if (pp->neta_armada3700) {
+			unsigned long flags;
+
+			local_irq_save(flags);
+			mvreg_write(pp, MVNETA_INTR_NEW_MASK,
+				    MVNETA_RX_INTR_MASK(rxq_number) |
+				    MVNETA_TX_INTR_MASK(txq_number) |
+				    MVNETA_MISCINTR_INTR_MASK);
+			local_irq_restore(flags);
+		} else {
+			enable_percpu_irq(pp->dev->irq, 0);
+		}
 	}

-	port->cause_rx_tx = cause_rx_tx;
+	if (pp->neta_armada3700)
+		pp->cause_rx_tx = cause_rx_tx;
+	else
+		port->cause_rx_tx = cause_rx_tx;
+
 	return rx_done;
 }

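How the poll loop picks an RX queue: bits 8..15 of cause_rx_tx flag pending RX queues, and fls() returns the 1-based position of the highest set bit (0 when none is set). A standalone sketch with a local fls() stand-in (the kernel provides its own):

#include <stdio.h>

/* Stand-in for the kernel's fls(): find last (most significant) set bit. */
static int fls_(unsigned int x)
{
	int r = 0;

	while (x) {
		r++;
		x >>= 1;
	}
	return r;	/* 0 if no bit set, else 1-based bit index */
}

int main(void)
{
	/* rxq0 and rxq2 pending: RX cause bits live at offsets 8..15 */
	unsigned int cause_rx_tx = (1u << 8) | (1u << 10);
	int rx_queue = fls_((cause_rx_tx >> 8) & 0xff);

	if (rx_queue)
		rx_queue = rx_queue - 1;	/* back to 0-based: queue 2 */
	printf("serving rxq %d\n", rx_queue);
	return 0;
}

The highest-numbered pending queue is served first; leftover cause bits are parked in pp->cause_rx_tx (single NAPI on Armada 3700) or port->cause_rx_tx (per-CPU NAPI) for the next poll round.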
@@ -2716,7 +2786,7 @@ static int mvneta_rxq_fill(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,

 	for (i = 0; i < num; i++) {
 		memset(rxq->descs + i, 0, sizeof(struct mvneta_rx_desc));
-		if (mvneta_rx_refill(pp, rxq->descs + i) != 0) {
+		if (mvneta_rx_refill(pp, rxq->descs + i, rxq) != 0) {
 			netdev_err(pp->dev, "%s:rxq %d, %d of %d buffs filled\n",
 				   __func__, rxq->id, i, num);
 			break;
@@ -2773,7 +2843,7 @@ static int mvneta_rxq_init(struct mvneta_port *pp,
 	mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), rxq->size);

 	/* Set Offset */
-	mvneta_rxq_offset_set(pp, rxq, NET_SKB_PAD);
+	mvneta_rxq_offset_set(pp, rxq, NET_SKB_PAD - pp->rx_offset_correction);

 	/* Set coalescing pkts and time */
 	mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal);
@@ -2784,14 +2854,14 @@ static int mvneta_rxq_init(struct mvneta_port *pp,
 		mvneta_rxq_buf_size_set(pp, rxq,
 					MVNETA_RX_BUF_SIZE(pp->pkt_size));
 		mvneta_rxq_bm_disable(pp, rxq);
+		mvneta_rxq_fill(pp, rxq, rxq->size);
 	} else {
 		mvneta_rxq_bm_enable(pp, rxq);
 		mvneta_rxq_long_pool_set(pp, rxq);
 		mvneta_rxq_short_pool_set(pp, rxq);
+		mvneta_rxq_non_occup_desc_add(pp, rxq, rxq->size);
 	}

-	mvneta_rxq_fill(pp, rxq, rxq->size);
-
 	return 0;
 }

|
|
|
|
@ -2974,11 +3044,16 @@ static void mvneta_start_dev(struct mvneta_port *pp)
|
|
|
|
|
/* start the Rx/Tx activity */
|
|
|
|
|
mvneta_port_enable(pp);
|
|
|
|
|
|
|
|
|
|
/* Enable polling on the port */
|
|
|
|
|
for_each_online_cpu(cpu) {
|
|
|
|
|
struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);
|
|
|
|
|
if (!pp->neta_armada3700) {
|
|
|
|
|
/* Enable polling on the port */
|
|
|
|
|
for_each_online_cpu(cpu) {
|
|
|
|
|
struct mvneta_pcpu_port *port =
|
|
|
|
|
per_cpu_ptr(pp->ports, cpu);
|
|
|
|
|
|
|
|
|
|
napi_enable(&port->napi);
|
|
|
|
|
napi_enable(&port->napi);
|
|
|
|
|
}
|
|
|
|
|
} else {
|
|
|
|
|
napi_enable(&pp->napi);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* Unmask interrupts. It has to be done from each CPU */
|
|
|
|
@ -3000,10 +3075,15 @@ static void mvneta_stop_dev(struct mvneta_port *pp)
|
|
|
|
|
|
|
|
|
|
phy_stop(ndev->phydev);
|
|
|
|
|
|
|
|
|
|
for_each_online_cpu(cpu) {
|
|
|
|
|
struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);
|
|
|
|
|
if (!pp->neta_armada3700) {
|
|
|
|
|
for_each_online_cpu(cpu) {
|
|
|
|
|
struct mvneta_pcpu_port *port =
|
|
|
|
|
per_cpu_ptr(pp->ports, cpu);
|
|
|
|
|
|
|
|
|
|
napi_disable(&port->napi);
|
|
|
|
|
napi_disable(&port->napi);
|
|
|
|
|
}
|
|
|
|
|
} else {
|
|
|
|
|
napi_disable(&pp->napi);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
netif_carrier_off(pp->dev);
|
|
|
|
@@ -3413,31 +3493,37 @@ static int mvneta_open(struct net_device *dev)
 		goto err_cleanup_rxqs;

 	/* Connect to port interrupt line */
-	ret = request_percpu_irq(pp->dev->irq, mvneta_isr,
-				 MVNETA_DRIVER_NAME, pp->ports);
+	if (pp->neta_armada3700)
+		ret = request_irq(pp->dev->irq, mvneta_isr, 0,
+				  dev->name, pp);
+	else
+		ret = request_percpu_irq(pp->dev->irq, mvneta_percpu_isr,
+					 dev->name, pp->ports);
 	if (ret) {
 		netdev_err(pp->dev, "cannot request irq %d\n", pp->dev->irq);
 		goto err_cleanup_txqs;
 	}

-	/* Enable per-CPU interrupt on all the CPU to handle our RX
-	 * queue interrupts
-	 */
-	on_each_cpu(mvneta_percpu_enable, pp, true);
+	if (!pp->neta_armada3700) {
+		/* Enable per-CPU interrupt on all the CPU to handle our RX
+		 * queue interrupts
+		 */
+		on_each_cpu(mvneta_percpu_enable, pp, true);

-	pp->is_stopped = false;
-	/* Register a CPU notifier to handle the case where our CPU
-	 * might be taken offline.
-	 */
-	ret = cpuhp_state_add_instance_nocalls(online_hpstate,
-					       &pp->node_online);
-	if (ret)
-		goto err_free_irq;
+		pp->is_stopped = false;
+		/* Register a CPU notifier to handle the case where our CPU
+		 * might be taken offline.
+		 */
+		ret = cpuhp_state_add_instance_nocalls(online_hpstate,
+						       &pp->node_online);
+		if (ret)
+			goto err_free_irq;

-	ret = cpuhp_state_add_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
-					       &pp->node_dead);
-	if (ret)
-		goto err_free_online_hp;
+		ret = cpuhp_state_add_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
+						       &pp->node_dead);
+		if (ret)
+			goto err_free_online_hp;
+	}

 	/* In default link is down */
 	netif_carrier_off(pp->dev);
@@ -3453,13 +3539,20 @@ static int mvneta_open(struct net_device *dev)
 	return 0;

 err_free_dead_hp:
-	cpuhp_state_remove_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
-					    &pp->node_dead);
+	if (!pp->neta_armada3700)
+		cpuhp_state_remove_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
+						    &pp->node_dead);
 err_free_online_hp:
-	cpuhp_state_remove_instance_nocalls(online_hpstate, &pp->node_online);
+	if (!pp->neta_armada3700)
+		cpuhp_state_remove_instance_nocalls(online_hpstate,
+						    &pp->node_online);
 err_free_irq:
-	on_each_cpu(mvneta_percpu_disable, pp, true);
-	free_percpu_irq(pp->dev->irq, pp->ports);
+	if (pp->neta_armada3700) {
+		free_irq(pp->dev->irq, pp);
+	} else {
+		on_each_cpu(mvneta_percpu_disable, pp, true);
+		free_percpu_irq(pp->dev->irq, pp->ports);
+	}
 err_cleanup_txqs:
 	mvneta_cleanup_txqs(pp);
 err_cleanup_rxqs:
@@ -3472,23 +3565,30 @@ static int mvneta_stop(struct net_device *dev)
 {
 	struct mvneta_port *pp = netdev_priv(dev);

-	/* Inform that we are stopping so we don't want to setup the
-	 * driver for new CPUs in the notifiers. The code of the
-	 * notifier for CPU online is protected by the same spinlock,
-	 * so when we get the lock, the notifer work is done.
-	 */
-	spin_lock(&pp->lock);
-	pp->is_stopped = true;
-	spin_unlock(&pp->lock);
+	if (!pp->neta_armada3700) {
+		/* Inform that we are stopping so we don't want to setup the
+		 * driver for new CPUs in the notifiers. The code of the
+		 * notifier for CPU online is protected by the same spinlock,
+		 * so when we get the lock, the notifer work is done.
+		 */
+		spin_lock(&pp->lock);
+		pp->is_stopped = true;
+		spin_unlock(&pp->lock);

-	mvneta_stop_dev(pp);
-	mvneta_mdio_remove(pp);
+		mvneta_stop_dev(pp);
+		mvneta_mdio_remove(pp);

 	cpuhp_state_remove_instance_nocalls(online_hpstate, &pp->node_online);
 	cpuhp_state_remove_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
 					    &pp->node_dead);
-	on_each_cpu(mvneta_percpu_disable, pp, true);
-	free_percpu_irq(dev->irq, pp->ports);
+		on_each_cpu(mvneta_percpu_disable, pp, true);
+		free_percpu_irq(dev->irq, pp->ports);
+	} else {
+		mvneta_stop_dev(pp);
+		mvneta_mdio_remove(pp);
+		free_irq(dev->irq, pp);
+	}
+
 	mvneta_cleanup_rxqs(pp);
 	mvneta_cleanup_txqs(pp);

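The is_stopped/spinlock handshake that the non-3700 path keeps can be modelled in userspace with a mutex: whichever of mvneta_stop() and the CPU-online notifier takes the lock second observes a consistent flag. A rough pthread analogy (an illustration of the locking idea only, not of the kernel APIs):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static bool is_stopped;

/* Stand-in for the CPU-online notifier: only sets up if not stopped. */
static void *online_notifier(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	if (!is_stopped)
		printf("notifier: setting up percpu state\n");
	else
		printf("notifier: port is stopping, nothing to do\n");
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	/* mvneta_stop(): raise the flag under the same lock ... */
	pthread_mutex_lock(&lock);
	is_stopped = true;
	pthread_mutex_unlock(&lock);

	/* ... so a notifier that runs afterwards backs off. */
	pthread_create(&t, NULL, online_notifier, NULL);
	pthread_join(t, NULL);
	return 0;
}

Armada 3700 uses a single ordinary IRQ and one NAPI context, so it has no per-CPU notifier to race with and can skip the handshake entirely.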
@@ -3767,6 +3867,11 @@ static int mvneta_ethtool_set_rxfh(struct net_device *dev, const u32 *indir,
 				   const u8 *key, const u8 hfunc)
 {
 	struct mvneta_port *pp = netdev_priv(dev);
+
+	/* Current code for Armada 3700 doesn't support RSS features yet */
+	if (pp->neta_armada3700)
+		return -EOPNOTSUPP;
+
 	/* We require at least one supported parameter to be changed
 	 * and no change in any of the unsupported parameters
 	 */
@@ -3787,6 +3892,10 @@ static int mvneta_ethtool_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
 {
 	struct mvneta_port *pp = netdev_priv(dev);

+	/* Current code for Armada 3700 doesn't support RSS features yet */
+	if (pp->neta_armada3700)
+		return -EOPNOTSUPP;
+
 	if (hfunc)
 		*hfunc = ETH_RSS_HASH_TOP;

@@ -3865,6 +3974,11 @@ static int mvneta_init(struct device *dev, struct mvneta_port *pp)
 		rxq->size = pp->rx_ring_size;
 		rxq->pkts_coal = MVNETA_RX_COAL_PKTS;
 		rxq->time_coal = MVNETA_RX_COAL_USEC;
+		rxq->buf_virt_addr = devm_kmalloc(pp->dev->dev.parent,
+						  rxq->size * sizeof(void *),
+						  GFP_KERNEL);
+		if (!rxq->buf_virt_addr)
+			return -ENOMEM;
 	}

 	return 0;
@@ -3889,16 +4003,29 @@ static void mvneta_conf_mbus_windows(struct mvneta_port *pp,
 	win_enable = 0x3f;
 	win_protect = 0;

-	for (i = 0; i < dram->num_cs; i++) {
-		const struct mbus_dram_window *cs = dram->cs + i;
-		mvreg_write(pp, MVNETA_WIN_BASE(i), (cs->base & 0xffff0000) |
-			    (cs->mbus_attr << 8) | dram->mbus_dram_target_id);
+	if (dram) {
+		for (i = 0; i < dram->num_cs; i++) {
+			const struct mbus_dram_window *cs = dram->cs + i;

-		mvreg_write(pp, MVNETA_WIN_SIZE(i),
-			    (cs->size - 1) & 0xffff0000);
+			mvreg_write(pp, MVNETA_WIN_BASE(i),
+				    (cs->base & 0xffff0000) |
+				    (cs->mbus_attr << 8) |
+				    dram->mbus_dram_target_id);

-		win_enable &= ~(1 << i);
-		win_protect |= 3 << (2 * i);
+			mvreg_write(pp, MVNETA_WIN_SIZE(i),
+				    (cs->size - 1) & 0xffff0000);
+
+			win_enable &= ~(1 << i);
+			win_protect |= 3 << (2 * i);
+		}
+	} else {
+		/* For Armada3700 open default 4GB Mbus window, leaving
+		 * arbitration of target/attribute to a different layer
+		 * of configuration.
+		 */
+		mvreg_write(pp, MVNETA_WIN_SIZE(0), 0xffff0000);
+		win_enable &= ~BIT(0);
+		win_protect = 3;
 	}

 	mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, win_enable);
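The WIN_SIZE encoding stores (size - 1) with only bits 31:16 significant, which is why the full 4GB default window for Armada3700 is written as the literal 0xffff0000: it is (1ULL << 32) - 1 truncated to the upper half-word. A standalone check of that encoding:

#include <stdio.h>

int main(void)
{
	unsigned long long size = 1ULL << 32;		/* 4 GB window */
	unsigned int reg = (unsigned int)(size - 1) & 0xffff0000;

	printf("4GB  WIN_SIZE reg = 0x%08x\n", reg);	/* 0xffff0000 */

	/* a hypothetical 256 MB DRAM chip-select, as covered by the loop */
	unsigned int cs_size = 1u << 28;

	printf("256M WIN_SIZE reg = 0x%08x\n",
	       (cs_size - 1) & 0xffff0000);		/* 0x0fff0000 */
	return 0;
}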
@@ -4019,8 +4146,19 @@ static int mvneta_probe(struct platform_device *pdev)

 	pp->rxq_def = rxq_def;

+	/* Set RX packet offset correction for platforms, whose
+	 * NET_SKB_PAD, exceeds 64B. It should be 64B for 64-bit
+	 * platforms and 0B for 32-bit ones.
+	 */
+	pp->rx_offset_correction =
+		max(0, NET_SKB_PAD - MVNETA_RX_PKT_OFFSET_CORRECTION);
+
 	pp->indir[0] = rxq_def;

+	/* Get special SoC configurations */
+	if (of_device_is_compatible(dn, "marvell,armada-3700-neta"))
+		pp->neta_armada3700 = true;
+
 	pp->clk = devm_clk_get(&pdev->dev, "core");
 	if (IS_ERR(pp->clk))
 		pp->clk = devm_clk_get(&pdev->dev, NULL);
@@ -4088,7 +4226,11 @@ static int mvneta_probe(struct platform_device *pdev)
 	pp->tx_csum_limit = tx_csum_limit;

 	dram_target_info = mv_mbus_dram_info();
-	if (dram_target_info)
+	/* Armada3700 requires setting default configuration of Mbus
+	 * windows, however without using filled mbus_dram_target_info
+	 * structure.
+	 */
+	if (dram_target_info || pp->neta_armada3700)
 		mvneta_conf_mbus_windows(pp, dram_target_info);

 	pp->tx_ring_size = MVNETA_MAX_TXD;
@@ -4121,11 +4263,20 @@ static int mvneta_probe(struct platform_device *pdev)
 		goto err_netdev;
 	}

-	for_each_present_cpu(cpu) {
-		struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);
+	/* Armada3700 network controller does not support per-cpu
+	 * operation, so only single NAPI should be initialized.
+	 */
+	if (pp->neta_armada3700) {
+		netif_napi_add(dev, &pp->napi, mvneta_poll, NAPI_POLL_WEIGHT);
+	} else {
+		for_each_present_cpu(cpu) {
+			struct mvneta_pcpu_port *port =
+				per_cpu_ptr(pp->ports, cpu);

-		netif_napi_add(dev, &port->napi, mvneta_poll, NAPI_POLL_WEIGHT);
-		port->pp = pp;
+			netif_napi_add(dev, &port->napi, mvneta_poll,
+				       NAPI_POLL_WEIGHT);
+			port->pp = pp;
+		}
 	}

 	dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
@@ -4210,6 +4361,7 @@ static int mvneta_remove(struct platform_device *pdev)
 static const struct of_device_id mvneta_match[] = {
 	{ .compatible = "marvell,armada-370-neta" },
 	{ .compatible = "marvell,armada-xp-neta" },
+	{ .compatible = "marvell,armada-3700-neta" },
 	{ }
 };
 MODULE_DEVICE_TABLE(of, mvneta_match);