From 60724d4bae14cd295b27b1610cad9a2720eb0860 Mon Sep 17 00:00:00 2001
From: Florian Fainelli
Date: Wed, 11 Oct 2017 10:57:48 -0700
Subject: [PATCH 1/5] net: dsa: Add support for DSA specific notifiers

In preparation for communicating a given DSA network device's port
number and switch index, create a specialized DSA notifier and two
events, DSA_PORT_REGISTER and DSA_PORT_UNREGISTER, which communicate
the slave network device (slave_dev), its port number and its switch
number in the tree.

This will later be used by network device drivers like bcmsysport,
which need to cooperate with their DSA network devices to set up queue
mapping and scheduling.

Signed-off-by: Florian Fainelli
Signed-off-by: David S. Miller
---
 include/net/dsa.h | 45 +++++++++++++++++++++++++++++++++++++++++++++
 net/dsa/dsa.c     | 23 +++++++++++++++++++++++
 net/dsa/slave.c   | 13 +++++++++++++
 3 files changed, 81 insertions(+)

diff --git a/include/net/dsa.h b/include/net/dsa.h
index 10dceccd9ce8..40a709a0754d 100644
--- a/include/net/dsa.h
+++ b/include/net/dsa.h
@@ -471,4 +471,49 @@ static inline int dsa_switch_resume(struct dsa_switch *ds)
 }
 #endif /* CONFIG_PM_SLEEP */
 
+enum dsa_notifier_type {
+	DSA_PORT_REGISTER,
+	DSA_PORT_UNREGISTER,
+};
+
+struct dsa_notifier_info {
+	struct net_device *dev;
+};
+
+struct dsa_notifier_register_info {
+	struct dsa_notifier_info info;	/* must be first */
+	struct net_device *master;
+	unsigned int port_number;
+	unsigned int switch_number;
+};
+
+static inline struct net_device *
+dsa_notifier_info_to_dev(const struct dsa_notifier_info *info)
+{
+	return info->dev;
+}
+
+#if IS_ENABLED(CONFIG_NET_DSA)
+int register_dsa_notifier(struct notifier_block *nb);
+int unregister_dsa_notifier(struct notifier_block *nb);
+int call_dsa_notifiers(unsigned long val, struct net_device *dev,
+		       struct dsa_notifier_info *info);
+#else
+static inline int register_dsa_notifier(struct notifier_block *nb)
+{
+	return 0;
+}
+
+static inline int unregister_dsa_notifier(struct notifier_block *nb)
+{
+	return 0;
+}
+
+static inline int call_dsa_notifiers(unsigned long val, struct net_device *dev,
+				     struct dsa_notifier_info *info)
+{
+	return NOTIFY_DONE;
+}
+#endif
+
 #endif
diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c
index 51ca2a524a27..832c659ff993 100644
--- a/net/dsa/dsa.c
+++ b/net/dsa/dsa.c
@@ -14,6 +14,7 @@
 #include
 #include
 #include
+#include <linux/notifier.h>
 #include
 #include
 #include
@@ -261,6 +262,28 @@ bool dsa_schedule_work(struct work_struct *work)
 	return queue_work(dsa_owq, work);
 }
 
+static ATOMIC_NOTIFIER_HEAD(dsa_notif_chain);
+
+int register_dsa_notifier(struct notifier_block *nb)
+{
+	return atomic_notifier_chain_register(&dsa_notif_chain, nb);
+}
+EXPORT_SYMBOL_GPL(register_dsa_notifier);
+
+int unregister_dsa_notifier(struct notifier_block *nb)
+{
+	return atomic_notifier_chain_unregister(&dsa_notif_chain, nb);
+}
+EXPORT_SYMBOL_GPL(unregister_dsa_notifier);
+
+int call_dsa_notifiers(unsigned long val, struct net_device *dev,
+		       struct dsa_notifier_info *info)
+{
+	info->dev = dev;
+	return atomic_notifier_call_chain(&dsa_notif_chain, val, info);
+}
+EXPORT_SYMBOL_GPL(call_dsa_notifiers);
+
 static int __init dsa_init_module(void)
 {
 	int rc;
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index fb2954ff198c..45f4ea845c07 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -1116,6 +1116,7 @@ int dsa_slave_resume(struct net_device *slave_dev)
 
 int dsa_slave_create(struct dsa_port *port, const char *name)
 {
+	struct dsa_notifier_register_info rinfo = { };
 	struct dsa_switch *ds = port->ds;
 	struct net_device *master;
 	struct net_device *slave_dev;
@@ -1177,6 +1178,12 @@ int dsa_slave_create(struct dsa_port *port, const char *name)
 		goto out_free;
 	}
 
+	rinfo.info.dev = slave_dev;
+	rinfo.master = master;
+	rinfo.port_number = p->dp->index;
+	rinfo.switch_number = p->dp->ds->index;
+	call_dsa_notifiers(DSA_PORT_REGISTER, slave_dev, &rinfo.info);
+
 	ret = register_netdev(slave_dev);
 	if (ret) {
 		netdev_err(master, "error %d registering interface %s\n",
@@ -1200,6 +1207,7 @@ out_free:
 void dsa_slave_destroy(struct net_device *slave_dev)
 {
 	struct dsa_slave_priv *p = netdev_priv(slave_dev);
+	struct dsa_notifier_register_info rinfo = { };
 	struct device_node *port_dn;
 
 	port_dn = p->dp->dn;
@@ -1211,6 +1219,11 @@ void dsa_slave_destroy(struct net_device *slave_dev)
 		if (of_phy_is_fixed_link(port_dn))
 			of_phy_deregister_fixed_link(port_dn);
 	}
+	rinfo.info.dev = slave_dev;
+	rinfo.master = p->dp->cpu_dp->netdev;
+	rinfo.port_number = p->dp->index;
+	rinfo.switch_number = p->dp->ds->index;
+	call_dsa_notifiers(DSA_PORT_UNREGISTER, slave_dev, &rinfo.info);
 	unregister_netdev(slave_dev);
 	free_percpu(p->stats64);
 	free_netdev(slave_dev);
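Before the tag driver and SYSTEMPORT changes that follow, it may help to see
how a master device driver is expected to consume the new notifier. This is a
minimal, illustrative module sketch, not part of the series; the example_*
names are hypothetical, while the notifier API and the
dsa_notifier_register_info fields are the ones added above. It mirrors what
bcmsysport does in patch 3:

#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/notifier.h>
#include <net/dsa.h>

/* Log the port/switch numbers carried by DSA_PORT_REGISTER events */
static int example_dsa_notifier(struct notifier_block *unused,
				unsigned long event, void *ptr)
{
	struct dsa_notifier_register_info *info = ptr;

	if (event != DSA_PORT_REGISTER)
		return NOTIFY_DONE;

	netdev_info(info->master, "slave %s: port %u, switch %u\n",
		    netdev_name(info->info.dev), info->port_number,
		    info->switch_number);
	return NOTIFY_OK;
}

static struct notifier_block example_nb = {
	.notifier_call = example_dsa_notifier,
};

static int __init example_init(void)
{
	return register_dsa_notifier(&example_nb);
}
module_init(example_init);

static void __exit example_exit(void)
{
	unregister_dsa_notifier(&example_nb);
}
module_exit(example_exit);

MODULE_LICENSE("GPL");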
From 0a5f14ce67a6e093e651d3cd75e6ac281123d93a Mon Sep 17 00:00:00 2001
From: Florian Fainelli
Date: Wed, 11 Oct 2017 10:57:49 -0700
Subject: [PATCH 2/5] net: dsa: tag_brcm: Indicate to master netdevice port + queue

We need to tell the DSA master network device doing the actual
transmission what the desired switch port and queue number are, so that
it can resolve them to the internal transmit queue they are mapped to.

Signed-off-by: Florian Fainelli
Signed-off-by: David S. Miller
---
 include/net/dsa.h  | 5 +++++
 net/dsa/tag_brcm.c | 6 ++++++
 2 files changed, 11 insertions(+)

diff --git a/include/net/dsa.h b/include/net/dsa.h
index 40a709a0754d..ce1d622734d7 100644
--- a/include/net/dsa.h
+++ b/include/net/dsa.h
@@ -516,4 +516,9 @@ static inline int call_dsa_notifiers(unsigned long val, struct net_device *dev,
 }
 #endif
 
+/* Broadcom tag specific helpers to insert and extract queue/port number */
+#define BRCM_TAG_SET_PORT_QUEUE(p, q)	((p) << 8 | q)
+#define BRCM_TAG_GET_PORT(v)		((v) >> 8)
+#define BRCM_TAG_GET_QUEUE(v)		((v) & 0xff)
+
 #endif
diff --git a/net/dsa/tag_brcm.c b/net/dsa/tag_brcm.c
index 8e4bdb9d9ae3..cc4f472fbd77 100644
--- a/net/dsa/tag_brcm.c
+++ b/net/dsa/tag_brcm.c
@@ -86,6 +86,12 @@ static struct sk_buff *brcm_tag_xmit(struct sk_buff *skb, struct net_device *dev
 	brcm_tag[2] = BRCM_IG_DSTMAP2_MASK;
 	brcm_tag[3] = (1 << p->dp->index) & BRCM_IG_DSTMAP1_MASK;
 
+	/* Now tell the master network device about the desired output queue
+	 * as well
+	 */
+	skb_set_queue_mapping(skb, BRCM_TAG_SET_PORT_QUEUE(p->dp->index,
+							   queue));
+
 	return skb;
 }
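The helpers added above pack the switch port number into the upper 8 bits of
the skb queue mapping and the switch queue number into the lower 8 bits. A
self-contained userspace sketch of that round trip (the port/queue values are
arbitrary examples, not anything the patch prescribes):

#include <assert.h>
#include <stdio.h>

/* Helpers as added to include/net/dsa.h by this patch */
#define BRCM_TAG_SET_PORT_QUEUE(p, q)	((p) << 8 | q)
#define BRCM_TAG_GET_PORT(v)		((v) >> 8)
#define BRCM_TAG_GET_QUEUE(v)		((v) & 0xff)

int main(void)
{
	/* Example values only: switch port 5, egress queue 3 */
	unsigned int port = 5, queue = 3;
	unsigned int mapping = BRCM_TAG_SET_PORT_QUEUE(port, queue);

	assert(BRCM_TAG_GET_PORT(mapping) == port);
	assert(BRCM_TAG_GET_QUEUE(mapping) == queue);
	printf("mapping=0x%04x port=%u queue=%u\n", mapping,
	       BRCM_TAG_GET_PORT(mapping), BRCM_TAG_GET_QUEUE(mapping));
	return 0;
}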
From d156576362c07e954dc36e07b0d7b0733a010f7d Mon Sep 17 00:00:00 2001
From: Florian Fainelli
Date: Wed, 11 Oct 2017 10:57:50 -0700
Subject: [PATCH 3/5] net: systemport: Establish lower/upper queue mapping

Establish a queue mapping between the DSA slave network device queues
created that correspond to switch port queues, and the transmit queues
that SYSTEMPORT manages.

We need to configure the SYSTEMPORT transmit queue with the switch port
number and switch port queue number in order for the switch and
SYSTEMPORT hardware to utilize the out-of-band congestion notification.
This hardware mechanism works by looking at the switch port egress
queue and determining whether there are enough buffers for this queue,
with that class of service, for a successful transmission; if not, it
backpressures the SYSTEMPORT queue that is being used.

For this to work, we implement a notifier which listens for the
DSA_PORT_REGISTER event. When DSA network devices are registered, the
framework calls the DSA notifiers; we extract the number of queues for
these devices and their associated port number, remember that in the
driver private structure and linearly map those queues to the TX
rings/queues that we manage.

This scheme works because DSA slave network devices always transmit
through SYSTEMPORT, so when DSA slave network devices are
destroyed/brought down, the corresponding SYSTEMPORT queues are no
longer used. Also, by design of the DSA framework, the master network
device (SYSTEMPORT) is registered first.

For faster lookups we use an array of up to DSA_MAX_PORTS * (number of
queues per port) entries holding pointers to bcm_sysport_tx_ring, such
that our ndo_select_queue() implementation can simply index into that
array to locate the corresponding ring.

Signed-off-by: Florian Fainelli
Signed-off-by: David S. Miller
---
 drivers/net/ethernet/broadcom/bcmsysport.c | 115 ++++++++++++++++++++-
 drivers/net/ethernet/broadcom/bcmsysport.h |  11 +-
 2 files changed, 121 insertions(+), 5 deletions(-)

diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 83eec9a8c275..78bed9a84e81 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -1416,7 +1416,14 @@ static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv,
 	tdma_writel(priv, 0, TDMA_DESC_RING_COUNT(index));
 	tdma_writel(priv, 1, TDMA_DESC_RING_INTR_CONTROL(index));
 	tdma_writel(priv, 0, TDMA_DESC_RING_PROD_CONS_INDEX(index));
-	tdma_writel(priv, RING_IGNORE_STATUS, TDMA_DESC_RING_MAPPING(index));
+
+	/* Configure QID and port mapping */
+	reg = tdma_readl(priv, TDMA_DESC_RING_MAPPING(index));
+	reg &= ~(RING_QID_MASK | RING_PORT_ID_MASK << RING_PORT_ID_SHIFT);
+	reg |= ring->switch_queue & RING_QID_MASK;
+	reg |= ring->switch_port << RING_PORT_ID_SHIFT;
+	reg |= RING_IGNORE_STATUS;
+	tdma_writel(priv, reg, TDMA_DESC_RING_MAPPING(index));
 	tdma_writel(priv, 0, TDMA_DESC_RING_PCP_DEI_VID(index));
 
 	/* Do not use tdma_control_bit() here because TSB_SWAP1 collides
@@ -1447,8 +1454,9 @@ static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv,
 	napi_enable(&ring->napi);
 
 	netif_dbg(priv, hw, priv->netdev,
-		  "TDMA cfg, size=%d, desc_cpu=%p\n",
-		  ring->size, ring->desc_cpu);
+		  "TDMA cfg, size=%d, desc_cpu=%p switch q=%d,port=%d\n",
+		  ring->size, ring->desc_cpu, ring->switch_queue,
+		  ring->switch_port);
 
 	return 0;
 }
@@ -2011,6 +2019,92 @@ static const struct ethtool_ops bcm_sysport_ethtool_ops = {
 	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
 };
 
+static u16 bcm_sysport_select_queue(struct net_device *dev, struct sk_buff *skb,
+				    void *accel_priv,
+				    select_queue_fallback_t fallback)
+{
+	struct bcm_sysport_priv *priv = netdev_priv(dev);
+	u16 queue = skb_get_queue_mapping(skb);
+	struct bcm_sysport_tx_ring *tx_ring;
+	unsigned int q, port;
+
+	if (!netdev_uses_dsa(dev))
+		return fallback(dev, skb);
+
+	/* DSA tagging layer will have configured the correct queue */
+	q = BRCM_TAG_GET_QUEUE(queue);
+	port = BRCM_TAG_GET_PORT(queue);
+	tx_ring = priv->ring_map[q + port * priv->per_port_num_tx_queues];
+
+	return tx_ring->index;
+}
+
+static int bcm_sysport_map_queues(struct net_device *dev,
+				  struct dsa_notifier_register_info *info)
+{
+	struct bcm_sysport_priv *priv = netdev_priv(dev);
+	struct bcm_sysport_tx_ring *ring;
+	struct net_device *slave_dev;
+	unsigned int num_tx_queues;
+	unsigned int q, start, port;
+
+	/* We can't be setting up queue inspection for non directly attached
+	 * switches
+	 */
+	if (info->switch_number)
+		return 0;
+
+	port = info->port_number;
+	slave_dev = info->info.dev;
+
+	/* On SYSTEMPORT Lite we have half as many queues, so we cannot do a
+	 * 1:1 mapping, we can only do a 2:1 mapping. By reducing the number of
+	 * per-port (slave_dev) network device queues, we achieve just that.
+	 * This needs to happen now, before any slave network device is used,
+	 * so that it accurately reflects the number of real TX queues.
+	 */
+	if (priv->is_lite)
+		netif_set_real_num_tx_queues(slave_dev,
+					     slave_dev->num_tx_queues / 2);
+	num_tx_queues = slave_dev->real_num_tx_queues;
+
+	if (priv->per_port_num_tx_queues &&
+	    priv->per_port_num_tx_queues != num_tx_queues)
+		netdev_warn(slave_dev, "asymmetric number of per-port queues\n");
+
+	priv->per_port_num_tx_queues = num_tx_queues;
+
+	start = find_first_zero_bit(&priv->queue_bitmap, dev->num_tx_queues);
+	for (q = 0; q < num_tx_queues; q++) {
+		ring = &priv->tx_rings[q + start];
+
+		/* Just remember the mapping; actual programming is done
+		 * during bcm_sysport_init_tx_ring()
+		 */
+		ring->switch_queue = q;
+		ring->switch_port = port;
+		priv->ring_map[q + port * num_tx_queues] = ring;
+
+		/* Set all queues as being used now */
+		set_bit(q + start, &priv->queue_bitmap);
+	}
+
+	return 0;
+}
+
+static int bcm_sysport_dsa_notifier(struct notifier_block *unused,
+				    unsigned long event, void *ptr)
+{
+	struct dsa_notifier_register_info *info;
+
+	if (event != DSA_PORT_REGISTER)
+		return NOTIFY_DONE;
+
+	info = ptr;
+
+	return notifier_from_errno(bcm_sysport_map_queues(info->master, info));
+}
+
 static const struct net_device_ops bcm_sysport_netdev_ops = {
 	.ndo_start_xmit		= bcm_sysport_xmit,
 	.ndo_tx_timeout		= bcm_sysport_tx_timeout,
@@ -2023,6 +2117,7 @@ static const struct net_device_ops bcm_sysport_netdev_ops = {
 	.ndo_poll_controller	= bcm_sysport_poll_controller,
 #endif
 	.ndo_get_stats64	= bcm_sysport_get_stats64,
+	.ndo_select_queue	= bcm_sysport_select_queue,
 };
 
 #define REV_FMT	"v%2x.%02x"
@@ -2172,10 +2267,18 @@ static int bcm_sysport_probe(struct platform_device *pdev)
 
 	u64_stats_init(&priv->syncp);
 
+	priv->dsa_notifier.notifier_call = bcm_sysport_dsa_notifier;
+
+	ret = register_dsa_notifier(&priv->dsa_notifier);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to register DSA notifier\n");
+		goto err_deregister_fixed_link;
+	}
+
 	ret = register_netdev(dev);
 	if (ret) {
 		dev_err(&pdev->dev, "failed to register net_device\n");
-		goto err_deregister_fixed_link;
+		goto err_deregister_notifier;
 	}
 
 	priv->rev = topctrl_readl(priv, REV_CNTL) & REV_MASK;
@@ -2188,6 +2291,8 @@ static int bcm_sysport_probe(struct platform_device *pdev)
 
 	return 0;
 
+err_deregister_notifier:
+	unregister_dsa_notifier(&priv->dsa_notifier);
 err_deregister_fixed_link:
 	if (of_phy_is_fixed_link(dn))
 		of_phy_deregister_fixed_link(dn);
@@ -2199,11 +2304,13 @@ err_free_netdev:
 static int bcm_sysport_remove(struct platform_device *pdev)
 {
 	struct net_device *dev = dev_get_drvdata(&pdev->dev);
+	struct bcm_sysport_priv *priv = netdev_priv(dev);
 	struct device_node *dn = pdev->dev.of_node;
 
 	/* Not much to do, ndo_close has been called
 	 * and we use managed allocations
 	 */
+	unregister_dsa_notifier(&priv->dsa_notifier);
 	unregister_netdev(dev);
 	if (of_phy_is_fixed_link(dn))
 		of_phy_deregister_fixed_link(dn);
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h
index 82e401df199e..82f70a6783cb 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.h
+++ b/drivers/net/ethernet/broadcom/bcmsysport.h
@@ -404,7 +404,7 @@ struct bcm_rsb {
 #define  RING_CONS_INDEX_MASK		0xffff
 
 #define RING_MAPPING			0x14
-#define  RING_QID_MASK			0x3
+#define  RING_QID_MASK			0x7
 #define  RING_PORT_ID_SHIFT		3
 #define  RING_PORT_ID_MASK		0x7
 #define  RING_IGNORE_STATUS		(1 << 6)
@@ -712,6 +712,8 @@ struct bcm_sysport_tx_ring {
 	struct bcm_sysport_priv *priv;	/* private context backpointer */
 	unsigned long	packets;	/* packets statistics */
 	unsigned long	bytes;		/* bytes statistics */
+	unsigned int	switch_queue;	/* switch port queue number */
+	unsigned int	switch_port;	/* switch port number */
 };
 
 /* Driver private structure */
@@ -765,5 +767,12 @@ struct bcm_sysport_priv {
 
 	/* For atomic update generic 64bit value on 32bit Machine */
 	struct u64_stats_sync	syncp;
+
+	/* map information between switch port queues and local queues */
+	struct notifier_block	dsa_notifier;
+	unsigned int		per_port_num_tx_queues;
+	unsigned long		queue_bitmap;
+	struct bcm_sysport_tx_ring *ring_map[DSA_MAX_PORTS * 8];
+
 };
 #endif /* __BCM_SYSPORT_H */
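The ring_map[] lookup in bcm_sysport_select_queue() relies on the linear
layout established by bcm_sysport_map_queues(): entry index =
queue + port * per_port_num_tx_queues. A standalone sketch of that index
arithmetic (the four queues per port are an assumed example value, not
something the patch mandates):

#include <stdio.h>

#define QUEUES_PER_PORT	4	/* assumed per-port TX queue count for the example */

/* Mirror of the ring_map[] index computation used by the driver */
static unsigned int ring_index(unsigned int port, unsigned int queue,
			       unsigned int per_port_num_tx_queues)
{
	return queue + port * per_port_num_tx_queues;
}

int main(void)
{
	unsigned int port, queue;

	/* Two example ports; each switch queue lands in its own ring_map slot */
	for (port = 0; port < 2; port++)
		for (queue = 0; queue < QUEUES_PER_PORT; queue++)
			printf("port %u queue %u -> ring_map[%u]\n",
			       port, queue,
			       ring_index(port, queue, QUEUES_PER_PORT));
	return 0;
}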
From 32e47ff0cd236d5fcab8ffcce85d1dcded663f61 Mon Sep 17 00:00:00 2001
From: Florian Fainelli
Date: Wed, 11 Oct 2017 10:57:51 -0700
Subject: [PATCH 4/5] net: dsa: bcm_sf2: Turn on ACB at the switch level

Turn on the out-of-band Advanced Congestion Buffering (ACB) mechanism
at the switch level now that we have properly established the queue
mapping between the switch egress queues and the SYSTEMPORT egress
queues. This allows the switch to correctly backpressure the host
system when one of its queues drops below the configured thresholds.
This also helps achieve so-called "lossless" behavior by adapting the
TX interrupt pacing to the actual speed and capacity of the switch
port.

Signed-off-by: Florian Fainelli
Signed-off-by: David S. Miller
---
 drivers/net/dsa/bcm_sf2.c      | 30 ++++++++++++++++++++++++++++++
 drivers/net/dsa/bcm_sf2_regs.h | 23 +++++++++++++++++++++++
 2 files changed, 53 insertions(+)

diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index 7aecc98d0a18..32025b990437 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -205,6 +205,19 @@ static int bcm_sf2_port_setup(struct dsa_switch *ds, int port,
 	if (port == priv->moca_port)
 		bcm_sf2_port_intr_enable(priv, port);
 
+	/* Set per-queue pause threshold to 32 */
+	core_writel(priv, 32, CORE_TXQ_THD_PAUSE_QN_PORT(port));
+
+	/* Set ACB threshold to 24 */
+	for (i = 0; i < SF2_NUM_EGRESS_QUEUES; i++) {
+		reg = acb_readl(priv, ACB_QUEUE_CFG(port *
+						    SF2_NUM_EGRESS_QUEUES + i));
+		reg &= ~XOFF_THRESHOLD_MASK;
+		reg |= 24;
+		acb_writel(priv, reg, ACB_QUEUE_CFG(port *
+						    SF2_NUM_EGRESS_QUEUES + i));
+	}
+
 	return b53_enable_port(ds, port, phy);
 }
 
@@ -613,6 +626,20 @@ static void bcm_sf2_sw_fixed_link_update(struct dsa_switch *ds, int port,
 		status->pause = 1;
 }
 
+static void bcm_sf2_enable_acb(struct dsa_switch *ds)
+{
+	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
+	u32 reg;
+
+	/* Enable ACB globally */
+	reg = acb_readl(priv, ACB_CONTROL);
+	reg |= (ACB_FLUSH_MASK << ACB_FLUSH_SHIFT);
+	acb_writel(priv, reg, ACB_CONTROL);
+	reg &= ~(ACB_FLUSH_MASK << ACB_FLUSH_SHIFT);
+	reg |= ACB_EN | ACB_ALGORITHM;
+	acb_writel(priv, reg, ACB_CONTROL);
+}
+
 static int bcm_sf2_sw_suspend(struct dsa_switch *ds)
 {
 	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
@@ -655,6 +682,8 @@ static int bcm_sf2_sw_resume(struct dsa_switch *ds)
 			bcm_sf2_imp_setup(ds, port);
 	}
 
+	bcm_sf2_enable_acb(ds);
+
 	return 0;
 }
 
@@ -766,6 +795,7 @@ static int bcm_sf2_sw_setup(struct dsa_switch *ds)
 	}
 
 	bcm_sf2_sw_configure_vlan(ds);
+	bcm_sf2_enable_acb(ds);
 
 	return 0;
 }
diff --git a/drivers/net/dsa/bcm_sf2_regs.h b/drivers/net/dsa/bcm_sf2_regs.h
index d8b8074a47b9..d1596dfca323 100644
--- a/drivers/net/dsa/bcm_sf2_regs.h
+++ b/drivers/net/dsa/bcm_sf2_regs.h
@@ -115,6 +115,24 @@ enum bcm_sf2_reg_offs {
 #define P7_IRQ_OFF			0
 #define P_IRQ_OFF(x)			((6 - (x)) * P_NUM_IRQ)
 
+/* Register set relative to 'ACB' */
+#define ACB_CONTROL			0x00
+#define  ACB_EN				(1 << 0)
+#define  ACB_ALGORITHM			(1 << 1)
+#define  ACB_FLUSH_SHIFT		2
+#define  ACB_FLUSH_MASK			0x3
+
+#define ACB_QUEUE_0_CFG			0x08
+#define  XOFF_THRESHOLD_MASK		0x7ff
+#define  XON_EN				(1 << 11)
+#define  TOTAL_XOFF_THRESHOLD_SHIFT	12
+#define  TOTAL_XOFF_THRESHOLD_MASK	0x7ff
+#define  TOTAL_XOFF_EN			(1 << 23)
+#define  TOTAL_XON_EN			(1 << 24)
+#define  PKTLEN_SHIFT			25
+#define  PKTLEN_MASK			0x3f
+#define  ACB_QUEUE_CFG(x)		(ACB_QUEUE_0_CFG + ((x) * 0x4))
+
 /* Register set relative to 'CORE' */
 #define CORE_G_PCTL_PORT0		0x00000
 #define CORE_G_PCTL_PORT(x)		(CORE_G_PCTL_PORT0 + (x * 0x4))
@@ -237,6 +255,11 @@ enum bcm_sf2_reg_offs {
 #define CORE_PORT_VLAN_CTL_PORT(x)	(0xc400 + ((x) * 0x8))
 #define  PORT_VLAN_CTRL_MASK		0x1ff
 
+#define CORE_TXQ_THD_PAUSE_QN_PORT_0	0x2c80
+#define  TXQ_PAUSE_THD_MASK		0x7ff
+#define CORE_TXQ_THD_PAUSE_QN_PORT(x)	(CORE_TXQ_THD_PAUSE_QN_PORT_0 + \
+					(x) * 0x8)
+
 #define CORE_DEFAULT_1Q_TAG_P(x)	(0xd040 + ((x) * 8))
 #define  CFI_SHIFT			12
 #define  PRI_SHIFT			13
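The per-port ACB setup above indexes the ACB_QUEUE_CFG registers as
port * number-of-egress-queues + queue and only rewrites the XOFF threshold
field. A standalone sketch of that update (eight egress queues per port is
assumed here, matching the driver's SF2_NUM_EGRESS_QUEUES; the initial
register value is a placeholder, not a real readback):

#include <stdio.h>

#define XOFF_THRESHOLD_MASK	0x7ff	/* from bcm_sf2_regs.h */
#define NUM_EGRESS_QUEUES	8	/* assumed queues per port for the example */

int main(void)
{
	unsigned int port = 1, i;

	for (i = 0; i < NUM_EGRESS_QUEUES; i++) {
		unsigned int index = port * NUM_EGRESS_QUEUES + i;
		unsigned int reg = 0x00000123;	/* placeholder for acb_readl() */

		/* Same XOFF threshold update as bcm_sf2_port_setup() */
		reg &= ~XOFF_THRESHOLD_MASK;
		reg |= 24;
		printf("ACB_QUEUE_CFG(%u) <- 0x%08x\n", index, reg);
	}
	return 0;
}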
From 723934fb792f2dbc76ee3ac334fcde95136bf409 Mon Sep 17 00:00:00 2001
From: Florian Fainelli
Date: Wed, 11 Oct 2017 10:57:52 -0700
Subject: [PATCH 5/5] net: systemport: Turn on ACB at the SYSTEMPORT level

Now that we have established the queue mapping between the switch port
egress queues and the SYSTEMPORT egress queues, we can turn on Advanced
Congestion Buffering (ACB) at the SYSTEMPORT level. This enables the
Ethernet MAC controller to get out-of-band flow control information
directly from the switch port and queue that it monitors, so that its
internal TDMA can be appropriately backpressured.

Signed-off-by: Florian Fainelli
Signed-off-by: David S. Miller
---
 drivers/net/ethernet/broadcom/bcmsysport.c | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 78bed9a84e81..dafc26690555 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -1422,10 +1422,14 @@ static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv,
 	reg &= ~(RING_QID_MASK | RING_PORT_ID_MASK << RING_PORT_ID_SHIFT);
 	reg |= ring->switch_queue & RING_QID_MASK;
 	reg |= ring->switch_port << RING_PORT_ID_SHIFT;
-	reg |= RING_IGNORE_STATUS;
 	tdma_writel(priv, reg, TDMA_DESC_RING_MAPPING(index));
 	tdma_writel(priv, 0, TDMA_DESC_RING_PCP_DEI_VID(index));
 
+	/* Enable ACB algorithm 2 */
+	reg = tdma_readl(priv, TDMA_CONTROL);
+	reg |= tdma_control_bit(priv, ACB_ALGO);
+	tdma_writel(priv, reg, TDMA_CONTROL);
+
 	/* Do not use tdma_control_bit() here because TSB_SWAP1 collides
 	 * with the original definition of ACB_ALGO
 	 */
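With ACB enabled, the ring no longer ignores the switch queue status
(RING_IGNORE_STATUS is dropped above); what remains programmed into
TDMA_DESC_RING_MAPPING is the switch queue in the low bits and the switch port
above it. A standalone sketch of that field packing, using the masks from
bcmsysport.h (the port/queue values are arbitrary examples):

#include <stdio.h>

/* Field definitions from bcmsysport.h; RING_QID_MASK was widened to 0x7 by patch 3 */
#define RING_QID_MASK		0x7
#define RING_PORT_ID_SHIFT	3
#define RING_PORT_ID_MASK	0x7

int main(void)
{
	unsigned int switch_queue = 2, switch_port = 5;	/* example values */
	unsigned int reg = 0;

	/* Same packing as bcm_sysport_init_tx_ring() */
	reg &= ~(RING_QID_MASK | RING_PORT_ID_MASK << RING_PORT_ID_SHIFT);
	reg |= switch_queue & RING_QID_MASK;
	reg |= switch_port << RING_PORT_ID_SHIFT;

	printf("TDMA_DESC_RING_MAPPING value: 0x%02x\n", reg);
	return 0;
}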