drivers: net: remove NETDEV_TX_LOCKED
Replace the trylock by a full spin_lock and remove the TX_LOCKED return
value. A followup patch will remove TX_LOCKED from the kernel.

Cc: Jon Mason <jdmason@kudzu.us>
Cc: Andy Gospodarek <andy@greyhouse.net>
Signed-off-by: Florian Westphal <fw@strlen.de>
Signed-off-by: David S. Miller <davem@davemloft.net>
Parent: 926f273001
Commit: a6086a8937
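The conversion applied to every driver below follows the same shape. A minimal sketch, using a hypothetical driver (struct foo_priv, foo_xmit_old and foo_xmit_new are illustrative names, not code from the files touched by this commit):

/* Hypothetical driver used only to illustrate the conversion. */
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>

struct foo_priv {
	spinlock_t tx_lock;
};

/* Old pattern: give up when the lock is contended and ask the core to
 * requeue the packet by returning NETDEV_TX_LOCKED. */
static netdev_tx_t foo_xmit_old(struct sk_buff *skb, struct net_device *dev)
{
	struct foo_priv *priv = netdev_priv(dev);

	if (!spin_trylock(&priv->tx_lock))
		return NETDEV_TX_LOCKED;

	/* ... queue skb to the hardware ring ... */

	spin_unlock(&priv->tx_lock);
	return NETDEV_TX_OK;
}

/* New pattern: take the lock unconditionally; only NETDEV_TX_OK and
 * NETDEV_TX_BUSY remain as possible return values. */
static netdev_tx_t foo_xmit_new(struct sk_buff *skb, struct net_device *dev)
{
	struct foo_priv *priv = netdev_priv(dev);

	spin_lock(&priv->tx_lock);

	/* ... queue skb to the hardware ring ... */

	spin_unlock(&priv->tx_lock);
	return NETDEV_TX_OK;
}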
@@ -1664,8 +1664,7 @@ static int t1_sge_tx(struct sk_buff *skb, struct adapter *adapter,
 	struct cmdQ *q = &sge->cmdQ[qid];
 	unsigned int credits, pidx, genbit, count, use_sched_skb = 0;
 
-	if (!spin_trylock(&q->lock))
-		return NETDEV_TX_LOCKED;
+	spin_lock(&q->lock);
 
 	reclaim_completed_tx(sge, q);
@@ -4021,7 +4021,6 @@ static netdev_tx_t s2io_xmit(struct sk_buff *skb, struct net_device *dev)
 	unsigned long flags = 0;
 	u16 vlan_tag = 0;
 	struct fifo_info *fifo = NULL;
-	int do_spin_lock = 1;
 	int offload_type;
 	int enable_per_list_interrupt = 0;
 	struct config_param *config = &sp->config;
@@ -4074,7 +4073,6 @@ static netdev_tx_t s2io_xmit(struct sk_buff *skb, struct net_device *dev)
 				queue += sp->udp_fifo_idx;
 				if (skb->len > 1024)
 					enable_per_list_interrupt = 1;
-				do_spin_lock = 0;
 			}
 		}
 	}
@@ -4084,12 +4082,7 @@ static netdev_tx_t s2io_xmit(struct sk_buff *skb, struct net_device *dev)
 			[skb->priority & (MAX_TX_FIFOS - 1)];
 	fifo = &mac_control->fifos[queue];
 
-	if (do_spin_lock)
-		spin_lock_irqsave(&fifo->tx_lock, flags);
-	else {
-		if (unlikely(!spin_trylock_irqsave(&fifo->tx_lock, flags)))
-			return NETDEV_TX_LOCKED;
-	}
+	spin_lock_irqsave(&fifo->tx_lock, flags);
 
 	if (sp->config.multiq) {
 		if (__netif_subqueue_stopped(dev, fifo->fifo_no)) {
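With the trylock fallback gone, the do_spin_lock flag in s2io_xmit has no readers left, which is why the two hunks above remove it together with the conditional locking. What remains is the unconditional _irqsave form; a self-contained sketch of that idiom (example_lock and tx_critical_section are illustrative, not s2io code):

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);

/* The _irqsave form is used when the lock can also be taken from
 * hard-IRQ context: local interrupts are disabled for the critical
 * section and the previous IRQ state is restored from 'flags'. */
static void tx_critical_section(void)
{
	unsigned long flags;

	spin_lock_irqsave(&example_lock, flags);
	/* ... build descriptors, update ring pointers ... */
	spin_unlock_irqrestore(&example_lock, flags);
}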
@@ -2137,10 +2137,8 @@ static int pch_gbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 	struct pch_gbe_tx_ring *tx_ring = adapter->tx_ring;
 	unsigned long flags;
 
-	if (!spin_trylock_irqsave(&tx_ring->tx_lock, flags)) {
-		/* Collision - tell upper layer to requeue */
-		return NETDEV_TX_LOCKED;
-	}
+	spin_lock_irqsave(&tx_ring->tx_lock, flags);
+
 	if (unlikely(!PCH_GBE_DESC_UNUSED(tx_ring))) {
 		netif_stop_queue(netdev);
 		spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
@@ -1610,7 +1610,6 @@ static inline int bdx_tx_space(struct bdx_priv *priv)
  * o NETDEV_TX_BUSY Cannot transmit packet, try later
  *   Usually a bug, means queue start/stop flow control is broken in
  *   the driver. Note: the driver must NOT put the skb in its DMA ring.
- * o NETDEV_TX_LOCKED Locking failed, please retry quickly.
  */
 static netdev_tx_t bdx_tx_transmit(struct sk_buff *skb,
 				   struct net_device *ndev)
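The comment kept above describes the intended contract: NETDEV_TX_BUSY normally indicates broken start/stop flow control, because a well-behaved driver stops its queue while there is still room for one more packet and wakes it from the TX-completion path. A minimal sketch of that flow control, with hypothetical names (foo_priv, ring_space(), FOO_MIN_DESC) rather than tehuti's real ones:

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>

#define FOO_MIN_DESC	4	/* illustrative worst-case descriptor need */

struct foo_priv {
	spinlock_t tx_lock;
	unsigned int free_desc;	/* free descriptors in the TX ring */
};

/* Placeholder for the driver's real ring-accounting helper. */
static unsigned int ring_space(struct foo_priv *priv)
{
	return priv->free_desc;
}

static netdev_tx_t foo_hard_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct foo_priv *priv = netdev_priv(dev);

	spin_lock(&priv->tx_lock);

	/* ... post skb to the ring, decrement priv->free_desc ... */

	if (ring_space(priv) < FOO_MIN_DESC)
		netif_stop_queue(dev);	/* stop before the ring can overflow */

	spin_unlock(&priv->tx_lock);
	return NETDEV_TX_OK;
}

/* TX-completion side, assumed to run from NAPI poll (softirq); if it ran
 * in hard-IRQ context the _irqsave locking variants would be needed. */
static void foo_tx_clean(struct foo_priv *priv, struct net_device *dev)
{
	spin_lock(&priv->tx_lock);

	/* ... reclaim completed descriptors, increment priv->free_desc ... */

	if (netif_queue_stopped(dev) && ring_space(priv) >= FOO_MIN_DESC)
		netif_wake_queue(dev);	/* restart the stopped queue */

	spin_unlock(&priv->tx_lock);
}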
@@ -1630,12 +1629,7 @@ static netdev_tx_t bdx_tx_transmit(struct sk_buff *skb,
 
 	ENTER;
 	local_irq_save(flags);
-	if (!spin_trylock(&priv->tx_lock)) {
-		local_irq_restore(flags);
-		DBG("%s[%s]: TX locked, returning NETDEV_TX_LOCKED\n",
-		    BDX_DRV_NAME, ndev->name);
-		return NETDEV_TX_LOCKED;
-	}
+	spin_lock(&priv->tx_lock);
 
 	/* build tx descriptor */
 	BDX_ASSERT(f->m.wptr >= f->m.memsz);	/* started with valid wptr */
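The tehuti hunk keeps the existing local_irq_save(flags) and only swaps the trylock block for a plain spin_lock(); the rionet hunk below collapses the same two steps into a single spin_lock_irqsave(). The forms are equivalent, assuming the unlock side pairs up accordingly (bdx_tx_transmit's unlock path is not visible in this hunk). A self-contained comparison, with an illustrative example_lock:

#include <linux/irqflags.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);

/* Form kept in the tehuti hunk: two explicit steps. */
static void locked_two_step(void)
{
	unsigned long flags;

	local_irq_save(flags);		/* disable IRQs, remember old state */
	spin_lock(&example_lock);
	/* ... critical section ... */
	spin_unlock(&example_lock);
	local_irq_restore(flags);	/* restore the previous IRQ state */
}

/* Form used in the rionet hunk below: the combined helper. */
static void locked_combined(void)
{
	unsigned long flags;

	spin_lock_irqsave(&example_lock, flags);
	/* ... critical section ... */
	spin_unlock_irqrestore(&example_lock, flags);
}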
@@ -179,11 +179,7 @@ static int rionet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 	unsigned long flags;
 	int add_num = 1;
 
-	local_irq_save(flags);
-	if (!spin_trylock(&rnet->tx_lock)) {
-		local_irq_restore(flags);
-		return NETDEV_TX_LOCKED;
-	}
+	spin_lock_irqsave(&rnet->tx_lock, flags);
 
 	if (is_multicast_ether_addr(eth->h_dest))
 		add_num = nets[rnet->mport->id].nact;