mlx4_en: Removed redundant cq->armed flag
Signed-off-by: Yevgeny Petrilin <yevgenyp@mellanox.co.il>
Signed-off-by: David S. Miller <davem@davemloft.net>
Parent: 48374ddce7
Commit: c03ea21fcf
@@ -137,7 +137,6 @@ int mlx4_en_set_cq_moder(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
 
 int mlx4_en_arm_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
 {
-	cq->armed = 1;
 	mlx4_cq_arm(&cq->mcq, MLX4_CQ_DB_REQ_NOT, priv->mdev->uar_map,
 		    &priv->mdev->uar_lock);
 
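With the flag gone, mlx4_en_arm_cq reduces to a plain wrapper around mlx4_cq_arm. A reconstruction of the function after this hunk, based only on the context lines above (the trailing return is an assumption, since the hunk does not show it):

int mlx4_en_arm_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
{
	/* Arm the CQ so the HCA requests a notification on the next
	 * completion; no software bookkeeping of the armed state remains. */
	mlx4_cq_arm(&cq->mcq, MLX4_CQ_DB_REQ_NOT, priv->mdev->uar_map,
		    &priv->mdev->uar_lock);

	return 0;	/* assumed: not visible in the hunk context */
}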
@@ -379,8 +379,8 @@ static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
 
 	/* Wakeup Tx queue if this ring stopped it */
 	if (unlikely(ring->blocked)) {
-		if (((u32) (ring->prod - ring->cons) <=
-		     ring->size - HEADROOM - MAX_DESC_TXBBS) && !cq->armed) {
+		if ((u32) (ring->prod - ring->cons) <=
+		     ring->size - HEADROOM - MAX_DESC_TXBBS) {
 
 			/* TODO: support multiqueue netdevs. Currently, we block
 			 * when *any* ring is full. Note that:
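After this hunk the wake-up decision depends only on ring occupancy; the dropped !cq->armed test no longer gates it. A minimal sketch of the arithmetic, with stand-in parameters for ring->size, HEADROOM and MAX_DESC_TXBBS (illustrative only, not driver code):

#include <stdbool.h>
#include <stdint.h>

/* Mirrors the wake-up test: prod - cons wraps in unsigned arithmetic and
 * counts the TXBBs still in flight; the queue may be restarted once at
 * least one maximally sized descriptor plus the headroom reserve fits. */
static bool tx_ring_can_wake(uint32_t prod, uint32_t cons,
			     uint32_t ring_size, uint32_t headroom,
			     uint32_t max_desc_txbbs)
{
	uint32_t in_flight = prod - cons;

	return in_flight <= ring_size - headroom - max_desc_txbbs;
}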
@@ -404,7 +404,6 @@ void mlx4_en_tx_irq(struct mlx4_cq *mcq)
 	struct mlx4_en_priv *priv = netdev_priv(cq->dev);
 	struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring];
 
-	cq->armed = 0;
 	if (!spin_trylock(&ring->comp_lock))
 		return;
 	mlx4_en_process_tx_cq(cq->dev, cq);
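The interrupt handler keeps its trylock-and-bail pattern: if the completion lock is already held, it returns and the completions are left for the lock holder or a later event. A userspace sketch of the same pattern with pthreads (names are hypothetical; this is not driver code):

#include <pthread.h>

static pthread_mutex_t comp_lock = PTHREAD_MUTEX_INITIALIZER;

/* If another context already owns comp_lock, skip the work instead of
 * spinning; the pending completions are handled elsewhere. */
static void handle_tx_completion_event(void (*process_completions)(void))
{
	if (pthread_mutex_trylock(&comp_lock) != 0)
		return;

	process_completions();
	pthread_mutex_unlock(&comp_lock);
}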
@@ -311,7 +311,6 @@ struct mlx4_en_cq {
 	enum cq_type is_tx;
 	u16 moder_time;
 	u16 moder_cnt;
-	int armed;
 	struct mlx4_cqe *buf;
 #define MLX4_EN_OPCODE_ERROR	0x1e
 };