Merge branch 'xmit_more-softnet_data'

Florian Westphal says:

====================
net: move skb->xmit_more to percpu softnet data

Eric Dumazet mentioned we could place the xmit_more hint in the
same spot as the device xmit recursion counter, instead of using
an sk_buff flag bit.

This series places the xmit_recursion counter and the xmit_more
hint in per-cpu softnet data, filling an existing hole in that
structure.

After this, skb->xmit_more is always zero.  Drivers are converted
to use the netdev_xmit_more() helper instead.

The last patch removes the skb->xmit_more flag.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
Committed by: David S. Miller  2019-04-01 18:35:02 -07:00
Parents: 74dcb4c1a5 4f296edeb9
Commit: 3dc93e85f9
39 changed files: 97 additions and 76 deletions
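The driver conversions in the diff below are mechanical: every read of skb->xmit_more inside an ndo_start_xmit() path becomes a call to netdev_xmit_more(), which fetches the hint from per-cpu softnet data just before the doorbell decision. A representative before/after sketch (ring_tx_doorbell() is a made-up placeholder for whichever doorbell/kick routine a given driver uses):

	/* old: per-skb flag, set by the core before calling ndo_start_xmit() */
	if (netif_xmit_stopped(txq) || !skb->xmit_more)
		ring_tx_doorbell(ring);

	/* new: per-cpu hint, only meaningful from within the ndo_start_xmit() path */
	if (netif_xmit_stopped(txq) || !netdev_xmit_more())
		ring_tx_doorbell(ring);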


@@ -2236,7 +2236,7 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
 }
 }
-if (netif_xmit_stopped(txq) || !skb->xmit_more) {
+if (netif_xmit_stopped(txq) || !netdev_xmit_more()) {
 /* trigger the dma engine. ena_com_write_sq_doorbell()
 * has a mb
 */


@@ -1887,7 +1887,7 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel)
 smp_wmb();
 ring->cur = cur_index + 1;
-if (!packet->skb->xmit_more ||
+if (!netdev_xmit_more() ||
 netif_xmit_stopped(netdev_get_tx_queue(pdata->netdev,
 channel->queue_index)))
 xgbe_tx_start_xmit(channel, ring);


@@ -404,6 +404,7 @@ static int nb8800_xmit(struct sk_buff *skb, struct net_device *dev)
 unsigned int dma_len;
 unsigned int align;
 unsigned int next;
+bool xmit_more;
 if (atomic_read(&priv->tx_free) <= NB8800_DESC_LOW) {
 netif_stop_queue(dev);
@@ -423,9 +424,10 @@ static int nb8800_xmit(struct sk_buff *skb, struct net_device *dev)
 return NETDEV_TX_OK;
 }
+xmit_more = netdev_xmit_more();
 if (atomic_dec_return(&priv->tx_free) <= NB8800_DESC_LOW) {
 netif_stop_queue(dev);
-skb->xmit_more = 0;
+xmit_more = false;
 }
 next = priv->tx_next;
@@ -450,7 +452,7 @@ static int nb8800_xmit(struct sk_buff *skb, struct net_device *dev)
 desc->n_addr = priv->tx_bufs[next].dma_desc;
 desc->config = DESC_BTS(2) | DESC_DS | DESC_EOF | dma_len;
-if (!skb->xmit_more)
+if (!xmit_more)
 desc->config |= DESC_EOC;
 txb->skb = skb;
@@ -468,7 +470,7 @@ static int nb8800_xmit(struct sk_buff *skb, struct net_device *dev)
 priv->tx_next = next;
-if (!skb->xmit_more) {
+if (!xmit_more) {
 smp_wmb();
 priv->tx_chain->ready = true;
 priv->tx_chain = NULL;


@@ -551,7 +551,7 @@ normal_tx:
 prod = NEXT_TX(prod);
 txr->tx_prod = prod;
-if (!skb->xmit_more || netif_xmit_stopped(txq))
+if (!netdev_xmit_more() || netif_xmit_stopped(txq))
 bnxt_db_write(bp, &txr->tx_db, prod);
 tx_done:
@@ -559,7 +559,7 @@ tx_done:
 mmiowb();
 if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
-if (skb->xmit_more && !tx_buf->is_push)
+if (netdev_xmit_more() && !tx_buf->is_push)
 bnxt_db_write(bp, &txr->tx_db, prod);
 netif_tx_stop_queue(txq);


@@ -1665,7 +1665,7 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
 if (ring->free_bds <= (MAX_SKB_FRAGS + 1))
 netif_tx_stop_queue(txq);
-if (!skb->xmit_more || netif_xmit_stopped(txq))
+if (!netdev_xmit_more() || netif_xmit_stopped(txq))
 /* Packets are ready, update producer index */
 bcmgenet_tdma_ring_writel(priv, ring->index,
 ring->prod_index, TDMA_PROD_INDEX);


@@ -8156,7 +8156,7 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 netif_tx_wake_queue(txq);
 }
-if (!skb->xmit_more || netif_xmit_stopped(txq)) {
+if (!netdev_xmit_more() || netif_xmit_stopped(txq)) {
 /* Packets are ready, update Tx producer idx on card. */
 tw32_tx_mbox(tnapi->prodmbox, entry);
 mmiowb();


@@ -2522,7 +2522,7 @@ static netdev_tx_t liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
 irh->vlan = skb_vlan_tag_get(skb) & 0xfff;
 }
-xmit_more = skb->xmit_more;
+xmit_more = netdev_xmit_more();
 if (unlikely(cmdsetup.s.timestamp))
 status = send_nic_timestamp_pkt(oct, &ndata, finfo, xmit_more);


@@ -1585,7 +1585,7 @@ static netdev_tx_t liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
 irh->vlan = skb_vlan_tag_get(skb) & VLAN_VID_MASK;
 }
-xmit_more = skb->xmit_more;
+xmit_more = netdev_xmit_more();
 if (unlikely(cmdsetup.s.timestamp))
 status = send_nic_timestamp_pkt(oct, &ndata, finfo, xmit_more);


@@ -897,7 +897,7 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
 if (vnic_wq_desc_avail(wq) < MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS)
 netif_tx_stop_queue(txq);
 skb_tx_timestamp(skb);
-if (!skb->xmit_more || netif_xmit_stopped(txq))
+if (!netdev_xmit_more() || netif_xmit_stopped(txq))
 vnic_wq_doorbell(wq);
 spin_unlock(&enic->wq_lock[txq_map]);


@@ -1376,7 +1376,7 @@ static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
 u16 q_idx = skb_get_queue_mapping(skb);
 struct be_tx_obj *txo = &adapter->tx_obj[q_idx];
 struct be_wrb_params wrb_params = { 0 };
-bool flush = !skb->xmit_more;
+bool flush = !netdev_xmit_more();
 u16 wrb_cnt;
 skb = be_xmit_workarounds(adapter, skb, &wrb_params);


@@ -518,7 +518,7 @@ process_sq_wqe:
 flush_skbs:
 netdev_txq = netdev_get_tx_queue(netdev, q_id);
-if ((!skb->xmit_more) || (netif_xmit_stopped(netdev_txq)))
+if ((!netdev_xmit_more()) || (netif_xmit_stopped(netdev_txq)))
 hinic_sq_write_db(txq->sq, prod_idx, wqe_size, 0);
 return err;


@@ -3267,7 +3267,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
 /* Make sure there is space in the ring for the next send. */
 e1000_maybe_stop_tx(netdev, tx_ring, desc_needed);
-if (!skb->xmit_more ||
+if (!netdev_xmit_more() ||
 netif_xmit_stopped(netdev_get_tx_queue(netdev, 0))) {
 writel(tx_ring->next_to_use, hw->hw_addr + tx_ring->tdt);
 /* we need this if more than one processor can write to


@@ -5897,7 +5897,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
 DIV_ROUND_UP(PAGE_SIZE,
 adapter->tx_fifo_limit) + 2));
-if (!skb->xmit_more ||
+if (!netdev_xmit_more() ||
 netif_xmit_stopped(netdev_get_tx_queue(netdev, 0))) {
 if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
 e1000e_update_tdt_wa(tx_ring,


@@ -1035,7 +1035,7 @@ static void fm10k_tx_map(struct fm10k_ring *tx_ring,
 fm10k_maybe_stop_tx(tx_ring, DESC_NEEDED);
 /* notify HW of packet */
-if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
+if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {
 writel(i, tx_ring->tail);
 /* we need this if more than one processor can write to our tail


@@ -3469,7 +3469,7 @@ static inline int i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
 first->next_to_watch = tx_desc;
 /* notify HW of packet */
-if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
+if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {
 writel(i, tx_ring->tail);
 /* we need this if more than one processor can write to our tail


@@ -2358,7 +2358,7 @@ static inline void iavf_tx_map(struct iavf_ring *tx_ring, struct sk_buff *skb,
 first->next_to_watch = tx_desc;
 /* notify HW of packet */
-if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
+if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {
 writel(i, tx_ring->tail);
 /* we need this if more than one processor can write to our tail


@@ -1646,7 +1646,7 @@ ice_tx_map(struct ice_ring *tx_ring, struct ice_tx_buf *first,
 ice_maybe_stop_tx(tx_ring, DESC_NEEDED);
 /* notify HW of packet */
-if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
+if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {
 writel(i, tx_ring->tail);
 /* we need this if more than one processor can write to our tail


@@ -6029,7 +6029,7 @@ static int igb_tx_map(struct igb_ring *tx_ring,
 /* Make sure there is space in the ring for the next send. */
 igb_maybe_stop_tx(tx_ring, DESC_NEEDED);
-if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
+if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {
 writel(i, tx_ring->tail);
 /* we need this if more than one processor can write to our tail


@@ -939,7 +939,7 @@ static int igc_tx_map(struct igc_ring *tx_ring,
 /* Make sure there is space in the ring for the next send. */
 igc_maybe_stop_tx(tx_ring, DESC_NEEDED);
-if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
+if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {
 writel(i, tx_ring->tail);
 /* we need this if more than one processor can write to our tail


@@ -8297,7 +8297,7 @@ static int ixgbe_tx_map(struct ixgbe_ring *tx_ring,
 ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED);
-if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
+if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {
 writel(i, tx_ring->tail);
 /* we need this if more than one processor can write to our tail


@@ -2467,7 +2467,7 @@ out:
 if (txq->count >= txq->tx_stop_threshold)
 netif_tx_stop_queue(nq);
-if (!skb->xmit_more || netif_xmit_stopped(nq) ||
+if (!netdev_xmit_more() || netif_xmit_stopped(nq) ||
 txq->pending + frags > MVNETA_TXQ_DEC_SENT_MASK)
 mvneta_txq_pend_desc_add(pp, txq, frags);
 else


@@ -767,7 +767,8 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
 */
 wmb();
-if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) || !skb->xmit_more)
+if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) ||
+!netdev_xmit_more())
 mtk_w32(eth, txd->txd2, MTK_QTX_CTX_PTR);
 return 0;


@@ -1042,7 +1042,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
 send_doorbell = __netdev_tx_sent_queue(ring->tx_queue,
 tx_info->nr_bytes,
-skb->xmit_more);
+netdev_xmit_more());
 real_size = (real_size / 16) & 0x3f;


@@ -772,7 +772,7 @@ u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
 struct net_device *sb_dev);
 netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev);
 netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
-struct mlx5e_tx_wqe *wqe, u16 pi);
+struct mlx5e_tx_wqe *wqe, u16 pi, bool xmit_more);
 void mlx5e_completion_event(struct mlx5_core_cq *mcq);
 void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event);


@@ -181,7 +181,6 @@ static void mlx5e_tls_complete_sync_skb(struct sk_buff *skb,
 */
 nskb->ip_summed = CHECKSUM_PARTIAL;
-nskb->xmit_more = 1;
 nskb->queue_mapping = skb->queue_mapping;
 }
@@ -248,7 +247,7 @@ mlx5e_tls_handle_ooo(struct mlx5e_tls_offload_context_tx *context,
 sq->stats->tls_resync_bytes += nskb->len;
 mlx5e_tls_complete_sync_skb(skb, nskb, tcp_seq, headln,
 cpu_to_be64(info.rcd_sn));
-mlx5e_sq_xmit(sq, nskb, *wqe, *pi);
+mlx5e_sq_xmit(sq, nskb, *wqe, *pi, true);
 mlx5e_sq_fetch_wqe(sq, wqe, pi);
 return skb;


@@ -297,7 +297,8 @@ static inline void mlx5e_fill_sq_frag_edge(struct mlx5e_txqsq *sq,
 static inline void
 mlx5e_txwqe_complete(struct mlx5e_txqsq *sq, struct sk_buff *skb,
 u8 opcode, u16 ds_cnt, u8 num_wqebbs, u32 num_bytes, u8 num_dma,
-struct mlx5e_tx_wqe_info *wi, struct mlx5_wqe_ctrl_seg *cseg)
+struct mlx5e_tx_wqe_info *wi, struct mlx5_wqe_ctrl_seg *cseg,
+bool xmit_more)
 {
 struct mlx5_wq_cyc *wq = &sq->wq;
@@ -320,14 +321,14 @@ mlx5e_txwqe_complete(struct mlx5e_txqsq *sq, struct sk_buff *skb,
 sq->stats->stopped++;
 }
-if (!skb->xmit_more || netif_xmit_stopped(sq->txq))
+if (!xmit_more || netif_xmit_stopped(sq->txq))
 mlx5e_notify_hw(wq, sq->pc, sq->uar_map, cseg);
 }
 #define INL_HDR_START_SZ (sizeof(((struct mlx5_wqe_eth_seg *)NULL)->inline_hdr.start))
 netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
-struct mlx5e_tx_wqe *wqe, u16 pi)
+struct mlx5e_tx_wqe *wqe, u16 pi, bool xmit_more)
 {
 struct mlx5_wq_cyc *wq = &sq->wq;
 struct mlx5_wqe_ctrl_seg *cseg;
@@ -360,7 +361,7 @@ netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
 }
 stats->bytes += num_bytes;
-stats->xmit_more += skb->xmit_more;
+stats->xmit_more += netdev_xmit_more();
 headlen = skb->len - ihs - skb->data_len;
 ds_cnt += !!headlen;
@@ -423,7 +424,7 @@ netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
 goto err_drop;
 mlx5e_txwqe_complete(sq, skb, opcode, ds_cnt, num_wqebbs, num_bytes,
-num_dma, wi, cseg);
+num_dma, wi, cseg, xmit_more);
 return NETDEV_TX_OK;
@@ -449,7 +450,7 @@ netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
 if (unlikely(!skb))
 return NETDEV_TX_OK;
-return mlx5e_sq_xmit(sq, skb, wqe, pi);
+return mlx5e_sq_xmit(sq, skb, wqe, pi, netdev_xmit_more());
 }
 static void mlx5e_dump_error_cqe(struct mlx5e_txqsq *sq,
@@ -659,7 +660,7 @@ netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
 }
 stats->bytes += num_bytes;
-stats->xmit_more += skb->xmit_more;
+stats->xmit_more += netdev_xmit_more();
 headlen = skb->len - ihs - skb->data_len;
 ds_cnt += !!headlen;
@@ -704,7 +705,7 @@ netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
 goto err_drop;
 mlx5e_txwqe_complete(sq, skb, opcode, ds_cnt, num_wqebbs, num_bytes,
-num_dma, wi, cseg);
+num_dma, wi, cseg, false);
 return NETDEV_TX_OK;


@@ -909,7 +909,7 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
 nfp_net_tx_ring_stop(nd_q, tx_ring);
 tx_ring->wr_ptr_add += nr_frags + 1;
-if (__netdev_tx_sent_queue(nd_q, txbuf->real_len, skb->xmit_more))
+if (__netdev_tx_sent_queue(nd_q, txbuf->real_len, netdev_xmit_more()))
 nfp_net_tx_xmit_more_flush(tx_ring);
 return NETDEV_TX_OK;


@@ -1665,12 +1665,12 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 txq->tx_db.data.bd_prod =
 cpu_to_le16(qed_chain_get_prod_idx(&txq->tx_pbl));
-if (!skb->xmit_more || netif_xmit_stopped(netdev_txq))
+if (!netdev_xmit_more() || netif_xmit_stopped(netdev_txq))
 qede_update_tx_producer(txq);
 if (unlikely(qed_chain_get_elem_left(&txq->tx_pbl)
 < (MAX_SKB_FRAGS + 1))) {
-if (skb->xmit_more)
+if (netdev_xmit_more())
 qede_update_tx_producer(txq);
 netif_tx_stop_queue(netdev_txq);


@@ -840,7 +840,7 @@ static netdev_tx_t r6040_start_xmit(struct sk_buff *skb,
 skb_tx_timestamp(skb);
 /* Trigger the MAC to check the TX descriptor */
-if (!skb->xmit_more || netif_queue_stopped(dev))
+if (!netdev_xmit_more() || netif_queue_stopped(dev))
 iowrite16(TM2TX, ioaddr + MTPR);
 lp->tx_insert_ptr = descptr->vndescp;


@@ -321,7 +321,7 @@ netdev_tx_t ef4_enqueue_skb(struct ef4_tx_queue *tx_queue, struct sk_buff *skb)
 netdev_tx_sent_queue(tx_queue->core_txq, skb_len);
 /* Pass off to hardware */
-if (!skb->xmit_more || netif_xmit_stopped(tx_queue->core_txq)) {
+if (!netdev_xmit_more() || netif_xmit_stopped(tx_queue->core_txq)) {
 struct ef4_tx_queue *txq2 = ef4_tx_queue_partner(tx_queue);
 /* There could be packets left on the partner queue if those
@@ -333,7 +333,7 @@ netdev_tx_t ef4_enqueue_skb(struct ef4_tx_queue *tx_queue, struct sk_buff *skb)
 ef4_nic_push_buffers(tx_queue);
 } else {
-tx_queue->xmit_more_available = skb->xmit_more;
+tx_queue->xmit_more_available = netdev_xmit_more();
 }
 tx_queue->tx_packets++;


@@ -478,8 +478,6 @@ static int efx_tx_tso_fallback(struct efx_tx_queue *tx_queue,
 next = skb->next;
 skb->next = NULL;
-if (next)
-skb->xmit_more = true;
 efx_enqueue_skb(tx_queue, skb);
 skb = next;
 }
@@ -506,7 +504,7 @@ static int efx_tx_tso_fallback(struct efx_tx_queue *tx_queue,
 netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
 {
 unsigned int old_insert_count = tx_queue->insert_count;
-bool xmit_more = skb->xmit_more;
+bool xmit_more = netdev_xmit_more();
 bool data_mapped = false;
 unsigned int segments;
 unsigned int skb_len;
@@ -533,7 +531,7 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
 if (rc)
 goto err;
 #ifdef EFX_USE_PIO
-} else if (skb_len <= efx_piobuf_size && !skb->xmit_more &&
+} else if (skb_len <= efx_piobuf_size && !xmit_more &&
 efx_nic_may_tx_pio(tx_queue)) {
 /* Use PIO for short packets with an empty queue. */
 if (efx_enqueue_skb_pio(tx_queue, skb))
@@ -559,8 +557,8 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
 if (__netdev_tx_sent_queue(tx_queue->core_txq, skb_len, xmit_more)) {
 struct efx_tx_queue *txq2 = efx_tx_queue_partner(tx_queue);
-/* There could be packets left on the partner queue if those
-* SKBs had skb->xmit_more set. If we do not push those they
+/* There could be packets left on the partner queue if
+* xmit_more was set. If we do not push those they
 * could be left for a long time and cause a netdev watchdog.
 */
 if (txq2->xmit_more_available)
@@ -568,7 +566,7 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
 efx_nic_push_buffers(tx_queue);
 } else {
-tx_queue->xmit_more_available = skb->xmit_more;
+tx_queue->xmit_more_available = xmit_more;
 }
 if (segments) {


@@ -995,7 +995,7 @@ static void xlgmac_dev_xmit(struct xlgmac_channel *channel)
 smp_wmb();
 ring->cur = cur_index + 1;
-if (!pkt_info->skb->xmit_more ||
+if (!netdev_xmit_more() ||
 netif_xmit_stopped(netdev_get_tx_queue(pdata->netdev,
 channel->queue_index)))
 xlgmac_tx_start_xmit(channel, ring);


@@ -964,7 +964,7 @@ int netvsc_send(struct net_device *ndev,
 /* Keep aggregating only if stack says more data is coming
 * and not doing mixed modes send and not flow blocked
 */
-xmit_more = skb->xmit_more &&
+xmit_more = netdev_xmit_more() &&
 !packet->cp_partial &&
 !netif_xmit_stopped(netdev_get_tx_queue(ndev, packet->q_idx));


@@ -1568,7 +1568,7 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
 struct send_queue *sq = &vi->sq[qnum];
 int err;
 struct netdev_queue *txq = netdev_get_tx_queue(dev, qnum);
-bool kick = !skb->xmit_more;
+bool kick = !netdev_xmit_more();
 bool use_napi = sq->napi.weight;
 /* Free up any pending old buffers before queueing new ones. */


@@ -741,7 +741,8 @@ static int mtk_pdma_tx_map(struct sk_buff *skb, struct net_device *dev,
 wmb();
 atomic_set(&ring->tx_free_count, mtk_pdma_empty_txd(ring));
-if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) || !skb->xmit_more)
+if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) ||
+!netdev_xmit_more())
 mtk_reg_w32(eth, ring->tx_next_idx, MTK_REG_TX_CTX_IDX0);
 return 0;
@@ -935,7 +936,8 @@ static int mtk_qdma_tx_map(struct sk_buff *skb, struct net_device *dev,
 */
 wmb();
-if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) || !skb->xmit_more)
+if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) ||
+!netdev_xmit_more())
 mtk_w32(eth, txd->txd2, MTK_QTX_CTX_PTR);
 return 0;


@@ -2659,14 +2659,6 @@ void netdev_freemem(struct net_device *dev);
 void synchronize_net(void);
 int init_dummy_netdev(struct net_device *dev);
-DECLARE_PER_CPU(int, xmit_recursion);
-#define XMIT_RECURSION_LIMIT 10
-static inline int dev_recursion_level(void)
-{
-return this_cpu_read(xmit_recursion);
-}
 struct net_device *dev_get_by_index(struct net *net, int ifindex);
 struct net_device *__dev_get_by_index(struct net *net, int ifindex);
 struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
@@ -3015,6 +3007,11 @@ struct softnet_data {
 #ifdef CONFIG_XFRM_OFFLOAD
 struct sk_buff_head xfrm_backlog;
 #endif
+/* written and read only by owning cpu: */
+struct {
+u16 recursion;
+u8 more;
+} xmit;
 #ifdef CONFIG_RPS
 /* input_queue_head should be written by cpu owning this struct,
 * and only read by other cpus. Worth using a cache line.
@@ -3050,6 +3047,28 @@ static inline void input_queue_tail_incr_save(struct softnet_data *sd,
 DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
+static inline int dev_recursion_level(void)
+{
+return __this_cpu_read(softnet_data.xmit.recursion);
+}
+#define XMIT_RECURSION_LIMIT 10
+static inline bool dev_xmit_recursion(void)
+{
+return unlikely(__this_cpu_read(softnet_data.xmit.recursion) >
+XMIT_RECURSION_LIMIT);
+}
+static inline void dev_xmit_recursion_inc(void)
+{
+__this_cpu_inc(softnet_data.xmit.recursion);
+}
+static inline void dev_xmit_recursion_dec(void)
+{
+__this_cpu_dec(softnet_data.xmit.recursion);
+}
 void __netif_schedule(struct Qdisc *q);
 void netif_schedule_queue(struct netdev_queue *txq);
@@ -4405,10 +4424,15 @@ static inline netdev_tx_t __netdev_start_xmit(const struct net_device_ops *ops,
 struct sk_buff *skb, struct net_device *dev,
 bool more)
 {
-skb->xmit_more = more ? 1 : 0;
+__this_cpu_write(softnet_data.xmit.more, more);
 return ops->ndo_start_xmit(skb, dev);
 }
+static inline bool netdev_xmit_more(void)
+{
+return __this_cpu_read(softnet_data.xmit.more);
+}
 static inline netdev_tx_t netdev_start_xmit(struct sk_buff *skb, struct net_device *dev,
 struct netdev_queue *txq, bool more)
 {


@@ -657,7 +657,6 @@ typedef unsigned char *sk_buff_data_t;
 * @tc_index: Traffic control index
 * @hash: the packet hash
 * @queue_mapping: Queue mapping for multiqueue devices
-* @xmit_more: More SKBs are pending for this queue
 * @pfmemalloc: skbuff was allocated from PFMEMALLOC reserves
 * @active_extensions: active extensions (skb_ext_id types)
 * @ndisc_nodetype: router type (from link layer)
@@ -764,7 +763,6 @@ struct sk_buff {
 fclone:2,
 peeked:1,
 head_frag:1,
-xmit_more:1,
 pfmemalloc:1;
 #ifdef CONFIG_SKB_EXTENSIONS
 __u8 active_extensions;


@@ -3566,9 +3566,6 @@ static void skb_update_prio(struct sk_buff *skb)
 #define skb_update_prio(skb)
 #endif
-DEFINE_PER_CPU(int, xmit_recursion);
-EXPORT_SYMBOL(xmit_recursion);
 /**
 * dev_loopback_xmit - loop back @skb
 * @net: network namespace this loopback is happening in
@@ -3857,8 +3854,7 @@ static int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev)
 int cpu = smp_processor_id(); /* ok because BHs are off */
 if (txq->xmit_lock_owner != cpu) {
-if (unlikely(__this_cpu_read(xmit_recursion) >
-XMIT_RECURSION_LIMIT))
+if (dev_xmit_recursion())
 goto recursion_alert;
 skb = validate_xmit_skb(skb, dev, &again);
@@ -3868,9 +3864,9 @@ static int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev)
 HARD_TX_LOCK(dev, txq, cpu);
 if (!netif_xmit_stopped(txq)) {
-__this_cpu_inc(xmit_recursion);
+dev_xmit_recursion_inc();
 skb = dev_hard_start_xmit(skb, dev, txq, &rc);
-__this_cpu_dec(xmit_recursion);
+dev_xmit_recursion_dec();
 if (dev_xmit_complete(rc)) {
 HARD_TX_UNLOCK(dev, txq);
 goto out;


@@ -2016,7 +2016,7 @@ static inline int __bpf_tx_skb(struct net_device *dev, struct sk_buff *skb)
 {
 int ret;
-if (unlikely(__this_cpu_read(xmit_recursion) > XMIT_RECURSION_LIMIT)) {
+if (dev_xmit_recursion()) {
 net_crit_ratelimited("bpf: recursion limit reached on datapath, buggy bpf program?\n");
 kfree_skb(skb);
 return -ENETDOWN;
@@ -2024,9 +2024,9 @@ static inline int __bpf_tx_skb(struct net_device *dev, struct sk_buff *skb)
 skb->dev = dev;
-__this_cpu_inc(xmit_recursion);
+dev_xmit_recursion_inc();
 ret = dev_queue_xmit(skb);
-__this_cpu_dec(xmit_recursion);
+dev_xmit_recursion_dec();
 return ret;
 }
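With the counter moved into softnet_data, the recursion checks in the core transmit path and in BPF redirect share the dev_xmit_recursion*() helpers added in the netdevice.h hunk. Condensed from the net/core/dev.c hunk above, the call pattern is (a sketch of the pattern only, not the exact surrounding code):

	if (dev_xmit_recursion())
		goto recursion_alert;	/* limit hit on this cpu, do not nest further */

	dev_xmit_recursion_inc();
	skb = dev_hard_start_xmit(skb, dev, txq, &rc);
	dev_xmit_recursion_dec();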