mlx5e-updates-2018-05-25
Merge tag 'mlx5e-updates-2018-05-25' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5e-updates-2018-05-25

This series includes updates for the mlx5e netdev driver.

1) Allow flow-based VF vport mirroring under the SRIOV switchdev scheme:
   add support for offloading the TC mirred mirror sub-action, from Chris Mi.

   =================
   From: Or Gerlitz <ogerlitz@mellanox.com>

   The user will typically set the action order such that the mirror port
   (mirror VF) sees packets as the original port (the VF under mirroring)
   sent them, or as it will receive them. In the general case this means
   packets are potentially sent to the mirror port before or after some
   actions have been applied to them.

   To do that properly, we follow the exact action order set for the flow
   and make sure it is preserved when we program the HW offload. If all the
   actions apply before forwarding to the mirror and destination ports,
   mirroring is simply multicasting to the two vports. Otherwise, we split
   the TC flow into two HW rules: the first applies only the actions needed
   up to the mirror (if there are any), and the second applies the rest of
   the actions plus the forwarding to the destination vport.
   =================

2) Move to order-0-only allocations (using fragmented work queues) for all
   work queues used by the driver: the RX and TX descriptor rings (RQs, SQs
   and Completion Queues (CQs)), from Tariq Toukan.

3) Avoid resetting netdevice statistics on netdevice state changes, from
   Eran Ben Elisha.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in: 874fcf1de6
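To make the action-ordering rule in item 1 concrete, here is a small
stand-alone sketch (illustrative only; it is not code from this series, and
the action names and struct are invented for the example). It walks an
ordered TC action list and decides whether the mirror can be offloaded as a
single rule that multicasts to both vports, or whether the flow has to be
split into two HW rules because a packet-modifying action precedes the
mirror:

	/* Illustrative sketch, not driver code. */
	#include <stdbool.h>
	#include <stdio.h>

	enum act_kind { ACT_MIRROR, ACT_REDIRECT, ACT_VLAN_PUSH, ACT_HDR_REWRITE };

	struct tc_act {
		enum act_kind kind;
		const char *dev;	/* target vport for mirror/redirect */
	};

	/* One HW rule suffices when no packet-modifying action precedes the
	 * mirror: mirroring degenerates to multicasting to the mirror and
	 * destination vports.  Otherwise rule 1 runs the pre-mirror actions
	 * and forwards to the mirror vport, and rule 2 runs the remaining
	 * actions and forwards to the destination vport.
	 */
	static bool needs_split(const struct tc_act *acts, int n)
	{
		for (int i = 0; i < n; i++) {
			if (acts[i].kind == ACT_MIRROR)
				return false;	/* mirror seen before any modification */
			if (acts[i].kind != ACT_REDIRECT)
				return true;	/* a modifying action precedes the mirror */
		}
		return false;			/* no mirror action at all */
	}

	int main(void)
	{
		const struct tc_act plain[] = {
			{ ACT_MIRROR, "vf1_rep" }, { ACT_REDIRECT, "vf2_rep" },
		};
		const struct tc_act rewrite_first[] = {
			{ ACT_HDR_REWRITE, NULL }, { ACT_MIRROR, "vf1_rep" },
			{ ACT_REDIRECT, "vf2_rep" },
		};

		printf("mirror, redirect:          %s\n",
		       needs_split(plain, 2) ? "two HW rules" : "single multicast rule");
		printf("rewrite, mirror, redirect: %s\n",
		       needs_split(rewrite_first, 3) ? "two HW rules" : "single multicast rule");
		return 0;
	}

With the first action list the flow is offloaded as one multicast rule; with
the second, where a header rewrite precedes the mirror, it is offloaded as
two HW rules, matching the description above.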
Core driver header (structs and inline helpers):

The CQ moves to the common work-queue control struct:
  -	struct mlx5_frag_wq_ctrl wq_ctrl;
  +	struct mlx5_wq_ctrl wq_ctrl;

struct mlx5e_txqsq and struct mlx5e_icosq: the embedded SQ stats become a
pointer in the read-only group and the open-coded queue-edge field is dropped:
  -	struct mlx5e_sq_stats stats;
  +	struct mlx5e_sq_stats *stats;
  -	u16 edge;

struct mlx5e_rq and struct mlx5e_channel get the same treatment:
  -	struct mlx5e_rq_stats stats;
  +	struct mlx5e_rq_stats *stats;
  -	struct mlx5e_ch_stats stats;
  +	struct mlx5e_ch_stats *stats;

A per-channel stats container that survives channel close/reopen is added:
  +	struct mlx5e_channel_stats {
  +		struct mlx5e_ch_stats ch;
  +		struct mlx5e_sq_stats sq[MLX5E_MAX_NUM_TC];
  +		struct mlx5e_rq_stats rq;
  +	} ____cacheline_aligned_in_smp;

struct mlx5e_priv grows the backing array plus the bookkeeping for it:
  +	rwlock_t stats_lock; /* Protects channels SW stats updates */
  +	bool channels_active;
  +	struct mlx5e_channel_stats channel_stats[MLX5E_MAX_NUM_CHANNELS];
  +	u8 max_opened_tc;

Producer-counter masking in the inline helpers goes through the WQ API
(mlx5e_wqc_has_room_for(), mlx5e_sq_fetch_wqe(), mlx5e_post_nop()):
  -	return (((wq->sz_m1 & (cc - pc)) >= n) || (cc == pc));
  +	return (mlx5_wq_cyc_ctr2ix(wq, cc - pc) >= n) || (cc == pc);
  -	*pi = sq->pc & wq->sz_m1;
  +	*pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
  -	u16 pi = *pc & wq->sz_m1;
  +	u16 pi = mlx5_wq_cyc_ctr2ix(wq, *pc);
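The fragmented (order-0) work-queue change in item 2 shows up throughout the
following hunks as mlx5_wq_cyc_ctr2ix()/mlx5_wq_cyc_ctr2fragix() calls that
replace open-coded "& sz_m1" masking, and as new *_fill_*_frag_edge() helpers
that pad with NOPs. The arithmetic behind this is sketched below (illustrative
only; the constants and helper names are invented and are not the mlx5 API):

	#include <stdio.h>

	#define LOG_WQ_SZ    8				/* 256 entries in the queue      */
	#define LOG_FRAG_SZ  6				/* 64 entries per page fragment  */
	#define WQ_SZ        (1u << LOG_WQ_SZ)
	#define FRAG_SZ      (1u << LOG_FRAG_SZ)

	/* Counter-to-index: position in the whole cyclic queue. */
	static unsigned int ctr2ix(unsigned int ctr)     { return ctr & (WQ_SZ - 1); }
	/* Counter-to-fragment-index: position inside the current fragment. */
	static unsigned int ctr2fragix(unsigned int ctr) { return ctr & (FRAG_SZ - 1); }

	/* Number of NOP slots to post before a WQE of 'wqebbs' slots so that
	 * the WQE lands entirely inside one order-0 fragment.
	 */
	static unsigned int frag_edge_padding(unsigned int pc, unsigned int wqebbs)
	{
		unsigned int frag_pi = ctr2fragix(pc);

		return (frag_pi + wqebbs > FRAG_SZ) ? FRAG_SZ - frag_pi : 0;
	}

	int main(void)
	{
		unsigned int pc = 62;			/* 2 free slots left in this fragment */

		printf("pc=%u -> ix=%u, offset in fragment=%u\n",
		       pc, ctr2ix(pc), ctr2fragix(pc));
		printf("a 4-slot WQE needs %u leading NOPs\n", frag_edge_padding(pc, 4));
		return 0;
	}

Because each fragment is a separate order-0 page, a multi-slot WQE must never
straddle a fragment boundary; the driver therefore posts NOPs up to the edge
(mlx5e_fill_icosq_frag_edge() / mlx5e_fill_sq_frag_edge() in the hunks below)
and only then writes the real WQE.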
TLS TX offload, mlx5e_tls_handle_ooo(): SQ counters go through the stats pointer:
  -	sq->stats.tls_ooo++;
  +	sq->stats->tls_ooo++;
  -	sq->stats.tls_resync_bytes += nskb->len;
  +	sq->stats->tls_resync_bytes += nskb->len;
Channel and queue setup/teardown:

mlx5e_alloc_rq(), mlx5e_open_channel() and mlx5e_alloc_txqsq() (which, together
with mlx5e_open_txqsq() and mlx5e_open_sqs(), now takes a tc argument) point
the per-queue stats at the persistent container in priv, and txq indices are
derived from the profile's max_nch rather than the current channel count:
  +	rq->stats = &c->priv->channel_stats[c->ix].rq;
  +	sq->stats = &c->priv->channel_stats[c->ix].sq[tc];
  +	c->stats = &priv->channel_stats[ix].ch;
  -	int txq_ix = c->ix + tc * params->num_channels;
  +	int txq_ix = c->ix + tc * max_nch;

RQ/SQ/CQ buffers are now fragmented (order-0) allocations, so page lists are
programmed with the frag-aware helper and the CQ uses the common WQ control
(mlx5e_create_rq(), mlx5e_create_sq(), mlx5e_free_cq(), mlx5e_create_cq()):
  -	mlx5_fill_page_array(&rq->wq_ctrl.buf, (__be64 *)MLX5_ADDR_OF(wq, wq, pas));
  +	mlx5_fill_page_frag_array(&rq->wq_ctrl.buf, (__be64 *)MLX5_ADDR_OF(wq, wq, pas));
  -	mlx5_cqwq_destroy(&cq->wq_ctrl);
  +	mlx5_wq_destroy(&cq->wq_ctrl);
  -	sizeof(u64) * cq->wq_ctrl.frag_buf.npages;
  +	sizeof(u64) * cq->wq_ctrl.buf.npages;

mlx5e_alloc_xdpsq(), mlx5e_alloc_icosq() and mlx5e_alloc_txqsq() use a local
struct mlx5_wq_cyc *wq = &sq->wq for mlx5_wq_cyc_create() and the doorbell
setup, and the open-coded edge setup is removed:
  -	sq->edge = (sq->wq.sz_m1 + 1) - MLX5E_ICOSQ_MAX_WQEBBS;
  -	sq->edge = (sq->wq.sz_m1 + 1) - MLX5_SEND_WQE_MAX_WQEBBS;

mlx5e_activate_rq() and mlx5e_deactivate_txqsq() derive the producer index
through the WQ API:
  -	u16 pi = sq->pc & sq->wq.sz_m1;
  +	u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);

mlx5e_activate_priv_channels() / mlx5e_deactivate_priv_channels() flip the new
channels_active flag under priv->stats_lock; mlx5e_build_channels_tx_maps()
iterates up to profile->max_nch(); mlx5e_setup_tc_mqprio() and
mlx5e_build_nic_netdev_priv() maintain max_opened_tc and initialize the lock:
  +	priv->max_opened_tc = max_t(u8, priv->max_opened_tc,
  +				    new_channels.params.num_tc);
  +	priv->max_opened_tc = 1;
  +	rwlock_init(&priv->stats_lock);

mlx5e_get_stats() refreshes the SW counters before reporting them, and the SQ
recover / TX timeout paths use the stats pointer:
  +	mlx5e_grp_sw_update_stats(priv);
  -	sq->stats.recover++;
  +	sq->stats->recover++;
  -	sq->channel->stats.eq_rearm++;
  +	sq->channel->stats->eq_rearm++;
Representor (switchdev mode) code:

mlx5e_rep_update_sw_counters() runs under the stats read lock, bails out when
channels are not active, and reads queue stats through the new pointers; the
combined mlx5e_rep_update_stats() wrapper is gone, the profile's update hook
handles only HW counters, and mlx5e_get_sw_stats64() refreshes the SW counters
itself:
  +	read_lock(&priv->stats_lock);
  +	if (!priv->channels_active)
  +		goto out;
  -	rq_stats = &c->rq.stats;
  +	rq_stats = c->rq.stats;
  -	sq_stats = &c->sq[j].stats;
  +	sq_stats = c->sq[j].stats;
  + out:
  +	read_unlock(&priv->stats_lock);
  +	mlx5e_rep_update_sw_counters(priv);	/* in mlx5e_get_sw_stats64() */
  -	.update_stats = mlx5e_rep_update_stats,
  +	.update_stats = mlx5e_rep_update_hw_counters,
RX datapath:

CQ and RQ indexing goes through the WQ API instead of open-coded masks
(mlx5e_read_cqe_slot(), mlx5e_cqes_update_owner(), mlx5e_decompress_cqe(),
mlx5e_poll_ico_single_cqe(), mlx5e_xmit_xdp_doorbell(), mlx5e_xmit_xdp_frame(),
mlx5e_poll_xdpsq_cq(), mlx5e_free_xdpsq_descs()):
  -	u32 ci = cqcc & cq->wq.fbc.sz_m1;
  +	u32 ci = mlx5_cqwq_ctr2ix(&cq->wq, cqcc);
  -	u8 op_own = (cqcc >> fbc->log_sz) & 1;
  -	u32 wq_sz = 1 << fbc->log_sz;
  +	u8 op_own = mlx5_cqwq_get_ctr_wrap_cnt(wq, cqcc) & 1;
  +	u32 wq_sz = mlx5_cqwq_get_size(wq);
  -	(cq->decmprs_wqe_counter + 1) & rq->wq.sz_m1;
  +	mlx5_wq_ll_ctr2ix(&rq->wq, cq->decmprs_wqe_counter + 1);
  -	u16 ci = be16_to_cpu(cqe->wqe_counter) & wq->sz_m1;
  +	u16 ci = mlx5_wq_cyc_ctr2ix(wq, be16_to_cpu(cqe->wqe_counter));
  -	u16 pi = sq->pc & wq->sz_m1;
  +	u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
  -	ci = sqcc & sq->wq.sz_m1;
  +	ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);

A new mlx5e_fill_icosq_frag_edge() helper pads the rest of a work-queue
fragment with NOPs, and mlx5e_alloc_rx_mpwqe() uses it so the UMR WQE never
wraps across two pages:
  +	static inline void mlx5e_fill_icosq_frag_edge(struct mlx5e_icosq *sq,
  +						      struct mlx5_wq_cyc *wq,
  +						      u16 pi, u16 frag_pi)
  +	{
  +		struct mlx5e_sq_wqe_info *edge_wi, *wi = &sq->db.ico_wqe[pi];
  +		u8 nnops = mlx5_wq_cyc_get_frag_size(wq) - frag_pi;
  +
  +		edge_wi = wi + nnops;
  +
  +		/* fill sq frag edge with nops to avoid wqe wrapping two pages */
  +		for (; wi < edge_wi; wi++) {
  +			wi->opcode = MLX5_OPCODE_NOP;
  +			mlx5e_post_nop(wq, sq->sqn, &sq->pc);
  +		}
  +	}
  -	/* fill sq edge with nops to avoid wqe wrap around */
  -	while ((pi = (sq->pc & wq->sz_m1)) > sq->edge) {
  -		sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_NOP;
  -		mlx5e_post_nop(wq, sq->sqn, &sq->pc);
  -	}
  +	pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
  +	frag_pi = mlx5_wq_cyc_ctr2fragix(wq, sq->pc);
  +	if (unlikely(frag_pi + MLX5E_UMR_WQEBBS > mlx5_wq_cyc_get_frag_size(wq))) {
  +		mlx5e_fill_icosq_frag_edge(sq, wq, pi, frag_pi);
  +		pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
  +	}

All RX counters are updated through the per-channel stats pointer, usually via
a local struct mlx5e_rq_stats *stats = rq->stats (page-cache hits/misses,
checksum, LRO, VLAN stripping, XDP, buff_alloc_err, wqe_err, mpwqe_filler,
cqe_compress_*, and the IPoIB completion path), for example:
  -	rq->stats.cache_full++;
  +	stats->cache_full++;
  -	rq->stats.packets++;
  -	rq->stats.bytes += cqe_bcnt;
  +	stats->packets++;
  +	stats->bytes += cqe_bcnt;
Statistics groups:

link_down_events_phy moves out of the SW group; the PHY group always reports
it and distinguishes it from the statistical counters:
  -	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, link_down_events_phy) },
  -	#define NUM_PPORT_PHY_COUNTERS ARRAY_SIZE(pport_phy_statistical_stats_desc)
  +	#define NUM_PPORT_PHY_STATISTICAL_COUNTERS ARRAY_SIZE(pport_phy_statistical_stats_desc)
  +	/* "1" for link_down_events special counter */
  +	return MLX5_CAP_PCAM_FEATURE((priv)->mdev, ppcnt_statistical_group) ?
  +		NUM_PPORT_PHY_STATISTICAL_COUNTERS + 1 : 1;
  +	strcpy(data + (idx++) * ETH_GSTRING_LEN, "link_down_events_phy");
  +	/* link_down_events_phy has special handling since it is not stored in __be64 format */
  +	data[idx++] = MLX5_GET(ppcnt_reg, priv->stats.pport.phy_counters,
  +			       counter_set.phys_layer_cntrs.link_down_events);

mlx5e_grp_sw_update_stats() is no longer static; it takes the stats read lock,
returns early when channels are inactive, and folds counters from the
persistent priv->channel_stats[] array (up to profile->max_nch() channels and
priv->max_opened_tc TCs) instead of walking the currently open channels:
  -	static void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
  +	void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
  -	for (i = 0; i < priv->channels.num; i++) {
  -		struct mlx5e_channel *c = priv->channels.c[i];
  -		rq_stats = &c->rq.stats;
  -		ch_stats = &c->stats;
  +	for (i = 0; i < priv->profile->max_nch(priv->mdev); i++) {
  +		struct mlx5e_channel_stats *channel_stats = &priv->channel_stats[i];
  +		struct mlx5e_rq_stats *rq_stats = &channel_stats->rq;
  +		struct mlx5e_ch_stats *ch_stats = &channel_stats->ch;
  -	for (j = 0; j < priv->channels.params.num_tc; j++) {
  -		sq_stats = &c->sq[j].stats;
  +	for (j = 0; j < priv->max_opened_tc; j++) {
  +		struct mlx5e_sq_stats *sq_stats = &channel_stats->sq[j];

The ethtool channels group sizes and fills its strings/values from the same
persistent array, and reports nothing when the interface has never been opened:
  +	if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
  +		return 0;
  -	return (NUM_RQ_STATS * priv->channels.num) +
  -	       (NUM_CH_STATS * priv->channels.num) +
  -	       (NUM_SQ_STATS * priv->channels.num * priv->channels.params.num_tc);
  +	return (NUM_RQ_STATS * max_nch) +
  +	       (NUM_CH_STATS * max_nch) +
  +	       (NUM_SQ_STATS * max_nch * priv->max_opened_tc);
  -	MLX5E_READ_CTR64_CPU(&channels->c[i]->rq.stats, rq_stats_desc, j);
  +	MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].rq, rq_stats_desc, j);

The SW group entry drops its NDO update mask and keeps only the direct
.update_stats = mlx5e_grp_sw_update_stats callback.
Statistics header:
  -	/* Special handling counters */
  -	u64 link_down_events_phy;		(removed from struct mlx5e_sw_stats)
  +	void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv);
TC offload (vport mirroring):

A flow can now own two HW rules, the second one being the post-mirror
forwarding rule:
  +	#define MLX5E_TC_MAX_SPLITS 1
  -	struct mlx5_flow_handle *rule;
  +	struct mlx5_flow_handle *rule[MLX5E_TC_MAX_SPLITS + 1];

All existing users (mlx5e_tc_del_nic_flow(), mlx5e_tc_update_neigh_used_value(),
mlx5e_configure_flower(), mlx5e_stats_flower(), the encap add/del paths) switch
to flow->rule[0], for example:
  -	counter = mlx5_flow_rule_counter(flow->rule);
  +	counter = mlx5_flow_rule_counter(flow->rule[0]);

parse_tc_fdb_actions() accepts the mirred mirror sub-action, records forwarding
destinations in arrays, snapshots mirror_count after every non-forward action,
and rejects unsupported combinations:
  -	if (is_tcf_mirred_egress_redirect(a)) {
  +	if (is_tcf_mirred_egress_redirect(a) || is_tcf_mirred_egress_mirror(a)) {
  +	if (attr->out_count >= MLX5_MAX_FLOW_FWD_VPORTS) {
  +		pr_err("can't support more than %d output ports, can't offload forwarding\n",
  +		       attr->out_count);
  +		return -EOPNOTSUPP;
  +	}
  -	attr->out_rep = rpriv->rep;
  -	attr->out_mdev = out_priv->mdev;
  +	attr->out_rep[attr->out_count] = rpriv->rep;
  +	attr->out_mdev[attr->out_count++] = out_priv->mdev;
  +	attr->mirror_count = attr->out_count;
  +	if (attr->out_count > 1 && !mlx5_esw_has_fwd_fdb(priv->mdev)) {
  +		netdev_warn_once(priv->netdev, "current firmware doesn't support split rule for port mirroring\n");
  +		return -EOPNOTSUPP;
  +	}

mlx5e_tc_add_fdb_flow(), mlx5e_tc_del_fdb_flow() and the encap restore paths
add, delete and re-add the second (forward) rule when the flow was split:
  +	if (attr->mirror_count) {
  +		flow->rule[1] = mlx5_eswitch_add_fwd_rule(esw, &parse_attr->spec, attr);
  +		if (IS_ERR(flow->rule[1]))
  +			goto err_fwd_rule;
  +	}
  +	if (attr->mirror_count)
  +		mlx5_eswitch_del_offloaded_rule(esw, flow->rule[1], attr);
  	mlx5_eswitch_del_offloaded_rule(esw, flow->rule[0], attr);
TX datapath:

Checksum and GSO counters go through the stats pointer, and the GSO header-size
calculation moves into a dedicated helper; mss and num_bytes are now computed
by the caller, and the tso_* counters are updated via a local
struct mlx5e_sq_stats *stats:
  -	sq->stats.csum_partial_inner++;		(likewise csum_partial, csum_none)
  +	sq->stats->csum_partial_inner++;
  -	mlx5e_txwqe_build_eseg_gso(struct mlx5e_txqsq *sq, struct sk_buff *skb,
  -				   struct mlx5_wqe_eth_seg *eseg, unsigned int *num_bytes)
  +	mlx5e_tx_get_gso_ihs(struct mlx5e_txqsq *sq, struct sk_buff *skb)

A new mlx5e_fill_sq_frag_edge() pads the rest of the current fragment with NOP
WQEs, replacing the post-WQE edge fill in mlx5e_txwqe_complete():
  +	static inline void mlx5e_fill_sq_frag_edge(struct mlx5e_txqsq *sq,
  +						   struct mlx5_wq_cyc *wq,
  +						   u16 pi, u16 frag_pi)
  +	{
  +		struct mlx5e_tx_wqe_info *edge_wi, *wi = &sq->db.wqe_info[pi];
  +		u8 nnops = mlx5_wq_cyc_get_frag_size(wq) - frag_pi;
  +
  +		edge_wi = wi + nnops;
  +
  +		/* fill sq frag edge with nops to avoid wqe wrapping two pages */
  +		for (; wi < edge_wi; wi++) {
  +			wi->skb = NULL;
  +			wi->num_wqebbs = 1;
  +			mlx5e_post_nop(wq, sq->sqn, &sq->pc);
  +		}
  +		sq->stats->nop += nnops;
  +	}
  -	/* fill sq edge with nops to avoid wqe wrap around */
  -	while ((pi = (sq->pc & wq->sz_m1)) > sq->edge) {
  -		sq->db.wqe_info[pi].skb = NULL;
  -		mlx5e_post_nop(wq, sq->sqn, &sq->pc);
  -		sq->stats.nop++;
  -	}

mlx5e_txwqe_complete() takes the WQEBB count from its caller, and mlx5e_sq_xmit()
is reorganized to compute ihs, ds_cnt, mss and num_bytes up front ("Calc ihs and
ds cnt, no writes to wqe yet") before touching the WQE, updating
packets/bytes/xmit_more through the stats pointer:
  -	u8 opcode, u16 ds_cnt, u32 num_bytes, u8 num_dma,
  +	u8 opcode, u16 ds_cnt, u8 num_wqebbs, u32 num_bytes, u8 num_dma,
  -	wi->num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
  +	wi->num_wqebbs = num_wqebbs;
  -	sq->stats.stopped++;
  +	sq->stats->stopped++;
  +	if (skb_is_gso(skb)) {
  +		opcode = MLX5_OPCODE_LSO;
  +		mss = cpu_to_be16(skb_shinfo(skb)->gso_size);
  +		ihs = mlx5e_tx_get_gso_ihs(sq, skb);
  +		num_bytes = skb->len + (skb_shinfo(skb)->gso_segs - 1) * ihs;
  +		stats->packets += skb_shinfo(skb)->gso_segs;
  +	} else {
  +		opcode = MLX5_OPCODE_SEND;
  +		mss = 0;
  +		ihs = mlx5e_calc_min_inline(sq->min_inline_mode, skb);
  +		num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
  +		stats->packets++;
  +	}
  +	stats->bytes += num_bytes;
  +	stats->xmit_more += skb->xmit_more;
|
||||
ihs += !!skb_vlan_tag_present(skb) * VLAN_HLEN;
|
||||
|
||||
ds_cnt_inl = DIV_ROUND_UP(ihs - INL_HDR_START_SZ, MLX5_SEND_WQE_DS);
|
||||
ds_cnt += ds_cnt_inl;
|
||||
}
|
||||
|
||||
num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
|
||||
frag_pi = mlx5_wq_cyc_ctr2fragix(wq, sq->pc);
|
||||
if (unlikely(frag_pi + num_wqebbs > mlx5_wq_cyc_get_frag_size(wq))) {
|
||||
mlx5e_fill_sq_frag_edge(sq, wq, pi, frag_pi);
|
||||
mlx5e_sq_fetch_wqe(sq, &wqe, &pi);
|
||||
}
|
||||
|
||||
/* fill wqe */
|
||||
wi = &sq->db.wqe_info[pi];
|
||||
cseg = &wqe->ctrl;
|
||||
eseg = &wqe->eth;
|
||||
dseg = wqe->data;
|
||||
|
||||
mlx5e_txwqe_build_eseg_csum(sq, skb, eseg);
|
||||
|
||||
if (skb_is_gso(skb)) {
|
||||
opcode = MLX5_OPCODE_LSO;
|
||||
ihs = mlx5e_txwqe_build_eseg_gso(sq, skb, eseg, &num_bytes);
|
||||
sq->stats.packets += skb_shinfo(skb)->gso_segs;
|
||||
} else {
|
||||
ihs = mlx5e_calc_min_inline(sq->min_inline_mode, skb);
|
||||
num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
|
||||
sq->stats.packets++;
|
||||
}
|
||||
sq->stats.bytes += num_bytes;
|
||||
sq->stats.xmit_more += skb->xmit_more;
|
||||
eseg->mss = mss;
|
||||
|
||||
ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;
|
||||
if (ihs) {
|
||||
if (skb_vlan_tag_present(skb)) {
|
||||
mlx5e_insert_vlan(eseg->inline_hdr.start, skb, ihs, &skb_data, &skb_len);
|
||||
ihs += VLAN_HLEN;
|
||||
sq->stats.added_vlan_packets++;
|
||||
mlx5e_insert_vlan(eseg->inline_hdr.start, skb,
|
||||
ihs - VLAN_HLEN, &skb_data, &skb_len);
|
||||
stats->added_vlan_packets++;
|
||||
} else {
|
||||
memcpy(eseg->inline_hdr.start, skb_data, ihs);
|
||||
mlx5e_tx_skb_pull_inline(&skb_data, &skb_len, ihs);
|
||||
}
|
||||
eseg->inline_hdr.sz = cpu_to_be16(ihs);
|
||||
ds_cnt += DIV_ROUND_UP(ihs - sizeof(eseg->inline_hdr.start), MLX5_SEND_WQE_DS);
|
||||
dseg += ds_cnt_inl;
|
||||
} else if (skb_vlan_tag_present(skb)) {
|
||||
eseg->insert.type = cpu_to_be16(MLX5_ETH_WQE_INSERT_VLAN);
|
||||
if (skb->vlan_proto == cpu_to_be16(ETH_P_8021AD))
|
||||
eseg->insert.type |= cpu_to_be16(MLX5_ETH_WQE_SVLAN);
|
||||
eseg->insert.vlan_tci = cpu_to_be16(skb_vlan_tag_get(skb));
|
||||
sq->stats.added_vlan_packets++;
|
||||
stats->added_vlan_packets++;
|
||||
}
|
||||
|
||||
headlen = skb_len - skb->data_len;
|
||||
num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb_data, headlen,
|
||||
(struct mlx5_wqe_data_seg *)cseg + ds_cnt);
|
||||
num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb_data, headlen, dseg);
|
||||
if (unlikely(num_dma < 0))
|
||||
goto err_drop;
|
||||
|
||||
mlx5e_txwqe_complete(sq, skb, opcode, ds_cnt + num_dma,
|
||||
num_bytes, num_dma, wi, cseg);
|
||||
mlx5e_txwqe_complete(sq, skb, opcode, ds_cnt, num_wqebbs, num_bytes,
|
||||
num_dma, wi, cseg);
|
||||
|
||||
return NETDEV_TX_OK;
|
||||
|
||||
err_drop:
|
||||
sq->stats.dropped++;
|
||||
stats->dropped++;
|
||||
dev_kfree_skb_any(skb);
|
||||
|
||||
return NETDEV_TX_OK;
|
||||
|
@ -485,7 +526,7 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
|
|||
queue_work(cq->channel->priv->wq,
|
||||
&sq->recover.recover_work);
|
||||
}
|
||||
sq->stats.cqe_err++;
|
||||
sq->stats->cqe_err++;
|
||||
}
|
||||
|
||||
do {
|
||||
|
@ -496,7 +537,7 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
|
|||
|
||||
last_wqe = (sqcc == wqe_counter);
|
||||
|
||||
ci = sqcc & sq->wq.sz_m1;
|
||||
ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
|
||||
wi = &sq->db.wqe_info[ci];
|
||||
skb = wi->skb;
|
||||
|
||||
|
@ -545,7 +586,7 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
|
|||
MLX5E_SQ_STOP_ROOM) &&
|
||||
!test_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state)) {
|
||||
netif_tx_wake_queue(sq->txq);
|
||||
sq->stats.wake++;
|
||||
sq->stats->wake++;
|
||||
}
|
||||
|
||||
return (i == MLX5E_TX_CQ_POLL_BUDGET);
|
||||
|
@ -559,7 +600,7 @@ void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq)
|
|||
int i;
|
||||
|
||||
while (sq->cc != sq->pc) {
|
||||
ci = sq->cc & sq->wq.sz_m1;
|
||||
ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->cc);
|
||||
wi = &sq->db.wqe_info[ci];
|
||||
skb = wi->skb;
|
||||
|
||||
|
@ -581,18 +622,6 @@ void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq)
|
|||
}
|
||||
|
||||
#ifdef CONFIG_MLX5_CORE_IPOIB
|
||||
|
||||
struct mlx5_wqe_eth_pad {
|
||||
u8 rsvd0[16];
|
||||
};
|
||||
|
||||
struct mlx5i_tx_wqe {
|
||||
struct mlx5_wqe_ctrl_seg ctrl;
|
||||
struct mlx5_wqe_datagram_seg datagram;
|
||||
struct mlx5_wqe_eth_pad pad;
|
||||
struct mlx5_wqe_eth_seg eth;
|
||||
};
|
||||
|
||||
static inline void
|
||||
mlx5i_txwqe_build_datagram(struct mlx5_av *av, u32 dqpn, u32 dqkey,
|
||||
struct mlx5_wqe_datagram_seg *dseg)
|
||||
|
@ -605,67 +634,94 @@ mlx5i_txwqe_build_datagram(struct mlx5_av *av, u32 dqpn, u32 dqkey,
|
|||
netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
|
||||
struct mlx5_av *av, u32 dqpn, u32 dqkey)
|
||||
{
|
||||
struct mlx5_wq_cyc *wq = &sq->wq;
|
||||
u16 pi = sq->pc & wq->sz_m1;
|
||||
struct mlx5i_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi);
|
||||
struct mlx5e_tx_wqe_info *wi = &sq->db.wqe_info[pi];
|
||||
struct mlx5_wq_cyc *wq = &sq->wq;
|
||||
struct mlx5i_tx_wqe *wqe;
|
||||
|
||||
struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
|
||||
struct mlx5_wqe_datagram_seg *datagram = &wqe->datagram;
|
||||
struct mlx5_wqe_eth_seg *eseg = &wqe->eth;
|
||||
struct mlx5_wqe_datagram_seg *datagram;
|
||||
struct mlx5_wqe_ctrl_seg *cseg;
|
||||
struct mlx5_wqe_eth_seg *eseg;
|
||||
struct mlx5_wqe_data_seg *dseg;
|
||||
struct mlx5e_tx_wqe_info *wi;
|
||||
|
||||
struct mlx5e_sq_stats *stats = sq->stats;
|
||||
unsigned char *skb_data = skb->data;
|
||||
unsigned int skb_len = skb->len;
|
||||
u8 opcode = MLX5_OPCODE_SEND;
|
||||
unsigned int num_bytes;
|
||||
u16 headlen, ihs, pi, frag_pi;
|
||||
u16 ds_cnt, ds_cnt_inl = 0;
|
||||
u8 num_wqebbs, opcode;
|
||||
u32 num_bytes;
|
||||
int num_dma;
|
||||
u16 headlen;
|
||||
u16 ds_cnt;
|
||||
u16 ihs;
|
||||
__be16 mss;
|
||||
|
||||
memset(wqe, 0, sizeof(*wqe));
|
||||
mlx5i_sq_fetch_wqe(sq, &wqe, &pi);
|
||||
|
||||
/* Calc ihs and ds cnt, no writes to wqe yet */
|
||||
ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;
|
||||
if (skb_is_gso(skb)) {
|
||||
opcode = MLX5_OPCODE_LSO;
|
||||
mss = cpu_to_be16(skb_shinfo(skb)->gso_size);
|
||||
ihs = mlx5e_tx_get_gso_ihs(sq, skb);
|
||||
num_bytes = skb->len + (skb_shinfo(skb)->gso_segs - 1) * ihs;
|
||||
stats->packets += skb_shinfo(skb)->gso_segs;
|
||||
} else {
|
||||
opcode = MLX5_OPCODE_SEND;
|
||||
mss = 0;
|
||||
ihs = mlx5e_calc_min_inline(sq->min_inline_mode, skb);
|
||||
num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
|
||||
stats->packets++;
|
||||
}
|
||||
|
||||
stats->bytes += num_bytes;
|
||||
stats->xmit_more += skb->xmit_more;
|
||||
|
||||
headlen = skb_len - ihs - skb->data_len;
|
||||
ds_cnt += !!headlen;
|
||||
ds_cnt += skb_shinfo(skb)->nr_frags;
|
||||
|
||||
if (ihs) {
|
||||
ds_cnt_inl = DIV_ROUND_UP(ihs - INL_HDR_START_SZ, MLX5_SEND_WQE_DS);
|
||||
ds_cnt += ds_cnt_inl;
|
||||
}
|
||||
|
||||
num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
|
||||
frag_pi = mlx5_wq_cyc_ctr2fragix(wq, sq->pc);
|
||||
if (unlikely(frag_pi + num_wqebbs > mlx5_wq_cyc_get_frag_size(wq))) {
|
||||
mlx5e_fill_sq_frag_edge(sq, wq, pi, frag_pi);
|
||||
mlx5i_sq_fetch_wqe(sq, &wqe, &pi);
|
||||
}
|
||||
|
||||
/* fill wqe */
|
||||
wi = &sq->db.wqe_info[pi];
|
||||
cseg = &wqe->ctrl;
|
||||
datagram = &wqe->datagram;
|
||||
eseg = &wqe->eth;
|
||||
dseg = wqe->data;
|
||||
|
||||
mlx5i_txwqe_build_datagram(av, dqpn, dqkey, datagram);
|
||||
|
||||
mlx5e_txwqe_build_eseg_csum(sq, skb, eseg);
|
||||
|
||||
if (skb_is_gso(skb)) {
|
||||
opcode = MLX5_OPCODE_LSO;
|
||||
ihs = mlx5e_txwqe_build_eseg_gso(sq, skb, eseg, &num_bytes);
|
||||
sq->stats.packets += skb_shinfo(skb)->gso_segs;
|
||||
} else {
|
||||
ihs = mlx5e_calc_min_inline(sq->min_inline_mode, skb);
|
||||
num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
|
||||
sq->stats.packets++;
|
||||
}
|
||||
eseg->mss = mss;
|
||||
|
||||
sq->stats.bytes += num_bytes;
|
||||
sq->stats.xmit_more += skb->xmit_more;
|
||||
|
||||
ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;
|
||||
if (ihs) {
|
||||
memcpy(eseg->inline_hdr.start, skb_data, ihs);
|
||||
mlx5e_tx_skb_pull_inline(&skb_data, &skb_len, ihs);
|
||||
eseg->inline_hdr.sz = cpu_to_be16(ihs);
|
||||
ds_cnt += DIV_ROUND_UP(ihs - sizeof(eseg->inline_hdr.start), MLX5_SEND_WQE_DS);
|
||||
dseg += ds_cnt_inl;
|
||||
}
|
||||
|
||||
headlen = skb_len - skb->data_len;
|
||||
num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb_data, headlen,
|
||||
(struct mlx5_wqe_data_seg *)cseg + ds_cnt);
|
||||
num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb_data, headlen, dseg);
|
||||
if (unlikely(num_dma < 0))
|
||||
goto err_drop;
|
||||
|
||||
mlx5e_txwqe_complete(sq, skb, opcode, ds_cnt + num_dma,
|
||||
num_bytes, num_dma, wi, cseg);
|
||||
mlx5e_txwqe_complete(sq, skb, opcode, ds_cnt, num_wqebbs, num_bytes,
|
||||
num_dma, wi, cseg);
|
||||
|
||||
return NETDEV_TX_OK;
|
||||
|
||||
err_drop:
|
||||
sq->stats.dropped++;
|
||||
stats->dropped++;
|
||||
dev_kfree_skb_any(skb);
|
||||
|
||||
return NETDEV_TX_OK;
|
||||
}
|
||||
|
||||
#endif
|
||||
|
|
|
@@ -46,24 +46,26 @@ static inline bool mlx5e_channel_no_affinity_change(struct mlx5e_channel *c)

static void mlx5e_handle_tx_dim(struct mlx5e_txqsq *sq)
{
struct mlx5e_sq_stats *stats = sq->stats;
struct net_dim_sample dim_sample;

if (unlikely(!test_bit(MLX5E_SQ_STATE_AM, &sq->state)))
return;

net_dim_sample(sq->cq.event_ctr, sq->stats.packets, sq->stats.bytes,
net_dim_sample(sq->cq.event_ctr, stats->packets, stats->bytes,
&dim_sample);
net_dim(&sq->dim, dim_sample);
}

static void mlx5e_handle_rx_dim(struct mlx5e_rq *rq)
{
struct mlx5e_rq_stats *stats = rq->stats;
struct net_dim_sample dim_sample;

if (unlikely(!test_bit(MLX5E_RQ_STATE_AM, &rq->state)))
return;

net_dim_sample(rq->cq.event_ctr, rq->stats.packets, rq->stats.bytes,
net_dim_sample(rq->cq.event_ctr, stats->packets, stats->bytes,
&dim_sample);
net_dim(&rq->dim, dim_sample);
}

@ -200,7 +200,7 @@ __esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u32 vport, bool rx_rule,
|
|||
spec->match_criteria_enable = match_header;
|
||||
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
|
||||
flow_rule =
|
||||
mlx5_add_flow_rules(esw->fdb_table.fdb, spec,
|
||||
mlx5_add_flow_rules(esw->fdb_table.legacy.fdb, spec,
|
||||
&flow_act, &dest, 1);
|
||||
if (IS_ERR(flow_rule)) {
|
||||
esw_warn(esw->dev,
|
||||
|
@ -282,7 +282,7 @@ static int esw_create_legacy_fdb_table(struct mlx5_eswitch *esw, int nvports)
|
|||
esw_warn(dev, "Failed to create FDB Table err %d\n", err);
|
||||
goto out;
|
||||
}
|
||||
esw->fdb_table.fdb = fdb;
|
||||
esw->fdb_table.legacy.fdb = fdb;
|
||||
|
||||
/* Addresses group : Full match unicast/multicast addresses */
|
||||
MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
|
||||
|
@ -343,9 +343,9 @@ out:
|
|||
mlx5_destroy_flow_group(esw->fdb_table.legacy.addr_grp);
|
||||
esw->fdb_table.legacy.addr_grp = NULL;
|
||||
}
|
||||
if (!IS_ERR_OR_NULL(esw->fdb_table.fdb)) {
|
||||
mlx5_destroy_flow_table(esw->fdb_table.fdb);
|
||||
esw->fdb_table.fdb = NULL;
|
||||
if (!IS_ERR_OR_NULL(esw->fdb_table.legacy.fdb)) {
|
||||
mlx5_destroy_flow_table(esw->fdb_table.legacy.fdb);
|
||||
esw->fdb_table.legacy.fdb = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -355,15 +355,15 @@ out:
|
|||
|
||||
static void esw_destroy_legacy_fdb_table(struct mlx5_eswitch *esw)
|
||||
{
|
||||
if (!esw->fdb_table.fdb)
|
||||
if (!esw->fdb_table.legacy.fdb)
|
||||
return;
|
||||
|
||||
esw_debug(esw->dev, "Destroy FDB Table\n");
|
||||
mlx5_destroy_flow_group(esw->fdb_table.legacy.promisc_grp);
|
||||
mlx5_destroy_flow_group(esw->fdb_table.legacy.allmulti_grp);
|
||||
mlx5_destroy_flow_group(esw->fdb_table.legacy.addr_grp);
|
||||
mlx5_destroy_flow_table(esw->fdb_table.fdb);
|
||||
esw->fdb_table.fdb = NULL;
|
||||
mlx5_destroy_flow_table(esw->fdb_table.legacy.fdb);
|
||||
esw->fdb_table.legacy.fdb = NULL;
|
||||
esw->fdb_table.legacy.addr_grp = NULL;
|
||||
esw->fdb_table.legacy.allmulti_grp = NULL;
|
||||
esw->fdb_table.legacy.promisc_grp = NULL;
|
||||
|
@ -396,7 +396,7 @@ static int esw_add_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
|
|||
|
||||
fdb_add:
|
||||
/* SRIOV is enabled: Forward UC MAC to vport */
|
||||
if (esw->fdb_table.fdb && esw->mode == SRIOV_LEGACY)
|
||||
if (esw->fdb_table.legacy.fdb && esw->mode == SRIOV_LEGACY)
|
||||
vaddr->flow_rule = esw_fdb_set_vport_rule(esw, mac, vport);
|
||||
|
||||
esw_debug(esw->dev, "\tADDED UC MAC: vport[%d] %pM fr(%p)\n",
|
||||
|
@ -486,7 +486,7 @@ static int esw_add_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
|
|||
u8 *mac = vaddr->node.addr;
|
||||
u32 vport = vaddr->vport;
|
||||
|
||||
if (!esw->fdb_table.fdb)
|
||||
if (!esw->fdb_table.legacy.fdb)
|
||||
return 0;
|
||||
|
||||
esw_mc = l2addr_hash_find(hash, mac, struct esw_mc_addr);
|
||||
|
@ -526,7 +526,7 @@ static int esw_del_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
|
|||
u8 *mac = vaddr->node.addr;
|
||||
u32 vport = vaddr->vport;
|
||||
|
||||
if (!esw->fdb_table.fdb)
|
||||
if (!esw->fdb_table.legacy.fdb)
|
||||
return 0;
|
||||
|
||||
esw_mc = l2addr_hash_find(hash, mac, struct esw_mc_addr);
|
||||
|
|
|
@@ -55,6 +55,9 @@
#define MLX5_RATE_TO_BW_SHARE(rate, divider, limit) \
min_t(u32, max_t(u32, (rate) / (divider), MLX5_MIN_BW_SHARE), limit)

#define mlx5_esw_has_fwd_fdb(dev) \
MLX5_CAP_ESW_FLOWTABLE(dev, fdb_multi_path_to_table)

struct vport_ingress {
struct mlx5_flow_table *acl;
struct mlx5_flow_group *allow_untagged_spoofchk_grp;

@@ -117,16 +120,18 @@ struct mlx5_vport {
};

struct mlx5_eswitch_fdb {
void *fdb;
union {
struct legacy_fdb {
struct mlx5_flow_table *fdb;
struct mlx5_flow_group *addr_grp;
struct mlx5_flow_group *allmulti_grp;
struct mlx5_flow_group *promisc_grp;
} legacy;

struct offloads_fdb {
struct mlx5_flow_table *fdb;
struct mlx5_flow_table *fast_fdb;
struct mlx5_flow_table *fwd_fdb;
struct mlx5_flow_table *slow_fdb;
struct mlx5_flow_group *send_to_vport_grp;
struct mlx5_flow_group *miss_grp;
struct mlx5_flow_handle *miss_rule_uni;

@@ -214,6 +219,10 @@ struct mlx5_flow_handle *
mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
struct mlx5_flow_spec *spec,
struct mlx5_esw_flow_attr *attr);
struct mlx5_flow_handle *
mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
struct mlx5_flow_spec *spec,
struct mlx5_esw_flow_attr *attr);
void
mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
struct mlx5_flow_handle *rule,

@@ -234,12 +243,18 @@ enum mlx5_flow_match_level {
MLX5_MATCH_L4 = MLX5_INLINE_MODE_TCP_UDP,
};

/* current maximum for flow based vport multicasting */
#define MLX5_MAX_FLOW_FWD_VPORTS 2

struct mlx5_esw_flow_attr {
struct mlx5_eswitch_rep *in_rep;
struct mlx5_eswitch_rep *out_rep;
struct mlx5_core_dev *out_mdev;
struct mlx5_eswitch_rep *out_rep[MLX5_MAX_FLOW_FWD_VPORTS];
struct mlx5_core_dev *out_mdev[MLX5_MAX_FLOW_FWD_VPORTS];
struct mlx5_core_dev *in_mdev;

int mirror_count;
int out_count;

int action;
__be16 vlan_proto;
u16 vlan_vid;

@ -48,16 +48,22 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
|
|||
struct mlx5_flow_spec *spec,
|
||||
struct mlx5_esw_flow_attr *attr)
|
||||
{
|
||||
struct mlx5_flow_destination dest[2] = {};
|
||||
struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {};
|
||||
struct mlx5_flow_act flow_act = {0};
|
||||
struct mlx5_flow_table *ft = NULL;
|
||||
struct mlx5_fc *counter = NULL;
|
||||
struct mlx5_flow_handle *rule;
|
||||
int j, i = 0;
|
||||
void *misc;
|
||||
int i = 0;
|
||||
|
||||
if (esw->mode != SRIOV_OFFLOADS)
|
||||
return ERR_PTR(-EOPNOTSUPP);
|
||||
|
||||
if (attr->mirror_count)
|
||||
ft = esw->fdb_table.offloads.fwd_fdb;
|
||||
else
|
||||
ft = esw->fdb_table.offloads.fast_fdb;
|
||||
|
||||
flow_act.action = attr->action;
|
||||
/* if per flow vlan pop/push is emulated, don't set that into the firmware */
|
||||
if (!mlx5_eswitch_vlan_actions_supported(esw->dev))
|
||||
|
@ -70,14 +76,14 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
|
|||
}
|
||||
|
||||
if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
|
||||
dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
|
||||
dest[i].vport.num = attr->out_rep->vport;
|
||||
if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) {
|
||||
for (j = attr->mirror_count; j < attr->out_count; j++) {
|
||||
dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
|
||||
dest[i].vport.num = attr->out_rep[j]->vport;
|
||||
dest[i].vport.vhca_id =
|
||||
MLX5_CAP_GEN(attr->out_mdev, vhca_id);
|
||||
dest[i].vport.vhca_id_valid = 1;
|
||||
MLX5_CAP_GEN(attr->out_mdev[j], vhca_id);
|
||||
dest[i].vport.vhca_id_valid = !!MLX5_CAP_ESW(esw->dev, merged_eswitch);
|
||||
i++;
|
||||
}
|
||||
i++;
|
||||
}
|
||||
if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
|
||||
counter = mlx5_fc_create(esw->dev, true);
|
||||
|
@ -119,8 +125,7 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
|
|||
if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_ENCAP)
|
||||
flow_act.encap_id = attr->encap_id;
|
||||
|
||||
rule = mlx5_add_flow_rules((struct mlx5_flow_table *)esw->fdb_table.fdb,
|
||||
spec, &flow_act, dest, i);
|
||||
rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, i);
|
||||
if (IS_ERR(rule))
|
||||
goto err_add_rule;
|
||||
else
|
||||
|
@ -134,6 +139,57 @@ err_counter_alloc:
|
|||
return rule;
|
||||
}
|
||||
|
||||
struct mlx5_flow_handle *
|
||||
mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
|
||||
struct mlx5_flow_spec *spec,
|
||||
struct mlx5_esw_flow_attr *attr)
|
||||
{
|
||||
struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {};
|
||||
struct mlx5_flow_act flow_act = {0};
|
||||
struct mlx5_flow_handle *rule;
|
||||
void *misc;
|
||||
int i;
|
||||
|
||||
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
|
||||
for (i = 0; i < attr->mirror_count; i++) {
|
||||
dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
|
||||
dest[i].vport.num = attr->out_rep[i]->vport;
|
||||
dest[i].vport.vhca_id =
|
||||
MLX5_CAP_GEN(attr->out_mdev[i], vhca_id);
|
||||
dest[i].vport.vhca_id_valid = !!MLX5_CAP_ESW(esw->dev, merged_eswitch);
|
||||
}
|
||||
dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
|
||||
dest[i].ft = esw->fdb_table.offloads.fwd_fdb,
|
||||
i++;
|
||||
|
||||
misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
|
||||
MLX5_SET(fte_match_set_misc, misc, source_port, attr->in_rep->vport);
|
||||
|
||||
if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
|
||||
MLX5_SET(fte_match_set_misc, misc,
|
||||
source_eswitch_owner_vhca_id,
|
||||
MLX5_CAP_GEN(attr->in_mdev, vhca_id));
|
||||
|
||||
misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
|
||||
MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
|
||||
if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
|
||||
MLX5_SET_TO_ONES(fte_match_set_misc, misc,
|
||||
source_eswitch_owner_vhca_id);
|
||||
|
||||
if (attr->match_level == MLX5_MATCH_NONE)
|
||||
spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
|
||||
else
|
||||
spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS |
|
||||
MLX5_MATCH_MISC_PARAMETERS;
|
||||
|
||||
rule = mlx5_add_flow_rules(esw->fdb_table.offloads.fast_fdb, spec, &flow_act, dest, i);
|
||||
|
||||
if (!IS_ERR(rule))
|
||||
esw->offloads.num_flows++;
|
||||
|
||||
return rule;
|
||||
}
|
||||
|
||||
void
|
||||
mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
|
||||
struct mlx5_flow_handle *rule,
|
||||
|
@ -173,7 +229,7 @@ esw_vlan_action_get_vport(struct mlx5_esw_flow_attr *attr, bool push, bool pop)
|
|||
struct mlx5_eswitch_rep *in_rep, *out_rep, *vport = NULL;
|
||||
|
||||
in_rep = attr->in_rep;
|
||||
out_rep = attr->out_rep;
|
||||
out_rep = attr->out_rep[0];
|
||||
|
||||
if (push)
|
||||
vport = in_rep;
|
||||
|
@ -194,7 +250,7 @@ static int esw_add_vlan_action_check(struct mlx5_esw_flow_attr *attr,
|
|||
goto out_notsupp;
|
||||
|
||||
in_rep = attr->in_rep;
|
||||
out_rep = attr->out_rep;
|
||||
out_rep = attr->out_rep[0];
|
||||
|
||||
if (push && in_rep->vport == FDB_UPLINK_VPORT)
|
||||
goto out_notsupp;
|
||||
|
@ -245,7 +301,7 @@ int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
|
|||
|
||||
if (!push && !pop && fwd) {
|
||||
/* tracks VF --> wire rules without vlan push action */
|
||||
if (attr->out_rep->vport == FDB_UPLINK_VPORT) {
|
||||
if (attr->out_rep[0]->vport == FDB_UPLINK_VPORT) {
|
||||
vport->vlan_refcount++;
|
||||
attr->vlan_handled = true;
|
||||
}
|
||||
|
@ -305,7 +361,7 @@ int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
|
|||
|
||||
if (!push && !pop && fwd) {
|
||||
/* tracks VF --> wire rules without vlan push action */
|
||||
if (attr->out_rep->vport == FDB_UPLINK_VPORT)
|
||||
if (attr->out_rep[0]->vport == FDB_UPLINK_VPORT)
|
||||
vport->vlan_refcount--;
|
||||
|
||||
return 0;
|
||||
|
@ -363,7 +419,7 @@ mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, int vport, u32 sqn
|
|||
dest.vport.num = vport;
|
||||
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
|
||||
|
||||
flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.fdb, spec,
|
||||
flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, spec,
|
||||
&flow_act, &dest, 1);
|
||||
if (IS_ERR(flow_rule))
|
||||
esw_warn(esw->dev, "FDB: Failed to add send to vport rule err %ld\n", PTR_ERR(flow_rule));
|
||||
|
@ -407,7 +463,7 @@ static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
|
|||
dest.vport.num = 0;
|
||||
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
|
||||
|
||||
flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.fdb, spec,
|
||||
flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, spec,
|
||||
&flow_act, &dest, 1);
|
||||
if (IS_ERR(flow_rule)) {
|
||||
err = PTR_ERR(flow_rule);
|
||||
|
@ -422,7 +478,7 @@ static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
|
|||
dmac_v = MLX5_ADDR_OF(fte_match_param, headers_v,
|
||||
outer_headers.dmac_47_16);
|
||||
dmac_v[0] = 0x01;
|
||||
flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.fdb, spec,
|
||||
flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, spec,
|
||||
&flow_act, &dest, 1);
|
||||
if (IS_ERR(flow_rule)) {
|
||||
err = PTR_ERR(flow_rule);
|
||||
|
@ -454,7 +510,7 @@ static int esw_create_offloads_fast_fdb_table(struct mlx5_eswitch *esw)
|
|||
if (!root_ns) {
|
||||
esw_warn(dev, "Failed to get FDB flow namespace\n");
|
||||
err = -EOPNOTSUPP;
|
||||
goto out;
|
||||
goto out_namespace;
|
||||
}
|
||||
|
||||
esw_debug(dev, "Create offloads FDB table, min (max esw size(2^%d), max counters(%d)*groups(%d))\n",
|
||||
|
@ -464,6 +520,9 @@ static int esw_create_offloads_fast_fdb_table(struct mlx5_eswitch *esw)
|
|||
esw_size = min_t(int, max_flow_counter * ESW_OFFLOADS_NUM_GROUPS,
|
||||
1 << MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));
|
||||
|
||||
if (mlx5_esw_has_fwd_fdb(dev))
|
||||
esw_size >>= 1;
|
||||
|
||||
if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
|
||||
flags |= MLX5_FLOW_TABLE_TUNNEL_EN;
|
||||
|
||||
|
@ -474,17 +533,37 @@ static int esw_create_offloads_fast_fdb_table(struct mlx5_eswitch *esw)
|
|||
if (IS_ERR(fdb)) {
|
||||
err = PTR_ERR(fdb);
|
||||
esw_warn(dev, "Failed to create Fast path FDB Table err %d\n", err);
|
||||
goto out;
|
||||
goto out_namespace;
|
||||
}
|
||||
esw->fdb_table.fdb = fdb;
|
||||
esw->fdb_table.offloads.fast_fdb = fdb;
|
||||
|
||||
out:
|
||||
if (!mlx5_esw_has_fwd_fdb(dev))
|
||||
goto out_namespace;
|
||||
|
||||
fdb = mlx5_create_auto_grouped_flow_table(root_ns, FDB_FAST_PATH,
|
||||
esw_size,
|
||||
ESW_OFFLOADS_NUM_GROUPS, 1,
|
||||
flags);
|
||||
if (IS_ERR(fdb)) {
|
||||
err = PTR_ERR(fdb);
|
||||
esw_warn(dev, "Failed to create fwd table err %d\n", err);
|
||||
goto out_ft;
|
||||
}
|
||||
esw->fdb_table.offloads.fwd_fdb = fdb;
|
||||
|
||||
return err;
|
||||
|
||||
out_ft:
|
||||
mlx5_destroy_flow_table(esw->fdb_table.offloads.fast_fdb);
|
||||
out_namespace:
|
||||
return err;
|
||||
}
|
||||
|
||||
static void esw_destroy_offloads_fast_fdb_table(struct mlx5_eswitch *esw)
|
||||
{
|
||||
mlx5_destroy_flow_table(esw->fdb_table.fdb);
|
||||
if (mlx5_esw_has_fwd_fdb(esw->dev))
|
||||
mlx5_destroy_flow_table(esw->fdb_table.offloads.fwd_fdb);
|
||||
mlx5_destroy_flow_table(esw->fdb_table.offloads.fast_fdb);
|
||||
}
|
||||
|
||||
#define MAX_PF_SQ 256
|
||||
|
@ -530,7 +609,7 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
|
|||
esw_warn(dev, "Failed to create slow path FDB Table err %d\n", err);
|
||||
goto slow_fdb_err;
|
||||
}
|
||||
esw->fdb_table.offloads.fdb = fdb;
|
||||
esw->fdb_table.offloads.slow_fdb = fdb;
|
||||
|
||||
/* create send-to-vport group */
|
||||
memset(flow_group_in, 0, inlen);
|
||||
|
@ -586,9 +665,9 @@ miss_rule_err:
|
|||
miss_err:
|
||||
mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
|
||||
send_vport_err:
|
||||
mlx5_destroy_flow_table(esw->fdb_table.offloads.fdb);
|
||||
mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
|
||||
slow_fdb_err:
|
||||
mlx5_destroy_flow_table(esw->fdb_table.fdb);
|
||||
esw_destroy_offloads_fast_fdb_table(esw);
|
||||
fast_fdb_err:
|
||||
ns_err:
|
||||
kvfree(flow_group_in);
|
||||
|
@ -597,7 +676,7 @@ ns_err:
|
|||
|
||||
static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
|
||||
{
|
||||
if (!esw->fdb_table.fdb)
|
||||
if (!esw->fdb_table.offloads.fast_fdb)
|
||||
return;
|
||||
|
||||
esw_debug(esw->dev, "Destroy offloads FDB Tables\n");
|
||||
|
@ -606,7 +685,7 @@ static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
|
|||
mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
|
||||
mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
|
||||
|
||||
mlx5_destroy_flow_table(esw->fdb_table.offloads.fdb);
|
||||
mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
|
||||
esw_destroy_offloads_fast_fdb_table(esw);
|
||||
}
|
||||
|
||||
|
|
|
@@ -454,7 +454,7 @@ static int mlx5_fpga_conn_create_cq(struct mlx5_fpga_conn *conn, int cq_size)
}

inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
sizeof(u64) * conn->cq.wq_ctrl.frag_buf.npages;
sizeof(u64) * conn->cq.wq_ctrl.buf.npages;
in = kvzalloc(inlen, GFP_KERNEL);
if (!in) {
err = -ENOMEM;

@@ -469,12 +469,12 @@ static int mlx5_fpga_conn_create_cq(struct mlx5_fpga_conn *conn, int cq_size)
MLX5_SET(cqc, cqc, log_cq_size, ilog2(cq_size));
MLX5_SET(cqc, cqc, c_eqn, eqn);
MLX5_SET(cqc, cqc, uar_page, fdev->conn_res.uar->index);
MLX5_SET(cqc, cqc, log_page_size, conn->cq.wq_ctrl.frag_buf.page_shift -
MLX5_SET(cqc, cqc, log_page_size, conn->cq.wq_ctrl.buf.page_shift -
MLX5_ADAPTER_PAGE_SHIFT);
MLX5_SET64(cqc, cqc, dbr_addr, conn->cq.wq_ctrl.db.dma);

pas = (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas);
mlx5_fill_page_frag_array(&conn->cq.wq_ctrl.frag_buf, pas);
mlx5_fill_page_frag_array(&conn->cq.wq_ctrl.buf, pas);

err = mlx5_core_create_cq(mdev, &conn->cq.mcq, in, inlen);
kvfree(in);

@@ -500,7 +500,7 @@ static int mlx5_fpga_conn_create_cq(struct mlx5_fpga_conn *conn, int cq_size)
goto out;

err_cqwq:
mlx5_cqwq_destroy(&conn->cq.wq_ctrl);
mlx5_wq_destroy(&conn->cq.wq_ctrl);
out:
return err;
}

@@ -510,7 +510,7 @@ static void mlx5_fpga_conn_destroy_cq(struct mlx5_fpga_conn *conn)
tasklet_disable(&conn->cq.tasklet);
tasklet_kill(&conn->cq.tasklet);
mlx5_core_destroy_cq(conn->fdev->mdev, &conn->cq.mcq);
mlx5_cqwq_destroy(&conn->cq.wq_ctrl);
mlx5_wq_destroy(&conn->cq.wq_ctrl);
}

static int mlx5_fpga_conn_create_wq(struct mlx5_fpga_conn *conn, void *qpc)

@@ -591,8 +591,8 @@ static int mlx5_fpga_conn_create_qp(struct mlx5_fpga_conn *conn,
if (MLX5_CAP_GEN(mdev, cqe_version) == 1)
MLX5_SET(qpc, qpc, user_index, 0xFFFFFF);

mlx5_fill_page_array(&conn->qp.wq_ctrl.buf,
(__be64 *)MLX5_ADDR_OF(create_qp_in, in, pas));
mlx5_fill_page_frag_array(&conn->qp.wq_ctrl.buf,
(__be64 *)MLX5_ADDR_OF(create_qp_in, in, pas));

err = mlx5_core_create_qp(mdev, &conn->qp.mqp, in, inlen);
if (err)

@@ -54,7 +54,7 @@ struct mlx5_fpga_conn {
/* CQ */
struct {
struct mlx5_cqwq wq;
struct mlx5_frag_wq_ctrl wq_ctrl;
struct mlx5_wq_ctrl wq_ctrl;
struct mlx5_core_cq mcq;
struct tasklet_struct tasklet;
} cq;

@@ -2495,7 +2495,7 @@ static int init_fdb_root_ns(struct mlx5_flow_steering *steering)
if (!steering->fdb_root_ns)
return -ENOMEM;

prio = fs_create_prio(&steering->fdb_root_ns->ns, 0, 1);
prio = fs_create_prio(&steering->fdb_root_ns->ns, 0, 2);
if (IS_ERR(prio))
goto out_err;

@@ -33,6 +33,8 @@
#ifndef __MLX5E_IPOB_H__
#define __MLX5E_IPOB_H__

#ifdef CONFIG_MLX5_CORE_IPOIB

#include <linux/mlx5/fs.h>
#include "en.h"

@@ -93,8 +95,32 @@ const struct mlx5e_profile *mlx5i_pkey_get_profile(void);
/* Extract mlx5e_priv from IPoIB netdev */
#define mlx5i_epriv(netdev) ((void *)(((struct mlx5i_priv *)netdev_priv(netdev))->mlx5e_priv))

struct mlx5_wqe_eth_pad {
u8 rsvd0[16];
};

struct mlx5i_tx_wqe {
struct mlx5_wqe_ctrl_seg ctrl;
struct mlx5_wqe_datagram_seg datagram;
struct mlx5_wqe_eth_pad pad;
struct mlx5_wqe_eth_seg eth;
struct mlx5_wqe_data_seg data[0];
};

static inline void mlx5i_sq_fetch_wqe(struct mlx5e_txqsq *sq,
struct mlx5i_tx_wqe **wqe,
u16 *pi)
{
struct mlx5_wq_cyc *wq = &sq->wq;

*pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
*wqe = mlx5_wq_cyc_get_wqe(wq, *pi);
memset(*wqe, 0, sizeof(**wqe));
}

netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
struct mlx5_av *av, u32 dqpn, u32 dqkey);
void mlx5i_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);

#endif /* CONFIG_MLX5_CORE_IPOIB */
#endif /* __MLX5E_IPOB_H__ */

@ -36,7 +36,12 @@
|
|||
|
||||
u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq)
|
||||
{
|
||||
return (u32)wq->sz_m1 + 1;
|
||||
return (u32)wq->fbc.sz_m1 + 1;
|
||||
}
|
||||
|
||||
u32 mlx5_wq_cyc_get_frag_size(struct mlx5_wq_cyc *wq)
|
||||
{
|
||||
return (u32)wq->fbc.frag_sz_m1 + 1;
|
||||
}
|
||||
|
||||
u32 mlx5_cqwq_get_size(struct mlx5_cqwq *wq)
|
||||
|
@ -46,12 +51,12 @@ u32 mlx5_cqwq_get_size(struct mlx5_cqwq *wq)
|
|||
|
||||
u32 mlx5_wq_ll_get_size(struct mlx5_wq_ll *wq)
|
||||
{
|
||||
return (u32)wq->sz_m1 + 1;
|
||||
return (u32)wq->fbc.sz_m1 + 1;
|
||||
}
|
||||
|
||||
static u32 mlx5_wq_cyc_get_byte_size(struct mlx5_wq_cyc *wq)
|
||||
{
|
||||
return mlx5_wq_cyc_get_size(wq) << wq->log_stride;
|
||||
return mlx5_wq_cyc_get_size(wq) << wq->fbc.log_stride;
|
||||
}
|
||||
|
||||
static u32 mlx5_wq_qp_get_byte_size(struct mlx5_wq_qp *wq)
|
||||
|
@ -67,17 +72,19 @@ static u32 mlx5_cqwq_get_byte_size(struct mlx5_cqwq *wq)
|
|||
|
||||
static u32 mlx5_wq_ll_get_byte_size(struct mlx5_wq_ll *wq)
|
||||
{
|
||||
return mlx5_wq_ll_get_size(wq) << wq->log_stride;
|
||||
return mlx5_wq_ll_get_size(wq) << wq->fbc.log_stride;
|
||||
}
|
||||
|
||||
int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
|
||||
void *wqc, struct mlx5_wq_cyc *wq,
|
||||
struct mlx5_wq_ctrl *wq_ctrl)
|
||||
{
|
||||
struct mlx5_frag_buf_ctrl *fbc = &wq->fbc;
|
||||
int err;
|
||||
|
||||
wq->log_stride = MLX5_GET(wq, wqc, log_wq_stride);
|
||||
wq->sz_m1 = (1 << MLX5_GET(wq, wqc, log_wq_sz)) - 1;
|
||||
mlx5_fill_fbc(MLX5_GET(wq, wqc, log_wq_stride),
|
||||
MLX5_GET(wq, wqc, log_wq_sz),
|
||||
fbc);
|
||||
|
||||
err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
|
||||
if (err) {
|
||||
|
@ -85,14 +92,14 @@ int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
|
|||
return err;
|
||||
}
|
||||
|
||||
err = mlx5_buf_alloc_node(mdev, mlx5_wq_cyc_get_byte_size(wq),
|
||||
&wq_ctrl->buf, param->buf_numa_node);
|
||||
err = mlx5_frag_buf_alloc_node(mdev, mlx5_wq_cyc_get_byte_size(wq),
|
||||
&wq_ctrl->buf, param->buf_numa_node);
|
||||
if (err) {
|
||||
mlx5_core_warn(mdev, "mlx5_buf_alloc_node() failed, %d\n", err);
|
||||
mlx5_core_warn(mdev, "mlx5_frag_buf_alloc_node() failed, %d\n", err);
|
||||
goto err_db_free;
|
||||
}
|
||||
|
||||
wq->buf = wq_ctrl->buf.frags->buf;
|
||||
fbc->frag_buf = wq_ctrl->buf;
|
||||
wq->db = wq_ctrl->db.db;
|
||||
|
||||
wq_ctrl->mdev = mdev;
|
||||
|
@ -105,17 +112,35 @@ err_db_free:
|
|||
return err;
|
||||
}
|
||||
|
||||
static void mlx5e_qp_set_frag_buf(struct mlx5_frag_buf *buf,
|
||||
struct mlx5_wq_qp *qp)
|
||||
{
|
||||
struct mlx5_frag_buf *rqb, *sqb;
|
||||
|
||||
rqb = &qp->rq.fbc.frag_buf;
|
||||
*rqb = *buf;
|
||||
rqb->size = mlx5_wq_cyc_get_byte_size(&qp->rq);
|
||||
rqb->npages = 1 << get_order(rqb->size);
|
||||
|
||||
sqb = &qp->sq.fbc.frag_buf;
|
||||
*sqb = *buf;
|
||||
sqb->size = mlx5_wq_cyc_get_byte_size(&qp->rq);
|
||||
sqb->npages = 1 << get_order(sqb->size);
|
||||
sqb->frags += rqb->npages; /* first part is for the rq */
|
||||
}
|
||||
|
||||
int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
|
||||
void *qpc, struct mlx5_wq_qp *wq,
|
||||
struct mlx5_wq_ctrl *wq_ctrl)
|
||||
{
|
||||
int err;
|
||||
|
||||
wq->rq.log_stride = MLX5_GET(qpc, qpc, log_rq_stride) + 4;
|
||||
wq->rq.sz_m1 = (1 << MLX5_GET(qpc, qpc, log_rq_size)) - 1;
|
||||
|
||||
wq->sq.log_stride = ilog2(MLX5_SEND_WQE_BB);
|
||||
wq->sq.sz_m1 = (1 << MLX5_GET(qpc, qpc, log_sq_size)) - 1;
|
||||
mlx5_fill_fbc(MLX5_GET(qpc, qpc, log_rq_stride) + 4,
|
||||
MLX5_GET(qpc, qpc, log_rq_size),
|
||||
&wq->rq.fbc);
|
||||
mlx5_fill_fbc(ilog2(MLX5_SEND_WQE_BB),
|
||||
MLX5_GET(qpc, qpc, log_sq_size),
|
||||
&wq->sq.fbc);
|
||||
|
||||
err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
|
||||
if (err) {
|
||||
|
@ -123,15 +148,15 @@ int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
|
|||
return err;
|
||||
}
|
||||
|
||||
err = mlx5_buf_alloc_node(mdev, mlx5_wq_qp_get_byte_size(wq),
|
||||
&wq_ctrl->buf, param->buf_numa_node);
|
||||
err = mlx5_frag_buf_alloc_node(mdev, mlx5_wq_qp_get_byte_size(wq),
|
||||
&wq_ctrl->buf, param->buf_numa_node);
|
||||
if (err) {
|
||||
mlx5_core_warn(mdev, "mlx5_buf_alloc_node() failed, %d\n", err);
|
||||
mlx5_core_warn(mdev, "mlx5_frag_buf_alloc_node() failed, %d\n", err);
|
||||
goto err_db_free;
|
||||
}
|
||||
|
||||
wq->rq.buf = wq_ctrl->buf.frags->buf;
|
||||
wq->sq.buf = wq->rq.buf + mlx5_wq_cyc_get_byte_size(&wq->rq);
|
||||
mlx5e_qp_set_frag_buf(&wq_ctrl->buf, wq);
|
||||
|
||||
wq->rq.db = &wq_ctrl->db.db[MLX5_RCV_DBR];
|
||||
wq->sq.db = &wq_ctrl->db.db[MLX5_SND_DBR];
|
||||
|
||||
|
@ -147,7 +172,7 @@ err_db_free:
|
|||
|
||||
int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
|
||||
void *cqc, struct mlx5_cqwq *wq,
|
||||
struct mlx5_frag_wq_ctrl *wq_ctrl)
|
||||
struct mlx5_wq_ctrl *wq_ctrl)
|
||||
{
|
||||
int err;
|
||||
|
||||
|
@ -160,7 +185,7 @@ int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
|
|||
}
|
||||
|
||||
err = mlx5_frag_buf_alloc_node(mdev, mlx5_cqwq_get_byte_size(wq),
|
||||
&wq_ctrl->frag_buf,
|
||||
&wq_ctrl->buf,
|
||||
param->buf_numa_node);
|
||||
if (err) {
|
||||
mlx5_core_warn(mdev, "mlx5_frag_buf_alloc_node() failed, %d\n",
|
||||
|
@ -168,7 +193,7 @@ int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
|
|||
goto err_db_free;
|
||||
}
|
||||
|
||||
wq->fbc.frag_buf = wq_ctrl->frag_buf;
|
||||
wq->fbc.frag_buf = wq_ctrl->buf;
|
||||
wq->db = wq_ctrl->db.db;
|
||||
|
||||
wq_ctrl->mdev = mdev;
|
||||
|
@ -185,12 +210,14 @@ int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
|
|||
void *wqc, struct mlx5_wq_ll *wq,
|
||||
struct mlx5_wq_ctrl *wq_ctrl)
|
||||
{
|
||||
struct mlx5_frag_buf_ctrl *fbc = &wq->fbc;
|
||||
struct mlx5_wqe_srq_next_seg *next_seg;
|
||||
int err;
|
||||
int i;
|
||||
|
||||
wq->log_stride = MLX5_GET(wq, wqc, log_wq_stride);
|
||||
wq->sz_m1 = (1 << MLX5_GET(wq, wqc, log_wq_sz)) - 1;
|
||||
mlx5_fill_fbc(MLX5_GET(wq, wqc, log_wq_stride),
|
||||
MLX5_GET(wq, wqc, log_wq_sz),
|
||||
fbc);
|
||||
|
||||
err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
|
||||
if (err) {
|
||||
|
@ -198,17 +225,17 @@ int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
|
|||
return err;
|
||||
}
|
||||
|
||||
err = mlx5_buf_alloc_node(mdev, mlx5_wq_ll_get_byte_size(wq),
|
||||
&wq_ctrl->buf, param->buf_numa_node);
|
||||
err = mlx5_frag_buf_alloc_node(mdev, mlx5_wq_ll_get_byte_size(wq),
|
||||
&wq_ctrl->buf, param->buf_numa_node);
|
||||
if (err) {
|
||||
mlx5_core_warn(mdev, "mlx5_buf_alloc_node() failed, %d\n", err);
|
||||
mlx5_core_warn(mdev, "mlx5_frag_buf_alloc_node() failed, %d\n", err);
|
||||
goto err_db_free;
|
||||
}
|
||||
|
||||
wq->buf = wq_ctrl->buf.frags->buf;
|
||||
wq->fbc.frag_buf = wq_ctrl->buf;
|
||||
wq->db = wq_ctrl->db.db;
|
||||
|
||||
for (i = 0; i < wq->sz_m1; i++) {
|
||||
for (i = 0; i < fbc->sz_m1; i++) {
|
||||
next_seg = mlx5_wq_ll_get_wqe(wq, i);
|
||||
next_seg->next_wqe_index = cpu_to_be16(i + 1);
|
||||
}
|
||||
|
@ -227,12 +254,7 @@ err_db_free:
|
|||
|
||||
void mlx5_wq_destroy(struct mlx5_wq_ctrl *wq_ctrl)
|
||||
{
|
||||
mlx5_buf_free(wq_ctrl->mdev, &wq_ctrl->buf);
|
||||
mlx5_frag_buf_free(wq_ctrl->mdev, &wq_ctrl->buf);
|
||||
mlx5_db_free(wq_ctrl->mdev, &wq_ctrl->db);
|
||||
}
|
||||
|
||||
void mlx5_cqwq_destroy(struct mlx5_frag_wq_ctrl *wq_ctrl)
|
||||
{
|
||||
mlx5_frag_buf_free(wq_ctrl->mdev, &wq_ctrl->frag_buf);
|
||||
mlx5_db_free(wq_ctrl->mdev, &wq_ctrl->db);
|
||||
}
|
||||
|
|
|
@ -48,17 +48,9 @@ struct mlx5_wq_ctrl {
|
|||
struct mlx5_db db;
|
||||
};
|
||||
|
||||
struct mlx5_frag_wq_ctrl {
|
||||
struct mlx5_core_dev *mdev;
|
||||
struct mlx5_frag_buf frag_buf;
|
||||
struct mlx5_db db;
|
||||
};
|
||||
|
||||
struct mlx5_wq_cyc {
|
||||
void *buf;
|
||||
struct mlx5_frag_buf_ctrl fbc;
|
||||
__be32 *db;
|
||||
u16 sz_m1;
|
||||
u8 log_stride;
|
||||
};
|
||||
|
||||
struct mlx5_wq_qp {
|
||||
|
@ -73,20 +65,19 @@ struct mlx5_cqwq {
|
|||
};
|
||||
|
||||
struct mlx5_wq_ll {
|
||||
void *buf;
|
||||
struct mlx5_frag_buf_ctrl fbc;
|
||||
__be32 *db;
|
||||
__be16 *tail_next;
|
||||
u16 sz_m1;
|
||||
u16 head;
|
||||
u16 wqe_ctr;
|
||||
u16 cur_sz;
|
||||
u8 log_stride;
|
||||
};
|
||||
|
||||
int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
|
||||
void *wqc, struct mlx5_wq_cyc *wq,
|
||||
struct mlx5_wq_ctrl *wq_ctrl);
|
||||
u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq);
|
||||
u32 mlx5_wq_cyc_get_frag_size(struct mlx5_wq_cyc *wq);
|
||||
|
||||
int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
|
||||
void *qpc, struct mlx5_wq_qp *wq,
|
||||
|
@ -94,7 +85,7 @@ int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
|
|||
|
||||
int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
|
||||
void *cqc, struct mlx5_cqwq *wq,
|
||||
struct mlx5_frag_wq_ctrl *wq_ctrl);
|
||||
struct mlx5_wq_ctrl *wq_ctrl);
|
||||
u32 mlx5_cqwq_get_size(struct mlx5_cqwq *wq);
|
||||
|
||||
int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
|
||||
|
@ -103,16 +94,20 @@ int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
|
|||
u32 mlx5_wq_ll_get_size(struct mlx5_wq_ll *wq);
|
||||
|
||||
void mlx5_wq_destroy(struct mlx5_wq_ctrl *wq_ctrl);
|
||||
void mlx5_cqwq_destroy(struct mlx5_frag_wq_ctrl *wq_ctrl);
|
||||
|
||||
static inline u16 mlx5_wq_cyc_ctr2ix(struct mlx5_wq_cyc *wq, u16 ctr)
|
||||
{
|
||||
return ctr & wq->sz_m1;
|
||||
return ctr & wq->fbc.sz_m1;
|
||||
}
|
||||
|
||||
static inline u16 mlx5_wq_cyc_ctr2fragix(struct mlx5_wq_cyc *wq, u16 ctr)
|
||||
{
|
||||
return ctr & wq->fbc.frag_sz_m1;
|
||||
}
|
||||
|
||||
static inline void *mlx5_wq_cyc_get_wqe(struct mlx5_wq_cyc *wq, u16 ix)
|
||||
{
|
||||
return wq->buf + (ix << wq->log_stride);
|
||||
return mlx5_frag_buf_get_wqe(&wq->fbc, ix);
|
||||
}
|
||||
|
||||
static inline int mlx5_wq_cyc_cc_bigger(u16 cc1, u16 cc2)
|
||||
|
@ -123,9 +118,14 @@ static inline int mlx5_wq_cyc_cc_bigger(u16 cc1, u16 cc2)
|
|||
return !equal && !smaller;
|
||||
}
|
||||
|
||||
static inline u32 mlx5_cqwq_ctr2ix(struct mlx5_cqwq *wq, u32 ctr)
|
||||
{
|
||||
return ctr & wq->fbc.sz_m1;
|
||||
}
|
||||
|
||||
static inline u32 mlx5_cqwq_get_ci(struct mlx5_cqwq *wq)
|
||||
{
|
||||
return wq->cc & wq->fbc.sz_m1;
|
||||
return mlx5_cqwq_ctr2ix(wq, wq->cc);
|
||||
}
|
||||
|
||||
static inline void *mlx5_cqwq_get_wqe(struct mlx5_cqwq *wq, u32 ix)
|
||||
|
@ -133,9 +133,14 @@ static inline void *mlx5_cqwq_get_wqe(struct mlx5_cqwq *wq, u32 ix)
|
|||
return mlx5_frag_buf_get_wqe(&wq->fbc, ix);
|
||||
}
|
||||
|
||||
static inline u32 mlx5_cqwq_get_ctr_wrap_cnt(struct mlx5_cqwq *wq, u32 ctr)
|
||||
{
|
||||
return ctr >> wq->fbc.log_sz;
|
||||
}
|
||||
|
||||
static inline u32 mlx5_cqwq_get_wrap_cnt(struct mlx5_cqwq *wq)
|
||||
{
|
||||
return wq->cc >> wq->fbc.log_sz;
|
||||
return mlx5_cqwq_get_ctr_wrap_cnt(wq, wq->cc);
|
||||
}
|
||||
|
||||
static inline void mlx5_cqwq_pop(struct mlx5_cqwq *wq)
|
||||
|
@ -166,7 +171,7 @@ static inline struct mlx5_cqe64 *mlx5_cqwq_get_cqe(struct mlx5_cqwq *wq)
|
|||
|
||||
static inline int mlx5_wq_ll_is_full(struct mlx5_wq_ll *wq)
|
||||
{
|
||||
return wq->cur_sz == wq->sz_m1;
|
||||
return wq->cur_sz == wq->fbc.sz_m1;
|
||||
}
|
||||
|
||||
static inline int mlx5_wq_ll_is_empty(struct mlx5_wq_ll *wq)
|
||||
|
@ -174,9 +179,14 @@ static inline int mlx5_wq_ll_is_empty(struct mlx5_wq_ll *wq)
|
|||
return !wq->cur_sz;
|
||||
}
|
||||
|
||||
static inline u16 mlx5_wq_ll_ctr2ix(struct mlx5_wq_ll *wq, u16 ctr)
|
||||
{
|
||||
return ctr & wq->fbc.sz_m1;
|
||||
}
|
||||
|
||||
static inline void *mlx5_wq_ll_get_wqe(struct mlx5_wq_ll *wq, u16 ix)
|
||||
{
|
||||
return wq->buf + (ix << wq->log_stride);
|
||||
return mlx5_frag_buf_get_wqe(&wq->fbc, ix);
|
||||
}
|
||||
|
||||
static inline void mlx5_wq_ll_push(struct mlx5_wq_ll *wq, u16 head_next)
|
||||
|
|
|
@@ -983,16 +983,24 @@ static inline u32 mlx5_base_mkey(const u32 key)
return key & 0xffffff00u;
}

static inline void mlx5_core_init_cq_frag_buf(struct mlx5_frag_buf_ctrl *fbc,
void *cqc)
static inline void mlx5_fill_fbc(u8 log_stride, u8 log_sz,
struct mlx5_frag_buf_ctrl *fbc)
{
fbc->log_stride = 6 + MLX5_GET(cqc, cqc, cqe_sz);
fbc->log_sz = MLX5_GET(cqc, cqc, log_cq_size);
fbc->log_stride = log_stride;
fbc->log_sz = log_sz;
fbc->sz_m1 = (1 << fbc->log_sz) - 1;
fbc->log_frag_strides = PAGE_SHIFT - fbc->log_stride;
fbc->frag_sz_m1 = (1 << fbc->log_frag_strides) - 1;
}

static inline void mlx5_core_init_cq_frag_buf(struct mlx5_frag_buf_ctrl *fbc,
void *cqc)
{
mlx5_fill_fbc(6 + MLX5_GET(cqc, cqc, cqe_sz),
MLX5_GET(cqc, cqc, log_cq_size),
fbc);
}

static inline void *mlx5_frag_buf_get_wqe(struct mlx5_frag_buf_ctrl *fbc,
u32 ix)
{

@@ -524,7 +524,9 @@ struct mlx5_ifc_flow_table_nic_cap_bits {
};

struct mlx5_ifc_flow_table_eswitch_cap_bits {
u8 reserved_at_0[0x200];
u8 reserved_at_0[0x1c];
u8 fdb_multi_path_to_table[0x1];
u8 reserved_at_1d[0x1e3];

struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_esw_fdb;
