From 4cbeaff54f00f39493c4251bf115d02e26ac8bf2 Mon Sep 17 00:00:00 2001 From: Achiad Shochat Date: Tue, 4 Aug 2015 14:05:40 +0300 Subject: [PATCH 1/8] net/mlx5e: Unify the RX flow Generally an RX packet flows through the following objects: Flow table --> TIR --> RQT --> RQ Where: - TIR stands for "Transport Interface Receive", defining the RSS and LRO paramaters. - RQT stands for "RQ Table", implementing the RSS indirection table. - RQ stands for "Receive Queue" For flows that do not need LRO, nor RSS, the driver made a shortcut to the above RX flow by pointing to the RQ directly from the TIR, yielding this flow: Flow table --> TIR --> RQ In this commit we remove this shortcut by "inserting" a single-RQ RQT between the TIR and the RQ, i.e RX packets will reach the same RQ but will go through an RQT of size 1, pointing to just a single RQ. This way the RX traffic re-direction to/from the "Drop RQ" will be more uniform (AKA "one flow"), as it will involve only RQTs re-direction and no TIRs re-direction. Signed-off-by: Achiad Shochat Signed-off-by: Amir Vadai Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx5/core/en.h | 10 +- .../net/ethernet/mellanox/mlx5/core/en_main.c | 93 +++++++++++++------ 2 files changed, 69 insertions(+), 34 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 45f6dc75c0df..af5791296584 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -344,10 +344,10 @@ enum mlx5e_traffic_types { MLX5E_NUM_TT, }; -enum { - MLX5E_RQT_SPREADING = 0, - MLX5E_RQT_DEFAULT_RQ = 1, - MLX5E_NUM_RQT = 2, +enum mlx5e_rqt_ix { + MLX5E_INDIRECTION_RQT, + MLX5E_SINGLE_RQ_RQT, + MLX5E_NUM_RQT, }; struct mlx5e_eth_addr_info { @@ -402,7 +402,7 @@ struct mlx5e_priv { struct mlx5e_channel **channel; u32 tisn[MLX5E_MAX_NUM_TC]; - u32 rqtn; + u32 rqtn[MLX5E_NUM_RQT]; u32 tirn[MLX5E_NUM_TT]; struct mlx5e_flow_table ft; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index bb815893d3a8..333c828c56da 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -1184,16 +1184,49 @@ static int mlx5e_bits_invert(unsigned long a, int size) return inv; } -static int mlx5e_open_rqt(struct mlx5e_priv *priv) +static void mlx5e_fill_rqt_rqns(struct mlx5e_priv *priv, void *rqtc, + enum mlx5e_rqt_ix rqt_ix) +{ + int i; + int log_sz; + + switch (rqt_ix) { + case MLX5E_INDIRECTION_RQT: + log_sz = priv->params.rx_hash_log_tbl_sz; + for (i = 0; i < (1 << log_sz); i++) { + int ix = i; + + if (priv->params.rss_hfunc == ETH_RSS_HASH_XOR) + ix = mlx5e_bits_invert(i, log_sz); + + ix = ix % priv->params.num_channels; + MLX5_SET(rqtc, rqtc, rq_num[i], + priv->channel[ix]->rq.rqn); + } + + break; + + default: /* MLX5E_SINGLE_RQ_RQT */ + MLX5_SET(rqtc, rqtc, rq_num[0], + priv->channel[0]->rq.rqn); + + break; + } +} + +static int mlx5e_open_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix) { struct mlx5_core_dev *mdev = priv->mdev; u32 *in; void *rqtc; int inlen; + int log_sz; + int sz; int err; - int log_tbl_sz = priv->params.rx_hash_log_tbl_sz; - int sz = 1 << log_tbl_sz; - int i; + + log_sz = (rqt_ix == MLX5E_SINGLE_RQ_RQT) ? 
0 : + priv->params.rx_hash_log_tbl_sz; + sz = 1 << log_sz; inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz; in = mlx5_vzalloc(inlen); @@ -1205,26 +1238,18 @@ static int mlx5e_open_rqt(struct mlx5e_priv *priv) MLX5_SET(rqtc, rqtc, rqt_actual_size, sz); MLX5_SET(rqtc, rqtc, rqt_max_size, sz); - for (i = 0; i < sz; i++) { - int ix = i; + mlx5e_fill_rqt_rqns(priv, rqtc, rqt_ix); - if (priv->params.rss_hfunc == ETH_RSS_HASH_XOR) - ix = mlx5e_bits_invert(i, log_tbl_sz); - - ix = ix % priv->params.num_channels; - MLX5_SET(rqtc, rqtc, rq_num[i], priv->channel[ix]->rq.rqn); - } - - err = mlx5_core_create_rqt(mdev, in, inlen, &priv->rqtn); + err = mlx5_core_create_rqt(mdev, in, inlen, &priv->rqtn[rqt_ix]); kvfree(in); return err; } -static void mlx5e_close_rqt(struct mlx5e_priv *priv) +static void mlx5e_close_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix) { - mlx5_core_destroy_rqt(priv->mdev, priv->rqtn); + mlx5_core_destroy_rqt(priv->mdev, priv->rqtn[rqt_ix]); } static void mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, int tt) @@ -1259,18 +1284,17 @@ static void mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, int tt) lro_timer_supported_periods[3])); } + MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT); + switch (tt) { case MLX5E_TT_ANY: - MLX5_SET(tirc, tirc, disp_type, - MLX5_TIRC_DISP_TYPE_DIRECT); - MLX5_SET(tirc, tirc, inline_rqn, - priv->channel[0]->rq.rqn); + MLX5_SET(tirc, tirc, indirect_table, + priv->rqtn[MLX5E_SINGLE_RQ_RQT]); + MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_INVERTED_XOR8); break; default: - MLX5_SET(tirc, tirc, disp_type, - MLX5_TIRC_DISP_TYPE_INDIRECT); MLX5_SET(tirc, tirc, indirect_table, - priv->rqtn); + priv->rqtn[MLX5E_INDIRECTION_RQT]); MLX5_SET(tirc, tirc, rx_hash_fn, mlx5e_rx_hash_fn(priv->params.rss_hfunc)); if (priv->params.rss_hfunc == ETH_RSS_HASH_TOP) { @@ -1472,18 +1496,25 @@ int mlx5e_open_locked(struct net_device *netdev) goto err_close_tises; } - err = mlx5e_open_rqt(priv); + err = mlx5e_open_rqt(priv, MLX5E_INDIRECTION_RQT); if (err) { - netdev_err(netdev, "%s: mlx5e_open_rqt failed, %d\n", + netdev_err(netdev, "%s: mlx5e_open_rqt(INDIR) failed, %d\n", __func__, err); goto err_close_channels; } + err = mlx5e_open_rqt(priv, MLX5E_SINGLE_RQ_RQT); + if (err) { + netdev_err(netdev, "%s: mlx5e_open_rqt(SINGLE) failed, %d\n", + __func__, err); + goto err_close_rqt_indir; + } + err = mlx5e_open_tirs(priv); if (err) { netdev_err(netdev, "%s: mlx5e_open_tir failed, %d\n", __func__, err); - goto err_close_rqls; + goto err_close_rqt_single; } err = mlx5e_open_flow_table(priv); @@ -1516,8 +1547,11 @@ err_close_flow_table: err_close_tirs: mlx5e_close_tirs(priv); -err_close_rqls: - mlx5e_close_rqt(priv); +err_close_rqt_single: + mlx5e_close_rqt(priv, MLX5E_SINGLE_RQ_RQT); + +err_close_rqt_indir: + mlx5e_close_rqt(priv, MLX5E_INDIRECTION_RQT); err_close_channels: mlx5e_close_channels(priv); @@ -1551,7 +1585,8 @@ int mlx5e_close_locked(struct net_device *netdev) netif_carrier_off(priv->netdev); mlx5e_close_flow_table(priv); mlx5e_close_tirs(priv); - mlx5e_close_rqt(priv); + mlx5e_close_rqt(priv, MLX5E_SINGLE_RQ_RQT); + mlx5e_close_rqt(priv, MLX5E_INDIRECTION_RQT); mlx5e_close_channels(priv); mlx5e_close_tises(priv); From 50cfa25aba67c658979c5a3188d514ee6780364b Mon Sep 17 00:00:00 2001 From: Achiad Shochat Date: Tue, 4 Aug 2015 14:05:41 +0300 Subject: [PATCH 2/8] net/mlx5e: Introduce the "Drop RQ" RX traffic routed to this RQ will be silently dropped, at the NIC HW level. 
This is in preparation for netdev "light-weight" open/stop flow change described in previous commit. Signed-off-by: Achiad Shochat Signed-off-by: Amir Vadai Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx5/core/en.h | 3 + .../net/ethernet/mellanox/mlx5/core/en_main.c | 122 ++++++++++++++++-- 2 files changed, 114 insertions(+), 11 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index af5791296584..31e9610926fe 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -217,6 +217,7 @@ struct mlx5e_cq { struct napi_struct *napi; struct mlx5_core_cq mcq; struct mlx5e_channel *channel; + struct mlx5e_priv *priv; /* control */ struct mlx5_wq_ctrl wq_ctrl; @@ -240,6 +241,7 @@ struct mlx5e_rq { struct mlx5_wq_ctrl wq_ctrl; u32 rqn; struct mlx5e_channel *channel; + struct mlx5e_priv *priv; } ____cacheline_aligned_in_smp; struct mlx5e_tx_skb_cb { @@ -399,6 +401,7 @@ struct mlx5e_priv { u32 pdn; u32 tdn; struct mlx5_core_mr mr; + struct mlx5e_rq drop_rq; struct mlx5e_channel **channel; u32 tisn[MLX5E_MAX_NUM_TC]; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 333c828c56da..baa7a69bb694 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -307,6 +307,7 @@ static int mlx5e_create_rq(struct mlx5e_channel *c, rq->netdev = c->netdev; rq->channel = c; rq->ix = c->ix; + rq->priv = c->priv; return 0; @@ -324,8 +325,7 @@ static void mlx5e_destroy_rq(struct mlx5e_rq *rq) static int mlx5e_enable_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param) { - struct mlx5e_channel *c = rq->channel; - struct mlx5e_priv *priv = c->priv; + struct mlx5e_priv *priv = rq->priv; struct mlx5_core_dev *mdev = priv->mdev; void *in; @@ -392,11 +392,7 @@ static int mlx5e_modify_rq(struct mlx5e_rq *rq, int curr_state, int next_state) static void mlx5e_disable_rq(struct mlx5e_rq *rq) { - struct mlx5e_channel *c = rq->channel; - struct mlx5e_priv *priv = c->priv; - struct mlx5_core_dev *mdev = priv->mdev; - - mlx5_core_destroy_rq(mdev, rq->rqn); + mlx5_core_destroy_rq(rq->priv->mdev, rq->rqn); } static int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq) @@ -740,6 +736,7 @@ static int mlx5e_create_cq(struct mlx5e_channel *c, } cq->channel = c; + cq->priv = priv; return 0; } @@ -751,8 +748,7 @@ static void mlx5e_destroy_cq(struct mlx5e_cq *cq) static int mlx5e_enable_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param) { - struct mlx5e_channel *c = cq->channel; - struct mlx5e_priv *priv = c->priv; + struct mlx5e_priv *priv = cq->priv; struct mlx5_core_dev *mdev = priv->mdev; struct mlx5_core_cq *mcq = &cq->mcq; @@ -798,8 +794,7 @@ static int mlx5e_enable_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param) static void mlx5e_disable_cq(struct mlx5e_cq *cq) { - struct mlx5e_channel *c = cq->channel; - struct mlx5e_priv *priv = c->priv; + struct mlx5e_priv *priv = cq->priv; struct mlx5_core_dev *mdev = priv->mdev; mlx5_core_destroy_cq(mdev, &cq->mcq); @@ -1119,6 +1114,111 @@ static void mlx5e_close_channels(struct mlx5e_priv *priv) kfree(priv->channel); } +static int mlx5e_create_drop_rq(struct mlx5e_priv *priv, + struct mlx5e_rq *rq, + struct mlx5e_rq_param *param) +{ + struct mlx5_core_dev *mdev = priv->mdev; + void *rqc = param->rqc; + void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq); + int err; + + param->wq.db_numa_node = param->wq.buf_numa_node; + + err = 
mlx5_wq_ll_create(mdev, ¶m->wq, rqc_wq, &rq->wq, + &rq->wq_ctrl); + if (err) + return err; + + rq->priv = priv; + + return 0; +} + +static int mlx5e_create_drop_cq(struct mlx5e_priv *priv, + struct mlx5e_cq *cq, + struct mlx5e_cq_param *param) +{ + struct mlx5_core_dev *mdev = priv->mdev; + struct mlx5_core_cq *mcq = &cq->mcq; + int eqn_not_used; + int irqn; + int err; + + err = mlx5_cqwq_create(mdev, ¶m->wq, param->cqc, &cq->wq, + &cq->wq_ctrl); + if (err) + return err; + + mlx5_vector2eqn(mdev, param->eq_ix, &eqn_not_used, &irqn); + + mcq->cqe_sz = 64; + mcq->set_ci_db = cq->wq_ctrl.db.db; + mcq->arm_db = cq->wq_ctrl.db.db + 1; + *mcq->set_ci_db = 0; + *mcq->arm_db = 0; + mcq->vector = param->eq_ix; + mcq->comp = mlx5e_completion_event; + mcq->event = mlx5e_cq_error_event; + mcq->irqn = irqn; + mcq->uar = &priv->cq_uar; + + cq->priv = priv; + + return 0; +} + +static int mlx5e_open_drop_rq(struct mlx5e_priv *priv) +{ + struct mlx5e_cq_param cq_param; + struct mlx5e_rq_param rq_param; + struct mlx5e_rq *rq = &priv->drop_rq; + struct mlx5e_cq *cq = &priv->drop_rq.cq; + int err; + + memset(&cq_param, 0, sizeof(cq_param)); + memset(&rq_param, 0, sizeof(rq_param)); + mlx5e_build_rx_cq_param(priv, &cq_param); + mlx5e_build_rq_param(priv, &rq_param); + + err = mlx5e_create_drop_cq(priv, cq, &cq_param); + if (err) + return err; + + err = mlx5e_enable_cq(cq, &cq_param); + if (err) + goto err_destroy_cq; + + err = mlx5e_create_drop_rq(priv, rq, &rq_param); + if (err) + goto err_disable_cq; + + err = mlx5e_enable_rq(rq, &rq_param); + if (err) + goto err_destroy_rq; + + return 0; + +err_destroy_rq: + mlx5e_destroy_rq(&priv->drop_rq); + +err_disable_cq: + mlx5e_disable_cq(&priv->drop_rq.cq); + +err_destroy_cq: + mlx5e_destroy_cq(&priv->drop_rq.cq); + + return err; +} + +static void mlx5e_close_drop_rq(struct mlx5e_priv *priv) +{ + mlx5e_disable_rq(&priv->drop_rq); + mlx5e_destroy_rq(&priv->drop_rq); + mlx5e_disable_cq(&priv->drop_rq.cq); + mlx5e_destroy_cq(&priv->drop_rq.cq); +} + static int mlx5e_open_tis(struct mlx5e_priv *priv, int tc) { struct mlx5_core_dev *mdev = priv->mdev; From d9eea403ca81f60cd535d354c77ada4c2bee8d66 Mon Sep 17 00:00:00 2001 From: Achiad Shochat Date: Tue, 4 Aug 2015 14:05:42 +0300 Subject: [PATCH 3/8] net/mlx5_core: Introduce access function to modify RSS/LRO params To be used by the mlx5 Eth driver in following commit. This is in preparation for netdev "light-weight" open/stop flow change described in previous commit. Signed-off-by: Achiad Shochat Signed-off-by: Amir Vadai Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlx5/core/transobj.c | 12 ++++++++++++ drivers/net/ethernet/mellanox/mlx5/core/transobj.h | 2 ++ include/linux/mlx5/mlx5_ifc.h | 9 ++++++++- 3 files changed, 22 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c index c4f3f74908ec..e6453f61141e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c @@ -163,6 +163,18 @@ int mlx5_core_create_tir(struct mlx5_core_dev *dev, u32 *in, int inlen, return err; } +int mlx5_core_modify_tir(struct mlx5_core_dev *dev, u32 tirn, u32 *in, + int inlen) +{ + u32 out[MLX5_ST_SZ_DW(modify_tir_out)]; + + MLX5_SET(modify_tir_in, in, tirn, tirn); + MLX5_SET(modify_tir_in, in, opcode, MLX5_CMD_OP_MODIFY_TIR); + + memset(out, 0, sizeof(out)); + return mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out)); +} + void mlx5_core_destroy_tir(struct mlx5_core_dev *dev, u32 tirn) { u32 in[MLX5_ST_SZ_DW(destroy_tir_out)]; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/transobj.h b/drivers/net/ethernet/mellanox/mlx5/core/transobj.h index 10bd75e7d9b1..d436c2d8b527 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/transobj.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/transobj.h @@ -45,6 +45,8 @@ int mlx5_core_modify_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *in, int inlen); void mlx5_core_destroy_sq(struct mlx5_core_dev *dev, u32 sqn); int mlx5_core_create_tir(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *tirn); +int mlx5_core_modify_tir(struct mlx5_core_dev *dev, u32 tirn, u32 *in, + int inlen); void mlx5_core_destroy_tir(struct mlx5_core_dev *dev, u32 tirn); int mlx5_core_create_tis(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *tisn); diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index c60a62bba652..469b7bda3304 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -4050,6 +4050,13 @@ struct mlx5_ifc_modify_tis_in_bits { struct mlx5_ifc_tisc_bits ctx; }; +struct mlx5_ifc_modify_tir_bitmask_bits { + u8 reserved[0x20]; + + u8 reserved1[0x1f]; + u8 lro[0x1]; +}; + struct mlx5_ifc_modify_tir_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; @@ -4071,7 +4078,7 @@ struct mlx5_ifc_modify_tir_in_bits { u8 reserved_3[0x20]; - u8 modify_bitmask[0x40]; + struct mlx5_ifc_modify_tir_bitmask_bits bitmask; u8 reserved_4[0x40]; From 5c50368f38317627421bf24a0b66b1af0d44eddc Mon Sep 17 00:00:00 2001 From: Achiad Shochat Date: Tue, 4 Aug 2015 14:05:43 +0300 Subject: [PATCH 4/8] net/mlx5e: Light-weight netdev open/stop Create/destroy TIRs, TISs and flow tables upon PCI probe/remove rather than upon the netdev ndo_open/stop. Upon ndo_stop(), redirect all RX traffic to the (lately introduced) "Drop RQ" and then close only the RX/TX rings, leaving the TIRs, TISs and flow tables alive. Signed-off-by: Achiad Shochat Signed-off-by: Amir Vadai Signed-off-by: David S. 
Miller --- .../net/ethernet/mellanox/mlx5/core/en_main.c | 237 ++++++++++++------ .../ethernet/mellanox/mlx5/core/transobj.c | 12 + .../ethernet/mellanox/mlx5/core/transobj.h | 2 + include/linux/mlx5/mlx5_ifc.h | 9 +- 4 files changed, 184 insertions(+), 76 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index baa7a69bb694..33d08bb11f84 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -1301,14 +1301,18 @@ static void mlx5e_fill_rqt_rqns(struct mlx5e_priv *priv, void *rqtc, ix = ix % priv->params.num_channels; MLX5_SET(rqtc, rqtc, rq_num[i], - priv->channel[ix]->rq.rqn); + test_bit(MLX5E_STATE_OPENED, &priv->state) ? + priv->channel[ix]->rq.rqn : + priv->drop_rq.rqn); } break; default: /* MLX5E_SINGLE_RQ_RQT */ MLX5_SET(rqtc, rqtc, rq_num[0], - priv->channel[0]->rq.rqn); + test_bit(MLX5E_STATE_OPENED, &priv->state) ? + priv->channel[0]->rq.rqn : + priv->drop_rq.rqn); break; } @@ -1347,19 +1351,95 @@ static int mlx5e_open_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix) return err; } +static int mlx5e_redirect_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix) +{ + struct mlx5_core_dev *mdev = priv->mdev; + u32 *in; + void *rqtc; + int inlen; + int log_sz; + int sz; + int err; + + log_sz = (rqt_ix == MLX5E_SINGLE_RQ_RQT) ? 0 : + priv->params.rx_hash_log_tbl_sz; + sz = 1 << log_sz; + + inlen = MLX5_ST_SZ_BYTES(modify_rqt_in) + sizeof(u32) * sz; + in = mlx5_vzalloc(inlen); + if (!in) + return -ENOMEM; + + rqtc = MLX5_ADDR_OF(modify_rqt_in, in, ctx); + + MLX5_SET(rqtc, rqtc, rqt_actual_size, sz); + + mlx5e_fill_rqt_rqns(priv, rqtc, rqt_ix); + + MLX5_SET(modify_rqt_in, in, bitmask.rqn_list, 1); + + err = mlx5_core_modify_rqt(mdev, priv->rqtn[rqt_ix], in, inlen); + + kvfree(in); + + return err; +} + static void mlx5e_close_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix) { mlx5_core_destroy_rqt(priv->mdev, priv->rqtn[rqt_ix]); } +static void mlx5e_build_tir_ctx_lro(void *tirc, struct mlx5e_priv *priv) +{ + if (!priv->params.lro_en) + return; + +#define ROUGH_MAX_L2_L3_HDR_SZ 256 + + MLX5_SET(tirc, tirc, lro_enable_mask, + MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO | + MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO); + MLX5_SET(tirc, tirc, lro_max_ip_payload_size, + (priv->params.lro_wqe_sz - + ROUGH_MAX_L2_L3_HDR_SZ) >> 8); + MLX5_SET(tirc, tirc, lro_timeout_period_usecs, + MLX5_CAP_ETH(priv->mdev, + lro_timer_supported_periods[3])); +} + +static int mlx5e_modify_tir_lro(struct mlx5e_priv *priv, int tt) +{ + struct mlx5_core_dev *mdev = priv->mdev; + + void *in; + void *tirc; + int inlen; + int err; + + inlen = MLX5_ST_SZ_BYTES(modify_tir_in); + in = mlx5_vzalloc(inlen); + if (!in) + return -ENOMEM; + + MLX5_SET(modify_tir_in, in, bitmask.lro, 1); + tirc = MLX5_ADDR_OF(modify_tir_in, in, ctx); + + mlx5e_build_tir_ctx_lro(tirc, priv); + + err = mlx5_core_modify_tir(mdev, priv->tirn[tt], in, inlen); + + kvfree(in); + + return err; +} + static void mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, int tt) { void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer); MLX5_SET(tirc, tirc, transport_domain, priv->tdn); -#define ROUGH_MAX_L2_L3_HDR_SZ 256 - #define MLX5_HASH_IP (MLX5_HASH_FIELD_SEL_SRC_IP |\ MLX5_HASH_FIELD_SEL_DST_IP) @@ -1372,17 +1452,7 @@ static void mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, int tt) MLX5_HASH_FIELD_SEL_DST_IP |\ MLX5_HASH_FIELD_SEL_IPSEC_SPI) - if (priv->params.lro_en) { - MLX5_SET(tirc, tirc, 
lro_enable_mask, - MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO | - MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO); - MLX5_SET(tirc, tirc, lro_max_ip_payload_size, - (priv->params.lro_wqe_sz - - ROUGH_MAX_L2_L3_HDR_SZ) >> 8); - MLX5_SET(tirc, tirc, lro_timeout_period_usecs, - MLX5_CAP_ETH(priv->mdev, - lro_timer_supported_periods[3])); - } + mlx5e_build_tir_ctx_lro(tirc, priv); MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT); @@ -1568,12 +1638,20 @@ static int mlx5e_set_dev_port_mtu(struct net_device *netdev) return 0; } +static void mlx5e_redirect_rqts(struct mlx5e_priv *priv) +{ + mlx5e_redirect_rqt(priv, MLX5E_INDIRECTION_RQT); + mlx5e_redirect_rqt(priv, MLX5E_SINGLE_RQ_RQT); +} + int mlx5e_open_locked(struct net_device *netdev) { struct mlx5e_priv *priv = netdev_priv(netdev); int num_txqs; int err; + set_bit(MLX5E_STATE_OPENED, &priv->state); + num_txqs = priv->params.num_channels * priv->params.num_tc; netif_set_real_num_tx_queues(netdev, num_txqs); netif_set_real_num_rx_queues(netdev, priv->params.num_channels); @@ -1582,83 +1660,32 @@ int mlx5e_open_locked(struct net_device *netdev) if (err) return err; - err = mlx5e_open_tises(priv); - if (err) { - netdev_err(netdev, "%s: mlx5e_open_tises failed, %d\n", - __func__, err); - return err; - } - err = mlx5e_open_channels(priv); if (err) { netdev_err(netdev, "%s: mlx5e_open_channels failed, %d\n", __func__, err); - goto err_close_tises; - } - - err = mlx5e_open_rqt(priv, MLX5E_INDIRECTION_RQT); - if (err) { - netdev_err(netdev, "%s: mlx5e_open_rqt(INDIR) failed, %d\n", - __func__, err); - goto err_close_channels; - } - - err = mlx5e_open_rqt(priv, MLX5E_SINGLE_RQ_RQT); - if (err) { - netdev_err(netdev, "%s: mlx5e_open_rqt(SINGLE) failed, %d\n", - __func__, err); - goto err_close_rqt_indir; - } - - err = mlx5e_open_tirs(priv); - if (err) { - netdev_err(netdev, "%s: mlx5e_open_tir failed, %d\n", - __func__, err); - goto err_close_rqt_single; - } - - err = mlx5e_open_flow_table(priv); - if (err) { - netdev_err(netdev, "%s: mlx5e_open_flow_table failed, %d\n", - __func__, err); - goto err_close_tirs; + return err; } err = mlx5e_add_all_vlan_rules(priv); if (err) { netdev_err(netdev, "%s: mlx5e_add_all_vlan_rules failed, %d\n", __func__, err); - goto err_close_flow_table; + goto err_close_channels; } mlx5e_init_eth_addr(priv); - set_bit(MLX5E_STATE_OPENED, &priv->state); - mlx5e_update_carrier(priv); + mlx5e_redirect_rqts(priv); mlx5e_set_rx_mode_core(priv); schedule_delayed_work(&priv->update_stats_work, 0); return 0; -err_close_flow_table: - mlx5e_close_flow_table(priv); - -err_close_tirs: - mlx5e_close_tirs(priv); - -err_close_rqt_single: - mlx5e_close_rqt(priv, MLX5E_SINGLE_RQ_RQT); - -err_close_rqt_indir: - mlx5e_close_rqt(priv, MLX5E_INDIRECTION_RQT); - err_close_channels: mlx5e_close_channels(priv); -err_close_tises: - mlx5e_close_tises(priv); - return err; } @@ -1682,13 +1709,9 @@ int mlx5e_close_locked(struct net_device *netdev) mlx5e_set_rx_mode_core(priv); mlx5e_del_all_vlan_rules(priv); + mlx5e_redirect_rqts(priv); netif_carrier_off(priv->netdev); - mlx5e_close_flow_table(priv); - mlx5e_close_tirs(priv); - mlx5e_close_rqt(priv, MLX5E_SINGLE_RQ_RQT); - mlx5e_close_rqt(priv, MLX5E_INDIRECTION_RQT); mlx5e_close_channels(priv); - mlx5e_close_tises(priv); return 0; } @@ -1766,6 +1789,8 @@ static int mlx5e_set_features(struct net_device *netdev, mlx5e_close_locked(priv->netdev); priv->params.lro_en = !!(features & NETIF_F_LRO); + mlx5e_modify_tir_lro(priv, MLX5E_TT_IPV4_TCP); + mlx5e_modify_tir_lro(priv, MLX5E_TT_IPV6_TCP); if (was_opened) err 
= mlx5e_open_locked(priv->netdev); @@ -2026,16 +2051,72 @@ static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev) goto err_dealloc_transport_domain; } + err = mlx5e_open_tises(priv); + if (err) { + mlx5_core_warn(mdev, "open tises failed, %d\n", err); + goto err_destroy_mkey; + } + + err = mlx5e_open_drop_rq(priv); + if (err) { + mlx5_core_err(mdev, "open drop rq failed, %d\n", err); + goto err_close_tises; + } + + err = mlx5e_open_rqt(priv, MLX5E_INDIRECTION_RQT); + if (err) { + mlx5_core_warn(mdev, "open rqt(INDIR) failed, %d\n", err); + goto err_close_drop_rq; + } + + err = mlx5e_open_rqt(priv, MLX5E_SINGLE_RQ_RQT); + if (err) { + mlx5_core_warn(mdev, "open rqt(SINGLE) failed, %d\n", err); + goto err_close_rqt_indir; + } + + err = mlx5e_open_tirs(priv); + if (err) { + mlx5_core_warn(mdev, "open tirs failed, %d\n", err); + goto err_close_rqt_single; + } + + err = mlx5e_open_flow_table(priv); + if (err) { + mlx5_core_warn(mdev, "open flow table failed, %d\n", err); + goto err_close_tirs; + } + + mlx5e_init_eth_addr(priv); + err = register_netdev(netdev); if (err) { mlx5_core_err(mdev, "register_netdev failed, %d\n", err); - goto err_destroy_mkey; + goto err_close_flow_table; } mlx5e_enable_async_events(priv); return priv; +err_close_flow_table: + mlx5e_close_flow_table(priv); + +err_close_tirs: + mlx5e_close_tirs(priv); + +err_close_rqt_single: + mlx5e_close_rqt(priv, MLX5E_SINGLE_RQ_RQT); + +err_close_rqt_indir: + mlx5e_close_rqt(priv, MLX5E_INDIRECTION_RQT); + +err_close_drop_rq: + mlx5e_close_drop_rq(priv); + +err_close_tises: + mlx5e_close_tises(priv); + err_destroy_mkey: mlx5_core_destroy_mkey(mdev, &priv->mr); @@ -2060,6 +2141,12 @@ static void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, void *vpriv) struct net_device *netdev = priv->netdev; unregister_netdev(netdev); + mlx5e_close_flow_table(priv); + mlx5e_close_tirs(priv); + mlx5e_close_rqt(priv, MLX5E_SINGLE_RQ_RQT); + mlx5e_close_rqt(priv, MLX5E_INDIRECTION_RQT); + mlx5e_close_drop_rq(priv); + mlx5e_close_tises(priv); mlx5_core_destroy_mkey(priv->mdev, &priv->mr); mlx5_dealloc_transport_domain(priv->mdev, priv->tdn); mlx5_core_dealloc_pd(priv->mdev, priv->pdn); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c index e6453f61141e..b4c87c7b0cf0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c @@ -387,6 +387,18 @@ int mlx5_core_create_rqt(struct mlx5_core_dev *dev, u32 *in, int inlen, return err; } +int mlx5_core_modify_rqt(struct mlx5_core_dev *dev, u32 rqtn, u32 *in, + int inlen) +{ + u32 out[MLX5_ST_SZ_DW(modify_rqt_out)]; + + MLX5_SET(modify_rqt_in, in, rqtn, rqtn); + MLX5_SET(modify_rqt_in, in, opcode, MLX5_CMD_OP_MODIFY_RQT); + + memset(out, 0, sizeof(out)); + return mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out)); +} + void mlx5_core_destroy_rqt(struct mlx5_core_dev *dev, u32 rqtn) { u32 in[MLX5_ST_SZ_DW(destroy_rqt_in)]; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/transobj.h b/drivers/net/ethernet/mellanox/mlx5/core/transobj.h index d436c2d8b527..74cae51436e4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/transobj.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/transobj.h @@ -65,6 +65,8 @@ int mlx5_core_arm_xsrq(struct mlx5_core_dev *dev, u32 rmpn, u16 lwm); int mlx5_core_create_rqt(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *rqtn); +int mlx5_core_modify_rqt(struct mlx5_core_dev *dev, u32 rqtn, u32 *in, + int inlen); void 
mlx5_core_destroy_rqt(struct mlx5_core_dev *dev, u32 rqtn); #endif /* __TRANSOBJ_H__ */ diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index 469b7bda3304..dd2097455a2e 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -4123,6 +4123,13 @@ struct mlx5_ifc_modify_rqt_out_bits { u8 reserved_1[0x40]; }; +struct mlx5_ifc_rqt_bitmask_bits { + u8 reserved[0x20]; + + u8 reserved1[0x1f]; + u8 rqn_list[0x1]; +}; + struct mlx5_ifc_modify_rqt_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; @@ -4135,7 +4142,7 @@ struct mlx5_ifc_modify_rqt_in_bits { u8 reserved_3[0x20]; - u8 modify_bitmask[0x40]; + struct mlx5_ifc_rqt_bitmask_bits bitmask; u8 reserved_4[0x40]; From 40ab6a6ebeebbcfc313233f5aa0d55930734f529 Mon Sep 17 00:00:00 2001 From: Achiad Shochat Date: Tue, 4 Aug 2015 14:05:44 +0300 Subject: [PATCH 5/8] net/mlx5e: Rename/move functions following the ndo_stop flow change Rename some functions that used to be invoked upon ndo_open/stop and are now invoked upon create/destroy_netdev() in order to better hint their place in the flow. Change some functions location in the file so that functions involved in ndo_open/stop flow will not be interleaved with other functions. This is a cosmetic change, no logical change here. Signed-off-by: Achiad Shochat Signed-off-by: Amir Vadai Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx5/core/en.h | 4 +- .../mellanox/mlx5/core/en_flow_table.c | 4 +- .../net/ethernet/mellanox/mlx5/core/en_main.c | 648 +++++++++--------- 3 files changed, 327 insertions(+), 329 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 31e9610926fe..a6c4bd3265a5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -482,8 +482,8 @@ struct mlx5_cqe64 *mlx5e_get_cqe(struct mlx5e_cq *cq); void mlx5e_update_stats(struct mlx5e_priv *priv); -int mlx5e_open_flow_table(struct mlx5e_priv *priv); -void mlx5e_close_flow_table(struct mlx5e_priv *priv); +int mlx5e_create_flow_tables(struct mlx5e_priv *priv); +void mlx5e_destroy_flow_tables(struct mlx5e_priv *priv); void mlx5e_init_eth_addr(struct mlx5e_priv *priv); void mlx5e_set_rx_mode_core(struct mlx5e_priv *priv); void mlx5e_set_rx_mode_work(struct work_struct *work); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_flow_table.c b/drivers/net/ethernet/mellanox/mlx5/core/en_flow_table.c index 70ec31b9e1e9..d99be7892ebc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_flow_table.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_flow_table.c @@ -929,7 +929,7 @@ static void mlx5e_destroy_vlan_flow_table(struct mlx5e_priv *priv) mlx5_destroy_flow_table(priv->ft.vlan); } -int mlx5e_open_flow_table(struct mlx5e_priv *priv) +int mlx5e_create_flow_tables(struct mlx5e_priv *priv) { int err; @@ -949,7 +949,7 @@ err_destroy_main_flow_table: return err; } -void mlx5e_close_flow_table(struct mlx5e_priv *priv) +void mlx5e_destroy_flow_tables(struct mlx5e_priv *priv) { mlx5e_destroy_vlan_flow_table(priv); mlx5e_destroy_main_flow_table(priv); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 33d08bb11f84..33a0488dc144 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -1114,6 +1114,283 @@ static void mlx5e_close_channels(struct mlx5e_priv *priv) kfree(priv->channel); } +static int mlx5e_rx_hash_fn(int hfunc) +{ 
+ return (hfunc == ETH_RSS_HASH_TOP) ? + MLX5_RX_HASH_FN_TOEPLITZ : + MLX5_RX_HASH_FN_INVERTED_XOR8; +} + +static int mlx5e_bits_invert(unsigned long a, int size) +{ + int inv = 0; + int i; + + for (i = 0; i < size; i++) + inv |= (test_bit(size - i - 1, &a) ? 1 : 0) << i; + + return inv; +} + +static void mlx5e_fill_rqt_rqns(struct mlx5e_priv *priv, void *rqtc, + enum mlx5e_rqt_ix rqt_ix) +{ + int i; + int log_sz; + + switch (rqt_ix) { + case MLX5E_INDIRECTION_RQT: + log_sz = priv->params.rx_hash_log_tbl_sz; + for (i = 0; i < (1 << log_sz); i++) { + int ix = i; + + if (priv->params.rss_hfunc == ETH_RSS_HASH_XOR) + ix = mlx5e_bits_invert(i, log_sz); + + ix = ix % priv->params.num_channels; + MLX5_SET(rqtc, rqtc, rq_num[i], + test_bit(MLX5E_STATE_OPENED, &priv->state) ? + priv->channel[ix]->rq.rqn : + priv->drop_rq.rqn); + } + + break; + + default: /* MLX5E_SINGLE_RQ_RQT */ + MLX5_SET(rqtc, rqtc, rq_num[0], + test_bit(MLX5E_STATE_OPENED, &priv->state) ? + priv->channel[0]->rq.rqn : + priv->drop_rq.rqn); + + break; + } +} + +static int mlx5e_create_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix) +{ + struct mlx5_core_dev *mdev = priv->mdev; + u32 *in; + void *rqtc; + int inlen; + int log_sz; + int sz; + int err; + + log_sz = (rqt_ix == MLX5E_SINGLE_RQ_RQT) ? 0 : + priv->params.rx_hash_log_tbl_sz; + sz = 1 << log_sz; + + inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz; + in = mlx5_vzalloc(inlen); + if (!in) + return -ENOMEM; + + rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context); + + MLX5_SET(rqtc, rqtc, rqt_actual_size, sz); + MLX5_SET(rqtc, rqtc, rqt_max_size, sz); + + mlx5e_fill_rqt_rqns(priv, rqtc, rqt_ix); + + err = mlx5_core_create_rqt(mdev, in, inlen, &priv->rqtn[rqt_ix]); + + kvfree(in); + + return err; +} + +static int mlx5e_redirect_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix) +{ + struct mlx5_core_dev *mdev = priv->mdev; + u32 *in; + void *rqtc; + int inlen; + int log_sz; + int sz; + int err; + + log_sz = (rqt_ix == MLX5E_SINGLE_RQ_RQT) ? 
0 : + priv->params.rx_hash_log_tbl_sz; + sz = 1 << log_sz; + + inlen = MLX5_ST_SZ_BYTES(modify_rqt_in) + sizeof(u32) * sz; + in = mlx5_vzalloc(inlen); + if (!in) + return -ENOMEM; + + rqtc = MLX5_ADDR_OF(modify_rqt_in, in, ctx); + + MLX5_SET(rqtc, rqtc, rqt_actual_size, sz); + + mlx5e_fill_rqt_rqns(priv, rqtc, rqt_ix); + + MLX5_SET(modify_rqt_in, in, bitmask.rqn_list, 1); + + err = mlx5_core_modify_rqt(mdev, priv->rqtn[rqt_ix], in, inlen); + + kvfree(in); + + return err; +} + +static void mlx5e_destroy_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix) +{ + mlx5_core_destroy_rqt(priv->mdev, priv->rqtn[rqt_ix]); +} + +static void mlx5e_redirect_rqts(struct mlx5e_priv *priv) +{ + mlx5e_redirect_rqt(priv, MLX5E_INDIRECTION_RQT); + mlx5e_redirect_rqt(priv, MLX5E_SINGLE_RQ_RQT); +} + +static void mlx5e_build_tir_ctx_lro(void *tirc, struct mlx5e_priv *priv) +{ + if (!priv->params.lro_en) + return; + +#define ROUGH_MAX_L2_L3_HDR_SZ 256 + + MLX5_SET(tirc, tirc, lro_enable_mask, + MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO | + MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO); + MLX5_SET(tirc, tirc, lro_max_ip_payload_size, + (priv->params.lro_wqe_sz - + ROUGH_MAX_L2_L3_HDR_SZ) >> 8); + MLX5_SET(tirc, tirc, lro_timeout_period_usecs, + MLX5_CAP_ETH(priv->mdev, + lro_timer_supported_periods[3])); +} + +static int mlx5e_modify_tir_lro(struct mlx5e_priv *priv, int tt) +{ + struct mlx5_core_dev *mdev = priv->mdev; + + void *in; + void *tirc; + int inlen; + int err; + + inlen = MLX5_ST_SZ_BYTES(modify_tir_in); + in = mlx5_vzalloc(inlen); + if (!in) + return -ENOMEM; + + MLX5_SET(modify_tir_in, in, bitmask.lro, 1); + tirc = MLX5_ADDR_OF(modify_tir_in, in, ctx); + + mlx5e_build_tir_ctx_lro(tirc, priv); + + err = mlx5_core_modify_tir(mdev, priv->tirn[tt], in, inlen); + + kvfree(in); + + return err; +} + +static int mlx5e_set_dev_port_mtu(struct net_device *netdev) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + struct mlx5_core_dev *mdev = priv->mdev; + int hw_mtu; + int err; + + err = mlx5_set_port_mtu(mdev, MLX5E_SW2HW_MTU(netdev->mtu), 1); + if (err) + return err; + + mlx5_query_port_oper_mtu(mdev, &hw_mtu, 1); + + if (MLX5E_HW2SW_MTU(hw_mtu) != netdev->mtu) + netdev_warn(netdev, "%s: Port MTU %d is different than netdev mtu %d\n", + __func__, MLX5E_HW2SW_MTU(hw_mtu), netdev->mtu); + + netdev->mtu = MLX5E_HW2SW_MTU(hw_mtu); + return 0; +} + +int mlx5e_open_locked(struct net_device *netdev) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + int num_txqs; + int err; + + set_bit(MLX5E_STATE_OPENED, &priv->state); + + num_txqs = priv->params.num_channels * priv->params.num_tc; + netif_set_real_num_tx_queues(netdev, num_txqs); + netif_set_real_num_rx_queues(netdev, priv->params.num_channels); + + err = mlx5e_set_dev_port_mtu(netdev); + if (err) + return err; + + err = mlx5e_open_channels(priv); + if (err) { + netdev_err(netdev, "%s: mlx5e_open_channels failed, %d\n", + __func__, err); + return err; + } + + err = mlx5e_add_all_vlan_rules(priv); + if (err) { + netdev_err(netdev, "%s: mlx5e_add_all_vlan_rules failed, %d\n", + __func__, err); + goto err_close_channels; + } + + mlx5e_update_carrier(priv); + mlx5e_redirect_rqts(priv); + mlx5e_set_rx_mode_core(priv); + + schedule_delayed_work(&priv->update_stats_work, 0); + return 0; + +err_close_channels: + mlx5e_close_channels(priv); + + return err; +} + +static int mlx5e_open(struct net_device *netdev) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + int err; + + mutex_lock(&priv->state_lock); + err = mlx5e_open_locked(netdev); + mutex_unlock(&priv->state_lock); + + 
return err; +} + +int mlx5e_close_locked(struct net_device *netdev) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + + clear_bit(MLX5E_STATE_OPENED, &priv->state); + + mlx5e_redirect_rqts(priv); + mlx5e_set_rx_mode_core(priv); + mlx5e_del_all_vlan_rules(priv); + netif_carrier_off(priv->netdev); + mlx5e_close_channels(priv); + + return 0; +} + +static int mlx5e_close(struct net_device *netdev) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + int err; + + mutex_lock(&priv->state_lock); + err = mlx5e_close_locked(netdev); + mutex_unlock(&priv->state_lock); + + return err; +} + static int mlx5e_create_drop_rq(struct mlx5e_priv *priv, struct mlx5e_rq *rq, struct mlx5e_rq_param *param) @@ -1219,7 +1496,7 @@ static void mlx5e_close_drop_rq(struct mlx5e_priv *priv) mlx5e_destroy_cq(&priv->drop_rq.cq); } -static int mlx5e_open_tis(struct mlx5e_priv *priv, int tc) +static int mlx5e_create_tis(struct mlx5e_priv *priv, int tc) { struct mlx5_core_dev *mdev = priv->mdev; u32 in[MLX5_ST_SZ_DW(create_tis_in)]; @@ -1233,18 +1510,18 @@ static int mlx5e_open_tis(struct mlx5e_priv *priv, int tc) return mlx5_core_create_tis(mdev, in, sizeof(in), &priv->tisn[tc]); } -static void mlx5e_close_tis(struct mlx5e_priv *priv, int tc) +static void mlx5e_destroy_tis(struct mlx5e_priv *priv, int tc) { mlx5_core_destroy_tis(priv->mdev, priv->tisn[tc]); } -static int mlx5e_open_tises(struct mlx5e_priv *priv) +static int mlx5e_create_tises(struct mlx5e_priv *priv) { int err; int tc; for (tc = 0; tc < priv->params.num_tc; tc++) { - err = mlx5e_open_tis(priv, tc); + err = mlx5e_create_tis(priv, tc); if (err) goto err_close_tises; } @@ -1253,185 +1530,17 @@ static int mlx5e_open_tises(struct mlx5e_priv *priv) err_close_tises: for (tc--; tc >= 0; tc--) - mlx5e_close_tis(priv, tc); + mlx5e_destroy_tis(priv, tc); return err; } -static void mlx5e_close_tises(struct mlx5e_priv *priv) +static void mlx5e_destroy_tises(struct mlx5e_priv *priv) { int tc; for (tc = 0; tc < priv->params.num_tc; tc++) - mlx5e_close_tis(priv, tc); -} - -static int mlx5e_rx_hash_fn(int hfunc) -{ - return (hfunc == ETH_RSS_HASH_TOP) ? - MLX5_RX_HASH_FN_TOEPLITZ : - MLX5_RX_HASH_FN_INVERTED_XOR8; -} - -static int mlx5e_bits_invert(unsigned long a, int size) -{ - int inv = 0; - int i; - - for (i = 0; i < size; i++) - inv |= (test_bit(size - i - 1, &a) ? 1 : 0) << i; - - return inv; -} - -static void mlx5e_fill_rqt_rqns(struct mlx5e_priv *priv, void *rqtc, - enum mlx5e_rqt_ix rqt_ix) -{ - int i; - int log_sz; - - switch (rqt_ix) { - case MLX5E_INDIRECTION_RQT: - log_sz = priv->params.rx_hash_log_tbl_sz; - for (i = 0; i < (1 << log_sz); i++) { - int ix = i; - - if (priv->params.rss_hfunc == ETH_RSS_HASH_XOR) - ix = mlx5e_bits_invert(i, log_sz); - - ix = ix % priv->params.num_channels; - MLX5_SET(rqtc, rqtc, rq_num[i], - test_bit(MLX5E_STATE_OPENED, &priv->state) ? - priv->channel[ix]->rq.rqn : - priv->drop_rq.rqn); - } - - break; - - default: /* MLX5E_SINGLE_RQ_RQT */ - MLX5_SET(rqtc, rqtc, rq_num[0], - test_bit(MLX5E_STATE_OPENED, &priv->state) ? - priv->channel[0]->rq.rqn : - priv->drop_rq.rqn); - - break; - } -} - -static int mlx5e_open_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix) -{ - struct mlx5_core_dev *mdev = priv->mdev; - u32 *in; - void *rqtc; - int inlen; - int log_sz; - int sz; - int err; - - log_sz = (rqt_ix == MLX5E_SINGLE_RQ_RQT) ? 
0 : - priv->params.rx_hash_log_tbl_sz; - sz = 1 << log_sz; - - inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz; - in = mlx5_vzalloc(inlen); - if (!in) - return -ENOMEM; - - rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context); - - MLX5_SET(rqtc, rqtc, rqt_actual_size, sz); - MLX5_SET(rqtc, rqtc, rqt_max_size, sz); - - mlx5e_fill_rqt_rqns(priv, rqtc, rqt_ix); - - err = mlx5_core_create_rqt(mdev, in, inlen, &priv->rqtn[rqt_ix]); - - kvfree(in); - - return err; -} - -static int mlx5e_redirect_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix) -{ - struct mlx5_core_dev *mdev = priv->mdev; - u32 *in; - void *rqtc; - int inlen; - int log_sz; - int sz; - int err; - - log_sz = (rqt_ix == MLX5E_SINGLE_RQ_RQT) ? 0 : - priv->params.rx_hash_log_tbl_sz; - sz = 1 << log_sz; - - inlen = MLX5_ST_SZ_BYTES(modify_rqt_in) + sizeof(u32) * sz; - in = mlx5_vzalloc(inlen); - if (!in) - return -ENOMEM; - - rqtc = MLX5_ADDR_OF(modify_rqt_in, in, ctx); - - MLX5_SET(rqtc, rqtc, rqt_actual_size, sz); - - mlx5e_fill_rqt_rqns(priv, rqtc, rqt_ix); - - MLX5_SET(modify_rqt_in, in, bitmask.rqn_list, 1); - - err = mlx5_core_modify_rqt(mdev, priv->rqtn[rqt_ix], in, inlen); - - kvfree(in); - - return err; -} - -static void mlx5e_close_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix) -{ - mlx5_core_destroy_rqt(priv->mdev, priv->rqtn[rqt_ix]); -} - -static void mlx5e_build_tir_ctx_lro(void *tirc, struct mlx5e_priv *priv) -{ - if (!priv->params.lro_en) - return; - -#define ROUGH_MAX_L2_L3_HDR_SZ 256 - - MLX5_SET(tirc, tirc, lro_enable_mask, - MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO | - MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO); - MLX5_SET(tirc, tirc, lro_max_ip_payload_size, - (priv->params.lro_wqe_sz - - ROUGH_MAX_L2_L3_HDR_SZ) >> 8); - MLX5_SET(tirc, tirc, lro_timeout_period_usecs, - MLX5_CAP_ETH(priv->mdev, - lro_timer_supported_periods[3])); -} - -static int mlx5e_modify_tir_lro(struct mlx5e_priv *priv, int tt) -{ - struct mlx5_core_dev *mdev = priv->mdev; - - void *in; - void *tirc; - int inlen; - int err; - - inlen = MLX5_ST_SZ_BYTES(modify_tir_in); - in = mlx5_vzalloc(inlen); - if (!in) - return -ENOMEM; - - MLX5_SET(modify_tir_in, in, bitmask.lro, 1); - tirc = MLX5_ADDR_OF(modify_tir_in, in, ctx); - - mlx5e_build_tir_ctx_lro(tirc, priv); - - err = mlx5_core_modify_tir(mdev, priv->tirn[tt], in, inlen); - - kvfree(in); - - return err; + mlx5e_destroy_tis(priv, tc); } static void mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, int tt) @@ -1560,7 +1669,7 @@ static void mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, int tt) } } -static int mlx5e_open_tir(struct mlx5e_priv *priv, int tt) +static int mlx5e_create_tir(struct mlx5e_priv *priv, int tt) { struct mlx5_core_dev *mdev = priv->mdev; u32 *in; @@ -1584,148 +1693,37 @@ static int mlx5e_open_tir(struct mlx5e_priv *priv, int tt) return err; } -static void mlx5e_close_tir(struct mlx5e_priv *priv, int tt) +static void mlx5e_destroy_tir(struct mlx5e_priv *priv, int tt) { mlx5_core_destroy_tir(priv->mdev, priv->tirn[tt]); } -static int mlx5e_open_tirs(struct mlx5e_priv *priv) +static int mlx5e_create_tirs(struct mlx5e_priv *priv) { int err; int i; for (i = 0; i < MLX5E_NUM_TT; i++) { - err = mlx5e_open_tir(priv, i); + err = mlx5e_create_tir(priv, i); if (err) - goto err_close_tirs; + goto err_destroy_tirs; } return 0; -err_close_tirs: +err_destroy_tirs: for (i--; i >= 0; i--) - mlx5e_close_tir(priv, i); + mlx5e_destroy_tir(priv, i); return err; } -static void mlx5e_close_tirs(struct mlx5e_priv *priv) +static void mlx5e_destroy_tirs(struct mlx5e_priv 
*priv) { int i; for (i = 0; i < MLX5E_NUM_TT; i++) - mlx5e_close_tir(priv, i); -} - -static int mlx5e_set_dev_port_mtu(struct net_device *netdev) -{ - struct mlx5e_priv *priv = netdev_priv(netdev); - struct mlx5_core_dev *mdev = priv->mdev; - int hw_mtu; - int err; - - err = mlx5_set_port_mtu(mdev, MLX5E_SW2HW_MTU(netdev->mtu), 1); - if (err) - return err; - - mlx5_query_port_oper_mtu(mdev, &hw_mtu, 1); - - if (MLX5E_HW2SW_MTU(hw_mtu) != netdev->mtu) - netdev_warn(netdev, "%s: Port MTU %d is different than netdev mtu %d\n", - __func__, MLX5E_HW2SW_MTU(hw_mtu), netdev->mtu); - - netdev->mtu = MLX5E_HW2SW_MTU(hw_mtu); - return 0; -} - -static void mlx5e_redirect_rqts(struct mlx5e_priv *priv) -{ - mlx5e_redirect_rqt(priv, MLX5E_INDIRECTION_RQT); - mlx5e_redirect_rqt(priv, MLX5E_SINGLE_RQ_RQT); -} - -int mlx5e_open_locked(struct net_device *netdev) -{ - struct mlx5e_priv *priv = netdev_priv(netdev); - int num_txqs; - int err; - - set_bit(MLX5E_STATE_OPENED, &priv->state); - - num_txqs = priv->params.num_channels * priv->params.num_tc; - netif_set_real_num_tx_queues(netdev, num_txqs); - netif_set_real_num_rx_queues(netdev, priv->params.num_channels); - - err = mlx5e_set_dev_port_mtu(netdev); - if (err) - return err; - - err = mlx5e_open_channels(priv); - if (err) { - netdev_err(netdev, "%s: mlx5e_open_channels failed, %d\n", - __func__, err); - return err; - } - - err = mlx5e_add_all_vlan_rules(priv); - if (err) { - netdev_err(netdev, "%s: mlx5e_add_all_vlan_rules failed, %d\n", - __func__, err); - goto err_close_channels; - } - - mlx5e_init_eth_addr(priv); - - mlx5e_update_carrier(priv); - mlx5e_redirect_rqts(priv); - mlx5e_set_rx_mode_core(priv); - - schedule_delayed_work(&priv->update_stats_work, 0); - return 0; - -err_close_channels: - mlx5e_close_channels(priv); - - return err; -} - -static int mlx5e_open(struct net_device *netdev) -{ - struct mlx5e_priv *priv = netdev_priv(netdev); - int err; - - mutex_lock(&priv->state_lock); - err = mlx5e_open_locked(netdev); - mutex_unlock(&priv->state_lock); - - return err; -} - -int mlx5e_close_locked(struct net_device *netdev) -{ - struct mlx5e_priv *priv = netdev_priv(netdev); - - clear_bit(MLX5E_STATE_OPENED, &priv->state); - - mlx5e_set_rx_mode_core(priv); - mlx5e_del_all_vlan_rules(priv); - mlx5e_redirect_rqts(priv); - netif_carrier_off(priv->netdev); - mlx5e_close_channels(priv); - - return 0; -} - -static int mlx5e_close(struct net_device *netdev) -{ - struct mlx5e_priv *priv = netdev_priv(netdev); - int err; - - mutex_lock(&priv->state_lock); - err = mlx5e_close_locked(netdev); - mutex_unlock(&priv->state_lock); - - return err; + mlx5e_destroy_tir(priv, i); } static struct rtnl_link_stats64 * @@ -2051,40 +2049,40 @@ static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev) goto err_dealloc_transport_domain; } - err = mlx5e_open_tises(priv); + err = mlx5e_create_tises(priv); if (err) { - mlx5_core_warn(mdev, "open tises failed, %d\n", err); + mlx5_core_warn(mdev, "create tises failed, %d\n", err); goto err_destroy_mkey; } err = mlx5e_open_drop_rq(priv); if (err) { mlx5_core_err(mdev, "open drop rq failed, %d\n", err); - goto err_close_tises; + goto err_destroy_tises; } - err = mlx5e_open_rqt(priv, MLX5E_INDIRECTION_RQT); + err = mlx5e_create_rqt(priv, MLX5E_INDIRECTION_RQT); if (err) { - mlx5_core_warn(mdev, "open rqt(INDIR) failed, %d\n", err); + mlx5_core_warn(mdev, "create rqt(INDIR) failed, %d\n", err); goto err_close_drop_rq; } - err = mlx5e_open_rqt(priv, MLX5E_SINGLE_RQ_RQT); + err = mlx5e_create_rqt(priv, MLX5E_SINGLE_RQ_RQT); if 
(err) { - mlx5_core_warn(mdev, "open rqt(SINGLE) failed, %d\n", err); - goto err_close_rqt_indir; + mlx5_core_warn(mdev, "create rqt(SINGLE) failed, %d\n", err); + goto err_destroy_rqt_indir; } - err = mlx5e_open_tirs(priv); + err = mlx5e_create_tirs(priv); if (err) { - mlx5_core_warn(mdev, "open tirs failed, %d\n", err); - goto err_close_rqt_single; + mlx5_core_warn(mdev, "create tirs failed, %d\n", err); + goto err_destroy_rqt_single; } - err = mlx5e_open_flow_table(priv); + err = mlx5e_create_flow_tables(priv); if (err) { - mlx5_core_warn(mdev, "open flow table failed, %d\n", err); - goto err_close_tirs; + mlx5_core_warn(mdev, "create flow tables failed, %d\n", err); + goto err_destroy_tirs; } mlx5e_init_eth_addr(priv); @@ -2092,30 +2090,30 @@ static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev) err = register_netdev(netdev); if (err) { mlx5_core_err(mdev, "register_netdev failed, %d\n", err); - goto err_close_flow_table; + goto err_destroy_flow_tables; } mlx5e_enable_async_events(priv); return priv; -err_close_flow_table: - mlx5e_close_flow_table(priv); +err_destroy_flow_tables: + mlx5e_destroy_flow_tables(priv); -err_close_tirs: - mlx5e_close_tirs(priv); +err_destroy_tirs: + mlx5e_destroy_tirs(priv); -err_close_rqt_single: - mlx5e_close_rqt(priv, MLX5E_SINGLE_RQ_RQT); +err_destroy_rqt_single: + mlx5e_destroy_rqt(priv, MLX5E_SINGLE_RQ_RQT); -err_close_rqt_indir: - mlx5e_close_rqt(priv, MLX5E_INDIRECTION_RQT); +err_destroy_rqt_indir: + mlx5e_destroy_rqt(priv, MLX5E_INDIRECTION_RQT); err_close_drop_rq: mlx5e_close_drop_rq(priv); -err_close_tises: - mlx5e_close_tises(priv); +err_destroy_tises: + mlx5e_destroy_tises(priv); err_destroy_mkey: mlx5_core_destroy_mkey(mdev, &priv->mr); @@ -2141,12 +2139,12 @@ static void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, void *vpriv) struct net_device *netdev = priv->netdev; unregister_netdev(netdev); - mlx5e_close_flow_table(priv); - mlx5e_close_tirs(priv); - mlx5e_close_rqt(priv, MLX5E_SINGLE_RQ_RQT); - mlx5e_close_rqt(priv, MLX5E_INDIRECTION_RQT); + mlx5e_destroy_flow_tables(priv); + mlx5e_destroy_tirs(priv); + mlx5e_destroy_rqt(priv, MLX5E_SINGLE_RQ_RQT); + mlx5e_destroy_rqt(priv, MLX5E_INDIRECTION_RQT); mlx5e_close_drop_rq(priv); - mlx5e_close_tises(priv); + mlx5e_destroy_tises(priv); mlx5_core_destroy_mkey(priv->mdev, &priv->mr); mlx5_dealloc_transport_domain(priv->mdev, priv->tdn); mlx5_core_dealloc_pd(priv->mdev, priv->pdn); From 1cefa326ff26dddb2c4a7f43802ce0ba5c35a2ba Mon Sep 17 00:00:00 2001 From: Achiad Shochat Date: Tue, 4 Aug 2015 14:05:45 +0300 Subject: [PATCH 6/8] net/mlx5e: Disable async events before unregister_netdev() It does not make sense to allow events while the netdev is unregistered. Signed-off-by: Achiad Shochat Signed-off-by: Amir Vadai Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 33a0488dc144..436968806268 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -2138,6 +2138,8 @@ static void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, void *vpriv) struct mlx5e_priv *priv = vpriv; struct net_device *netdev = priv->netdev; + mlx5e_disable_async_events(priv); + flush_scheduled_work(); unregister_netdev(netdev); mlx5e_destroy_flow_tables(priv); mlx5e_destroy_tirs(priv); @@ -2149,8 +2151,6 @@ static void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, void *vpriv) mlx5_dealloc_transport_domain(priv->mdev, priv->tdn); mlx5_core_dealloc_pd(priv->mdev, priv->pdn); mlx5_unmap_free_uar(priv->mdev, &priv->cq_uar); - mlx5e_disable_async_events(priv); - flush_scheduled_work(); free_netdev(netdev); } From 9b37b07fcb0e00a9c0b605b7b28c2d200f4eb064 Mon Sep 17 00:00:00 2001 From: Achiad Shochat Date: Tue, 4 Aug 2015 14:05:46 +0300 Subject: [PATCH 7/8] net/mlx5e: Take advantage of the light-weight netdev open/stop Now that TIRs, TISs and flow tables are kept alive while the netdev is stopped (after executing ndo_stop()) we can do the following improvements: - Obsolete the active_vlans SW shadow. - Do not delete/add flow table rules upon ndo_stop/open. In addition to simplifying the flow, this change also fastens the ndo_open/close operations. - Obsolete synchronization of threads accessing the flow tables with the netdev stop/open threads. Signed-off-by: Achiad Shochat Signed-off-by: Amir Vadai Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx5/core/en.h | 5 +- .../mellanox/mlx5/core/en_flow_table.c | 109 ++++-------------- .../net/ethernet/mellanox/mlx5/core/en_main.c | 24 ++-- 3 files changed, 32 insertions(+), 106 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index a6c4bd3265a5..35c33907a9ff 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -374,10 +374,10 @@ struct mlx5e_eth_addr_db { enum { MLX5E_STATE_ASYNC_EVENTS_ENABLE, MLX5E_STATE_OPENED, + MLX5E_STATE_DESTROYING, }; struct mlx5e_vlan_db { - unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; u32 active_vlans_ft_ix[VLAN_N_VID]; u32 untagged_rule_ft_ix; u32 any_vlan_rule_ft_ix; @@ -485,7 +485,6 @@ void mlx5e_update_stats(struct mlx5e_priv *priv); int mlx5e_create_flow_tables(struct mlx5e_priv *priv); void mlx5e_destroy_flow_tables(struct mlx5e_priv *priv); void mlx5e_init_eth_addr(struct mlx5e_priv *priv); -void mlx5e_set_rx_mode_core(struct mlx5e_priv *priv); void mlx5e_set_rx_mode_work(struct work_struct *work); int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto, @@ -494,8 +493,6 @@ int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto, u16 vid); void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv); void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv); -int mlx5e_add_all_vlan_rules(struct mlx5e_priv *priv); -void mlx5e_del_all_vlan_rules(struct mlx5e_priv *priv); int mlx5e_open_locked(struct net_device *netdev); int mlx5e_close_locked(struct net_device *netdev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_flow_table.c b/drivers/net/ethernet/mellanox/mlx5/core/en_flow_table.c index 
d99be7892ebc..e71563ce05d1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_flow_table.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_flow_table.c @@ -594,44 +594,28 @@ static void mlx5e_del_vlan_rule(struct mlx5e_priv *priv, void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv) { - WARN_ON(!mutex_is_locked(&priv->state_lock)); + if (!priv->vlan.filter_disabled) + return; - if (priv->vlan.filter_disabled) { - priv->vlan.filter_disabled = false; - if (test_bit(MLX5E_STATE_OPENED, &priv->state)) - mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, - 0); - } + priv->vlan.filter_disabled = false; + mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0); } void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv) { - WARN_ON(!mutex_is_locked(&priv->state_lock)); + if (priv->vlan.filter_disabled) + return; - if (!priv->vlan.filter_disabled) { - priv->vlan.filter_disabled = true; - if (test_bit(MLX5E_STATE_OPENED, &priv->state)) - mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, - 0); - } + priv->vlan.filter_disabled = true; + mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0); } int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto, u16 vid) { struct mlx5e_priv *priv = netdev_priv(dev); - int err = 0; - mutex_lock(&priv->state_lock); - - set_bit(vid, priv->vlan.active_vlans); - if (test_bit(MLX5E_STATE_OPENED, &priv->state)) - err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, - vid); - - mutex_unlock(&priv->state_lock); - - return err; + return mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid); } int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto, @@ -639,56 +623,11 @@ int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto, { struct mlx5e_priv *priv = netdev_priv(dev); - mutex_lock(&priv->state_lock); - - clear_bit(vid, priv->vlan.active_vlans); - if (test_bit(MLX5E_STATE_OPENED, &priv->state)) - mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid); - - mutex_unlock(&priv->state_lock); + mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid); return 0; } -int mlx5e_add_all_vlan_rules(struct mlx5e_priv *priv) -{ - u16 vid; - int err; - - for_each_set_bit(vid, priv->vlan.active_vlans, VLAN_N_VID) { - err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, - vid); - if (err) - return err; - } - - err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0); - if (err) - return err; - - if (priv->vlan.filter_disabled) { - err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, - 0); - if (err) - return err; - } - - return 0; -} - -void mlx5e_del_all_vlan_rules(struct mlx5e_priv *priv) -{ - u16 vid; - - if (priv->vlan.filter_disabled) - mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0); - - mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0); - - for_each_set_bit(vid, priv->vlan.active_vlans, VLAN_N_VID) - mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid); -} - #define mlx5e_for_each_hash_node(hn, tmp, hash, i) \ for (i = 0; i < MLX5E_ETH_ADDR_HASH_SIZE; i++) \ hlist_for_each_entry_safe(hn, tmp, &hash[i], hlist) @@ -752,18 +691,21 @@ static void mlx5e_handle_netdev_addr(struct mlx5e_priv *priv) mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_mc, i) hn->action = MLX5E_ACTION_DEL; - if (test_bit(MLX5E_STATE_OPENED, &priv->state)) + if (!test_bit(MLX5E_STATE_DESTROYING, &priv->state)) mlx5e_sync_netdev_addr(priv); mlx5e_apply_netdev_addr(priv); } -void mlx5e_set_rx_mode_core(struct mlx5e_priv 
*priv) +void mlx5e_set_rx_mode_work(struct work_struct *work) { + struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv, + set_rx_mode_work); + struct mlx5e_eth_addr_db *ea = &priv->eth_addr; struct net_device *ndev = priv->netdev; - bool rx_mode_enable = test_bit(MLX5E_STATE_OPENED, &priv->state); + bool rx_mode_enable = !test_bit(MLX5E_STATE_DESTROYING, &priv->state); bool promisc_enabled = rx_mode_enable && (ndev->flags & IFF_PROMISC); bool allmulti_enabled = rx_mode_enable && (ndev->flags & IFF_ALLMULTI); bool broadcast_enabled = rx_mode_enable; @@ -796,17 +738,6 @@ void mlx5e_set_rx_mode_core(struct mlx5e_priv *priv) ea->broadcast_enabled = broadcast_enabled; } -void mlx5e_set_rx_mode_work(struct work_struct *work) -{ - struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv, - set_rx_mode_work); - - mutex_lock(&priv->state_lock); - if (test_bit(MLX5E_STATE_OPENED, &priv->state)) - mlx5e_set_rx_mode_core(priv); - mutex_unlock(&priv->state_lock); -} - void mlx5e_init_eth_addr(struct mlx5e_priv *priv) { ether_addr_copy(priv->eth_addr.broadcast.addr, priv->netdev->broadcast); @@ -941,8 +872,15 @@ int mlx5e_create_flow_tables(struct mlx5e_priv *priv) if (err) goto err_destroy_main_flow_table; + err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0); + if (err) + goto err_destroy_vlan_flow_table; + return 0; +err_destroy_vlan_flow_table: + mlx5e_destroy_vlan_flow_table(priv); + err_destroy_main_flow_table: mlx5e_destroy_main_flow_table(priv); @@ -951,6 +889,7 @@ err_destroy_main_flow_table: void mlx5e_destroy_flow_tables(struct mlx5e_priv *priv) { + mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0); mlx5e_destroy_vlan_flow_table(priv); mlx5e_destroy_main_flow_table(priv); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 436968806268..b8023a7484e0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -1332,24 +1332,12 @@ int mlx5e_open_locked(struct net_device *netdev) return err; } - err = mlx5e_add_all_vlan_rules(priv); - if (err) { - netdev_err(netdev, "%s: mlx5e_add_all_vlan_rules failed, %d\n", - __func__, err); - goto err_close_channels; - } - mlx5e_update_carrier(priv); mlx5e_redirect_rqts(priv); - mlx5e_set_rx_mode_core(priv); schedule_delayed_work(&priv->update_stats_work, 0); + return 0; - -err_close_channels: - mlx5e_close_channels(priv); - - return err; } static int mlx5e_open(struct net_device *netdev) @@ -1371,8 +1359,6 @@ int mlx5e_close_locked(struct net_device *netdev) clear_bit(MLX5E_STATE_OPENED, &priv->state); mlx5e_redirect_rqts(priv); - mlx5e_set_rx_mode_core(priv); - mlx5e_del_all_vlan_rules(priv); netif_carrier_off(priv->netdev); mlx5e_close_channels(priv); @@ -1794,6 +1780,8 @@ static int mlx5e_set_features(struct net_device *netdev, err = mlx5e_open_locked(priv->netdev); } + mutex_unlock(&priv->state_lock); + if (changes & NETIF_F_HW_VLAN_CTAG_FILTER) { if (features & NETIF_F_HW_VLAN_CTAG_FILTER) mlx5e_enable_vlan_filter(priv); @@ -1801,8 +1789,6 @@ static int mlx5e_set_features(struct net_device *netdev, mlx5e_disable_vlan_filter(priv); } - mutex_unlock(&priv->state_lock); - return 0; } @@ -2094,6 +2080,7 @@ static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev) } mlx5e_enable_async_events(priv); + schedule_work(&priv->set_rx_mode_work); return priv; @@ -2138,6 +2125,9 @@ static void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, void *vpriv) struct mlx5e_priv *priv = vpriv; 
struct net_device *netdev = priv->netdev; + set_bit(MLX5E_STATE_DESTROYING, &priv->state); + + schedule_work(&priv->set_rx_mode_work); mlx5e_disable_async_events(priv); flush_scheduled_work(); unregister_netdev(netdev); From efea389d3cc6427a9a94e92b2d7bf4c862f2cfcf Mon Sep 17 00:00:00 2001 From: Gal Pressman Date: Tue, 4 Aug 2015 14:05:47 +0300 Subject: [PATCH 8/8] net/mlx5_core: Support physical port counters Added physical port counters in the following standard formats to ethtool statistics: - IEEE 802.3 - RFC2863 - RFC2819 Signed-off-by: Gal Pressman Signed-off-by: Saeed Mahameed Signed-off-by: Amir Vadai Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx5/core/en.h | 75 +++++++++++++++++++ .../ethernet/mellanox/mlx5/core/en_ethtool.c | 10 ++- .../net/ethernet/mellanox/mlx5/core/en_main.c | 42 +++++++++++ include/linux/mlx5/device.h | 10 +++ include/linux/mlx5/driver.h | 1 + 5 files changed, 137 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 35c33907a9ff..e9d7d90363a8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -138,6 +138,80 @@ struct mlx5e_vport_stats { #define NUM_VPORT_COUNTERS 31 }; +static const char pport_strings[][ETH_GSTRING_LEN] = { + /* IEEE802.3 counters */ + "frames_tx", + "frames_rx", + "check_seq_err", + "alignment_err", + "octets_tx", + "octets_received", + "multicast_xmitted", + "broadcast_xmitted", + "multicast_rx", + "broadcast_rx", + "in_range_len_errors", + "out_of_range_len", + "too_long_errors", + "symbol_err", + "mac_control_tx", + "mac_control_rx", + "unsupported_op_rx", + "pause_ctrl_rx", + "pause_ctrl_tx", + + /* RFC2863 counters */ + "in_octets", + "in_ucast_pkts", + "in_discards", + "in_errors", + "in_unknown_protos", + "out_octets", + "out_ucast_pkts", + "out_discards", + "out_errors", + "in_multicast_pkts", + "in_broadcast_pkts", + "out_multicast_pkts", + "out_broadcast_pkts", + + /* RFC2819 counters */ + "drop_events", + "octets", + "pkts", + "broadcast_pkts", + "multicast_pkts", + "crc_align_errors", + "undersize_pkts", + "oversize_pkts", + "fragments", + "jabbers", + "collisions", + "p64octets", + "p65to127octets", + "p128to255octets", + "p256to511octets", + "p512to1023octets", + "p1024to1518octets", + "p1519to2047octets", + "p2048to4095octets", + "p4096to8191octets", + "p8192to10239octets", +}; + +#define NUM_IEEE_802_3_COUNTERS 19 +#define NUM_RFC_2863_COUNTERS 13 +#define NUM_RFC_2819_COUNTERS 21 +#define NUM_PPORT_COUNTERS (NUM_IEEE_802_3_COUNTERS + \ + NUM_RFC_2863_COUNTERS + \ + NUM_RFC_2819_COUNTERS) + +struct mlx5e_pport_stats { + __be64 IEEE_802_3_counters[NUM_IEEE_802_3_COUNTERS]; + __be64 RFC_2863_counters[NUM_RFC_2863_COUNTERS]; + __be64 RFC_2819_counters[NUM_RFC_2819_COUNTERS]; +}; + static const char rq_stats_strings[][ETH_GSTRING_LEN] = { "packets", "csum_none", @@ -180,6 +254,7 @@ struct mlx5e_sq_stats { struct mlx5e_stats { struct mlx5e_vport_stats vport; + struct mlx5e_pport_stats pport; }; struct mlx5e_params { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index b95aa3384c36..b549797b315f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -171,7 +171,7 @@ static int mlx5e_get_sset_count(struct net_device *dev, int sset) switch (sset) { case ETH_SS_STATS: - return NUM_VPORT_COUNTERS + + return 
NUM_VPORT_COUNTERS + NUM_PPORT_COUNTERS + priv->params.num_channels * NUM_RQ_STATS + priv->params.num_channels * priv->params.num_tc * NUM_SQ_STATS; @@ -200,6 +200,11 @@ static void mlx5e_get_strings(struct net_device *dev, strcpy(data + (idx++) * ETH_GSTRING_LEN, vport_strings[i]); + /* PPORT counters */ + for (i = 0; i < NUM_PPORT_COUNTERS; i++) + strcpy(data + (idx++) * ETH_GSTRING_LEN, + pport_strings[i]); + /* per channel counters */ for (i = 0; i < priv->params.num_channels; i++) for (j = 0; j < NUM_RQ_STATS; j++) @@ -234,6 +239,9 @@ static void mlx5e_get_ethtool_stats(struct net_device *dev, for (i = 0; i < NUM_VPORT_COUNTERS; i++) data[idx++] = ((u64 *)&priv->stats.vport)[i]; + for (i = 0; i < NUM_PPORT_COUNTERS; i++) + data[idx++] = be64_to_cpu(((__be64 *)&priv->stats.pport)[i]); + /* per channel counters */ for (i = 0; i < priv->params.num_channels; i++) for (j = 0; j < NUM_RQ_STATS; j++) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index b8023a7484e0..111427b33ec8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -82,6 +82,47 @@ static void mlx5e_update_carrier_work(struct work_struct *work) mutex_unlock(&priv->state_lock); } +static void mlx5e_update_pport_counters(struct mlx5e_priv *priv) +{ + struct mlx5_core_dev *mdev = priv->mdev; + struct mlx5e_pport_stats *s = &priv->stats.pport; + u32 *in; + u32 *out; + int sz = MLX5_ST_SZ_BYTES(ppcnt_reg); + + in = mlx5_vzalloc(sz); + out = mlx5_vzalloc(sz); + if (!in || !out) + goto free_out; + + MLX5_SET(ppcnt_reg, in, local_port, 1); + + MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP); + mlx5_core_access_reg(mdev, in, sz, out, + sz, MLX5_REG_PPCNT, 0, 0); + memcpy(s->IEEE_802_3_counters, + MLX5_ADDR_OF(ppcnt_reg, out, counter_set), + sizeof(s->IEEE_802_3_counters)); + + MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2863_COUNTERS_GROUP); + mlx5_core_access_reg(mdev, in, sz, out, + sz, MLX5_REG_PPCNT, 0, 0); + memcpy(s->RFC_2863_counters, + MLX5_ADDR_OF(ppcnt_reg, out, counter_set), + sizeof(s->RFC_2863_counters)); + + MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP); + mlx5_core_access_reg(mdev, in, sz, out, + sz, MLX5_REG_PPCNT, 0, 0); + memcpy(s->RFC_2819_counters, + MLX5_ADDR_OF(ppcnt_reg, out, counter_set), + sizeof(s->RFC_2819_counters)); + +free_out: + kvfree(in); + kvfree(out); +} + void mlx5e_update_stats(struct mlx5e_priv *priv) { struct mlx5_core_dev *mdev = priv->mdev; @@ -202,6 +243,7 @@ void mlx5e_update_stats(struct mlx5e_priv *priv) s->tx_csum_offload = s->tx_packets - tx_offload_none; s->rx_csum_good = s->rx_packets - s->rx_csum_none; + mlx5e_update_pport_counters(priv); free_out: kvfree(out); } diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index b943cd9e2097..250b1ff8b48d 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h @@ -1182,6 +1182,16 @@ enum { MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR = 0x40, }; +enum { + MLX5_IEEE_802_3_COUNTERS_GROUP = 0x0, + MLX5_RFC_2863_COUNTERS_GROUP = 0x1, + MLX5_RFC_2819_COUNTERS_GROUP = 0x2, + MLX5_RFC_3635_COUNTERS_GROUP = 0x3, + MLX5_ETHERNET_EXTENDED_COUNTERS_GROUP = 0x5, + MLX5_PER_PRIORITY_COUNTERS_GROUP = 0x10, + MLX5_PER_TRAFFIC_CLASS_COUNTERS_GROUP = 0x11 +}; + static inline u16 mlx5_to_sw_pkey_sz(int pkey_sz) { if (pkey_sz > MLX5_MAX_LOG_PKEY_TABLE) diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 5fe0cae1a515..2039546b0ec6 100644 --- 
a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -103,6 +103,7 @@ enum { MLX5_REG_PMTU = 0x5003, MLX5_REG_PTYS = 0x5004, MLX5_REG_PAOS = 0x5006, + MLX5_REG_PPCNT = 0x5008, MLX5_REG_PMAOS = 0x5012, MLX5_REG_PUDE = 0x5009, MLX5_REG_PMPE = 0x5010,
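
The PPCNT access pattern used by mlx5e_update_pport_counters() above generalizes to the other counter groups added to include/linux/mlx5/device.h (RFC 3635, extended, per-priority, per-traffic-class). The following helper is an illustrative sketch only and is not part of this series; it assumes the same in-tree helpers already used in the patch (mlx5_vzalloc/kvfree, MLX5_SET, MLX5_ADDR_OF, mlx5_core_access_reg) and takes a caller-supplied, group-sized output array.

/* Illustrative sketch (not part of this series): read one PPCNT counter
 * group for the physical port into a caller-supplied buffer, following
 * the same register-access pattern as mlx5e_update_pport_counters().
 */
static int mlx5e_read_ppcnt_group(struct mlx5_core_dev *mdev, u8 grp,
				  __be64 *counters, size_t counters_sz)
{
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	u32 *in;
	u32 *out;
	int err = -ENOMEM;

	in = mlx5_vzalloc(sz);
	out = mlx5_vzalloc(sz);
	if (!in || !out)
		goto free;

	/* Query local_port 1 for the requested counter group */
	MLX5_SET(ppcnt_reg, in, local_port, 1);
	MLX5_SET(ppcnt_reg, in, grp, grp);

	err = mlx5_core_access_reg(mdev, in, sz, out, sz,
				   MLX5_REG_PPCNT, 0, 0);
	if (err)
		goto free;

	/* Counter sets are returned as big-endian 64-bit values */
	memcpy(counters, MLX5_ADDR_OF(ppcnt_reg, out, counter_set),
	       counters_sz);

free:
	kvfree(in);
	kvfree(out);
	return err;
}

With such a helper, the three copies in mlx5e_update_pport_counters() would reduce to calls like mlx5e_read_ppcnt_group(mdev, MLX5_IEEE_802_3_COUNTERS_GROUP, s->IEEE_802_3_counters, sizeof(s->IEEE_802_3_counters)), and supporting a further group (e.g. MLX5_RFC_3635_COUNTERS_GROUP) would need only a new field in struct mlx5e_pport_stats plus one more call.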