Merge branches 'rxe' and 'mlx' into k.o/for-next

Doug Ledford 2017-07-26 20:13:33 -04:00
Parent 03da084ed8 b4fbec9673 6afff1c715
Commit f55c1e6608
48 changed files with 2476 additions and 187 deletions

View file

@ -3998,7 +3998,8 @@ static void iboe_mcast_work_handler(struct work_struct *work)
kfree(mw);
}
static void cma_iboe_set_mgid(struct sockaddr *addr, union ib_gid *mgid)
static void cma_iboe_set_mgid(struct sockaddr *addr, union ib_gid *mgid,
enum ib_gid_type gid_type)
{
struct sockaddr_in *sin = (struct sockaddr_in *)addr;
struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)addr;
@ -4008,8 +4009,8 @@ static void cma_iboe_set_mgid(struct sockaddr *addr, union ib_gid *mgid)
} else if (addr->sa_family == AF_INET6) {
memcpy(mgid, &sin6->sin6_addr, sizeof *mgid);
} else {
mgid->raw[0] = 0xff;
mgid->raw[1] = 0x0e;
mgid->raw[0] = (gid_type == IB_GID_TYPE_IB) ? 0xff : 0;
mgid->raw[1] = (gid_type == IB_GID_TYPE_IB) ? 0x0e : 0;
mgid->raw[2] = 0;
mgid->raw[3] = 0;
mgid->raw[4] = 0;
@ -4050,7 +4051,9 @@ static int cma_iboe_join_multicast(struct rdma_id_private *id_priv,
goto out1;
}
cma_iboe_set_mgid(addr, &mc->multicast.ib->rec.mgid);
gid_type = id_priv->cma_dev->default_gid_type[id_priv->id.port_num -
rdma_start_port(id_priv->cma_dev->device)];
cma_iboe_set_mgid(addr, &mc->multicast.ib->rec.mgid, gid_type);
mc->multicast.ib->rec.pkey = cpu_to_be16(0xffff);
if (id_priv->id.ps == RDMA_PS_UDP)
@ -4066,8 +4069,6 @@ static int cma_iboe_join_multicast(struct rdma_id_private *id_priv,
mc->multicast.ib->rec.hop_limit = 1;
mc->multicast.ib->rec.mtu = iboe_get_mtu(ndev->mtu);
gid_type = id_priv->cma_dev->default_gid_type[id_priv->id.port_num -
rdma_start_port(id_priv->cma_dev->device)];
if (addr->sa_family == AF_INET) {
if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) {
mc->multicast.ib->rec.hop_limit = IPV6_DEFAULT_HOPLIMIT;
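A minimal standalone sketch of the effect of this hunk (an assumption drawn from the code above, not part of the commit): for an AF_INET address, the synthesized multicast GID keeps the ff0e IB multicast prefix only when the port's default GID type is IB_GID_TYPE_IB; for RoCE v2 (UDP encap) it starts with zero bytes, i.e. an IPv4-mapped style group address. The 0xff bytes at offsets 10-11 belong to that mapped layout and are not visible in the truncated hunk.

#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>

/* Hypothetical helper mirroring the new cma_iboe_set_mgid() AF_INET branch. */
static void sketch_set_mgid(unsigned char raw[16], in_addr_t group, int gid_type_is_ib)
{
	memset(raw, 0, 16);
	raw[0] = gid_type_is_ib ? 0xff : 0;
	raw[1] = gid_type_is_ib ? 0x0e : 0;
	raw[10] = 0xff;
	raw[11] = 0xff;
	memcpy(raw + 12, &group, 4);	/* group address in network byte order */
}

int main(void)
{
	unsigned char mgid[16];
	in_addr_t group = inet_addr("239.1.1.1");
	int ib, i;

	for (ib = 1; ib >= 0; ib--) {
		sketch_set_mgid(mgid, group, ib);
		printf("%s: ", ib ? "IB_GID_TYPE_IB" : "RoCE v2");
		for (i = 0; i < 16; i++)
			printf("%02x", mgid[i]);
		printf("\n");
	}
	return 0;
}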

View file

@ -1383,8 +1383,9 @@ static int create_qp(struct ib_uverbs_file *file,
attr.rwq_ind_tbl = ind_tbl;
}
if ((cmd_sz >= offsetof(typeof(*cmd), reserved1) +
sizeof(cmd->reserved1)) && cmd->reserved1) {
if (cmd_sz > sizeof(*cmd) &&
!ib_is_udata_cleared(ucore, sizeof(*cmd),
cmd_sz - sizeof(*cmd))) {
ret = -EOPNOTSUPP;
goto err_put;
}
@ -1482,11 +1483,21 @@ static int create_qp(struct ib_uverbs_file *file,
IB_QP_CREATE_MANAGED_SEND |
IB_QP_CREATE_MANAGED_RECV |
IB_QP_CREATE_SCATTER_FCS |
IB_QP_CREATE_CVLAN_STRIPPING)) {
IB_QP_CREATE_CVLAN_STRIPPING |
IB_QP_CREATE_SOURCE_QPN)) {
ret = -EINVAL;
goto err_put;
}
if (attr.create_flags & IB_QP_CREATE_SOURCE_QPN) {
if (!capable(CAP_NET_RAW)) {
ret = -EPERM;
goto err_put;
}
attr.source_qpn = cmd->source_qpn;
}
buf = (void *)cmd + sizeof(*cmd);
if (cmd_sz > sizeof(*cmd))
if (!(buf[0] == 0 && !memcmp(buf, buf + 1,

View file

@ -1244,6 +1244,18 @@ int ib_resolve_eth_dmac(struct ib_device *device,
if (rdma_link_local_addr((struct in6_addr *)grh->dgid.raw)) {
rdma_get_ll_mac((struct in6_addr *)grh->dgid.raw,
ah_attr->roce.dmac);
return 0;
}
if (rdma_is_multicast_addr((struct in6_addr *)ah_attr->grh.dgid.raw)) {
if (ipv6_addr_v4mapped((struct in6_addr *)ah_attr->grh.dgid.raw)) {
__be32 addr = 0;
memcpy(&addr, ah_attr->grh.dgid.raw + 12, 4);
ip_eth_mc_map(addr, (char *)ah_attr->roce.dmac);
} else {
ipv6_eth_mc_map((struct in6_addr *)ah_attr->grh.dgid.raw,
(char *)ah_attr->roce.dmac);
}
} else {
union ib_gid sgid;
struct ib_gid_attr sgid_attr;
@ -1569,15 +1581,53 @@ EXPORT_SYMBOL(ib_dealloc_fmr);
/* Multicast groups */
static bool is_valid_mcast_lid(struct ib_qp *qp, u16 lid)
{
struct ib_qp_init_attr init_attr = {};
struct ib_qp_attr attr = {};
int num_eth_ports = 0;
int port;
/* If QP state >= init, it is assigned to a port and we can check this
* port only.
*/
if (!ib_query_qp(qp, &attr, IB_QP_STATE | IB_QP_PORT, &init_attr)) {
if (attr.qp_state >= IB_QPS_INIT) {
if (qp->device->get_link_layer(qp->device, attr.port_num) !=
IB_LINK_LAYER_INFINIBAND)
return true;
goto lid_check;
}
}
/* Can't get a quick answer, iterate over all ports */
for (port = 0; port < qp->device->phys_port_cnt; port++)
if (qp->device->get_link_layer(qp->device, port) !=
IB_LINK_LAYER_INFINIBAND)
num_eth_ports++;
/* If we have at least one Ethernet port, RoCE annex declares that
* multicast LID should be ignored. We can't tell at this step if the
* QP belongs to an IB or Ethernet port.
*/
if (num_eth_ports)
return true;
/* If all the ports are IB, we can check according to IB spec. */
lid_check:
return !(lid < be16_to_cpu(IB_MULTICAST_LID_BASE) ||
lid == be16_to_cpu(IB_LID_PERMISSIVE));
}
int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
{
int ret;
if (!qp->device->attach_mcast)
return -ENOSYS;
if (gid->raw[0] != 0xff || qp->qp_type != IB_QPT_UD ||
lid < be16_to_cpu(IB_MULTICAST_LID_BASE) ||
lid == be16_to_cpu(IB_LID_PERMISSIVE))
if (!rdma_is_multicast_addr((struct in6_addr *)gid->raw) ||
qp->qp_type != IB_QPT_UD || !is_valid_mcast_lid(qp, lid))
return -EINVAL;
ret = qp->device->attach_mcast(qp, gid, lid);
@ -1593,9 +1643,9 @@ int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
if (!qp->device->detach_mcast)
return -ENOSYS;
if (gid->raw[0] != 0xff || qp->qp_type != IB_QPT_UD ||
lid < be16_to_cpu(IB_MULTICAST_LID_BASE) ||
lid == be16_to_cpu(IB_LID_PERMISSIVE))
if (!rdma_is_multicast_addr((struct in6_addr *)gid->raw) ||
qp->qp_type != IB_QPT_UD || !is_valid_mcast_lid(qp, lid))
return -EINVAL;
ret = qp->device->detach_mcast(qp, gid, lid);

View file

@ -218,6 +218,7 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev,
goto err_mtt;
uar = &to_mucontext(context)->uar;
cq->mcq.usage = MLX4_RES_USAGE_USER_VERBS;
} else {
err = mlx4_db_alloc(dev->dev, &cq->db, 1);
if (err)
@ -233,6 +234,7 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev,
goto err_db;
uar = &dev->priv_uar;
cq->mcq.usage = MLX4_RES_USAGE_DRIVER;
}
if (dev->eq_table)

View file

@ -81,6 +81,8 @@ static const char mlx4_ib_version[] =
DRV_VERSION "\n";
static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init);
static enum rdma_link_layer mlx4_ib_port_link_layer(struct ib_device *device,
u8 port_num);
static struct workqueue_struct *wq;
@ -552,6 +554,16 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
props->timestamp_mask = 0xFFFFFFFFFFFFULL;
props->max_ah = INT_MAX;
if ((dev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS) &&
(mlx4_ib_port_link_layer(ibdev, 1) == IB_LINK_LAYER_ETHERNET ||
mlx4_ib_port_link_layer(ibdev, 2) == IB_LINK_LAYER_ETHERNET)) {
props->rss_caps.max_rwq_indirection_tables = props->max_qp;
props->rss_caps.max_rwq_indirection_table_size =
dev->dev->caps.max_rss_tbl_sz;
props->rss_caps.supported_qpts = 1 << IB_QPT_RAW_PACKET;
props->max_wq_type_rq = props->max_qp;
}
if (!mlx4_is_slave(dev->dev))
err = mlx4_get_internal_clock_params(dev->dev, &clock_params);
@ -563,6 +575,13 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
}
}
if (uhw->outlen >= resp.response_length +
sizeof(resp.max_inl_recv_sz)) {
resp.response_length += sizeof(resp.max_inl_recv_sz);
resp.max_inl_recv_sz = dev->dev->caps.max_rq_sg *
sizeof(struct mlx4_wqe_data_seg);
}
if (uhw->outlen) {
err = ib_copy_to_udata(uhw, &resp, resp.response_length);
if (err)
@ -1069,6 +1088,9 @@ static struct ib_ucontext *mlx4_ib_alloc_ucontext(struct ib_device *ibdev,
INIT_LIST_HEAD(&context->db_page_list);
mutex_init(&context->db_page_mutex);
INIT_LIST_HEAD(&context->wqn_ranges_list);
mutex_init(&context->wqn_ranges_mutex);
if (ibdev->uverbs_abi_ver == MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION)
err = ib_copy_to_udata(udata, &resp_v3, sizeof(resp_v3));
else
@ -2713,6 +2735,26 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
ibdev->ib_dev.get_dev_fw_str = get_fw_ver_str;
ibdev->ib_dev.disassociate_ucontext = mlx4_ib_disassociate_ucontext;
if ((dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS) &&
((mlx4_ib_port_link_layer(&ibdev->ib_dev, 1) ==
IB_LINK_LAYER_ETHERNET) ||
(mlx4_ib_port_link_layer(&ibdev->ib_dev, 2) ==
IB_LINK_LAYER_ETHERNET))) {
ibdev->ib_dev.create_wq = mlx4_ib_create_wq;
ibdev->ib_dev.modify_wq = mlx4_ib_modify_wq;
ibdev->ib_dev.destroy_wq = mlx4_ib_destroy_wq;
ibdev->ib_dev.create_rwq_ind_table =
mlx4_ib_create_rwq_ind_table;
ibdev->ib_dev.destroy_rwq_ind_table =
mlx4_ib_destroy_rwq_ind_table;
ibdev->ib_dev.uverbs_ex_cmd_mask |=
(1ull << IB_USER_VERBS_EX_CMD_CREATE_WQ) |
(1ull << IB_USER_VERBS_EX_CMD_MODIFY_WQ) |
(1ull << IB_USER_VERBS_EX_CMD_DESTROY_WQ) |
(1ull << IB_USER_VERBS_EX_CMD_CREATE_RWQ_IND_TBL) |
(1ull << IB_USER_VERBS_EX_CMD_DESTROY_RWQ_IND_TBL);
}
if (!mlx4_is_slave(ibdev->dev)) {
ibdev->ib_dev.alloc_fmr = mlx4_ib_fmr_alloc;
ibdev->ib_dev.map_phys_fmr = mlx4_ib_map_phys_fmr;
@ -2772,7 +2814,8 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
allocated = 0;
if (mlx4_ib_port_link_layer(&ibdev->ib_dev, i + 1) ==
IB_LINK_LAYER_ETHERNET) {
err = mlx4_counter_alloc(ibdev->dev, &counter_index);
err = mlx4_counter_alloc(ibdev->dev, &counter_index,
MLX4_RES_USAGE_DRIVER);
/* if failed to allocate a new counter, use default */
if (err)
counter_index =
@ -2827,7 +2870,8 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
ibdev->steer_qpn_count = MLX4_IB_UC_MAX_NUM_QPS;
err = mlx4_qp_reserve_range(dev, ibdev->steer_qpn_count,
MLX4_IB_UC_STEER_QPN_ALIGN,
&ibdev->steer_qpn_base, 0);
&ibdev->steer_qpn_base, 0,
MLX4_RES_USAGE_DRIVER);
if (err)
goto err_counter;

View file

@ -46,6 +46,7 @@
#include <linux/mlx4/device.h>
#include <linux/mlx4/doorbell.h>
#include <linux/mlx4/qp.h>
#define MLX4_IB_DRV_NAME "mlx4_ib"
@ -88,6 +89,8 @@ struct mlx4_ib_ucontext {
struct list_head db_page_list;
struct mutex db_page_mutex;
struct mlx4_ib_vma_private_data hw_bar_info[HW_BAR_COUNT];
struct list_head wqn_ranges_list;
struct mutex wqn_ranges_mutex; /* protect wqn_ranges_list */
};
struct mlx4_ib_pd {
@ -289,8 +292,25 @@ struct mlx4_roce_smac_vlan_info {
int update_vid;
};
struct mlx4_wqn_range {
int base_wqn;
int size;
int refcount;
bool dirty;
struct list_head list;
};
struct mlx4_ib_rss {
unsigned int base_qpn_tbl_sz;
u8 flags;
u8 rss_key[MLX4_EN_RSS_KEY_SIZE];
};
struct mlx4_ib_qp {
struct ib_qp ibqp;
union {
struct ib_qp ibqp;
struct ib_wq ibwq;
};
struct mlx4_qp mqp;
struct mlx4_buf buf;
@ -318,6 +338,7 @@ struct mlx4_ib_qp {
u8 sq_no_prefetch;
u8 state;
int mlx_type;
u32 inl_recv_sz;
struct list_head gid_list;
struct list_head steering_rules;
struct mlx4_ib_buf *sqp_proxy_rcv;
@ -328,6 +349,10 @@ struct mlx4_ib_qp {
struct list_head cq_recv_list;
struct list_head cq_send_list;
struct counter_index *counter_index;
struct mlx4_wqn_range *wqn_range;
/* Number of RSS QP parents that use this WQ */
u32 rss_usecnt;
struct mlx4_ib_rss *rss_ctx;
};
struct mlx4_ib_srq {
@ -623,6 +648,8 @@ struct mlx4_uverbs_ex_query_device_resp {
__u32 comp_mask;
__u32 response_length;
__u64 hca_core_clock_offset;
__u32 max_inl_recv_sz;
__u32 reserved;
};
static inline struct mlx4_ib_dev *to_mdev(struct ib_device *ibdev)
@ -890,4 +917,17 @@ void mlx4_sched_ib_sl2vl_update_work(struct mlx4_ib_dev *ibdev,
void mlx4_ib_sl2vl_update(struct mlx4_ib_dev *mdev, int port);
struct ib_wq *mlx4_ib_create_wq(struct ib_pd *pd,
struct ib_wq_init_attr *init_attr,
struct ib_udata *udata);
int mlx4_ib_destroy_wq(struct ib_wq *wq);
int mlx4_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
u32 wq_attr_mask, struct ib_udata *udata);
struct ib_rwq_ind_table
*mlx4_ib_create_rwq_ind_table(struct ib_device *device,
struct ib_rwq_ind_table_init_attr *init_attr,
struct ib_udata *udata);
int mlx4_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table);
#endif /* MLX4_IB_H */

The diff for this file is not shown because it is too large.

View file

@ -1,4 +1,4 @@
obj-$(CONFIG_MLX5_INFINIBAND) += mlx5_ib.o
mlx5_ib-y := main.o cq.o doorbell.o qp.o mem.o srq.o mr.o ah.o mad.o gsi.o ib_virt.o cmd.o
mlx5_ib-y := main.o cq.o doorbell.o qp.o mem.o srq.o mr.o ah.o mad.o gsi.o ib_virt.o cmd.o cong.o
mlx5_ib-$(CONFIG_INFINIBAND_ON_DEMAND_PAGING) += odp.o

View file

@ -57,3 +57,23 @@ int mlx5_cmd_query_cong_counter(struct mlx5_core_dev *dev,
MLX5_SET(query_cong_statistics_in, in, clear, reset);
return mlx5_cmd_exec(dev, in, sizeof(in), out, out_size);
}
int mlx5_cmd_query_cong_params(struct mlx5_core_dev *dev, int cong_point,
void *out, int out_size)
{
u32 in[MLX5_ST_SZ_DW(query_cong_params_in)] = { };
MLX5_SET(query_cong_params_in, in, opcode,
MLX5_CMD_OP_QUERY_CONG_PARAMS);
MLX5_SET(query_cong_params_in, in, cong_protocol, cong_point);
return mlx5_cmd_exec(dev, in, sizeof(in), out, out_size);
}
int mlx5_cmd_modify_cong_params(struct mlx5_core_dev *dev,
void *in, int in_size)
{
u32 out[MLX5_ST_SZ_DW(modify_cong_params_out)] = { };
return mlx5_cmd_exec(dev, in, in_size, out, sizeof(out));
}

View file

@ -39,4 +39,8 @@
int mlx5_cmd_null_mkey(struct mlx5_core_dev *dev, u32 *null_mkey);
int mlx5_cmd_query_cong_counter(struct mlx5_core_dev *dev,
bool reset, void *out, int out_size);
int mlx5_cmd_query_cong_params(struct mlx5_core_dev *dev, int cong_point,
void *out, int out_size);
int mlx5_cmd_modify_cong_params(struct mlx5_core_dev *mdev,
void *in, int in_size);
#endif /* MLX5_IB_CMD_H */

View file

@ -0,0 +1,421 @@
/*
* Copyright (c) 2013-2017, Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/debugfs.h>
#include "mlx5_ib.h"
#include "cmd.h"
enum mlx5_ib_cong_node_type {
MLX5_IB_RROCE_ECN_RP = 1,
MLX5_IB_RROCE_ECN_NP = 2,
};
static const char * const mlx5_ib_dbg_cc_name[] = {
"rp_clamp_tgt_rate",
"rp_clamp_tgt_rate_ati",
"rp_time_reset",
"rp_byte_reset",
"rp_threshold",
"rp_ai_rate",
"rp_hai_rate",
"rp_min_dec_fac",
"rp_min_rate",
"rp_rate_to_set_on_first_cnp",
"rp_dce_tcp_g",
"rp_dce_tcp_rtt",
"rp_rate_reduce_monitor_period",
"rp_initial_alpha_value",
"rp_gd",
"np_cnp_dscp",
"np_cnp_prio_mode",
"np_cnp_prio",
};
#define MLX5_IB_RP_CLAMP_TGT_RATE_ATTR BIT(1)
#define MLX5_IB_RP_CLAMP_TGT_RATE_ATI_ATTR BIT(2)
#define MLX5_IB_RP_TIME_RESET_ATTR BIT(3)
#define MLX5_IB_RP_BYTE_RESET_ATTR BIT(4)
#define MLX5_IB_RP_THRESHOLD_ATTR BIT(5)
#define MLX5_IB_RP_AI_RATE_ATTR BIT(7)
#define MLX5_IB_RP_HAI_RATE_ATTR BIT(8)
#define MLX5_IB_RP_MIN_DEC_FAC_ATTR BIT(9)
#define MLX5_IB_RP_MIN_RATE_ATTR BIT(10)
#define MLX5_IB_RP_RATE_TO_SET_ON_FIRST_CNP_ATTR BIT(11)
#define MLX5_IB_RP_DCE_TCP_G_ATTR BIT(12)
#define MLX5_IB_RP_DCE_TCP_RTT_ATTR BIT(13)
#define MLX5_IB_RP_RATE_REDUCE_MONITOR_PERIOD_ATTR BIT(14)
#define MLX5_IB_RP_INITIAL_ALPHA_VALUE_ATTR BIT(15)
#define MLX5_IB_RP_GD_ATTR BIT(16)
#define MLX5_IB_NP_CNP_DSCP_ATTR BIT(3)
#define MLX5_IB_NP_CNP_PRIO_MODE_ATTR BIT(4)
static enum mlx5_ib_cong_node_type
mlx5_ib_param_to_node(enum mlx5_ib_dbg_cc_types param_offset)
{
if (param_offset >= MLX5_IB_DBG_CC_RP_CLAMP_TGT_RATE &&
param_offset <= MLX5_IB_DBG_CC_RP_GD)
return MLX5_IB_RROCE_ECN_RP;
else
return MLX5_IB_RROCE_ECN_NP;
}
static u32 mlx5_get_cc_param_val(void *field, int offset)
{
switch (offset) {
case MLX5_IB_DBG_CC_RP_CLAMP_TGT_RATE:
return MLX5_GET(cong_control_r_roce_ecn_rp, field,
clamp_tgt_rate);
case MLX5_IB_DBG_CC_RP_CLAMP_TGT_RATE_ATI:
return MLX5_GET(cong_control_r_roce_ecn_rp, field,
clamp_tgt_rate_after_time_inc);
case MLX5_IB_DBG_CC_RP_TIME_RESET:
return MLX5_GET(cong_control_r_roce_ecn_rp, field,
rpg_time_reset);
case MLX5_IB_DBG_CC_RP_BYTE_RESET:
return MLX5_GET(cong_control_r_roce_ecn_rp, field,
rpg_byte_reset);
case MLX5_IB_DBG_CC_RP_THRESHOLD:
return MLX5_GET(cong_control_r_roce_ecn_rp, field,
rpg_threshold);
case MLX5_IB_DBG_CC_RP_AI_RATE:
return MLX5_GET(cong_control_r_roce_ecn_rp, field,
rpg_ai_rate);
case MLX5_IB_DBG_CC_RP_HAI_RATE:
return MLX5_GET(cong_control_r_roce_ecn_rp, field,
rpg_hai_rate);
case MLX5_IB_DBG_CC_RP_MIN_DEC_FAC:
return MLX5_GET(cong_control_r_roce_ecn_rp, field,
rpg_min_dec_fac);
case MLX5_IB_DBG_CC_RP_MIN_RATE:
return MLX5_GET(cong_control_r_roce_ecn_rp, field,
rpg_min_rate);
case MLX5_IB_DBG_CC_RP_RATE_TO_SET_ON_FIRST_CNP:
return MLX5_GET(cong_control_r_roce_ecn_rp, field,
rate_to_set_on_first_cnp);
case MLX5_IB_DBG_CC_RP_DCE_TCP_G:
return MLX5_GET(cong_control_r_roce_ecn_rp, field,
dce_tcp_g);
case MLX5_IB_DBG_CC_RP_DCE_TCP_RTT:
return MLX5_GET(cong_control_r_roce_ecn_rp, field,
dce_tcp_rtt);
case MLX5_IB_DBG_CC_RP_RATE_REDUCE_MONITOR_PERIOD:
return MLX5_GET(cong_control_r_roce_ecn_rp, field,
rate_reduce_monitor_period);
case MLX5_IB_DBG_CC_RP_INITIAL_ALPHA_VALUE:
return MLX5_GET(cong_control_r_roce_ecn_rp, field,
initial_alpha_value);
case MLX5_IB_DBG_CC_RP_GD:
return MLX5_GET(cong_control_r_roce_ecn_rp, field,
rpg_gd);
case MLX5_IB_DBG_CC_NP_CNP_DSCP:
return MLX5_GET(cong_control_r_roce_ecn_np, field,
cnp_dscp);
case MLX5_IB_DBG_CC_NP_CNP_PRIO_MODE:
return MLX5_GET(cong_control_r_roce_ecn_np, field,
cnp_prio_mode);
case MLX5_IB_DBG_CC_NP_CNP_PRIO:
return MLX5_GET(cong_control_r_roce_ecn_np, field,
cnp_802p_prio);
default:
return 0;
}
}
static void mlx5_ib_set_cc_param_mask_val(void *field, int offset,
u32 var, u32 *attr_mask)
{
switch (offset) {
case MLX5_IB_DBG_CC_RP_CLAMP_TGT_RATE:
*attr_mask |= MLX5_IB_RP_CLAMP_TGT_RATE_ATTR;
MLX5_SET(cong_control_r_roce_ecn_rp, field,
clamp_tgt_rate, var);
break;
case MLX5_IB_DBG_CC_RP_CLAMP_TGT_RATE_ATI:
*attr_mask |= MLX5_IB_RP_CLAMP_TGT_RATE_ATI_ATTR;
MLX5_SET(cong_control_r_roce_ecn_rp, field,
clamp_tgt_rate_after_time_inc, var);
break;
case MLX5_IB_DBG_CC_RP_TIME_RESET:
*attr_mask |= MLX5_IB_RP_TIME_RESET_ATTR;
MLX5_SET(cong_control_r_roce_ecn_rp, field,
rpg_time_reset, var);
break;
case MLX5_IB_DBG_CC_RP_BYTE_RESET:
*attr_mask |= MLX5_IB_RP_BYTE_RESET_ATTR;
MLX5_SET(cong_control_r_roce_ecn_rp, field,
rpg_byte_reset, var);
break;
case MLX5_IB_DBG_CC_RP_THRESHOLD:
*attr_mask |= MLX5_IB_RP_THRESHOLD_ATTR;
MLX5_SET(cong_control_r_roce_ecn_rp, field,
rpg_threshold, var);
break;
case MLX5_IB_DBG_CC_RP_AI_RATE:
*attr_mask |= MLX5_IB_RP_AI_RATE_ATTR;
MLX5_SET(cong_control_r_roce_ecn_rp, field,
rpg_ai_rate, var);
break;
case MLX5_IB_DBG_CC_RP_HAI_RATE:
*attr_mask |= MLX5_IB_RP_HAI_RATE_ATTR;
MLX5_SET(cong_control_r_roce_ecn_rp, field,
rpg_hai_rate, var);
break;
case MLX5_IB_DBG_CC_RP_MIN_DEC_FAC:
*attr_mask |= MLX5_IB_RP_MIN_DEC_FAC_ATTR;
MLX5_SET(cong_control_r_roce_ecn_rp, field,
rpg_min_dec_fac, var);
break;
case MLX5_IB_DBG_CC_RP_MIN_RATE:
*attr_mask |= MLX5_IB_RP_MIN_RATE_ATTR;
MLX5_SET(cong_control_r_roce_ecn_rp, field,
rpg_min_rate, var);
break;
case MLX5_IB_DBG_CC_RP_RATE_TO_SET_ON_FIRST_CNP:
*attr_mask |= MLX5_IB_RP_RATE_TO_SET_ON_FIRST_CNP_ATTR;
MLX5_SET(cong_control_r_roce_ecn_rp, field,
rate_to_set_on_first_cnp, var);
break;
case MLX5_IB_DBG_CC_RP_DCE_TCP_G:
*attr_mask |= MLX5_IB_RP_DCE_TCP_G_ATTR;
MLX5_SET(cong_control_r_roce_ecn_rp, field,
dce_tcp_g, var);
break;
case MLX5_IB_DBG_CC_RP_DCE_TCP_RTT:
*attr_mask |= MLX5_IB_RP_DCE_TCP_RTT_ATTR;
MLX5_SET(cong_control_r_roce_ecn_rp, field,
dce_tcp_rtt, var);
break;
case MLX5_IB_DBG_CC_RP_RATE_REDUCE_MONITOR_PERIOD:
*attr_mask |= MLX5_IB_RP_RATE_REDUCE_MONITOR_PERIOD_ATTR;
MLX5_SET(cong_control_r_roce_ecn_rp, field,
rate_reduce_monitor_period, var);
break;
case MLX5_IB_DBG_CC_RP_INITIAL_ALPHA_VALUE:
*attr_mask |= MLX5_IB_RP_INITIAL_ALPHA_VALUE_ATTR;
MLX5_SET(cong_control_r_roce_ecn_rp, field,
initial_alpha_value, var);
break;
case MLX5_IB_DBG_CC_RP_GD:
*attr_mask |= MLX5_IB_RP_GD_ATTR;
MLX5_SET(cong_control_r_roce_ecn_rp, field,
rpg_gd, var);
break;
case MLX5_IB_DBG_CC_NP_CNP_DSCP:
*attr_mask |= MLX5_IB_NP_CNP_DSCP_ATTR;
MLX5_SET(cong_control_r_roce_ecn_np, field, cnp_dscp, var);
break;
case MLX5_IB_DBG_CC_NP_CNP_PRIO_MODE:
*attr_mask |= MLX5_IB_NP_CNP_PRIO_MODE_ATTR;
MLX5_SET(cong_control_r_roce_ecn_np, field, cnp_prio_mode, var);
break;
case MLX5_IB_DBG_CC_NP_CNP_PRIO:
*attr_mask |= MLX5_IB_NP_CNP_PRIO_MODE_ATTR;
MLX5_SET(cong_control_r_roce_ecn_np, field, cnp_prio_mode, 0);
MLX5_SET(cong_control_r_roce_ecn_np, field, cnp_802p_prio, var);
break;
}
}
static int mlx5_ib_get_cc_params(struct mlx5_ib_dev *dev, int offset, u32 *var)
{
int outlen = MLX5_ST_SZ_BYTES(query_cong_params_out);
void *out;
void *field;
int err;
enum mlx5_ib_cong_node_type node;
out = kvzalloc(outlen, GFP_KERNEL);
if (!out)
return -ENOMEM;
node = mlx5_ib_param_to_node(offset);
err = mlx5_cmd_query_cong_params(dev->mdev, node, out, outlen);
if (err)
goto free;
field = MLX5_ADDR_OF(query_cong_params_out, out, congestion_parameters);
*var = mlx5_get_cc_param_val(field, offset);
free:
kvfree(out);
return err;
}
static int mlx5_ib_set_cc_params(struct mlx5_ib_dev *dev, int offset, u32 var)
{
int inlen = MLX5_ST_SZ_BYTES(modify_cong_params_in);
void *in;
void *field;
enum mlx5_ib_cong_node_type node;
u32 attr_mask = 0;
int err;
in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
MLX5_SET(modify_cong_params_in, in, opcode,
MLX5_CMD_OP_MODIFY_CONG_PARAMS);
node = mlx5_ib_param_to_node(offset);
MLX5_SET(modify_cong_params_in, in, cong_protocol, node);
field = MLX5_ADDR_OF(modify_cong_params_in, in, congestion_parameters);
mlx5_ib_set_cc_param_mask_val(field, offset, var, &attr_mask);
field = MLX5_ADDR_OF(modify_cong_params_in, in, field_select);
MLX5_SET(field_select_r_roce_rp, field, field_select_r_roce_rp,
attr_mask);
err = mlx5_cmd_modify_cong_params(dev->mdev, in, inlen);
kvfree(in);
return err;
}
static ssize_t set_param(struct file *filp, const char __user *buf,
size_t count, loff_t *pos)
{
struct mlx5_ib_dbg_param *param = filp->private_data;
int offset = param->offset;
char lbuf[11] = { };
u32 var;
int ret;
if (count > sizeof(lbuf))
return -EINVAL;
if (copy_from_user(lbuf, buf, count))
return -EFAULT;
lbuf[sizeof(lbuf) - 1] = '\0';
if (kstrtou32(lbuf, 0, &var))
return -EINVAL;
ret = mlx5_ib_set_cc_params(param->dev, offset, var);
return ret ? ret : count;
}
static ssize_t get_param(struct file *filp, char __user *buf, size_t count,
loff_t *pos)
{
struct mlx5_ib_dbg_param *param = filp->private_data;
int offset = param->offset;
u32 var = 0;
int ret;
char lbuf[11];
if (*pos)
return 0;
ret = mlx5_ib_get_cc_params(param->dev, offset, &var);
if (ret)
return ret;
ret = snprintf(lbuf, sizeof(lbuf), "%d\n", var);
if (ret < 0)
return ret;
if (copy_to_user(buf, lbuf, ret))
return -EFAULT;
*pos += ret;
return ret;
}
static const struct file_operations dbg_cc_fops = {
.owner = THIS_MODULE,
.open = simple_open,
.write = set_param,
.read = get_param,
};
void mlx5_ib_cleanup_cong_debugfs(struct mlx5_ib_dev *dev)
{
if (!mlx5_debugfs_root ||
!dev->dbg_cc_params ||
!dev->dbg_cc_params->root)
return;
debugfs_remove_recursive(dev->dbg_cc_params->root);
kfree(dev->dbg_cc_params);
dev->dbg_cc_params = NULL;
}
int mlx5_ib_init_cong_debugfs(struct mlx5_ib_dev *dev)
{
struct mlx5_ib_dbg_cc_params *dbg_cc_params;
int i;
if (!mlx5_debugfs_root)
goto out;
if (!MLX5_CAP_GEN(dev->mdev, cc_query_allowed) ||
!MLX5_CAP_GEN(dev->mdev, cc_modify_allowed))
goto out;
dbg_cc_params = kzalloc(sizeof(*dbg_cc_params), GFP_KERNEL);
if (!dbg_cc_params)
goto out;
dev->dbg_cc_params = dbg_cc_params;
dbg_cc_params->root = debugfs_create_dir("cc_params",
dev->mdev->priv.dbg_root);
if (!dbg_cc_params->root)
goto err;
for (i = 0; i < MLX5_IB_DBG_CC_MAX; i++) {
dbg_cc_params->params[i].offset = i;
dbg_cc_params->params[i].dev = dev;
dbg_cc_params->params[i].dentry =
debugfs_create_file(mlx5_ib_dbg_cc_name[i],
0600, dbg_cc_params->root,
&dbg_cc_params->params[i],
&dbg_cc_fops);
if (!dbg_cc_params->params[i].dentry)
goto err;
}
out:
return 0;
err:
mlx5_ib_warn(dev, "cong debugfs failure\n");
mlx5_ib_cleanup_cong_debugfs(dev);
/*
* We don't want to fail the driver if debugfs fails to initialize,
* so we do not forward the error to the user.
*/
return 0;
}
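A hedged userspace sketch of how the new congestion-control debugfs entries could be read. The debugfs mount point and PCI address in the path are assumptions; the parameter file names come from mlx5_ib_dbg_cc_name[] above.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Path is an assumption: <debugfs>/mlx5/<pci BDF>/cc_params/<param>. */
	const char *path = "/sys/kernel/debug/mlx5/0000:03:00.0/cc_params/rp_ai_rate";
	char buf[16] = {0};
	int fd = open(path, O_RDONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (read(fd, buf, sizeof(buf) - 1) > 0)
		printf("rp_ai_rate = %s", buf);	/* value printed by get_param() */
	close(fd);
	return 0;
}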

View file

@ -96,6 +96,7 @@ int mlx5_ib_set_vf_link_state(struct ib_device *device, int vf,
struct mlx5_ib_dev *dev = to_mdev(device);
struct mlx5_core_dev *mdev = dev->mdev;
struct mlx5_hca_vport_context *in;
struct mlx5_vf_context *vfs_ctx = mdev->priv.sriov.vfs_ctx;
int err;
in = kzalloc(sizeof(*in), GFP_KERNEL);
@ -109,6 +110,8 @@ int mlx5_ib_set_vf_link_state(struct ib_device *device, int vf,
}
in->field_select = MLX5_HCA_VPORT_SEL_STATE_POLICY;
err = mlx5_core_modify_hca_vport_context(mdev, 1, 1, vf + 1, in);
if (!err)
vfs_ctx[vf].policy = in->policy;
out:
kfree(in);
@ -151,6 +154,7 @@ static int set_vf_node_guid(struct ib_device *device, int vf, u8 port, u64 guid)
struct mlx5_ib_dev *dev = to_mdev(device);
struct mlx5_core_dev *mdev = dev->mdev;
struct mlx5_hca_vport_context *in;
struct mlx5_vf_context *vfs_ctx = mdev->priv.sriov.vfs_ctx;
int err;
in = kzalloc(sizeof(*in), GFP_KERNEL);
@ -160,6 +164,8 @@ static int set_vf_node_guid(struct ib_device *device, int vf, u8 port, u64 guid)
in->field_select = MLX5_HCA_VPORT_SEL_NODE_GUID;
in->node_guid = guid;
err = mlx5_core_modify_hca_vport_context(mdev, 1, 1, vf + 1, in);
if (!err)
vfs_ctx[vf].node_guid = guid;
kfree(in);
return err;
}
@ -169,6 +175,7 @@ static int set_vf_port_guid(struct ib_device *device, int vf, u8 port, u64 guid)
struct mlx5_ib_dev *dev = to_mdev(device);
struct mlx5_core_dev *mdev = dev->mdev;
struct mlx5_hca_vport_context *in;
struct mlx5_vf_context *vfs_ctx = mdev->priv.sriov.vfs_ctx;
int err;
in = kzalloc(sizeof(*in), GFP_KERNEL);
@ -178,6 +185,8 @@ static int set_vf_port_guid(struct ib_device *device, int vf, u8 port, u64 guid)
in->field_select = MLX5_HCA_VPORT_SEL_PORT_GUID;
in->port_guid = guid;
err = mlx5_core_modify_hca_vport_context(mdev, 1, 1, vf + 1, in);
if (!err)
vfs_ctx[vf].port_guid = guid;
kfree(in);
return err;
}

View file

@ -30,6 +30,7 @@
* SOFTWARE.
*/
#include <linux/debugfs.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/init.h>
@ -58,6 +59,7 @@
#include <linux/mlx5/vport.h>
#include "mlx5_ib.h"
#include "cmd.h"
#include <linux/mlx5/vport.h>
#define DRIVER_NAME "mlx5_ib"
#define DRIVER_VERSION "5.0-0"
@ -97,6 +99,20 @@ mlx5_ib_port_link_layer(struct ib_device *device, u8 port_num)
return mlx5_port_type_cap_to_rdma_ll(port_type_cap);
}
static int get_port_state(struct ib_device *ibdev,
u8 port_num,
enum ib_port_state *state)
{
struct ib_port_attr attr;
int ret;
memset(&attr, 0, sizeof(attr));
ret = mlx5_ib_query_port(ibdev, port_num, &attr);
if (!ret)
*state = attr.state;
return ret;
}
static int mlx5_netdev_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
@ -114,6 +130,7 @@ static int mlx5_netdev_event(struct notifier_block *this,
write_unlock(&ibdev->roce.netdev_lock);
break;
case NETDEV_CHANGE:
case NETDEV_UP:
case NETDEV_DOWN: {
struct net_device *lag_ndev = mlx5_lag_get_roce_netdev(ibdev->mdev);
@ -127,10 +144,23 @@ static int mlx5_netdev_event(struct notifier_block *this,
if ((upper == ndev || (!upper && ndev == ibdev->roce.netdev))
&& ibdev->ib_active) {
struct ib_event ibev = { };
enum ib_port_state port_state;
if (get_port_state(&ibdev->ib_dev, 1, &port_state))
return NOTIFY_DONE;
if (ibdev->roce.last_port_state == port_state)
return NOTIFY_DONE;
ibdev->roce.last_port_state = port_state;
ibev.device = &ibdev->ib_dev;
ibev.event = (event == NETDEV_UP) ?
IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
if (port_state == IB_PORT_DOWN)
ibev.event = IB_EVENT_PORT_ERR;
else if (port_state == IB_PORT_ACTIVE)
ibev.event = IB_EVENT_PORT_ACTIVE;
else
return NOTIFY_DONE;
ibev.element.port_num = 1;
ib_dispatch_event(&ibev);
}
@ -668,6 +698,14 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
props->device_cap_flags |= IB_DEVICE_UD_TSO;
}
if (MLX5_CAP_GEN(dev->mdev, rq_delay_drop) &&
MLX5_CAP_GEN(dev->mdev, general_notification_event))
props->raw_packet_caps |= IB_RAW_PACKET_CAP_DELAY_DROP;
if (MLX5_CAP_GEN(mdev, ipoib_enhanced_offloads) &&
MLX5_CAP_IPOIB_ENHANCED(mdev, csum_cap))
props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;
if (MLX5_CAP_GEN(dev->mdev, eth_net_offloads) &&
MLX5_CAP_ETH(dev->mdev, scatter_fcs)) {
/* Legacy bit to support old userspace libraries */
@ -1187,6 +1225,45 @@ static int deallocate_uars(struct mlx5_ib_dev *dev, struct mlx5_ib_ucontext *con
return 0;
}
static int mlx5_ib_alloc_transport_domain(struct mlx5_ib_dev *dev, u32 *tdn)
{
int err;
err = mlx5_core_alloc_transport_domain(dev->mdev, tdn);
if (err)
return err;
if ((MLX5_CAP_GEN(dev->mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) ||
!MLX5_CAP_GEN(dev->mdev, disable_local_lb))
return err;
mutex_lock(&dev->lb_mutex);
dev->user_td++;
if (dev->user_td == 2)
err = mlx5_nic_vport_update_local_lb(dev->mdev, true);
mutex_unlock(&dev->lb_mutex);
return err;
}
static void mlx5_ib_dealloc_transport_domain(struct mlx5_ib_dev *dev, u32 tdn)
{
mlx5_core_dealloc_transport_domain(dev->mdev, tdn);
if ((MLX5_CAP_GEN(dev->mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) ||
!MLX5_CAP_GEN(dev->mdev, disable_local_lb))
return;
mutex_lock(&dev->lb_mutex);
dev->user_td--;
if (dev->user_td < 2)
mlx5_nic_vport_update_local_lb(dev->mdev, false);
mutex_unlock(&dev->lb_mutex);
}
static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
struct ib_udata *udata)
{
@ -1295,8 +1372,7 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
mutex_init(&context->upd_xlt_page_mutex);
if (MLX5_CAP_GEN(dev->mdev, log_max_transport_domain)) {
err = mlx5_core_alloc_transport_domain(dev->mdev,
&context->tdn);
err = mlx5_ib_alloc_transport_domain(dev, &context->tdn);
if (err)
goto out_page;
}
@ -1362,7 +1438,7 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
out_td:
if (MLX5_CAP_GEN(dev->mdev, log_max_transport_domain))
mlx5_core_dealloc_transport_domain(dev->mdev, context->tdn);
mlx5_ib_dealloc_transport_domain(dev, context->tdn);
out_page:
free_page(context->upd_xlt_page);
@ -1390,7 +1466,7 @@ static int mlx5_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
bfregi = &context->bfregi;
if (MLX5_CAP_GEN(dev->mdev, log_max_transport_domain))
mlx5_core_dealloc_transport_domain(dev->mdev, context->tdn);
mlx5_ib_dealloc_transport_domain(dev, context->tdn);
free_page(context->upd_xlt_page);
deallocate_uars(dev, context);
@ -2030,21 +2106,32 @@ static int parse_flow_attr(struct mlx5_core_dev *mdev, u32 *match_c,
*/
static bool flow_is_multicast_only(struct ib_flow_attr *ib_attr)
{
struct ib_flow_spec_eth *eth_spec;
union ib_flow_spec *flow_spec;
if (ib_attr->type != IB_FLOW_ATTR_NORMAL ||
ib_attr->size < sizeof(struct ib_flow_attr) +
sizeof(struct ib_flow_spec_eth) ||
ib_attr->num_of_specs < 1)
return false;
eth_spec = (struct ib_flow_spec_eth *)(ib_attr + 1);
if (eth_spec->type != IB_FLOW_SPEC_ETH ||
eth_spec->size != sizeof(*eth_spec))
return false;
flow_spec = (union ib_flow_spec *)(ib_attr + 1);
if (flow_spec->type == IB_FLOW_SPEC_IPV4) {
struct ib_flow_spec_ipv4 *ipv4_spec;
return is_multicast_ether_addr(eth_spec->mask.dst_mac) &&
is_multicast_ether_addr(eth_spec->val.dst_mac);
ipv4_spec = (struct ib_flow_spec_ipv4 *)flow_spec;
if (ipv4_is_multicast(ipv4_spec->val.dst_ip))
return true;
return false;
}
if (flow_spec->type == IB_FLOW_SPEC_ETH) {
struct ib_flow_spec_eth *eth_spec;
eth_spec = (struct ib_flow_spec_eth *)flow_spec;
return is_multicast_ether_addr(eth_spec->mask.dst_mac) &&
is_multicast_ether_addr(eth_spec->val.dst_mac);
}
return false;
}
static bool is_valid_ethertype(struct mlx5_core_dev *mdev,
@ -2522,8 +2609,14 @@ unlock:
static int mlx5_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
struct mlx5_ib_qp *mqp = to_mqp(ibqp);
int err;
if (mqp->flags & MLX5_IB_QP_UNDERLAY) {
mlx5_ib_dbg(dev, "Attaching a multi cast group to underlay QP is not supported\n");
return -EOPNOTSUPP;
}
err = mlx5_core_attach_mcg(dev->mdev, gid, ibqp->qp_num);
if (err)
mlx5_ib_warn(dev, "failed attaching QPN 0x%x, MGID %pI6\n",
@ -2685,6 +2778,26 @@ static void mlx5_ib_handle_internal_error(struct mlx5_ib_dev *ibdev)
spin_unlock_irqrestore(&ibdev->reset_flow_resource_lock, flags);
}
static void delay_drop_handler(struct work_struct *work)
{
int err;
struct mlx5_ib_delay_drop *delay_drop =
container_of(work, struct mlx5_ib_delay_drop,
delay_drop_work);
atomic_inc(&delay_drop->events_cnt);
mutex_lock(&delay_drop->lock);
err = mlx5_core_set_delay_drop(delay_drop->dev->mdev,
delay_drop->timeout);
if (err) {
mlx5_ib_warn(delay_drop->dev, "Failed to set delay drop, timeout=%u\n",
delay_drop->timeout);
delay_drop->activate = false;
}
mutex_unlock(&delay_drop->lock);
}
static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context,
enum mlx5_dev_event event, unsigned long param)
{
@ -2737,8 +2850,11 @@ static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context,
ibev.event = IB_EVENT_CLIENT_REREGISTER;
port = (u8)param;
break;
case MLX5_DEV_EVENT_DELAY_DROP_TIMEOUT:
schedule_work(&ibdev->delay_drop.delay_drop_work);
goto out;
default:
return;
goto out;
}
ibev.device = &ibdev->ib_dev;
@ -2746,7 +2862,7 @@ static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context,
if (port < 1 || port > ibdev->num_ports) {
mlx5_ib_warn(ibdev, "warning: event on port %d\n", port);
return;
goto out;
}
if (ibdev->ib_active)
@ -2754,6 +2870,9 @@ static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context,
if (fatal)
ibdev->ib_active = false;
out:
return;
}
static int set_has_smi_cap(struct mlx5_ib_dev *dev)
@ -3313,6 +3432,17 @@ static const struct mlx5_ib_counter cong_cnts[] = {
INIT_CONG_COUNTER(np_cnp_sent),
};
static const struct mlx5_ib_counter extended_err_cnts[] = {
INIT_Q_COUNTER(resp_local_length_error),
INIT_Q_COUNTER(resp_cqe_error),
INIT_Q_COUNTER(req_cqe_error),
INIT_Q_COUNTER(req_remote_invalid_request),
INIT_Q_COUNTER(req_remote_access_errors),
INIT_Q_COUNTER(resp_remote_access_errors),
INIT_Q_COUNTER(resp_cqe_flush_error),
INIT_Q_COUNTER(req_cqe_flush_error),
};
static void mlx5_ib_dealloc_counters(struct mlx5_ib_dev *dev)
{
unsigned int i;
@ -3337,6 +3467,10 @@ static int __mlx5_ib_alloc_counters(struct mlx5_ib_dev *dev,
if (MLX5_CAP_GEN(dev->mdev, retransmission_q_counters))
num_counters += ARRAY_SIZE(retrans_q_cnts);
if (MLX5_CAP_GEN(dev->mdev, enhanced_error_q_counters))
num_counters += ARRAY_SIZE(extended_err_cnts);
cnts->num_q_counters = num_counters;
if (MLX5_CAP_GEN(dev->mdev, cc_query_allowed)) {
@ -3386,6 +3520,13 @@ static void mlx5_ib_fill_counters(struct mlx5_ib_dev *dev,
}
}
if (MLX5_CAP_GEN(dev->mdev, enhanced_error_q_counters)) {
for (i = 0; i < ARRAY_SIZE(extended_err_cnts); i++, j++) {
names[j] = extended_err_cnts[i].name;
offsets[j] = extended_err_cnts[i].offset;
}
}
if (MLX5_CAP_GEN(dev->mdev, cc_query_allowed)) {
for (i = 0; i < ARRAY_SIZE(cong_cnts); i++, j++) {
names[j] = cong_cnts[i].name;
@ -3556,6 +3697,126 @@ mlx5_ib_alloc_rdma_netdev(struct ib_device *hca,
return netdev;
}
static void delay_drop_debugfs_cleanup(struct mlx5_ib_dev *dev)
{
if (!dev->delay_drop.dbg)
return;
debugfs_remove_recursive(dev->delay_drop.dbg->dir_debugfs);
kfree(dev->delay_drop.dbg);
dev->delay_drop.dbg = NULL;
}
static void cancel_delay_drop(struct mlx5_ib_dev *dev)
{
if (!(dev->ib_dev.attrs.raw_packet_caps & IB_RAW_PACKET_CAP_DELAY_DROP))
return;
cancel_work_sync(&dev->delay_drop.delay_drop_work);
delay_drop_debugfs_cleanup(dev);
}
static ssize_t delay_drop_timeout_read(struct file *filp, char __user *buf,
size_t count, loff_t *pos)
{
struct mlx5_ib_delay_drop *delay_drop = filp->private_data;
char lbuf[20];
int len;
len = snprintf(lbuf, sizeof(lbuf), "%u\n", delay_drop->timeout);
return simple_read_from_buffer(buf, count, pos, lbuf, len);
}
static ssize_t delay_drop_timeout_write(struct file *filp, const char __user *buf,
size_t count, loff_t *pos)
{
struct mlx5_ib_delay_drop *delay_drop = filp->private_data;
u32 timeout;
u32 var;
if (kstrtouint_from_user(buf, count, 0, &var))
return -EFAULT;
timeout = min_t(u32, roundup(var, 100), MLX5_MAX_DELAY_DROP_TIMEOUT_MS *
1000);
if (timeout != var)
mlx5_ib_dbg(delay_drop->dev, "Round delay drop timeout to %u usec\n",
timeout);
delay_drop->timeout = timeout;
return count;
}
static const struct file_operations fops_delay_drop_timeout = {
.owner = THIS_MODULE,
.open = simple_open,
.write = delay_drop_timeout_write,
.read = delay_drop_timeout_read,
};
static int delay_drop_debugfs_init(struct mlx5_ib_dev *dev)
{
struct mlx5_ib_dbg_delay_drop *dbg;
if (!mlx5_debugfs_root)
return 0;
dbg = kzalloc(sizeof(*dbg), GFP_KERNEL);
if (!dbg)
return -ENOMEM;
dbg->dir_debugfs =
debugfs_create_dir("delay_drop",
dev->mdev->priv.dbg_root);
if (!dbg->dir_debugfs)
return -ENOMEM;
dbg->events_cnt_debugfs =
debugfs_create_atomic_t("num_timeout_events", 0400,
dbg->dir_debugfs,
&dev->delay_drop.events_cnt);
if (!dbg->events_cnt_debugfs)
goto out_debugfs;
dbg->rqs_cnt_debugfs =
debugfs_create_atomic_t("num_rqs", 0400,
dbg->dir_debugfs,
&dev->delay_drop.rqs_cnt);
if (!dbg->rqs_cnt_debugfs)
goto out_debugfs;
dbg->timeout_debugfs =
debugfs_create_file("timeout", 0600,
dbg->dir_debugfs,
&dev->delay_drop,
&fops_delay_drop_timeout);
if (!dbg->timeout_debugfs)
goto out_debugfs;
return 0;
out_debugfs:
delay_drop_debugfs_cleanup(dev);
return -ENOMEM;
}
static void init_delay_drop(struct mlx5_ib_dev *dev)
{
if (!(dev->ib_dev.attrs.raw_packet_caps & IB_RAW_PACKET_CAP_DELAY_DROP))
return;
mutex_init(&dev->delay_drop.lock);
dev->delay_drop.dev = dev;
dev->delay_drop.activate = false;
dev->delay_drop.timeout = MLX5_MAX_DELAY_DROP_TIMEOUT_MS * 1000;
INIT_WORK(&dev->delay_drop.delay_drop_work, delay_drop_handler);
atomic_set(&dev->delay_drop.rqs_cnt, 0);
atomic_set(&dev->delay_drop.events_cnt, 0);
if (delay_drop_debugfs_init(dev))
mlx5_ib_warn(dev, "Failed to init delay drop debugfs\n");
}
static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
{
struct mlx5_ib_dev *dev;
@ -3723,18 +3984,20 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
(1ull << IB_USER_VERBS_CMD_CLOSE_XRCD);
}
dev->ib_dev.create_flow = mlx5_ib_create_flow;
dev->ib_dev.destroy_flow = mlx5_ib_destroy_flow;
dev->ib_dev.uverbs_ex_cmd_mask |=
(1ull << IB_USER_VERBS_EX_CMD_CREATE_FLOW) |
(1ull << IB_USER_VERBS_EX_CMD_DESTROY_FLOW);
if (mlx5_ib_port_link_layer(&dev->ib_dev, 1) ==
IB_LINK_LAYER_ETHERNET) {
dev->ib_dev.create_flow = mlx5_ib_create_flow;
dev->ib_dev.destroy_flow = mlx5_ib_destroy_flow;
dev->ib_dev.create_wq = mlx5_ib_create_wq;
dev->ib_dev.modify_wq = mlx5_ib_modify_wq;
dev->ib_dev.destroy_wq = mlx5_ib_destroy_wq;
dev->ib_dev.create_rwq_ind_table = mlx5_ib_create_rwq_ind_table;
dev->ib_dev.destroy_rwq_ind_table = mlx5_ib_destroy_rwq_ind_table;
dev->ib_dev.uverbs_ex_cmd_mask |=
(1ull << IB_USER_VERBS_EX_CMD_CREATE_FLOW) |
(1ull << IB_USER_VERBS_EX_CMD_DESTROY_FLOW) |
(1ull << IB_USER_VERBS_EX_CMD_CREATE_WQ) |
(1ull << IB_USER_VERBS_EX_CMD_MODIFY_WQ) |
(1ull << IB_USER_VERBS_EX_CMD_DESTROY_WQ) |
@ -3754,6 +4017,7 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
err = mlx5_enable_eth(dev);
if (err)
goto err_free_port;
dev->roce.last_port_state = IB_PORT_DOWN;
}
err = create_dev_resources(&dev->devr);
@ -3770,9 +4034,13 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
goto err_odp;
}
err = mlx5_ib_init_cong_debugfs(dev);
if (err)
goto err_cnt;
dev->mdev->priv.uar = mlx5_get_uars_page(dev->mdev);
if (!dev->mdev->priv.uar)
goto err_cnt;
goto err_cong;
err = mlx5_alloc_bfreg(dev->mdev, &dev->bfreg, false, false);
if (err)
@ -3790,18 +4058,25 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
if (err)
goto err_dev;
init_delay_drop(dev);
for (i = 0; i < ARRAY_SIZE(mlx5_class_attributes); i++) {
err = device_create_file(&dev->ib_dev.dev,
mlx5_class_attributes[i]);
if (err)
goto err_umrc;
goto err_delay_drop;
}
if ((MLX5_CAP_GEN(mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH) &&
MLX5_CAP_GEN(mdev, disable_local_lb))
mutex_init(&dev->lb_mutex);
dev->ib_active = true;
return dev;
err_umrc:
err_delay_drop:
cancel_delay_drop(dev);
destroy_umrc_res(dev);
err_dev:
@ -3817,6 +4092,8 @@ err_uar_page:
mlx5_put_uars_page(dev->mdev, dev->mdev->priv.uar);
err_cnt:
mlx5_ib_cleanup_cong_debugfs(dev);
err_cong:
if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt))
mlx5_ib_dealloc_counters(dev);
@ -3846,11 +4123,13 @@ static void mlx5_ib_remove(struct mlx5_core_dev *mdev, void *context)
struct mlx5_ib_dev *dev = context;
enum rdma_link_layer ll = mlx5_ib_port_link_layer(&dev->ib_dev, 1);
cancel_delay_drop(dev);
mlx5_remove_netdev_notifier(dev);
ib_unregister_device(&dev->ib_dev);
mlx5_free_bfreg(dev->mdev, &dev->fp_bfreg);
mlx5_free_bfreg(dev->mdev, &dev->bfreg);
mlx5_put_uars_page(dev->mdev, mdev->priv.uar);
mlx5_ib_cleanup_cong_debugfs(dev);
if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt))
mlx5_ib_dealloc_counters(dev);
destroy_umrc_res(dev);
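The delay-drop timeout configured above is also exposed through debugfs. A minimal sketch of setting it from userspace, assuming the same mount-point convention as before; per delay_drop_timeout_write() the value is in microseconds, rounded up to a multiple of 100 and capped at MLX5_MAX_DELAY_DROP_TIMEOUT_MS * 1000.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* Path is an assumption: <debugfs>/mlx5/<pci BDF>/delay_drop/timeout. */
	const char *path = "/sys/kernel/debug/mlx5/0000:03:00.0/delay_drop/timeout";
	const char *usecs = "5000\n";	/* 5 ms; the driver rounds to 100 usec steps */
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, usecs, strlen(usecs)) < 0)
		perror("write");
	close(fd);
	return 0;
}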

View file

@ -247,6 +247,10 @@ struct mlx5_ib_wq {
void *qend;
};
enum mlx5_ib_wq_flags {
MLX5_IB_WQ_FLAGS_DELAY_DROP = 0x1,
};
struct mlx5_ib_rwq {
struct ib_wq ibwq;
struct mlx5_core_qp core_qp;
@ -264,6 +268,7 @@ struct mlx5_ib_rwq {
u32 wqe_count;
u32 wqe_shift;
int wq_sig;
u32 create_flags; /* Use enum mlx5_ib_wq_flags */
};
enum {
@ -378,6 +383,7 @@ struct mlx5_ib_qp {
struct list_head cq_recv_list;
struct list_head cq_send_list;
u32 rate_limit;
u32 underlay_qpn;
};
struct mlx5_ib_cq_buf {
@ -399,6 +405,7 @@ enum mlx5_ib_qp_flags {
MLX5_IB_QP_CAP_SCATTER_FCS = 1 << 7,
MLX5_IB_QP_RSS = 1 << 8,
MLX5_IB_QP_CVLAN_STRIPPING = 1 << 9,
MLX5_IB_QP_UNDERLAY = 1 << 10,
};
struct mlx5_umr_wr {
@ -616,6 +623,63 @@ struct mlx5_roce {
struct net_device *netdev;
struct notifier_block nb;
atomic_t next_port;
enum ib_port_state last_port_state;
};
struct mlx5_ib_dbg_param {
int offset;
struct mlx5_ib_dev *dev;
struct dentry *dentry;
};
enum mlx5_ib_dbg_cc_types {
MLX5_IB_DBG_CC_RP_CLAMP_TGT_RATE,
MLX5_IB_DBG_CC_RP_CLAMP_TGT_RATE_ATI,
MLX5_IB_DBG_CC_RP_TIME_RESET,
MLX5_IB_DBG_CC_RP_BYTE_RESET,
MLX5_IB_DBG_CC_RP_THRESHOLD,
MLX5_IB_DBG_CC_RP_AI_RATE,
MLX5_IB_DBG_CC_RP_HAI_RATE,
MLX5_IB_DBG_CC_RP_MIN_DEC_FAC,
MLX5_IB_DBG_CC_RP_MIN_RATE,
MLX5_IB_DBG_CC_RP_RATE_TO_SET_ON_FIRST_CNP,
MLX5_IB_DBG_CC_RP_DCE_TCP_G,
MLX5_IB_DBG_CC_RP_DCE_TCP_RTT,
MLX5_IB_DBG_CC_RP_RATE_REDUCE_MONITOR_PERIOD,
MLX5_IB_DBG_CC_RP_INITIAL_ALPHA_VALUE,
MLX5_IB_DBG_CC_RP_GD,
MLX5_IB_DBG_CC_NP_CNP_DSCP,
MLX5_IB_DBG_CC_NP_CNP_PRIO_MODE,
MLX5_IB_DBG_CC_NP_CNP_PRIO,
MLX5_IB_DBG_CC_MAX,
};
struct mlx5_ib_dbg_cc_params {
struct dentry *root;
struct mlx5_ib_dbg_param params[MLX5_IB_DBG_CC_MAX];
};
enum {
MLX5_MAX_DELAY_DROP_TIMEOUT_MS = 100,
};
struct mlx5_ib_dbg_delay_drop {
struct dentry *dir_debugfs;
struct dentry *rqs_cnt_debugfs;
struct dentry *events_cnt_debugfs;
struct dentry *timeout_debugfs;
};
struct mlx5_ib_delay_drop {
struct mlx5_ib_dev *dev;
struct work_struct delay_drop_work;
/* serialize setting of delay drop */
struct mutex lock;
u32 timeout;
bool activate;
atomic_t events_cnt;
atomic_t rqs_cnt;
struct mlx5_ib_dbg_delay_drop *dbg;
};
struct mlx5_ib_dev {
@ -652,9 +716,15 @@ struct mlx5_ib_dev {
struct list_head qp_list;
/* Array with num_ports elements */
struct mlx5_ib_port *port;
struct mlx5_sq_bfreg bfreg;
struct mlx5_sq_bfreg fp_bfreg;
u8 umr_fence;
struct mlx5_sq_bfreg bfreg;
struct mlx5_sq_bfreg fp_bfreg;
struct mlx5_ib_delay_drop delay_drop;
struct mlx5_ib_dbg_cc_params *dbg_cc_params;
/* protect the user_td */
struct mutex lb_mutex;
u32 user_td;
u8 umr_fence;
};
static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq)
@ -904,6 +974,9 @@ __be16 mlx5_get_roce_udp_sport(struct mlx5_ib_dev *dev, u8 port_num,
int mlx5_get_roce_gid_type(struct mlx5_ib_dev *dev, u8 port_num,
int index, enum ib_gid_type *gid_type);
void mlx5_ib_cleanup_cong_debugfs(struct mlx5_ib_dev *dev);
int mlx5_ib_init_cong_debugfs(struct mlx5_ib_dev *dev);
/* GSI QP helper functions */
struct ib_qp *mlx5_ib_gsi_create_qp(struct ib_pd *pd,
struct ib_qp_init_attr *init_attr);

View file

@ -48,6 +48,7 @@ enum {
#define MLX5_UMR_ALIGN 2048
static int clean_mr(struct mlx5_ib_mr *mr);
static int max_umr_order(struct mlx5_ib_dev *dev);
static int use_umr(struct mlx5_ib_dev *dev, int order);
static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
@ -491,16 +492,18 @@ static struct mlx5_ib_mr *alloc_cached_mr(struct mlx5_ib_dev *dev, int order)
struct mlx5_mr_cache *cache = &dev->cache;
struct mlx5_ib_mr *mr = NULL;
struct mlx5_cache_ent *ent;
int last_umr_cache_entry;
int c;
int i;
c = order2idx(dev, order);
if (c < 0 || c > MAX_UMR_CACHE_ENTRY) {
last_umr_cache_entry = order2idx(dev, max_umr_order(dev));
if (c < 0 || c > last_umr_cache_entry) {
mlx5_ib_warn(dev, "order %d, cache index %d\n", order, c);
return NULL;
}
for (i = c; i < MAX_UMR_CACHE_ENTRY; i++) {
for (i = c; i <= last_umr_cache_entry; i++) {
ent = &cache->ent[i];
mlx5_ib_dbg(dev, "order %d, cache index %d\n", ent->order, i);
@ -816,11 +819,16 @@ static int get_octo_len(u64 addr, u64 len, int page_size)
return (npages + 1) / 2;
}
static int use_umr(struct mlx5_ib_dev *dev, int order)
static int max_umr_order(struct mlx5_ib_dev *dev)
{
if (MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset))
return order <= MAX_UMR_CACHE_ENTRY + 2;
return order <= MLX5_MAX_UMR_SHIFT;
return MAX_UMR_CACHE_ENTRY + 2;
return MLX5_MAX_UMR_SHIFT;
}
static int use_umr(struct mlx5_ib_dev *dev, int order)
{
return order <= max_umr_order(dev);
}
static int mr_umem_get(struct ib_pd *pd, u64 start, u64 length,

View file

@ -939,7 +939,7 @@ static int mlx5_ib_mr_initiator_pfault_handler(
if (qp->ibqp.qp_type != IB_QPT_RC) {
av = *wqe;
if (av->dqp_dct & be32_to_cpu(MLX5_WQE_AV_EXT))
if (av->dqp_dct & cpu_to_be32(MLX5_EXTENDED_UD_AV))
*wqe += sizeof(struct mlx5_av);
else
*wqe += sizeof(struct mlx5_base_av);

View file

@ -34,6 +34,7 @@
#include <rdma/ib_umem.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_user_verbs.h>
#include <linux/mlx5/fs.h>
#include "mlx5_ib.h"
/* not supported currently */
@ -453,7 +454,8 @@ static int set_user_buf_size(struct mlx5_ib_dev *dev,
return -EINVAL;
}
if (attr->qp_type == IB_QPT_RAW_PACKET) {
if (attr->qp_type == IB_QPT_RAW_PACKET ||
qp->flags & MLX5_IB_QP_UNDERLAY) {
base->ubuffer.buf_size = qp->rq.wqe_cnt << qp->rq.wqe_shift;
qp->raw_packet_qp.sq.ubuffer.buf_size = qp->sq.wqe_cnt << 6;
} else {
@ -675,10 +677,14 @@ err_umem:
return err;
}
static void destroy_user_rq(struct ib_pd *pd, struct mlx5_ib_rwq *rwq)
static void destroy_user_rq(struct mlx5_ib_dev *dev, struct ib_pd *pd,
struct mlx5_ib_rwq *rwq)
{
struct mlx5_ib_ucontext *context;
if (rwq->create_flags & MLX5_IB_WQ_FLAGS_DELAY_DROP)
atomic_dec(&dev->delay_drop.rqs_cnt);
context = to_mucontext(pd->uobject->context);
mlx5_ib_db_unmap_user(context, &rwq->db);
if (rwq->umem)
@ -1021,12 +1027,16 @@ static int is_connected(enum ib_qp_type qp_type)
}
static int create_raw_packet_qp_tis(struct mlx5_ib_dev *dev,
struct mlx5_ib_qp *qp,
struct mlx5_ib_sq *sq, u32 tdn)
{
u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {0};
void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);
MLX5_SET(tisc, tisc, transport_domain, tdn);
if (qp->flags & MLX5_IB_QP_UNDERLAY)
MLX5_SET(tisc, tisc, underlay_qpn, qp->underlay_qpn);
return mlx5_core_create_tis(dev->mdev, in, sizeof(in), &sq->tisn);
}
@ -1229,7 +1239,7 @@ static int create_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
u32 tdn = mucontext->tdn;
if (qp->sq.wqe_cnt) {
err = create_raw_packet_qp_tis(dev, sq, tdn);
err = create_raw_packet_qp_tis(dev, qp, sq, tdn);
if (err)
return err;
@ -1502,10 +1512,6 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
u32 *in;
int err;
base = init_attr->qp_type == IB_QPT_RAW_PACKET ?
&qp->raw_packet_qp.rq.base :
&qp->trans_qp.base;
mutex_init(&qp->mutex);
spin_lock_init(&qp->sq.lock);
spin_lock_init(&qp->rq.lock);
@ -1587,10 +1593,28 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
qp->wq_sig = !!(ucmd.flags & MLX5_QP_FLAG_SIGNATURE);
qp->scat_cqe = !!(ucmd.flags & MLX5_QP_FLAG_SCATTER_CQE);
if (init_attr->create_flags & IB_QP_CREATE_SOURCE_QPN) {
if (init_attr->qp_type != IB_QPT_UD ||
(MLX5_CAP_GEN(dev->mdev, port_type) !=
MLX5_CAP_PORT_TYPE_IB) ||
!mlx5_get_flow_namespace(dev->mdev, MLX5_FLOW_NAMESPACE_BYPASS)) {
mlx5_ib_dbg(dev, "Source QP option isn't supported\n");
return -EOPNOTSUPP;
}
qp->flags |= MLX5_IB_QP_UNDERLAY;
qp->underlay_qpn = init_attr->source_qpn;
}
} else {
qp->wq_sig = !!wq_signature;
}
base = (init_attr->qp_type == IB_QPT_RAW_PACKET ||
qp->flags & MLX5_IB_QP_UNDERLAY) ?
&qp->raw_packet_qp.rq.base :
&qp->trans_qp.base;
qp->has_rq = qp_has_rq(init_attr);
err = set_rq_size(dev, &init_attr->cap, qp->has_rq,
qp, (pd && pd->uobject) ? &ucmd : NULL);
@ -1741,7 +1765,8 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
qp->flags |= MLX5_IB_QP_LSO;
}
if (init_attr->qp_type == IB_QPT_RAW_PACKET) {
if (init_attr->qp_type == IB_QPT_RAW_PACKET ||
qp->flags & MLX5_IB_QP_UNDERLAY) {
qp->raw_packet_qp.sq.ubuffer.buf_addr = ucmd.sq_buf_addr;
raw_packet_qp_copy_info(qp, &qp->raw_packet_qp);
err = create_raw_packet_qp(dev, qp, in, pd);
@ -1893,7 +1918,7 @@ static int modify_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
{
struct mlx5_ib_cq *send_cq, *recv_cq;
struct mlx5_ib_qp_base *base = &qp->trans_qp.base;
struct mlx5_ib_qp_base *base;
unsigned long flags;
int err;
@ -1902,12 +1927,14 @@ static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
return;
}
base = qp->ibqp.qp_type == IB_QPT_RAW_PACKET ?
base = (qp->ibqp.qp_type == IB_QPT_RAW_PACKET ||
qp->flags & MLX5_IB_QP_UNDERLAY) ?
&qp->raw_packet_qp.rq.base :
&qp->trans_qp.base;
if (qp->state != IB_QPS_RESET) {
if (qp->ibqp.qp_type != IB_QPT_RAW_PACKET) {
if (qp->ibqp.qp_type != IB_QPT_RAW_PACKET &&
!(qp->flags & MLX5_IB_QP_UNDERLAY)) {
err = mlx5_core_qp_modify(dev->mdev,
MLX5_CMD_OP_2RST_QP, 0,
NULL, &base->mqp);
@ -1946,7 +1973,8 @@ static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
mlx5_ib_unlock_cqs(send_cq, recv_cq);
spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags);
if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET) {
if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET ||
qp->flags & MLX5_IB_QP_UNDERLAY) {
destroy_raw_packet_qp(dev, qp);
} else {
err = mlx5_core_destroy_qp(dev->mdev, &base->mqp);
@ -2702,7 +2730,8 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
if (is_sqp(ibqp->qp_type)) {
context->mtu_msgmax = (IB_MTU_256 << 5) | 8;
} else if (ibqp->qp_type == IB_QPT_UD ||
} else if ((ibqp->qp_type == IB_QPT_UD &&
!(qp->flags & MLX5_IB_QP_UNDERLAY)) ||
ibqp->qp_type == MLX5_IB_QPT_REG_UMR) {
context->mtu_msgmax = (IB_MTU_4096 << 5) | 12;
} else if (attr_mask & IB_QP_PATH_MTU) {
@ -2799,6 +2828,11 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
u8 port_num = (attr_mask & IB_QP_PORT ? attr->port_num :
qp->port) - 1;
/* Underlay port should be used - index 0 function per port */
if (qp->flags & MLX5_IB_QP_UNDERLAY)
port_num = 0;
mibport = &dev->port[port_num];
context->qp_counter_set_usr_page |=
cpu_to_be32((u32)(mibport->cnts.set_id) << 24);
@ -2824,7 +2858,8 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
optpar = ib_mask_to_mlx5_opt(attr_mask);
optpar &= opt_mask[mlx5_cur][mlx5_new][mlx5_st];
if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET) {
if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET ||
qp->flags & MLX5_IB_QP_UNDERLAY) {
struct mlx5_modify_raw_qp_param raw_qp_param = {};
raw_qp_param.operation = op;
@ -2913,7 +2948,13 @@ int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
ll = dev->ib_dev.get_link_layer(&dev->ib_dev, port);
}
if (qp_type != MLX5_IB_QPT_REG_UMR &&
if (qp->flags & MLX5_IB_QP_UNDERLAY) {
if (attr_mask & ~(IB_QP_STATE | IB_QP_CUR_STATE)) {
mlx5_ib_dbg(dev, "invalid attr_mask 0x%x when underlay QP is used\n",
attr_mask);
goto out;
}
} else if (qp_type != MLX5_IB_QPT_REG_UMR &&
!ib_modify_qp_is_ok(cur_state, new_state, qp_type, attr_mask, ll)) {
mlx5_ib_dbg(dev, "invalid QP state transition from %d to %d, qp_type %d, attr_mask 0x%x\n",
cur_state, new_state, ibqp->qp_type, attr_mask);
@ -4477,9 +4518,14 @@ int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
return mlx5_ib_gsi_query_qp(ibqp, qp_attr, qp_attr_mask,
qp_init_attr);
/* Not all of the output fields are applicable; make sure to zero them */
memset(qp_init_attr, 0, sizeof(*qp_init_attr));
memset(qp_attr, 0, sizeof(*qp_attr));
mutex_lock(&qp->mutex);
if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET) {
if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET ||
qp->flags & MLX5_IB_QP_UNDERLAY) {
err = query_raw_packet_qp_state(dev, qp, &raw_packet_qp_state);
if (err)
goto out;
@ -4597,6 +4643,27 @@ static void mlx5_ib_wq_event(struct mlx5_core_qp *core_qp, int type)
}
}
static int set_delay_drop(struct mlx5_ib_dev *dev)
{
int err = 0;
mutex_lock(&dev->delay_drop.lock);
if (dev->delay_drop.activate)
goto out;
err = mlx5_core_set_delay_drop(dev->mdev, dev->delay_drop.timeout);
if (err)
goto out;
dev->delay_drop.activate = true;
out:
mutex_unlock(&dev->delay_drop.lock);
if (!err)
atomic_inc(&dev->delay_drop.rqs_cnt);
return err;
}
static int create_rq(struct mlx5_ib_rwq *rwq, struct ib_pd *pd,
struct ib_wq_init_attr *init_attr)
{
@ -4651,9 +4718,28 @@ static int create_rq(struct mlx5_ib_rwq *rwq, struct ib_pd *pd,
}
MLX5_SET(rqc, rqc, scatter_fcs, 1);
}
if (init_attr->create_flags & IB_WQ_FLAGS_DELAY_DROP) {
if (!(dev->ib_dev.attrs.raw_packet_caps &
IB_RAW_PACKET_CAP_DELAY_DROP)) {
mlx5_ib_dbg(dev, "Delay drop is not supported\n");
err = -EOPNOTSUPP;
goto out;
}
MLX5_SET(rqc, rqc, delay_drop_en, 1);
}
rq_pas0 = (__be64 *)MLX5_ADDR_OF(wq, wq, pas);
mlx5_ib_populate_pas(dev, rwq->umem, rwq->page_shift, rq_pas0, 0);
err = mlx5_core_create_rq_tracked(dev->mdev, in, inlen, &rwq->core_qp);
if (!err && init_attr->create_flags & IB_WQ_FLAGS_DELAY_DROP) {
err = set_delay_drop(dev);
if (err) {
mlx5_ib_warn(dev, "Failed to enable delay drop err=%d\n",
err);
mlx5_core_destroy_rq_tracked(dev->mdev, &rwq->core_qp);
} else {
rwq->create_flags |= MLX5_IB_WQ_FLAGS_DELAY_DROP;
}
}
out:
kvfree(in);
return err;
@ -4787,7 +4873,7 @@ struct ib_wq *mlx5_ib_create_wq(struct ib_pd *pd,
err_copy:
mlx5_core_destroy_rq_tracked(dev->mdev, &rwq->core_qp);
err_user_rq:
destroy_user_rq(pd, rwq);
destroy_user_rq(dev, pd, rwq);
err:
kfree(rwq);
return ERR_PTR(err);
@ -4799,7 +4885,7 @@ int mlx5_ib_destroy_wq(struct ib_wq *wq)
struct mlx5_ib_rwq *rwq = to_mrwq(wq);
mlx5_core_destroy_rq_tracked(dev->mdev, &rwq->core_qp);
destroy_user_rq(wq->pd, rwq);
destroy_user_rq(dev, wq->pd, rwq);
kfree(rwq);
return 0;

View file

@ -97,7 +97,7 @@ int rxe_rcv(struct sk_buff *skb);
void rxe_dev_put(struct rxe_dev *rxe);
struct rxe_dev *net_to_rxe(struct net_device *ndev);
struct rxe_dev *get_rxe_by_name(const char* name);
struct rxe_dev *get_rxe_by_name(const char *name);
void rxe_port_up(struct rxe_dev *rxe);
void rxe_port_down(struct rxe_dev *rxe);

View file

@ -250,7 +250,7 @@ void rxe_resp_queue_pkt(struct rxe_dev *rxe,
void rxe_comp_queue_pkt(struct rxe_dev *rxe,
struct rxe_qp *qp, struct sk_buff *skb);
static inline unsigned wr_opcode_mask(int opcode, struct rxe_qp *qp)
static inline unsigned int wr_opcode_mask(int opcode, struct rxe_qp *qp)
{
return rxe_wr_opcode_info[opcode].mask[qp->ibqp.qp_type];
}

View file

@ -76,7 +76,7 @@ static void rxe_vma_close(struct vm_area_struct *vma)
kref_put(&ip->ref, rxe_mmap_release);
}
static struct vm_operations_struct rxe_vm_ops = {
static const struct vm_operations_struct rxe_vm_ops = {
.open = rxe_vma_open,
.close = rxe_vma_close,
};

View file

@ -188,7 +188,7 @@ int rxe_pool_init(
struct rxe_dev *rxe,
struct rxe_pool *pool,
enum rxe_elem_type type,
unsigned max_elem)
unsigned int max_elem)
{
int err = 0;
size_t size = rxe_type_info[type].size;

View file

@ -43,7 +43,7 @@ static int next_opcode(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
static inline void retry_first_write_send(struct rxe_qp *qp,
struct rxe_send_wqe *wqe,
unsigned mask, int npsn)
unsigned int mask, int npsn)
{
int i;

View file

@ -78,7 +78,7 @@ void rxe_do_task(unsigned long data)
default:
spin_unlock_irqrestore(&task->state_lock, flags);
pr_warn("bad state = %d in rxe_do_task\n", task->state);
pr_warn("%s failed with bad state %d\n", __func__, task->state);
return;
}
@ -105,7 +105,7 @@ void rxe_do_task(unsigned long data)
break;
default:
pr_warn("bad state = %d in rxe_do_task\n",
pr_warn("%s failed with bad state %d\n", __func__,
task->state);
}
spin_unlock_irqrestore(&task->state_lock, flags);


@ -1210,8 +1210,8 @@ static int rxe_detach_mcast(struct ib_qp *ibqp, union ib_gid *mgid, u16 mlid)
return rxe_mcast_drop_grp_elem(rxe, qp, mgid);
}
static ssize_t rxe_show_parent(struct device *device,
struct device_attribute *attr, char *buf)
static ssize_t parent_show(struct device *device,
struct device_attribute *attr, char *buf)
{
struct rxe_dev *rxe = container_of(device, struct rxe_dev,
ib_dev.dev);
@ -1219,7 +1219,7 @@ static ssize_t rxe_show_parent(struct device *device,
return snprintf(buf, 16, "%s\n", rxe_parent_name(rxe, 1));
}
static DEVICE_ATTR(parent, S_IRUGO, rxe_show_parent, NULL);
static DEVICE_ATTR_RO(parent);
static struct device_attribute *rxe_dev_attributes[] = {
&dev_attr_parent,
@ -1336,15 +1336,15 @@ int rxe_register_device(struct rxe_dev *rxe)
err = ib_register_device(dev, NULL);
if (err) {
pr_warn("rxe_register_device failed, err = %d\n", err);
pr_warn("%s failed with error %d\n", __func__, err);
goto err1;
}
for (i = 0; i < ARRAY_SIZE(rxe_dev_attributes); ++i) {
err = device_create_file(&dev->dev, rxe_dev_attributes[i]);
if (err) {
pr_warn("device_create_file failed, i = %d, err = %d\n",
i, err);
pr_warn("%s failed with error %d for attr number %d\n",
__func__, err, i);
goto err2;
}
}


@ -241,13 +241,14 @@ err_out:
return err;
}
static int mlx4_cq_alloc_icm(struct mlx4_dev *dev, int *cqn)
static int mlx4_cq_alloc_icm(struct mlx4_dev *dev, int *cqn, u8 usage)
{
u32 in_modifier = RES_CQ | (((u32)usage & 3) << 30);
u64 out_param;
int err;
if (mlx4_is_mfunc(dev)) {
err = mlx4_cmd_imm(dev, 0, &out_param, RES_CQ,
err = mlx4_cmd_imm(dev, 0, &out_param, in_modifier,
RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES,
MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
if (err)
@ -303,7 +304,7 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent,
cq->vector = vector;
err = mlx4_cq_alloc_icm(dev, &cq->cqn);
err = mlx4_cq_alloc_icm(dev, &cq->cqn, cq->usage);
if (err)
return err;


@ -140,6 +140,7 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
(cq->type == RX && priv->hwtstamp_config.rx_filter))
timestamp_en = 1;
cq->mcq.usage = MLX4_RES_USAGE_DRIVER;
err = mlx4_cq_alloc(mdev->dev, cq->size, &cq->wqres.mtt,
&mdev->priv_uar, cq->wqres.db.dma, &cq->mcq,
cq->vector, 0, timestamp_en);


@ -651,7 +651,8 @@ static int mlx4_en_get_qp(struct mlx4_en_priv *priv)
return 0;
}
err = mlx4_qp_reserve_range(dev, 1, 1, qpn, MLX4_RESERVE_A0_QP);
err = mlx4_qp_reserve_range(dev, 1, 1, qpn, MLX4_RESERVE_A0_QP,
MLX4_RES_USAGE_DRIVER);
en_dbg(DRV, priv, "Reserved qp %d\n", *qpn);
if (err) {
en_err(priv, "Failed to reserve qp for mac registration\n");


@ -1081,7 +1081,8 @@ int mlx4_en_create_drop_qp(struct mlx4_en_priv *priv)
u32 qpn;
err = mlx4_qp_reserve_range(priv->mdev->dev, 1, 1, &qpn,
MLX4_RESERVE_A0_QP);
MLX4_RESERVE_A0_QP,
MLX4_RES_USAGE_DRIVER);
if (err) {
en_err(priv, "Failed reserving drop qpn\n");
return err;
@ -1127,7 +1128,8 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
flags = priv->rx_ring_num == 1 ? MLX4_RESERVE_A0_QP : 0;
err = mlx4_qp_reserve_range(mdev->dev, priv->rx_ring_num,
priv->rx_ring_num,
&rss_map->base_qpn, flags);
&rss_map->base_qpn, flags,
MLX4_RES_USAGE_DRIVER);
if (err) {
en_err(priv, "Failed reserving %d qps\n", priv->rx_ring_num);
return err;


@ -105,7 +105,8 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
(unsigned long long) ring->sp_wqres.buf.direct.map);
err = mlx4_qp_reserve_range(mdev->dev, 1, 1, &ring->qpn,
MLX4_RESERVE_ETH_BF_QP);
MLX4_RESERVE_ETH_BF_QP,
MLX4_RES_USAGE_DRIVER);
if (err) {
en_err(priv, "failed reserving qp for TX ring\n");
goto err_hwq_res;


@ -2475,7 +2475,7 @@ static int mlx4_allocate_default_counters(struct mlx4_dev *dev)
priv->def_counter[port] = -1;
for (port = 0; port < dev->caps.num_ports; port++) {
err = mlx4_counter_alloc(dev, &idx);
err = mlx4_counter_alloc(dev, &idx, MLX4_RES_USAGE_DRIVER);
if (!err || err == -ENOSPC) {
priv->def_counter[port] = idx;
@ -2517,13 +2517,14 @@ int __mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx)
return 0;
}
int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx)
int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx, u8 usage)
{
u32 in_modifier = RES_COUNTER | (((u32)usage & 3) << 30);
u64 out_param;
int err;
if (mlx4_is_mfunc(dev)) {
err = mlx4_cmd_imm(dev, 0, &out_param, RES_COUNTER,
err = mlx4_cmd_imm(dev, 0, &out_param, in_modifier,
RES_OP_RESERVE, MLX4_CMD_ALLOC_RES,
MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
if (!err)

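The counter allocator now carries a usage tag through to the resource tracker; a minimal, hypothetical caller-side sketch (helper name is illustrative, the enum values come from the mlx4_device.h hunk later in this diff):

#include <linux/mlx4/device.h>

/* Hedged sketch only: an allocation made on behalf of user verbs would be
 * tagged MLX4_RES_USAGE_USER_VERBS; in-driver allocations use
 * MLX4_RES_USAGE_DRIVER as in the hunk above. */
static int example_alloc_user_counter(struct mlx4_dev *dev, u32 *idx)
{
	return mlx4_counter_alloc(dev, idx, MLX4_RES_USAGE_USER_VERBS);
}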

@ -245,8 +245,9 @@ int __mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align,
}
int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align,
int *base, u8 flags)
int *base, u8 flags, u8 usage)
{
u32 in_modifier = RES_QP | (((u32)usage & 3) << 30);
u64 in_param = 0;
u64 out_param;
int err;
@ -258,7 +259,7 @@ int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align,
set_param_l(&in_param, (((u32)flags) << 24) | (u32)cnt);
set_param_h(&in_param, align);
err = mlx4_cmd_imm(dev, in_param, &out_param,
RES_QP, RES_OP_RESERVE,
in_modifier, RES_OP_RESERVE,
MLX4_CMD_ALLOC_RES,
MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
if (err)

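The usage tag travels to firmware in the top two bits of the ALLOC_RES in_modifier, next to the resource type; a hedged restatement of the packing used in the hunks above (the helper name is hypothetical):

/* Bits 31:30 carry the 2-bit usage tag (MLX4_RES_USAGE_*); the low bits keep
 * the resource type (RES_QP, RES_CQ, RES_COUNTER, ...). */
static inline u32 alloc_res_in_modifier(u32 res_type, u8 usage)
{
	return res_type | (((u32)usage & 3) << 30);
}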

@ -189,6 +189,7 @@ struct mlx5e_lbt_priv {
struct packet_type pt;
struct completion comp;
bool loopback_ok;
bool local_lb;
};
static int
@ -236,6 +237,13 @@ static int mlx5e_test_loopback_setup(struct mlx5e_priv *priv,
{
int err = 0;
/* Temporarily enable local_lb */
if (MLX5_CAP_GEN(priv->mdev, disable_local_lb)) {
mlx5_nic_vport_query_local_lb(priv->mdev, &lbtp->local_lb);
if (!lbtp->local_lb)
mlx5_nic_vport_update_local_lb(priv->mdev, true);
}
err = mlx5e_refresh_tirs(priv, true);
if (err)
return err;
@ -254,6 +262,11 @@ static int mlx5e_test_loopback_setup(struct mlx5e_priv *priv,
static void mlx5e_test_loopback_cleanup(struct mlx5e_priv *priv,
struct mlx5e_lbt_priv *lbtp)
{
if (MLX5_CAP_GEN(priv->mdev, disable_local_lb)) {
if (!lbtp->local_lb)
mlx5_nic_vport_update_local_lb(priv->mdev, false);
}
dev_remove_pack(&lbtp->pt);
mlx5e_refresh_tirs(priv, false);
}


@ -161,6 +161,8 @@ static const char *eqe_type_str(u8 type)
return "MLX5_EVENT_TYPE_NIC_VPORT_CHANGE";
case MLX5_EVENT_TYPE_FPGA_ERROR:
return "MLX5_EVENT_TYPE_FPGA_ERROR";
case MLX5_EVENT_TYPE_GENERAL_EVENT:
return "MLX5_EVENT_TYPE_GENERAL_EVENT";
default:
return "Unrecognized event";
}
@ -378,6 +380,20 @@ int mlx5_core_page_fault_resume(struct mlx5_core_dev *dev, u32 token,
EXPORT_SYMBOL_GPL(mlx5_core_page_fault_resume);
#endif
static void general_event_handler(struct mlx5_core_dev *dev,
struct mlx5_eqe *eqe)
{
switch (eqe->sub_type) {
case MLX5_GENERAL_SUBTYPE_DELAY_DROP_TIMEOUT:
if (dev->event)
dev->event(dev, MLX5_DEV_EVENT_DELAY_DROP_TIMEOUT, 0);
break;
default:
mlx5_core_dbg(dev, "General event with unrecognized subtype: sub_type %d\n",
eqe->sub_type);
}
}
static irqreturn_t mlx5_eq_int(int irq, void *eq_ptr)
{
struct mlx5_eq *eq = eq_ptr;
@ -486,6 +502,9 @@ static irqreturn_t mlx5_eq_int(int irq, void *eq_ptr)
mlx5_fpga_event(dev, eqe->type, &eqe->data.raw);
break;
case MLX5_EVENT_TYPE_GENERAL_EVENT:
general_event_handler(dev, eqe);
break;
default:
mlx5_core_warn(dev, "Unhandled event 0x%x on EQ 0x%x\n",
eqe->type, eq->eqn);
@ -693,6 +712,10 @@ int mlx5_start_eqs(struct mlx5_core_dev *dev)
mlx5_core_is_pf(dev))
async_event_mask |= (1ull << MLX5_EVENT_TYPE_NIC_VPORT_CHANGE);
if (MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH &&
MLX5_CAP_GEN(dev, general_notification_event))
async_event_mask |= (1ull << MLX5_EVENT_TYPE_GENERAL_EVENT);
if (MLX5_CAP_GEN(dev, port_module_event))
async_event_mask |= (1ull << MLX5_EVENT_TYPE_PORT_MODULE_EVENT);
else

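On the consumer side, the new subtype surfaces through the mlx5_core_dev event callback as MLX5_DEV_EVENT_DELAY_DROP_TIMEOUT; a hypothetical handler sketch (name and body are illustrative only):

/* Hedged sketch of an upper-driver callback reacting to the new event;
 * a real handler would re-arm delay drop rather than just log. */
static void example_mlx5_event(struct mlx5_core_dev *mdev,
			       enum mlx5_dev_event event, unsigned long param)
{
	if (event == MLX5_DEV_EVENT_DELAY_DROP_TIMEOUT)
		pr_debug("mlx5: delay drop timed out, re-arm needed\n");
}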

@ -120,6 +120,12 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
return err;
}
if (MLX5_CAP_GEN(dev, ipoib_enhanced_offloads)) {
err = mlx5_core_get_caps(dev, MLX5_CAP_IPOIB_ENHANCED_OFFLOADS);
if (err)
return err;
}
if (MLX5_CAP_GEN(dev, pg)) {
err = mlx5_core_get_caps(dev, MLX5_CAP_ODP);
if (err)


@ -47,6 +47,7 @@
#include <linux/debugfs.h>
#include <linux/kmod.h>
#include <linux/mlx5/mlx5_ifc.h>
#include <linux/mlx5/vport.h>
#ifdef CONFIG_RFS_ACCEL
#include <linux/cpu_rmap.h>
#endif
@ -579,6 +580,18 @@ static int set_hca_ctrl(struct mlx5_core_dev *dev)
return err;
}
static int mlx5_core_set_hca_defaults(struct mlx5_core_dev *dev)
{
int ret = 0;
/* Disable local_lb by default */
if ((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH) &&
MLX5_CAP_GEN(dev, disable_local_lb))
ret = mlx5_nic_vport_update_local_lb(dev, false);
return ret;
}
int mlx5_core_enable_hca(struct mlx5_core_dev *dev, u16 func_id)
{
u32 out[MLX5_ST_SZ_DW(enable_hca_out)] = {0};
@ -1155,6 +1168,12 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
goto err_fs;
}
err = mlx5_core_set_hca_defaults(dev);
if (err) {
dev_err(&pdev->dev, "Failed to set hca defaults\n");
goto err_fs;
}
#ifdef CONFIG_MLX5_CORE_EN
mlx5_eswitch_attach(dev->priv.eswitch);
#endif


@ -242,6 +242,20 @@ int mlx5_core_destroy_qp(struct mlx5_core_dev *dev,
}
EXPORT_SYMBOL_GPL(mlx5_core_destroy_qp);
int mlx5_core_set_delay_drop(struct mlx5_core_dev *dev,
u32 timeout_usec)
{
u32 out[MLX5_ST_SZ_DW(set_delay_drop_params_out)] = {0};
u32 in[MLX5_ST_SZ_DW(set_delay_drop_params_in)] = {0};
MLX5_SET(set_delay_drop_params_in, in, opcode,
MLX5_CMD_OP_SET_DELAY_DROP_PARAMS);
MLX5_SET(set_delay_drop_params_in, in, delay_drop_timeout,
timeout_usec / 100);
return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
EXPORT_SYMBOL_GPL(mlx5_core_set_delay_drop);
struct mbox_info {
u32 *in;
u32 *out;

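Note the unit conversion above: the command field is written as timeout_usec / 100, so delay_drop_timeout is expressed in 100-microsecond units; a tiny worked sketch (helper name is hypothetical):

/* Hedged example of the conversion used above: a 10 ms request (10000 usec)
 * becomes 100 in set_delay_drop_params.delay_drop_timeout. */
static inline u32 delay_drop_usec_to_fw_units(u32 timeout_usec)
{
	return timeout_usec / 100;
}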

@ -32,6 +32,7 @@
#include <linux/pci.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/vport.h>
#include "mlx5_core.h"
#ifdef CONFIG_MLX5_CORE_EN
#include "eswitch.h"
@ -44,6 +45,38 @@ bool mlx5_sriov_is_enabled(struct mlx5_core_dev *dev)
return !!sriov->num_vfs;
}
static int sriov_restore_guids(struct mlx5_core_dev *dev, int vf)
{
struct mlx5_core_sriov *sriov = &dev->priv.sriov;
struct mlx5_hca_vport_context *in;
int err = 0;
/* Restore sriov guid and policy settings */
if (sriov->vfs_ctx[vf].node_guid ||
sriov->vfs_ctx[vf].port_guid ||
sriov->vfs_ctx[vf].policy != MLX5_POLICY_INVALID) {
in = kzalloc(sizeof(*in), GFP_KERNEL);
if (!in)
return -ENOMEM;
in->node_guid = sriov->vfs_ctx[vf].node_guid;
in->port_guid = sriov->vfs_ctx[vf].port_guid;
in->policy = sriov->vfs_ctx[vf].policy;
in->field_select =
!!(in->port_guid) * MLX5_HCA_VPORT_SEL_PORT_GUID |
!!(in->node_guid) * MLX5_HCA_VPORT_SEL_NODE_GUID |
!!(in->policy) * MLX5_HCA_VPORT_SEL_STATE_POLICY;
err = mlx5_core_modify_hca_vport_context(dev, 1, 1, vf + 1, in);
if (err)
mlx5_core_warn(dev, "modify vport context failed, unable to restore VF %d settings\n", vf);
kfree(in);
}
return err;
}
static int mlx5_device_enable_sriov(struct mlx5_core_dev *dev, int num_vfs)
{
struct mlx5_core_sriov *sriov = &dev->priv.sriov;
@ -74,6 +107,15 @@ static int mlx5_device_enable_sriov(struct mlx5_core_dev *dev, int num_vfs)
}
sriov->vfs_ctx[vf].enabled = 1;
sriov->enabled_vfs++;
if (MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_IB) {
err = sriov_restore_guids(dev, vf);
if (err) {
mlx5_core_warn(dev,
"failed to restore VF %d settings, err %d\n",
vf, err);
continue;
}
}
mlx5_core_dbg(dev, "successfully enabled VF* %d\n", vf);
}


@ -897,6 +897,68 @@ int mlx5_modify_nic_vport_promisc(struct mlx5_core_dev *mdev,
}
EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_promisc);
enum {
UC_LOCAL_LB,
MC_LOCAL_LB
};
int mlx5_nic_vport_update_local_lb(struct mlx5_core_dev *mdev, bool enable)
{
int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
void *in;
int err;
mlx5_core_dbg(mdev, "%s local_lb\n", enable ? "enable" : "disable");
in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
MLX5_SET(modify_nic_vport_context_in, in,
field_select.disable_mc_local_lb, 1);
MLX5_SET(modify_nic_vport_context_in, in,
nic_vport_context.disable_mc_local_lb, !enable);
MLX5_SET(modify_nic_vport_context_in, in,
field_select.disable_uc_local_lb, 1);
MLX5_SET(modify_nic_vport_context_in, in,
nic_vport_context.disable_uc_local_lb, !enable);
err = mlx5_modify_nic_vport_context(mdev, in, inlen);
kvfree(in);
return err;
}
EXPORT_SYMBOL_GPL(mlx5_nic_vport_update_local_lb);
int mlx5_nic_vport_query_local_lb(struct mlx5_core_dev *mdev, bool *status)
{
int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
u32 *out;
int value;
int err;
out = kzalloc(outlen, GFP_KERNEL);
if (!out)
return -ENOMEM;
err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
if (err)
goto out;
value = MLX5_GET(query_nic_vport_context_out, out,
nic_vport_context.disable_mc_local_lb) << MC_LOCAL_LB;
value |= MLX5_GET(query_nic_vport_context_out, out,
nic_vport_context.disable_uc_local_lb) << UC_LOCAL_LB;
*status = !value;
out:
kfree(out);
return err;
}
EXPORT_SYMBOL_GPL(mlx5_nic_vport_query_local_lb);
enum mlx5_vport_roce_state {
MLX5_VPORT_ROCE_DISABLED = 0,
MLX5_VPORT_ROCE_ENABLED = 1,


@ -428,6 +428,12 @@ enum mlx4_steer_type {
MLX4_NUM_STEERS
};
enum mlx4_resource_usage {
MLX4_RES_USAGE_NONE,
MLX4_RES_USAGE_DRIVER,
MLX4_RES_USAGE_USER_VERBS,
};
enum {
MLX4_NUM_FEXCH = 64 * 1024,
};
@ -748,6 +754,7 @@ struct mlx4_cq {
} tasklet_ctx;
int reset_notify_added;
struct list_head reset_notify;
u8 usage;
};
struct mlx4_qp {
@ -757,6 +764,7 @@ struct mlx4_qp {
atomic_t refcount;
struct completion free;
u8 usage;
};
struct mlx4_srq {
@ -1120,7 +1128,7 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt,
unsigned vector, int collapsed, int timestamp_en);
void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq);
int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align,
int *base, u8 flags);
int *base, u8 flags, u8 usage);
void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt);
int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp);
@ -1417,7 +1425,7 @@ int mlx4_get_phys_port_id(struct mlx4_dev *dev);
int mlx4_wol_read(struct mlx4_dev *dev, u64 *config, int port);
int mlx4_wol_write(struct mlx4_dev *dev, u64 config, int port);
int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx);
int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx, u8 usage);
void mlx4_counter_free(struct mlx4_dev *dev, u32 idx);
int mlx4_get_default_counter_index(struct mlx4_dev *dev, int port);


@ -290,6 +290,7 @@ enum mlx5_event {
MLX5_EVENT_TYPE_GPIO_EVENT = 0x15,
MLX5_EVENT_TYPE_PORT_MODULE_EVENT = 0x16,
MLX5_EVENT_TYPE_REMOTE_CONFIG = 0x19,
MLX5_EVENT_TYPE_GENERAL_EVENT = 0x22,
MLX5_EVENT_TYPE_PPS_EVENT = 0x25,
MLX5_EVENT_TYPE_DB_BF_CONGESTION = 0x1a,
@ -304,6 +305,10 @@ enum mlx5_event {
MLX5_EVENT_TYPE_FPGA_ERROR = 0x20,
};
enum {
MLX5_GENERAL_SUBTYPE_DELAY_DROP_TIMEOUT = 0x1,
};
enum {
MLX5_PORT_CHANGE_SUBTYPE_DOWN = 1,
MLX5_PORT_CHANGE_SUBTYPE_ACTIVE = 4,
@ -968,7 +973,7 @@ enum mlx5_cap_type {
MLX5_CAP_ATOMIC,
MLX5_CAP_ROCE,
MLX5_CAP_IPOIB_OFFLOADS,
MLX5_CAP_EOIB_OFFLOADS,
MLX5_CAP_IPOIB_ENHANCED_OFFLOADS,
MLX5_CAP_FLOW_TABLE,
MLX5_CAP_ESWITCH_FLOW_TABLE,
MLX5_CAP_ESWITCH,
@ -1011,6 +1016,10 @@ enum mlx5_mcam_feature_groups {
MLX5_GET(per_protocol_networking_offload_caps,\
mdev->caps.hca_max[MLX5_CAP_ETHERNET_OFFLOADS], cap)
#define MLX5_CAP_IPOIB_ENHANCED(mdev, cap) \
MLX5_GET(per_protocol_networking_offload_caps,\
mdev->caps.hca_cur[MLX5_CAP_IPOIB_ENHANCED_OFFLOADS], cap)
#define MLX5_CAP_ROCE(mdev, cap) \
MLX5_GET(roce_cap, mdev->caps.hca_cur[MLX5_CAP_ROCE], cap)


@ -162,6 +162,13 @@ enum dbg_rsc_type {
MLX5_DBG_RSC_CQ,
};
enum port_state_policy {
MLX5_POLICY_DOWN = 0,
MLX5_POLICY_UP = 1,
MLX5_POLICY_FOLLOW = 2,
MLX5_POLICY_INVALID = 0xffffffff
};
struct mlx5_field_desc {
struct dentry *dent;
int i;
@ -185,6 +192,7 @@ enum mlx5_dev_event {
MLX5_DEV_EVENT_GUID_CHANGE,
MLX5_DEV_EVENT_CLIENT_REREG,
MLX5_DEV_EVENT_PPS,
MLX5_DEV_EVENT_DELAY_DROP_TIMEOUT,
};
enum mlx5_port_status {
@ -525,6 +533,9 @@ struct mlx5_mkey_table {
struct mlx5_vf_context {
int enabled;
u64 port_guid;
u64 node_guid;
enum port_state_policy policy;
};
struct mlx5_core_sriov {
@ -842,13 +853,6 @@ struct mlx5_pas {
u8 log_sz;
};
enum port_state_policy {
MLX5_POLICY_DOWN = 0,
MLX5_POLICY_UP = 1,
MLX5_POLICY_FOLLOW = 2,
MLX5_POLICY_INVALID = 0xffffffff
};
enum phy_port_state {
MLX5_AAA_111
};


@ -200,6 +200,7 @@ enum {
MLX5_CMD_OP_QUERY_SQ = 0x907,
MLX5_CMD_OP_CREATE_RQ = 0x908,
MLX5_CMD_OP_MODIFY_RQ = 0x909,
MLX5_CMD_OP_SET_DELAY_DROP_PARAMS = 0x910,
MLX5_CMD_OP_DESTROY_RQ = 0x90a,
MLX5_CMD_OP_QUERY_RQ = 0x90b,
MLX5_CMD_OP_CREATE_RMP = 0x90c,
@ -840,7 +841,7 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 retransmission_q_counters[0x1];
u8 reserved_at_183[0x1];
u8 modify_rq_counter_set_id[0x1];
u8 reserved_at_185[0x1];
u8 rq_delay_drop[0x1];
u8 max_qp_cnt[0xa];
u8 pkey_table_size[0x10];
@ -857,7 +858,7 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 pcam_reg[0x1];
u8 local_ca_ack_delay[0x5];
u8 port_module_event[0x1];
u8 reserved_at_1b1[0x1];
u8 enhanced_error_q_counters[0x1];
u8 ports_check[0x1];
u8 reserved_at_1b3[0x1];
u8 disable_link_up[0x1];
@ -873,7 +874,8 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 max_tc[0x4];
u8 reserved_at_1d0[0x1];
u8 dcbx[0x1];
u8 reserved_at_1d2[0x3];
u8 general_notification_event[0x1];
u8 reserved_at_1d3[0x2];
u8 fpga[0x1];
u8 rol_s[0x1];
u8 rol_g[0x1];
@ -1016,7 +1018,8 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 log_max_wq_sz[0x5];
u8 nic_vport_change_event[0x1];
u8 reserved_at_3e1[0xa];
u8 disable_local_lb[0x1];
u8 reserved_at_3e2[0x9];
u8 log_max_vlan_list[0x5];
u8 reserved_at_3f0[0x3];
u8 log_max_current_mc_list[0x5];
@ -1187,7 +1190,8 @@ struct mlx5_ifc_cong_control_r_roce_ecn_np_bits {
u8 reserved_at_c0[0x12];
u8 cnp_dscp[0x6];
u8 reserved_at_d8[0x5];
u8 reserved_at_d8[0x4];
u8 cnp_prio_mode[0x1];
u8 cnp_802p_prio[0x3];
u8 reserved_at_e0[0x720];
@ -2515,7 +2519,7 @@ enum {
struct mlx5_ifc_rqc_bits {
u8 rlky[0x1];
u8 reserved_at_1[0x1];
u8 delay_drop_en[0x1];
u8 scatter_fcs[0x1];
u8 vsd[0x1];
u8 mem_rq_type[0x4];
@ -2562,7 +2566,9 @@ struct mlx5_ifc_rmpc_bits {
struct mlx5_ifc_nic_vport_context_bits {
u8 reserved_at_0[0x5];
u8 min_wqe_inline_mode[0x3];
u8 reserved_at_8[0x17];
u8 reserved_at_8[0x15];
u8 disable_mc_local_lb[0x1];
u8 disable_uc_local_lb[0x1];
u8 roce_en[0x1];
u8 arm_change_event[0x1];
@ -3947,7 +3953,47 @@ struct mlx5_ifc_query_q_counter_out_bits {
u8 local_ack_timeout_err[0x20];
u8 reserved_at_320[0x4e0];
u8 reserved_at_320[0xa0];
u8 resp_local_length_error[0x20];
u8 req_local_length_error[0x20];
u8 resp_local_qp_error[0x20];
u8 local_operation_error[0x20];
u8 resp_local_protection[0x20];
u8 req_local_protection[0x20];
u8 resp_cqe_error[0x20];
u8 req_cqe_error[0x20];
u8 req_mw_binding[0x20];
u8 req_bad_response[0x20];
u8 req_remote_invalid_request[0x20];
u8 resp_remote_invalid_request[0x20];
u8 req_remote_access_errors[0x20];
u8 resp_remote_access_errors[0x20];
u8 req_remote_operation_errors[0x20];
u8 req_transport_retries_exceeded[0x20];
u8 cq_overflow[0x20];
u8 resp_cqe_flush_error[0x20];
u8 req_cqe_flush_error[0x20];
u8 reserved_at_620[0x1e0];
};
struct mlx5_ifc_query_q_counter_in_bits {
@ -5229,7 +5275,9 @@ struct mlx5_ifc_modify_nic_vport_context_out_bits {
};
struct mlx5_ifc_modify_nic_vport_field_select_bits {
u8 reserved_at_0[0x16];
u8 reserved_at_0[0x14];
u8 disable_uc_local_lb[0x1];
u8 disable_mc_local_lb[0x1];
u8 node_guid[0x1];
u8 port_guid[0x1];
u8 min_inline[0x1];
@ -5847,6 +5895,28 @@ struct mlx5_ifc_destroy_rq_in_bits {
u8 reserved_at_60[0x20];
};
struct mlx5_ifc_set_delay_drop_params_in_bits {
u8 opcode[0x10];
u8 reserved_at_10[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
u8 reserved_at_40[0x20];
u8 reserved_at_60[0x10];
u8 delay_drop_timeout[0x10];
};
struct mlx5_ifc_set_delay_drop_params_out_bits {
u8 status[0x8];
u8 reserved_at_8[0x18];
u8 syndrome[0x20];
u8 reserved_at_40[0x40];
};
struct mlx5_ifc_destroy_rmp_out_bits {
u8 status[0x8];
u8 reserved_at_8[0x18];


@ -212,7 +212,6 @@ struct mlx5_wqe_ctrl_seg {
#define MLX5_WQE_CTRL_OPCODE_MASK 0xff
#define MLX5_WQE_CTRL_WQE_INDEX_MASK 0x00ffff00
#define MLX5_WQE_CTRL_WQE_INDEX_SHIFT 8
#define MLX5_WQE_AV_EXT 0x80000000
enum {
MLX5_ETH_WQE_L3_INNER_CSUM = 1 << 4,
@ -561,6 +560,9 @@ int mlx5_core_destroy_qp(struct mlx5_core_dev *dev,
int mlx5_core_qp_query(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
u32 *out, int outlen);
int mlx5_core_set_delay_drop(struct mlx5_core_dev *dev,
u32 timeout_usec);
int mlx5_core_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn);
int mlx5_core_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn);
void mlx5_init_qp_table(struct mlx5_core_dev *dev);


@ -114,5 +114,6 @@ int mlx5_core_modify_hca_vport_context(struct mlx5_core_dev *dev,
u8 other_vport, u8 port_num,
int vf,
struct mlx5_hca_vport_context *req);
int mlx5_nic_vport_update_local_lb(struct mlx5_core_dev *mdev, bool enable);
int mlx5_nic_vport_query_local_lb(struct mlx5_core_dev *mdev, bool *status);
#endif /* __MLX5_VPORT_H__ */


@ -304,7 +304,13 @@ static inline void rdma_get_ll_mac(struct in6_addr *addr, u8 *mac)
static inline int rdma_is_multicast_addr(struct in6_addr *addr)
{
return addr->s6_addr[0] == 0xff;
u32 ipv4_addr;
if (addr->s6_addr[0] == 0xff)
return 1;
memcpy(&ipv4_addr, addr->s6_addr + 12, 4);
return (ipv6_addr_v4mapped(addr) && ipv4_is_multicast(ipv4_addr));
}
static inline void rdma_get_mcast_mac(struct in6_addr *addr, u8 *mac)

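With this change, a v4-mapped IPv4 multicast GID is classified as multicast as well; a small hedged check (the address is chosen purely for illustration):

#include <net/ipv6.h>
#include <rdma/ib_addr.h>

/* Hedged sketch: ::ffff:239.1.1.1 (v4-mapped IPv4 multicast) now matches. */
static bool example_gid_is_mcast(void)
{
	struct in6_addr gid;

	ipv6_addr_set_v4mapped(htonl(0xef010101), &gid);	/* 239.1.1.1 */
	return rdma_is_multicast_addr(&gid);
}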

@ -1062,6 +1062,7 @@ enum ib_qp_create_flags {
/* FREE = 1 << 7, */
IB_QP_CREATE_SCATTER_FCS = 1 << 8,
IB_QP_CREATE_CVLAN_STRIPPING = 1 << 9,
IB_QP_CREATE_SOURCE_QPN = 1 << 10,
/* reserve bits 26-31 for low level drivers' internal use */
IB_QP_CREATE_RESERVED_START = 1 << 26,
IB_QP_CREATE_RESERVED_END = 1 << 31,
@ -1089,6 +1090,7 @@ struct ib_qp_init_attr {
*/
u8 port_num;
struct ib_rwq_ind_table *rwq_ind_tbl;
u32 source_qpn;
};
struct ib_qp_open_attr {
@ -1549,6 +1551,10 @@ enum ib_raw_packet_caps {
IB_RAW_PACKET_CAP_SCATTER_FCS = (1 << 1),
/* Checksum offloads are supported (for both send and receive). */
IB_RAW_PACKET_CAP_IP_CSUM = (1 << 2),
/* When a packet is received for an RQ with no receive WQEs, the
* packet processing is delayed.
*/
IB_RAW_PACKET_CAP_DELAY_DROP = (1 << 3),
};
enum ib_wq_type {
@ -1577,6 +1583,7 @@ struct ib_wq {
enum ib_wq_flags {
IB_WQ_FLAGS_CVLAN_STRIPPING = 1 << 0,
IB_WQ_FLAGS_SCATTER_FCS = 1 << 1,
IB_WQ_FLAGS_DELAY_DROP = 1 << 2,
};
struct ib_wq_init_attr {

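IB_WQ_FLAGS_DELAY_DROP plugs into the existing WQ creation path; a hedged sketch of how a kernel ULP might opt in after checking the new capability bit (function name and sizes are illustrative):

#include <rdma/ib_verbs.h>

static struct ib_wq *example_create_rx_wq(struct ib_pd *pd, struct ib_cq *cq)
{
	struct ib_wq_init_attr attr = {
		.wq_type = IB_WQT_RQ,
		.max_wr  = 1024,	/* illustrative sizes */
		.max_sge = 1,
		.cq      = cq,
	};

	/* Only ask for delay drop when the device reports support. */
	if (pd->device->attrs.raw_packet_caps & IB_RAW_PACKET_CAP_DELAY_DROP)
		attr.create_flags |= IB_WQ_FLAGS_DELAY_DROP;

	return ib_create_wq(pd, &attr);
}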

@ -578,7 +578,7 @@ struct ib_uverbs_ex_create_qp {
__u32 comp_mask;
__u32 create_flags;
__u32 rwq_ind_tbl_handle;
__u32 reserved1;
__u32 source_qpn;
};
struct ib_uverbs_open_qp {


@ -95,13 +95,65 @@ struct mlx4_ib_create_srq_resp {
__u32 reserved;
};
struct mlx4_ib_create_qp_rss {
__u64 rx_hash_fields_mask;
__u8 rx_hash_function;
__u8 rx_key_len;
__u8 reserved[6];
__u8 rx_hash_key[40];
__u32 comp_mask;
__u32 reserved1;
};
struct mlx4_ib_create_qp {
__u64 buf_addr;
__u64 db_addr;
__u8 log_sq_bb_count;
__u8 log_sq_stride;
__u8 sq_no_prefetch;
__u8 reserved[5];
__u32 inl_recv_sz;
__u8 reserved;
};
struct mlx4_ib_create_wq {
__u64 buf_addr;
__u64 db_addr;
__u8 log_range_size;
__u8 reserved[3];
__u32 comp_mask;
__u32 reserved1;
};
struct mlx4_ib_modify_wq {
__u32 comp_mask;
__u32 reserved;
};
struct mlx4_ib_create_rwq_ind_tbl_resp {
__u32 response_length;
__u32 reserved;
};
/* RX Hash function flags */
enum mlx4_ib_rx_hash_function_flags {
MLX4_IB_RX_HASH_FUNC_TOEPLITZ = 1 << 0,
};
/*
 * RX Hash flags: these flags select which fields of an incoming packet
 * participate in the RX Hash. Each flag represents a certain packet field;
 * when a flag is set, the field it represents is included in the RX Hash
 * calculation.
 */
enum mlx4_ib_rx_hash_fields {
MLX4_IB_RX_HASH_SRC_IPV4 = 1 << 0,
MLX4_IB_RX_HASH_DST_IPV4 = 1 << 1,
MLX4_IB_RX_HASH_SRC_IPV6 = 1 << 2,
MLX4_IB_RX_HASH_DST_IPV6 = 1 << 3,
MLX4_IB_RX_HASH_SRC_PORT_TCP = 1 << 4,
MLX4_IB_RX_HASH_DST_PORT_TCP = 1 << 5,
MLX4_IB_RX_HASH_SRC_PORT_UDP = 1 << 6,
MLX4_IB_RX_HASH_DST_PORT_UDP = 1 << 7
};
#endif /* MLX4_ABI_USER_H */
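For context, a hypothetical userspace-provider-side fill of the new RSS struct; field semantics are inferred from the names above, so treat this purely as a layout illustration:

#include <stdint.h>
#include <string.h>
#include <rdma/mlx4-abi.h>	/* assumed install path of this uapi header */

/* Hedged sketch: Toeplitz hashing over IPv4 addresses and UDP ports. */
static void example_fill_rss_cmd(struct mlx4_ib_create_qp_rss *cmd,
				 const uint8_t key[40])
{
	memset(cmd, 0, sizeof(*cmd));
	cmd->rx_hash_function    = MLX4_IB_RX_HASH_FUNC_TOEPLITZ;
	cmd->rx_hash_fields_mask = MLX4_IB_RX_HASH_SRC_IPV4 |
				   MLX4_IB_RX_HASH_DST_IPV4 |
				   MLX4_IB_RX_HASH_SRC_PORT_UDP |
				   MLX4_IB_RX_HASH_DST_PORT_UDP;
	cmd->rx_key_len = 40;	/* assumed: matches sizeof(rx_hash_key) */
	memcpy(cmd->rx_hash_key, key, sizeof(cmd->rx_hash_key));
}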