IB/mlx5: Add support for extended atomic operations

Extended atomic operations cmp&swp and fetch&add are a Mellanox
feature that extends the standard atomic operations to use varied
operand sizes, as opposed to normal atomic operations, which use
an 8-byte operand only.
Extended atomics also allow masking the results and arguments.

This patch configures the QP to support extended atomic operations
with the maximum size possible, as exposed by the HCA capabilities.

Signed-off-by: Yonatan Cohen <yonatanc@mellanox.com>
Reviewed-by: Guy Levi <guyle@mellanox.com>
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
Author: Yonatan Cohen, 2018-10-10 09:25:16 +03:00; committed by Doug Ledford
Parent: 76d865b87c
Commit: a60109dc9a
2 changed files: 96 additions and 25 deletions
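As a rough illustration of the capability translation performed by the new helpers in the diff below: the HCA reports supported atomic operand sizes as a bitmask in which bit n stands for 2^n bytes, and for sizes above 8 bytes the mode value programmed into the hardware equals log2 of the largest supported size. The following standalone sketch mirrors that mapping, using __builtin_clz() as a user-space stand-in for the kernel's __fls(); the example masks are hypothetical and the snippet is not part of the patch.

/* Standalone sketch of the size-mask -> atomic-mode translation done by
 * atomic_size_to_mode(); __builtin_clz() stands in for the kernel's __fls().
 * The example masks 0x104 and 0x00c are hypothetical values.
 */
#include <stdio.h>

#define MLX5_ATOMIC_MODE_8B	3	/* modes 4..8 encode log2(size) for 16B..256B */

static int size_mask_to_mode(int size_mask)
{
	int supported = size_mask & 0x1ff;	/* driver caps at 256B */
	int log_max_size;

	if (!supported)
		return -1;			/* no atomic size supported */

	log_max_size = 31 - __builtin_clz((unsigned int)supported);

	return log_max_size > 3 ? log_max_size : MLX5_ATOMIC_MODE_8B;
}

int main(void)
{
	printf("mask 0x104 -> mode %d\n", size_mask_to_mode(0x104)); /* 8: 256B */
	printf("mask 0x00c -> mode %d\n", size_mask_to_mode(0x00c)); /* 3: 8B   */
	return 0;
}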

drivers/infiniband/hw/mlx5/qp.c

@@ -1733,6 +1733,53 @@ static void configure_requester_scat_cqe(struct mlx5_ib_dev *dev,
 		MLX5_SET(qpc, qpc, cs_req, MLX5_REQ_SCAT_DATA32_CQE);
 }
 
+static int atomic_size_to_mode(int size_mask)
+{
+	/* driver does not support atomic_size > 256B
+	 * and does not know how to translate bigger sizes
+	 */
+	int supported_size_mask = size_mask & 0x1ff;
+	int log_max_size;
+
+	if (!supported_size_mask)
+		return -EOPNOTSUPP;
+
+	log_max_size = __fls(supported_size_mask);
+
+	if (log_max_size > 3)
+		return log_max_size;
+
+	return MLX5_ATOMIC_MODE_8B;
+}
+
+static int get_atomic_mode(struct mlx5_ib_dev *dev,
+			   enum ib_qp_type qp_type)
+{
+	u8 atomic_operations = MLX5_CAP_ATOMIC(dev->mdev, atomic_operations);
+	u8 atomic = MLX5_CAP_GEN(dev->mdev, atomic);
+	int atomic_mode = -EOPNOTSUPP;
+	int atomic_size_mask;
+
+	if (!atomic)
+		return -EOPNOTSUPP;
+
+	if (qp_type == MLX5_IB_QPT_DCT)
+		atomic_size_mask = MLX5_CAP_ATOMIC(dev->mdev, atomic_size_dc);
+	else
+		atomic_size_mask = MLX5_CAP_ATOMIC(dev->mdev, atomic_size_qp);
+
+	if ((atomic_operations & MLX5_ATOMIC_OPS_EXTENDED_CMP_SWAP) ||
+	    (atomic_operations & MLX5_ATOMIC_OPS_EXTENDED_FETCH_ADD))
+		atomic_mode = atomic_size_to_mode(atomic_size_mask);
+
+	if (atomic_mode <= 0 &&
+	    (atomic_operations & MLX5_ATOMIC_OPS_CMP_SWAP &&
+	     atomic_operations & MLX5_ATOMIC_OPS_FETCH_ADD))
+		atomic_mode = MLX5_ATOMIC_MODE_IB_COMP;
+
+	return atomic_mode;
+}
+
 static inline bool check_flags_mask(uint64_t input, uint64_t supported)
 {
 	return (input & ~supported) == 0;

@@ -2562,13 +2609,15 @@ int mlx5_ib_destroy_qp(struct ib_qp *qp)
 	return 0;
 }
 
-static __be32 to_mlx5_access_flags(struct mlx5_ib_qp *qp, const struct ib_qp_attr *attr,
-				   int attr_mask)
+static int to_mlx5_access_flags(struct mlx5_ib_qp *qp,
+				const struct ib_qp_attr *attr,
+				int attr_mask, __be32 *hw_access_flags)
 {
-	u32 hw_access_flags = 0;
 	u8 dest_rd_atomic;
 	u32 access_flags;
 
+	struct mlx5_ib_dev *dev = to_mdev(qp->ibqp.device);
+
 	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
 		dest_rd_atomic = attr->max_dest_rd_atomic;
 	else

@@ -2583,13 +2632,25 @@ static __be32 to_mlx5_access_flags(struct mlx5_ib_qp *qp, const struct ib_qp_att
 		access_flags &= IB_ACCESS_REMOTE_WRITE;
 
 	if (access_flags & IB_ACCESS_REMOTE_READ)
-		hw_access_flags |= MLX5_QP_BIT_RRE;
-	if (access_flags & IB_ACCESS_REMOTE_ATOMIC)
-		hw_access_flags |= (MLX5_QP_BIT_RAE | MLX5_ATOMIC_MODE_CX);
-	if (access_flags & IB_ACCESS_REMOTE_WRITE)
-		hw_access_flags |= MLX5_QP_BIT_RWE;
+		*hw_access_flags |= MLX5_QP_BIT_RRE;
+	if ((access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
+	    qp->ibqp.qp_type == IB_QPT_RC) {
+		int atomic_mode;
+
+		atomic_mode = get_atomic_mode(dev, qp->ibqp.qp_type);
+		if (atomic_mode < 0)
+			return -EOPNOTSUPP;
+
+		*hw_access_flags |= MLX5_QP_BIT_RAE;
+		*hw_access_flags |= atomic_mode << MLX5_ATOMIC_MODE_OFFSET;
+	}
+
+	if (access_flags & IB_ACCESS_REMOTE_WRITE)
+		*hw_access_flags |= MLX5_QP_BIT_RWE;
 
-	return cpu_to_be32(hw_access_flags);
+	*hw_access_flags = cpu_to_be32(*hw_access_flags);
+
+	return 0;
 }
 
 enum {

@@ -3287,8 +3348,15 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
 			cpu_to_be32(fls(attr->max_dest_rd_atomic - 1) << 21);
 	}
 
-	if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC))
-		context->params2 |= to_mlx5_access_flags(qp, attr, attr_mask);
+	if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC)) {
+		__be32 access_flags = 0;
+
+		err = to_mlx5_access_flags(qp, attr, attr_mask, &access_flags);
+		if (err)
+			goto out;
+
+		context->params2 |= access_flags;
+	}
 
 	if (attr_mask & IB_QP_MIN_RNR_TIMER)
 		context->rnr_nextrecvpsn |= cpu_to_be32(attr->min_rnr_timer << 24);

@@ -3504,10 +3572,14 @@ static int mlx5_ib_modify_dct(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 		if (attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE)
 			MLX5_SET(dctc, dctc, rwe, 1);
 		if (attr->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC) {
-			if (!mlx5_ib_dc_atomic_is_supported(dev))
+			int atomic_mode;
+
+			atomic_mode = get_atomic_mode(dev, MLX5_IB_QPT_DCT);
+			if (atomic_mode < 0)
 				return -EOPNOTSUPP;
+
+			MLX5_SET(dctc, dctc, atomic_mode, atomic_mode);
 			MLX5_SET(dctc, dctc, rae, 1);
-			MLX5_SET(dctc, dctc, atomic_mode, MLX5_ATOMIC_MODE_DCT_CX);
 		}
 		MLX5_SET(dctc, dctc, pkey_index, attr->pkey_index);
 		MLX5_SET(dctc, dctc, port, attr->port_num);

include/linux/mlx5/driver.h

@@ -97,14 +97,15 @@ enum {
 };
 
 enum {
-	MLX5_ATOMIC_MODE_IB_COMP	= 1 << 16,
-	MLX5_ATOMIC_MODE_CX		= 2 << 16,
-	MLX5_ATOMIC_MODE_8B		= 3 << 16,
-	MLX5_ATOMIC_MODE_16B		= 4 << 16,
-	MLX5_ATOMIC_MODE_32B		= 5 << 16,
-	MLX5_ATOMIC_MODE_64B		= 6 << 16,
-	MLX5_ATOMIC_MODE_128B		= 7 << 16,
-	MLX5_ATOMIC_MODE_256B		= 8 << 16,
+	MLX5_ATOMIC_MODE_OFFSET		= 16,
+	MLX5_ATOMIC_MODE_IB_COMP	= 1,
+	MLX5_ATOMIC_MODE_CX		= 2,
+	MLX5_ATOMIC_MODE_8B		= 3,
+	MLX5_ATOMIC_MODE_16B		= 4,
+	MLX5_ATOMIC_MODE_32B		= 5,
+	MLX5_ATOMIC_MODE_64B		= 6,
+	MLX5_ATOMIC_MODE_128B		= 7,
+	MLX5_ATOMIC_MODE_256B		= 8,
 };
 
 enum {

@@ -162,13 +163,11 @@ enum mlx5_dcbx_oper_mode {
 	MLX5E_DCBX_PARAM_VER_OPER_AUTO	= 0x3,
 };
 
-enum mlx5_dct_atomic_mode {
-	MLX5_ATOMIC_MODE_DCT_CX		= 2,
-};
-
 enum {
 	MLX5_ATOMIC_OPS_CMP_SWAP	= 1 << 0,
 	MLX5_ATOMIC_OPS_FETCH_ADD	= 1 << 1,
+	MLX5_ATOMIC_OPS_EXTENDED_CMP_SWAP = 1 << 2,
+	MLX5_ATOMIC_OPS_EXTENDED_FETCH_ADD = 1 << 3,
 };
 
 enum mlx5_page_fault_resume_flags {
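The header change above keeps the numeric meaning of the atomic modes but strips the built-in QPC field shift out of the enum values, moving it to the point of use via MLX5_ATOMIC_MODE_OFFSET so the same constants can also be written unshifted into the DCT context. A small standalone sketch of that equivalence follows; the OLD_* names are invented here purely for comparison and do not exist in the tree.

/* Sketch: the old enum values carried the params2 shift (e.g. 2 << 16); the
 * new ones are raw modes shifted at the call site. OLD_* constants are
 * hypothetical, reproduced only to show the encodings match.
 */
#include <assert.h>
#include <stdint.h>

enum {
	MLX5_ATOMIC_MODE_OFFSET = 16,
	MLX5_ATOMIC_MODE_CX     = 2,
	MLX5_ATOMIC_MODE_256B   = 8,
};

enum {
	OLD_MLX5_ATOMIC_MODE_CX   = 2 << 16,
	OLD_MLX5_ATOMIC_MODE_256B = 8 << 16,
};

int main(void)
{
	/* Shifted use, as in to_mlx5_access_flags(): same bits as before */
	uint32_t params2 = MLX5_ATOMIC_MODE_CX << MLX5_ATOMIC_MODE_OFFSET;
	assert(params2 == (uint32_t)OLD_MLX5_ATOMIC_MODE_CX);

	/* Unshifted use, as in mlx5_ib_modify_dct(): the raw mode goes
	 * straight into the dctc atomic_mode field */
	uint32_t dct_mode = MLX5_ATOMIC_MODE_256B;
	assert(dct_mode == (uint32_t)(OLD_MLX5_ATOMIC_MODE_256B >> MLX5_ATOMIC_MODE_OFFSET));

	return 0;
}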