RDMA first v5.13 rc Pull Request
A mixture of small bug fixes, most for longer standing problems:

 - NULL pointer crash in siw
 - Various error unwind bugs in siw, rxe, cm
 - User triggerable errors in uverbs
 - Minor bugs in mlx5 and rxe drivers

-----BEGIN PGP SIGNATURE-----

iQIzBAABCgAdFiEEfB7FMLh+8QxL+6i3OG33FX4gmxoFAmCmawgACgkQOG33FX4g
mxp1HRAAh/y1AGM9CnegxxH87YnXlmqJUAE64WAChM99exWPKLoKKibDHlMkkXsb
eFvSZ294gsnGhEmZuVX3MeCFmkvTQC1cIFm5Z5xWLVU3NZ8oAGu2AmiqwEYMIMSI
iXf5dENJDRmseZVFTCPlkPzrr8r1LjK+eeY5I9CX+ZbPkjC6iNKA7xsc1xXbW8WY
aRGC8LYEUTpujmoGQtw4ET8R2szY2L5jX3XqofUraCT0nAjqoe/6uH09wbqHrLOs
O27ZEYQIG4Ji+oHFfMthwYM+Ofo1SllkbO4IN784TxW7cj53clXsnZqh66829mdC
qF+tuNGGw6GrJm7XbVkhZYb+P4EPYLFEeDPN3XJFsSc148Snwv5b7+/gRdNf3j8j
pL/2c//6xueRfHuU+crFDE4hOgkb1JO466QN3krAI+kpqdz82W2CCKxoihCUZJKO
jkXeOckek7siU95J4KKGN1PfbgKQpaQdV0ZV+1G/CpLDnuoiaIIPOQjnYmEexwAz
5zTBto5O9Dj/VozQ6msrMgORDVjCjEzkhRKL+/R9LrT5gOQzVvjp5E9vVEMjmupc
6Vhc7AKXy+En8sAgm2KFUO4IeT83uRhd5TpcD/tlZURb2RNh3Qyc7s3veIqanQYe
33ELd/YEyV9j7ftrF/Krebc3JavV1kjl1/vIf8hkVv4/q/C8yWs=
=c71x
-----END PGP SIGNATURE-----

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma

Pull rdma fixes from Jason Gunthorpe:
 "A mixture of small bug fixes, most for longer standing problems:

  - NULL pointer crash in siw

  - Various error unwind bugs in siw, rxe, cm

  - User triggerable errors in uverbs

  - Minor bugs in mlx5 and rxe drivers"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma:
  RDMA/uverbs: Fix a NULL vs IS_ERR() bug
  RDMA/mlx5: Fix query DCT via DEVX
  RDMA/core: Don't access cm_id after its destruction
  RDMA/rxe: Return CQE error if invalid lkey was supplied
  RDMA/mlx5: Recover from fatal event in dual port mode
  RDMA/mlx5: Verify that DM operation is reasonable
  RDMA/rxe: Clear all QP fields if creation failed
  RDMA/core: Prevent divide-by-zero error triggered by the user
  RDMA/siw: Release xarray entry
  RDMA/siw: Properly check send and receive CQ pointers
Commit f01da525b3
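The first fix on the list, "RDMA/uverbs: Fix a NULL vs IS_ERR() bug", is an instance of a classic kernel pitfall: a function that reports failure through an ERR_PTR()-encoded pointer never returns NULL on failure, so a !ptr test silently accepts the error value. Below is a minimal user-space sketch of the idiom; the macros mirror include/linux/err.h, and lookup_object() is a hypothetical stand-in for uapi_get_object(). The corresponding one-line change appears in the uverbs_std_types_device.c hunk further down.

/* Minimal sketch of the ERR_PTR idiom from include/linux/err.h.
 * lookup_object() is a hypothetical stand-in for uapi_get_object(). */
#include <stdio.h>

#define MAX_ERRNO       4095
#define ERR_PTR(err)    ((void *)(long)(err))
#define IS_ERR(ptr)     ((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)
#define PTR_ERR(ptr)    ((long)(ptr))

static void *lookup_object(int id)
{
        (void)id;
        return ERR_PTR(-2);     /* -ENOENT: failure is an encoded errno, not NULL */
}

int main(void)
{
        void *obj = lookup_object(42);

        if (!obj)               /* WRONG: an ERR_PTR value is non-NULL */
                puts("never reached");
        if (IS_ERR(obj))        /* RIGHT: detect and decode the errno */
                printf("lookup failed: %ld\n", PTR_ERR(obj));
        return 0;
}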
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -473,6 +473,7 @@ static void cma_release_dev(struct rdma_id_private *id_priv)
 	list_del(&id_priv->list);
 	cma_dev_put(id_priv->cma_dev);
 	id_priv->cma_dev = NULL;
+	id_priv->id.device = NULL;
 	if (id_priv->id.route.addr.dev_addr.sgid_attr) {
 		rdma_put_gid_attr(id_priv->id.route.addr.dev_addr.sgid_attr);
 		id_priv->id.route.addr.dev_addr.sgid_attr = NULL;
@@ -1860,6 +1861,7 @@ static void _destroy_id(struct rdma_id_private *id_priv,
 			iw_destroy_cm_id(id_priv->cm_id.iw);
 	}
 	cma_leave_mc_groups(id_priv);
+	rdma_restrack_del(&id_priv->res);
 	cma_release_dev(id_priv);
 }
 
@@ -1873,7 +1875,6 @@ static void _destroy_id(struct rdma_id_private *id_priv,
 	kfree(id_priv->id.route.path_rec);
 
 	put_net(id_priv->id.route.addr.dev_addr.net);
-	rdma_restrack_del(&id_priv->res);
 	kfree(id_priv);
 }
 
@@ -3774,7 +3775,7 @@ int rdma_listen(struct rdma_cm_id *id, int backlog)
 	}
 
 	id_priv->backlog = backlog;
-	if (id->device) {
+	if (id_priv->cma_dev) {
 		if (rdma_cap_ib_cm(id->device, 1)) {
 			ret = cma_ib_listen(id_priv);
 			if (ret)
diff --git a/drivers/infiniband/core/uverbs_std_types_device.c b/drivers/infiniband/core/uverbs_std_types_device.c
--- a/drivers/infiniband/core/uverbs_std_types_device.c
+++ b/drivers/infiniband/core/uverbs_std_types_device.c
@@ -117,8 +117,8 @@ static int UVERBS_HANDLER(UVERBS_METHOD_INFO_HANDLES)(
 		return ret;
 
 	uapi_object = uapi_get_object(attrs->ufile->device->uapi, object_id);
-	if (!uapi_object)
-		return -EINVAL;
+	if (IS_ERR(uapi_object))
+		return PTR_ERR(uapi_object);
 
 	handles = gather_objects_handle(attrs->ufile, uapi_object, attrs,
 					out_len, &total);
@@ -331,6 +331,9 @@ static int UVERBS_HANDLER(UVERBS_METHOD_QUERY_GID_TABLE)(
 	if (ret)
 		return ret;
 
+	if (!user_entry_size)
+		return -EINVAL;
+
 	max_entries = uverbs_attr_ptr_get_array_size(
 		attrs, UVERBS_ATTR_QUERY_GID_TABLE_RESP_ENTRIES,
 		user_entry_size);
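The QUERY_GID_TABLE hunk above is the "Prevent divide-by-zero error triggered by the user" fix: user_entry_size arrives from user space and is subsequently used as a divisor when sizing the response array, so a zero value could crash the kernel. A simplified sketch of the failure mode and the guard; compute_max_entries() is a hypothetical stand-in for uverbs_attr_ptr_get_array_size():

/* Sketch: why a user-controlled entry size must be validated before
 * being used as a divisor. */
#include <stdio.h>
#include <errno.h>

static long compute_max_entries(size_t buf_len, size_t user_entry_size)
{
        if (!user_entry_size)           /* the fix: reject before dividing */
                return -EINVAL;
        return (long)(buf_len / user_entry_size);
}

int main(void)
{
        printf("%ld\n", compute_max_entries(4096, 16)); /* 256 */
        printf("%ld\n", compute_max_entries(4096, 0));  /* -EINVAL, not a crash */
        return 0;
}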
diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c
--- a/drivers/infiniband/hw/mlx5/devx.c
+++ b/drivers/infiniband/hw/mlx5/devx.c
@@ -630,9 +630,8 @@ static bool devx_is_valid_obj_id(struct uverbs_attr_bundle *attrs,
 	case UVERBS_OBJECT_QP:
 	{
 		struct mlx5_ib_qp *qp = to_mqp(uobj->object);
-		enum ib_qp_type	qp_type = qp->ibqp.qp_type;
 
-		if (qp_type == IB_QPT_RAW_PACKET ||
+		if (qp->type == IB_QPT_RAW_PACKET ||
 		    (qp->flags & IB_QP_CREATE_SOURCE_QPN)) {
 			struct mlx5_ib_raw_packet_qp *raw_packet_qp =
 				&qp->raw_packet_qp;
@@ -649,10 +648,9 @@ static bool devx_is_valid_obj_id(struct uverbs_attr_bundle *attrs,
 					       sq->tisn) == obj_id);
 		}
 
-		if (qp_type == MLX5_IB_QPT_DCT)
+		if (qp->type == MLX5_IB_QPT_DCT)
 			return get_enc_obj_id(MLX5_CMD_OP_CREATE_DCT,
 					      qp->dct.mdct.mqp.qpn) == obj_id;
-
 		return get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
 				      qp->ibqp.qp_num) == obj_id;
 	}
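The devx.c change switches the object-id validation from the core-visible qp->ibqp.qp_type to the driver's own qp->type. The distinction matters for DCT: driver-specific QPs are created through the core as the generic IB_QPT_DRIVER, so the old comparison against MLX5_IB_QPT_DCT could never match and querying a DCT via DEVX failed. A rough illustrative sketch; the types and values below are invented, loosely following the mlx5_ib_qp layout:

/* Illustrative only, not the kernel's definitions: the core records a
 * generic IB_QPT_DRIVER for driver-specific QPs, while the driver keeps
 * the precise type in its own field. */
#include <stdio.h>

enum qp_type { IB_QPT_RC, IB_QPT_DRIVER, MLX5_IB_QPT_DCT };

struct mlx5_qp_sketch {
        enum qp_type ibqp_qp_type;      /* core-visible: IB_QPT_DRIVER for a DCT */
        enum qp_type type;              /* driver-internal: MLX5_IB_QPT_DCT */
};

int main(void)
{
        struct mlx5_qp_sketch dct = {
                .ibqp_qp_type = IB_QPT_DRIVER,
                .type = MLX5_IB_QPT_DCT,
        };

        /* the old check never matched for a DCT ... */
        printf("via ibqp.qp_type: %s\n",
               dct.ibqp_qp_type == MLX5_IB_QPT_DCT ? "DCT" : "not a DCT");
        /* ... the fixed check does */
        printf("via qp->type:     %s\n",
               dct.type == MLX5_IB_QPT_DCT ? "DCT" : "not a DCT");
        return 0;
}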
diff --git a/drivers/infiniband/hw/mlx5/dm.c b/drivers/infiniband/hw/mlx5/dm.c
--- a/drivers/infiniband/hw/mlx5/dm.c
+++ b/drivers/infiniband/hw/mlx5/dm.c
@@ -217,6 +217,9 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DM_MAP_OP_ADDR)(
 	if (err)
 		return err;
 
+	if (op >= BITS_PER_TYPE(u32))
+		return -EOPNOTSUPP;
+
 	if (!(MLX5_CAP_DEV_MEM(dev->mdev, memic_operations) & BIT(op)))
 		return -EOPNOTSUPP;
 
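The dm.c hunk ("Verify that DM operation is reasonable") adds a bounds check before the capability test because op is user-controlled, and BIT(op) with op >= 32 shifts past the width of the 32-bit capability mask, which is undefined behavior in C. A small sketch of the guard; the two macros below mirror the kernel's, but are simplified:

/* Sketch: reject an untrusted bit index before using it in a shift.
 * A shift count >= the operand width is undefined behavior. */
#include <stdio.h>
#include <stdint.h>
#include <errno.h>

#define BITS_PER_TYPE(type)     (sizeof(type) * 8)
#define BIT(nr)                 (1U << (nr))

static int check_op_supported(uint32_t cap_mask, unsigned int op)
{
        if (op >= BITS_PER_TYPE(uint32_t))      /* the added guard */
                return -EOPNOTSUPP;
        if (!(cap_mask & BIT(op)))              /* now safe to shift */
                return -EOPNOTSUPP;
        return 0;
}

int main(void)
{
        printf("op 3:  %d\n", check_op_supported(0xff, 3));     /* 0: supported */
        printf("op 40: %d\n", check_op_supported(0xff, 40));    /* rejected, no UB */
        return 0;
}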
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -4419,6 +4419,7 @@ static int mlx5r_mp_probe(struct auxiliary_device *adev,
 
 		if (bound) {
 			rdma_roce_rescan_device(&dev->ib_dev);
+			mpi->ibdev->ib_active = true;
 			break;
 		}
 	}
diff --git a/drivers/infiniband/sw/rxe/rxe_comp.c b/drivers/infiniband/sw/rxe/rxe_comp.c
--- a/drivers/infiniband/sw/rxe/rxe_comp.c
+++ b/drivers/infiniband/sw/rxe/rxe_comp.c
@@ -346,13 +346,15 @@ static inline enum comp_state do_read(struct rxe_qp *qp,
 	ret = copy_data(qp->pd, IB_ACCESS_LOCAL_WRITE,
 			&wqe->dma, payload_addr(pkt),
 			payload_size(pkt), to_mr_obj, NULL);
-	if (ret)
+	if (ret) {
+		wqe->status = IB_WC_LOC_PROT_ERR;
 		return COMPST_ERROR;
+	}
 
 	if (wqe->dma.resid == 0 && (pkt->mask & RXE_END_MASK))
 		return COMPST_COMP_ACK;
-	else
-		return COMPST_UPDATE_COMP;
+
+	return COMPST_UPDATE_COMP;
 }
 
 static inline enum comp_state do_atomic(struct rxe_qp *qp,
@@ -366,10 +368,12 @@ static inline enum comp_state do_atomic(struct rxe_qp *qp,
 	ret = copy_data(qp->pd, IB_ACCESS_LOCAL_WRITE,
 			&wqe->dma, &atomic_orig,
 			sizeof(u64), to_mr_obj, NULL);
-	if (ret)
+	if (ret) {
+		wqe->status = IB_WC_LOC_PROT_ERR;
 		return COMPST_ERROR;
-	else
-		return COMPST_COMP_ACK;
+	}
+
+	return COMPST_COMP_ACK;
 }
 
 static void make_send_cqe(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
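In both rxe_comp.c hunks ("Return CQE error if invalid lkey was supplied") the point of the extra assignment is that a failed copy_data() previously moved the completer into the error state without recording why, so the consumer saw no meaningful completion status. Setting wqe->status first lets the generated CQE report IB_WC_LOC_PROT_ERR. A compact sketch of the pattern, with hypothetical names rather than the rxe driver's:

/* Sketch: record the specific failure cause before entering the error
 * state, so the completion entry is informative. */
#include <stdio.h>

enum comp_state { COMPST_OK, COMPST_ERROR };
enum wc_status  { WC_SUCCESS, WC_GENERAL_ERR, WC_LOC_PROT_ERR };

struct wqe { enum wc_status status; };

static enum comp_state complete_read(struct wqe *wqe, int copy_ret)
{
        if (copy_ret) {
                wqe->status = WC_LOC_PROT_ERR;  /* record *why* before erroring out */
                return COMPST_ERROR;
        }
        return COMPST_OK;
}

int main(void)
{
        struct wqe wqe = { WC_SUCCESS };

        complete_read(&wqe, -1);                /* simulate a failed copy */
        printf("CQE status: %d\n", wqe.status); /* 2 == WC_LOC_PROT_ERR */
        return 0;
}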
diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c
--- a/drivers/infiniband/sw/rxe/rxe_qp.c
+++ b/drivers/infiniband/sw/rxe/rxe_qp.c
@@ -242,6 +242,7 @@ static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
 		if (err) {
 			vfree(qp->sq.queue->buf);
 			kfree(qp->sq.queue);
+			qp->sq.queue = NULL;
 			return err;
 		}
 
@@ -295,6 +296,7 @@ static int rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp,
 		if (err) {
 			vfree(qp->rq.queue->buf);
 			kfree(qp->rq.queue);
+			qp->rq.queue = NULL;
 			return err;
 		}
 	}
@@ -355,6 +357,11 @@ int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd,
 err2:
 	rxe_queue_cleanup(qp->sq.queue);
 err1:
+	qp->pd = NULL;
+	qp->rcq = NULL;
+	qp->scq = NULL;
+	qp->srq = NULL;
+
 	if (srq)
 		rxe_drop_ref(srq);
 	rxe_drop_ref(scq);
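The rxe_qp.c changes all serve the "Clear all QP fields if creation failed" fix: a QP that fails mid-initialization is still torn down through the normal cleanup path, and any stale queue or CQ pointer left behind risks a double free or a bogus reference drop. A short sketch of the free-and-NULL idiom, using a hypothetical structure:

/* Sketch: NULL out pointers as resources are released on an error path,
 * so a later cleanup pass can safely skip them. */
#include <stdlib.h>
#include <stdio.h>

struct qp_sketch { void *sq_buf; void *rq_buf; };

static int init_qp(struct qp_sketch *qp, int fail_rq)
{
        qp->sq_buf = malloc(64);
        qp->rq_buf = fail_rq ? NULL : malloc(64);
        if (!qp->rq_buf) {
                free(qp->sq_buf);
                qp->sq_buf = NULL;      /* the fix: no dangling pointer */
                return -1;
        }
        return 0;
}

static void destroy_qp(struct qp_sketch *qp)
{
        free(qp->sq_buf);               /* free(NULL) is a no-op, not a double free */
        free(qp->rq_buf);
}

int main(void)
{
        struct qp_sketch qp = { 0 };

        if (init_qp(&qp, 1))
                puts("init failed, fields cleared");
        destroy_qp(&qp);                /* safe even after the failure */
        return 0;
}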
diff --git a/drivers/infiniband/sw/siw/siw_verbs.c b/drivers/infiniband/sw/siw/siw_verbs.c
--- a/drivers/infiniband/sw/siw/siw_verbs.c
+++ b/drivers/infiniband/sw/siw/siw_verbs.c
@@ -300,7 +300,6 @@ struct ib_qp *siw_create_qp(struct ib_pd *pd,
 	struct siw_ucontext *uctx =
 		rdma_udata_to_drv_context(udata, struct siw_ucontext,
 					  base_ucontext);
-	struct siw_cq *scq = NULL, *rcq = NULL;
 	unsigned long flags;
 	int num_sqe, num_rqe, rv = 0;
 	size_t length;
@@ -343,10 +342,8 @@ struct ib_qp *siw_create_qp(struct ib_pd *pd,
 		rv = -EINVAL;
 		goto err_out;
 	}
-	scq = to_siw_cq(attrs->send_cq);
-	rcq = to_siw_cq(attrs->recv_cq);
 
-	if (!scq || (!rcq && !attrs->srq)) {
+	if (!attrs->send_cq || (!attrs->recv_cq && !attrs->srq)) {
 		siw_dbg(base_dev, "send CQ or receive CQ invalid\n");
 		rv = -EINVAL;
 		goto err_out;
@@ -378,7 +375,7 @@ struct ib_qp *siw_create_qp(struct ib_pd *pd,
 	else {
 		/* Zero sized SQ is not supported */
 		rv = -EINVAL;
-		goto err_out;
+		goto err_out_xa;
 	}
 	if (num_rqe)
 		num_rqe = roundup_pow_of_two(num_rqe);
@@ -401,8 +398,8 @@ struct ib_qp *siw_create_qp(struct ib_pd *pd,
 		}
 	}
 	qp->pd = pd;
-	qp->scq = scq;
-	qp->rcq = rcq;
+	qp->scq = to_siw_cq(attrs->send_cq);
+	qp->rcq = to_siw_cq(attrs->recv_cq);
 
 	if (attrs->srq) {
 		/*
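The siw change looks cosmetic but addresses the NULL-pointer class of bug head-on: to_siw_cq() is a container_of() conversion, and applying it to a NULL ib_cq pointer does not necessarily yield NULL. It yields NULL minus the member offset, which is non-NULL whenever the embedded member is not the first field, so the old !scq test is invalid by construction; the reliable check is on attrs->send_cq itself, before any conversion. A sketch of the trap with simplified structures (strictly speaking the pointer arithmetic on NULL is itself undefined behavior, which is exactly why such checks are rejected):

/* Sketch of the container_of() trap: converting a NULL base pointer
 * yields base minus the member offset. Structures are simplified. */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct ib_cq { int dummy; };
struct siw_cq_sketch {
        int id;                 /* a member before the base => nonzero offset */
        struct ib_cq base_cq;
};

int main(void)
{
        struct ib_cq *base = NULL;      /* caller supplied no CQ */
        struct siw_cq_sketch *scq =
                container_of(base, struct siw_cq_sketch, base_cq);

        /* the converted pointer is garbage, but non-NULL */
        printf("base: %p, converted: %p\n", (void *)base, (void *)scq);
        printf("!scq catches it: %s\n", !scq ? "yes" : "no (the bug)");
        return 0;
}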