First 5.0 rc pull request
Not much so far, but I'm feeling like the 2nd PR -rc will be larger than this. We have the usual batch of bugs and two fixes to code merged this cycle. - Restore valgrind support for the ioctl verbs interface merged this window, and fix a missed error code on an error path from that conversion - A user reported crash on obsolete mthca hardware - pvrdma was using the wrong command opcode toward the hypervisor - NULL pointer crash regression when dumping rdma-cm over netlink - Be conservative about exposing the global rkey -----BEGIN PGP SIGNATURE----- iQIzBAABCgAdFiEEfB7FMLh+8QxL+6i3OG33FX4gmxoFAlxBTeMACgkQOG33FX4g mxrOIQ//YdZdU9J825DM4ppH/MWRoPgayI+cca5sW2EG/nkgsvFJoiVDDK5/ka1g ge5Q21ZLMSPCBR0Iu/e/JOq6fJI4fsbcJGZURbyKgRZqyCBCf6qJbhiZKifpQMVb w7RP8kRFRdaiQzkAYfZSv9TP93JLvTDLg6zZ74r4vc8YphIzkI410v568hs6FiVu MIcb53pBWUswpCAnBVB+54sw+phJyjd02kmY4xTlWmiEzwHBb0JQ+Kps72/G0IWy 0vOlDI1UjwqoDfThzyT7mcXqnSbXxg/e8EecMpyFzlorQyxgZ5TsJgQ8ubSYxuiQ 7+dZ4rsdoZD++3MGtpmqDMQzKSPb989WzJT8WLp5oSw4ryAXeJJ+tys/APLtvPkf EgKgVyEqfxMDXn02/ENwDPpZyKLZkhcHFLgvfYmxtlDvtai/rvTLmzV1mptEaxlF +2pwSQM4/E/8qrLglN9kdFSfjBMb7Bvd2NYQqZ9vah2omb7gPsaTEEpVw6l/E0NX oOxFKPEzb0nP9KmJmwO8KLCvcrruuRL8kpmhc6sQMQJ6z0h4hmZrHF5EZZH92g0p maHyrx66vqw/Yl+TLvAb/T6FV1ax5c1TauiNErAjnag2wgVWW42Q7lQzSFLFI8su GU8oRlbIclDQ/1bszsf0IShq0r9G17+2n6yyTX39rj62YioiDlI= =ymZq -----END PGP SIGNATURE----- Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma Pull rdma fixes from Jason Gunthorpe: "Not much so far. 
We have the usual batch of bugs and two fixes to code merged this cycle: - Restore valgrind support for the ioctl verbs interface merged this window, and fix a missed error code on an error path from that conversion - A user reported crash on obsolete mthca hardware - pvrdma was using the wrong command opcode toward the hypervisor - NULL pointer crash regression when dumping rdma-cm over netlink - Be conservative about exposing the global rkey" * tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma: RDMA/uverbs: Mark ioctl responses with UVERBS_ATTR_F_VALID_OUTPUT RDMA/mthca: Clear QP objects during their allocation RDMA/vmw_pvrdma: Return the correct opcode when creating WR RDMA/cma: Add cm_id restrack resource based on kernel or user cm_id type RDMA/nldev: Don't expose unsafe global rkey to regular user RDMA/uverbs: Fix post send success return value in case of error
This commit is contained in:
Commit
d7393226d1
|
@ -494,7 +494,10 @@ static void _cma_attach_to_dev(struct rdma_id_private *id_priv,
|
|||
id_priv->id.route.addr.dev_addr.transport =
|
||||
rdma_node_get_transport(cma_dev->device->node_type);
|
||||
list_add_tail(&id_priv->list, &cma_dev->id_list);
|
||||
rdma_restrack_kadd(&id_priv->res);
|
||||
if (id_priv->res.kern_name)
|
||||
rdma_restrack_kadd(&id_priv->res);
|
||||
else
|
||||
rdma_restrack_uadd(&id_priv->res);
|
||||
}
|
||||
|
||||
static void cma_attach_to_dev(struct rdma_id_private *id_priv,
|
||||
|
|
|
@ -584,10 +584,6 @@ static int fill_res_pd_entry(struct sk_buff *msg, struct netlink_callback *cb,
|
|||
if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_USECNT,
|
||||
atomic_read(&pd->usecnt), RDMA_NLDEV_ATTR_PAD))
|
||||
goto err;
|
||||
if ((pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY) &&
|
||||
nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_UNSAFE_GLOBAL_RKEY,
|
||||
pd->unsafe_global_rkey))
|
||||
goto err;
|
||||
|
||||
if (fill_res_name_pid(msg, res))
|
||||
goto err;
|
||||
|
|
|
@ -106,6 +106,8 @@ int uverbs_finalize_object(struct ib_uobject *uobj,
|
|||
enum uverbs_obj_access access,
|
||||
bool commit);
|
||||
|
||||
int uverbs_output_written(const struct uverbs_attr_bundle *bundle, size_t idx);
|
||||
|
||||
void setup_ufile_idr_uobject(struct ib_uverbs_file *ufile);
|
||||
void release_ufile_idr_uobject(struct ib_uverbs_file *ufile);
|
||||
|
||||
|
|
|
@ -60,6 +60,10 @@ static int uverbs_response(struct uverbs_attr_bundle *attrs, const void *resp,
|
|||
{
|
||||
int ret;
|
||||
|
||||
if (uverbs_attr_is_valid(attrs, UVERBS_ATTR_CORE_OUT))
|
||||
return uverbs_copy_to_struct_or_zero(
|
||||
attrs, UVERBS_ATTR_CORE_OUT, resp, resp_len);
|
||||
|
||||
if (copy_to_user(attrs->ucore.outbuf, resp,
|
||||
min(attrs->ucore.outlen, resp_len)))
|
||||
return -EFAULT;
|
||||
|
@ -1181,6 +1185,9 @@ static int ib_uverbs_poll_cq(struct uverbs_attr_bundle *attrs)
|
|||
goto out_put;
|
||||
}
|
||||
|
||||
if (uverbs_attr_is_valid(attrs, UVERBS_ATTR_CORE_OUT))
|
||||
ret = uverbs_output_written(attrs, UVERBS_ATTR_CORE_OUT);
|
||||
|
||||
ret = 0;
|
||||
|
||||
out_put:
|
||||
|
@ -2012,8 +2019,10 @@ static int ib_uverbs_post_send(struct uverbs_attr_bundle *attrs)
|
|||
return -ENOMEM;
|
||||
|
||||
qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs);
|
||||
if (!qp)
|
||||
if (!qp) {
|
||||
ret = -EINVAL;
|
||||
goto out;
|
||||
}
|
||||
|
||||
is_ud = qp->qp_type == IB_QPT_UD;
|
||||
sg_ind = 0;
|
||||
|
|
|
@ -144,6 +144,21 @@ static bool uverbs_is_attr_cleared(const struct ib_uverbs_attr *uattr,
|
|||
0, uattr->len - len);
|
||||
}
|
||||
|
||||
static int uverbs_set_output(const struct uverbs_attr_bundle *bundle,
|
||||
const struct uverbs_attr *attr)
|
||||
{
|
||||
struct bundle_priv *pbundle =
|
||||
container_of(bundle, struct bundle_priv, bundle);
|
||||
u16 flags;
|
||||
|
||||
flags = pbundle->uattrs[attr->ptr_attr.uattr_idx].flags |
|
||||
UVERBS_ATTR_F_VALID_OUTPUT;
|
||||
if (put_user(flags,
|
||||
&pbundle->user_attrs[attr->ptr_attr.uattr_idx].flags))
|
||||
return -EFAULT;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int uverbs_process_idrs_array(struct bundle_priv *pbundle,
|
||||
const struct uverbs_api_attr *attr_uapi,
|
||||
struct uverbs_objs_arr_attr *attr,
|
||||
|
@ -455,6 +470,19 @@ static int ib_uverbs_run_method(struct bundle_priv *pbundle,
|
|||
ret = handler(&pbundle->bundle);
|
||||
}
|
||||
|
||||
/*
|
||||
* Until the drivers are revised to use the bundle directly we have to
|
||||
* assume that the driver wrote to its UHW_OUT and flag userspace
|
||||
* appropriately.
|
||||
*/
|
||||
if (!ret && pbundle->method_elm->has_udata) {
|
||||
const struct uverbs_attr *attr =
|
||||
uverbs_attr_get(&pbundle->bundle, UVERBS_ATTR_UHW_OUT);
|
||||
|
||||
if (!IS_ERR(attr))
|
||||
ret = uverbs_set_output(&pbundle->bundle, attr);
|
||||
}
|
||||
|
||||
/*
|
||||
* EPROTONOSUPPORT is ONLY to be returned if the ioctl framework can
|
||||
* not invoke the method because the request is not supported. No
|
||||
|
@ -706,10 +734,7 @@ void uverbs_fill_udata(struct uverbs_attr_bundle *bundle,
|
|||
int uverbs_copy_to(const struct uverbs_attr_bundle *bundle, size_t idx,
|
||||
const void *from, size_t size)
|
||||
{
|
||||
struct bundle_priv *pbundle =
|
||||
container_of(bundle, struct bundle_priv, bundle);
|
||||
const struct uverbs_attr *attr = uverbs_attr_get(bundle, idx);
|
||||
u16 flags;
|
||||
size_t min_size;
|
||||
|
||||
if (IS_ERR(attr))
|
||||
|
@ -719,16 +744,25 @@ int uverbs_copy_to(const struct uverbs_attr_bundle *bundle, size_t idx,
|
|||
if (copy_to_user(u64_to_user_ptr(attr->ptr_attr.data), from, min_size))
|
||||
return -EFAULT;
|
||||
|
||||
flags = pbundle->uattrs[attr->ptr_attr.uattr_idx].flags |
|
||||
UVERBS_ATTR_F_VALID_OUTPUT;
|
||||
if (put_user(flags,
|
||||
&pbundle->user_attrs[attr->ptr_attr.uattr_idx].flags))
|
||||
return -EFAULT;
|
||||
|
||||
return 0;
|
||||
return uverbs_set_output(bundle, attr);
|
||||
}
|
||||
EXPORT_SYMBOL(uverbs_copy_to);
|
||||
|
||||
|
||||
/*
|
||||
* This is only used if the caller has directly used copy_to_use to write the
|
||||
* data. It signals to user space that the buffer is filled in.
|
||||
*/
|
||||
int uverbs_output_written(const struct uverbs_attr_bundle *bundle, size_t idx)
|
||||
{
|
||||
const struct uverbs_attr *attr = uverbs_attr_get(bundle, idx);
|
||||
|
||||
if (IS_ERR(attr))
|
||||
return PTR_ERR(attr);
|
||||
|
||||
return uverbs_set_output(bundle, attr);
|
||||
}
|
||||
|
||||
int _uverbs_get_const(s64 *to, const struct uverbs_attr_bundle *attrs_bundle,
|
||||
size_t idx, s64 lower_bound, u64 upper_bound,
|
||||
s64 *def_val)
|
||||
|
@ -757,8 +791,10 @@ int uverbs_copy_to_struct_or_zero(const struct uverbs_attr_bundle *bundle,
|
|||
{
|
||||
const struct uverbs_attr *attr = uverbs_attr_get(bundle, idx);
|
||||
|
||||
if (clear_user(u64_to_user_ptr(attr->ptr_attr.data),
|
||||
attr->ptr_attr.len))
|
||||
return -EFAULT;
|
||||
if (size < attr->ptr_attr.len) {
|
||||
if (clear_user(u64_to_user_ptr(attr->ptr_attr.data) + size,
|
||||
attr->ptr_attr.len - size))
|
||||
return -EFAULT;
|
||||
}
|
||||
return uverbs_copy_to(bundle, idx, from, size);
|
||||
}
|
||||
|
|
|
@ -690,6 +690,7 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
|
|||
|
||||
buf += sizeof(hdr);
|
||||
|
||||
memset(bundle.attr_present, 0, sizeof(bundle.attr_present));
|
||||
bundle.ufile = file;
|
||||
if (!method_elm->is_ex) {
|
||||
size_t in_len = hdr.in_words * 4 - sizeof(hdr);
|
||||
|
|
|
@ -534,7 +534,7 @@ static struct ib_qp *mthca_create_qp(struct ib_pd *pd,
|
|||
{
|
||||
struct mthca_ucontext *context;
|
||||
|
||||
qp = kmalloc(sizeof *qp, GFP_KERNEL);
|
||||
qp = kzalloc(sizeof(*qp), GFP_KERNEL);
|
||||
if (!qp)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
|
@ -600,7 +600,7 @@ static struct ib_qp *mthca_create_qp(struct ib_pd *pd,
|
|||
if (udata)
|
||||
return ERR_PTR(-EINVAL);
|
||||
|
||||
qp = kmalloc(sizeof (struct mthca_sqp), GFP_KERNEL);
|
||||
qp = kzalloc(sizeof(struct mthca_sqp), GFP_KERNEL);
|
||||
if (!qp)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
|
|
|
@ -427,7 +427,40 @@ static inline enum ib_qp_state pvrdma_qp_state_to_ib(enum pvrdma_qp_state state)
|
|||
|
||||
static inline enum pvrdma_wr_opcode ib_wr_opcode_to_pvrdma(enum ib_wr_opcode op)
|
||||
{
|
||||
return (enum pvrdma_wr_opcode)op;
|
||||
switch (op) {
|
||||
case IB_WR_RDMA_WRITE:
|
||||
return PVRDMA_WR_RDMA_WRITE;
|
||||
case IB_WR_RDMA_WRITE_WITH_IMM:
|
||||
return PVRDMA_WR_RDMA_WRITE_WITH_IMM;
|
||||
case IB_WR_SEND:
|
||||
return PVRDMA_WR_SEND;
|
||||
case IB_WR_SEND_WITH_IMM:
|
||||
return PVRDMA_WR_SEND_WITH_IMM;
|
||||
case IB_WR_RDMA_READ:
|
||||
return PVRDMA_WR_RDMA_READ;
|
||||
case IB_WR_ATOMIC_CMP_AND_SWP:
|
||||
return PVRDMA_WR_ATOMIC_CMP_AND_SWP;
|
||||
case IB_WR_ATOMIC_FETCH_AND_ADD:
|
||||
return PVRDMA_WR_ATOMIC_FETCH_AND_ADD;
|
||||
case IB_WR_LSO:
|
||||
return PVRDMA_WR_LSO;
|
||||
case IB_WR_SEND_WITH_INV:
|
||||
return PVRDMA_WR_SEND_WITH_INV;
|
||||
case IB_WR_RDMA_READ_WITH_INV:
|
||||
return PVRDMA_WR_RDMA_READ_WITH_INV;
|
||||
case IB_WR_LOCAL_INV:
|
||||
return PVRDMA_WR_LOCAL_INV;
|
||||
case IB_WR_REG_MR:
|
||||
return PVRDMA_WR_FAST_REG_MR;
|
||||
case IB_WR_MASKED_ATOMIC_CMP_AND_SWP:
|
||||
return PVRDMA_WR_MASKED_ATOMIC_CMP_AND_SWP;
|
||||
case IB_WR_MASKED_ATOMIC_FETCH_AND_ADD:
|
||||
return PVRDMA_WR_MASKED_ATOMIC_FETCH_AND_ADD;
|
||||
case IB_WR_REG_SIG_MR:
|
||||
return PVRDMA_WR_REG_SIG_MR;
|
||||
default:
|
||||
return PVRDMA_WR_ERROR;
|
||||
}
|
||||
}
|
||||
|
||||
static inline enum ib_wc_status pvrdma_wc_status_to_ib(
|
||||
|
|
|
@ -721,6 +721,12 @@ int pvrdma_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
|
|||
wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM)
|
||||
wqe_hdr->ex.imm_data = wr->ex.imm_data;
|
||||
|
||||
if (unlikely(wqe_hdr->opcode == PVRDMA_WR_ERROR)) {
|
||||
*bad_wr = wr;
|
||||
ret = -EINVAL;
|
||||
goto out;
|
||||
}
|
||||
|
||||
switch (qp->ibqp.qp_type) {
|
||||
case IB_QPT_GSI:
|
||||
case IB_QPT_UD:
|
||||
|
|
|
@ -78,6 +78,7 @@ enum pvrdma_wr_opcode {
|
|||
PVRDMA_WR_MASKED_ATOMIC_FETCH_AND_ADD,
|
||||
PVRDMA_WR_BIND_MW,
|
||||
PVRDMA_WR_REG_SIG_MR,
|
||||
PVRDMA_WR_ERROR,
|
||||
};
|
||||
|
||||
enum pvrdma_wc_status {
|
||||
|
|
Loading…
Link in new issue