[PATCH] IB: Add user-supplied context to userspace CM ABI

- Add user-specified context to all uCM events.  Users will not retrieve
  any events associated with the context after destroying the corresponding
  cm_id.
- Provide the ib_cm_init_qp_attr() call to userspace clients of the CM.
  This call may be used to set QP attributes properly before modifying the
  QP (see the sketch after this list).
- Fix some error-handling, synchronization, and cleanup issues.
- Perform some minor code cleanup.
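
As a quick illustration of the new call, here is a minimal sketch of how a
userspace client might issue IB_USER_CM_CMD_INIT_QP_ATTR before each QP
transition.  The device path, the hdr.cmd/in/out framing, and the error
handling are assumptions about the surrounding uCM write() ABI, not
something this patch spells out:

	#include <stdint.h>
	#include <string.h>
	#include <unistd.h>
	#include <rdma/ib_user_cm.h>

	/*
	 * Hedged sketch: ask the kernel CM to fill in QP attributes for the
	 * given connection id.  'ucm_fd' is an already-open uCM device fd
	 * (conventionally /dev/infiniband/ucm -- an assumption here).
	 */
	static int ucm_init_qp_attr(int ucm_fd, uint32_t cm_id,
				    uint32_t qp_state,
				    struct ib_ucm_init_qp_attr_resp *resp)
	{
		struct ib_ucm_cmd_hdr hdr;   /* assumed { cmd, in, out } layout */
		struct ib_ucm_init_qp_attr cmd;
		char buf[sizeof(hdr) + sizeof(cmd)];

		hdr.cmd = IB_USER_CM_CMD_INIT_QP_ATTR;
		hdr.in  = sizeof(cmd);
		hdr.out = sizeof(*resp);

		cmd.response = (uintptr_t) resp;   /* kernel copy_to_user target */
		cmd.id       = cm_id;
		cmd.qp_state = qp_state;           /* e.g. the RTR state value */

		memcpy(buf, &hdr, sizeof(hdr));
		memcpy(buf + sizeof(hdr), &cmd, sizeof(cmd));

		if (write(ucm_fd, buf, sizeof(buf)) != sizeof(buf))
			return -1;
		return 0;   /* resp->qp_attr_mask says which fields are valid */
	}

A client would typically invoke this once per transition (INIT, RTR, RTS)
and then apply only the attributes flagged in resp->qp_attr_mask to its QP.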

Signed-off-by: Sean Hefty <sean.hefty@intel.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
Sean Hefty, 2005-09-01 09:28:03 -07:00; committed by Roland Dreier
Parent: 1d6801f9dd
Commit: 0b2b35f681
3 changed files: 262 additions and 110 deletions

drivers/infiniband/core/ucm.c

@@ -72,7 +72,6 @@ enum {
static struct semaphore ctx_id_mutex;
static struct idr ctx_id_table;
static int ctx_id_rover = 0;
static struct ib_ucm_context *ib_ucm_ctx_get(struct ib_ucm_file *file, int id)
{
@@ -97,33 +96,16 @@ static void ib_ucm_ctx_put(struct ib_ucm_context *ctx)
wake_up(&ctx->wait);
}
static ssize_t ib_ucm_destroy_ctx(struct ib_ucm_file *file, int id)
static inline int ib_ucm_new_cm_id(int event)
{
return event == IB_CM_REQ_RECEIVED || event == IB_CM_SIDR_REQ_RECEIVED;
}
static void ib_ucm_cleanup_events(struct ib_ucm_context *ctx)
{
struct ib_ucm_context *ctx;
struct ib_ucm_event *uevent;
down(&ctx_id_mutex);
ctx = idr_find(&ctx_id_table, id);
if (!ctx)
ctx = ERR_PTR(-ENOENT);
else if (ctx->file != file)
ctx = ERR_PTR(-EINVAL);
else
idr_remove(&ctx_id_table, ctx->id);
up(&ctx_id_mutex);
if (IS_ERR(ctx))
return PTR_ERR(ctx);
atomic_dec(&ctx->ref);
wait_event(ctx->wait, !atomic_read(&ctx->ref));
/* No new events will be generated after destroying the cm_id. */
if (!IS_ERR(ctx->cm_id))
ib_destroy_cm_id(ctx->cm_id);
/* Cleanup events not yet reported to the user. */
down(&file->mutex);
down(&ctx->file->mutex);
list_del(&ctx->file_list);
while (!list_empty(&ctx->events)) {
@@ -133,15 +115,12 @@ static ssize_t ib_ucm_destroy_ctx(struct ib_ucm_file *file, int id)
list_del(&uevent->ctx_list);
/* clear incoming connections. */
if (uevent->cm_id)
if (ib_ucm_new_cm_id(uevent->resp.event))
ib_destroy_cm_id(uevent->cm_id);
kfree(uevent);
}
up(&file->mutex);
kfree(ctx);
return 0;
up(&ctx->file->mutex);
}
static struct ib_ucm_context *ib_ucm_ctx_alloc(struct ib_ucm_file *file)
@@ -153,36 +132,31 @@ static struct ib_ucm_context *ib_ucm_ctx_alloc(struct ib_ucm_file *file)
if (!ctx)
return NULL;
memset(ctx, 0, sizeof *ctx);
atomic_set(&ctx->ref, 1);
init_waitqueue_head(&ctx->wait);
ctx->file = file;
INIT_LIST_HEAD(&ctx->events);
list_add_tail(&ctx->file_list, &file->ctxs);
do {
result = idr_pre_get(&ctx_id_table, GFP_KERNEL);
if (!result)
goto error;
ctx_id_rover = (ctx_id_rover + 1) & INT_MAX;
retry:
result = idr_pre_get(&ctx_id_table, GFP_KERNEL);
if (!result)
goto error;
down(&ctx_id_mutex);
result = idr_get_new(&ctx_id_table, ctx, &ctx->id);
up(&ctx_id_mutex);
} while (result == -EAGAIN);
down(&ctx_id_mutex);
result = idr_get_new_above(&ctx_id_table, ctx, ctx_id_rover, &ctx->id);
up(&ctx_id_mutex);
if (result == -EAGAIN)
goto retry;
if (result)
goto error;
list_add_tail(&ctx->file_list, &file->ctxs);
ucm_dbg("Allocated CM ID <%d>\n", ctx->id);
return ctx;
error:
list_del(&ctx->file_list);
kfree(ctx);
error:
kfree(ctx);
return NULL;
}
/*
@@ -219,12 +193,9 @@ static void ib_ucm_event_path_get(struct ib_ucm_path_rec *upath,
kpath->packet_life_time_selector;
}
static void ib_ucm_event_req_get(struct ib_ucm_context *ctx,
struct ib_ucm_req_event_resp *ureq,
static void ib_ucm_event_req_get(struct ib_ucm_req_event_resp *ureq,
struct ib_cm_req_event_param *kreq)
{
ureq->listen_id = ctx->id;
ureq->remote_ca_guid = kreq->remote_ca_guid;
ureq->remote_qkey = kreq->remote_qkey;
ureq->remote_qpn = kreq->remote_qpn;
@@ -259,14 +230,6 @@ static void ib_ucm_event_rep_get(struct ib_ucm_rep_event_resp *urep,
urep->srq = krep->srq;
}
static void ib_ucm_event_sidr_req_get(struct ib_ucm_context *ctx,
struct ib_ucm_sidr_req_event_resp *ureq,
struct ib_cm_sidr_req_event_param *kreq)
{
ureq->listen_id = ctx->id;
ureq->pkey = kreq->pkey;
}
static void ib_ucm_event_sidr_rep_get(struct ib_ucm_sidr_rep_event_resp *urep,
struct ib_cm_sidr_rep_event_param *krep)
{
@@ -275,15 +238,14 @@ static void ib_ucm_event_sidr_rep_get(struct ib_ucm_sidr_rep_event_resp *urep,
urep->qpn = krep->qpn;
};
static int ib_ucm_event_process(struct ib_ucm_context *ctx,
struct ib_cm_event *evt,
static int ib_ucm_event_process(struct ib_cm_event *evt,
struct ib_ucm_event *uvt)
{
void *info = NULL;
switch (evt->event) {
case IB_CM_REQ_RECEIVED:
ib_ucm_event_req_get(ctx, &uvt->resp.u.req_resp,
ib_ucm_event_req_get(&uvt->resp.u.req_resp,
&evt->param.req_rcvd);
uvt->data_len = IB_CM_REQ_PRIVATE_DATA_SIZE;
uvt->resp.present = IB_UCM_PRES_PRIMARY;
@@ -331,8 +293,8 @@ static int ib_ucm_event_process(struct ib_ucm_context *ctx,
info = evt->param.apr_rcvd.apr_info;
break;
case IB_CM_SIDR_REQ_RECEIVED:
ib_ucm_event_sidr_req_get(ctx, &uvt->resp.u.sidr_req_resp,
&evt->param.sidr_req_rcvd);
uvt->resp.u.sidr_req_resp.pkey =
evt->param.sidr_req_rcvd.pkey;
uvt->data_len = IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE;
break;
case IB_CM_SIDR_REP_RECEIVED:
@@ -378,31 +340,24 @@ static int ib_ucm_event_handler(struct ib_cm_id *cm_id,
struct ib_ucm_event *uevent;
struct ib_ucm_context *ctx;
int result = 0;
int id;
ctx = cm_id->context;
if (event->event == IB_CM_REQ_RECEIVED ||
event->event == IB_CM_SIDR_REQ_RECEIVED)
id = IB_UCM_CM_ID_INVALID;
else
id = ctx->id;
uevent = kmalloc(sizeof(*uevent), GFP_KERNEL);
if (!uevent)
goto err1;
memset(uevent, 0, sizeof(*uevent));
uevent->resp.id = id;
uevent->ctx = ctx;
uevent->cm_id = cm_id;
uevent->resp.uid = ctx->uid;
uevent->resp.id = ctx->id;
uevent->resp.event = event->event;
result = ib_ucm_event_process(ctx, event, uevent);
result = ib_ucm_event_process(event, uevent);
if (result)
goto err2;
uevent->ctx = ctx;
uevent->cm_id = (id == IB_UCM_CM_ID_INVALID) ? cm_id : NULL;
down(&ctx->file->mutex);
list_add_tail(&uevent->file_list, &ctx->file->events);
list_add_tail(&uevent->ctx_list, &ctx->events);
@@ -414,7 +369,7 @@ err2:
kfree(uevent);
err1:
/* Destroy new cm_id's */
return (id == IB_UCM_CM_ID_INVALID);
return ib_ucm_new_cm_id(event->event);
}
static ssize_t ib_ucm_event(struct ib_ucm_file *file,
@@ -423,7 +378,7 @@ static ssize_t ib_ucm_event(struct ib_ucm_file *file,
{
struct ib_ucm_context *ctx;
struct ib_ucm_event_get cmd;
struct ib_ucm_event *uevent = NULL;
struct ib_ucm_event *uevent;
int result = 0;
DEFINE_WAIT(wait);
@@ -436,7 +391,6 @@ static ssize_t ib_ucm_event(struct ib_ucm_file *file,
* wait
*/
down(&file->mutex);
while (list_empty(&file->events)) {
if (file->filp->f_flags & O_NONBLOCK) {
@@ -463,21 +417,18 @@ static ssize_t ib_ucm_event(struct ib_ucm_file *file,
uevent = list_entry(file->events.next, struct ib_ucm_event, file_list);
if (!uevent->cm_id)
goto user;
if (ib_ucm_new_cm_id(uevent->resp.event)) {
ctx = ib_ucm_ctx_alloc(file);
if (!ctx) {
result = -ENOMEM;
goto done;
}
ctx = ib_ucm_ctx_alloc(file);
if (!ctx) {
result = -ENOMEM;
goto done;
ctx->cm_id = uevent->cm_id;
ctx->cm_id->context = ctx;
uevent->resp.id = ctx->id;
}
ctx->cm_id = uevent->cm_id;
ctx->cm_id->context = ctx;
uevent->resp.id = ctx->id;
user:
if (copy_to_user((void __user *)(unsigned long)cmd.response,
&uevent->resp, sizeof(uevent->resp))) {
result = -EFAULT;
@@ -485,12 +436,10 @@ user:
}
if (uevent->data) {
if (cmd.data_len < uevent->data_len) {
result = -ENOMEM;
goto done;
}
if (copy_to_user((void __user *)(unsigned long)cmd.data,
uevent->data, uevent->data_len)) {
result = -EFAULT;
@@ -499,12 +448,10 @@ user:
}
if (uevent->info) {
if (cmd.info_len < uevent->info_len) {
result = -ENOMEM;
goto done;
}
if (copy_to_user((void __user *)(unsigned long)cmd.info,
uevent->info, uevent->info_len)) {
result = -EFAULT;
@@ -514,6 +461,7 @@ user:
list_del(&uevent->file_list);
list_del(&uevent->ctx_list);
uevent->ctx->events_reported++;
kfree(uevent->data);
kfree(uevent->info);
@@ -545,6 +493,7 @@ static ssize_t ib_ucm_create_id(struct ib_ucm_file *file,
if (!ctx)
return -ENOMEM;
ctx->uid = cmd.uid;
ctx->cm_id = ib_create_cm_id(ib_ucm_event_handler, ctx);
if (IS_ERR(ctx->cm_id)) {
result = PTR_ERR(ctx->cm_id);
@@ -561,7 +510,14 @@ static ssize_t ib_ucm_create_id(struct ib_ucm_file *file,
return 0;
err:
ib_ucm_destroy_ctx(file, ctx->id);
down(&ctx_id_mutex);
idr_remove(&ctx_id_table, ctx->id);
up(&ctx_id_mutex);
if (!IS_ERR(ctx->cm_id))
ib_destroy_cm_id(ctx->cm_id);
kfree(ctx);
return result;
}
@@ -570,11 +526,44 @@ static ssize_t ib_ucm_destroy_id(struct ib_ucm_file *file,
int in_len, int out_len)
{
struct ib_ucm_destroy_id cmd;
struct ib_ucm_destroy_id_resp resp;
struct ib_ucm_context *ctx;
int result = 0;
if (out_len < sizeof(resp))
return -ENOSPC;
if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
return -EFAULT;
return ib_ucm_destroy_ctx(file, cmd.id);
down(&ctx_id_mutex);
ctx = idr_find(&ctx_id_table, cmd.id);
if (!ctx)
ctx = ERR_PTR(-ENOENT);
else if (ctx->file != file)
ctx = ERR_PTR(-EINVAL);
else
idr_remove(&ctx_id_table, ctx->id);
up(&ctx_id_mutex);
if (IS_ERR(ctx))
return PTR_ERR(ctx);
atomic_dec(&ctx->ref);
wait_event(ctx->wait, !atomic_read(&ctx->ref));
/* No new events will be generated after destroying the cm_id. */
ib_destroy_cm_id(ctx->cm_id);
/* Cleanup events not yet reported to the user. */
ib_ucm_cleanup_events(ctx);
resp.events_reported = ctx->events_reported;
if (copy_to_user((void __user *)(unsigned long)cmd.response,
&resp, sizeof(resp)))
result = -EFAULT;
kfree(ctx);
return result;
}
static ssize_t ib_ucm_attr_id(struct ib_ucm_file *file,
@@ -609,6 +598,98 @@ static ssize_t ib_ucm_attr_id(struct ib_ucm_file *file,
return result;
}
static void ib_ucm_copy_ah_attr(struct ib_ucm_ah_attr *dest_attr,
struct ib_ah_attr *src_attr)
{
memcpy(dest_attr->grh_dgid, src_attr->grh.dgid.raw,
sizeof src_attr->grh.dgid);
dest_attr->grh_flow_label = src_attr->grh.flow_label;
dest_attr->grh_sgid_index = src_attr->grh.sgid_index;
dest_attr->grh_hop_limit = src_attr->grh.hop_limit;
dest_attr->grh_traffic_class = src_attr->grh.traffic_class;
dest_attr->dlid = src_attr->dlid;
dest_attr->sl = src_attr->sl;
dest_attr->src_path_bits = src_attr->src_path_bits;
dest_attr->static_rate = src_attr->static_rate;
dest_attr->is_global = (src_attr->ah_flags & IB_AH_GRH);
dest_attr->port_num = src_attr->port_num;
}
static void ib_ucm_copy_qp_attr(struct ib_ucm_init_qp_attr_resp *dest_attr,
struct ib_qp_attr *src_attr)
{
dest_attr->cur_qp_state = src_attr->cur_qp_state;
dest_attr->path_mtu = src_attr->path_mtu;
dest_attr->path_mig_state = src_attr->path_mig_state;
dest_attr->qkey = src_attr->qkey;
dest_attr->rq_psn = src_attr->rq_psn;
dest_attr->sq_psn = src_attr->sq_psn;
dest_attr->dest_qp_num = src_attr->dest_qp_num;
dest_attr->qp_access_flags = src_attr->qp_access_flags;
dest_attr->max_send_wr = src_attr->cap.max_send_wr;
dest_attr->max_recv_wr = src_attr->cap.max_recv_wr;
dest_attr->max_send_sge = src_attr->cap.max_send_sge;
dest_attr->max_recv_sge = src_attr->cap.max_recv_sge;
dest_attr->max_inline_data = src_attr->cap.max_inline_data;
ib_ucm_copy_ah_attr(&dest_attr->ah_attr, &src_attr->ah_attr);
ib_ucm_copy_ah_attr(&dest_attr->alt_ah_attr, &src_attr->alt_ah_attr);
dest_attr->pkey_index = src_attr->pkey_index;
dest_attr->alt_pkey_index = src_attr->alt_pkey_index;
dest_attr->en_sqd_async_notify = src_attr->en_sqd_async_notify;
dest_attr->sq_draining = src_attr->sq_draining;
dest_attr->max_rd_atomic = src_attr->max_rd_atomic;
dest_attr->max_dest_rd_atomic = src_attr->max_dest_rd_atomic;
dest_attr->min_rnr_timer = src_attr->min_rnr_timer;
dest_attr->port_num = src_attr->port_num;
dest_attr->timeout = src_attr->timeout;
dest_attr->retry_cnt = src_attr->retry_cnt;
dest_attr->rnr_retry = src_attr->rnr_retry;
dest_attr->alt_port_num = src_attr->alt_port_num;
dest_attr->alt_timeout = src_attr->alt_timeout;
}
static ssize_t ib_ucm_init_qp_attr(struct ib_ucm_file *file,
const char __user *inbuf,
int in_len, int out_len)
{
struct ib_ucm_init_qp_attr_resp resp;
struct ib_ucm_init_qp_attr cmd;
struct ib_ucm_context *ctx;
struct ib_qp_attr qp_attr;
int result = 0;
if (out_len < sizeof(resp))
return -ENOSPC;
if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
return -EFAULT;
ctx = ib_ucm_ctx_get(file, cmd.id);
if (IS_ERR(ctx))
return PTR_ERR(ctx);
resp.qp_attr_mask = 0;
memset(&qp_attr, 0, sizeof qp_attr);
qp_attr.qp_state = cmd.qp_state;
result = ib_cm_init_qp_attr(ctx->cm_id, &qp_attr, &resp.qp_attr_mask);
if (result)
goto out;
ib_ucm_copy_qp_attr(&resp, &qp_attr);
if (copy_to_user((void __user *)(unsigned long)cmd.response,
&resp, sizeof(resp)))
result = -EFAULT;
out:
ib_ucm_ctx_put(ctx);
return result;
}
static ssize_t ib_ucm_listen(struct ib_ucm_file *file,
const char __user *inbuf,
int in_len, int out_len)
@@ -808,6 +889,7 @@ static ssize_t ib_ucm_send_rep(struct ib_ucm_file *file,
ctx = ib_ucm_ctx_get(file, cmd.id);
if (!IS_ERR(ctx)) {
ctx->uid = cmd.uid;
result = ib_send_cm_rep(ctx->cm_id, &param);
ib_ucm_ctx_put(ctx);
} else
@@ -1086,6 +1168,7 @@ static ssize_t (*ucm_cmd_table[])(struct ib_ucm_file *file,
[IB_USER_CM_CMD_SEND_SIDR_REQ] = ib_ucm_send_sidr_req,
[IB_USER_CM_CMD_SEND_SIDR_REP] = ib_ucm_send_sidr_rep,
[IB_USER_CM_CMD_EVENT] = ib_ucm_event,
[IB_USER_CM_CMD_INIT_QP_ATTR] = ib_ucm_init_qp_attr,
};
static ssize_t ib_ucm_write(struct file *filp, const char __user *buf,
@@ -1161,12 +1244,18 @@ static int ib_ucm_close(struct inode *inode, struct file *filp)
down(&file->mutex);
while (!list_empty(&file->ctxs)) {
ctx = list_entry(file->ctxs.next,
struct ib_ucm_context, file_list);
up(&file->mutex);
ib_ucm_destroy_ctx(file, ctx->id);
down(&ctx_id_mutex);
idr_remove(&ctx_id_table, ctx->id);
up(&ctx_id_mutex);
ib_destroy_cm_id(ctx->cm_id);
ib_ucm_cleanup_events(ctx);
kfree(ctx);
down(&file->mutex);
}
up(&file->mutex);
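
One userspace-visible consequence of the reworked destroy path above is the
new events_reported count returned by IB_USER_CM_CMD_DESTROY_ID.  A hedged
sketch of how a library might use it, reusing the same hdr-plus-payload
write() framing as the earlier sketch:

	#include <stdint.h>
	#include <string.h>
	#include <unistd.h>
	#include <rdma/ib_user_cm.h>

	/* Hedged sketch: destroy a cm_id and learn how many of its events
	 * the kernel handed out before teardown. */
	static int ucm_destroy_id(int ucm_fd, uint32_t id,
				  uint32_t *events_reported)
	{
		struct ib_ucm_cmd_hdr hdr;
		struct ib_ucm_destroy_id cmd;
		struct ib_ucm_destroy_id_resp resp;
		char buf[sizeof(hdr) + sizeof(cmd)];

		hdr.cmd = IB_USER_CM_CMD_DESTROY_ID;
		hdr.in  = sizeof(cmd);
		hdr.out = sizeof(resp);

		cmd.response = (uintptr_t) &resp;
		cmd.id       = id;

		memcpy(buf, &hdr, sizeof(hdr));
		memcpy(buf + sizeof(hdr), &cmd, sizeof(cmd));
		if (write(ucm_fd, buf, sizeof(buf)) != sizeof(buf))
			return -1;

		*events_reported = resp.events_reported;
		return 0;
	}

Because ib_destroy_cm_id() runs before the count is read back, the value is
final: a library that counts the events it has consumed for an id can wait
for the difference to drain from its own queue before freeing its
per-connection state.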

drivers/infiniband/core/ucm.h

@@ -1,5 +1,6 @@
/*
* Copyright (c) 2005 Topspin Communications. All rights reserved.
* Copyright (c) 2005 Intel Corporation. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -43,8 +44,6 @@
#include <rdma/ib_cm.h>
#include <rdma/ib_user_cm.h>
#define IB_UCM_CM_ID_INVALID 0xffffffff
struct ib_ucm_file {
struct semaphore mutex;
struct file *filp;
@@ -58,9 +57,11 @@ struct ib_ucm_context {
int id;
wait_queue_head_t wait;
atomic_t ref;
int events_reported;
struct ib_ucm_file *file;
struct ib_cm_id *cm_id;
__u64 uid;
struct list_head events; /* list of pending events. */
struct list_head file_list; /* member in file ctx list */
@@ -71,16 +72,12 @@ struct ib_ucm_event {
struct list_head file_list; /* member in file event list */
struct list_head ctx_list; /* member in ctx event list */
struct ib_cm_id *cm_id;
struct ib_ucm_event_resp resp;
void *data;
void *info;
int data_len;
int info_len;
/*
* new connection identifiers needs to be saved until
* userspace can get a handle on them.
*/
struct ib_cm_id *cm_id;
};
#endif /* UCM_H */

include/rdma/ib_user_cm.h

@@ -1,5 +1,6 @@
/*
* Copyright (c) 2005 Topspin Communications. All rights reserved.
* Copyright (c) 2005 Intel Corporation. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -37,7 +38,7 @@
#include <linux/types.h>
#define IB_USER_CM_ABI_VERSION 1
#define IB_USER_CM_ABI_VERSION 2
enum {
IB_USER_CM_CMD_CREATE_ID,
@@ -60,6 +61,7 @@ enum {
IB_USER_CM_CMD_SEND_SIDR_REP,
IB_USER_CM_CMD_EVENT,
IB_USER_CM_CMD_INIT_QP_ATTR,
};
/*
* command ABI structures.
@@ -71,6 +73,7 @@ struct ib_ucm_cmd_hdr {
};
struct ib_ucm_create_id {
__u64 uid;
__u64 response;
};
@@ -79,9 +82,14 @@ struct ib_ucm_create_id_resp {
};
struct ib_ucm_destroy_id {
__u64 response;
__u32 id;
};
struct ib_ucm_destroy_id_resp {
__u32 events_reported;
};
struct ib_ucm_attr_id {
__u64 response;
__u32 id;
@@ -94,6 +102,64 @@ struct ib_ucm_attr_id_resp {
__be32 remote_id;
};
struct ib_ucm_init_qp_attr {
__u64 response;
__u32 id;
__u32 qp_state;
};
struct ib_ucm_ah_attr {
__u8 grh_dgid[16];
__u32 grh_flow_label;
__u16 dlid;
__u16 reserved;
__u8 grh_sgid_index;
__u8 grh_hop_limit;
__u8 grh_traffic_class;
__u8 sl;
__u8 src_path_bits;
__u8 static_rate;
__u8 is_global;
__u8 port_num;
};
struct ib_ucm_init_qp_attr_resp {
__u32 qp_attr_mask;
__u32 qp_state;
__u32 cur_qp_state;
__u32 path_mtu;
__u32 path_mig_state;
__u32 qkey;
__u32 rq_psn;
__u32 sq_psn;
__u32 dest_qp_num;
__u32 qp_access_flags;
struct ib_ucm_ah_attr ah_attr;
struct ib_ucm_ah_attr alt_ah_attr;
/* ib_qp_cap */
__u32 max_send_wr;
__u32 max_recv_wr;
__u32 max_send_sge;
__u32 max_recv_sge;
__u32 max_inline_data;
__u16 pkey_index;
__u16 alt_pkey_index;
__u8 en_sqd_async_notify;
__u8 sq_draining;
__u8 max_rd_atomic;
__u8 max_dest_rd_atomic;
__u8 min_rnr_timer;
__u8 port_num;
__u8 timeout;
__u8 retry_cnt;
__u8 rnr_retry;
__u8 alt_port_num;
__u8 alt_timeout;
};
struct ib_ucm_listen {
__be64 service_id;
__be64 service_mask;
@@ -157,6 +223,7 @@ struct ib_ucm_req {
};
struct ib_ucm_rep {
__u64 uid;
__u64 data;
__u32 id;
__u32 qpn;
@@ -232,7 +299,6 @@ struct ib_ucm_event_get {
};
struct ib_ucm_req_event_resp {
__u32 listen_id;
/* device */
/* port */
struct ib_ucm_path_rec primary_path;
@@ -287,7 +353,6 @@ struct ib_ucm_apr_event_resp {
};
struct ib_ucm_sidr_req_event_resp {
__u32 listen_id;
/* device */
/* port */
__u16 pkey;
@@ -307,6 +372,7 @@ struct ib_ucm_sidr_rep_event_resp {
#define IB_UCM_PRES_ALTERNATE 0x08
struct ib_ucm_event_resp {
__u64 uid;
__u32 id;
__u32 event;
__u32 present;
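
Taken together, the uid plumbing means every event response now carries the
caller's cookie back to userspace.  A minimal sketch of an event read that
recovers per-connection state from resp.uid; struct my_conn and the buffer
handling are illustrative assumptions, not part of this ABI:

	#include <stdint.h>
	#include <string.h>
	#include <unistd.h>
	#include <rdma/ib_user_cm.h>

	struct my_conn;   /* hypothetical per-connection state */

	/* Hedged sketch: fetch the next CM event and map it back to the
	 * caller's own state via the uid cookie. */
	static struct my_conn *ucm_next_event(int ucm_fd,
					      struct ib_ucm_event_resp *resp,
					      void *data, int data_len)
	{
		struct ib_ucm_cmd_hdr hdr;
		struct ib_ucm_event_get cmd;
		char buf[sizeof(hdr) + sizeof(cmd)];

		memset(&cmd, 0, sizeof(cmd));
		cmd.response = (uintptr_t) resp;
		cmd.data     = (uintptr_t) data;
		cmd.data_len = data_len;

		hdr.cmd = IB_USER_CM_CMD_EVENT;
		hdr.in  = sizeof(cmd);
		hdr.out = sizeof(*resp);

		memcpy(buf, &hdr, sizeof(hdr));
		memcpy(buf + sizeof(hdr), &cmd, sizeof(cmd));
		if (write(ucm_fd, buf, sizeof(buf)) != sizeof(buf))
			return NULL;   /* blocks unless the fd is O_NONBLOCK */

		/* uid round-trips the cookie passed in at create_id/rep time. */
		return (struct my_conn *) (uintptr_t) resp->uid;
	}

For IB_CM_REQ_RECEIVED and IB_CM_SIDR_REQ_RECEIVED the uid is the
listener's, while resp.id names the newly created connection id that the
caller must eventually destroy.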