RDMA/cma: Export enum cma_state in <rdma/rdma_cm.h>

Move cma.c's internal definition of enum cma_state to enum rdma_cm_state
in an exported header so that it can be exported via RDMA netlink.

Signed-off-by: Nir Muchtar <nirm@voltaire.com>
Signed-off-by: Roland Dreier <roland@purestorage.com>
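
Most of the diff below is a mechanical retyping inside cma.c's connection state machine. The helpers whose signatures change are small wrappers that test and swap id_priv->state under id_priv->lock; a sketch of the central one, reconstructed from the context lines visible in the hunks (the hunks themselves elide most of the body):

/*
 * Sketch of cma.c's state helper, reconstructed from the diff's
 * context lines; not itself part of this commit's hunks.  Every
 * transition goes through cma_comp() (test), cma_comp_exch()
 * (compare-and-exchange), or cma_exch() (unconditional exchange).
 */
static int cma_comp_exch(struct rdma_id_private *id_priv,
                         enum rdma_cm_state comp, enum rdma_cm_state exch)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&id_priv->lock, flags);
        if ((ret = (id_priv->state == comp)))
                id_priv->state = exch;
        spin_unlock_irqrestore(&id_priv->lock, flags);
        return ret;
}

Because every caller names the states explicitly (for example cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_LISTEN) in rdma_listen()), the rename touches nearly every function in the file while changing no control flow.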
Authored by Nir Muchtar on 2011-05-20 11:46:11 -07:00; committed by Roland Dreier
Parent: b2cbae2c24
Commit: 550e5ca77e
2 changed files with 94 additions and 90 deletions

--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c

@@ -89,20 +89,6 @@ struct cma_device {
         struct list_head        id_list;
 };
 
-enum cma_state {
-        CMA_IDLE,
-        CMA_ADDR_QUERY,
-        CMA_ADDR_RESOLVED,
-        CMA_ROUTE_QUERY,
-        CMA_ROUTE_RESOLVED,
-        CMA_CONNECT,
-        CMA_DISCONNECT,
-        CMA_ADDR_BOUND,
-        CMA_LISTEN,
-        CMA_DEVICE_REMOVAL,
-        CMA_DESTROYING
-};
-
 struct rdma_bind_list {
         struct idr              *ps;
         struct hlist_head       owners;
@@ -126,7 +112,7 @@ struct rdma_id_private {
         struct list_head        mc_list;
 
         int                     internal_id;
-        enum cma_state          state;
+        enum rdma_cm_state      state;
         spinlock_t              lock;
         struct mutex            qp_mutex;
 
@@ -165,8 +151,8 @@ struct cma_multicast {
 struct cma_work {
         struct work_struct      work;
         struct rdma_id_private  *id;
-        enum cma_state          old_state;
-        enum cma_state          new_state;
+        enum rdma_cm_state      old_state;
+        enum rdma_cm_state      new_state;
         struct rdma_cm_event    event;
 };
 
@@ -217,7 +203,7 @@ struct sdp_hah {
 #define CMA_VERSION 0x00
 #define SDP_MAJ_VERSION 0x2
 
-static int cma_comp(struct rdma_id_private *id_priv, enum cma_state comp)
+static int cma_comp(struct rdma_id_private *id_priv, enum rdma_cm_state comp)
 {
         unsigned long flags;
         int ret;
@@ -229,7 +215,7 @@ static int cma_comp(struct rdma_id_private *id_priv, enum cma_state comp)
 }
 
 static int cma_comp_exch(struct rdma_id_private *id_priv,
-                         enum cma_state comp, enum cma_state exch)
+                         enum rdma_cm_state comp, enum rdma_cm_state exch)
 {
         unsigned long flags;
         int ret;
@@ -241,11 +227,11 @@ static int cma_comp_exch(struct rdma_id_private *id_priv,
         return ret;
 }
 
-static enum cma_state cma_exch(struct rdma_id_private *id_priv,
-                               enum cma_state exch)
+static enum rdma_cm_state cma_exch(struct rdma_id_private *id_priv,
+                                   enum rdma_cm_state exch)
 {
         unsigned long flags;
-        enum cma_state old;
+        enum rdma_cm_state old;
 
         spin_lock_irqsave(&id_priv->lock, flags);
         old = id_priv->state;
@@ -413,7 +399,7 @@ static void cma_deref_id(struct rdma_id_private *id_priv)
 }
 
 static int cma_disable_callback(struct rdma_id_private *id_priv,
-                                enum cma_state state)
+                                enum rdma_cm_state state)
 {
         mutex_lock(&id_priv->handler_mutex);
         if (id_priv->state != state) {
@@ -437,7 +423,7 @@ struct rdma_cm_id *rdma_create_id(rdma_cm_event_handler event_handler,
         if (!id_priv)
                 return ERR_PTR(-ENOMEM);
 
-        id_priv->state = CMA_IDLE;
+        id_priv->state = RDMA_CM_IDLE;
         id_priv->id.context = context;
         id_priv->id.event_handler = event_handler;
         id_priv->id.ps = ps;
@@ -858,16 +844,16 @@ static void cma_cancel_listens(struct rdma_id_private *id_priv)
 }
 
 static void cma_cancel_operation(struct rdma_id_private *id_priv,
-                                 enum cma_state state)
+                                 enum rdma_cm_state state)
 {
         switch (state) {
-        case CMA_ADDR_QUERY:
+        case RDMA_CM_ADDR_QUERY:
                 rdma_addr_cancel(&id_priv->id.route.addr.dev_addr);
                 break;
-        case CMA_ROUTE_QUERY:
+        case RDMA_CM_ROUTE_QUERY:
                 cma_cancel_route(id_priv);
                 break;
-        case CMA_LISTEN:
+        case RDMA_CM_LISTEN:
                 if (cma_any_addr((struct sockaddr *) &id_priv->id.route.addr.src_addr)
                                 && !id_priv->cma_dev)
                         cma_cancel_listens(id_priv);
@@ -918,10 +904,10 @@ static void cma_leave_mc_groups(struct rdma_id_private *id_priv)
 void rdma_destroy_id(struct rdma_cm_id *id)
 {
         struct rdma_id_private *id_priv;
-        enum cma_state state;
+        enum rdma_cm_state state;
 
         id_priv = container_of(id, struct rdma_id_private, id);
-        state = cma_exch(id_priv, CMA_DESTROYING);
+        state = cma_exch(id_priv, RDMA_CM_DESTROYING);
         cma_cancel_operation(id_priv, state);
 
         /*
@@ -1015,9 +1001,9 @@ static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
         int ret = 0;
 
         if ((ib_event->event != IB_CM_TIMEWAIT_EXIT &&
-                cma_disable_callback(id_priv, CMA_CONNECT)) ||
+                cma_disable_callback(id_priv, RDMA_CM_CONNECT)) ||
             (ib_event->event == IB_CM_TIMEWAIT_EXIT &&
-                cma_disable_callback(id_priv, CMA_DISCONNECT)))
+                cma_disable_callback(id_priv, RDMA_CM_DISCONNECT)))
                 return 0;
 
         memset(&event, 0, sizeof event);
@@ -1048,7 +1034,8 @@ static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
                 event.status = -ETIMEDOUT; /* fall through */
         case IB_CM_DREQ_RECEIVED:
         case IB_CM_DREP_RECEIVED:
-                if (!cma_comp_exch(id_priv, CMA_CONNECT, CMA_DISCONNECT))
+                if (!cma_comp_exch(id_priv, RDMA_CM_CONNECT,
+                                   RDMA_CM_DISCONNECT))
                         goto out;
                 event.event = RDMA_CM_EVENT_DISCONNECTED;
                 break;
@@ -1075,7 +1062,7 @@ static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
         if (ret) {
                 /* Destroy the CM ID by returning a non-zero value. */
                 id_priv->cm_id.ib = NULL;
-                cma_exch(id_priv, CMA_DESTROYING);
+                cma_exch(id_priv, RDMA_CM_DESTROYING);
                 mutex_unlock(&id_priv->handler_mutex);
                 rdma_destroy_id(&id_priv->id);
                 return ret;
@@ -1132,7 +1119,7 @@ static struct rdma_id_private *cma_new_conn_id(struct rdma_cm_id *listen_id,
         rdma_addr_set_dgid(&rt->addr.dev_addr, &rt->path_rec[0].dgid);
 
         id_priv = container_of(id, struct rdma_id_private, id);
-        id_priv->state = CMA_CONNECT;
+        id_priv->state = RDMA_CM_CONNECT;
         return id_priv;
 
 destroy_id:
@@ -1172,7 +1159,7 @@ static struct rdma_id_private *cma_new_udp_id(struct rdma_cm_id *listen_id,
         }
 
         id_priv = container_of(id, struct rdma_id_private, id);
-        id_priv->state = CMA_CONNECT;
+        id_priv->state = RDMA_CM_CONNECT;
         return id_priv;
 err:
         rdma_destroy_id(id);
@@ -1201,7 +1188,7 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
         int offset, ret;
 
         listen_id = cm_id->context;
-        if (cma_disable_callback(listen_id, CMA_LISTEN))
+        if (cma_disable_callback(listen_id, RDMA_CM_LISTEN))
                 return -ECONNABORTED;
 
         memset(&event, 0, sizeof event);
@@ -1243,7 +1230,7 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
          * while we're accessing the cm_id.
          */
         mutex_lock(&lock);
-        if (cma_comp(conn_id, CMA_CONNECT) &&
+        if (cma_comp(conn_id, RDMA_CM_CONNECT) &&
             !cma_is_ud_ps(conn_id->id.ps))
                 ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0);
         mutex_unlock(&lock);
@@ -1257,7 +1244,7 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
         conn_id->cm_id.ib = NULL;
 
 release_conn_id:
-        cma_exch(conn_id, CMA_DESTROYING);
+        cma_exch(conn_id, RDMA_CM_DESTROYING);
         mutex_unlock(&conn_id->handler_mutex);
         rdma_destroy_id(&conn_id->id);
 
@@ -1328,7 +1315,7 @@ static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
         struct sockaddr_in *sin;
         int ret = 0;
 
-        if (cma_disable_callback(id_priv, CMA_CONNECT))
+        if (cma_disable_callback(id_priv, RDMA_CM_CONNECT))
                 return 0;
 
         memset(&event, 0, sizeof event);
@@ -1371,7 +1358,7 @@ static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
         if (ret) {
                 /* Destroy the CM ID by returning a non-zero value. */
                 id_priv->cm_id.iw = NULL;
-                cma_exch(id_priv, CMA_DESTROYING);
+                cma_exch(id_priv, RDMA_CM_DESTROYING);
                 mutex_unlock(&id_priv->handler_mutex);
                 rdma_destroy_id(&id_priv->id);
                 return ret;
@@ -1393,7 +1380,7 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
         struct ib_device_attr attr;
 
         listen_id = cm_id->context;
-        if (cma_disable_callback(listen_id, CMA_LISTEN))
+        if (cma_disable_callback(listen_id, RDMA_CM_LISTEN))
                 return -ECONNABORTED;
 
         /* Create a new RDMA id for the new IW CM ID */
@@ -1406,7 +1393,7 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
         }
         conn_id = container_of(new_cm_id, struct rdma_id_private, id);
         mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING);
-        conn_id->state = CMA_CONNECT;
+        conn_id->state = RDMA_CM_CONNECT;
 
         dev = ip_dev_find(&init_net, iw_event->local_addr.sin_addr.s_addr);
         if (!dev) {
@@ -1461,7 +1448,7 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
         if (ret) {
                 /* User wants to destroy the CM ID */
                 conn_id->cm_id.iw = NULL;
-                cma_exch(conn_id, CMA_DESTROYING);
+                cma_exch(conn_id, RDMA_CM_DESTROYING);
                 mutex_unlock(&conn_id->handler_mutex);
                 cma_deref_id(conn_id);
                 rdma_destroy_id(&conn_id->id);
@@ -1554,7 +1541,7 @@ static void cma_listen_on_dev(struct rdma_id_private *id_priv,
 
         dev_id_priv = container_of(id, struct rdma_id_private, id);
 
-        dev_id_priv->state = CMA_ADDR_BOUND;
+        dev_id_priv->state = RDMA_CM_ADDR_BOUND;
         memcpy(&id->route.addr.src_addr, &id_priv->id.route.addr.src_addr,
                ip_addr_size((struct sockaddr *) &id_priv->id.route.addr.src_addr));
 
@@ -1601,8 +1588,8 @@ static void cma_query_handler(int status, struct ib_sa_path_rec *path_rec,
                 route->num_paths = 1;
                 *route->path_rec = *path_rec;
         } else {
-                work->old_state = CMA_ROUTE_QUERY;
-                work->new_state = CMA_ADDR_RESOLVED;
+                work->old_state = RDMA_CM_ROUTE_QUERY;
+                work->new_state = RDMA_CM_ADDR_RESOLVED;
                 work->event.event = RDMA_CM_EVENT_ROUTE_ERROR;
                 work->event.status = status;
         }
@@ -1660,7 +1647,7 @@ static void cma_work_handler(struct work_struct *_work)
                 goto out;
 
         if (id_priv->id.event_handler(&id_priv->id, &work->event)) {
-                cma_exch(id_priv, CMA_DESTROYING);
+                cma_exch(id_priv, RDMA_CM_DESTROYING);
                 destroy = 1;
         }
 out:
@@ -1678,12 +1665,12 @@ static void cma_ndev_work_handler(struct work_struct *_work)
         int destroy = 0;
 
         mutex_lock(&id_priv->handler_mutex);
-        if (id_priv->state == CMA_DESTROYING ||
-            id_priv->state == CMA_DEVICE_REMOVAL)
+        if (id_priv->state == RDMA_CM_DESTROYING ||
+            id_priv->state == RDMA_CM_DEVICE_REMOVAL)
                 goto out;
 
         if (id_priv->id.event_handler(&id_priv->id, &work->event)) {
-                cma_exch(id_priv, CMA_DESTROYING);
+                cma_exch(id_priv, RDMA_CM_DESTROYING);
                 destroy = 1;
         }
 
@@ -1707,8 +1694,8 @@ static int cma_resolve_ib_route(struct rdma_id_private *id_priv, int timeout_ms)
 
         work->id = id_priv;
         INIT_WORK(&work->work, cma_work_handler);
-        work->old_state = CMA_ROUTE_QUERY;
-        work->new_state = CMA_ROUTE_RESOLVED;
+        work->old_state = RDMA_CM_ROUTE_QUERY;
+        work->new_state = RDMA_CM_ROUTE_RESOLVED;
         work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
 
         route->path_rec = kmalloc(sizeof *route->path_rec, GFP_KERNEL);
@@ -1737,7 +1724,8 @@ int rdma_set_ib_paths(struct rdma_cm_id *id,
         int ret;
 
         id_priv = container_of(id, struct rdma_id_private, id);
-        if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ROUTE_RESOLVED))
+        if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED,
+                           RDMA_CM_ROUTE_RESOLVED))
                 return -EINVAL;
 
         id->route.path_rec = kmemdup(path_rec, sizeof *path_rec * num_paths,
@@ -1750,7 +1738,7 @@ int rdma_set_ib_paths(struct rdma_cm_id *id,
         id->route.num_paths = num_paths;
         return 0;
 err:
-        cma_comp_exch(id_priv, CMA_ROUTE_RESOLVED, CMA_ADDR_RESOLVED);
+        cma_comp_exch(id_priv, RDMA_CM_ROUTE_RESOLVED, RDMA_CM_ADDR_RESOLVED);
         return ret;
 }
 EXPORT_SYMBOL(rdma_set_ib_paths);
@@ -1765,8 +1753,8 @@ static int cma_resolve_iw_route(struct rdma_id_private *id_priv, int timeout_ms)
 
         work->id = id_priv;
         INIT_WORK(&work->work, cma_work_handler);
-        work->old_state = CMA_ROUTE_QUERY;
-        work->new_state = CMA_ROUTE_RESOLVED;
+        work->old_state = RDMA_CM_ROUTE_QUERY;
+        work->new_state = RDMA_CM_ROUTE_RESOLVED;
         work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
         queue_work(cma_wq, &work->work);
         return 0;
@@ -1830,8 +1818,8 @@ static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
                 goto err2;
         }
 
-        work->old_state = CMA_ROUTE_QUERY;
-        work->new_state = CMA_ROUTE_RESOLVED;
+        work->old_state = RDMA_CM_ROUTE_QUERY;
+        work->new_state = RDMA_CM_ROUTE_RESOLVED;
         work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
         work->event.status = 0;
 
@@ -1853,7 +1841,7 @@ int rdma_resolve_route(struct rdma_cm_id *id, int timeout_ms)
         int ret;
 
         id_priv = container_of(id, struct rdma_id_private, id);
-        if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ROUTE_QUERY))
+        if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED, RDMA_CM_ROUTE_QUERY))
                 return -EINVAL;
 
         atomic_inc(&id_priv->refcount);
@@ -1882,7 +1870,7 @@ int rdma_resolve_route(struct rdma_cm_id *id, int timeout_ms)
 
         return 0;
 err:
-        cma_comp_exch(id_priv, CMA_ROUTE_QUERY, CMA_ADDR_RESOLVED);
+        cma_comp_exch(id_priv, RDMA_CM_ROUTE_QUERY, RDMA_CM_ADDR_RESOLVED);
         cma_deref_id(id_priv);
         return ret;
 }
@@ -1941,14 +1929,16 @@ static void addr_handler(int status, struct sockaddr *src_addr,
 
         memset(&event, 0, sizeof event);
         mutex_lock(&id_priv->handler_mutex);
-        if (!cma_comp_exch(id_priv, CMA_ADDR_QUERY, CMA_ADDR_RESOLVED))
+        if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY,
+                           RDMA_CM_ADDR_RESOLVED))
                 goto out;
 
         if (!status && !id_priv->cma_dev)
                 status = cma_acquire_dev(id_priv);
 
         if (status) {
-                if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ADDR_BOUND))
+                if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED,
+                                   RDMA_CM_ADDR_BOUND))
                         goto out;
                 event.event = RDMA_CM_EVENT_ADDR_ERROR;
                 event.status = status;
@@ -1959,7 +1949,7 @@ static void addr_handler(int status, struct sockaddr *src_addr,
         }
 
         if (id_priv->id.event_handler(&id_priv->id, &event)) {
-                cma_exch(id_priv, CMA_DESTROYING);
+                cma_exch(id_priv, RDMA_CM_DESTROYING);
                 mutex_unlock(&id_priv->handler_mutex);
                 cma_deref_id(id_priv);
                 rdma_destroy_id(&id_priv->id);
@@ -2004,8 +1994,8 @@ static int cma_resolve_loopback(struct rdma_id_private *id_priv)
 
         work->id = id_priv;
         INIT_WORK(&work->work, cma_work_handler);
-        work->old_state = CMA_ADDR_QUERY;
-        work->new_state = CMA_ADDR_RESOLVED;
+        work->old_state = RDMA_CM_ADDR_QUERY;
+        work->new_state = RDMA_CM_ADDR_RESOLVED;
         work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
         queue_work(cma_wq, &work->work);
         return 0;
@@ -2034,13 +2024,13 @@ int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
         int ret;
 
         id_priv = container_of(id, struct rdma_id_private, id);
-        if (id_priv->state == CMA_IDLE) {
+        if (id_priv->state == RDMA_CM_IDLE) {
                 ret = cma_bind_addr(id, src_addr, dst_addr);
                 if (ret)
                         return ret;
         }
 
-        if (!cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_ADDR_QUERY))
+        if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_ADDR_QUERY))
                 return -EINVAL;
 
         atomic_inc(&id_priv->refcount);
@@ -2056,7 +2046,7 @@ int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
 
         return 0;
 err:
-        cma_comp_exch(id_priv, CMA_ADDR_QUERY, CMA_ADDR_BOUND);
+        cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY, RDMA_CM_ADDR_BOUND);
         cma_deref_id(id_priv);
         return ret;
 }
@@ -2070,7 +2060,7 @@ int rdma_set_reuseaddr(struct rdma_cm_id *id, int reuse)
 
         id_priv = container_of(id, struct rdma_id_private, id);
         spin_lock_irqsave(&id_priv->lock, flags);
-        if (id_priv->state == CMA_IDLE) {
+        if (id_priv->state == RDMA_CM_IDLE) {
                 id_priv->reuseaddr = reuse;
                 ret = 0;
         } else {
@@ -2177,7 +2167,7 @@ static int cma_check_port(struct rdma_bind_list *bind_list,
                 if (id_priv == cur_id)
                         continue;
 
-                if ((cur_id->state == CMA_LISTEN) ||
+                if ((cur_id->state == RDMA_CM_LISTEN) ||
                     !reuseaddr || !cur_id->reuseaddr) {
                         cur_addr = (struct sockaddr *) &cur_id->id.route.addr.src_addr;
                         if (cma_any_addr(cur_addr))
@@ -2280,14 +2270,14 @@ int rdma_listen(struct rdma_cm_id *id, int backlog)
         int ret;
 
         id_priv = container_of(id, struct rdma_id_private, id);
-        if (id_priv->state == CMA_IDLE) {
+        if (id_priv->state == RDMA_CM_IDLE) {
                 ((struct sockaddr *) &id->route.addr.src_addr)->sa_family = AF_INET;
                 ret = rdma_bind_addr(id, (struct sockaddr *) &id->route.addr.src_addr);
                 if (ret)
                         return ret;
         }
 
-        if (!cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_LISTEN))
+        if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_LISTEN))
                 return -EINVAL;
 
         if (id_priv->reuseaddr) {
@@ -2319,7 +2309,7 @@ int rdma_listen(struct rdma_cm_id *id, int backlog)
         return 0;
 err:
         id_priv->backlog = 0;
-        cma_comp_exch(id_priv, CMA_LISTEN, CMA_ADDR_BOUND);
+        cma_comp_exch(id_priv, RDMA_CM_LISTEN, RDMA_CM_ADDR_BOUND);
         return ret;
 }
 EXPORT_SYMBOL(rdma_listen);
@@ -2333,7 +2323,7 @@ int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
                 return -EAFNOSUPPORT;
 
         id_priv = container_of(id, struct rdma_id_private, id);
-        if (!cma_comp_exch(id_priv, CMA_IDLE, CMA_ADDR_BOUND))
+        if (!cma_comp_exch(id_priv, RDMA_CM_IDLE, RDMA_CM_ADDR_BOUND))
                 return -EINVAL;
 
         ret = cma_check_linklocal(&id->route.addr.dev_addr, addr);
@@ -2360,7 +2350,7 @@ err2:
         if (id_priv->cma_dev)
                 cma_release_dev(id_priv);
 err1:
-        cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_IDLE);
+        cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_IDLE);
         return ret;
 }
 EXPORT_SYMBOL(rdma_bind_addr);
@@ -2433,7 +2423,7 @@ static int cma_sidr_rep_handler(struct ib_cm_id *cm_id,
         struct ib_cm_sidr_rep_event_param *rep = &ib_event->param.sidr_rep_rcvd;
         int ret = 0;
 
-        if (cma_disable_callback(id_priv, CMA_CONNECT))
+        if (cma_disable_callback(id_priv, RDMA_CM_CONNECT))
                 return 0;
 
         memset(&event, 0, sizeof event);
@@ -2479,7 +2469,7 @@ static int cma_sidr_rep_handler(struct ib_cm_id *cm_id,
         if (ret) {
                 /* Destroy the CM ID by returning a non-zero value. */
                 id_priv->cm_id.ib = NULL;
-                cma_exch(id_priv, CMA_DESTROYING);
+                cma_exch(id_priv, RDMA_CM_DESTROYING);
                 mutex_unlock(&id_priv->handler_mutex);
                 rdma_destroy_id(&id_priv->id);
                 return ret;
@@ -2645,7 +2635,7 @@ int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
         int ret;
 
         id_priv = container_of(id, struct rdma_id_private, id);
-        if (!cma_comp_exch(id_priv, CMA_ROUTE_RESOLVED, CMA_CONNECT))
+        if (!cma_comp_exch(id_priv, RDMA_CM_ROUTE_RESOLVED, RDMA_CM_CONNECT))
                 return -EINVAL;
 
         if (!id->qp) {
@@ -2672,7 +2662,7 @@ int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
 
         return 0;
 err:
-        cma_comp_exch(id_priv, CMA_CONNECT, CMA_ROUTE_RESOLVED);
+        cma_comp_exch(id_priv, RDMA_CM_CONNECT, RDMA_CM_ROUTE_RESOLVED);
         return ret;
 }
 EXPORT_SYMBOL(rdma_connect);
@@ -2758,7 +2748,7 @@ int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
         int ret;
 
         id_priv = container_of(id, struct rdma_id_private, id);
-        if (!cma_comp(id_priv, CMA_CONNECT))
+        if (!cma_comp(id_priv, RDMA_CM_CONNECT))
                 return -EINVAL;
 
         if (!id->qp && conn_param) {
@@ -2887,8 +2877,8 @@ static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
         int ret;
 
         id_priv = mc->id_priv;
-        if (cma_disable_callback(id_priv, CMA_ADDR_BOUND) &&
-            cma_disable_callback(id_priv, CMA_ADDR_RESOLVED))
+        if (cma_disable_callback(id_priv, RDMA_CM_ADDR_BOUND) &&
+            cma_disable_callback(id_priv, RDMA_CM_ADDR_RESOLVED))
                 return 0;
 
         mutex_lock(&id_priv->qp_mutex);
@@ -2912,7 +2902,7 @@ static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
 
         ret = id_priv->id.event_handler(&id_priv->id, &event);
         if (ret) {
-                cma_exch(id_priv, CMA_DESTROYING);
+                cma_exch(id_priv, RDMA_CM_DESTROYING);
                 mutex_unlock(&id_priv->handler_mutex);
                 rdma_destroy_id(&id_priv->id);
                 return 0;
@@ -3095,8 +3085,8 @@ int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
         int ret;
 
         id_priv = container_of(id, struct rdma_id_private, id);
-        if (!cma_comp(id_priv, CMA_ADDR_BOUND) &&
-            !cma_comp(id_priv, CMA_ADDR_RESOLVED))
+        if (!cma_comp(id_priv, RDMA_CM_ADDR_BOUND) &&
+            !cma_comp(id_priv, RDMA_CM_ADDR_RESOLVED))
                 return -EINVAL;
 
         mc = kmalloc(sizeof *mc, GFP_KERNEL);
@@ -3261,19 +3251,19 @@ static void cma_add_one(struct ib_device *device)
 static int cma_remove_id_dev(struct rdma_id_private *id_priv)
 {
         struct rdma_cm_event event;
-        enum cma_state state;
+        enum rdma_cm_state state;
         int ret = 0;
 
         /* Record that we want to remove the device */
-        state = cma_exch(id_priv, CMA_DEVICE_REMOVAL);
-        if (state == CMA_DESTROYING)
+        state = cma_exch(id_priv, RDMA_CM_DEVICE_REMOVAL);
+        if (state == RDMA_CM_DESTROYING)
                 return 0;
 
         cma_cancel_operation(id_priv, state);
         mutex_lock(&id_priv->handler_mutex);
 
         /* Check for destruction from another callback. */
-        if (!cma_comp(id_priv, CMA_DEVICE_REMOVAL))
+        if (!cma_comp(id_priv, RDMA_CM_DEVICE_REMOVAL))
                 goto out;
 
         memset(&event, 0, sizeof event);
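
The event handlers above all guard entry with cma_disable_callback(). Only its signature and opening lines appear as context in the hunks; the rest of the body below is a reconstruction under the assumption, consistent with the callers (which drop the event on a nonzero return), that a state mismatch unlocks the handler mutex and fails:

static int cma_disable_callback(struct rdma_id_private *id_priv,
                                enum rdma_cm_state state)
{
        /* Hold the handler mutex across the callback it protects. */
        mutex_lock(&id_priv->handler_mutex);
        if (id_priv->state != state) {
                /* Assumed error path: wrong state, caller drops the event. */
                mutex_unlock(&id_priv->handler_mutex);
                return -EINVAL;
        }
        return 0;
}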

--- a/include/rdma/rdma_cm.h
+++ b/include/rdma/rdma_cm.h

@@ -111,6 +111,20 @@ struct rdma_cm_event {
         } param;
 };
 
+enum rdma_cm_state {
+        RDMA_CM_IDLE,
+        RDMA_CM_ADDR_QUERY,
+        RDMA_CM_ADDR_RESOLVED,
+        RDMA_CM_ROUTE_QUERY,
+        RDMA_CM_ROUTE_RESOLVED,
+        RDMA_CM_CONNECT,
+        RDMA_CM_DISCONNECT,
+        RDMA_CM_ADDR_BOUND,
+        RDMA_CM_LISTEN,
+        RDMA_CM_DEVICE_REMOVAL,
+        RDMA_CM_DESTROYING
+};
+
 struct rdma_cm_id;
 
 /**
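
With enum rdma_cm_state in <rdma/rdma_cm.h>, code outside cma.c can now name connection states, which is the prerequisite for the RDMA netlink export mentioned in the commit message. A purely hypothetical consumer of the exported enum (nothing below is part of this commit; the function and strings are illustrative only):

#include <rdma/rdma_cm.h>

/* Hypothetical: map the now-exported states to names for reporting. */
static const char *rdma_cm_state_name(enum rdma_cm_state state)
{
        switch (state) {
        case RDMA_CM_IDLE:              return "idle";
        case RDMA_CM_ADDR_QUERY:        return "addr-query";
        case RDMA_CM_ADDR_RESOLVED:     return "addr-resolved";
        case RDMA_CM_ROUTE_QUERY:       return "route-query";
        case RDMA_CM_ROUTE_RESOLVED:    return "route-resolved";
        case RDMA_CM_CONNECT:           return "connect";
        case RDMA_CM_DISCONNECT:        return "disconnect";
        case RDMA_CM_ADDR_BOUND:        return "addr-bound";
        case RDMA_CM_LISTEN:            return "listen";
        case RDMA_CM_DEVICE_REMOVAL:    return "device-removal";
        case RDMA_CM_DESTROYING:        return "destroying";
        }
        return "unknown";
}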