Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband:
  RDMA/cma: Save PID of ID's owner
  RDMA/cma: Add support for netlink statistics export
  RDMA/cma: Pass QP type into rdma_create_id()
  RDMA: Update exported headers list
  RDMA/cma: Export enum cma_state in <rdma/rdma_cm.h>
  RDMA/nes: Add a check for strict_strtoul()
  RDMA/cxgb3: Don't post zero-byte read if endpoint is going away
  RDMA/cxgb4: Use completion objects for event blocking
  IB/srp: Fix integer -> pointer cast warnings
  IB: Add devnode methods to cm_class and umad_class
  IB/mad: Return EPROTONOSUPPORT when an RDMA device lacks the QP required
  IB/uverbs: Add devnode method to set path/mode
  RDMA/ucma: Add .nodename/.mode to tell userspace where to create device node
  RDMA: Add netlink infrastructure
  RDMA: Add error handling to ib_core_init()
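The central API change pulled in here is the extra qp_type argument to rdma_create_id(), which the hunks below propagate to every in-kernel caller. A minimal caller-side sketch of the new signature (my_open_rc_id, my_cm_handler and my_ctx are hypothetical names, not part of this merge):

/* Minimal sketch of the post-merge rdma_create_id() signature; the helper
 * and its arguments are placeholders, not symbols from this merge.  Callers
 * still IS_ERR()-check the result as before. */
#include <rdma/rdma_cm.h>

static struct rdma_cm_id *my_open_rc_id(rdma_cm_event_handler my_cm_handler,
					void *my_ctx)
{
	/*
	 * Port space and QP type are now passed separately; before this
	 * merge the CMA inferred UD vs. connected QPs from the port space
	 * alone (see the cma_is_ud_ps() removal in the cma.c hunks below).
	 */
	return rdma_create_id(my_cm_handler, my_ctx, RDMA_PS_TCP, IB_QPT_RC);
}

The iSER and ucma hunks further down make exactly this change at their call sites, with ucma_create_id() mapping the requested port space to a QP type through the new ucma_get_qp_type() helper.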
Linus Torvalds 2011-05-26 12:13:57 -07:00
Parents 20e0ec119b 8dc4abdf4c
Commit 4c171acc20
31 changed files with 633 additions and 159 deletions

View file

@ -2,6 +2,7 @@ menuconfig INFINIBAND
tristate "InfiniBand support" tristate "InfiniBand support"
depends on PCI || BROKEN depends on PCI || BROKEN
depends on HAS_IOMEM depends on HAS_IOMEM
depends on NET
---help--- ---help---
Core support for InfiniBand (IB). Make sure to also select Core support for InfiniBand (IB). Make sure to also select
any protocols you wish to use as well as drivers for your any protocols you wish to use as well as drivers for your

View file

@ -8,7 +8,7 @@ obj-$(CONFIG_INFINIBAND_USER_ACCESS) += ib_uverbs.o ib_ucm.o \
$(user_access-y) $(user_access-y)
ib_core-y := packer.o ud_header.o verbs.o sysfs.o \ ib_core-y := packer.o ud_header.o verbs.o sysfs.o \
device.o fmr_pool.o cache.o device.o fmr_pool.o cache.o netlink.o
ib_core-$(CONFIG_INFINIBAND_USER_MEM) += umem.o ib_core-$(CONFIG_INFINIBAND_USER_MEM) += umem.o
ib_mad-y := mad.o smi.o agent.o mad_rmpp.o ib_mad-y := mad.o smi.o agent.o mad_rmpp.o

View file

@ -3639,8 +3639,16 @@ static struct kobj_type cm_port_obj_type = {
.release = cm_release_port_obj .release = cm_release_port_obj
}; };
static char *cm_devnode(struct device *dev, mode_t *mode)
{
*mode = 0666;
return kasprintf(GFP_KERNEL, "infiniband/%s", dev_name(dev));
}
struct class cm_class = { struct class cm_class = {
.owner = THIS_MODULE,
.name = "infiniband_cm", .name = "infiniband_cm",
.devnode = cm_devnode,
}; };
EXPORT_SYMBOL(cm_class); EXPORT_SYMBOL(cm_class);

View file

@ -47,6 +47,7 @@
#include <rdma/rdma_cm.h> #include <rdma/rdma_cm.h>
#include <rdma/rdma_cm_ib.h> #include <rdma/rdma_cm_ib.h>
#include <rdma/rdma_netlink.h>
#include <rdma/ib_cache.h> #include <rdma/ib_cache.h>
#include <rdma/ib_cm.h> #include <rdma/ib_cm.h>
#include <rdma/ib_sa.h> #include <rdma/ib_sa.h>
@ -89,20 +90,6 @@ struct cma_device {
struct list_head id_list; struct list_head id_list;
}; };
enum cma_state {
CMA_IDLE,
CMA_ADDR_QUERY,
CMA_ADDR_RESOLVED,
CMA_ROUTE_QUERY,
CMA_ROUTE_RESOLVED,
CMA_CONNECT,
CMA_DISCONNECT,
CMA_ADDR_BOUND,
CMA_LISTEN,
CMA_DEVICE_REMOVAL,
CMA_DESTROYING
};
struct rdma_bind_list { struct rdma_bind_list {
struct idr *ps; struct idr *ps;
struct hlist_head owners; struct hlist_head owners;
@ -126,7 +113,7 @@ struct rdma_id_private {
struct list_head mc_list; struct list_head mc_list;
int internal_id; int internal_id;
enum cma_state state; enum rdma_cm_state state;
spinlock_t lock; spinlock_t lock;
struct mutex qp_mutex; struct mutex qp_mutex;
@ -146,6 +133,7 @@ struct rdma_id_private {
u32 seq_num; u32 seq_num;
u32 qkey; u32 qkey;
u32 qp_num; u32 qp_num;
pid_t owner;
u8 srq; u8 srq;
u8 tos; u8 tos;
u8 reuseaddr; u8 reuseaddr;
@ -165,8 +153,8 @@ struct cma_multicast {
struct cma_work { struct cma_work {
struct work_struct work; struct work_struct work;
struct rdma_id_private *id; struct rdma_id_private *id;
enum cma_state old_state; enum rdma_cm_state old_state;
enum cma_state new_state; enum rdma_cm_state new_state;
struct rdma_cm_event event; struct rdma_cm_event event;
}; };
@ -217,7 +205,7 @@ struct sdp_hah {
#define CMA_VERSION 0x00 #define CMA_VERSION 0x00
#define SDP_MAJ_VERSION 0x2 #define SDP_MAJ_VERSION 0x2
static int cma_comp(struct rdma_id_private *id_priv, enum cma_state comp) static int cma_comp(struct rdma_id_private *id_priv, enum rdma_cm_state comp)
{ {
unsigned long flags; unsigned long flags;
int ret; int ret;
@ -229,7 +217,7 @@ static int cma_comp(struct rdma_id_private *id_priv, enum cma_state comp)
} }
static int cma_comp_exch(struct rdma_id_private *id_priv, static int cma_comp_exch(struct rdma_id_private *id_priv,
enum cma_state comp, enum cma_state exch) enum rdma_cm_state comp, enum rdma_cm_state exch)
{ {
unsigned long flags; unsigned long flags;
int ret; int ret;
@ -241,11 +229,11 @@ static int cma_comp_exch(struct rdma_id_private *id_priv,
return ret; return ret;
} }
static enum cma_state cma_exch(struct rdma_id_private *id_priv, static enum rdma_cm_state cma_exch(struct rdma_id_private *id_priv,
enum cma_state exch) enum rdma_cm_state exch)
{ {
unsigned long flags; unsigned long flags;
enum cma_state old; enum rdma_cm_state old;
spin_lock_irqsave(&id_priv->lock, flags); spin_lock_irqsave(&id_priv->lock, flags);
old = id_priv->state; old = id_priv->state;
@ -279,11 +267,6 @@ static inline void sdp_set_ip_ver(struct sdp_hh *hh, u8 ip_ver)
hh->ip_version = (ip_ver << 4) | (hh->ip_version & 0xF); hh->ip_version = (ip_ver << 4) | (hh->ip_version & 0xF);
} }
static inline int cma_is_ud_ps(enum rdma_port_space ps)
{
return (ps == RDMA_PS_UDP || ps == RDMA_PS_IPOIB);
}
static void cma_attach_to_dev(struct rdma_id_private *id_priv, static void cma_attach_to_dev(struct rdma_id_private *id_priv,
struct cma_device *cma_dev) struct cma_device *cma_dev)
{ {
@ -413,7 +396,7 @@ static void cma_deref_id(struct rdma_id_private *id_priv)
} }
static int cma_disable_callback(struct rdma_id_private *id_priv, static int cma_disable_callback(struct rdma_id_private *id_priv,
enum cma_state state) enum rdma_cm_state state)
{ {
mutex_lock(&id_priv->handler_mutex); mutex_lock(&id_priv->handler_mutex);
if (id_priv->state != state) { if (id_priv->state != state) {
@ -429,7 +412,8 @@ static int cma_has_cm_dev(struct rdma_id_private *id_priv)
} }
struct rdma_cm_id *rdma_create_id(rdma_cm_event_handler event_handler, struct rdma_cm_id *rdma_create_id(rdma_cm_event_handler event_handler,
void *context, enum rdma_port_space ps) void *context, enum rdma_port_space ps,
enum ib_qp_type qp_type)
{ {
struct rdma_id_private *id_priv; struct rdma_id_private *id_priv;
@ -437,10 +421,12 @@ struct rdma_cm_id *rdma_create_id(rdma_cm_event_handler event_handler,
if (!id_priv) if (!id_priv)
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
id_priv->state = CMA_IDLE; id_priv->owner = task_pid_nr(current);
id_priv->state = RDMA_CM_IDLE;
id_priv->id.context = context; id_priv->id.context = context;
id_priv->id.event_handler = event_handler; id_priv->id.event_handler = event_handler;
id_priv->id.ps = ps; id_priv->id.ps = ps;
id_priv->id.qp_type = qp_type;
spin_lock_init(&id_priv->lock); spin_lock_init(&id_priv->lock);
mutex_init(&id_priv->qp_mutex); mutex_init(&id_priv->qp_mutex);
init_completion(&id_priv->comp); init_completion(&id_priv->comp);
@ -508,7 +494,7 @@ int rdma_create_qp(struct rdma_cm_id *id, struct ib_pd *pd,
if (IS_ERR(qp)) if (IS_ERR(qp))
return PTR_ERR(qp); return PTR_ERR(qp);
if (cma_is_ud_ps(id_priv->id.ps)) if (id->qp_type == IB_QPT_UD)
ret = cma_init_ud_qp(id_priv, qp); ret = cma_init_ud_qp(id_priv, qp);
else else
ret = cma_init_conn_qp(id_priv, qp); ret = cma_init_conn_qp(id_priv, qp);
@ -636,7 +622,7 @@ static int cma_ib_init_qp_attr(struct rdma_id_private *id_priv,
qp_attr->port_num = id_priv->id.port_num; qp_attr->port_num = id_priv->id.port_num;
*qp_attr_mask = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT; *qp_attr_mask = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT;
if (cma_is_ud_ps(id_priv->id.ps)) { if (id_priv->id.qp_type == IB_QPT_UD) {
ret = cma_set_qkey(id_priv); ret = cma_set_qkey(id_priv);
if (ret) if (ret)
return ret; return ret;
@ -659,7 +645,7 @@ int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr,
id_priv = container_of(id, struct rdma_id_private, id); id_priv = container_of(id, struct rdma_id_private, id);
switch (rdma_node_get_transport(id_priv->id.device->node_type)) { switch (rdma_node_get_transport(id_priv->id.device->node_type)) {
case RDMA_TRANSPORT_IB: case RDMA_TRANSPORT_IB:
if (!id_priv->cm_id.ib || cma_is_ud_ps(id_priv->id.ps)) if (!id_priv->cm_id.ib || (id_priv->id.qp_type == IB_QPT_UD))
ret = cma_ib_init_qp_attr(id_priv, qp_attr, qp_attr_mask); ret = cma_ib_init_qp_attr(id_priv, qp_attr, qp_attr_mask);
else else
ret = ib_cm_init_qp_attr(id_priv->cm_id.ib, qp_attr, ret = ib_cm_init_qp_attr(id_priv->cm_id.ib, qp_attr,
@ -858,16 +844,16 @@ static void cma_cancel_listens(struct rdma_id_private *id_priv)
} }
static void cma_cancel_operation(struct rdma_id_private *id_priv, static void cma_cancel_operation(struct rdma_id_private *id_priv,
enum cma_state state) enum rdma_cm_state state)
{ {
switch (state) { switch (state) {
case CMA_ADDR_QUERY: case RDMA_CM_ADDR_QUERY:
rdma_addr_cancel(&id_priv->id.route.addr.dev_addr); rdma_addr_cancel(&id_priv->id.route.addr.dev_addr);
break; break;
case CMA_ROUTE_QUERY: case RDMA_CM_ROUTE_QUERY:
cma_cancel_route(id_priv); cma_cancel_route(id_priv);
break; break;
case CMA_LISTEN: case RDMA_CM_LISTEN:
if (cma_any_addr((struct sockaddr *) &id_priv->id.route.addr.src_addr) if (cma_any_addr((struct sockaddr *) &id_priv->id.route.addr.src_addr)
&& !id_priv->cma_dev) && !id_priv->cma_dev)
cma_cancel_listens(id_priv); cma_cancel_listens(id_priv);
@ -918,10 +904,10 @@ static void cma_leave_mc_groups(struct rdma_id_private *id_priv)
void rdma_destroy_id(struct rdma_cm_id *id) void rdma_destroy_id(struct rdma_cm_id *id)
{ {
struct rdma_id_private *id_priv; struct rdma_id_private *id_priv;
enum cma_state state; enum rdma_cm_state state;
id_priv = container_of(id, struct rdma_id_private, id); id_priv = container_of(id, struct rdma_id_private, id);
state = cma_exch(id_priv, CMA_DESTROYING); state = cma_exch(id_priv, RDMA_CM_DESTROYING);
cma_cancel_operation(id_priv, state); cma_cancel_operation(id_priv, state);
/* /*
@ -1015,9 +1001,9 @@ static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
int ret = 0; int ret = 0;
if ((ib_event->event != IB_CM_TIMEWAIT_EXIT && if ((ib_event->event != IB_CM_TIMEWAIT_EXIT &&
cma_disable_callback(id_priv, CMA_CONNECT)) || cma_disable_callback(id_priv, RDMA_CM_CONNECT)) ||
(ib_event->event == IB_CM_TIMEWAIT_EXIT && (ib_event->event == IB_CM_TIMEWAIT_EXIT &&
cma_disable_callback(id_priv, CMA_DISCONNECT))) cma_disable_callback(id_priv, RDMA_CM_DISCONNECT)))
return 0; return 0;
memset(&event, 0, sizeof event); memset(&event, 0, sizeof event);
@ -1048,7 +1034,8 @@ static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
event.status = -ETIMEDOUT; /* fall through */ event.status = -ETIMEDOUT; /* fall through */
case IB_CM_DREQ_RECEIVED: case IB_CM_DREQ_RECEIVED:
case IB_CM_DREP_RECEIVED: case IB_CM_DREP_RECEIVED:
if (!cma_comp_exch(id_priv, CMA_CONNECT, CMA_DISCONNECT)) if (!cma_comp_exch(id_priv, RDMA_CM_CONNECT,
RDMA_CM_DISCONNECT))
goto out; goto out;
event.event = RDMA_CM_EVENT_DISCONNECTED; event.event = RDMA_CM_EVENT_DISCONNECTED;
break; break;
@ -1075,7 +1062,7 @@ static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
if (ret) { if (ret) {
/* Destroy the CM ID by returning a non-zero value. */ /* Destroy the CM ID by returning a non-zero value. */
id_priv->cm_id.ib = NULL; id_priv->cm_id.ib = NULL;
cma_exch(id_priv, CMA_DESTROYING); cma_exch(id_priv, RDMA_CM_DESTROYING);
mutex_unlock(&id_priv->handler_mutex); mutex_unlock(&id_priv->handler_mutex);
rdma_destroy_id(&id_priv->id); rdma_destroy_id(&id_priv->id);
return ret; return ret;
@ -1101,7 +1088,7 @@ static struct rdma_id_private *cma_new_conn_id(struct rdma_cm_id *listen_id,
goto err; goto err;
id = rdma_create_id(listen_id->event_handler, listen_id->context, id = rdma_create_id(listen_id->event_handler, listen_id->context,
listen_id->ps); listen_id->ps, ib_event->param.req_rcvd.qp_type);
if (IS_ERR(id)) if (IS_ERR(id))
goto err; goto err;
@ -1132,7 +1119,7 @@ static struct rdma_id_private *cma_new_conn_id(struct rdma_cm_id *listen_id,
rdma_addr_set_dgid(&rt->addr.dev_addr, &rt->path_rec[0].dgid); rdma_addr_set_dgid(&rt->addr.dev_addr, &rt->path_rec[0].dgid);
id_priv = container_of(id, struct rdma_id_private, id); id_priv = container_of(id, struct rdma_id_private, id);
id_priv->state = CMA_CONNECT; id_priv->state = RDMA_CM_CONNECT;
return id_priv; return id_priv;
destroy_id: destroy_id:
@ -1152,7 +1139,7 @@ static struct rdma_id_private *cma_new_udp_id(struct rdma_cm_id *listen_id,
int ret; int ret;
id = rdma_create_id(listen_id->event_handler, listen_id->context, id = rdma_create_id(listen_id->event_handler, listen_id->context,
listen_id->ps); listen_id->ps, IB_QPT_UD);
if (IS_ERR(id)) if (IS_ERR(id))
return NULL; return NULL;
@ -1172,7 +1159,7 @@ static struct rdma_id_private *cma_new_udp_id(struct rdma_cm_id *listen_id,
} }
id_priv = container_of(id, struct rdma_id_private, id); id_priv = container_of(id, struct rdma_id_private, id);
id_priv->state = CMA_CONNECT; id_priv->state = RDMA_CM_CONNECT;
return id_priv; return id_priv;
err: err:
rdma_destroy_id(id); rdma_destroy_id(id);
@ -1201,13 +1188,13 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
int offset, ret; int offset, ret;
listen_id = cm_id->context; listen_id = cm_id->context;
if (cma_disable_callback(listen_id, CMA_LISTEN)) if (cma_disable_callback(listen_id, RDMA_CM_LISTEN))
return -ECONNABORTED; return -ECONNABORTED;
memset(&event, 0, sizeof event); memset(&event, 0, sizeof event);
offset = cma_user_data_offset(listen_id->id.ps); offset = cma_user_data_offset(listen_id->id.ps);
event.event = RDMA_CM_EVENT_CONNECT_REQUEST; event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
if (cma_is_ud_ps(listen_id->id.ps)) { if (listen_id->id.qp_type == IB_QPT_UD) {
conn_id = cma_new_udp_id(&listen_id->id, ib_event); conn_id = cma_new_udp_id(&listen_id->id, ib_event);
event.param.ud.private_data = ib_event->private_data + offset; event.param.ud.private_data = ib_event->private_data + offset;
event.param.ud.private_data_len = event.param.ud.private_data_len =
@ -1243,8 +1230,7 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
* while we're accessing the cm_id. * while we're accessing the cm_id.
*/ */
mutex_lock(&lock); mutex_lock(&lock);
if (cma_comp(conn_id, CMA_CONNECT) && if (cma_comp(conn_id, RDMA_CM_CONNECT) && (conn_id->id.qp_type != IB_QPT_UD))
!cma_is_ud_ps(conn_id->id.ps))
ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0); ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0);
mutex_unlock(&lock); mutex_unlock(&lock);
mutex_unlock(&conn_id->handler_mutex); mutex_unlock(&conn_id->handler_mutex);
@ -1257,7 +1243,7 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
conn_id->cm_id.ib = NULL; conn_id->cm_id.ib = NULL;
release_conn_id: release_conn_id:
cma_exch(conn_id, CMA_DESTROYING); cma_exch(conn_id, RDMA_CM_DESTROYING);
mutex_unlock(&conn_id->handler_mutex); mutex_unlock(&conn_id->handler_mutex);
rdma_destroy_id(&conn_id->id); rdma_destroy_id(&conn_id->id);
@ -1328,7 +1314,7 @@ static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
struct sockaddr_in *sin; struct sockaddr_in *sin;
int ret = 0; int ret = 0;
if (cma_disable_callback(id_priv, CMA_CONNECT)) if (cma_disable_callback(id_priv, RDMA_CM_CONNECT))
return 0; return 0;
memset(&event, 0, sizeof event); memset(&event, 0, sizeof event);
@ -1371,7 +1357,7 @@ static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
if (ret) { if (ret) {
/* Destroy the CM ID by returning a non-zero value. */ /* Destroy the CM ID by returning a non-zero value. */
id_priv->cm_id.iw = NULL; id_priv->cm_id.iw = NULL;
cma_exch(id_priv, CMA_DESTROYING); cma_exch(id_priv, RDMA_CM_DESTROYING);
mutex_unlock(&id_priv->handler_mutex); mutex_unlock(&id_priv->handler_mutex);
rdma_destroy_id(&id_priv->id); rdma_destroy_id(&id_priv->id);
return ret; return ret;
@ -1393,20 +1379,20 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
struct ib_device_attr attr; struct ib_device_attr attr;
listen_id = cm_id->context; listen_id = cm_id->context;
if (cma_disable_callback(listen_id, CMA_LISTEN)) if (cma_disable_callback(listen_id, RDMA_CM_LISTEN))
return -ECONNABORTED; return -ECONNABORTED;
/* Create a new RDMA id for the new IW CM ID */ /* Create a new RDMA id for the new IW CM ID */
new_cm_id = rdma_create_id(listen_id->id.event_handler, new_cm_id = rdma_create_id(listen_id->id.event_handler,
listen_id->id.context, listen_id->id.context,
RDMA_PS_TCP); RDMA_PS_TCP, IB_QPT_RC);
if (IS_ERR(new_cm_id)) { if (IS_ERR(new_cm_id)) {
ret = -ENOMEM; ret = -ENOMEM;
goto out; goto out;
} }
conn_id = container_of(new_cm_id, struct rdma_id_private, id); conn_id = container_of(new_cm_id, struct rdma_id_private, id);
mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING); mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING);
conn_id->state = CMA_CONNECT; conn_id->state = RDMA_CM_CONNECT;
dev = ip_dev_find(&init_net, iw_event->local_addr.sin_addr.s_addr); dev = ip_dev_find(&init_net, iw_event->local_addr.sin_addr.s_addr);
if (!dev) { if (!dev) {
@ -1461,7 +1447,7 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
if (ret) { if (ret) {
/* User wants to destroy the CM ID */ /* User wants to destroy the CM ID */
conn_id->cm_id.iw = NULL; conn_id->cm_id.iw = NULL;
cma_exch(conn_id, CMA_DESTROYING); cma_exch(conn_id, RDMA_CM_DESTROYING);
mutex_unlock(&conn_id->handler_mutex); mutex_unlock(&conn_id->handler_mutex);
cma_deref_id(conn_id); cma_deref_id(conn_id);
rdma_destroy_id(&conn_id->id); rdma_destroy_id(&conn_id->id);
@ -1548,13 +1534,14 @@ static void cma_listen_on_dev(struct rdma_id_private *id_priv,
struct rdma_cm_id *id; struct rdma_cm_id *id;
int ret; int ret;
id = rdma_create_id(cma_listen_handler, id_priv, id_priv->id.ps); id = rdma_create_id(cma_listen_handler, id_priv, id_priv->id.ps,
id_priv->id.qp_type);
if (IS_ERR(id)) if (IS_ERR(id))
return; return;
dev_id_priv = container_of(id, struct rdma_id_private, id); dev_id_priv = container_of(id, struct rdma_id_private, id);
dev_id_priv->state = CMA_ADDR_BOUND; dev_id_priv->state = RDMA_CM_ADDR_BOUND;
memcpy(&id->route.addr.src_addr, &id_priv->id.route.addr.src_addr, memcpy(&id->route.addr.src_addr, &id_priv->id.route.addr.src_addr,
ip_addr_size((struct sockaddr *) &id_priv->id.route.addr.src_addr)); ip_addr_size((struct sockaddr *) &id_priv->id.route.addr.src_addr));
@ -1601,8 +1588,8 @@ static void cma_query_handler(int status, struct ib_sa_path_rec *path_rec,
route->num_paths = 1; route->num_paths = 1;
*route->path_rec = *path_rec; *route->path_rec = *path_rec;
} else { } else {
work->old_state = CMA_ROUTE_QUERY; work->old_state = RDMA_CM_ROUTE_QUERY;
work->new_state = CMA_ADDR_RESOLVED; work->new_state = RDMA_CM_ADDR_RESOLVED;
work->event.event = RDMA_CM_EVENT_ROUTE_ERROR; work->event.event = RDMA_CM_EVENT_ROUTE_ERROR;
work->event.status = status; work->event.status = status;
} }
@ -1660,7 +1647,7 @@ static void cma_work_handler(struct work_struct *_work)
goto out; goto out;
if (id_priv->id.event_handler(&id_priv->id, &work->event)) { if (id_priv->id.event_handler(&id_priv->id, &work->event)) {
cma_exch(id_priv, CMA_DESTROYING); cma_exch(id_priv, RDMA_CM_DESTROYING);
destroy = 1; destroy = 1;
} }
out: out:
@ -1678,12 +1665,12 @@ static void cma_ndev_work_handler(struct work_struct *_work)
int destroy = 0; int destroy = 0;
mutex_lock(&id_priv->handler_mutex); mutex_lock(&id_priv->handler_mutex);
if (id_priv->state == CMA_DESTROYING || if (id_priv->state == RDMA_CM_DESTROYING ||
id_priv->state == CMA_DEVICE_REMOVAL) id_priv->state == RDMA_CM_DEVICE_REMOVAL)
goto out; goto out;
if (id_priv->id.event_handler(&id_priv->id, &work->event)) { if (id_priv->id.event_handler(&id_priv->id, &work->event)) {
cma_exch(id_priv, CMA_DESTROYING); cma_exch(id_priv, RDMA_CM_DESTROYING);
destroy = 1; destroy = 1;
} }
@ -1707,8 +1694,8 @@ static int cma_resolve_ib_route(struct rdma_id_private *id_priv, int timeout_ms)
work->id = id_priv; work->id = id_priv;
INIT_WORK(&work->work, cma_work_handler); INIT_WORK(&work->work, cma_work_handler);
work->old_state = CMA_ROUTE_QUERY; work->old_state = RDMA_CM_ROUTE_QUERY;
work->new_state = CMA_ROUTE_RESOLVED; work->new_state = RDMA_CM_ROUTE_RESOLVED;
work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED; work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
route->path_rec = kmalloc(sizeof *route->path_rec, GFP_KERNEL); route->path_rec = kmalloc(sizeof *route->path_rec, GFP_KERNEL);
@ -1737,7 +1724,8 @@ int rdma_set_ib_paths(struct rdma_cm_id *id,
int ret; int ret;
id_priv = container_of(id, struct rdma_id_private, id); id_priv = container_of(id, struct rdma_id_private, id);
if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ROUTE_RESOLVED)) if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED,
RDMA_CM_ROUTE_RESOLVED))
return -EINVAL; return -EINVAL;
id->route.path_rec = kmemdup(path_rec, sizeof *path_rec * num_paths, id->route.path_rec = kmemdup(path_rec, sizeof *path_rec * num_paths,
@ -1750,7 +1738,7 @@ int rdma_set_ib_paths(struct rdma_cm_id *id,
id->route.num_paths = num_paths; id->route.num_paths = num_paths;
return 0; return 0;
err: err:
cma_comp_exch(id_priv, CMA_ROUTE_RESOLVED, CMA_ADDR_RESOLVED); cma_comp_exch(id_priv, RDMA_CM_ROUTE_RESOLVED, RDMA_CM_ADDR_RESOLVED);
return ret; return ret;
} }
EXPORT_SYMBOL(rdma_set_ib_paths); EXPORT_SYMBOL(rdma_set_ib_paths);
@ -1765,8 +1753,8 @@ static int cma_resolve_iw_route(struct rdma_id_private *id_priv, int timeout_ms)
work->id = id_priv; work->id = id_priv;
INIT_WORK(&work->work, cma_work_handler); INIT_WORK(&work->work, cma_work_handler);
work->old_state = CMA_ROUTE_QUERY; work->old_state = RDMA_CM_ROUTE_QUERY;
work->new_state = CMA_ROUTE_RESOLVED; work->new_state = RDMA_CM_ROUTE_RESOLVED;
work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED; work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
queue_work(cma_wq, &work->work); queue_work(cma_wq, &work->work);
return 0; return 0;
@ -1830,8 +1818,8 @@ static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
goto err2; goto err2;
} }
work->old_state = CMA_ROUTE_QUERY; work->old_state = RDMA_CM_ROUTE_QUERY;
work->new_state = CMA_ROUTE_RESOLVED; work->new_state = RDMA_CM_ROUTE_RESOLVED;
work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED; work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
work->event.status = 0; work->event.status = 0;
@ -1853,7 +1841,7 @@ int rdma_resolve_route(struct rdma_cm_id *id, int timeout_ms)
int ret; int ret;
id_priv = container_of(id, struct rdma_id_private, id); id_priv = container_of(id, struct rdma_id_private, id);
if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ROUTE_QUERY)) if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED, RDMA_CM_ROUTE_QUERY))
return -EINVAL; return -EINVAL;
atomic_inc(&id_priv->refcount); atomic_inc(&id_priv->refcount);
@ -1882,7 +1870,7 @@ int rdma_resolve_route(struct rdma_cm_id *id, int timeout_ms)
return 0; return 0;
err: err:
cma_comp_exch(id_priv, CMA_ROUTE_QUERY, CMA_ADDR_RESOLVED); cma_comp_exch(id_priv, RDMA_CM_ROUTE_QUERY, RDMA_CM_ADDR_RESOLVED);
cma_deref_id(id_priv); cma_deref_id(id_priv);
return ret; return ret;
} }
@ -1941,14 +1929,16 @@ static void addr_handler(int status, struct sockaddr *src_addr,
memset(&event, 0, sizeof event); memset(&event, 0, sizeof event);
mutex_lock(&id_priv->handler_mutex); mutex_lock(&id_priv->handler_mutex);
if (!cma_comp_exch(id_priv, CMA_ADDR_QUERY, CMA_ADDR_RESOLVED)) if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY,
RDMA_CM_ADDR_RESOLVED))
goto out; goto out;
if (!status && !id_priv->cma_dev) if (!status && !id_priv->cma_dev)
status = cma_acquire_dev(id_priv); status = cma_acquire_dev(id_priv);
if (status) { if (status) {
if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ADDR_BOUND)) if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED,
RDMA_CM_ADDR_BOUND))
goto out; goto out;
event.event = RDMA_CM_EVENT_ADDR_ERROR; event.event = RDMA_CM_EVENT_ADDR_ERROR;
event.status = status; event.status = status;
@ -1959,7 +1949,7 @@ static void addr_handler(int status, struct sockaddr *src_addr,
} }
if (id_priv->id.event_handler(&id_priv->id, &event)) { if (id_priv->id.event_handler(&id_priv->id, &event)) {
cma_exch(id_priv, CMA_DESTROYING); cma_exch(id_priv, RDMA_CM_DESTROYING);
mutex_unlock(&id_priv->handler_mutex); mutex_unlock(&id_priv->handler_mutex);
cma_deref_id(id_priv); cma_deref_id(id_priv);
rdma_destroy_id(&id_priv->id); rdma_destroy_id(&id_priv->id);
@ -2004,8 +1994,8 @@ static int cma_resolve_loopback(struct rdma_id_private *id_priv)
work->id = id_priv; work->id = id_priv;
INIT_WORK(&work->work, cma_work_handler); INIT_WORK(&work->work, cma_work_handler);
work->old_state = CMA_ADDR_QUERY; work->old_state = RDMA_CM_ADDR_QUERY;
work->new_state = CMA_ADDR_RESOLVED; work->new_state = RDMA_CM_ADDR_RESOLVED;
work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED; work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
queue_work(cma_wq, &work->work); queue_work(cma_wq, &work->work);
return 0; return 0;
@ -2034,13 +2024,13 @@ int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
int ret; int ret;
id_priv = container_of(id, struct rdma_id_private, id); id_priv = container_of(id, struct rdma_id_private, id);
if (id_priv->state == CMA_IDLE) { if (id_priv->state == RDMA_CM_IDLE) {
ret = cma_bind_addr(id, src_addr, dst_addr); ret = cma_bind_addr(id, src_addr, dst_addr);
if (ret) if (ret)
return ret; return ret;
} }
if (!cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_ADDR_QUERY)) if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_ADDR_QUERY))
return -EINVAL; return -EINVAL;
atomic_inc(&id_priv->refcount); atomic_inc(&id_priv->refcount);
@ -2056,7 +2046,7 @@ int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
return 0; return 0;
err: err:
cma_comp_exch(id_priv, CMA_ADDR_QUERY, CMA_ADDR_BOUND); cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY, RDMA_CM_ADDR_BOUND);
cma_deref_id(id_priv); cma_deref_id(id_priv);
return ret; return ret;
} }
@ -2070,7 +2060,7 @@ int rdma_set_reuseaddr(struct rdma_cm_id *id, int reuse)
id_priv = container_of(id, struct rdma_id_private, id); id_priv = container_of(id, struct rdma_id_private, id);
spin_lock_irqsave(&id_priv->lock, flags); spin_lock_irqsave(&id_priv->lock, flags);
if (id_priv->state == CMA_IDLE) { if (id_priv->state == RDMA_CM_IDLE) {
id_priv->reuseaddr = reuse; id_priv->reuseaddr = reuse;
ret = 0; ret = 0;
} else { } else {
@ -2177,7 +2167,7 @@ static int cma_check_port(struct rdma_bind_list *bind_list,
if (id_priv == cur_id) if (id_priv == cur_id)
continue; continue;
if ((cur_id->state == CMA_LISTEN) || if ((cur_id->state == RDMA_CM_LISTEN) ||
!reuseaddr || !cur_id->reuseaddr) { !reuseaddr || !cur_id->reuseaddr) {
cur_addr = (struct sockaddr *) &cur_id->id.route.addr.src_addr; cur_addr = (struct sockaddr *) &cur_id->id.route.addr.src_addr;
if (cma_any_addr(cur_addr)) if (cma_any_addr(cur_addr))
@ -2280,14 +2270,14 @@ int rdma_listen(struct rdma_cm_id *id, int backlog)
int ret; int ret;
id_priv = container_of(id, struct rdma_id_private, id); id_priv = container_of(id, struct rdma_id_private, id);
if (id_priv->state == CMA_IDLE) { if (id_priv->state == RDMA_CM_IDLE) {
((struct sockaddr *) &id->route.addr.src_addr)->sa_family = AF_INET; ((struct sockaddr *) &id->route.addr.src_addr)->sa_family = AF_INET;
ret = rdma_bind_addr(id, (struct sockaddr *) &id->route.addr.src_addr); ret = rdma_bind_addr(id, (struct sockaddr *) &id->route.addr.src_addr);
if (ret) if (ret)
return ret; return ret;
} }
if (!cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_LISTEN)) if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_LISTEN))
return -EINVAL; return -EINVAL;
if (id_priv->reuseaddr) { if (id_priv->reuseaddr) {
@ -2319,7 +2309,7 @@ int rdma_listen(struct rdma_cm_id *id, int backlog)
return 0; return 0;
err: err:
id_priv->backlog = 0; id_priv->backlog = 0;
cma_comp_exch(id_priv, CMA_LISTEN, CMA_ADDR_BOUND); cma_comp_exch(id_priv, RDMA_CM_LISTEN, RDMA_CM_ADDR_BOUND);
return ret; return ret;
} }
EXPORT_SYMBOL(rdma_listen); EXPORT_SYMBOL(rdma_listen);
@ -2333,7 +2323,7 @@ int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
return -EAFNOSUPPORT; return -EAFNOSUPPORT;
id_priv = container_of(id, struct rdma_id_private, id); id_priv = container_of(id, struct rdma_id_private, id);
if (!cma_comp_exch(id_priv, CMA_IDLE, CMA_ADDR_BOUND)) if (!cma_comp_exch(id_priv, RDMA_CM_IDLE, RDMA_CM_ADDR_BOUND))
return -EINVAL; return -EINVAL;
ret = cma_check_linklocal(&id->route.addr.dev_addr, addr); ret = cma_check_linklocal(&id->route.addr.dev_addr, addr);
@ -2360,7 +2350,7 @@ err2:
if (id_priv->cma_dev) if (id_priv->cma_dev)
cma_release_dev(id_priv); cma_release_dev(id_priv);
err1: err1:
cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_IDLE); cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_IDLE);
return ret; return ret;
} }
EXPORT_SYMBOL(rdma_bind_addr); EXPORT_SYMBOL(rdma_bind_addr);
@ -2433,7 +2423,7 @@ static int cma_sidr_rep_handler(struct ib_cm_id *cm_id,
struct ib_cm_sidr_rep_event_param *rep = &ib_event->param.sidr_rep_rcvd; struct ib_cm_sidr_rep_event_param *rep = &ib_event->param.sidr_rep_rcvd;
int ret = 0; int ret = 0;
if (cma_disable_callback(id_priv, CMA_CONNECT)) if (cma_disable_callback(id_priv, RDMA_CM_CONNECT))
return 0; return 0;
memset(&event, 0, sizeof event); memset(&event, 0, sizeof event);
@ -2479,7 +2469,7 @@ static int cma_sidr_rep_handler(struct ib_cm_id *cm_id,
if (ret) { if (ret) {
/* Destroy the CM ID by returning a non-zero value. */ /* Destroy the CM ID by returning a non-zero value. */
id_priv->cm_id.ib = NULL; id_priv->cm_id.ib = NULL;
cma_exch(id_priv, CMA_DESTROYING); cma_exch(id_priv, RDMA_CM_DESTROYING);
mutex_unlock(&id_priv->handler_mutex); mutex_unlock(&id_priv->handler_mutex);
rdma_destroy_id(&id_priv->id); rdma_destroy_id(&id_priv->id);
return ret; return ret;
@ -2645,7 +2635,7 @@ int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
int ret; int ret;
id_priv = container_of(id, struct rdma_id_private, id); id_priv = container_of(id, struct rdma_id_private, id);
if (!cma_comp_exch(id_priv, CMA_ROUTE_RESOLVED, CMA_CONNECT)) if (!cma_comp_exch(id_priv, RDMA_CM_ROUTE_RESOLVED, RDMA_CM_CONNECT))
return -EINVAL; return -EINVAL;
if (!id->qp) { if (!id->qp) {
@ -2655,7 +2645,7 @@ int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
switch (rdma_node_get_transport(id->device->node_type)) { switch (rdma_node_get_transport(id->device->node_type)) {
case RDMA_TRANSPORT_IB: case RDMA_TRANSPORT_IB:
if (cma_is_ud_ps(id->ps)) if (id->qp_type == IB_QPT_UD)
ret = cma_resolve_ib_udp(id_priv, conn_param); ret = cma_resolve_ib_udp(id_priv, conn_param);
else else
ret = cma_connect_ib(id_priv, conn_param); ret = cma_connect_ib(id_priv, conn_param);
@ -2672,7 +2662,7 @@ int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
return 0; return 0;
err: err:
cma_comp_exch(id_priv, CMA_CONNECT, CMA_ROUTE_RESOLVED); cma_comp_exch(id_priv, RDMA_CM_CONNECT, RDMA_CM_ROUTE_RESOLVED);
return ret; return ret;
} }
EXPORT_SYMBOL(rdma_connect); EXPORT_SYMBOL(rdma_connect);
@ -2758,7 +2748,10 @@ int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
int ret; int ret;
id_priv = container_of(id, struct rdma_id_private, id); id_priv = container_of(id, struct rdma_id_private, id);
if (!cma_comp(id_priv, CMA_CONNECT))
id_priv->owner = task_pid_nr(current);
if (!cma_comp(id_priv, RDMA_CM_CONNECT))
return -EINVAL; return -EINVAL;
if (!id->qp && conn_param) { if (!id->qp && conn_param) {
@ -2768,7 +2761,7 @@ int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
switch (rdma_node_get_transport(id->device->node_type)) { switch (rdma_node_get_transport(id->device->node_type)) {
case RDMA_TRANSPORT_IB: case RDMA_TRANSPORT_IB:
if (cma_is_ud_ps(id->ps)) if (id->qp_type == IB_QPT_UD)
ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS, ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS,
conn_param->private_data, conn_param->private_data,
conn_param->private_data_len); conn_param->private_data_len);
@ -2829,7 +2822,7 @@ int rdma_reject(struct rdma_cm_id *id, const void *private_data,
switch (rdma_node_get_transport(id->device->node_type)) { switch (rdma_node_get_transport(id->device->node_type)) {
case RDMA_TRANSPORT_IB: case RDMA_TRANSPORT_IB:
if (cma_is_ud_ps(id->ps)) if (id->qp_type == IB_QPT_UD)
ret = cma_send_sidr_rep(id_priv, IB_SIDR_REJECT, ret = cma_send_sidr_rep(id_priv, IB_SIDR_REJECT,
private_data, private_data_len); private_data, private_data_len);
else else
@ -2887,8 +2880,8 @@ static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
int ret; int ret;
id_priv = mc->id_priv; id_priv = mc->id_priv;
if (cma_disable_callback(id_priv, CMA_ADDR_BOUND) && if (cma_disable_callback(id_priv, RDMA_CM_ADDR_BOUND) &&
cma_disable_callback(id_priv, CMA_ADDR_RESOLVED)) cma_disable_callback(id_priv, RDMA_CM_ADDR_RESOLVED))
return 0; return 0;
mutex_lock(&id_priv->qp_mutex); mutex_lock(&id_priv->qp_mutex);
@ -2912,7 +2905,7 @@ static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
ret = id_priv->id.event_handler(&id_priv->id, &event); ret = id_priv->id.event_handler(&id_priv->id, &event);
if (ret) { if (ret) {
cma_exch(id_priv, CMA_DESTROYING); cma_exch(id_priv, RDMA_CM_DESTROYING);
mutex_unlock(&id_priv->handler_mutex); mutex_unlock(&id_priv->handler_mutex);
rdma_destroy_id(&id_priv->id); rdma_destroy_id(&id_priv->id);
return 0; return 0;
@ -3095,8 +3088,8 @@ int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
int ret; int ret;
id_priv = container_of(id, struct rdma_id_private, id); id_priv = container_of(id, struct rdma_id_private, id);
if (!cma_comp(id_priv, CMA_ADDR_BOUND) && if (!cma_comp(id_priv, RDMA_CM_ADDR_BOUND) &&
!cma_comp(id_priv, CMA_ADDR_RESOLVED)) !cma_comp(id_priv, RDMA_CM_ADDR_RESOLVED))
return -EINVAL; return -EINVAL;
mc = kmalloc(sizeof *mc, GFP_KERNEL); mc = kmalloc(sizeof *mc, GFP_KERNEL);
@ -3261,19 +3254,19 @@ static void cma_add_one(struct ib_device *device)
static int cma_remove_id_dev(struct rdma_id_private *id_priv) static int cma_remove_id_dev(struct rdma_id_private *id_priv)
{ {
struct rdma_cm_event event; struct rdma_cm_event event;
enum cma_state state; enum rdma_cm_state state;
int ret = 0; int ret = 0;
/* Record that we want to remove the device */ /* Record that we want to remove the device */
state = cma_exch(id_priv, CMA_DEVICE_REMOVAL); state = cma_exch(id_priv, RDMA_CM_DEVICE_REMOVAL);
if (state == CMA_DESTROYING) if (state == RDMA_CM_DESTROYING)
return 0; return 0;
cma_cancel_operation(id_priv, state); cma_cancel_operation(id_priv, state);
mutex_lock(&id_priv->handler_mutex); mutex_lock(&id_priv->handler_mutex);
/* Check for destruction from another callback. */ /* Check for destruction from another callback. */
if (!cma_comp(id_priv, CMA_DEVICE_REMOVAL)) if (!cma_comp(id_priv, RDMA_CM_DEVICE_REMOVAL))
goto out; goto out;
memset(&event, 0, sizeof event); memset(&event, 0, sizeof event);
@ -3328,6 +3321,100 @@ static void cma_remove_one(struct ib_device *device)
kfree(cma_dev); kfree(cma_dev);
} }
static int cma_get_id_stats(struct sk_buff *skb, struct netlink_callback *cb)
{
struct nlmsghdr *nlh;
struct rdma_cm_id_stats *id_stats;
struct rdma_id_private *id_priv;
struct rdma_cm_id *id = NULL;
struct cma_device *cma_dev;
int i_dev = 0, i_id = 0;
/*
* We export all of the IDs as a sequence of messages. Each
* ID gets its own netlink message.
*/
mutex_lock(&lock);
list_for_each_entry(cma_dev, &dev_list, list) {
if (i_dev < cb->args[0]) {
i_dev++;
continue;
}
i_id = 0;
list_for_each_entry(id_priv, &cma_dev->id_list, list) {
if (i_id < cb->args[1]) {
i_id++;
continue;
}
id_stats = ibnl_put_msg(skb, &nlh, cb->nlh->nlmsg_seq,
sizeof *id_stats, RDMA_NL_RDMA_CM,
RDMA_NL_RDMA_CM_ID_STATS);
if (!id_stats)
goto out;
memset(id_stats, 0, sizeof *id_stats);
id = &id_priv->id;
id_stats->node_type = id->route.addr.dev_addr.dev_type;
id_stats->port_num = id->port_num;
id_stats->bound_dev_if =
id->route.addr.dev_addr.bound_dev_if;
if (id->route.addr.src_addr.ss_family == AF_INET) {
if (ibnl_put_attr(skb, nlh,
sizeof(struct sockaddr_in),
&id->route.addr.src_addr,
RDMA_NL_RDMA_CM_ATTR_SRC_ADDR)) {
goto out;
}
if (ibnl_put_attr(skb, nlh,
sizeof(struct sockaddr_in),
&id->route.addr.dst_addr,
RDMA_NL_RDMA_CM_ATTR_DST_ADDR)) {
goto out;
}
} else if (id->route.addr.src_addr.ss_family == AF_INET6) {
if (ibnl_put_attr(skb, nlh,
sizeof(struct sockaddr_in6),
&id->route.addr.src_addr,
RDMA_NL_RDMA_CM_ATTR_SRC_ADDR)) {
goto out;
}
if (ibnl_put_attr(skb, nlh,
sizeof(struct sockaddr_in6),
&id->route.addr.dst_addr,
RDMA_NL_RDMA_CM_ATTR_DST_ADDR)) {
goto out;
}
}
id_stats->pid = id_priv->owner;
id_stats->port_space = id->ps;
id_stats->cm_state = id_priv->state;
id_stats->qp_num = id_priv->qp_num;
id_stats->qp_type = id->qp_type;
i_id++;
}
cb->args[1] = 0;
i_dev++;
}
out:
mutex_unlock(&lock);
cb->args[0] = i_dev;
cb->args[1] = i_id;
return skb->len;
}
static const struct ibnl_client_cbs cma_cb_table[] = {
[RDMA_NL_RDMA_CM_ID_STATS] = { .dump = cma_get_id_stats },
};
static int __init cma_init(void) static int __init cma_init(void)
{ {
int ret; int ret;
@ -3343,6 +3430,10 @@ static int __init cma_init(void)
ret = ib_register_client(&cma_client); ret = ib_register_client(&cma_client);
if (ret) if (ret)
goto err; goto err;
if (ibnl_add_client(RDMA_NL_RDMA_CM, RDMA_NL_RDMA_CM_NUM_OPS, cma_cb_table))
printk(KERN_WARNING "RDMA CMA: failed to add netlink callback\n");
return 0; return 0;
err: err:
@ -3355,6 +3446,7 @@ err:
static void __exit cma_cleanup(void) static void __exit cma_cleanup(void)
{ {
ibnl_remove_client(RDMA_NL_RDMA_CM);
ib_unregister_client(&cma_client); ib_unregister_client(&cma_client);
unregister_netdevice_notifier(&cma_nb); unregister_netdevice_notifier(&cma_nb);
rdma_addr_unregister_client(&addr_client); rdma_addr_unregister_client(&addr_client);
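
The cma.c hunks above also register a netlink dump callback, cma_get_id_stats(), under the new RDMA_NL_RDMA_CM client; it walks every cma_device and emits one NLM_F_MULTI message per rdma_cm_id, carrying a struct rdma_cm_id_stats plus source/destination address attributes. As a rough illustration of how a userspace consumer might drive that dump over the new NETLINK_RDMA socket family, here is a hypothetical sketch (the constants mirror the rdma_netlink.h and linux/netlink.h additions later in this diff; the program itself is not part of the merge):

/* Hypothetical userspace sketch: request a dump of RDMA CM ID statistics. */
#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>

#define NETLINK_RDMA			20	/* from the linux/netlink.h hunk */
#define RDMA_NL_RDMA_CM			1	/* from rdma_netlink.h */
#define RDMA_NL_RDMA_CM_ID_STATS	0
#define RDMA_NL_GET_TYPE(client, op)	(((client) << 10) + (op))

int main(void)
{
	struct sockaddr_nl sa = { .nl_family = AF_NETLINK };
	struct nlmsghdr req = {
		.nlmsg_len   = NLMSG_LENGTH(0),
		.nlmsg_type  = RDMA_NL_GET_TYPE(RDMA_NL_RDMA_CM,
						RDMA_NL_RDMA_CM_ID_STATS),
		.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP,
		.nlmsg_seq   = 1,
	};
	char buf[8192];
	int fd, len, ids = 0;

	fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_RDMA);
	if (fd < 0 || bind(fd, (struct sockaddr *) &sa, sizeof(sa)) < 0)
		return 1;
	if (send(fd, &req, req.nlmsg_len, 0) < 0)
		return 1;

	/* cma_get_id_stats() emits one NLM_F_MULTI message per ID; read
	 * until the dump is terminated with NLMSG_DONE. */
	while ((len = recv(fd, buf, sizeof(buf), 0)) > 0) {
		struct nlmsghdr *nlh = (struct nlmsghdr *) buf;

		for (; NLMSG_OK(nlh, len); nlh = NLMSG_NEXT(nlh, len)) {
			if (nlh->nlmsg_type == NLMSG_DONE) {
				printf("%d RDMA CM IDs dumped\n", ids);
				close(fd);
				return 0;
			}
			ids++;
		}
	}
	close(fd);
	return 1;
}

A real consumer would go on to parse the struct rdma_cm_id_stats payload and the RDMA_NL_RDMA_CM_ATTR_SRC_ADDR/DST_ADDR attributes carried in each message.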

View file

@ -38,6 +38,7 @@
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/init.h> #include <linux/init.h>
#include <linux/mutex.h> #include <linux/mutex.h>
#include <rdma/rdma_netlink.h>
#include "core_priv.h" #include "core_priv.h"
@ -725,22 +726,40 @@ static int __init ib_core_init(void)
return -ENOMEM; return -ENOMEM;
ret = ib_sysfs_setup(); ret = ib_sysfs_setup();
if (ret) if (ret) {
printk(KERN_WARNING "Couldn't create InfiniBand device class\n"); printk(KERN_WARNING "Couldn't create InfiniBand device class\n");
goto err;
}
ret = ibnl_init();
if (ret) {
printk(KERN_WARNING "Couldn't init IB netlink interface\n");
goto err_sysfs;
}
ret = ib_cache_setup(); ret = ib_cache_setup();
if (ret) { if (ret) {
printk(KERN_WARNING "Couldn't set up InfiniBand P_Key/GID cache\n"); printk(KERN_WARNING "Couldn't set up InfiniBand P_Key/GID cache\n");
ib_sysfs_cleanup(); goto err_nl;
destroy_workqueue(ib_wq);
} }
return 0;
err_nl:
ibnl_cleanup();
err_sysfs:
ib_sysfs_cleanup();
err:
destroy_workqueue(ib_wq);
return ret; return ret;
} }
static void __exit ib_core_cleanup(void) static void __exit ib_core_cleanup(void)
{ {
ib_cache_cleanup(); ib_cache_cleanup();
ibnl_cleanup();
ib_sysfs_cleanup(); ib_sysfs_cleanup();
/* Make sure that any pending umem accounting work is done. */ /* Make sure that any pending umem accounting work is done. */
destroy_workqueue(ib_wq); destroy_workqueue(ib_wq);

View file

@ -276,6 +276,13 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
goto error1; goto error1;
} }
/* Verify the QP requested is supported. For example, Ethernet devices
* will not have QP0 */
if (!port_priv->qp_info[qpn].qp) {
ret = ERR_PTR(-EPROTONOSUPPORT);
goto error1;
}
/* Allocate structures */ /* Allocate structures */
mad_agent_priv = kzalloc(sizeof *mad_agent_priv, GFP_KERNEL); mad_agent_priv = kzalloc(sizeof *mad_agent_priv, GFP_KERNEL);
if (!mad_agent_priv) { if (!mad_agent_priv) {

View file

@ -0,0 +1,190 @@
/*
* Copyright (c) 2010 Voltaire Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#define pr_fmt(fmt) "%s:%s: " fmt, KBUILD_MODNAME, __func__
#include <net/netlink.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <rdma/rdma_netlink.h>
struct ibnl_client {
struct list_head list;
int index;
int nops;
const struct ibnl_client_cbs *cb_table;
};
static DEFINE_MUTEX(ibnl_mutex);
static struct sock *nls;
static LIST_HEAD(client_list);
int ibnl_add_client(int index, int nops,
const struct ibnl_client_cbs cb_table[])
{
struct ibnl_client *cur;
struct ibnl_client *nl_client;
nl_client = kmalloc(sizeof *nl_client, GFP_KERNEL);
if (!nl_client)
return -ENOMEM;
nl_client->index = index;
nl_client->nops = nops;
nl_client->cb_table = cb_table;
mutex_lock(&ibnl_mutex);
list_for_each_entry(cur, &client_list, list) {
if (cur->index == index) {
pr_warn("Client for %d already exists\n", index);
mutex_unlock(&ibnl_mutex);
kfree(nl_client);
return -EINVAL;
}
}
list_add_tail(&nl_client->list, &client_list);
mutex_unlock(&ibnl_mutex);
return 0;
}
EXPORT_SYMBOL(ibnl_add_client);
int ibnl_remove_client(int index)
{
struct ibnl_client *cur, *next;
mutex_lock(&ibnl_mutex);
list_for_each_entry_safe(cur, next, &client_list, list) {
if (cur->index == index) {
list_del(&(cur->list));
mutex_unlock(&ibnl_mutex);
kfree(cur);
return 0;
}
}
pr_warn("Can't remove callback for client idx %d. Not found\n", index);
mutex_unlock(&ibnl_mutex);
return -EINVAL;
}
EXPORT_SYMBOL(ibnl_remove_client);
void *ibnl_put_msg(struct sk_buff *skb, struct nlmsghdr **nlh, int seq,
int len, int client, int op)
{
unsigned char *prev_tail;
prev_tail = skb_tail_pointer(skb);
*nlh = NLMSG_NEW(skb, 0, seq, RDMA_NL_GET_TYPE(client, op),
len, NLM_F_MULTI);
(*nlh)->nlmsg_len = skb_tail_pointer(skb) - prev_tail;
return NLMSG_DATA(*nlh);
nlmsg_failure:
nlmsg_trim(skb, prev_tail);
return NULL;
}
EXPORT_SYMBOL(ibnl_put_msg);
int ibnl_put_attr(struct sk_buff *skb, struct nlmsghdr *nlh,
int len, void *data, int type)
{
unsigned char *prev_tail;
prev_tail = skb_tail_pointer(skb);
NLA_PUT(skb, type, len, data);
nlh->nlmsg_len += skb_tail_pointer(skb) - prev_tail;
return 0;
nla_put_failure:
nlmsg_trim(skb, prev_tail - nlh->nlmsg_len);
return -EMSGSIZE;
}
EXPORT_SYMBOL(ibnl_put_attr);
static int ibnl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
{
struct ibnl_client *client;
int type = nlh->nlmsg_type;
int index = RDMA_NL_GET_CLIENT(type);
int op = RDMA_NL_GET_OP(type);
list_for_each_entry(client, &client_list, list) {
if (client->index == index) {
if (op < 0 || op >= client->nops ||
!client->cb_table[RDMA_NL_GET_OP(op)].dump)
return -EINVAL;
return netlink_dump_start(nls, skb, nlh,
client->cb_table[op].dump,
NULL);
}
}
pr_info("Index %d wasn't found in client list\n", index);
return -EINVAL;
}
static void ibnl_rcv(struct sk_buff *skb)
{
mutex_lock(&ibnl_mutex);
netlink_rcv_skb(skb, &ibnl_rcv_msg);
mutex_unlock(&ibnl_mutex);
}
int __init ibnl_init(void)
{
nls = netlink_kernel_create(&init_net, NETLINK_RDMA, 0, ibnl_rcv,
NULL, THIS_MODULE);
if (!nls) {
pr_warn("Failed to create netlink socket\n");
return -ENOMEM;
}
return 0;
}
void ibnl_cleanup(void)
{
struct ibnl_client *cur, *next;
mutex_lock(&ibnl_mutex);
list_for_each_entry_safe(cur, next, &client_list, list) {
list_del(&(cur->list));
kfree(cur);
}
mutex_unlock(&ibnl_mutex);
netlink_kernel_release(nls);
}

View file

@ -367,13 +367,28 @@ done:
return ret; return ret;
} }
static ssize_t ucma_create_id(struct ucma_file *file, static int ucma_get_qp_type(struct rdma_ucm_create_id *cmd, enum ib_qp_type *qp_type)
const char __user *inbuf, {
int in_len, int out_len) switch (cmd->ps) {
case RDMA_PS_TCP:
*qp_type = IB_QPT_RC;
return 0;
case RDMA_PS_UDP:
case RDMA_PS_IPOIB:
*qp_type = IB_QPT_UD;
return 0;
default:
return -EINVAL;
}
}
static ssize_t ucma_create_id(struct ucma_file *file, const char __user *inbuf,
int in_len, int out_len)
{ {
struct rdma_ucm_create_id cmd; struct rdma_ucm_create_id cmd;
struct rdma_ucm_create_id_resp resp; struct rdma_ucm_create_id_resp resp;
struct ucma_context *ctx; struct ucma_context *ctx;
enum ib_qp_type qp_type;
int ret; int ret;
if (out_len < sizeof(resp)) if (out_len < sizeof(resp))
@ -382,6 +397,10 @@ static ssize_t ucma_create_id(struct ucma_file *file,
if (copy_from_user(&cmd, inbuf, sizeof(cmd))) if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
return -EFAULT; return -EFAULT;
ret = ucma_get_qp_type(&cmd, &qp_type);
if (ret)
return ret;
mutex_lock(&file->mut); mutex_lock(&file->mut);
ctx = ucma_alloc_ctx(file); ctx = ucma_alloc_ctx(file);
mutex_unlock(&file->mut); mutex_unlock(&file->mut);
@ -389,7 +408,7 @@ static ssize_t ucma_create_id(struct ucma_file *file,
return -ENOMEM; return -ENOMEM;
ctx->uid = cmd.uid; ctx->uid = cmd.uid;
ctx->cm_id = rdma_create_id(ucma_event_handler, ctx, cmd.ps); ctx->cm_id = rdma_create_id(ucma_event_handler, ctx, cmd.ps, qp_type);
if (IS_ERR(ctx->cm_id)) { if (IS_ERR(ctx->cm_id)) {
ret = PTR_ERR(ctx->cm_id); ret = PTR_ERR(ctx->cm_id);
goto err1; goto err1;
@ -1338,9 +1357,11 @@ static const struct file_operations ucma_fops = {
}; };
static struct miscdevice ucma_misc = { static struct miscdevice ucma_misc = {
.minor = MISC_DYNAMIC_MINOR, .minor = MISC_DYNAMIC_MINOR,
.name = "rdma_cm", .name = "rdma_cm",
.fops = &ucma_fops, .nodename = "infiniband/rdma_cm",
.mode = 0666,
.fops = &ucma_fops,
}; };
static ssize_t show_abi_version(struct device *dev, static ssize_t show_abi_version(struct device *dev,

View file

@ -1176,6 +1176,11 @@ static void ib_umad_remove_one(struct ib_device *device)
kref_put(&umad_dev->ref, ib_umad_release_dev); kref_put(&umad_dev->ref, ib_umad_release_dev);
} }
static char *umad_devnode(struct device *dev, mode_t *mode)
{
return kasprintf(GFP_KERNEL, "infiniband/%s", dev_name(dev));
}
static int __init ib_umad_init(void) static int __init ib_umad_init(void)
{ {
int ret; int ret;
@ -1194,6 +1199,8 @@ static int __init ib_umad_init(void)
goto out_chrdev; goto out_chrdev;
} }
umad_class->devnode = umad_devnode;
ret = class_create_file(umad_class, &class_attr_abi_version.attr); ret = class_create_file(umad_class, &class_attr_abi_version.attr);
if (ret) { if (ret) {
printk(KERN_ERR "user_mad: couldn't create abi_version attribute\n"); printk(KERN_ERR "user_mad: couldn't create abi_version attribute\n");

View file

@ -824,6 +824,12 @@ static void ib_uverbs_remove_one(struct ib_device *device)
kfree(uverbs_dev); kfree(uverbs_dev);
} }
static char *uverbs_devnode(struct device *dev, mode_t *mode)
{
*mode = 0666;
return kasprintf(GFP_KERNEL, "infiniband/%s", dev_name(dev));
}
static int __init ib_uverbs_init(void) static int __init ib_uverbs_init(void)
{ {
int ret; int ret;
@ -842,6 +848,8 @@ static int __init ib_uverbs_init(void)
goto out_chrdev; goto out_chrdev;
} }
uverbs_class->devnode = uverbs_devnode;
ret = class_create_file(uverbs_class, &class_attr_abi_version.attr); ret = class_create_file(uverbs_class, &class_attr_abi_version.attr);
if (ret) { if (ret) {
printk(KERN_ERR "user_verbs: couldn't create abi_version attribute\n"); printk(KERN_ERR "user_verbs: couldn't create abi_version attribute\n");

View file

@ -914,7 +914,7 @@ static void process_mpa_reply(struct iwch_ep *ep, struct sk_buff *skb)
goto err; goto err;
if (peer2peer && iwch_rqes_posted(ep->com.qp) == 0) { if (peer2peer && iwch_rqes_posted(ep->com.qp) == 0) {
iwch_post_zb_read(ep->com.qp); iwch_post_zb_read(ep);
} }
goto out; goto out;
@ -1078,6 +1078,8 @@ static int tx_ack(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
struct iwch_ep *ep = ctx; struct iwch_ep *ep = ctx;
struct cpl_wr_ack *hdr = cplhdr(skb); struct cpl_wr_ack *hdr = cplhdr(skb);
unsigned int credits = ntohs(hdr->credits); unsigned int credits = ntohs(hdr->credits);
unsigned long flags;
int post_zb = 0;
PDBG("%s ep %p credits %u\n", __func__, ep, credits); PDBG("%s ep %p credits %u\n", __func__, ep, credits);
@ -1087,28 +1089,34 @@ static int tx_ack(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
return CPL_RET_BUF_DONE; return CPL_RET_BUF_DONE;
} }
spin_lock_irqsave(&ep->com.lock, flags);
BUG_ON(credits != 1); BUG_ON(credits != 1);
dst_confirm(ep->dst); dst_confirm(ep->dst);
if (!ep->mpa_skb) { if (!ep->mpa_skb) {
PDBG("%s rdma_init wr_ack ep %p state %u\n", PDBG("%s rdma_init wr_ack ep %p state %u\n",
__func__, ep, state_read(&ep->com)); __func__, ep, ep->com.state);
if (ep->mpa_attr.initiator) { if (ep->mpa_attr.initiator) {
PDBG("%s initiator ep %p state %u\n", PDBG("%s initiator ep %p state %u\n",
__func__, ep, state_read(&ep->com)); __func__, ep, ep->com.state);
if (peer2peer) if (peer2peer && ep->com.state == FPDU_MODE)
iwch_post_zb_read(ep->com.qp); post_zb = 1;
} else { } else {
PDBG("%s responder ep %p state %u\n", PDBG("%s responder ep %p state %u\n",
__func__, ep, state_read(&ep->com)); __func__, ep, ep->com.state);
ep->com.rpl_done = 1; if (ep->com.state == MPA_REQ_RCVD) {
wake_up(&ep->com.waitq); ep->com.rpl_done = 1;
wake_up(&ep->com.waitq);
}
} }
} else { } else {
PDBG("%s lsm ack ep %p state %u freeing skb\n", PDBG("%s lsm ack ep %p state %u freeing skb\n",
__func__, ep, state_read(&ep->com)); __func__, ep, ep->com.state);
kfree_skb(ep->mpa_skb); kfree_skb(ep->mpa_skb);
ep->mpa_skb = NULL; ep->mpa_skb = NULL;
} }
spin_unlock_irqrestore(&ep->com.lock, flags);
if (post_zb)
iwch_post_zb_read(ep);
return CPL_RET_BUF_DONE; return CPL_RET_BUF_DONE;
} }

View file

@ -332,7 +332,7 @@ int iwch_bind_mw(struct ib_qp *qp,
struct ib_mw_bind *mw_bind); struct ib_mw_bind *mw_bind);
int iwch_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc); int iwch_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
int iwch_post_terminate(struct iwch_qp *qhp, struct respQ_msg_t *rsp_msg); int iwch_post_terminate(struct iwch_qp *qhp, struct respQ_msg_t *rsp_msg);
int iwch_post_zb_read(struct iwch_qp *qhp); int iwch_post_zb_read(struct iwch_ep *ep);
int iwch_register_device(struct iwch_dev *dev); int iwch_register_device(struct iwch_dev *dev);
void iwch_unregister_device(struct iwch_dev *dev); void iwch_unregister_device(struct iwch_dev *dev);
void stop_read_rep_timer(struct iwch_qp *qhp); void stop_read_rep_timer(struct iwch_qp *qhp);

View file

@ -738,7 +738,7 @@ static inline void build_term_codes(struct respQ_msg_t *rsp_msg,
} }
} }
int iwch_post_zb_read(struct iwch_qp *qhp) int iwch_post_zb_read(struct iwch_ep *ep)
{ {
union t3_wr *wqe; union t3_wr *wqe;
struct sk_buff *skb; struct sk_buff *skb;
@ -761,10 +761,10 @@ int iwch_post_zb_read(struct iwch_qp *qhp)
wqe->read.local_len = cpu_to_be32(0); wqe->read.local_len = cpu_to_be32(0);
wqe->read.local_to = cpu_to_be64(1); wqe->read.local_to = cpu_to_be64(1);
wqe->send.wrh.op_seop_flags = cpu_to_be32(V_FW_RIWR_OP(T3_WR_READ)); wqe->send.wrh.op_seop_flags = cpu_to_be32(V_FW_RIWR_OP(T3_WR_READ));
wqe->send.wrh.gen_tid_len = cpu_to_be32(V_FW_RIWR_TID(qhp->ep->hwtid)| wqe->send.wrh.gen_tid_len = cpu_to_be32(V_FW_RIWR_TID(ep->hwtid)|
V_FW_RIWR_LEN(flit_cnt)); V_FW_RIWR_LEN(flit_cnt));
skb->priority = CPL_PRIORITY_DATA; skb->priority = CPL_PRIORITY_DATA;
return iwch_cxgb3_ofld_send(qhp->rhp->rdev.t3cdev_p, skb); return iwch_cxgb3_ofld_send(ep->com.qp->rhp->rdev.t3cdev_p, skb);
} }
/* /*

View file

@ -35,7 +35,7 @@
#include <linux/list.h> #include <linux/list.h>
#include <linux/spinlock.h> #include <linux/spinlock.h>
#include <linux/idr.h> #include <linux/idr.h>
#include <linux/workqueue.h> #include <linux/completion.h>
#include <linux/netdevice.h> #include <linux/netdevice.h>
#include <linux/sched.h> #include <linux/sched.h>
#include <linux/pci.h> #include <linux/pci.h>
@ -131,28 +131,21 @@ static inline int c4iw_num_stags(struct c4iw_rdev *rdev)
#define C4IW_WR_TO (10*HZ) #define C4IW_WR_TO (10*HZ)
enum {
REPLY_READY = 0,
};
struct c4iw_wr_wait { struct c4iw_wr_wait {
wait_queue_head_t wait; struct completion completion;
unsigned long status;
int ret; int ret;
}; };
static inline void c4iw_init_wr_wait(struct c4iw_wr_wait *wr_waitp) static inline void c4iw_init_wr_wait(struct c4iw_wr_wait *wr_waitp)
{ {
wr_waitp->ret = 0; wr_waitp->ret = 0;
wr_waitp->status = 0; init_completion(&wr_waitp->completion);
init_waitqueue_head(&wr_waitp->wait);
} }
static inline void c4iw_wake_up(struct c4iw_wr_wait *wr_waitp, int ret) static inline void c4iw_wake_up(struct c4iw_wr_wait *wr_waitp, int ret)
{ {
wr_waitp->ret = ret; wr_waitp->ret = ret;
set_bit(REPLY_READY, &wr_waitp->status); complete(&wr_waitp->completion);
wake_up(&wr_waitp->wait);
} }
static inline int c4iw_wait_for_reply(struct c4iw_rdev *rdev, static inline int c4iw_wait_for_reply(struct c4iw_rdev *rdev,
@ -164,8 +157,7 @@ static inline int c4iw_wait_for_reply(struct c4iw_rdev *rdev,
int ret; int ret;
do { do {
ret = wait_event_timeout(wr_waitp->wait, ret = wait_for_completion_timeout(&wr_waitp->completion, to);
test_and_clear_bit(REPLY_READY, &wr_waitp->status), to);
if (!ret) { if (!ret) {
printk(KERN_ERR MOD "%s - Device %s not responding - " printk(KERN_ERR MOD "%s - Device %s not responding - "
"tid %u qpid %u\n", func, "tid %u qpid %u\n", func,

View file

@ -1138,7 +1138,9 @@ static ssize_t nes_store_wqm_quanta(struct device_driver *ddp,
u32 i = 0; u32 i = 0;
struct nes_device *nesdev; struct nes_device *nesdev;
strict_strtoul(buf, 0, &wqm_quanta_value); if (kstrtoul(buf, 0, &wqm_quanta_value) < 0)
return -EINVAL;
list_for_each_entry(nesdev, &nes_dev_list, list) { list_for_each_entry(nesdev, &nes_dev_list, list) {
if (i == ee_flsh_adapter) { if (i == ee_flsh_adapter) {
nesdev->nesadapter->wqm_quanta = wqm_quanta_value; nesdev->nesadapter->wqm_quanta = wqm_quanta_value;

View file

@ -1,6 +1,6 @@
config INFINIBAND_QIB config INFINIBAND_QIB
tristate "QLogic PCIe HCA support" tristate "QLogic PCIe HCA support"
depends on 64BIT && NET depends on 64BIT
---help--- ---help---
This is a low-level driver for QLogic PCIe QLE InfiniBand host This is a low-level driver for QLogic PCIe QLE InfiniBand host
channel adapters. This driver does not support the QLogic channel adapters. This driver does not support the QLogic

View file

@ -548,7 +548,7 @@ int iser_connect(struct iser_conn *ib_conn,
iser_conn_get(ib_conn); /* ref ib conn's cma id */ iser_conn_get(ib_conn); /* ref ib conn's cma id */
ib_conn->cma_id = rdma_create_id(iser_cma_handler, ib_conn->cma_id = rdma_create_id(iser_cma_handler,
(void *)ib_conn, (void *)ib_conn,
RDMA_PS_TCP); RDMA_PS_TCP, IB_QPT_RC);
if (IS_ERR(ib_conn->cma_id)) { if (IS_ERR(ib_conn->cma_id)) {
err = PTR_ERR(ib_conn->cma_id); err = PTR_ERR(ib_conn->cma_id);
iser_err("rdma_create_id failed: %d\n", err); iser_err("rdma_create_id failed: %d\n", err);

View file

@ -1147,7 +1147,7 @@ static void srp_process_aer_req(struct srp_target_port *target,
static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc) static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
{ {
struct ib_device *dev = target->srp_host->srp_dev->dev; struct ib_device *dev = target->srp_host->srp_dev->dev;
struct srp_iu *iu = (struct srp_iu *) wc->wr_id; struct srp_iu *iu = (struct srp_iu *) (uintptr_t) wc->wr_id;
int res; int res;
u8 opcode; u8 opcode;
@ -1231,7 +1231,7 @@ static void srp_send_completion(struct ib_cq *cq, void *target_ptr)
break; break;
} }
iu = (struct srp_iu *) wc.wr_id; iu = (struct srp_iu *) (uintptr_t) wc.wr_id;
list_add(&iu->list, &target->free_tx); list_add(&iu->list, &target->free_tx);
} }
} }
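
The intermediate uintptr_t cast is what silences the "cast to pointer from integer of different size" warning on 32-bit builds, since ib_wc.wr_id is a u64. A short sketch of the round trip, with hypothetical demo_* helpers:

#include <linux/types.h>
#include <rdma/ib_verbs.h>

struct demo_iu {
	struct list_head list;
};

static u64 demo_pack_wr_id(struct demo_iu *iu)
{
	/* a pointer widens cleanly to u64 via uintptr_t */
	return (uintptr_t) iu;
}

static struct demo_iu *demo_unpack_wr_id(const struct ib_wc *wc)
{
	/* cast through uintptr_t first, so 32-bit builds do not warn
	 * about converting a 64-bit integer straight to a pointer */
	return (struct demo_iu *) (uintptr_t) wc->wr_id;
}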


@ -24,6 +24,7 @@
/* leave room for NETLINK_DM (DM Events) */ /* leave room for NETLINK_DM (DM Events) */
#define NETLINK_SCSITRANSPORT 18 /* SCSI Transports */ #define NETLINK_SCSITRANSPORT 18 /* SCSI Transports */
#define NETLINK_ECRYPTFS 19 #define NETLINK_ECRYPTFS 19
#define NETLINK_RDMA 20
#define MAX_LINKS 32 #define MAX_LINKS 32
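
A hypothetical userspace sketch of how the new protocol number could be used to request the RDMA CM ID statistics dump; the exact kernel-side request handling is not shown in this diff, so treat this purely as an illustration:

#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>

#ifndef NETLINK_RDMA
#define NETLINK_RDMA 20
#endif

/* same encoding as RDMA_NL_GET_TYPE() in <rdma/rdma_netlink.h> */
#define DEMO_NL_TYPE(client, op)	(((client) << 10) + (op))

int main(void)
{
	struct sockaddr_nl dst = { .nl_family = AF_NETLINK };	/* nl_pid 0 = kernel */
	struct nlmsghdr req = {
		.nlmsg_len   = NLMSG_LENGTH(0),
		.nlmsg_type  = DEMO_NL_TYPE(1 /* RDMA_NL_RDMA_CM */,
					    0 /* ..._ID_STATS */),
		.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP,
	};
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_RDMA);

	if (fd < 0)
		return 1;
	sendto(fd, &req, req.nlmsg_len, 0,
	       (struct sockaddr *) &dst, sizeof(dst));
	/* recv() and walk the NLMSG_* macros here to read the records */
	close(fd);
	return 0;
}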


@ -1 +1,6 @@
header-y += ib_user_cm.h
header-y += ib_user_mad.h header-y += ib_user_mad.h
header-y += ib_user_sa.h
header-y += ib_user_verbs.h
header-y += rdma_netlink.h
header-y += rdma_user_cm.h


@ -34,6 +34,7 @@
#ifndef IB_USER_CM_H #ifndef IB_USER_CM_H
#define IB_USER_CM_H #define IB_USER_CM_H
#include <linux/types.h>
#include <rdma/ib_user_sa.h> #include <rdma/ib_user_sa.h>
#define IB_USER_CM_ABI_VERSION 5 #define IB_USER_CM_ABI_VERSION 5


@ -111,6 +111,20 @@ struct rdma_cm_event {
} param; } param;
}; };
enum rdma_cm_state {
RDMA_CM_IDLE,
RDMA_CM_ADDR_QUERY,
RDMA_CM_ADDR_RESOLVED,
RDMA_CM_ROUTE_QUERY,
RDMA_CM_ROUTE_RESOLVED,
RDMA_CM_CONNECT,
RDMA_CM_DISCONNECT,
RDMA_CM_ADDR_BOUND,
RDMA_CM_LISTEN,
RDMA_CM_DEVICE_REMOVAL,
RDMA_CM_DESTROYING
};
struct rdma_cm_id; struct rdma_cm_id;
/** /**
@ -130,6 +144,7 @@ struct rdma_cm_id {
rdma_cm_event_handler event_handler; rdma_cm_event_handler event_handler;
struct rdma_route route; struct rdma_route route;
enum rdma_port_space ps; enum rdma_port_space ps;
enum ib_qp_type qp_type;
u8 port_num; u8 port_num;
}; };
@ -140,9 +155,11 @@ struct rdma_cm_id {
* returned rdma_id. * returned rdma_id.
* @context: User specified context associated with the id. * @context: User specified context associated with the id.
* @ps: RDMA port space. * @ps: RDMA port space.
* @qp_type: type of queue pair associated with the id.
*/ */
struct rdma_cm_id *rdma_create_id(rdma_cm_event_handler event_handler, struct rdma_cm_id *rdma_create_id(rdma_cm_event_handler event_handler,
void *context, enum rdma_port_space ps); void *context, enum rdma_port_space ps,
enum ib_qp_type qp_type);
/** /**
* rdma_destroy_id - Destroys an RDMA identifier. * rdma_destroy_id - Destroys an RDMA identifier.
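
A hypothetical caller updated for the new signature, supplying the QP type at ID creation time; the demo_* names are illustrative only:

#include <linux/err.h>
#include <rdma/rdma_cm.h>

static int demo_cma_handler(struct rdma_cm_id *id,
			    struct rdma_cm_event *event)
{
	return 0;	/* ignore events in this sketch */
}

static struct rdma_cm_id *demo_create_rc_id(void *context)
{
	struct rdma_cm_id *id;

	id = rdma_create_id(demo_cma_handler, context, RDMA_PS_TCP,
			    IB_QPT_RC);
	if (IS_ERR(id))
		return NULL;	/* real callers would propagate PTR_ERR(id) */
	return id;
}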


@ -0,0 +1,92 @@
#ifndef _RDMA_NETLINK_H
#define _RDMA_NETLINK_H
#include <linux/types.h>
enum {
RDMA_NL_RDMA_CM = 1
};
#define RDMA_NL_GET_CLIENT(type) ((type & (((1 << 6) - 1) << 10)) >> 10)
#define RDMA_NL_GET_OP(type) (type & ((1 << 10) - 1))
#define RDMA_NL_GET_TYPE(client, op) ((client << 10) + op)
enum {
RDMA_NL_RDMA_CM_ID_STATS = 0,
RDMA_NL_RDMA_CM_NUM_OPS
};
enum {
RDMA_NL_RDMA_CM_ATTR_SRC_ADDR = 1,
RDMA_NL_RDMA_CM_ATTR_DST_ADDR,
RDMA_NL_RDMA_CM_NUM_ATTR,
};
struct rdma_cm_id_stats {
__u32 qp_num;
__u32 bound_dev_if;
__u32 port_space;
__s32 pid;
__u8 cm_state;
__u8 node_type;
__u8 port_num;
__u8 qp_type;
};
#ifdef __KERNEL__
#include <linux/netlink.h>
struct ibnl_client_cbs {
int (*dump)(struct sk_buff *skb, struct netlink_callback *nlcb);
};
int ibnl_init(void);
void ibnl_cleanup(void);
/**
* Add a client to the list of IB netlink exporters.
* @index: Index of the added client
* @nops: Number of ops supported by the added client.
* @cb_table: A table for op->callback
*
* Returns 0 on success or a negative error code.
*/
int ibnl_add_client(int index, int nops,
const struct ibnl_client_cbs cb_table[]);
/**
* Remove a client from IB netlink.
* @index: Index of the removed IB client.
*
* Returns 0 on success or a negative error code.
*/
int ibnl_remove_client(int index);
/**
* Put a new message in a supplied skb.
* @skb: The netlink skb.
* @nlh: Pointer to put the header of the new netlink message.
* @seq: The message sequence number.
* @len: The requested message length to allocate.
* @client: Calling IB netlink client.
* @op: message content op.
* Returns the allocated buffer on success and NULL on failure.
*/
void *ibnl_put_msg(struct sk_buff *skb, struct nlmsghdr **nlh, int seq,
int len, int client, int op);
/**
* Put a new attribute in a supplied skb.
* @skb: The netlink skb.
* @nlh: Header of the netlink message to append the attribute to.
* @len: The length of the attribute data.
* @data: The attribute data to put.
* @type: The attribute type.
* Returns 0 on success or a negative error code on failure.
*/
int ibnl_put_attr(struct sk_buff *skb, struct nlmsghdr *nlh,
int len, void *data, int type);
#endif /* __KERNEL__ */
#endif /* _RDMA_NETLINK_H */
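
To illustrate the new interface, here is a hypothetical kernel client that registers a dump callback under the RDMA CM index; in the series itself that index is expected to be claimed by the RDMA CM code, so the module below is purely a sketch:

#include <linux/init.h>
#include <linux/module.h>
#include <rdma/rdma_netlink.h>

/* Hypothetical dump callback; a real one would append rdma_cm_id_stats
 * records to 'skb' using ibnl_put_msg() and ibnl_put_attr(). */
static int demo_cm_id_stats_dump(struct sk_buff *skb,
				 struct netlink_callback *cb)
{
	return 0;
}

static const struct ibnl_client_cbs demo_cb_table[] = {
	[RDMA_NL_RDMA_CM_ID_STATS] = { .dump = demo_cm_id_stats_dump },
};

static int __init demo_ibnl_init(void)
{
	/* requests with nlmsg_type == RDMA_NL_GET_TYPE(RDMA_NL_RDMA_CM,
	 * RDMA_NL_RDMA_CM_ID_STATS) are routed to the dump callback */
	return ibnl_add_client(RDMA_NL_RDMA_CM, RDMA_NL_RDMA_CM_NUM_OPS,
			       demo_cb_table);
}

static void __exit demo_ibnl_exit(void)
{
	ibnl_remove_client(RDMA_NL_RDMA_CM);
}

module_init(demo_ibnl_init);
module_exit(demo_ibnl_exit);
MODULE_LICENSE("GPL");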


@ -589,7 +589,8 @@ rdma_create_trans(struct p9_client *client, const char *addr, char *args)
return -ENOMEM; return -ENOMEM;
/* Create the RDMA CM ID */ /* Create the RDMA CM ID */
rdma->cm_id = rdma_create_id(p9_cm_event_handler, client, RDMA_PS_TCP); rdma->cm_id = rdma_create_id(p9_cm_event_handler, client, RDMA_PS_TCP,
IB_QPT_RC);
if (IS_ERR(rdma->cm_id)) if (IS_ERR(rdma->cm_id))
goto error; goto error;


@ -325,7 +325,7 @@ static int rds_ib_laddr_check(__be32 addr)
/* Create a CMA ID and try to bind it. This catches both /* Create a CMA ID and try to bind it. This catches both
* IB and iWARP capable NICs. * IB and iWARP capable NICs.
*/ */
cm_id = rdma_create_id(NULL, NULL, RDMA_PS_TCP); cm_id = rdma_create_id(NULL, NULL, RDMA_PS_TCP, IB_QPT_RC);
if (IS_ERR(cm_id)) if (IS_ERR(cm_id))
return PTR_ERR(cm_id); return PTR_ERR(cm_id);


@ -587,7 +587,7 @@ int rds_ib_conn_connect(struct rds_connection *conn)
/* XXX I wonder what affect the port space has */ /* XXX I wonder what affect the port space has */
/* delegate cm event handler to rdma_transport */ /* delegate cm event handler to rdma_transport */
ic->i_cm_id = rdma_create_id(rds_rdma_cm_event_handler, conn, ic->i_cm_id = rdma_create_id(rds_rdma_cm_event_handler, conn,
RDMA_PS_TCP); RDMA_PS_TCP, IB_QPT_RC);
if (IS_ERR(ic->i_cm_id)) { if (IS_ERR(ic->i_cm_id)) {
ret = PTR_ERR(ic->i_cm_id); ret = PTR_ERR(ic->i_cm_id);
ic->i_cm_id = NULL; ic->i_cm_id = NULL;


@ -226,7 +226,7 @@ static int rds_iw_laddr_check(__be32 addr)
/* Create a CMA ID and try to bind it. This catches both /* Create a CMA ID and try to bind it. This catches both
* IB and iWARP capable NICs. * IB and iWARP capable NICs.
*/ */
cm_id = rdma_create_id(NULL, NULL, RDMA_PS_TCP); cm_id = rdma_create_id(NULL, NULL, RDMA_PS_TCP, IB_QPT_RC);
if (IS_ERR(cm_id)) if (IS_ERR(cm_id))
return PTR_ERR(cm_id); return PTR_ERR(cm_id);


@ -522,7 +522,7 @@ int rds_iw_conn_connect(struct rds_connection *conn)
/* XXX I wonder what affect the port space has */ /* XXX I wonder what affect the port space has */
/* delegate cm event handler to rdma_transport */ /* delegate cm event handler to rdma_transport */
ic->i_cm_id = rdma_create_id(rds_rdma_cm_event_handler, conn, ic->i_cm_id = rdma_create_id(rds_rdma_cm_event_handler, conn,
RDMA_PS_TCP); RDMA_PS_TCP, IB_QPT_RC);
if (IS_ERR(ic->i_cm_id)) { if (IS_ERR(ic->i_cm_id)) {
ret = PTR_ERR(ic->i_cm_id); ret = PTR_ERR(ic->i_cm_id);
ic->i_cm_id = NULL; ic->i_cm_id = NULL;


@ -158,7 +158,8 @@ static int rds_rdma_listen_init(void)
struct rdma_cm_id *cm_id; struct rdma_cm_id *cm_id;
int ret; int ret;
cm_id = rdma_create_id(rds_rdma_cm_event_handler, NULL, RDMA_PS_TCP); cm_id = rdma_create_id(rds_rdma_cm_event_handler, NULL, RDMA_PS_TCP,
IB_QPT_RC);
if (IS_ERR(cm_id)) { if (IS_ERR(cm_id)) {
ret = PTR_ERR(cm_id); ret = PTR_ERR(cm_id);
printk(KERN_ERR "RDS/RDMA: failed to setup listener, " printk(KERN_ERR "RDS/RDMA: failed to setup listener, "


@ -695,7 +695,8 @@ static struct svc_xprt *svc_rdma_create(struct svc_serv *serv,
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
xprt = &cma_xprt->sc_xprt; xprt = &cma_xprt->sc_xprt;
listen_id = rdma_create_id(rdma_listen_handler, cma_xprt, RDMA_PS_TCP); listen_id = rdma_create_id(rdma_listen_handler, cma_xprt, RDMA_PS_TCP,
IB_QPT_RC);
if (IS_ERR(listen_id)) { if (IS_ERR(listen_id)) {
ret = PTR_ERR(listen_id); ret = PTR_ERR(listen_id);
dprintk("svcrdma: rdma_create_id failed = %d\n", ret); dprintk("svcrdma: rdma_create_id failed = %d\n", ret);


@ -387,7 +387,7 @@ rpcrdma_create_id(struct rpcrdma_xprt *xprt,
init_completion(&ia->ri_done); init_completion(&ia->ri_done);
id = rdma_create_id(rpcrdma_conn_upcall, xprt, RDMA_PS_TCP); id = rdma_create_id(rpcrdma_conn_upcall, xprt, RDMA_PS_TCP, IB_QPT_RC);
if (IS_ERR(id)) { if (IS_ERR(id)) {
rc = PTR_ERR(id); rc = PTR_ERR(id);
dprintk("RPC: %s: rdma_create_id() failed %i\n", dprintk("RPC: %s: rdma_create_id() failed %i\n",