xprtrdma: Use ib_device pointer safely
The connect worker can replace ri_id, but prevents ri_id->device from changing during the lifetime of a transport instance. The old ID is kept around until a new ID is created and the ->device is confirmed to be the same. Cache a copy of ri_id->device in rpcrdma_ia and in rpcrdma_rep. The cached copy can be used safely in code that does not serialize with the connect worker. Other code can use it to save an extra address generation (one pointer dereference instead of two). Signed-off-by: Chuck Lever <chuck.lever@oracle.com> Reviewed-by: Steve Wise <swise@opengridcomputing.com> Tested-By: Devesh Sharma <devesh.sharma@avagotech.com> Reviewed-by: Doug Ledford <dledford@redhat.com> Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
This commit is contained in:
Parent
494ae30d2a
Commit
89e0d11258
|
@ -85,7 +85,7 @@ fmr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
|
||||||
int nsegs, bool writing)
|
int nsegs, bool writing)
|
||||||
{
|
{
|
||||||
struct rpcrdma_ia *ia = &r_xprt->rx_ia;
|
struct rpcrdma_ia *ia = &r_xprt->rx_ia;
|
||||||
struct ib_device *device = ia->ri_id->device;
|
struct ib_device *device = ia->ri_device;
|
||||||
enum dma_data_direction direction = rpcrdma_data_dir(writing);
|
enum dma_data_direction direction = rpcrdma_data_dir(writing);
|
||||||
struct rpcrdma_mr_seg *seg1 = seg;
|
struct rpcrdma_mr_seg *seg1 = seg;
|
||||||
struct rpcrdma_mw *mw = seg1->rl_mw;
|
struct rpcrdma_mw *mw = seg1->rl_mw;
|
||||||
|
@ -137,17 +137,13 @@ fmr_op_unmap(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg)
|
||||||
{
|
{
|
||||||
struct rpcrdma_ia *ia = &r_xprt->rx_ia;
|
struct rpcrdma_ia *ia = &r_xprt->rx_ia;
|
||||||
struct rpcrdma_mr_seg *seg1 = seg;
|
struct rpcrdma_mr_seg *seg1 = seg;
|
||||||
struct ib_device *device;
|
|
||||||
int rc, nsegs = seg->mr_nsegs;
|
int rc, nsegs = seg->mr_nsegs;
|
||||||
LIST_HEAD(l);
|
LIST_HEAD(l);
|
||||||
|
|
||||||
list_add(&seg1->rl_mw->r.fmr->list, &l);
|
list_add(&seg1->rl_mw->r.fmr->list, &l);
|
||||||
rc = ib_unmap_fmr(&l);
|
rc = ib_unmap_fmr(&l);
|
||||||
read_lock(&ia->ri_qplock);
|
|
||||||
device = ia->ri_id->device;
|
|
||||||
while (seg1->mr_nsegs--)
|
while (seg1->mr_nsegs--)
|
||||||
rpcrdma_unmap_one(device, seg++);
|
rpcrdma_unmap_one(ia->ri_device, seg++);
|
||||||
read_unlock(&ia->ri_qplock);
|
|
||||||
if (rc)
|
if (rc)
|
||||||
goto out_err;
|
goto out_err;
|
||||||
return nsegs;
|
return nsegs;
|
||||||
|
|
|
@ -137,7 +137,7 @@ static int
|
||||||
frwr_op_init(struct rpcrdma_xprt *r_xprt)
|
frwr_op_init(struct rpcrdma_xprt *r_xprt)
|
||||||
{
|
{
|
||||||
struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
|
struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
|
||||||
struct ib_device *device = r_xprt->rx_ia.ri_id->device;
|
struct ib_device *device = r_xprt->rx_ia.ri_device;
|
||||||
unsigned int depth = r_xprt->rx_ia.ri_max_frmr_depth;
|
unsigned int depth = r_xprt->rx_ia.ri_max_frmr_depth;
|
||||||
struct ib_pd *pd = r_xprt->rx_ia.ri_pd;
|
struct ib_pd *pd = r_xprt->rx_ia.ri_pd;
|
||||||
int i;
|
int i;
|
||||||
|
@ -178,7 +178,7 @@ frwr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
|
||||||
int nsegs, bool writing)
|
int nsegs, bool writing)
|
||||||
{
|
{
|
||||||
struct rpcrdma_ia *ia = &r_xprt->rx_ia;
|
struct rpcrdma_ia *ia = &r_xprt->rx_ia;
|
||||||
struct ib_device *device = ia->ri_id->device;
|
struct ib_device *device = ia->ri_device;
|
||||||
enum dma_data_direction direction = rpcrdma_data_dir(writing);
|
enum dma_data_direction direction = rpcrdma_data_dir(writing);
|
||||||
struct rpcrdma_mr_seg *seg1 = seg;
|
struct rpcrdma_mr_seg *seg1 = seg;
|
||||||
struct rpcrdma_mw *mw = seg1->rl_mw;
|
struct rpcrdma_mw *mw = seg1->rl_mw;
|
||||||
|
@ -263,7 +263,6 @@ frwr_op_unmap(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg)
|
||||||
struct rpcrdma_ia *ia = &r_xprt->rx_ia;
|
struct rpcrdma_ia *ia = &r_xprt->rx_ia;
|
||||||
struct ib_send_wr invalidate_wr, *bad_wr;
|
struct ib_send_wr invalidate_wr, *bad_wr;
|
||||||
int rc, nsegs = seg->mr_nsegs;
|
int rc, nsegs = seg->mr_nsegs;
|
||||||
struct ib_device *device;
|
|
||||||
|
|
||||||
seg1->rl_mw->r.frmr.fr_state = FRMR_IS_INVALID;
|
seg1->rl_mw->r.frmr.fr_state = FRMR_IS_INVALID;
|
||||||
|
|
||||||
|
@ -273,10 +272,9 @@ frwr_op_unmap(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg)
|
||||||
invalidate_wr.ex.invalidate_rkey = seg1->rl_mw->r.frmr.fr_mr->rkey;
|
invalidate_wr.ex.invalidate_rkey = seg1->rl_mw->r.frmr.fr_mr->rkey;
|
||||||
DECR_CQCOUNT(&r_xprt->rx_ep);
|
DECR_CQCOUNT(&r_xprt->rx_ep);
|
||||||
|
|
||||||
read_lock(&ia->ri_qplock);
|
|
||||||
device = ia->ri_id->device;
|
|
||||||
while (seg1->mr_nsegs--)
|
while (seg1->mr_nsegs--)
|
||||||
rpcrdma_unmap_one(device, seg++);
|
rpcrdma_unmap_one(ia->ri_device, seg++);
|
||||||
|
read_lock(&ia->ri_qplock);
|
||||||
rc = ib_post_send(ia->ri_id->qp, &invalidate_wr, &bad_wr);
|
rc = ib_post_send(ia->ri_id->qp, &invalidate_wr, &bad_wr);
|
||||||
read_unlock(&ia->ri_qplock);
|
read_unlock(&ia->ri_qplock);
|
||||||
if (rc)
|
if (rc)
|
||||||
|
@ -304,7 +302,7 @@ static void
|
||||||
frwr_op_reset(struct rpcrdma_xprt *r_xprt)
|
frwr_op_reset(struct rpcrdma_xprt *r_xprt)
|
||||||
{
|
{
|
||||||
struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
|
struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
|
||||||
struct ib_device *device = r_xprt->rx_ia.ri_id->device;
|
struct ib_device *device = r_xprt->rx_ia.ri_device;
|
||||||
unsigned int depth = r_xprt->rx_ia.ri_max_frmr_depth;
|
unsigned int depth = r_xprt->rx_ia.ri_max_frmr_depth;
|
||||||
struct ib_pd *pd = r_xprt->rx_ia.ri_pd;
|
struct ib_pd *pd = r_xprt->rx_ia.ri_pd;
|
||||||
struct rpcrdma_mw *r;
|
struct rpcrdma_mw *r;
|
||||||
|
|
|
@ -50,8 +50,7 @@ physical_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
|
||||||
{
|
{
|
||||||
struct rpcrdma_ia *ia = &r_xprt->rx_ia;
|
struct rpcrdma_ia *ia = &r_xprt->rx_ia;
|
||||||
|
|
||||||
rpcrdma_map_one(ia->ri_id->device, seg,
|
rpcrdma_map_one(ia->ri_device, seg, rpcrdma_data_dir(writing));
|
||||||
rpcrdma_data_dir(writing));
|
|
||||||
seg->mr_rkey = ia->ri_bind_mem->rkey;
|
seg->mr_rkey = ia->ri_bind_mem->rkey;
|
||||||
seg->mr_base = seg->mr_dma;
|
seg->mr_base = seg->mr_dma;
|
||||||
seg->mr_nsegs = 1;
|
seg->mr_nsegs = 1;
|
||||||
|
@ -65,10 +64,7 @@ physical_op_unmap(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg)
|
||||||
{
|
{
|
||||||
struct rpcrdma_ia *ia = &r_xprt->rx_ia;
|
struct rpcrdma_ia *ia = &r_xprt->rx_ia;
|
||||||
|
|
||||||
read_lock(&ia->ri_qplock);
|
rpcrdma_unmap_one(ia->ri_device, seg);
|
||||||
rpcrdma_unmap_one(ia->ri_id->device, seg);
|
|
||||||
read_unlock(&ia->ri_qplock);
|
|
||||||
|
|
||||||
return 1;
|
return 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -272,7 +272,6 @@ rpcrdma_recvcq_process_wc(struct ib_wc *wc, struct list_head *sched_list)
|
||||||
{
|
{
|
||||||
struct rpcrdma_rep *rep =
|
struct rpcrdma_rep *rep =
|
||||||
(struct rpcrdma_rep *)(unsigned long)wc->wr_id;
|
(struct rpcrdma_rep *)(unsigned long)wc->wr_id;
|
||||||
struct rpcrdma_ia *ia;
|
|
||||||
|
|
||||||
/* WARNING: Only wr_id and status are reliable at this point */
|
/* WARNING: Only wr_id and status are reliable at this point */
|
||||||
if (wc->status != IB_WC_SUCCESS)
|
if (wc->status != IB_WC_SUCCESS)
|
||||||
|
@ -285,9 +284,8 @@ rpcrdma_recvcq_process_wc(struct ib_wc *wc, struct list_head *sched_list)
|
||||||
dprintk("RPC: %s: rep %p opcode 'recv', length %u: success\n",
|
dprintk("RPC: %s: rep %p opcode 'recv', length %u: success\n",
|
||||||
__func__, rep, wc->byte_len);
|
__func__, rep, wc->byte_len);
|
||||||
|
|
||||||
ia = &rep->rr_rxprt->rx_ia;
|
|
||||||
rep->rr_len = wc->byte_len;
|
rep->rr_len = wc->byte_len;
|
||||||
ib_dma_sync_single_for_cpu(ia->ri_id->device,
|
ib_dma_sync_single_for_cpu(rep->rr_device,
|
||||||
rdmab_addr(rep->rr_rdmabuf),
|
rdmab_addr(rep->rr_rdmabuf),
|
||||||
rep->rr_len, DMA_FROM_DEVICE);
|
rep->rr_len, DMA_FROM_DEVICE);
|
||||||
prefetch(rdmab_to_msg(rep->rr_rdmabuf));
|
prefetch(rdmab_to_msg(rep->rr_rdmabuf));
|
||||||
|
@ -483,7 +481,7 @@ connected:
|
||||||
|
|
||||||
pr_info("rpcrdma: connection to %pIS:%u on %s, memreg '%s', %d credits, %d responders%s\n",
|
pr_info("rpcrdma: connection to %pIS:%u on %s, memreg '%s', %d credits, %d responders%s\n",
|
||||||
sap, rpc_get_port(sap),
|
sap, rpc_get_port(sap),
|
||||||
ia->ri_id->device->name,
|
ia->ri_device->name,
|
||||||
ia->ri_ops->ro_displayname,
|
ia->ri_ops->ro_displayname,
|
||||||
xprt->rx_buf.rb_max_requests,
|
xprt->rx_buf.rb_max_requests,
|
||||||
ird, ird < 4 && ird < tird / 2 ? " (low!)" : "");
|
ird, ird < 4 && ird < tird / 2 ? " (low!)" : "");
|
||||||
|
@ -584,8 +582,9 @@ rpcrdma_ia_open(struct rpcrdma_xprt *xprt, struct sockaddr *addr, int memreg)
|
||||||
rc = PTR_ERR(ia->ri_id);
|
rc = PTR_ERR(ia->ri_id);
|
||||||
goto out1;
|
goto out1;
|
||||||
}
|
}
|
||||||
|
ia->ri_device = ia->ri_id->device;
|
||||||
|
|
||||||
ia->ri_pd = ib_alloc_pd(ia->ri_id->device);
|
ia->ri_pd = ib_alloc_pd(ia->ri_device);
|
||||||
if (IS_ERR(ia->ri_pd)) {
|
if (IS_ERR(ia->ri_pd)) {
|
||||||
rc = PTR_ERR(ia->ri_pd);
|
rc = PTR_ERR(ia->ri_pd);
|
||||||
dprintk("RPC: %s: ib_alloc_pd() failed %i\n",
|
dprintk("RPC: %s: ib_alloc_pd() failed %i\n",
|
||||||
|
@ -593,7 +592,7 @@ rpcrdma_ia_open(struct rpcrdma_xprt *xprt, struct sockaddr *addr, int memreg)
|
||||||
goto out2;
|
goto out2;
|
||||||
}
|
}
|
||||||
|
|
||||||
rc = ib_query_device(ia->ri_id->device, devattr);
|
rc = ib_query_device(ia->ri_device, devattr);
|
||||||
if (rc) {
|
if (rc) {
|
||||||
dprintk("RPC: %s: ib_query_device failed %d\n",
|
dprintk("RPC: %s: ib_query_device failed %d\n",
|
||||||
__func__, rc);
|
__func__, rc);
|
||||||
|
@ -602,7 +601,7 @@ rpcrdma_ia_open(struct rpcrdma_xprt *xprt, struct sockaddr *addr, int memreg)
|
||||||
|
|
||||||
if (devattr->device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY) {
|
if (devattr->device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY) {
|
||||||
ia->ri_have_dma_lkey = 1;
|
ia->ri_have_dma_lkey = 1;
|
||||||
ia->ri_dma_lkey = ia->ri_id->device->local_dma_lkey;
|
ia->ri_dma_lkey = ia->ri_device->local_dma_lkey;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (memreg == RPCRDMA_FRMR) {
|
if (memreg == RPCRDMA_FRMR) {
|
||||||
|
@ -617,7 +616,7 @@ rpcrdma_ia_open(struct rpcrdma_xprt *xprt, struct sockaddr *addr, int memreg)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if (memreg == RPCRDMA_MTHCAFMR) {
|
if (memreg == RPCRDMA_MTHCAFMR) {
|
||||||
if (!ia->ri_id->device->alloc_fmr) {
|
if (!ia->ri_device->alloc_fmr) {
|
||||||
dprintk("RPC: %s: MTHCAFMR registration "
|
dprintk("RPC: %s: MTHCAFMR registration "
|
||||||
"not supported by HCA\n", __func__);
|
"not supported by HCA\n", __func__);
|
||||||
memreg = RPCRDMA_ALLPHYSICAL;
|
memreg = RPCRDMA_ALLPHYSICAL;
|
||||||
|
@ -767,9 +766,9 @@ rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia,
|
||||||
init_waitqueue_head(&ep->rep_connect_wait);
|
init_waitqueue_head(&ep->rep_connect_wait);
|
||||||
INIT_DELAYED_WORK(&ep->rep_connect_worker, rpcrdma_connect_worker);
|
INIT_DELAYED_WORK(&ep->rep_connect_worker, rpcrdma_connect_worker);
|
||||||
|
|
||||||
sendcq = ib_create_cq(ia->ri_id->device, rpcrdma_sendcq_upcall,
|
sendcq = ib_create_cq(ia->ri_device, rpcrdma_sendcq_upcall,
|
||||||
rpcrdma_cq_async_error_upcall, ep,
|
rpcrdma_cq_async_error_upcall, ep,
|
||||||
ep->rep_attr.cap.max_send_wr + 1, 0);
|
ep->rep_attr.cap.max_send_wr + 1, 0);
|
||||||
if (IS_ERR(sendcq)) {
|
if (IS_ERR(sendcq)) {
|
||||||
rc = PTR_ERR(sendcq);
|
rc = PTR_ERR(sendcq);
|
||||||
dprintk("RPC: %s: failed to create send CQ: %i\n",
|
dprintk("RPC: %s: failed to create send CQ: %i\n",
|
||||||
|
@ -784,9 +783,9 @@ rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia,
|
||||||
goto out2;
|
goto out2;
|
||||||
}
|
}
|
||||||
|
|
||||||
recvcq = ib_create_cq(ia->ri_id->device, rpcrdma_recvcq_upcall,
|
recvcq = ib_create_cq(ia->ri_device, rpcrdma_recvcq_upcall,
|
||||||
rpcrdma_cq_async_error_upcall, ep,
|
rpcrdma_cq_async_error_upcall, ep,
|
||||||
ep->rep_attr.cap.max_recv_wr + 1, 0);
|
ep->rep_attr.cap.max_recv_wr + 1, 0);
|
||||||
if (IS_ERR(recvcq)) {
|
if (IS_ERR(recvcq)) {
|
||||||
rc = PTR_ERR(recvcq);
|
rc = PTR_ERR(recvcq);
|
||||||
dprintk("RPC: %s: failed to create recv CQ: %i\n",
|
dprintk("RPC: %s: failed to create recv CQ: %i\n",
|
||||||
|
@ -907,7 +906,7 @@ retry:
|
||||||
* More stuff I haven't thought of!
|
* More stuff I haven't thought of!
|
||||||
* Rrrgh!
|
* Rrrgh!
|
||||||
*/
|
*/
|
||||||
if (ia->ri_id->device != id->device) {
|
if (ia->ri_device != id->device) {
|
||||||
printk("RPC: %s: can't reconnect on "
|
printk("RPC: %s: can't reconnect on "
|
||||||
"different device!\n", __func__);
|
"different device!\n", __func__);
|
||||||
rdma_destroy_id(id);
|
rdma_destroy_id(id);
|
||||||
|
@ -1049,6 +1048,7 @@ rpcrdma_create_rep(struct rpcrdma_xprt *r_xprt)
|
||||||
goto out_free;
|
goto out_free;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
rep->rr_device = ia->ri_device;
|
||||||
rep->rr_rxprt = r_xprt;
|
rep->rr_rxprt = r_xprt;
|
||||||
return rep;
|
return rep;
|
||||||
|
|
||||||
|
@ -1449,9 +1449,9 @@ rpcrdma_register_internal(struct rpcrdma_ia *ia, void *va, int len,
|
||||||
/*
|
/*
|
||||||
* All memory passed here was kmalloc'ed, therefore phys-contiguous.
|
* All memory passed here was kmalloc'ed, therefore phys-contiguous.
|
||||||
*/
|
*/
|
||||||
iov->addr = ib_dma_map_single(ia->ri_id->device,
|
iov->addr = ib_dma_map_single(ia->ri_device,
|
||||||
va, len, DMA_BIDIRECTIONAL);
|
va, len, DMA_BIDIRECTIONAL);
|
||||||
if (ib_dma_mapping_error(ia->ri_id->device, iov->addr))
|
if (ib_dma_mapping_error(ia->ri_device, iov->addr))
|
||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
|
|
||||||
iov->length = len;
|
iov->length = len;
|
||||||
|
@ -1495,8 +1495,8 @@ rpcrdma_deregister_internal(struct rpcrdma_ia *ia,
|
||||||
{
|
{
|
||||||
int rc;
|
int rc;
|
||||||
|
|
||||||
ib_dma_unmap_single(ia->ri_id->device,
|
ib_dma_unmap_single(ia->ri_device,
|
||||||
iov->addr, iov->length, DMA_BIDIRECTIONAL);
|
iov->addr, iov->length, DMA_BIDIRECTIONAL);
|
||||||
|
|
||||||
if (NULL == mr)
|
if (NULL == mr)
|
||||||
return 0;
|
return 0;
|
||||||
|
@ -1589,15 +1589,18 @@ rpcrdma_ep_post(struct rpcrdma_ia *ia,
|
||||||
send_wr.num_sge = req->rl_niovs;
|
send_wr.num_sge = req->rl_niovs;
|
||||||
send_wr.opcode = IB_WR_SEND;
|
send_wr.opcode = IB_WR_SEND;
|
||||||
if (send_wr.num_sge == 4) /* no need to sync any pad (constant) */
|
if (send_wr.num_sge == 4) /* no need to sync any pad (constant) */
|
||||||
ib_dma_sync_single_for_device(ia->ri_id->device,
|
ib_dma_sync_single_for_device(ia->ri_device,
|
||||||
req->rl_send_iov[3].addr, req->rl_send_iov[3].length,
|
req->rl_send_iov[3].addr,
|
||||||
DMA_TO_DEVICE);
|
req->rl_send_iov[3].length,
|
||||||
ib_dma_sync_single_for_device(ia->ri_id->device,
|
DMA_TO_DEVICE);
|
||||||
req->rl_send_iov[1].addr, req->rl_send_iov[1].length,
|
ib_dma_sync_single_for_device(ia->ri_device,
|
||||||
DMA_TO_DEVICE);
|
req->rl_send_iov[1].addr,
|
||||||
ib_dma_sync_single_for_device(ia->ri_id->device,
|
req->rl_send_iov[1].length,
|
||||||
req->rl_send_iov[0].addr, req->rl_send_iov[0].length,
|
DMA_TO_DEVICE);
|
||||||
DMA_TO_DEVICE);
|
ib_dma_sync_single_for_device(ia->ri_device,
|
||||||
|
req->rl_send_iov[0].addr,
|
||||||
|
req->rl_send_iov[0].length,
|
||||||
|
DMA_TO_DEVICE);
|
||||||
|
|
||||||
if (DECR_CQCOUNT(ep) > 0)
|
if (DECR_CQCOUNT(ep) > 0)
|
||||||
send_wr.send_flags = 0;
|
send_wr.send_flags = 0;
|
||||||
|
@ -1630,7 +1633,7 @@ rpcrdma_ep_post_recv(struct rpcrdma_ia *ia,
|
||||||
recv_wr.sg_list = &rep->rr_rdmabuf->rg_iov;
|
recv_wr.sg_list = &rep->rr_rdmabuf->rg_iov;
|
||||||
recv_wr.num_sge = 1;
|
recv_wr.num_sge = 1;
|
||||||
|
|
||||||
ib_dma_sync_single_for_cpu(ia->ri_id->device,
|
ib_dma_sync_single_for_cpu(ia->ri_device,
|
||||||
rdmab_addr(rep->rr_rdmabuf),
|
rdmab_addr(rep->rr_rdmabuf),
|
||||||
rdmab_length(rep->rr_rdmabuf),
|
rdmab_length(rep->rr_rdmabuf),
|
||||||
DMA_BIDIRECTIONAL);
|
DMA_BIDIRECTIONAL);
|
||||||
|
|
|
@ -62,6 +62,7 @@
|
||||||
struct rpcrdma_ia {
|
struct rpcrdma_ia {
|
||||||
const struct rpcrdma_memreg_ops *ri_ops;
|
const struct rpcrdma_memreg_ops *ri_ops;
|
||||||
rwlock_t ri_qplock;
|
rwlock_t ri_qplock;
|
||||||
|
struct ib_device *ri_device;
|
||||||
struct rdma_cm_id *ri_id;
|
struct rdma_cm_id *ri_id;
|
||||||
struct ib_pd *ri_pd;
|
struct ib_pd *ri_pd;
|
||||||
struct ib_mr *ri_bind_mem;
|
struct ib_mr *ri_bind_mem;
|
||||||
|
@ -173,6 +174,7 @@ struct rpcrdma_buffer;
|
||||||
|
|
||||||
struct rpcrdma_rep {
|
struct rpcrdma_rep {
|
||||||
unsigned int rr_len;
|
unsigned int rr_len;
|
||||||
|
struct ib_device *rr_device;
|
||||||
struct rpcrdma_xprt *rr_rxprt;
|
struct rpcrdma_xprt *rr_rxprt;
|
||||||
struct list_head rr_list;
|
struct list_head rr_list;
|
||||||
struct rpcrdma_regbuf *rr_rdmabuf;
|
struct rpcrdma_regbuf *rr_rdmabuf;
|
||||||
|
|
Loading…
Reference in new issue