SUNRPC: Add a transport-specific private field in rpc_rqst
Currently there's a hidden and indirect mechanism for finding the rpcrdma_req that goes with an rpc_rqst: it depends on getting from the rq_buffer pointer in struct rpc_rqst to the struct rpcrdma_regbuf that controls that buffer, and then to the struct rpcrdma_req that owns it.

This was done back in the day to avoid adding a per-rqst pointer, and to avoid altering the buf_free API, when support for RPC-over-RDMA was introduced.

I'm about to change the way regbufs work to support larger inline thresholds. Now is a good time to replace this indirect mechanism with something more straightforward. This should be considered a clean-up.

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
Parent: 68778945e4
Commit: 5a6d1db455
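The mechanism being replaced is easiest to see side by side. Below is a minimal, self-contained userspace sketch, not the kernel code: the struct layouts are simplified stand-ins for the real rpc_rqst, rpcrdma_regbuf, and rpcrdma_req types, and only the fields needed to show the two lookup schemes are included.

/* Sketch only: contrast the old container_of()-based walk with the
 * new direct per-rqst pointer. Simplified stand-in types, not the
 * kernel definitions.
 */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct rpcrdma_req;

struct rpcrdma_regbuf {
	struct rpcrdma_req *rg_owner;	/* old scheme: back-pointer to owning req */
	char rg_base[64];		/* buffer memory handed to the RPC layer */
};

struct rpc_rqst {
	void *rq_xprtdata;		/* new scheme: direct per-xprt private pointer */
	void *rq_buffer;		/* points into a regbuf's rg_base */
};

struct rpcrdma_req {
	struct rpcrdma_regbuf *rl_sendbuf;
};

/* Old scheme: walk from rq_buffer back to the enclosing regbuf,
 * then follow its owner pointer. Depends on rq_buffer always
 * pointing at a regbuf's rg_base.
 */
static struct rpcrdma_req *old_rpcr_to_rdmar(struct rpc_rqst *rqst)
{
	struct rpcrdma_regbuf *rb =
		container_of(rqst->rq_buffer, struct rpcrdma_regbuf, rg_base);
	return rb->rg_owner;
}

/* New scheme: one dereference, no layout assumptions. */
static struct rpcrdma_req *new_rpcr_to_rdmar(struct rpc_rqst *rqst)
{
	return rqst->rq_xprtdata;
}

int main(void)
{
	static struct rpcrdma_req req;
	static struct rpcrdma_regbuf rb = { .rg_owner = &req };
	struct rpc_rqst rqst = { .rq_xprtdata = &req, .rq_buffer = rb.rg_base };

	req.rl_sendbuf = &rb;
	/* Both lookups resolve to the same rpcrdma_req. */
	printf("old: %p  new: %p\n",
	       (void *)old_rpcr_to_rdmar(&rqst),
	       (void *)new_rpcr_to_rdmar(&rqst));
	return 0;
}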
include/linux/sunrpc/xprt.h

@@ -83,6 +83,7 @@ struct rpc_rqst {
 	void (*rq_release_snd_buf)(struct rpc_rqst *); /* release rq_enc_pages */
 	struct list_head	rq_list;
 
+	void			*rq_xprtdata;	/* Per-xprt private data */
 	void			*rq_buffer;	/* Call XDR encode buffer */
 	size_t			rq_callsize;
 	void			*rq_rbuffer;	/* Reply XDR decode buffer */
net/sunrpc/xprtrdma/backchannel.c

@@ -55,11 +55,9 @@ static int rpcrdma_bc_setup_rqst(struct rpcrdma_xprt *r_xprt,
 	rb = rpcrdma_alloc_regbuf(ia, size, GFP_KERNEL);
 	if (IS_ERR(rb))
 		goto out_fail;
-	rb->rg_owner = req;
 	req->rl_sendbuf = rb;
-	/* so that rpcr_to_rdmar works when receiving a request */
-	rqst->rq_buffer = (void *)req->rl_sendbuf->rg_base;
-	xdr_buf_init(&rqst->rq_snd_buf, rqst->rq_buffer, size);
+	xdr_buf_init(&rqst->rq_snd_buf, rb->rg_base, size);
+	rpcrdma_set_xprtdata(rqst, req);
 	return 0;
 
 out_fail:
net/sunrpc/xprtrdma/transport.c

@@ -523,6 +523,7 @@ xprt_rdma_allocate(struct rpc_task *task)
 out:
 	dprintk("RPC: %s: size %zd, request 0x%p\n", __func__, size, req);
 	req->rl_connect_cookie = 0;	/* our reserved value */
+	rpcrdma_set_xprtdata(rqst, req);
 	rqst->rq_buffer = req->rl_sendbuf->rg_base;
 	rqst->rq_rbuffer = (char *)rqst->rq_buffer + rqst->rq_rcvsize;
 	return 0;

@@ -559,7 +560,6 @@ out_sendbuf:
 	rb = rpcrdma_alloc_regbuf(&r_xprt->rx_ia, size, flags);
 	if (IS_ERR(rb))
 		goto out_fail;
-	rb->rg_owner = req;
 
 	r_xprt->rx_stats.hardway_register_count += size;
 	rpcrdma_free_regbuf(&r_xprt->rx_ia, req->rl_sendbuf);
net/sunrpc/xprtrdma/verbs.c

@@ -1210,7 +1210,6 @@ rpcrdma_alloc_regbuf(struct rpcrdma_ia *ia, size_t size, gfp_t flags)
 	iov->length = size;
 	iov->lkey = ia->ri_pd->local_dma_lkey;
 	rb->rg_size = size;
-	rb->rg_owner = NULL;
 	return rb;
 
 out_free:
net/sunrpc/xprtrdma/xprt_rdma.h

@@ -113,7 +113,6 @@ struct rpcrdma_ep {
 
 struct rpcrdma_regbuf {
 	size_t			rg_size;
-	struct rpcrdma_req	*rg_owner;
 	struct ib_sge		rg_iov;
 	__be32			rg_base[0] __attribute__ ((aligned(256)));
 };

@@ -297,14 +296,16 @@ struct rpcrdma_req {
 	struct rpcrdma_mr_seg	rl_segments[RPCRDMA_MAX_SEGS];
 };
 
+static inline void
+rpcrdma_set_xprtdata(struct rpc_rqst *rqst, struct rpcrdma_req *req)
+{
+	rqst->rq_xprtdata = req;
+}
+
 static inline struct rpcrdma_req *
 rpcr_to_rdmar(struct rpc_rqst *rqst)
 {
-	void *buffer = rqst->rq_buffer;
-	struct rpcrdma_regbuf *rb;
-
-	rb = container_of(buffer, struct rpcrdma_regbuf, rg_base);
-	return rb->rg_owner;
+	return rqst->rq_xprtdata;
 }
 
 /*