xprtrdma: Make rpcrdma_{un}map_one() into inline functions
These functions are called in a loop for each page transferred via RDMA READ or WRITE. Extract loop invariants and inline them to reduce CPU overhead.

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Tested-by: Devesh Sharma <Devesh.Sharma@Emulex.Com>
Tested-by: Meghana Cheripady <Meghana.Cheripady@Emulex.Com>
Tested-by: Veeresh U. Kokatnur <veereshuk@chelsio.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
Parent: e46ac34c3c
Commit: d654788e98
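The change is a straightforward loop-invariant extraction plus inlining. Below is a minimal, self-contained C sketch of the same pattern, not the xprtrdma code itself; device, seg, data_dir(), map_one(), and map_all() are hypothetical stand-ins for the kernel names:

#include <stdbool.h>
#include <stddef.h>

struct device;                                  /* stand-in for struct ib_device */
struct seg { void *addr; size_t len; };         /* stand-in for rpcrdma_mr_seg */

enum dma_dir { DIR_TO_DEVICE, DIR_FROM_DEVICE };

/* Like rpcrdma_data_dir(): derive the DMA direction from the I/O type. */
static inline enum dma_dir data_dir(bool writing)
{
        return writing ? DIR_FROM_DEVICE : DIR_TO_DEVICE;
}

/* Like rpcrdma_map_one(): map a single segment; expanded inline per call. */
static inline void map_one(struct device *dev, struct seg *s, enum dma_dir d)
{
        (void)dev; (void)s; (void)d;            /* ... DMA-map one segment ... */
}

static void map_all(struct device *dev, struct seg *segs, int nsegs,
                    bool writing)
{
        /* Loop invariant hoisted: computed once, not once per page. */
        enum dma_dir dir = data_dir(writing);
        int i;

        for (i = 0; i < nsegs; i++)
                map_one(dev, &segs[i], dir);    /* no out-of-line call per page */
}

With map_one() defined static inline in a header, the compiler can expand it into the loop body at each call site, so the per-page cost is just the mapping logic itself; the direction computation, previously redone on every call, is performed once per request.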
--- a/net/sunrpc/xprtrdma/fmr_ops.c
+++ b/net/sunrpc/xprtrdma/fmr_ops.c
@@ -85,6 +85,8 @@ fmr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
 	   int nsegs, bool writing)
 {
 	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
+	struct ib_device *device = ia->ri_id->device;
+	enum dma_data_direction direction = rpcrdma_data_dir(writing);
 	struct rpcrdma_mr_seg *seg1 = seg;
 	struct rpcrdma_mw *mw = seg1->rl_mw;
 	u64 physaddrs[RPCRDMA_MAX_DATA_SEGS];
@@ -97,7 +99,7 @@ fmr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
 	if (nsegs > RPCRDMA_MAX_FMR_SGES)
 		nsegs = RPCRDMA_MAX_FMR_SGES;
 	for (i = 0; i < nsegs;) {
-		rpcrdma_map_one(ia, seg, writing);
+		rpcrdma_map_one(device, seg, direction);
 		physaddrs[i] = seg->mr_dma;
 		len += seg->mr_len;
 		++seg;
@@ -123,7 +125,7 @@ out_maperr:
 		__func__, len, (unsigned long long)seg1->mr_dma,
 		pageoff, i, rc);
 	while (i--)
-		rpcrdma_unmap_one(ia, --seg);
+		rpcrdma_unmap_one(device, --seg);
 	return rc;
 }
 
@@ -135,14 +137,16 @@ fmr_op_unmap(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg)
 {
 	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
 	struct rpcrdma_mr_seg *seg1 = seg;
+	struct ib_device *device;
 	int rc, nsegs = seg->mr_nsegs;
 	LIST_HEAD(l);
 
 	list_add(&seg1->rl_mw->r.fmr->list, &l);
 	rc = ib_unmap_fmr(&l);
 	read_lock(&ia->ri_qplock);
+	device = ia->ri_id->device;
 	while (seg1->mr_nsegs--)
-		rpcrdma_unmap_one(ia, seg++);
+		rpcrdma_unmap_one(device, seg++);
 	read_unlock(&ia->ri_qplock);
 	if (rc)
 		goto out_err;
--- a/net/sunrpc/xprtrdma/frwr_ops.c
+++ b/net/sunrpc/xprtrdma/frwr_ops.c
@@ -178,6 +178,8 @@ frwr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
 	    int nsegs, bool writing)
 {
 	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
+	struct ib_device *device = ia->ri_id->device;
+	enum dma_data_direction direction = rpcrdma_data_dir(writing);
 	struct rpcrdma_mr_seg *seg1 = seg;
 	struct rpcrdma_mw *mw = seg1->rl_mw;
 	struct rpcrdma_frmr *frmr = &mw->r.frmr;
@@ -197,7 +199,7 @@ frwr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
 	if (nsegs > ia->ri_max_frmr_depth)
 		nsegs = ia->ri_max_frmr_depth;
 	for (page_no = i = 0; i < nsegs;) {
-		rpcrdma_map_one(ia, seg, writing);
+		rpcrdma_map_one(device, seg, direction);
 		pa = seg->mr_dma;
 		for (seg_len = seg->mr_len; seg_len > 0; seg_len -= PAGE_SIZE) {
 			frmr->fr_pgl->page_list[page_no++] = pa;
@@ -247,7 +249,7 @@ out_senderr:
 	ib_update_fast_reg_key(mr, --key);
 	frmr->fr_state = FRMR_IS_INVALID;
 	while (i--)
-		rpcrdma_unmap_one(ia, --seg);
+		rpcrdma_unmap_one(device, --seg);
 	return rc;
 }
 
@@ -261,6 +263,7 @@ frwr_op_unmap(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg)
 	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
 	struct ib_send_wr invalidate_wr, *bad_wr;
 	int rc, nsegs = seg->mr_nsegs;
+	struct ib_device *device;
 
 	seg1->rl_mw->r.frmr.fr_state = FRMR_IS_INVALID;
 
@@ -271,8 +274,9 @@ frwr_op_unmap(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg)
 	DECR_CQCOUNT(&r_xprt->rx_ep);
 
 	read_lock(&ia->ri_qplock);
+	device = ia->ri_id->device;
 	while (seg1->mr_nsegs--)
-		rpcrdma_unmap_one(ia, seg++);
+		rpcrdma_unmap_one(device, seg++);
 	rc = ib_post_send(ia->ri_id->qp, &invalidate_wr, &bad_wr);
 	read_unlock(&ia->ri_qplock);
 	if (rc)
--- a/net/sunrpc/xprtrdma/physical_ops.c
+++ b/net/sunrpc/xprtrdma/physical_ops.c
@@ -50,7 +50,8 @@ physical_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
 {
 	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
 
-	rpcrdma_map_one(ia, seg, writing);
+	rpcrdma_map_one(ia->ri_id->device, seg,
+			rpcrdma_data_dir(writing));
 	seg->mr_rkey = ia->ri_bind_mem->rkey;
 	seg->mr_base = seg->mr_dma;
 	seg->mr_nsegs = 1;
@@ -62,7 +63,12 @@ physical_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
 static int
 physical_op_unmap(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg)
 {
-	rpcrdma_unmap_one(&r_xprt->rx_ia, seg);
+	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
+
+	read_lock(&ia->ri_qplock);
+	rpcrdma_unmap_one(ia->ri_id->device, seg);
+	read_unlock(&ia->ri_qplock);
+
 	return 1;
 }
 
--- a/net/sunrpc/xprtrdma/verbs.c
+++ b/net/sunrpc/xprtrdma/verbs.c
@@ -1436,6 +1436,14 @@ rpcrdma_recv_buffer_put(struct rpcrdma_rep *rep)
  * Wrappers for internal-use kmalloc memory registration, used by buffer code.
  */
 
+void
+rpcrdma_mapping_error(struct rpcrdma_mr_seg *seg)
+{
+	dprintk("RPC:       map_one: offset %p iova %llx len %zu\n",
+		seg->mr_offset,
+		(unsigned long long)seg->mr_dma, seg->mr_dmalen);
+}
+
 static int
 rpcrdma_register_internal(struct rpcrdma_ia *ia, void *va, int len,
 			  struct ib_mr **mrp, struct ib_sge *iov)
@@ -1560,42 +1568,6 @@ rpcrdma_free_regbuf(struct rpcrdma_ia *ia, struct rpcrdma_regbuf *rb)
 	}
 }
 
-/*
- * Wrappers for chunk registration, shared by read/write chunk code.
- */
-
-void
-rpcrdma_map_one(struct rpcrdma_ia *ia, struct rpcrdma_mr_seg *seg, bool writing)
-{
-	seg->mr_dir = writing ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
-	seg->mr_dmalen = seg->mr_len;
-	if (seg->mr_page)
-		seg->mr_dma = ib_dma_map_page(ia->ri_id->device,
-				seg->mr_page, offset_in_page(seg->mr_offset),
-				seg->mr_dmalen, seg->mr_dir);
-	else
-		seg->mr_dma = ib_dma_map_single(ia->ri_id->device,
-				seg->mr_offset,
-				seg->mr_dmalen, seg->mr_dir);
-	if (ib_dma_mapping_error(ia->ri_id->device, seg->mr_dma)) {
-		dprintk("RPC:       %s: mr_dma %llx mr_offset %p mr_dma_len %zu\n",
-			__func__,
-			(unsigned long long)seg->mr_dma,
-			seg->mr_offset, seg->mr_dmalen);
-	}
-}
-
-void
-rpcrdma_unmap_one(struct rpcrdma_ia *ia, struct rpcrdma_mr_seg *seg)
-{
-	if (seg->mr_page)
-		ib_dma_unmap_page(ia->ri_id->device,
-				seg->mr_dma, seg->mr_dmalen, seg->mr_dir);
-	else
-		ib_dma_unmap_single(ia->ri_id->device,
-				seg->mr_dma, seg->mr_dmalen, seg->mr_dir);
-}
-
 /*
  * Prepost any receive buffer, then post send.
  *
--- a/net/sunrpc/xprtrdma/xprt_rdma.h
+++ b/net/sunrpc/xprtrdma/xprt_rdma.h
@@ -424,8 +424,49 @@ void rpcrdma_free_regbuf(struct rpcrdma_ia *,
 			 struct rpcrdma_regbuf *);
 
 unsigned int rpcrdma_max_segments(struct rpcrdma_xprt *);
-void rpcrdma_map_one(struct rpcrdma_ia *, struct rpcrdma_mr_seg *, bool);
-void rpcrdma_unmap_one(struct rpcrdma_ia *, struct rpcrdma_mr_seg *);
 
+/*
+ * Wrappers for chunk registration, shared by read/write chunk code.
+ */
+
+void rpcrdma_mapping_error(struct rpcrdma_mr_seg *);
+
+static inline enum dma_data_direction
+rpcrdma_data_dir(bool writing)
+{
+	return writing ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+}
+
+static inline void
+rpcrdma_map_one(struct ib_device *device, struct rpcrdma_mr_seg *seg,
+		enum dma_data_direction direction)
+{
+	seg->mr_dir = direction;
+	seg->mr_dmalen = seg->mr_len;
+
+	if (seg->mr_page)
+		seg->mr_dma = ib_dma_map_page(device,
+				seg->mr_page, offset_in_page(seg->mr_offset),
+				seg->mr_dmalen, seg->mr_dir);
+	else
+		seg->mr_dma = ib_dma_map_single(device,
+				seg->mr_offset,
+				seg->mr_dmalen, seg->mr_dir);
+
+	if (ib_dma_mapping_error(device, seg->mr_dma))
+		rpcrdma_mapping_error(seg);
+}
+
+static inline void
+rpcrdma_unmap_one(struct ib_device *device, struct rpcrdma_mr_seg *seg)
+{
+	if (seg->mr_page)
+		ib_dma_unmap_page(device,
+				seg->mr_dma, seg->mr_dmalen, seg->mr_dir);
+	else
+		ib_dma_unmap_single(device,
+				seg->mr_dma, seg->mr_dmalen, seg->mr_dir);
+}
+
 /*
  * RPC/RDMA connection management calls - xprtrdma/rpc_rdma.c
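Note on the header changes above: rpcrdma_map_one() and rpcrdma_unmap_one() move from verbs.c into xprt_rdma.h as static inline functions, and they now take the struct ib_device pointer and dma_data_direction directly instead of re-deriving both from the rpcrdma_ia on every call. Each caller hoists those two values out of its per-page loop, which is exactly the loop-invariant extraction the commit message describes.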