SUNRPC: Fix callback channel
The NFSv4.1 callback channel is currently broken: the receive message
keeps shrinking because the backchannel receive buffer size never gets
reset.

The easiest solution to this problem is to leave the receive buffer
alone and instead adjust the copied request.
Fixes: 38b7631fbe ("nfs4: limit callback decoding to received bytes")
Cc: Benjamin Coddington <bcodding@redhat.com>
Cc: stable@vger.kernel.org
Signed-off-by: Trond Myklebust <trond.myklebust@primarydata.com>
Parent: 527e9316f8
Commit: 756b9b37cf
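The shrinking behaviour described above can be illustrated with a small,
self-contained sketch (hypothetical types and sizes, not kernel code): once
the completion path trims the preallocated receive buffer down to the size of
one request and nothing ever restores it, every later callback sees less and
less room and gets truncated.

/*
 * Hypothetical illustration of the failure mode, not kernel code: the
 * preallocated backchannel request keeps one receive buffer, and the old
 * completion path shrank that buffer to the number of bytes copied without
 * ever restoring it.
 */
#include <stdio.h>
#include <stddef.h>

struct fake_xdr_buf {
	size_t head_len;	/* capacity of the head kvec */
	size_t page_len;
};

/* old behaviour: clamp the persistent receive buffer itself */
static void old_complete(struct fake_xdr_buf *rcv_buf, size_t copied)
{
	if (copied <= rcv_buf->head_len) {
		rcv_buf->head_len = copied;	/* never grows back */
		rcv_buf->page_len = 0;
	} else {
		rcv_buf->page_len = copied - rcv_buf->head_len;
	}
}

int main(void)
{
	struct fake_xdr_buf rcv_buf = { .head_len = 4096, .page_len = 0 };
	size_t callbacks[] = { 120, 500, 900 };	/* sizes of successive requests */

	for (size_t i = 0; i < 3; i++) {
		size_t room = rcv_buf.head_len;
		size_t copied = callbacks[i] < room ? callbacks[i] : room;

		printf("callback %zu: wanted %zu bytes, head room %zu, copied %zu\n",
		       i + 1, callbacks[i], room, copied);
		old_complete(&rcv_buf, copied);
	}
	return 0;
}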
fs/nfs/callback_xdr.c:

@@ -78,8 +78,7 @@ static __be32 *read_buf(struct xdr_stream *xdr, int nbytes)
 
 	p = xdr_inline_decode(xdr, nbytes);
 	if (unlikely(p == NULL))
-		printk(KERN_WARNING "NFS: NFSv4 callback reply buffer overflowed "
-						"or truncated request.\n");
+		printk(KERN_WARNING "NFS: NFSv4 callback reply buffer overflowed!\n");
 	return p;
 }
 
@@ -890,7 +889,6 @@ static __be32 nfs4_callback_compound(struct svc_rqst *rqstp, void *argp, void *r
 	struct cb_compound_hdr_arg hdr_arg = { 0 };
 	struct cb_compound_hdr_res hdr_res = { NULL };
 	struct xdr_stream xdr_in, xdr_out;
-	struct xdr_buf *rq_arg = &rqstp->rq_arg;
 	__be32 *p, status;
 	struct cb_process_state cps = {
 		.drc_status = 0,
@@ -902,8 +900,7 @@ static __be32 nfs4_callback_compound(struct svc_rqst *rqstp, void *argp, void *r
 
 	dprintk("%s: start\n", __func__);
 
-	rq_arg->len = rq_arg->head[0].iov_len + rq_arg->page_len;
-	xdr_init_decode(&xdr_in, rq_arg, rq_arg->head[0].iov_base);
+	xdr_init_decode(&xdr_in, &rqstp->rq_arg, rqstp->rq_arg.head[0].iov_base);
 
 	p = (__be32*)((char *)rqstp->rq_res.head[0].iov_base + rqstp->rq_res.head[0].iov_len);
 	xdr_init_encode(&xdr_out, &rqstp->rq_res, p);
net/sunrpc/backchannel_rqst.c:

@@ -353,20 +353,12 @@ void xprt_complete_bc_request(struct rpc_rqst *req, uint32_t copied)
 {
 	struct rpc_xprt *xprt = req->rq_xprt;
 	struct svc_serv *bc_serv = xprt->bc_serv;
-	struct xdr_buf *rq_rcv_buf = &req->rq_rcv_buf;
 
 	spin_lock(&xprt->bc_pa_lock);
 	list_del(&req->rq_bc_pa_list);
 	xprt_dec_alloc_count(xprt, 1);
 	spin_unlock(&xprt->bc_pa_lock);
 
-	if (copied <= rq_rcv_buf->head[0].iov_len) {
-		rq_rcv_buf->head[0].iov_len = copied;
-		rq_rcv_buf->page_len = 0;
-	} else {
-		rq_rcv_buf->page_len = copied - rq_rcv_buf->head[0].iov_len;
-	}
-
 	req->rq_private_buf.len = copied;
 	set_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state);
 
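Taken together with the svc.c hunk that follows, the design choice here is to
keep the preallocated receive buffer at its full size: the completion path now
only records how many bytes arrived (rq_private_buf.len), and any trimming is
applied to the per-request copy that bc_svc_process makes. A rough sketch of
that division of work, with hypothetical names (simplified, not kernel code):

#include <stddef.h>
#include <stdio.h>

/* Hypothetical sketch, not kernel code: record on completion, trim the copy later. */
struct bc_req {
	size_t rcv_buf_size;	/* preallocated capacity, now left untouched */
	size_t private_len;	/* bytes actually received for this callback */
};

/* transport completion (cf. xprt_complete_bc_request above) */
static void bc_complete(struct bc_req *req, size_t copied)
{
	req->private_len = copied;	/* the receive buffer keeps its full size */
}

/* server processing (cf. bc_svc_process below): bound the per-request copy */
static size_t bc_arg_len(const struct bc_req *req)
{
	return req->private_len < req->rcv_buf_size ?
	       req->private_len : req->rcv_buf_size;
}

int main(void)
{
	struct bc_req req = { .rcv_buf_size = 4096 };

	bc_complete(&req, 200);
	printf("usable argument length: %zu (capacity still %zu)\n",
	       bc_arg_len(&req), req.rcv_buf_size);
	return 0;
}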
net/sunrpc/svc.c:

@@ -1363,7 +1363,19 @@ bc_svc_process(struct svc_serv *serv, struct rpc_rqst *req,
 	memcpy(&rqstp->rq_addr, &req->rq_xprt->addr, rqstp->rq_addrlen);
 	memcpy(&rqstp->rq_arg, &req->rq_rcv_buf, sizeof(rqstp->rq_arg));
 	memcpy(&rqstp->rq_res, &req->rq_snd_buf, sizeof(rqstp->rq_res));
+
+	/* Adjust the argument buffer length */
 	rqstp->rq_arg.len = req->rq_private_buf.len;
+	if (rqstp->rq_arg.len <= rqstp->rq_arg.head[0].iov_len) {
+		rqstp->rq_arg.head[0].iov_len = rqstp->rq_arg.len;
+		rqstp->rq_arg.page_len = 0;
+	} else if (rqstp->rq_arg.len <= rqstp->rq_arg.head[0].iov_len +
+			rqstp->rq_arg.page_len)
+		rqstp->rq_arg.page_len = rqstp->rq_arg.len -
+			rqstp->rq_arg.head[0].iov_len;
+	else
+		rqstp->rq_arg.len = rqstp->rq_arg.head[0].iov_len +
+			rqstp->rq_arg.page_len;
 
 	/* reset result send buffer "put" position */
 	resv->iov_len = 0;
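For reference, the adjustment added above splits the received length between
the head kvec and the page data of rqstp->rq_arg, which is a memcpy'd copy of
the receive buffer, so the preallocated buffer itself keeps its full size for
the next callback. A simplified, userspace rendering of that arithmetic
(hypothetical struct, not the kernel's xdr_buf):

#include <stdio.h>
#include <stddef.h>

/* hypothetical stand-in for the head/pages split of an xdr_buf */
struct arg_buf {
	size_t head_len;
	size_t page_len;
	size_t len;
};

/* mirrors the clamping added to bc_svc_process: 'copied' is the number of
 * bytes actually received for this callback */
static void adjust_arg(struct arg_buf *arg, size_t copied)
{
	arg->len = copied;
	if (arg->len <= arg->head_len) {
		arg->head_len = arg->len;			/* fits entirely in the head */
		arg->page_len = 0;
	} else if (arg->len <= arg->head_len + arg->page_len) {
		arg->page_len = arg->len - arg->head_len;	/* spills into pages */
	} else {
		arg->len = arg->head_len + arg->page_len;	/* clamp to capacity */
	}
}

int main(void)
{
	struct arg_buf small = { .head_len = 4096, .page_len = 4096 };
	struct arg_buf large = { .head_len = 4096, .page_len = 4096 };

	adjust_arg(&small, 200);	/* -> head 200, pages 0    */
	adjust_arg(&large, 5000);	/* -> head 4096, pages 904 */

	printf("small: head=%zu pages=%zu len=%zu\n",
	       small.head_len, small.page_len, small.len);
	printf("large: head=%zu pages=%zu len=%zu\n",
	       large.head_len, large.page_len, large.len);
	return 0;
}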