Merge branch 'for-2.6.30' of git://linux-nfs.org/~bfields/linux

* 'for-2.6.30' of git://linux-nfs.org/~bfields/linux:
  svcrdma: dma unmap the correct length for the RPCRDMA header page.
  nfsd: Revert "svcrpc: take advantage of tcp autotuning"
  nfsd: fix hung up of nfs client while sync write data to nfs server
Linus Torvalds 2009-05-29 08:49:09 -07:00
Parents: 5f789cd8ba 98779be861
Commit: c8bce3d3bd
4 changed files with 42 additions and 21 deletions

--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c

@@ -1015,6 +1015,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
 	host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
 	set_fs(oldfs);
 	if (host_err >= 0) {
+		*cnt = host_err;
 		nfsdstats.io_write += host_err;
 		fsnotify_modify(file->f_path.dentry);
 	}
@@ -1060,10 +1061,9 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
 	}
 
 	dprintk("nfsd: write complete host_err=%d\n", host_err);
-	if (host_err >= 0) {
+	if (host_err >= 0)
 		err = 0;
-		*cnt = host_err;
-	} else
+	else
 		err = nfserrno(host_err);
 out:
 	return err;
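The point of the vfs.c change: *cnt is recorded immediately after a successful vfs_writev(), before the stable-write path can reuse host_err for the sync result. A minimal user-space sketch of that pattern follows; do_write() and do_sync_ok() are made-up stand-ins, not kernel functions. It shows why setting *cnt only at the end would report the sync's return value (0) instead of the byte count, which appears to be what left sync-writing clients hung.

#include <stdio.h>

static long do_write(long bytes) { return bytes; } /* >= 0: bytes written */
static long do_sync_ok(void)     { return 0; }     /* 0 on success */

static int write_then_sync(long bytes, unsigned long *cnt)
{
	long host_err = do_write(bytes);

	if (host_err >= 0)
		*cnt = host_err;         /* record the byte count here ... */

	if (host_err >= 0)
		host_err = do_sync_ok(); /* ... because this reuses host_err */

	/* Setting *cnt from host_err only at this point would store the
	 * sync's return value (0), not the number of bytes written. */
	return host_err < 0 ? -1 : 0;
}

int main(void)
{
	unsigned long cnt = 0;
	int err = write_then_sync(4096, &cnt);
	printf("err=%d cnt=%lu\n", err, cnt); /* prints: err=0 cnt=4096 */
	return 0;
}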

--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c

@@ -345,6 +345,7 @@ static void svc_sock_setbufsize(struct socket *sock, unsigned int snd,
 	lock_sock(sock->sk);
 	sock->sk->sk_sndbuf = snd * 2;
 	sock->sk->sk_rcvbuf = rcv * 2;
+	sock->sk->sk_userlocks |= SOCK_SNDBUF_LOCK|SOCK_RCVBUF_LOCK;
 	release_sock(sock->sk);
 #endif
 }
@@ -796,6 +797,23 @@ static int svc_tcp_recvfrom(struct svc_rqst *rqstp)
 		    test_bit(XPT_CONN, &svsk->sk_xprt.xpt_flags),
 		    test_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags));
 
+	if (test_and_clear_bit(XPT_CHNGBUF, &svsk->sk_xprt.xpt_flags))
+		/* sndbuf needs to have room for one request
+		 * per thread, otherwise we can stall even when the
+		 * network isn't a bottleneck.
+		 *
+		 * We count all threads rather than threads in a
+		 * particular pool, which provides an upper bound
+		 * on the number of threads which will access the socket.
+		 *
+		 * rcvbuf just needs to be able to hold a few requests.
+		 * Normally they will be removed from the queue
+		 * as soon as a complete request arrives.
+		 */
+		svc_sock_setbufsize(svsk->sk_sock,
+				    (serv->sv_nrthreads+3) * serv->sv_max_mesg,
+				    3 * serv->sv_max_mesg);
+
 	clear_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
 
 	/* Receive data. If we haven't got the record length yet, get
@@ -1043,6 +1061,15 @@ static void svc_tcp_init(struct svc_sock *svsk, struct svc_serv *serv)
 		tcp_sk(sk)->nonagle |= TCP_NAGLE_OFF;
 
+		/* initialise setting must have enough space to
+		 * receive and respond to one request.
+		 * svc_tcp_recvfrom will re-adjust if necessary
+		 */
+		svc_sock_setbufsize(svsk->sk_sock,
+				    3 * svsk->sk_xprt.xpt_server->sv_max_mesg,
+				    3 * svsk->sk_xprt.xpt_server->sv_max_mesg);
+
+		set_bit(XPT_CHNGBUF, &svsk->sk_xprt.xpt_flags);
 		set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
 		if (sk->sk_state != TCP_ESTABLISHED)
 			set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags);
@@ -1112,14 +1139,8 @@ static struct svc_sock *svc_setup_socket(struct svc_serv *serv,
 	/* Initialize the socket */
 	if (sock->type == SOCK_DGRAM)
 		svc_udp_init(svsk, serv);
-	else {
-		/* initialise setting must have enough space to
-		 * receive and respond to one request.
-		 */
-		svc_sock_setbufsize(svsk->sk_sock, 4 * serv->sv_max_mesg,
-				    4 * serv->sv_max_mesg);
+	else
 		svc_tcp_init(svsk, serv);
-	}
 
 	dprintk("svc: svc_setup_socket created %p (inet %p)\n",
 		svsk, svsk->sk_sk);
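The revert restores the manual sizing policy spelled out in the comment above: sndbuf gets room for one full-size reply per nfsd thread (all threads, as an upper bound) plus slack, rcvbuf gets room for a few requests, and svc_sock_setbufsize() doubles both and locks them so TCP autotuning cannot shrink them again. A standalone sketch of the arithmetic, with made-up values for sv_nrthreads and sv_max_mesg:

#include <stdio.h>

int main(void)
{
	unsigned int sv_nrthreads = 8;           /* assumed thread count */
	unsigned int sv_max_mesg  = 1024 * 1024; /* assumed max RPC message */

	/* one reply per thread, plus slack, as in svc_tcp_recvfrom() */
	unsigned int snd = (sv_nrthreads + 3) * sv_max_mesg;
	/* a few requests' worth of receive space */
	unsigned int rcv = 3 * sv_max_mesg;

	/* svc_sock_setbufsize() then doubles these and sets
	 * SOCK_SNDBUF_LOCK|SOCK_RCVBUF_LOCK on sk_userlocks */
	printf("sk_sndbuf = %u, sk_rcvbuf = %u\n", snd * 2, rcv * 2);
	return 0;
}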

--- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c

@@ -128,7 +128,8 @@ static int fast_reg_xdr(struct svcxprt_rdma *xprt,
 		page_bytes -= sge_bytes;
 
 		frmr->page_list->page_list[page_no] =
-			ib_dma_map_page(xprt->sc_cm_id->device, page, 0,
+			ib_dma_map_single(xprt->sc_cm_id->device,
+					  page_address(page),
 					  PAGE_SIZE, DMA_TO_DEVICE);
 		if (ib_dma_mapping_error(xprt->sc_cm_id->device,
 					 frmr->page_list->page_list[page_no]))
@@ -532,18 +533,17 @@ static int send_reply(struct svcxprt_rdma *rdma,
 	clear_bit(RDMACTXT_F_FAST_UNREG, &ctxt->flags);
 
 	/* Prepare the SGE for the RPCRDMA Header */
+	ctxt->sge[0].lkey = rdma->sc_dma_lkey;
+	ctxt->sge[0].length = svc_rdma_xdr_get_reply_hdr_len(rdma_resp);
 	ctxt->sge[0].addr =
-		ib_dma_map_page(rdma->sc_cm_id->device,
-				page, 0, PAGE_SIZE, DMA_TO_DEVICE);
+		ib_dma_map_single(rdma->sc_cm_id->device, page_address(page),
+				  ctxt->sge[0].length, DMA_TO_DEVICE);
 	if (ib_dma_mapping_error(rdma->sc_cm_id->device, ctxt->sge[0].addr))
 		goto err;
 	atomic_inc(&rdma->sc_dma_used);
 
 	ctxt->direction = DMA_TO_DEVICE;
-	ctxt->sge[0].length = svc_rdma_xdr_get_reply_hdr_len(rdma_resp);
-	ctxt->sge[0].lkey = rdma->sc_dma_lkey;
 
 	/* Determine how many of our SGE are to be transmitted */
 	for (sge_no = 1; byte_count && sge_no < vec->count; sge_no++) {
 		sge_bytes = min_t(size_t, vec->sge[sge_no].iov_len, byte_count);
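The send_reply() change is what the first commit subject refers to: the header SGE's length is now filled in before the mapping, and exactly that many bytes are mapped, so the later unmap (which is driven by sge[0].length) matches what was mapped instead of a full PAGE_SIZE. A toy sketch of the invariant, with logging stand-ins rather than the real ib_dma_* calls; the hdr_len value is illustrative:

#include <stdio.h>
#include <stdint.h>

struct sge { uintptr_t addr; unsigned int length; };

/* toy stand-ins for ib_dma_map_single()/ib_dma_unmap_single() that
 * just log what they are asked to do */
static uintptr_t dma_map(void *va, unsigned int len)
{
	printf("map   %p len=%u\n", va, len);
	return (uintptr_t)va;
}

static void dma_unmap(uintptr_t addr, unsigned int len)
{
	printf("unmap %#lx len=%u\n", (unsigned long)addr, len);
}

int main(void)
{
	char header_page[4096];
	unsigned int hdr_len = 104; /* e.g. the encoded RPCRDMA header length */
	struct sge sge0;

	/* fix: fill in the length first, then map exactly that many bytes */
	sge0.length = hdr_len;
	sge0.addr   = dma_map(header_page, sge0.length);

	/* the completion path unmaps sge0.length, so map and unmap agree;
	 * mapping PAGE_SIZE here while unmapping sge0.length was the bug */
	dma_unmap(sge0.addr, sge0.length);
	return 0;
}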

--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c

@@ -500,8 +500,8 @@ int svc_rdma_post_recv(struct svcxprt_rdma *xprt)
 		BUG_ON(sge_no >= xprt->sc_max_sge);
 		page = svc_rdma_get_page();
 		ctxt->pages[sge_no] = page;
-		pa = ib_dma_map_page(xprt->sc_cm_id->device,
-				     page, 0, PAGE_SIZE,
+		pa = ib_dma_map_single(xprt->sc_cm_id->device,
+				       page_address(page), PAGE_SIZE,
 				     DMA_FROM_DEVICE);
 		if (ib_dma_mapping_error(xprt->sc_cm_id->device, pa))
 			goto err_put_ctxt;
@@ -1315,8 +1315,8 @@ void svc_rdma_send_error(struct svcxprt_rdma *xprt, struct rpcrdma_msg *rmsgp,
 	length = svc_rdma_xdr_encode_error(xprt, rmsgp, err, va);
 
 	/* Prepare SGE for local address */
-	sge.addr = ib_dma_map_page(xprt->sc_cm_id->device,
-				   p, 0, PAGE_SIZE, DMA_FROM_DEVICE);
+	sge.addr = ib_dma_map_single(xprt->sc_cm_id->device,
+				     page_address(p), PAGE_SIZE, DMA_FROM_DEVICE);
 	if (ib_dma_mapping_error(xprt->sc_cm_id->device, sge.addr)) {
 		put_page(p);
 		return;
@@ -1343,7 +1343,7 @@ void svc_rdma_send_error(struct svcxprt_rdma *xprt, struct rpcrdma_msg *rmsgp,
 	if (ret) {
 		dprintk("svcrdma: Error %d posting send for protocol error\n",
 			ret);
-		ib_dma_unmap_page(xprt->sc_cm_id->device,
+		ib_dma_unmap_single(xprt->sc_cm_id->device,
 				  sge.addr, PAGE_SIZE,
 				  DMA_FROM_DEVICE);
 		svc_rdma_put_context(ctxt, 1);
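Throughout the conversion from ib_dma_map_page() to ib_dma_map_single() with page_address(), every mapping is still checked with ib_dma_mapping_error() before the buffer is posted, and the error paths release what was taken (put_page(), svc_rdma_put_context()). A minimal user-space sketch of that check-before-use shape, using toy stand-ins for the ib_ helpers:

#include <stdio.h>
#include <stdint.h>

#define DMA_ERROR ((uintptr_t)0)

static uintptr_t toy_map_single(void *va, unsigned int len)
{
	(void)len;
	return va ? (uintptr_t)va : DMA_ERROR; /* NULL models a failed mapping */
}

static int toy_mapping_error(uintptr_t addr)
{
	return addr == DMA_ERROR;
}

int main(void)
{
	char page[4096];

	uintptr_t pa = toy_map_single(page, sizeof(page));
	if (toy_mapping_error(pa)) {
		/* mirror of the goto err_put_ctxt / put_page(p) paths:
		 * release resources and bail out before posting anything */
		fprintf(stderr, "dma mapping failed\n");
		return 1;
	}
	printf("mapped at %#lx\n", (unsigned long)pa);
	return 0;
}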