NFS: Remove nfs_writepage_sync()

Maintaining two parallel ways of doing synchronous writes is rather
pointless. This patch gets rid of the legacy nfs_writepage_sync() and
replaces it with the faster asynchronous write path.

Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
Author: Trond Myklebust
Date: 2006-12-05 00:35:40 -05:00
Parent: e21195a740
Commit: 200baa2112
6 changed files, 10 insertions(+), 258 deletions(-)
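For context, a minimal conceptual sketch (not part of this patch) of the pattern that replaces nfs_writepage_sync(): queue the dirty range through the ordinary asynchronous write path, then wait for the resulting writeback, instead of issuing a blocking per-page WRITE RPC. The helper name nfs_write_page_and_wait() is invented for illustration; nfs_writepage_setup() and nfs_wb_page() are the existing client helpers the sketch assumes.

/* Illustrative sketch only -- not code from this commit. */
static int nfs_write_page_and_wait(struct nfs_open_context *ctx,
				   struct page *page,
				   unsigned int offset, unsigned int count)
{
	struct inode *inode = page->mapping->host;
	int err;

	/* Queue the dirty range as a normal asynchronous write request. */
	err = nfs_writepage_setup(ctx, page, offset, count);
	if (err)
		return err;

	/* Wait for the outstanding writeback (and any COMMIT) on this page. */
	return nfs_wb_page(inode, page);
}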

fs/nfs/file.c

@@ -378,6 +378,12 @@ static ssize_t nfs_file_write(struct kiocb *iocb, const struct iovec *iov,
nfs_add_stats(inode, NFSIOS_NORMALWRITTENBYTES, count);
result = generic_file_aio_write(iocb, iov, nr_segs, pos);
/* Return error values for O_SYNC and IS_SYNC() */
if (result >= 0 && (IS_SYNC(inode) || (iocb->ki_filp->f_flags & O_SYNC))) {
int err = nfs_fsync(iocb->ki_filp, dentry, 1);
if (err < 0)
result = err;
}
out:
return result;
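Seen from user space, the hunk above keeps O_SYNC data-integrity semantics on the asynchronous path: write() only returns after nfs_fsync() has flushed the data, so any flush error is reported by write() itself. A hypothetical illustration (the mount point and file name are placeholders):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char buf[] = "hello\n";
	/* "/mnt/nfs/testfile" is only a placeholder path for this example. */
	int fd = open("/mnt/nfs/testfile", O_WRONLY | O_CREAT | O_SYNC, 0644);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* With O_SYNC, a failure to flush to the server is reported here,
	 * not deferred to a later fsync() or close(). */
	if (write(fd, buf, sizeof(buf) - 1) < 0)
		perror("write");
	close(fd);
	return 0;
}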

fs/nfs/nfs3proc.c

@@ -276,51 +276,6 @@ static int nfs3_proc_read(struct nfs_read_data *rdata)
return status;
}
static int nfs3_proc_write(struct nfs_write_data *wdata)
{
int rpcflags = wdata->flags;
struct inode * inode = wdata->inode;
struct nfs_fattr * fattr = wdata->res.fattr;
struct rpc_message msg = {
.rpc_proc = &nfs3_procedures[NFS3PROC_WRITE],
.rpc_argp = &wdata->args,
.rpc_resp = &wdata->res,
.rpc_cred = wdata->cred,
};
int status;
dprintk("NFS call write %d @ %Ld\n", wdata->args.count,
(long long) wdata->args.offset);
nfs_fattr_init(fattr);
status = rpc_call_sync(NFS_CLIENT(inode), &msg, rpcflags);
if (status >= 0)
nfs_post_op_update_inode(inode, fattr);
dprintk("NFS reply write: %d\n", status);
return status < 0? status : wdata->res.count;
}
static int nfs3_proc_commit(struct nfs_write_data *cdata)
{
struct inode * inode = cdata->inode;
struct nfs_fattr * fattr = cdata->res.fattr;
struct rpc_message msg = {
.rpc_proc = &nfs3_procedures[NFS3PROC_COMMIT],
.rpc_argp = &cdata->args,
.rpc_resp = &cdata->res,
.rpc_cred = cdata->cred,
};
int status;
dprintk("NFS call commit %d @ %Ld\n", cdata->args.count,
(long long) cdata->args.offset);
nfs_fattr_init(fattr);
status = rpc_call_sync(NFS_CLIENT(inode), &msg, 0);
if (status >= 0)
nfs_post_op_update_inode(inode, fattr);
dprintk("NFS reply commit: %d\n", status);
return status;
}
/*
* Create a regular file.
* For now, we don't implement O_EXCL.
@@ -901,8 +856,6 @@ const struct nfs_rpc_ops nfs_v3_clientops = {
.access = nfs3_proc_access,
.readlink = nfs3_proc_readlink,
.read = nfs3_proc_read,
.write = nfs3_proc_write,
.commit = nfs3_proc_commit,
.create = nfs3_proc_create,
.remove = nfs3_proc_remove,
.unlink_setup = nfs3_proc_unlink_setup,

fs/nfs/nfs4proc.c

@@ -1775,89 +1775,6 @@ static int nfs4_proc_read(struct nfs_read_data *rdata)
return err;
}
static int _nfs4_proc_write(struct nfs_write_data *wdata)
{
int rpcflags = wdata->flags;
struct inode *inode = wdata->inode;
struct nfs_fattr *fattr = wdata->res.fattr;
struct nfs_server *server = NFS_SERVER(inode);
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_WRITE],
.rpc_argp = &wdata->args,
.rpc_resp = &wdata->res,
.rpc_cred = wdata->cred,
};
int status;
dprintk("NFS call write %d @ %Ld\n", wdata->args.count,
(long long) wdata->args.offset);
wdata->args.bitmask = server->attr_bitmask;
wdata->res.server = server;
wdata->timestamp = jiffies;
nfs_fattr_init(fattr);
status = rpc_call_sync(server->client, &msg, rpcflags);
dprintk("NFS reply write: %d\n", status);
if (status < 0)
return status;
renew_lease(server, wdata->timestamp);
nfs_post_op_update_inode(inode, fattr);
return wdata->res.count;
}
static int nfs4_proc_write(struct nfs_write_data *wdata)
{
struct nfs4_exception exception = { };
int err;
do {
err = nfs4_handle_exception(NFS_SERVER(wdata->inode),
_nfs4_proc_write(wdata),
&exception);
} while (exception.retry);
return err;
}
static int _nfs4_proc_commit(struct nfs_write_data *cdata)
{
struct inode *inode = cdata->inode;
struct nfs_fattr *fattr = cdata->res.fattr;
struct nfs_server *server = NFS_SERVER(inode);
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COMMIT],
.rpc_argp = &cdata->args,
.rpc_resp = &cdata->res,
.rpc_cred = cdata->cred,
};
int status;
dprintk("NFS call commit %d @ %Ld\n", cdata->args.count,
(long long) cdata->args.offset);
cdata->args.bitmask = server->attr_bitmask;
cdata->res.server = server;
cdata->timestamp = jiffies;
nfs_fattr_init(fattr);
status = rpc_call_sync(server->client, &msg, 0);
if (status >= 0)
renew_lease(server, cdata->timestamp);
dprintk("NFS reply commit: %d\n", status);
if (status >= 0)
nfs_post_op_update_inode(inode, fattr);
return status;
}
static int nfs4_proc_commit(struct nfs_write_data *cdata)
{
struct nfs4_exception exception = { };
int err;
do {
err = nfs4_handle_exception(NFS_SERVER(cdata->inode),
_nfs4_proc_commit(cdata),
&exception);
} while (exception.retry);
return err;
}
/*
* Got race?
* We will need to arrange for the VFS layer to provide an atomic open.
@@ -3730,8 +3647,6 @@ const struct nfs_rpc_ops nfs_v4_clientops = {
.access = nfs4_proc_access,
.readlink = nfs4_proc_readlink,
.read = nfs4_proc_read,
.write = nfs4_proc_write,
.commit = nfs4_proc_commit,
.create = nfs4_proc_create,
.remove = nfs4_proc_remove,
.unlink_setup = nfs4_proc_unlink_setup,

fs/nfs/proc.c

@@ -215,32 +215,6 @@ static int nfs_proc_read(struct nfs_read_data *rdata)
return status;
}
static int nfs_proc_write(struct nfs_write_data *wdata)
{
int flags = wdata->flags;
struct inode * inode = wdata->inode;
struct nfs_fattr * fattr = wdata->res.fattr;
struct rpc_message msg = {
.rpc_proc = &nfs_procedures[NFSPROC_WRITE],
.rpc_argp = &wdata->args,
.rpc_resp = &wdata->res,
.rpc_cred = wdata->cred,
};
int status;
dprintk("NFS call write %d @ %Ld\n", wdata->args.count,
(long long) wdata->args.offset);
nfs_fattr_init(fattr);
status = rpc_call_sync(NFS_CLIENT(inode), &msg, flags);
if (status >= 0) {
nfs_post_op_update_inode(inode, fattr);
wdata->res.count = wdata->args.count;
wdata->verf.committed = NFS_FILE_SYNC;
}
dprintk("NFS reply write: %d\n", status);
return status < 0? status : wdata->res.count;
}
static int
nfs_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
int flags, struct nameidata *nd)
@@ -693,8 +667,6 @@ const struct nfs_rpc_ops nfs_v2_clientops = {
.access = NULL, /* access */
.readlink = nfs_proc_readlink,
.read = nfs_proc_read,
.write = nfs_proc_write,
.commit = NULL, /* commit */
.create = nfs_proc_create,
.remove = nfs_proc_remove,
.unlink_setup = nfs_proc_unlink_setup,

fs/nfs/write.c

@@ -210,78 +210,6 @@ static void nfs_mark_uptodate(struct page *page, unsigned int base, unsigned int
SetPageUptodate(page);
}
/*
* Write a page synchronously.
* Offset is the data offset within the page.
*/
static int nfs_writepage_sync(struct nfs_open_context *ctx, struct page *page,
unsigned int offset, unsigned int count, int how)
{
struct inode *inode = page->mapping->host;
unsigned int wsize = NFS_SERVER(inode)->wsize;
int result, written = 0;
struct nfs_write_data *wdata;
wdata = nfs_writedata_alloc(wsize);
if (!wdata)
return -ENOMEM;
wdata->flags = how;
wdata->cred = ctx->cred;
wdata->inode = inode;
wdata->args.fh = NFS_FH(inode);
wdata->args.context = ctx;
wdata->args.pages = &page;
wdata->args.stable = NFS_FILE_SYNC;
wdata->args.pgbase = offset;
wdata->args.count = wsize;
wdata->res.fattr = &wdata->fattr;
wdata->res.verf = &wdata->verf;
dprintk("NFS: nfs_writepage_sync(%s/%Ld %d@%Ld)\n",
inode->i_sb->s_id,
(long long)NFS_FILEID(inode),
count, (long long)(page_offset(page) + offset));
set_page_writeback(page);
nfs_begin_data_update(inode);
do {
if (count < wsize)
wdata->args.count = count;
wdata->args.offset = page_offset(page) + wdata->args.pgbase;
result = NFS_PROTO(inode)->write(wdata);
if (result < 0) {
/* Must mark the page invalid after I/O error */
ClearPageUptodate(page);
goto io_error;
}
if (result < wdata->args.count)
printk(KERN_WARNING "NFS: short write, count=%u, result=%d\n",
wdata->args.count, result);
wdata->args.offset += result;
wdata->args.pgbase += result;
written += result;
count -= result;
nfs_add_stats(inode, NFSIOS_SERVERWRITTENBYTES, result);
} while (count);
/* Update file length */
nfs_grow_file(page, offset, written);
/* Set the PG_uptodate flag? */
nfs_mark_uptodate(page, offset, written);
if (PageError(page))
ClearPageError(page);
io_error:
nfs_end_data_update(inode);
end_page_writeback(page);
nfs_writedata_release(wdata);
return written ? written : result;
}
static int nfs_writepage_setup(struct nfs_open_context *ctx, struct page *page,
unsigned int offset, unsigned int count)
{
@@ -342,22 +270,12 @@ int nfs_writepage(struct page *page, struct writeback_control *wbc)
err = -EBADF;
goto out;
}
lock_kernel();
if (!IS_SYNC(inode)) {
err = nfs_writepage_setup(ctx, page, 0, offset);
if (!wbc->for_writepages)
nfs_flush_mapping(page->mapping, wbc, wb_priority(wbc));
} else {
err = nfs_writepage_sync(ctx, page, 0, offset, wb_priority(wbc));
if (err >= 0) {
if (err != offset)
redirty_page_for_writepage(wbc, page);
err = 0;
}
}
unlock_kernel();
err = nfs_writepage_setup(ctx, page, 0, offset);
put_nfs_open_context(ctx);
out:
if (!wbc->for_writepages)
nfs_flush_mapping(page->mapping, wbc, wb_priority(wbc));
unlock_page(page);
return err;
}
@@ -777,16 +695,6 @@ int nfs_updatepage(struct file *file, struct page *page,
file->f_dentry->d_name.name, count,
(long long)(page_offset(page) +offset));
if (IS_SYNC(inode)) {
status = nfs_writepage_sync(ctx, page, offset, count, 0);
if (status > 0) {
if (offset == 0 && status == PAGE_CACHE_SIZE)
SetPageUptodate(page);
return 0;
}
return status;
}
/* If we're not using byte range locks, and we know the page
* is entirely in cache, it may be more efficient to avoid
* fragmenting write requests.

include/linux/nfs_xdr.h

@@ -785,8 +785,6 @@ struct nfs_rpc_ops {
int (*readlink)(struct inode *, struct page *, unsigned int,
unsigned int);
int (*read) (struct nfs_read_data *);
int (*write) (struct nfs_write_data *);
int (*commit) (struct nfs_write_data *);
int (*create) (struct inode *, struct dentry *,
struct iattr *, int, struct nameidata *);
int (*remove) (struct inode *, struct qstr *);