Merge branch 'nfs-for-3.2' of git://git.linux-nfs.org/projects/trondmy/linux-nfs

* 'nfs-for-3.2' of git://git.linux-nfs.org/projects/trondmy/linux-nfs: (26 commits)
  Check validity of cl_rpcclient in nfs_server_list_show
  NFS: Get rid of the nfs_rdata_mempool
  NFS: Don't rely on PageError in nfs_readpage_release_partial
  NFS: Get rid of unnecessary calls to ClearPageError() in read code
  NFS: Get rid of nfs_restart_rpc()
  NFS: Get rid of the unused nfs_write_data->flags field
  NFS: Get rid of the unused nfs_read_data->flags field
  NFSv4: Translate NFS4ERR_BADNAME into ENOENT when applied to a lookup
  NFS: Remove the unused "lookupfh()" version of nfs4_proc_lookup()
  NFS: Use the inode->i_version to cache NFSv4 change attribute information
  SUNRPC: Remove unnecessary export of rpc_sockaddr2uaddr
  SUNRPC: Fix rpc_sockaddr2uaddr
  nfs/super.c: local functions should be static
  pnfsblock: fix writeback deadlock
  pnfsblock: fix NULL pointer dereference
  pnfs: recoalesce when ld read pagelist fails
  pnfs: recoalesce when ld write pagelist fails
  pnfs: make _set_lo_fail generic
  pnfsblock: add missing rpc_put_mount and path_put
  SUNRPC/NFS: make rpc pipe upcall generic
  ...
Linus Torvalds 2011-10-25 15:44:06 +02:00
Parents: 1442d1678c 940aab4902
Commit: ef78cc75f1
27 changed files with 238 additions and 309 deletions
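A recurring change across the diffs below is the removal of three near-identical pipe ->upcall implementations (bl_pipe_upcall, idmap_pipe_upcall, gss_pipe_upcall) in favour of a single rpc_pipe_generic_upcall helper exported from net/sunrpc/rpc_pipe.c. As an illustrative sketch only (not part of this merge; the ops table name is hypothetical), a pipe user's rpc_pipe_ops now just points its .upcall at the shared helper:

#include <linux/sunrpc/rpc_pipe_fs.h>

/*
 * Sketch of the post-merge wiring: the subsystem keeps its own downcall
 * and destroy_msg handlers, while the copy_to_user boilerplate lives in
 * rpc_pipe_generic_upcall() (see the net/sunrpc/rpc_pipe.c hunk below).
 * bl_pipe_downcall/bl_pipe_destroy_msg are the blocklayout handlers
 * touched by this merge; other pipe users plug in the same way.
 */
static const struct rpc_pipe_ops example_upcall_ops = {
	.upcall		= rpc_pipe_generic_upcall,
	.downcall	= bl_pipe_downcall,
	.destroy_msg	= bl_pipe_destroy_msg,
};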


@ -176,17 +176,6 @@ retry:
return bio;
}
static void bl_set_lo_fail(struct pnfs_layout_segment *lseg)
{
if (lseg->pls_range.iomode == IOMODE_RW) {
dprintk("%s Setting layout IOMODE_RW fail bit\n", __func__);
set_bit(lo_fail_bit(IOMODE_RW), &lseg->pls_layout->plh_flags);
} else {
dprintk("%s Setting layout IOMODE_READ fail bit\n", __func__);
set_bit(lo_fail_bit(IOMODE_READ), &lseg->pls_layout->plh_flags);
}
}
/* This is basically copied from mpage_end_io_read */
static void bl_end_io_read(struct bio *bio, int err)
{
@ -206,7 +195,7 @@ static void bl_end_io_read(struct bio *bio, int err)
if (!uptodate) {
if (!rdata->pnfs_error)
rdata->pnfs_error = -EIO;
bl_set_lo_fail(rdata->lseg);
pnfs_set_lo_fail(rdata->lseg);
}
bio_put(bio);
put_parallel(par);
@ -303,6 +292,7 @@ bl_read_pagelist(struct nfs_read_data *rdata)
bl_end_io_read, par);
if (IS_ERR(bio)) {
rdata->pnfs_error = PTR_ERR(bio);
bio = NULL;
goto out;
}
}
@ -370,7 +360,7 @@ static void bl_end_io_write_zero(struct bio *bio, int err)
if (!uptodate) {
if (!wdata->pnfs_error)
wdata->pnfs_error = -EIO;
bl_set_lo_fail(wdata->lseg);
pnfs_set_lo_fail(wdata->lseg);
}
bio_put(bio);
put_parallel(par);
@ -386,7 +376,7 @@ static void bl_end_io_write(struct bio *bio, int err)
if (!uptodate) {
if (!wdata->pnfs_error)
wdata->pnfs_error = -EIO;
bl_set_lo_fail(wdata->lseg);
pnfs_set_lo_fail(wdata->lseg);
}
bio_put(bio);
put_parallel(par);
@ -543,6 +533,11 @@ bl_write_pagelist(struct nfs_write_data *wdata, int sync)
fill_invalid_ext:
dprintk("%s need to zero %d pages\n", __func__, npg_zero);
for (;npg_zero > 0; npg_zero--) {
if (bl_is_sector_init(be->be_inval, isect)) {
dprintk("isect %llu already init\n",
(unsigned long long)isect);
goto next_page;
}
/* page ref released in bl_end_io_write_zero */
index = isect >> PAGE_CACHE_SECTOR_SHIFT;
dprintk("%s zero %dth page: index %lu isect %llu\n",
@ -562,8 +557,7 @@ fill_invalid_ext:
* PageUptodate: It was read before
* sector_initialized: already written out
*/
if (PageDirty(page) || PageWriteback(page) ||
bl_is_sector_init(be->be_inval, isect)) {
if (PageDirty(page) || PageWriteback(page)) {
print_page(page);
unlock_page(page);
page_cache_release(page);
@ -592,6 +586,7 @@ fill_invalid_ext:
bl_end_io_write_zero, par);
if (IS_ERR(bio)) {
wdata->pnfs_error = PTR_ERR(bio);
bio = NULL;
goto out;
}
/* FIXME: This should be done in bi_end_io */
@ -640,6 +635,7 @@ next_page:
bl_end_io_write, par);
if (IS_ERR(bio)) {
wdata->pnfs_error = PTR_ERR(bio);
bio = NULL;
goto out;
}
isect += PAGE_CACHE_SECTORS;
@ -805,7 +801,7 @@ nfs4_blk_get_deviceinfo(struct nfs_server *server, const struct nfs_fh *fh,
struct nfs4_deviceid *d_id)
{
struct pnfs_device *dev;
struct pnfs_block_dev *rv = NULL;
struct pnfs_block_dev *rv;
u32 max_resp_sz;
int max_pages;
struct page **pages = NULL;
@ -823,19 +819,21 @@ nfs4_blk_get_deviceinfo(struct nfs_server *server, const struct nfs_fh *fh,
dev = kmalloc(sizeof(*dev), GFP_NOFS);
if (!dev) {
dprintk("%s kmalloc failed\n", __func__);
return NULL;
return ERR_PTR(-ENOMEM);
}
pages = kzalloc(max_pages * sizeof(struct page *), GFP_NOFS);
if (pages == NULL) {
kfree(dev);
return NULL;
return ERR_PTR(-ENOMEM);
}
for (i = 0; i < max_pages; i++) {
pages[i] = alloc_page(GFP_NOFS);
if (!pages[i])
if (!pages[i]) {
rv = ERR_PTR(-ENOMEM);
goto out_free;
}
}
memcpy(&dev->dev_id, d_id, sizeof(*d_id));
dev->layout_type = LAYOUT_BLOCK_VOLUME;
@ -847,8 +845,10 @@ nfs4_blk_get_deviceinfo(struct nfs_server *server, const struct nfs_fh *fh,
dprintk("%s: dev_id: %s\n", __func__, dev->dev_id.data);
rc = nfs4_proc_getdeviceinfo(server, dev);
dprintk("%s getdevice info returns %d\n", __func__, rc);
if (rc)
if (rc) {
rv = ERR_PTR(rc);
goto out_free;
}
rv = nfs4_blk_decode_device(server, dev);
out_free:
@ -866,7 +866,7 @@ bl_set_layoutdriver(struct nfs_server *server, const struct nfs_fh *fh)
struct pnfs_devicelist *dlist = NULL;
struct pnfs_block_dev *bdev;
LIST_HEAD(block_disklist);
int status = 0, i;
int status, i;
dprintk("%s enter\n", __func__);
@ -898,8 +898,8 @@ bl_set_layoutdriver(struct nfs_server *server, const struct nfs_fh *fh)
for (i = 0; i < dlist->num_devs; i++) {
bdev = nfs4_blk_get_deviceinfo(server, fh,
&dlist->dev_id[i]);
if (!bdev) {
status = -ENODEV;
if (IS_ERR(bdev)) {
status = PTR_ERR(bdev);
goto out_error;
}
spin_lock(&b_mt_id->bm_lock);
@ -960,7 +960,7 @@ static struct pnfs_layoutdriver_type blocklayout_type = {
};
static const struct rpc_pipe_ops bl_upcall_ops = {
.upcall = bl_pipe_upcall,
.upcall = rpc_pipe_generic_upcall,
.downcall = bl_pipe_downcall,
.destroy_msg = bl_pipe_destroy_msg,
};
@ -989,17 +989,20 @@ static int __init nfs4blocklayout_init(void)
mnt,
NFS_PIPE_DIRNAME, 0, &path);
if (ret)
goto out_remove;
goto out_putrpc;
bl_device_pipe = rpc_mkpipe(path.dentry, "blocklayout", NULL,
&bl_upcall_ops, 0);
path_put(&path);
if (IS_ERR(bl_device_pipe)) {
ret = PTR_ERR(bl_device_pipe);
goto out_remove;
goto out_putrpc;
}
out:
return ret;
out_putrpc:
rpc_put_mount();
out_remove:
pnfs_unregister_layoutdriver(&blocklayout_type);
return ret;
@ -1012,6 +1015,7 @@ static void __exit nfs4blocklayout_exit(void)
pnfs_unregister_layoutdriver(&blocklayout_type);
rpc_unlink(bl_device_pipe);
rpc_put_mount();
}
MODULE_ALIAS("nfs-layouttype4-3");


@ -150,7 +150,7 @@ BLK_LSEG2EXT(struct pnfs_layout_segment *lseg)
}
struct bl_dev_msg {
int status;
int32_t status;
uint32_t major, minor;
};
@ -169,8 +169,6 @@ extern wait_queue_head_t bl_wq;
#define BL_DEVICE_REQUEST_ERR 0x2 /* User level process fails */
/* blocklayoutdev.c */
ssize_t bl_pipe_upcall(struct file *, struct rpc_pipe_msg *,
char __user *, size_t);
ssize_t bl_pipe_downcall(struct file *, const char __user *, size_t);
void bl_pipe_destroy_msg(struct rpc_pipe_msg *);
struct block_device *nfs4_blkdev_get(dev_t dev);


@ -79,28 +79,6 @@ int nfs4_blkdev_put(struct block_device *bdev)
return blkdev_put(bdev, FMODE_READ);
}
/*
* Shouldn't there be a rpc_generic_upcall() to do this for us?
*/
ssize_t bl_pipe_upcall(struct file *filp, struct rpc_pipe_msg *msg,
char __user *dst, size_t buflen)
{
char *data = (char *)msg->data + msg->copied;
size_t mlen = min(msg->len - msg->copied, buflen);
unsigned long left;
left = copy_to_user(dst, data, mlen);
if (left == mlen) {
msg->errno = -EFAULT;
return -EFAULT;
}
mlen -= left;
msg->copied += mlen;
msg->errno = 0;
return mlen;
}
static struct bl_dev_msg bl_mount_reply;
ssize_t bl_pipe_downcall(struct file *filp, const char __user *src,
@ -131,7 +109,7 @@ struct pnfs_block_dev *
nfs4_blk_decode_device(struct nfs_server *server,
struct pnfs_device *dev)
{
struct pnfs_block_dev *rv = NULL;
struct pnfs_block_dev *rv;
struct block_device *bd = NULL;
struct rpc_pipe_msg msg;
struct bl_msg_hdr bl_msg = {
@ -141,7 +119,7 @@ nfs4_blk_decode_device(struct nfs_server *server,
uint8_t *dataptr;
DECLARE_WAITQUEUE(wq, current);
struct bl_dev_msg *reply = &bl_mount_reply;
int offset, len, i;
int offset, len, i, rc;
dprintk("%s CREATING PIPEFS MESSAGE\n", __func__);
dprintk("%s: deviceid: %s, mincount: %d\n", __func__, dev->dev_id.data,
@ -168,8 +146,10 @@ nfs4_blk_decode_device(struct nfs_server *server,
dprintk("%s CALLING USERSPACE DAEMON\n", __func__);
add_wait_queue(&bl_wq, &wq);
if (rpc_queue_upcall(bl_device_pipe->d_inode, &msg) < 0) {
rc = rpc_queue_upcall(bl_device_pipe->d_inode, &msg);
if (rc < 0) {
remove_wait_queue(&bl_wq, &wq);
rv = ERR_PTR(rc);
goto out;
}
@ -187,8 +167,9 @@ nfs4_blk_decode_device(struct nfs_server *server,
bd = nfs4_blkdev_get(MKDEV(reply->major, reply->minor));
if (IS_ERR(bd)) {
dprintk("%s failed to open device : %ld\n",
__func__, PTR_ERR(bd));
rc = PTR_ERR(bd);
dprintk("%s failed to open device : %d\n", __func__, rc);
rv = ERR_PTR(rc);
goto out;
}


@ -1868,6 +1868,10 @@ static int nfs_server_list_show(struct seq_file *m, void *v)
/* display one transport per line on subsequent lines */
clp = list_entry(v, struct nfs_client, cl_share_link);
/* Check if the client is initialized */
if (clp->cl_cons_state != NFS_CS_READY)
return 0;
seq_printf(m, "v%u %s %s %3d %s\n",
clp->rpc_ops->version,
rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_HEX_ADDR),


@ -240,7 +240,7 @@ int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct
sizeof(delegation->stateid.data));
delegation->type = res->delegation_type;
delegation->maxsize = res->maxsize;
delegation->change_attr = nfsi->change_attr;
delegation->change_attr = inode->i_version;
delegation->cred = get_rpccred(cred);
delegation->inode = inode;
delegation->flags = 1<<NFS_DELEGATION_REFERENCED;


@ -212,7 +212,7 @@ static uint16_t nfs_fscache_inode_get_aux(const void *cookie_netfs_data,
auxdata.ctime = nfsi->vfs_inode.i_ctime;
if (NFS_SERVER(&nfsi->vfs_inode)->nfs_client->rpc_ops->version == 4)
auxdata.change_attr = nfsi->change_attr;
auxdata.change_attr = nfsi->vfs_inode.i_version;
if (bufmax > sizeof(auxdata))
bufmax = sizeof(auxdata);
@ -244,7 +244,7 @@ enum fscache_checkaux nfs_fscache_inode_check_aux(void *cookie_netfs_data,
auxdata.ctime = nfsi->vfs_inode.i_ctime;
if (NFS_SERVER(&nfsi->vfs_inode)->nfs_client->rpc_ops->version == 4)
auxdata.change_attr = nfsi->change_attr;
auxdata.change_attr = nfsi->vfs_inode.i_version;
if (memcmp(data, &auxdata, datalen) != 0)
return FSCACHE_CHECKAUX_OBSOLETE;


@ -336,8 +336,6 @@ struct idmap {
struct idmap_hashtable idmap_group_hash;
};
static ssize_t idmap_pipe_upcall(struct file *, struct rpc_pipe_msg *,
char __user *, size_t);
static ssize_t idmap_pipe_downcall(struct file *, const char __user *,
size_t);
static void idmap_pipe_destroy_msg(struct rpc_pipe_msg *);
@ -345,7 +343,7 @@ static void idmap_pipe_destroy_msg(struct rpc_pipe_msg *);
static unsigned int fnvhash32(const void *, size_t);
static const struct rpc_pipe_ops idmap_upcall_ops = {
.upcall = idmap_pipe_upcall,
.upcall = rpc_pipe_generic_upcall,
.downcall = idmap_pipe_downcall,
.destroy_msg = idmap_pipe_destroy_msg,
};
@ -595,27 +593,6 @@ nfs_idmap_name(struct idmap *idmap, struct idmap_hashtable *h,
return ret;
}
/* RPC pipefs upcall/downcall routines */
static ssize_t
idmap_pipe_upcall(struct file *filp, struct rpc_pipe_msg *msg,
char __user *dst, size_t buflen)
{
char *data = (char *)msg->data + msg->copied;
size_t mlen = min(msg->len, buflen);
unsigned long left;
left = copy_to_user(dst, data, mlen);
if (left == mlen) {
msg->errno = -EFAULT;
return -EFAULT;
}
mlen -= left;
msg->copied += mlen;
msg->errno = 0;
return mlen;
}
static ssize_t
idmap_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
{


@ -318,7 +318,7 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr)
memset(&inode->i_atime, 0, sizeof(inode->i_atime));
memset(&inode->i_mtime, 0, sizeof(inode->i_mtime));
memset(&inode->i_ctime, 0, sizeof(inode->i_ctime));
nfsi->change_attr = 0;
inode->i_version = 0;
inode->i_size = 0;
inode->i_nlink = 0;
inode->i_uid = -2;
@ -344,7 +344,7 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr)
| NFS_INO_INVALID_ACCESS
| NFS_INO_INVALID_ACL;
if (fattr->valid & NFS_ATTR_FATTR_CHANGE)
nfsi->change_attr = fattr->change_attr;
inode->i_version = fattr->change_attr;
else if (nfs_server_capable(inode, NFS_CAP_CHANGE_ATTR))
nfsi->cache_validity |= NFS_INO_INVALID_ATTR
| NFS_INO_INVALID_DATA;
@ -897,8 +897,8 @@ static unsigned long nfs_wcc_update_inode(struct inode *inode, struct nfs_fattr
if ((fattr->valid & NFS_ATTR_FATTR_PRECHANGE)
&& (fattr->valid & NFS_ATTR_FATTR_CHANGE)
&& nfsi->change_attr == fattr->pre_change_attr) {
nfsi->change_attr = fattr->change_attr;
&& inode->i_version == fattr->pre_change_attr) {
inode->i_version = fattr->change_attr;
if (S_ISDIR(inode->i_mode))
nfsi->cache_validity |= NFS_INO_INVALID_DATA;
ret |= NFS_INO_INVALID_ATTR;
@ -952,7 +952,7 @@ static int nfs_check_inode_attributes(struct inode *inode, struct nfs_fattr *fat
return -EIO;
if ((fattr->valid & NFS_ATTR_FATTR_CHANGE) != 0 &&
nfsi->change_attr != fattr->change_attr)
inode->i_version != fattr->change_attr)
invalid |= NFS_INO_INVALID_ATTR|NFS_INO_REVAL_PAGECACHE;
/* Verify a few of the more important attributes */
@ -1163,7 +1163,7 @@ int nfs_post_op_update_inode_force_wcc(struct inode *inode, struct nfs_fattr *fa
}
if ((fattr->valid & NFS_ATTR_FATTR_CHANGE) != 0 &&
(fattr->valid & NFS_ATTR_FATTR_PRECHANGE) == 0) {
fattr->pre_change_attr = NFS_I(inode)->change_attr;
fattr->pre_change_attr = inode->i_version;
fattr->valid |= NFS_ATTR_FATTR_PRECHANGE;
}
if ((fattr->valid & NFS_ATTR_FATTR_CTIME) != 0 &&
@ -1244,13 +1244,13 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
/* More cache consistency checks */
if (fattr->valid & NFS_ATTR_FATTR_CHANGE) {
if (nfsi->change_attr != fattr->change_attr) {
if (inode->i_version != fattr->change_attr) {
dprintk("NFS: change_attr change on server for file %s/%ld\n",
inode->i_sb->s_id, inode->i_ino);
invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL;
if (S_ISDIR(inode->i_mode))
nfs_force_lookup_revalidate(inode);
nfsi->change_attr = fattr->change_attr;
inode->i_version = fattr->change_attr;
}
} else if (server->caps & NFS_CAP_CHANGE_ATTR)
invalid |= save_cache_validity;


@ -457,13 +457,3 @@ unsigned int nfs_page_array_len(unsigned int base, size_t len)
PAGE_SIZE - 1) >> PAGE_SHIFT;
}
/*
* Helper for restarting RPC calls in the possible presence of NFSv4.1
* sessions.
*/
static inline int nfs_restart_rpc(struct rpc_task *task, const struct nfs_client *clp)
{
if (nfs4_has_session(clp))
return rpc_restart_call_prepare(task);
return rpc_restart_call(task);
}


@ -77,19 +77,6 @@ filelayout_get_dserver_offset(struct pnfs_layout_segment *lseg, loff_t offset)
BUG();
}
/* For data server errors we don't recover from */
static void
filelayout_set_lo_fail(struct pnfs_layout_segment *lseg)
{
if (lseg->pls_range.iomode == IOMODE_RW) {
dprintk("%s Setting layout IOMODE_RW fail bit\n", __func__);
set_bit(lo_fail_bit(IOMODE_RW), &lseg->pls_layout->plh_flags);
} else {
dprintk("%s Setting layout IOMODE_READ fail bit\n", __func__);
set_bit(lo_fail_bit(IOMODE_READ), &lseg->pls_layout->plh_flags);
}
}
static int filelayout_async_handle_error(struct rpc_task *task,
struct nfs4_state *state,
struct nfs_client *clp,
@ -135,7 +122,6 @@ static int filelayout_async_handle_error(struct rpc_task *task,
static int filelayout_read_done_cb(struct rpc_task *task,
struct nfs_read_data *data)
{
struct nfs_client *clp = data->ds_clp;
int reset = 0;
dprintk("%s DS read\n", __func__);
@ -145,11 +131,10 @@ static int filelayout_read_done_cb(struct rpc_task *task,
dprintk("%s calling restart ds_clp %p ds_clp->cl_session %p\n",
__func__, data->ds_clp, data->ds_clp->cl_session);
if (reset) {
filelayout_set_lo_fail(data->lseg);
pnfs_set_lo_fail(data->lseg);
nfs4_reset_read(task, data);
clp = NFS_SERVER(data->inode)->nfs_client;
}
nfs_restart_rpc(task, clp);
rpc_restart_call_prepare(task);
return -EAGAIN;
}
@ -216,17 +201,13 @@ static int filelayout_write_done_cb(struct rpc_task *task,
if (filelayout_async_handle_error(task, data->args.context->state,
data->ds_clp, &reset) == -EAGAIN) {
struct nfs_client *clp;
dprintk("%s calling restart ds_clp %p ds_clp->cl_session %p\n",
__func__, data->ds_clp, data->ds_clp->cl_session);
if (reset) {
filelayout_set_lo_fail(data->lseg);
pnfs_set_lo_fail(data->lseg);
nfs4_reset_write(task, data);
clp = NFS_SERVER(data->inode)->nfs_client;
} else
clp = data->ds_clp;
nfs_restart_rpc(task, clp);
}
rpc_restart_call_prepare(task);
return -EAGAIN;
}
@ -256,9 +237,9 @@ static int filelayout_commit_done_cb(struct rpc_task *task,
__func__, data->ds_clp, data->ds_clp->cl_session);
if (reset) {
prepare_to_resend_writes(data);
filelayout_set_lo_fail(data->lseg);
pnfs_set_lo_fail(data->lseg);
} else
nfs_restart_rpc(task, data->ds_clp);
rpc_restart_call_prepare(task);
return -EAGAIN;
}


@ -73,9 +73,6 @@ static int _nfs4_proc_open(struct nfs4_opendata *data);
static int _nfs4_recover_proc_open(struct nfs4_opendata *data);
static int nfs4_do_fsinfo(struct nfs_server *, struct nfs_fh *, struct nfs_fsinfo *);
static int nfs4_async_handle_error(struct rpc_task *, const struct nfs_server *, struct nfs4_state *);
static int _nfs4_proc_lookup(struct rpc_clnt *client, struct inode *dir,
const struct qstr *name, struct nfs_fh *fhandle,
struct nfs_fattr *fattr);
static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr);
static int nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
struct nfs_fattr *fattr, struct iattr *sattr,
@ -753,9 +750,9 @@ static void update_changeattr(struct inode *dir, struct nfs4_change_info *cinfo)
spin_lock(&dir->i_lock);
nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_REVAL_PAGECACHE|NFS_INO_INVALID_DATA;
if (!cinfo->atomic || cinfo->before != nfsi->change_attr)
if (!cinfo->atomic || cinfo->before != dir->i_version)
nfs_force_lookup_revalidate(dir);
nfsi->change_attr = cinfo->after;
dir->i_version = cinfo->after;
spin_unlock(&dir->i_lock);
}
@ -1596,8 +1593,14 @@ static int _nfs4_proc_open(struct nfs4_opendata *data)
int status;
status = nfs4_run_open_task(data, 0);
if (status != 0 || !data->rpc_done)
if (!data->rpc_done)
return status;
if (status != 0) {
if (status == -NFS4ERR_BADNAME &&
!(o_arg->open_flags & O_CREAT))
return -ENOENT;
return status;
}
if (o_arg->open_flags & O_CREAT) {
update_changeattr(dir, &o_res->cinfo);
@ -2408,14 +2411,15 @@ nfs4_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr,
return status;
}
static int _nfs4_proc_lookupfh(struct rpc_clnt *clnt, struct nfs_server *server,
const struct nfs_fh *dirfh, const struct qstr *name,
struct nfs_fh *fhandle, struct nfs_fattr *fattr)
static int _nfs4_proc_lookup(struct rpc_clnt *clnt, struct inode *dir,
const struct qstr *name, struct nfs_fh *fhandle,
struct nfs_fattr *fattr)
{
struct nfs_server *server = NFS_SERVER(dir);
int status;
struct nfs4_lookup_arg args = {
.bitmask = server->attr_bitmask,
.dir_fh = dirfh,
.dir_fh = NFS_FH(dir),
.name = name,
};
struct nfs4_lookup_res res = {
@ -2431,40 +2435,8 @@ static int _nfs4_proc_lookupfh(struct rpc_clnt *clnt, struct nfs_server *server,
nfs_fattr_init(fattr);
dprintk("NFS call lookupfh %s\n", name->name);
status = nfs4_call_sync(clnt, server, &msg, &args.seq_args, &res.seq_res, 0);
dprintk("NFS reply lookupfh: %d\n", status);
return status;
}
static int nfs4_proc_lookupfh(struct nfs_server *server, struct nfs_fh *dirfh,
struct qstr *name, struct nfs_fh *fhandle,
struct nfs_fattr *fattr)
{
struct nfs4_exception exception = { };
int err;
do {
err = _nfs4_proc_lookupfh(server->client, server, dirfh, name, fhandle, fattr);
/* FIXME: !!!! */
if (err == -NFS4ERR_MOVED) {
err = -EREMOTE;
break;
}
err = nfs4_handle_exception(server, err, &exception);
} while (exception.retry);
return err;
}
static int _nfs4_proc_lookup(struct rpc_clnt *clnt, struct inode *dir,
const struct qstr *name, struct nfs_fh *fhandle,
struct nfs_fattr *fattr)
{
int status;
dprintk("NFS call lookup %s\n", name->name);
status = _nfs4_proc_lookupfh(clnt, NFS_SERVER(dir), NFS_FH(dir), name, fhandle, fattr);
if (status == -NFS4ERR_MOVED)
status = nfs4_get_referral(dir, name, fattr, fhandle);
status = nfs4_call_sync(clnt, server, &msg, &args.seq_args, &res.seq_res, 0);
dprintk("NFS reply lookup: %d\n", status);
return status;
}
@ -2485,11 +2457,20 @@ static int nfs4_proc_lookup(struct rpc_clnt *clnt, struct inode *dir, struct qst
struct nfs4_exception exception = { };
int err;
do {
err = nfs4_handle_exception(NFS_SERVER(dir),
_nfs4_proc_lookup(clnt, dir, name, fhandle, fattr),
&exception);
if (err == -EPERM)
int status;
status = _nfs4_proc_lookup(clnt, dir, name, fhandle, fattr);
switch (status) {
case -NFS4ERR_BADNAME:
return -ENOENT;
case -NFS4ERR_MOVED:
err = nfs4_get_referral(dir, name, fattr, fhandle);
break;
case -NFS4ERR_WRONGSEC:
nfs_fixup_secinfo_attributes(fattr, fhandle);
}
err = nfs4_handle_exception(NFS_SERVER(dir),
status, &exception);
} while (exception.retry);
return err;
}
@ -3210,7 +3191,7 @@ static int nfs4_read_done_cb(struct rpc_task *task, struct nfs_read_data *data)
struct nfs_server *server = NFS_SERVER(data->inode);
if (nfs4_async_handle_error(task, server, data->args.context->state) == -EAGAIN) {
nfs_restart_rpc(task, server->nfs_client);
rpc_restart_call_prepare(task);
return -EAGAIN;
}
@ -3260,7 +3241,7 @@ static int nfs4_write_done_cb(struct rpc_task *task, struct nfs_write_data *data
struct inode *inode = data->inode;
if (nfs4_async_handle_error(task, NFS_SERVER(inode), data->args.context->state) == -EAGAIN) {
nfs_restart_rpc(task, NFS_SERVER(inode)->nfs_client);
rpc_restart_call_prepare(task);
return -EAGAIN;
}
if (task->tk_status >= 0) {
@ -3317,7 +3298,7 @@ static int nfs4_commit_done_cb(struct rpc_task *task, struct nfs_write_data *dat
struct inode *inode = data->inode;
if (nfs4_async_handle_error(task, NFS_SERVER(inode), NULL) == -EAGAIN) {
nfs_restart_rpc(task, NFS_SERVER(inode)->nfs_client);
rpc_restart_call_prepare(task);
return -EAGAIN;
}
nfs_refresh_inode(inode, data->res.fattr);
@ -3857,7 +3838,7 @@ static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata)
default:
if (nfs4_async_handle_error(task, data->res.server, NULL) ==
-EAGAIN) {
nfs_restart_rpc(task, data->res.server->nfs_client);
rpc_restart_call_prepare(task);
return;
}
}
@ -4111,8 +4092,7 @@ static void nfs4_locku_done(struct rpc_task *task, void *data)
break;
default:
if (nfs4_async_handle_error(task, calldata->server, NULL) == -EAGAIN)
nfs_restart_rpc(task,
calldata->server->nfs_client);
rpc_restart_call_prepare(task);
}
}
@ -4945,7 +4925,7 @@ static void nfs4_get_lease_time_done(struct rpc_task *task, void *calldata)
task->tk_status = 0;
/* fall through */
case -NFS4ERR_RETRY_UNCACHED_REP:
nfs_restart_rpc(task, data->clp);
rpc_restart_call_prepare(task);
return;
}
dprintk("<-- %s\n", __func__);
@ -5786,7 +5766,7 @@ static void nfs4_layoutreturn_done(struct rpc_task *task, void *calldata)
server = NFS_SERVER(lrp->args.inode);
if (nfs4_async_handle_error(task, server, NULL) == -EAGAIN) {
nfs_restart_rpc(task, lrp->clp);
rpc_restart_call_prepare(task);
return;
}
spin_lock(&lo->plh_inode->i_lock);
@ -5957,7 +5937,7 @@ nfs4_layoutcommit_done(struct rpc_task *task, void *calldata)
}
if (nfs4_async_handle_error(task, server, NULL) == -EAGAIN) {
nfs_restart_rpc(task, server->nfs_client);
rpc_restart_call_prepare(task);
return;
}
@ -6270,7 +6250,6 @@ const struct nfs_rpc_ops nfs_v4_clientops = {
.getroot = nfs4_proc_get_root,
.getattr = nfs4_proc_getattr,
.setattr = nfs4_proc_setattr,
.lookupfh = nfs4_proc_lookupfh,
.lookup = nfs4_proc_lookup,
.access = nfs4_proc_access,
.readlink = nfs4_proc_readlink,


@ -1168,23 +1168,17 @@ EXPORT_SYMBOL_GPL(pnfs_generic_pg_test);
/*
* Called by non rpc-based layout drivers
*/
int
pnfs_ld_write_done(struct nfs_write_data *data)
void pnfs_ld_write_done(struct nfs_write_data *data)
{
int status;
if (!data->pnfs_error) {
if (likely(!data->pnfs_error)) {
pnfs_set_layoutcommit(data);
data->mds_ops->rpc_call_done(&data->task, data);
data->mds_ops->rpc_release(data);
return 0;
} else {
put_lseg(data->lseg);
data->lseg = NULL;
dprintk("pnfs write error = %d\n", data->pnfs_error);
}
dprintk("%s: pnfs_error=%d, retry via MDS\n", __func__,
data->pnfs_error);
status = nfs_initiate_write(data, NFS_CLIENT(data->inode),
data->mds_ops, NFS_FILE_SYNC);
return status ? : -EAGAIN;
data->mds_ops->rpc_release(data);
}
EXPORT_SYMBOL_GPL(pnfs_ld_write_done);
@ -1268,23 +1262,17 @@ EXPORT_SYMBOL_GPL(pnfs_generic_pg_writepages);
/*
* Called by non rpc-based layout drivers
*/
int
pnfs_ld_read_done(struct nfs_read_data *data)
void pnfs_ld_read_done(struct nfs_read_data *data)
{
int status;
if (!data->pnfs_error) {
if (likely(!data->pnfs_error)) {
__nfs4_read_done_cb(data);
data->mds_ops->rpc_call_done(&data->task, data);
data->mds_ops->rpc_release(data);
return 0;
} else {
put_lseg(data->lseg);
data->lseg = NULL;
dprintk("pnfs write error = %d\n", data->pnfs_error);
}
dprintk("%s: pnfs_error=%d, retry via MDS\n", __func__,
data->pnfs_error);
status = nfs_initiate_read(data, NFS_CLIENT(data->inode),
data->mds_ops);
return status ? : -EAGAIN;
data->mds_ops->rpc_release(data);
}
EXPORT_SYMBOL_GPL(pnfs_ld_read_done);
@ -1381,6 +1369,18 @@ static void pnfs_list_write_lseg(struct inode *inode, struct list_head *listp)
}
}
void pnfs_set_lo_fail(struct pnfs_layout_segment *lseg)
{
if (lseg->pls_range.iomode == IOMODE_RW) {
dprintk("%s Setting layout IOMODE_RW fail bit\n", __func__);
set_bit(lo_fail_bit(IOMODE_RW), &lseg->pls_layout->plh_flags);
} else {
dprintk("%s Setting layout IOMODE_READ fail bit\n", __func__);
set_bit(lo_fail_bit(IOMODE_READ), &lseg->pls_layout->plh_flags);
}
}
EXPORT_SYMBOL_GPL(pnfs_set_lo_fail);
void
pnfs_set_layoutcommit(struct nfs_write_data *wdata)
{


@ -178,6 +178,7 @@ int pnfs_generic_pg_readpages(struct nfs_pageio_descriptor *desc);
void pnfs_generic_pg_init_write(struct nfs_pageio_descriptor *, struct nfs_page *);
int pnfs_generic_pg_writepages(struct nfs_pageio_descriptor *desc);
bool pnfs_generic_pg_test(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev, struct nfs_page *req);
void pnfs_set_lo_fail(struct pnfs_layout_segment *lseg);
int pnfs_layout_process(struct nfs4_layoutget *lgp);
void pnfs_free_lseg_list(struct list_head *tmp_list);
void pnfs_destroy_layout(struct nfs_inode *);
@ -200,8 +201,8 @@ void pnfs_set_layoutcommit(struct nfs_write_data *wdata);
void pnfs_cleanup_layoutcommit(struct nfs4_layoutcommit_data *data);
int pnfs_layoutcommit_inode(struct inode *inode, bool sync);
int _pnfs_return_layout(struct inode *);
int pnfs_ld_write_done(struct nfs_write_data *);
int pnfs_ld_read_done(struct nfs_read_data *);
void pnfs_ld_write_done(struct nfs_write_data *);
void pnfs_ld_read_done(struct nfs_read_data *);
struct pnfs_layout_segment *pnfs_update_layout(struct inode *ino,
struct nfs_open_context *ctx,
loff_t pos,


@ -35,16 +35,13 @@ static const struct rpc_call_ops nfs_read_partial_ops;
static const struct rpc_call_ops nfs_read_full_ops;
static struct kmem_cache *nfs_rdata_cachep;
static mempool_t *nfs_rdata_mempool;
#define MIN_POOL_READ (32)
struct nfs_read_data *nfs_readdata_alloc(unsigned int pagecount)
{
struct nfs_read_data *p = mempool_alloc(nfs_rdata_mempool, GFP_KERNEL);
struct nfs_read_data *p;
p = kmem_cache_zalloc(nfs_rdata_cachep, GFP_KERNEL);
if (p) {
memset(p, 0, sizeof(*p));
INIT_LIST_HEAD(&p->pages);
p->npages = pagecount;
if (pagecount <= ARRAY_SIZE(p->page_array))
@ -52,7 +49,7 @@ struct nfs_read_data *nfs_readdata_alloc(unsigned int pagecount)
else {
p->pagevec = kcalloc(pagecount, sizeof(struct page *), GFP_KERNEL);
if (!p->pagevec) {
mempool_free(p, nfs_rdata_mempool);
kmem_cache_free(nfs_rdata_cachep, p);
p = NULL;
}
}
@ -64,7 +61,7 @@ void nfs_readdata_free(struct nfs_read_data *p)
{
if (p && (p->pagevec != &p->page_array[0]))
kfree(p->pagevec);
mempool_free(p, nfs_rdata_mempool);
kmem_cache_free(nfs_rdata_cachep, p);
}
void nfs_readdata_release(struct nfs_read_data *rdata)
@ -276,7 +273,6 @@ nfs_async_read_error(struct list_head *head)
while (!list_empty(head)) {
req = nfs_list_entry(head->next);
nfs_list_remove_request(req);
SetPageError(req->wb_page);
nfs_readpage_release(req);
}
}
@ -322,7 +318,6 @@ static int nfs_pagein_multi(struct nfs_pageio_descriptor *desc, struct list_head
offset += len;
} while(nbytes != 0);
atomic_set(&req->wb_complete, requests);
ClearPageError(page);
desc->pg_rpc_callops = &nfs_read_partial_ops;
return ret;
out_bad:
@ -331,7 +326,6 @@ out_bad:
list_del(&data->list);
nfs_readdata_free(data);
}
SetPageError(page);
nfs_readpage_release(req);
return -ENOMEM;
}
@ -357,7 +351,6 @@ static int nfs_pagein_one(struct nfs_pageio_descriptor *desc, struct list_head *
req = nfs_list_entry(head->next);
nfs_list_remove_request(req);
nfs_list_add_request(req, &data->pages);
ClearPageError(req->wb_page);
*pages++ = req->wb_page;
}
req = nfs_list_entry(data->pages.next);
@ -435,7 +428,7 @@ static void nfs_readpage_retry(struct rpc_task *task, struct nfs_read_data *data
argp->offset += resp->count;
argp->pgbase += resp->count;
argp->count -= resp->count;
nfs_restart_rpc(task, NFS_SERVER(data->inode)->nfs_client);
rpc_restart_call_prepare(task);
}
/*
@ -462,10 +455,10 @@ static void nfs_readpage_release_partial(void *calldata)
int status = data->task.tk_status;
if (status < 0)
SetPageError(page);
set_bit(PG_PARTIAL_READ_FAILED, &req->wb_flags);
if (atomic_dec_and_test(&req->wb_complete)) {
if (!PageError(page))
if (!test_bit(PG_PARTIAL_READ_FAILED, &req->wb_flags))
SetPageUptodate(page);
nfs_readpage_release(req);
}
@ -541,13 +534,23 @@ static void nfs_readpage_result_full(struct rpc_task *task, void *calldata)
static void nfs_readpage_release_full(void *calldata)
{
struct nfs_read_data *data = calldata;
struct nfs_pageio_descriptor pgio;
if (data->pnfs_error) {
nfs_pageio_init_read_mds(&pgio, data->inode);
pgio.pg_recoalesce = 1;
}
while (!list_empty(&data->pages)) {
struct nfs_page *req = nfs_list_entry(data->pages.next);
nfs_list_remove_request(req);
if (!data->pnfs_error)
nfs_readpage_release(req);
else
nfs_pageio_add_request(&pgio, req);
}
if (data->pnfs_error)
nfs_pageio_complete(&pgio);
nfs_readdata_release(calldata);
}
@ -648,7 +651,6 @@ readpage_async_filler(void *data, struct page *page)
return 0;
out_error:
error = PTR_ERR(new);
SetPageError(page);
out_unlock:
unlock_page(page);
return error;
@ -711,16 +713,10 @@ int __init nfs_init_readpagecache(void)
if (nfs_rdata_cachep == NULL)
return -ENOMEM;
nfs_rdata_mempool = mempool_create_slab_pool(MIN_POOL_READ,
nfs_rdata_cachep);
if (nfs_rdata_mempool == NULL)
return -ENOMEM;
return 0;
}
void nfs_destroy_readpagecache(void)
{
mempool_destroy(nfs_rdata_mempool);
kmem_cache_destroy(nfs_rdata_cachep);
}


@ -733,18 +733,22 @@ static int nfs_show_options(struct seq_file *m, struct vfsmount *mnt)
return 0;
}
#ifdef CONFIG_NFS_V4
#ifdef CONFIG_NFS_V4_1
void show_sessions(struct seq_file *m, struct nfs_server *server)
static void show_sessions(struct seq_file *m, struct nfs_server *server)
{
if (nfs4_has_session(server->nfs_client))
seq_printf(m, ",sessions");
}
#else
void show_sessions(struct seq_file *m, struct nfs_server *server) {}
static void show_sessions(struct seq_file *m, struct nfs_server *server) {}
#endif
#endif
#ifdef CONFIG_NFS_V4
#ifdef CONFIG_NFS_V4_1
void show_pnfs(struct seq_file *m, struct nfs_server *server)
static void show_pnfs(struct seq_file *m, struct nfs_server *server)
{
seq_printf(m, ",pnfs=");
if (server->pnfs_curr_ld)
@ -752,9 +756,10 @@ void show_pnfs(struct seq_file *m, struct nfs_server *server)
else
seq_printf(m, "not configured");
}
#else /* CONFIG_NFS_V4_1 */
void show_pnfs(struct seq_file *m, struct nfs_server *server) {}
#endif /* CONFIG_NFS_V4_1 */
#else
static void show_pnfs(struct seq_file *m, struct nfs_server *server) {}
#endif
#endif
static int nfs_show_devname(struct seq_file *m, struct vfsmount *mnt)
{


@ -87,7 +87,7 @@ static void nfs_async_unlink_done(struct rpc_task *task, void *calldata)
struct inode *dir = data->dir;
if (!NFS_PROTO(dir)->unlink_done(task, dir))
nfs_restart_rpc(task, NFS_SERVER(dir)->nfs_client);
rpc_restart_call_prepare(task);
}
/**
@ -369,7 +369,7 @@ static void nfs_async_rename_done(struct rpc_task *task, void *calldata)
struct dentry *new_dentry = data->new_dentry;
if (!NFS_PROTO(old_dir)->rename_done(task, old_dir, new_dir)) {
nfs_restart_rpc(task, NFS_SERVER(old_dir)->nfs_client);
rpc_restart_call_prepare(task);
return;
}


@ -390,7 +390,7 @@ static int nfs_inode_add_request(struct inode *inode, struct nfs_page *req)
error = radix_tree_insert(&nfsi->nfs_page_tree, req->wb_index, req);
BUG_ON(error);
if (!nfsi->npages && nfs_have_delegation(inode, FMODE_WRITE))
nfsi->change_attr++;
inode->i_version++;
set_bit(PG_MAPPED, &req->wb_flags);
SetPagePrivate(req->wb_page);
set_page_private(req->wb_page, (unsigned long)req);
@ -428,7 +428,6 @@ static void
nfs_mark_request_dirty(struct nfs_page *req)
{
__set_page_dirty_nobuffers(req->wb_page);
__mark_inode_dirty(req->wb_page->mapping->host, I_DIRTY_DATASYNC);
}
#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
@ -762,6 +761,8 @@ int nfs_updatepage(struct file *file, struct page *page,
status = nfs_writepage_setup(ctx, page, offset, count);
if (status < 0)
nfs_set_pageerror(page);
else
__set_page_dirty_nobuffers(page);
dprintk("NFS: nfs_updatepage returns %d (isize %lld)\n",
status, (long long)i_size_read(inode));
@ -1010,7 +1011,6 @@ static int nfs_flush_one(struct nfs_pageio_descriptor *desc, struct list_head *r
req = nfs_list_entry(head->next);
nfs_list_remove_request(req);
nfs_list_add_request(req, &data->pages);
ClearPageError(req->wb_page);
*pages++ = req->wb_page;
}
req = nfs_list_entry(data->pages.next);
@ -1165,7 +1165,13 @@ static void nfs_writeback_done_full(struct rpc_task *task, void *calldata)
static void nfs_writeback_release_full(void *calldata)
{
struct nfs_write_data *data = calldata;
int status = data->task.tk_status;
int ret, status = data->task.tk_status;
struct nfs_pageio_descriptor pgio;
if (data->pnfs_error) {
nfs_pageio_init_write_mds(&pgio, data->inode, FLUSH_STABLE);
pgio.pg_recoalesce = 1;
}
/* Update attributes as result of writeback. */
while (!list_empty(&data->pages)) {
@ -1181,6 +1187,11 @@ static void nfs_writeback_release_full(void *calldata)
req->wb_bytes,
(long long)req_offset(req));
if (data->pnfs_error) {
dprintk(", pnfs error = %d\n", data->pnfs_error);
goto next;
}
if (status < 0) {
nfs_set_pageerror(page);
nfs_context_set_write_error(req->wb_context, status);
@ -1200,7 +1211,19 @@ remove_request:
next:
nfs_clear_page_tag_locked(req);
nfs_end_page_writeback(page);
if (data->pnfs_error) {
lock_page(page);
nfs_pageio_cond_complete(&pgio, page->index);
ret = nfs_page_async_flush(&pgio, page, 0);
if (ret) {
nfs_set_pageerror(page);
dprintk("rewrite to MDS error = %d\n", ret);
}
unlock_page(page);
}
}
if (data->pnfs_error)
nfs_pageio_complete(&pgio);
nfs_writedata_release(calldata);
}
@ -1281,7 +1304,7 @@ void nfs_writeback_done(struct rpc_task *task, struct nfs_write_data *data)
*/
argp->stable = NFS_FILE_SYNC;
}
nfs_restart_rpc(task, server->nfs_client);
rpc_restart_call_prepare(task);
return;
}
if (time_before(complain, jiffies)) {
@ -1553,6 +1576,10 @@ static int nfs_commit_unstable_pages(struct inode *inode, struct writeback_contr
int flags = FLUSH_SYNC;
int ret = 0;
/* no commits means nothing needs to be done */
if (!nfsi->ncommit)
return ret;
if (wbc->sync_mode == WB_SYNC_NONE) {
/* Don't commit yet if this is a non-blocking flush and there
* are a lot of outstanding writes for this mapping.
@ -1686,34 +1713,20 @@ out_error:
int nfs_migrate_page(struct address_space *mapping, struct page *newpage,
struct page *page)
{
struct nfs_page *req;
int ret;
/*
* If PagePrivate is set, then the page is currently associated with
* an in-progress read or write request. Don't try to migrate it.
*
* FIXME: we could do this in principle, but we'll need a way to ensure
* that we can safely release the inode reference while holding
* the page lock.
*/
if (PagePrivate(page))
return -EBUSY;
nfs_fscache_release_page(page, GFP_KERNEL);
req = nfs_find_and_lock_request(page, false);
ret = PTR_ERR(req);
if (IS_ERR(req))
goto out;
ret = migrate_page(mapping, newpage, page);
if (!req)
goto out;
if (ret)
goto out_unlock;
page_cache_get(newpage);
spin_lock(&mapping->host->i_lock);
req->wb_page = newpage;
SetPagePrivate(newpage);
set_page_private(newpage, (unsigned long)req);
ClearPagePrivate(page);
set_page_private(page, 0);
spin_unlock(&mapping->host->i_lock);
page_cache_release(page);
out_unlock:
nfs_clear_page_tag_locked(req);
out:
return ret;
return migrate_page(mapping, newpage, page);
}
#endif


@ -149,7 +149,6 @@ struct nfs_inode {
unsigned long read_cache_jiffies;
unsigned long attrtimeo;
unsigned long attrtimeo_timestamp;
__u64 change_attr; /* v4 only */
unsigned long attr_gencount;
/* "Generation counter" for the attribute cache. This is


@ -34,6 +34,7 @@ enum {
PG_NEED_COMMIT,
PG_NEED_RESCHED,
PG_PNFS_COMMIT,
PG_PARTIAL_READ_FAILED,
};
struct nfs_inode;


@ -1133,7 +1133,6 @@ struct nfs_page;
#define NFS_PAGEVEC_SIZE (8U)
struct nfs_read_data {
int flags;
struct rpc_task task;
struct inode *inode;
struct rpc_cred *cred;
@ -1156,7 +1155,6 @@ struct nfs_read_data {
};
struct nfs_write_data {
int flags;
struct rpc_task task;
struct inode *inode;
struct rpc_cred *cred;
@ -1197,9 +1195,6 @@ struct nfs_rpc_ops {
int (*getroot) (struct nfs_server *, struct nfs_fh *,
struct nfs_fsinfo *);
int (*lookupfh)(struct nfs_server *, struct nfs_fh *,
struct qstr *, struct nfs_fh *,
struct nfs_fattr *);
int (*getattr) (struct nfs_server *, struct nfs_fh *,
struct nfs_fattr *);
int (*setattr) (struct dentry *, struct nfs_fattr *,


@ -9,6 +9,7 @@
#ifndef _LINUX_SUNRPC_CLNT_H
#define _LINUX_SUNRPC_CLNT_H
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/in6.h>
@ -161,7 +162,7 @@ const char *rpc_peeraddr2str(struct rpc_clnt *, enum rpc_display_format_t);
size_t rpc_ntop(const struct sockaddr *, char *, const size_t);
size_t rpc_pton(const char *, const size_t,
struct sockaddr *, const size_t);
char * rpc_sockaddr2uaddr(const struct sockaddr *);
char * rpc_sockaddr2uaddr(const struct sockaddr *, gfp_t);
size_t rpc_uaddr2sockaddr(const char *, const size_t,
struct sockaddr *, const size_t);


@ -44,6 +44,8 @@ RPC_I(struct inode *inode)
return container_of(inode, struct rpc_inode, vfs_inode);
}
extern ssize_t rpc_pipe_generic_upcall(struct file *, struct rpc_pipe_msg *,
char __user *, size_t);
extern int rpc_queue_upcall(struct inode *, struct rpc_pipe_msg *);
struct rpc_clnt;


@ -255,12 +255,13 @@ EXPORT_SYMBOL_GPL(rpc_pton);
/**
* rpc_sockaddr2uaddr - Construct a universal address string from @sap.
* @sap: socket address
* @gfp_flags: allocation mode
*
* Returns a %NUL-terminated string in dynamically allocated memory;
* otherwise NULL is returned if an error occurred. Caller must
* free the returned string.
*/
char *rpc_sockaddr2uaddr(const struct sockaddr *sap)
char *rpc_sockaddr2uaddr(const struct sockaddr *sap, gfp_t gfp_flags)
{
char portbuf[RPCBIND_MAXUADDRPLEN];
char addrbuf[RPCBIND_MAXUADDRLEN];
@ -288,9 +289,8 @@ char *rpc_sockaddr2uaddr(const struct sockaddr *sap)
if (strlcat(addrbuf, portbuf, sizeof(addrbuf)) > sizeof(addrbuf))
return NULL;
return kstrdup(addrbuf, GFP_KERNEL);
return kstrdup(addrbuf, gfp_flags);
}
EXPORT_SYMBOL_GPL(rpc_sockaddr2uaddr);
/**
* rpc_uaddr2sockaddr - convert a universal address to a socket address.


@ -603,26 +603,6 @@ out:
return err;
}
static ssize_t
gss_pipe_upcall(struct file *filp, struct rpc_pipe_msg *msg,
char __user *dst, size_t buflen)
{
char *data = (char *)msg->data + msg->copied;
size_t mlen = min(msg->len, buflen);
unsigned long left;
left = copy_to_user(dst, data, mlen);
if (left == mlen) {
msg->errno = -EFAULT;
return -EFAULT;
}
mlen -= left;
msg->copied += mlen;
msg->errno = 0;
return mlen;
}
#define MSG_BUF_MAXSIZE 1024
static ssize_t
@ -1590,7 +1570,7 @@ static const struct rpc_credops gss_nullops = {
};
static const struct rpc_pipe_ops gss_upcall_ops_v0 = {
.upcall = gss_pipe_upcall,
.upcall = rpc_pipe_generic_upcall,
.downcall = gss_pipe_downcall,
.destroy_msg = gss_pipe_destroy_msg,
.open_pipe = gss_pipe_open_v0,
@ -1598,7 +1578,7 @@ static const struct rpc_pipe_ops gss_upcall_ops_v0 = {
};
static const struct rpc_pipe_ops gss_upcall_ops_v1 = {
.upcall = gss_pipe_upcall,
.upcall = rpc_pipe_generic_upcall,
.downcall = gss_pipe_downcall,
.destroy_msg = gss_pipe_destroy_msg,
.open_pipe = gss_pipe_open_v1,


@ -850,6 +850,8 @@ rpc_restart_call_prepare(struct rpc_task *task)
{
if (RPC_ASSASSINATED(task))
return 0;
task->tk_action = call_start;
if (task->tk_ops->rpc_call_prepare != NULL)
task->tk_action = rpc_prepare_task;
return 1;
}


@ -77,6 +77,26 @@ rpc_timeout_upcall_queue(struct work_struct *work)
rpc_purge_list(rpci, &free_list, destroy_msg, -ETIMEDOUT);
}
ssize_t rpc_pipe_generic_upcall(struct file *filp, struct rpc_pipe_msg *msg,
char __user *dst, size_t buflen)
{
char *data = (char *)msg->data + msg->copied;
size_t mlen = min(msg->len - msg->copied, buflen);
unsigned long left;
left = copy_to_user(dst, data, mlen);
if (left == mlen) {
msg->errno = -EFAULT;
return -EFAULT;
}
mlen -= left;
msg->copied += mlen;
msg->errno = 0;
return mlen;
}
EXPORT_SYMBOL_GPL(rpc_pipe_generic_upcall);
/**
* rpc_queue_upcall - queue an upcall message to userspace
* @inode: inode of upcall pipe on which to queue given message


@ -410,7 +410,7 @@ static int rpcb_register_inet4(const struct sockaddr *sap,
unsigned short port = ntohs(sin->sin_port);
int result;
map->r_addr = rpc_sockaddr2uaddr(sap);
map->r_addr = rpc_sockaddr2uaddr(sap, GFP_KERNEL);
dprintk("RPC: %sregistering [%u, %u, %s, '%s'] with "
"local rpcbind\n", (port ? "" : "un"),
@ -437,7 +437,7 @@ static int rpcb_register_inet6(const struct sockaddr *sap,
unsigned short port = ntohs(sin6->sin6_port);
int result;
map->r_addr = rpc_sockaddr2uaddr(sap);
map->r_addr = rpc_sockaddr2uaddr(sap, GFP_KERNEL);
dprintk("RPC: %sregistering [%u, %u, %s, '%s'] with "
"local rpcbind\n", (port ? "" : "un"),
@ -686,7 +686,7 @@ void rpcb_getport_async(struct rpc_task *task)
case RPCBVERS_4:
case RPCBVERS_3:
map->r_netid = rpc_peeraddr2str(clnt, RPC_DISPLAY_NETID);
map->r_addr = rpc_sockaddr2uaddr(sap);
map->r_addr = rpc_sockaddr2uaddr(sap, GFP_ATOMIC);
map->r_owner = "";
break;
case RPCBVERS_2: