2010-10-20 08:17:58 +04:00
|
|
|
/*
|
|
|
|
* pNFS functions to call and manage layout drivers.
|
|
|
|
*
|
|
|
|
* Copyright (c) 2002 [year of first publication]
|
|
|
|
* The Regents of the University of Michigan
|
|
|
|
* All Rights Reserved
|
|
|
|
*
|
|
|
|
* Dean Hildebrand <dhildebz@umich.edu>
|
|
|
|
*
|
|
|
|
* Permission is granted to use, copy, create derivative works, and
|
|
|
|
* redistribute this software and such derivative works for any purpose,
|
|
|
|
* so long as the name of the University of Michigan is not used in
|
|
|
|
* any advertising or publicity pertaining to the use or distribution
|
|
|
|
* of this software without specific, written prior authorization. If
|
|
|
|
* the above copyright notice or any other identification of the
|
|
|
|
* University of Michigan is included in any copy of any portion of
|
|
|
|
* this software, then the disclaimer below must also be included.
|
|
|
|
*
|
|
|
|
* This software is provided as is, without representation or warranty
|
|
|
|
* of any kind either express or implied, including without limitation
|
|
|
|
* the implied warranties of merchantability, fitness for a particular
|
|
|
|
* purpose, or noninfringement. The Regents of the University of
|
|
|
|
* Michigan shall not be liable for any damages, including special,
|
|
|
|
* indirect, incidental, or consequential damages, with respect to any
|
|
|
|
* claim arising out of or in connection with the use of the software,
|
|
|
|
* even if it has been or is hereafter advised of the possibility of
|
|
|
|
* such damages.
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include <linux/nfs_fs.h>
|
2011-07-13 23:58:28 +04:00
|
|
|
#include <linux/nfs_page.h>
|
2011-07-01 22:23:34 +04:00
|
|
|
#include <linux/module.h>
|
2016-09-15 21:40:49 +03:00
|
|
|
#include <linux/sort.h>
|
2010-10-20 08:18:02 +04:00
|
|
|
#include "internal.h"
|
2010-10-20 08:17:58 +04:00
|
|
|
#include "pnfs.h"
|
2011-03-01 04:34:16 +03:00
|
|
|
#include "iostat.h"
|
2013-08-14 23:31:28 +04:00
|
|
|
#include "nfs4trace.h"
|
2015-01-24 21:54:37 +03:00
|
|
|
#include "delegation.h"
|
2015-06-23 14:51:57 +03:00
|
|
|
#include "nfs42.h"
|
2016-09-21 22:24:26 +03:00
|
|
|
#include "nfs4_fs.h"
|
2010-10-20 08:17:58 +04:00
|
|
|
|
|
|
|
#define NFSDBG_FACILITY NFSDBG_PNFS
|
2012-09-19 01:01:12 +04:00
|
|
|
#define PNFS_LAYOUTGET_RETRY_TIMEOUT (120*HZ)
|
2010-10-20 08:17:58 +04:00
|
|
|
|
2010-10-20 08:17:59 +04:00
|
|
|
/* Locking:
|
|
|
|
*
|
|
|
|
* pnfs_spinlock:
|
|
|
|
* protects pnfs_modules_tbl.
|
|
|
|
*/
|
|
|
|
static DEFINE_SPINLOCK(pnfs_spinlock);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* pnfs_modules_tbl holds all pnfs modules
|
|
|
|
*/
|
|
|
|
static LIST_HEAD(pnfs_modules_tbl);
|
|
|
|
|
2016-01-27 07:12:11 +03:00
|
|
|
static void pnfs_layoutreturn_before_put_layout_hdr(struct pnfs_layout_hdr *lo);
|
2016-10-13 02:50:54 +03:00
|
|
|
static void pnfs_free_returned_lsegs(struct pnfs_layout_hdr *lo,
|
|
|
|
struct list_head *free_me,
|
|
|
|
const struct pnfs_layout_range *range,
|
|
|
|
u32 seq);
|
2016-11-30 20:32:55 +03:00
|
|
|
static bool pnfs_lseg_dec_and_remove_zero(struct pnfs_layout_segment *lseg,
|
|
|
|
struct list_head *tmp_list);
|
2014-09-05 20:53:25 +04:00
|
|
|
|
2010-10-20 08:17:59 +04:00
|
|
|
/* Return the registered pnfs layout driver module matching given id */
|
|
|
|
/*
 * Look up a registered layout driver by its layout type id.
 * Caller must hold pnfs_spinlock (protects pnfs_modules_tbl).
 * Returns the matching driver, or NULL if none is registered.
 * Does NOT take a module reference; see find_pnfs_driver() for that.
 */
static struct pnfs_layoutdriver_type *
find_pnfs_driver_locked(u32 id)
{
	struct pnfs_layoutdriver_type *local;

	list_for_each_entry(local, &pnfs_modules_tbl, pnfs_tblid)
		if (local->id == id)
			goto out;
	/* fell off the end of the list: not registered */
	local = NULL;
out:
	dprintk("%s: Searching for id %u, found %p\n", __func__, id, local);
	return local;
}
|
|
|
|
|
2010-10-20 08:17:58 +04:00
|
|
|
/*
 * Locked wrapper around find_pnfs_driver_locked() that also pins the
 * driver's module. Returns the driver with a module reference held
 * (caller must module_put() when done), or NULL if the driver is not
 * registered or its module is going away.
 */
static struct pnfs_layoutdriver_type *
find_pnfs_driver(u32 id)
{
	struct pnfs_layoutdriver_type *local;

	spin_lock(&pnfs_spinlock);
	local = find_pnfs_driver_locked(id);
	if (local != NULL && !try_module_get(local->owner)) {
		/* module is unloading; treat as not found */
		dprintk("%s: Could not grab reference on module\n", __func__);
		local = NULL;
	}
	spin_unlock(&pnfs_spinlock);
	return local;
}
|
|
|
|
|
|
|
|
/*
 * Detach the layout driver from @nfss, undoing set_pnfs_layoutdriver():
 * gives the driver a chance to clean up, drops this server's MDS count
 * (purging the deviceid cache when it hits zero), and releases the
 * module reference taken when the driver was set.
 */
void
unset_pnfs_layoutdriver(struct nfs_server *nfss)
{
	if (nfss->pnfs_curr_ld) {
		if (nfss->pnfs_curr_ld->clear_layoutdriver)
			nfss->pnfs_curr_ld->clear_layoutdriver(nfss);
		/* Decrement the MDS count. Purge the deviceid cache if zero */
		if (atomic_dec_and_test(&nfss->nfs_client->cl_mds_count))
			nfs4_deviceid_purge_client(nfss->nfs_client);
		module_put(nfss->pnfs_curr_ld->owner);
	}
	nfss->pnfs_curr_ld = NULL;
}
|
|
|
|
|
2016-09-15 21:40:49 +03:00
|
|
|
/*
|
|
|
|
* When the server sends a list of layout types, we choose one in the order
|
|
|
|
* given in the list below.
|
|
|
|
*
|
|
|
|
* FIXME: should this list be configurable in some fashion? module param?
|
|
|
|
* mount option? something else?
|
|
|
|
*/
|
|
|
|
static const u32 ld_prefs[] = {
	LAYOUT_SCSI,
	LAYOUT_BLOCK_VOLUME,
	LAYOUT_OSD2_OBJECTS,
	LAYOUT_FLEX_FILES,
	LAYOUT_NFSV4_1_FILES,
	0	/* sentinel: terminates the scan in ld_cmp() */
};
|
|
|
|
|
|
|
|
static int
|
|
|
|
ld_cmp(const void *e1, const void *e2)
|
|
|
|
{
|
|
|
|
u32 ld1 = *((u32 *)e1);
|
|
|
|
u32 ld2 = *((u32 *)e2);
|
|
|
|
int i;
|
|
|
|
|
|
|
|
for (i = 0; ld_prefs[i] != 0; i++) {
|
|
|
|
if (ld1 == ld_prefs[i])
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
if (ld2 == ld_prefs[i])
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2010-10-20 08:17:58 +04:00
|
|
|
/*
|
|
|
|
* Try to set the server's pnfs module to the pnfs layout type specified by id.
|
|
|
|
* Currently only one pNFS layout driver per filesystem is supported.
|
|
|
|
*
|
2016-08-10 22:58:24 +03:00
|
|
|
* @ids array of layout types supported by MDS.
|
2010-10-20 08:17:58 +04:00
|
|
|
*/
|
|
|
|
/*
 * Try to set the server's pnfs module to the best layout type advertised
 * by the MDS in @fsinfo. Currently only one pNFS layout driver per
 * filesystem is supported.
 *
 * Walks the server's layout types in local preference order (see
 * ld_prefs/ld_cmp), loading driver modules on demand; on any failure
 * falls back to plain NFSv4 I/O with pnfs_curr_ld == NULL.
 */
void
set_pnfs_layoutdriver(struct nfs_server *server, const struct nfs_fh *mntfh,
		      struct nfs_fsinfo *fsinfo)
{
	struct pnfs_layoutdriver_type *ld_type = NULL;
	u32 id;
	int i;

	if (fsinfo->nlayouttypes == 0)
		goto out_no_driver;
	/* Server must have negotiated pNFS-capable exchange flags. */
	if (!(server->nfs_client->cl_exchange_flags &
		 (EXCHGID4_FLAG_USE_NON_PNFS | EXCHGID4_FLAG_USE_PNFS_MDS))) {
		printk(KERN_ERR "NFS: %s: cl_exchange_flags 0x%x\n",
			__func__, server->nfs_client->cl_exchange_flags);
		goto out_no_driver;
	}

	/* Order the server's list by our preference so we pick the best. */
	sort(fsinfo->layouttype, fsinfo->nlayouttypes,
	     sizeof(*fsinfo->layouttype), ld_cmp, NULL);

	for (i = 0; i < fsinfo->nlayouttypes; i++) {
		id = fsinfo->layouttype[i];
		ld_type = find_pnfs_driver(id);
		if (!ld_type) {
			/* Driver not yet loaded: try to autoload it. */
			request_module("%s-%u", LAYOUT_NFSV4_1_MODULE_PREFIX,
				       id);
			ld_type = find_pnfs_driver(id);
		}
		if (ld_type)
			break;
	}

	if (!ld_type) {
		dprintk("%s: No pNFS module found!\n", __func__);
		goto out_no_driver;
	}

	server->pnfs_curr_ld = ld_type;
	if (ld_type->set_layoutdriver
	    && ld_type->set_layoutdriver(server, mntfh)) {
		printk(KERN_ERR "NFS: %s: Error initializing pNFS layout "
			"driver %u.\n", __func__, id);
		/* drop the reference taken by find_pnfs_driver() */
		module_put(ld_type->owner);
		goto out_no_driver;
	}
	/* Bump the MDS count */
	atomic_inc(&server->nfs_client->cl_mds_count);

	dprintk("%s: pNFS module for %u set\n", __func__, id);
	return;

out_no_driver:
	dprintk("%s: Using NFSv4 I/O\n", __func__);
	server->pnfs_curr_ld = NULL;
}
|
2010-10-20 08:17:59 +04:00
|
|
|
|
|
|
|
/*
 * Register a pNFS layout driver so it can be matched by layout type id.
 * Rejects the reserved id 0, drivers missing the mandatory
 * alloc_lseg/free_lseg ops, and duplicate ids.
 * Returns 0 on success, -EINVAL on any of the above failures.
 */
int
pnfs_register_layoutdriver(struct pnfs_layoutdriver_type *ld_type)
{
	int status = -EINVAL;
	struct pnfs_layoutdriver_type *tmp;

	if (ld_type->id == 0) {
		printk(KERN_ERR "NFS: %s id 0 is reserved\n", __func__);
		return status;
	}
	if (!ld_type->alloc_lseg || !ld_type->free_lseg) {
		printk(KERN_ERR "NFS: %s Layout driver must provide "
		       "alloc_lseg and free_lseg.\n", __func__);
		return status;
	}

	spin_lock(&pnfs_spinlock);
	tmp = find_pnfs_driver_locked(ld_type->id);
	if (!tmp) {
		list_add(&ld_type->pnfs_tblid, &pnfs_modules_tbl);
		status = 0;
		dprintk("%s Registering id:%u name:%s\n", __func__, ld_type->id,
			ld_type->name);
	} else {
		printk(KERN_ERR "NFS: %s Module with id %d already loaded!\n",
			__func__, ld_type->id);
	}
	spin_unlock(&pnfs_spinlock);

	return status;
}
EXPORT_SYMBOL_GPL(pnfs_register_layoutdriver);
|
|
|
|
|
|
|
|
/*
 * Remove a layout driver from the registry. The driver is responsible
 * for ensuring no users remain before unregistering (module refcounting
 * via find_pnfs_driver() prevents unload while in use).
 */
void
pnfs_unregister_layoutdriver(struct pnfs_layoutdriver_type *ld_type)
{
	dprintk("%s Deregistering id:%u\n", __func__, ld_type->id);
	spin_lock(&pnfs_spinlock);
	list_del(&ld_type->pnfs_tblid);
	spin_unlock(&pnfs_spinlock);
}
EXPORT_SYMBOL_GPL(pnfs_unregister_layoutdriver);
|
2010-10-20 08:18:01 +04:00
|
|
|
|
2010-10-20 08:18:03 +04:00
|
|
|
/*
|
|
|
|
* pNFS client layout cache
|
|
|
|
*/
|
|
|
|
|
2011-01-06 14:36:28 +03:00
|
|
|
/* Need to hold i_lock if caller does not already hold reference */
|
2011-01-06 14:36:30 +03:00
|
|
|
/* Take a reference on the layout header; paired with pnfs_put_layout_hdr(). */
void
pnfs_get_layout_hdr(struct pnfs_layout_hdr *lo)
{
	refcount_inc(&lo->plh_refcount);
}
|
|
|
|
|
2011-05-22 20:51:33 +04:00
|
|
|
/*
 * Allocate a layout header for @ino by delegating to the active layout
 * driver's alloc_layout_hdr op. May return NULL on allocation failure
 * (depending on the driver implementation).
 */
static struct pnfs_layout_hdr *
pnfs_alloc_layout_hdr(struct inode *ino, gfp_t gfp_flags)
{
	struct pnfs_layoutdriver_type *ld = NFS_SERVER(ino)->pnfs_curr_ld;
	return ld->alloc_layout_hdr(ino, gfp_flags);
}
|
|
|
|
|
|
|
|
/*
 * Final teardown of a layout header once its refcount has dropped to
 * zero: unhash it from the per-client layout list (if hashed), release
 * the credential, and hand the memory back to the layout driver.
 */
static void
pnfs_free_layout_hdr(struct pnfs_layout_hdr *lo)
{
	struct nfs_server *server = NFS_SERVER(lo->plh_inode);
	struct pnfs_layoutdriver_type *ld = server->pnfs_curr_ld;

	if (test_and_clear_bit(NFS_LAYOUT_HASHED, &lo->plh_flags)) {
		struct nfs_client *clp = server->nfs_client;

		/* cl_lock protects the per-client plh_layouts list */
		spin_lock(&clp->cl_lock);
		list_del_rcu(&lo->plh_layouts);
		spin_unlock(&clp->cl_lock);
	}
	put_cred(lo->plh_lc_cred);
	return ld->free_layout_hdr(lo);
}
|
|
|
|
|
2010-10-20 08:18:01 +04:00
|
|
|
/*
 * Detach the layout header from its inode and reset the MDS threshold
 * I/O counters. Caller holds inode->i_lock; the header itself is freed
 * later by pnfs_free_layout_hdr().
 */
static void
pnfs_detach_layout_hdr(struct pnfs_layout_hdr *lo)
{
	struct nfs_inode *nfsi = NFS_I(lo->plh_inode);
	dprintk("%s: freeing layout cache %p\n", __func__, lo);
	nfsi->layout = NULL;
	/* Reset MDS Threshold I/O counters */
	nfsi->write_io = 0;
	nfsi->read_io = 0;
}
|
|
|
|
|
2010-10-20 08:18:03 +04:00
|
|
|
/*
 * Drop a reference on the layout header. On the final put: detach it
 * from the inode, free it, and — if the inode is being torn down —
 * wake anyone waiting in pnfs_destroy_layout_final().
 *
 * i_state is sampled under i_lock because the inode may be freed as
 * soon as the lock is dropped; wake_up_var() must not touch it after.
 */
void
pnfs_put_layout_hdr(struct pnfs_layout_hdr *lo)
{
	struct inode *inode;
	unsigned long i_state;

	if (!lo)
		return;
	inode = lo->plh_inode;
	/* may trigger a layoutreturn before the header can be released */
	pnfs_layoutreturn_before_put_layout_hdr(lo);

	if (refcount_dec_and_lock(&lo->plh_refcount, &inode->i_lock)) {
		if (!list_empty(&lo->plh_segs))
			WARN_ONCE(1, "NFS: BUG unfreed layout segments.\n");
		pnfs_detach_layout_hdr(lo);
		i_state = inode->i_state;
		spin_unlock(&inode->i_lock);
		pnfs_free_layout_hdr(lo);
		/* Notify pnfs_destroy_layout_final() that we're done */
		if (i_state & (I_FREEING | I_CLEAR))
			wake_up_var(lo);
	}
}
|
|
|
|
|
2020-02-18 23:58:31 +03:00
|
|
|
/*
 * Try to pin the inode that owns @lo. Returns the inode with an extra
 * reference (caller must iput()), or NULL if the inode is being freed,
 * in which case the layout is flagged NFS_LAYOUT_INODE_FREEING so other
 * paths can skip it.
 */
static struct inode *
pnfs_grab_inode_layout_hdr(struct pnfs_layout_hdr *lo)
{
	struct inode *inode = igrab(lo->plh_inode);

	if (inode)
		return inode;
	set_bit(NFS_LAYOUT_INODE_FREEING, &lo->plh_flags);
	return NULL;
}
|
|
|
|
|
2021-01-21 21:51:50 +03:00
|
|
|
/*
|
|
|
|
* Compare 2 layout stateid sequence ids, to see which is newer,
|
|
|
|
* taking into account wraparound issues.
|
|
|
|
*/
|
|
|
|
/*
 * Serial-number comparison of two layout stateid seqids: true if @s1 is
 * strictly newer than @s2, treating the 32-bit space as wrapping.
 */
static bool pnfs_seqid_is_newer(u32 s1, u32 s2)
{
	s32 diff = (s32)(s1 - s2);

	return diff > 0;
}
|
|
|
|
|
|
|
|
/*
 * Advance the layout barrier to @newseq. The barrier only moves forward
 * (serial-number order), except that a zero barrier means "unset" and
 * is always overwritten.
 */
static void pnfs_barrier_update(struct pnfs_layout_hdr *lo, u32 newseq)
{
	if (pnfs_seqid_is_newer(newseq, lo->plh_barrier) || !lo->plh_barrier)
		lo->plh_barrier = newseq;
}
|
|
|
|
|
2016-11-30 18:47:48 +03:00
|
|
|
/*
 * Record that a layoutreturn is wanted for @iomode at sequence @seq.
 * If a different iomode is already pending, widen to IOMODE_ANY.
 * A zero @seq means "the current stateid seqid".
 */
static void
pnfs_set_plh_return_info(struct pnfs_layout_hdr *lo, enum pnfs_iomode iomode,
			 u32 seq)
{
	if (lo->plh_return_iomode != 0 && lo->plh_return_iomode != iomode)
		iomode = IOMODE_ANY;
	lo->plh_return_iomode = iomode;
	set_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags);
	/*
	 * We must set lo->plh_return_seq to avoid livelocks with
	 * pnfs_layout_need_return()
	 */
	if (seq == 0)
		seq = be32_to_cpu(lo->plh_stateid.seqid);
	/* keep the highest requested return seq (serial-number order) */
	if (!lo->plh_return_seq || pnfs_seqid_is_newer(seq, lo->plh_return_seq))
		lo->plh_return_seq = seq;
	pnfs_barrier_update(lo, seq);
}
|
|
|
|
|
2016-11-14 22:34:18 +03:00
|
|
|
/*
 * Reset the pending-layoutreturn state on @lo, then re-arm it for any
 * segments still marked NFS_LSEG_LAYOUTRETURN so they are not lost.
 */
static void
pnfs_clear_layoutreturn_info(struct pnfs_layout_hdr *lo)
{
	struct pnfs_layout_segment *lseg;
	lo->plh_return_iomode = 0;
	lo->plh_return_seq = 0;
	clear_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags);
	list_for_each_entry(lseg, &lo->plh_segs, pls_list) {
		if (!test_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags))
			continue;
		/* this segment still needs returning: re-request it */
		pnfs_set_plh_return_info(lo, lseg->pls_range.iomode, 0);
	}
}
|
|
|
|
|
2016-12-06 01:33:07 +03:00
|
|
|
/*
 * Release the RETURN/RETURN_LOCK bits and wake all waiters — both
 * wait_on_bit sleepers on NFS_LAYOUT_RETURN and RPC tasks queued on the
 * server's roc_rpcwaitq. The barrier orders the bit clears before the
 * wakeups so waiters observe the cleared state.
 */
static void pnfs_clear_layoutreturn_waitbit(struct pnfs_layout_hdr *lo)
{
	clear_bit_unlock(NFS_LAYOUT_RETURN, &lo->plh_flags);
	clear_bit(NFS_LAYOUT_RETURN_LOCK, &lo->plh_flags);
	smp_mb__after_atomic();
	wake_up_bit(&lo->plh_flags, NFS_LAYOUT_RETURN);
	rpc_wake_up(&NFS_SERVER(lo->plh_inode)->roc_rpcwaitq);
}
|
|
|
|
|
2016-11-30 20:32:55 +03:00
|
|
|
/*
 * Strip the ROC/LAYOUTRETURN markers from @lseg and drop the VALID and
 * LAYOUTCOMMIT references; if either drop takes the refcount to zero
 * the segment is moved onto @free_me for later freeing.
 */
static void
pnfs_clear_lseg_state(struct pnfs_layout_segment *lseg,
		struct list_head *free_me)
{
	clear_bit(NFS_LSEG_ROC, &lseg->pls_flags);
	clear_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags);
	if (test_and_clear_bit(NFS_LSEG_VALID, &lseg->pls_flags))
		pnfs_lseg_dec_and_remove_zero(lseg, free_me);
	if (test_and_clear_bit(NFS_LSEG_LAYOUTCOMMIT, &lseg->pls_flags))
		pnfs_lseg_dec_and_remove_zero(lseg, free_me);
}
|
|
|
|
|
2017-11-06 23:28:07 +03:00
|
|
|
/*
|
2019-09-20 14:23:45 +03:00
|
|
|
* Update the seqid of a layout stateid after receiving
|
|
|
|
* NFS4ERR_OLD_STATEID
|
2017-11-06 23:28:07 +03:00
|
|
|
*/
|
2019-09-20 14:23:45 +03:00
|
|
|
/*
 * Refresh @dst after the server answered NFS4ERR_OLD_STATEID.
 * If @dst matches the inode's current layout stateid, either bump its
 * seqid (when it was already the most recent) or adopt the layout's
 * current seqid after marking matching lsegs for return; @dst_range is
 * widened to the full-file range in the latter case.
 * Returns true if @dst was updated and the caller should retry.
 */
bool nfs4_layout_refresh_old_stateid(nfs4_stateid *dst,
		struct pnfs_layout_range *dst_range,
		struct inode *inode)
{
	struct pnfs_layout_hdr *lo;
	struct pnfs_layout_range range = {
		.iomode = IOMODE_ANY,
		.offset = 0,
		.length = NFS4_MAX_UINT64,
	};
	bool ret = false;
	LIST_HEAD(head);
	int err;

	spin_lock(&inode->i_lock);
	lo = NFS_I(inode)->layout;
	if (lo && pnfs_layout_is_valid(lo) &&
	    nfs4_stateid_match_other(dst, &lo->plh_stateid)) {
		/* Is our call using the most recent seqid? If so, bump it */
		if (!nfs4_stateid_is_newer(&lo->plh_stateid, dst)) {
			nfs4_stateid_seqid_inc(dst);
			ret = true;
			goto out;
		}
		/* Try to update the seqid to the most recent */
		err = pnfs_mark_matching_lsegs_return(lo, &head, &range, 0);
		if (err != -EBUSY) {
			dst->seqid = lo->plh_stateid.seqid;
			*dst_range = range;
			ret = true;
		}
	}
out:
	spin_unlock(&inode->i_lock);
	/* free any lsegs collected above, outside the lock */
	pnfs_free_lseg_list(&head);
	return ret;
}
|
|
|
|
|
2016-02-23 01:34:59 +03:00
|
|
|
/*
|
|
|
|
* Mark a pnfs_layout_hdr and all associated layout segments as invalid
|
|
|
|
*
|
|
|
|
* In order to continue using the pnfs_layout_hdr, a full recovery
|
|
|
|
* is required.
|
|
|
|
* Note that caller must hold inode->i_lock.
|
|
|
|
*/
|
2016-07-22 18:25:27 +03:00
|
|
|
/*
 * Mark the layout stateid invalid and tear down all segment state,
 * collecting doomed lsegs on @lseg_list for the caller to free.
 * Also releases any stuck layoutreturn waiters.
 * Caller must hold inode->i_lock.
 * Returns nonzero if segments (with outstanding references) remain.
 */
int
pnfs_mark_layout_stateid_invalid(struct pnfs_layout_hdr *lo,
		struct list_head *lseg_list)
{
	struct pnfs_layout_range range = {
		.iomode = IOMODE_ANY,
		.offset = 0,
		.length = NFS4_MAX_UINT64,
	};
	struct pnfs_layout_segment *lseg, *next;

	set_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags);
	list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list)
		pnfs_clear_lseg_state(lseg, lseg_list);
	pnfs_clear_layoutreturn_info(lo);
	pnfs_free_returned_lsegs(lo, lseg_list, &range, 0);
	/* unblock anyone waiting on an in-flight layoutreturn */
	if (test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags) &&
	    !test_and_set_bit(NFS_LAYOUT_RETURN_LOCK, &lo->plh_flags))
		pnfs_clear_layoutreturn_waitbit(lo);
	return !list_empty(&lo->plh_segs);
}
|
|
|
|
|
2012-09-19 00:41:18 +04:00
|
|
|
static int
|
|
|
|
pnfs_iomode_to_fail_bit(u32 iomode)
|
|
|
|
{
|
|
|
|
return iomode == IOMODE_RW ?
|
|
|
|
NFS_LAYOUT_RW_FAILED : NFS_LAYOUT_RO_FAILED;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Set a layout fail bit and stamp the retry timestamp. Setting the bit
 * for the first time takes a layout header reference, released by
 * pnfs_layout_clear_fail_bit().
 */
static void
pnfs_layout_set_fail_bit(struct pnfs_layout_hdr *lo, int fail_bit)
{
	lo->plh_retry_timestamp = jiffies;
	if (!test_and_set_bit(fail_bit, &lo->plh_flags))
		refcount_inc(&lo->plh_refcount);
}
|
|
|
|
|
|
|
|
/*
 * Clear a layout fail bit, dropping the reference taken when the bit
 * was set by pnfs_layout_set_fail_bit().
 */
static void
pnfs_layout_clear_fail_bit(struct pnfs_layout_hdr *lo, int fail_bit)
{
	if (test_and_clear_bit(fail_bit, &lo->plh_flags))
		refcount_dec(&lo->plh_refcount);
}
|
|
|
|
|
|
|
|
/*
 * Mark layoutgets for @iomode as failed on this layout and invalidate
 * all matching segments, so subsequent I/O falls back to the MDS until
 * the retry timeout (PNFS_LAYOUTGET_RETRY_TIMEOUT) elapses.
 */
static void
pnfs_layout_io_set_failed(struct pnfs_layout_hdr *lo, u32 iomode)
{
	struct inode *inode = lo->plh_inode;
	struct pnfs_layout_range range = {
		.iomode = iomode,
		.offset = 0,
		.length = NFS4_MAX_UINT64,
	};
	LIST_HEAD(head);

	spin_lock(&inode->i_lock);
	pnfs_layout_set_fail_bit(lo, pnfs_iomode_to_fail_bit(iomode));
	pnfs_mark_matching_lsegs_invalid(lo, &head, &range, 0);
	spin_unlock(&inode->i_lock);
	/* free the doomed segments outside the lock */
	pnfs_free_lseg_list(&head);
	dprintk("%s Setting layout IOMODE_%s fail bit\n", __func__,
			iomode == IOMODE_RW ? "RW" : "READ");
}
|
|
|
|
|
|
|
|
/*
 * Is layoutget for @iomode currently marked as failed on this layout?
 * Returns false (and clears the fail bit) once the retry window has
 * passed, allowing layoutgets to be attempted again.
 */
static bool
pnfs_layout_io_test_failed(struct pnfs_layout_hdr *lo, u32 iomode)
{
	unsigned long start, end;
	int fail_bit = pnfs_iomode_to_fail_bit(iomode);

	if (test_bit(fail_bit, &lo->plh_flags) == 0)
		return false;
	end = jiffies;
	start = end - PNFS_LAYOUTGET_RETRY_TIMEOUT;
	if (!time_in_range(lo->plh_retry_timestamp, start, end)) {
		/* It is time to retry the failed layoutgets */
		pnfs_layout_clear_fail_bit(lo, fail_bit);
		return false;
	}
	return true;
}
|
|
|
|
|
2010-10-20 08:18:02 +04:00
|
|
|
/*
 * Initialize a freshly allocated layout segment: empty list heads, an
 * initial reference, the VALID flag, and the range/seqid it covers.
 */
static void
pnfs_init_lseg(struct pnfs_layout_hdr *lo, struct pnfs_layout_segment *lseg,
	const struct pnfs_layout_range *range,
	const nfs4_stateid *stateid)
{
	INIT_LIST_HEAD(&lseg->pls_list);
	INIT_LIST_HEAD(&lseg->pls_lc_list);
	INIT_LIST_HEAD(&lseg->pls_commits);
	refcount_set(&lseg->pls_refcount, 1);
	set_bit(NFS_LSEG_VALID, &lseg->pls_flags);
	lseg->pls_layout = lo;
	lseg->pls_range = *range;
	/* remember which stateid seqid handed out this segment */
	lseg->pls_seq = be32_to_cpu(stateid->seqid);
}
|
|
|
|
|
2012-09-21 04:46:49 +04:00
|
|
|
static void pnfs_free_lseg(struct pnfs_layout_segment *lseg)
|
2010-10-20 08:18:02 +04:00
|
|
|
{
|
2016-10-13 02:50:54 +03:00
|
|
|
if (lseg != NULL) {
|
|
|
|
struct inode *inode = lseg->pls_layout->plh_inode;
|
|
|
|
NFS_SERVER(inode)->pnfs_curr_ld->free_lseg(lseg);
|
|
|
|
}
|
2010-10-20 08:18:02 +04:00
|
|
|
}
|
|
|
|
|
2011-03-01 04:34:13 +03:00
|
|
|
/*
 * Unlink an (already-invalidated) lseg from its layout header and drop
 * the header reference it held. If this empties the layout and no
 * return is pending, the stateid is marked invalid (once outstanding
 * layoutgets drain) and any bulk-recall marker is cleared.
 * Caller holds inode->i_lock.
 */
static void
pnfs_layout_remove_lseg(struct pnfs_layout_hdr *lo,
		struct pnfs_layout_segment *lseg)
{
	WARN_ON(test_bit(NFS_LSEG_VALID, &lseg->pls_flags));
	list_del_init(&lseg->pls_list);
	/* Matched by pnfs_get_layout_hdr in pnfs_layout_insert_lseg */
	refcount_dec(&lo->plh_refcount);
	if (test_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags))
		return;
	if (list_empty(&lo->plh_segs) &&
	    !test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags) &&
	    !test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags)) {
		if (atomic_read(&lo->plh_outstanding) == 0)
			set_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags);
		clear_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags);
	}
}
|
|
|
|
|
2016-10-13 02:50:54 +03:00
|
|
|
/*
 * If @lseg was marked for layoutreturn and the layout is still valid,
 * park it on plh_return_segs (instead of freeing it) and re-arm the
 * return state. Returns true if the segment was cached and must NOT be
 * freed by the caller.
 */
static bool
pnfs_cache_lseg_for_layoutreturn(struct pnfs_layout_hdr *lo,
		struct pnfs_layout_segment *lseg)
{
	if (test_and_clear_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags) &&
	    pnfs_layout_is_valid(lo)) {
		pnfs_set_plh_return_info(lo, lseg->pls_range.iomode, 0);
		list_move_tail(&lseg->pls_list, &lo->plh_return_segs);
		return true;
	}
	return false;
}
|
|
|
|
|
2011-03-01 04:34:15 +03:00
|
|
|
/*
 * Drop a reference on a layout segment (NULL is a no-op). On the final
 * put the segment is removed from its layout; it is then either cached
 * for a pending layoutreturn or freed, and the layout header reference
 * pinned across the unlock is released.
 */
void
pnfs_put_lseg(struct pnfs_layout_segment *lseg)
{
	struct pnfs_layout_hdr *lo;
	struct inode *inode;

	if (!lseg)
		return;

	dprintk("%s: lseg %p ref %d valid %d\n", __func__, lseg,
		refcount_read(&lseg->pls_refcount),
		test_bit(NFS_LSEG_VALID, &lseg->pls_flags));

	lo = lseg->pls_layout;
	inode = lo->plh_inode;

	if (refcount_dec_and_lock(&lseg->pls_refcount, &inode->i_lock)) {
		/* keep the header alive until after pnfs_free_lseg() */
		pnfs_get_layout_hdr(lo);
		pnfs_layout_remove_lseg(lo, lseg);
		if (pnfs_cache_lseg_for_layoutreturn(lo, lseg))
			lseg = NULL;	/* ownership moved to plh_return_segs */
		spin_unlock(&inode->i_lock);
		pnfs_free_lseg(lseg);
		pnfs_put_layout_hdr(lo);
	}
}
EXPORT_SYMBOL_GPL(pnfs_put_lseg);
|
2010-10-20 08:18:02 +04:00
|
|
|
|
2011-05-22 20:47:26 +04:00
|
|
|
/*
|
|
|
|
* is l2 fully contained in l1?
|
|
|
|
* start1 end1
|
|
|
|
* [----------------------------------)
|
|
|
|
* start2 end2
|
|
|
|
* [----------------)
|
|
|
|
*/
|
2013-06-03 19:24:36 +04:00
|
|
|
/*
 * Half-open interval containment test: true when [start2, end2) lies
 * entirely inside [start1, end1) (see the diagram above).
 */
static bool
pnfs_lseg_range_contained(const struct pnfs_layout_range *l1,
		 const struct pnfs_layout_range *l2)
{
	u64 start1 = l1->offset;
	u64 end1 = pnfs_end_offset(start1, l1->length);
	u64 start2 = l2->offset;
	u64 end2 = pnfs_end_offset(start2, l2->length);

	return (start1 <= start2) && (end1 >= end2);
}
|
|
|
|
|
2013-03-20 21:03:00 +04:00
|
|
|
/*
 * Drop one reference on @lseg; if that was the last, detach it from its
 * layout and queue it on @tmp_list for the caller to free outside the
 * lock. Returns true iff the segment was queued.
 */
static bool pnfs_lseg_dec_and_remove_zero(struct pnfs_layout_segment *lseg,
		struct list_head *tmp_list)
{
	if (!refcount_dec_and_test(&lseg->pls_refcount))
		return false;
	pnfs_layout_remove_lseg(lseg->pls_layout, lseg);
	list_add(&lseg->pls_list, tmp_list);
	return true;
}
|
|
|
|
|
2011-01-06 14:36:23 +03:00
|
|
|
/* Returns 1 if lseg is removed from list, 0 otherwise */
|
|
|
|
/*
 * Invalidate @lseg by clearing NFS_LSEG_VALID and dropping the
 * reference that flag held. Returns 1 if the segment was removed from
 * the layout (moved to @tmp_list), 0 if references keep it alive.
 */
static int mark_lseg_invalid(struct pnfs_layout_segment *lseg,
		struct list_head *tmp_list)
{
	int rv = 0;

	if (test_and_clear_bit(NFS_LSEG_VALID, &lseg->pls_flags)) {
		/* Remove the reference keeping the lseg in the
		 * list. It will now be removed when all
		 * outstanding io is finished.
		 */
		dprintk("%s: lseg %p ref %d\n", __func__, lseg,
			refcount_read(&lseg->pls_refcount));
		if (pnfs_lseg_dec_and_remove_zero(lseg, tmp_list))
			rv = 1;
	}
	return rv;
}
|
|
|
|
|
2016-07-22 18:13:22 +03:00
|
|
|
/*
 * Does @recall_range apply to @lseg_range? True when the iomodes are
 * compatible (IOMODE_ANY recalls match everything) and the byte ranges
 * intersect.
 */
static bool
pnfs_should_free_range(const struct pnfs_layout_range *lseg_range,
		 const struct pnfs_layout_range *recall_range)
{
	return (recall_range->iomode == IOMODE_ANY ||
		lseg_range->iomode == recall_range->iomode) &&
	       pnfs_lseg_range_intersecting(lseg_range, recall_range);
}
|
|
|
|
|
|
|
|
/*
 * Should @lseg be torn down for a recall of @recall_range at sequence
 * @seq? Segments handed out after @seq are exempt (seq == 0 disables
 * the sequence check); a NULL @recall_range matches every segment.
 */
static bool
pnfs_match_lseg_recall(const struct pnfs_layout_segment *lseg,
		const struct pnfs_layout_range *recall_range,
		u32 seq)
{
	if (seq != 0 && pnfs_seqid_is_newer(lseg->pls_seq, seq))
		return false;
	if (recall_range == NULL)
		return true;
	return pnfs_should_free_range(&lseg->pls_range, recall_range);
}
|
|
|
|
|
2016-05-17 19:28:42 +03:00
|
|
|
/**
 * pnfs_mark_matching_lsegs_invalid - tear down lsegs or mark them for later
 * @lo: layout header containing the lsegs
 * @tmp_list: list head where doomed lsegs should go
 * @recall_range: optional recall range argument to match (may be NULL)
 * @seq: only invalidate lsegs obtained prior to this sequence (may be 0)
 *
 * Walk the list of lsegs in the layout header, and tear down any that should
 * be destroyed. If "recall_range" is specified then the segment must match
 * that range. If "seq" is non-zero, then only match segments that were handed
 * out at or before that sequence.
 *
 * Returns number of matching invalid lsegs remaining in list after scanning
 * it and purging them.
 */
int
pnfs_mark_matching_lsegs_invalid(struct pnfs_layout_hdr *lo,
			    struct list_head *tmp_list,
			    const struct pnfs_layout_range *recall_range,
			    u32 seq)
{
	struct pnfs_layout_segment *lseg, *next;
	int remaining = 0;

	dprintk("%s:Begin lo %p\n", __func__, lo);

	if (list_empty(&lo->plh_segs))
		return 0;
	/* _safe variant: mark_lseg_invalid() may unlink lseg onto tmp_list. */
	list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list)
		if (pnfs_match_lseg_recall(lseg, recall_range, seq)) {
			dprintk("%s: freeing lseg %p iomode %d seq %u "
				"offset %llu length %llu\n", __func__,
				lseg, lseg->pls_range.iomode, lseg->pls_seq,
				lseg->pls_range.offset, lseg->pls_range.length);
			/*
			 * A segment that could not be invalidated right away
			 * is counted; presumably it is still in use and the
			 * caller must retry later — TODO confirm against
			 * mark_lseg_invalid().
			 */
			if (!mark_lseg_invalid(lseg, tmp_list))
				remaining++;
		}
	dprintk("%s:Return %i\n", __func__, remaining);
	return remaining;
}
|
|
|
|
|
2016-10-13 02:50:54 +03:00
|
|
|
/*
 * Move every segment on lo->plh_return_segs that matches @range/@seq onto
 * @free_me so the caller can free them outside the lock via
 * pnfs_free_lseg_list().
 */
static void
pnfs_free_returned_lsegs(struct pnfs_layout_hdr *lo,
		struct list_head *free_me,
		const struct pnfs_layout_range *range,
		u32 seq)
{
	struct pnfs_layout_segment *lseg, *next;

	list_for_each_entry_safe(lseg, next, &lo->plh_return_segs, pls_list) {
		if (pnfs_match_lseg_recall(lseg, range, seq))
			list_move_tail(&lseg->pls_list, free_me);
	}
}
|
|
|
|
|
2011-02-03 21:28:52 +03:00
|
|
|
/* note free_me must contain lsegs from a single layout_hdr */
|
2011-01-06 14:36:30 +03:00
|
|
|
void
|
2011-01-06 14:36:23 +03:00
|
|
|
pnfs_free_lseg_list(struct list_head *free_me)
|
2010-10-20 08:18:02 +04:00
|
|
|
{
|
2011-01-06 14:36:23 +03:00
|
|
|
struct pnfs_layout_segment *lseg, *tmp;
|
2011-02-03 21:28:52 +03:00
|
|
|
|
|
|
|
if (list_empty(free_me))
|
|
|
|
return;
|
|
|
|
|
2011-01-06 14:36:23 +03:00
|
|
|
list_for_each_entry_safe(lseg, tmp, free_me, pls_list) {
|
2011-01-06 14:36:20 +03:00
|
|
|
list_del(&lseg->pls_list);
|
2012-09-21 04:46:49 +04:00
|
|
|
pnfs_free_lseg(lseg);
|
2010-10-20 08:18:02 +04:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-11-25 20:06:14 +03:00
|
|
|
/*
 * Invalidate the layout attached to @nfsi and free all of its segments.
 * Returns the (now unreferenced) layout header pointer, or NULL if the
 * inode had no layout; callers use the pointer only as a token (see
 * pnfs_destroy_layout_final()).
 */
static struct pnfs_layout_hdr *__pnfs_destroy_layout(struct nfs_inode *nfsi)
{
	struct pnfs_layout_hdr *lo;
	LIST_HEAD(tmp_list);

	spin_lock(&nfsi->vfs_inode.i_lock);
	lo = nfsi->layout;
	if (lo) {
		/* Pin lo so it survives until our pnfs_put_layout_hdr(). */
		pnfs_get_layout_hdr(lo);
		pnfs_mark_layout_stateid_invalid(lo, &tmp_list);
		pnfs_layout_clear_fail_bit(lo, NFS_LAYOUT_RO_FAILED);
		pnfs_layout_clear_fail_bit(lo, NFS_LAYOUT_RW_FAILED);
		spin_unlock(&nfsi->vfs_inode.i_lock);
		/* Segment freeing and commit must happen outside i_lock. */
		pnfs_free_lseg_list(&tmp_list);
		nfs_commit_inode(&nfsi->vfs_inode, 0);
		pnfs_put_layout_hdr(lo);
	} else
		spin_unlock(&nfsi->vfs_inode.i_lock);
	return lo;
}
|
|
|
|
|
|
|
|
/* Tear down the layout attached to @nfsi, discarding the returned token. */
void pnfs_destroy_layout(struct nfs_inode *nfsi)
{
	__pnfs_destroy_layout(nfsi);
}
EXPORT_SYMBOL_GPL(pnfs_destroy_layout);
|
2010-10-20 08:18:02 +04:00
|
|
|
|
2020-11-25 20:06:14 +03:00
|
|
|
static bool pnfs_layout_removed(struct nfs_inode *nfsi,
|
|
|
|
struct pnfs_layout_hdr *lo)
|
|
|
|
{
|
|
|
|
bool ret;
|
|
|
|
|
|
|
|
spin_lock(&nfsi->vfs_inode.i_lock);
|
|
|
|
ret = nfsi->layout != lo;
|
|
|
|
spin_unlock(&nfsi->vfs_inode.i_lock);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Destroy the inode's layout and then wait until the layout header has
 * actually been detached from the inode before returning.
 */
void pnfs_destroy_layout_final(struct nfs_inode *nfsi)
{
	struct pnfs_layout_hdr *lo;

	lo = __pnfs_destroy_layout(nfsi);
	if (!lo)
		return;
	wait_var_event(lo, pnfs_layout_removed(nfsi, lo));
}
|
|
|
|
|
2013-02-12 18:48:42 +04:00
|
|
|
/*
 * Queue the inode's layout header on @layout_list for bulk destruction,
 * taking a reference on it.  Returns true if the layout was queued, false
 * if there is no layout or it is already on a bulk-destroy list.
 */
static bool
pnfs_layout_add_bulk_destroy_list(struct inode *inode,
		struct list_head *layout_list)
{
	struct pnfs_layout_hdr *lo;
	bool ret = false;

	spin_lock(&inode->i_lock);
	lo = NFS_I(inode)->layout;
	/* plh_bulk_destroy non-empty means someone else already queued it. */
	if (lo != NULL && list_empty(&lo->plh_bulk_destroy)) {
		pnfs_get_layout_hdr(lo);
		list_add(&lo->plh_bulk_destroy, layout_list);
		ret = true;
	}
	spin_unlock(&inode->i_lock);
	return ret;
}
|
|
|
|
|
|
|
|
/* Caller must hold rcu_read_lock and clp->cl_lock */
static int
pnfs_layout_bulk_destroy_byserver_locked(struct nfs_client *clp,
		struct nfs_server *server,
		struct list_head *layout_list)
	__must_hold(&clp->cl_lock)
	__must_hold(RCU)
{
	struct pnfs_layout_hdr *lo, *next;
	struct inode *inode;

	list_for_each_entry_safe(lo, next, &server->layouts, plh_layouts) {
		/* Skip layouts that are dying or already queued. */
		if (test_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags) ||
		    test_bit(NFS_LAYOUT_INODE_FREEING, &lo->plh_flags) ||
		    !list_empty(&lo->plh_bulk_destroy))
			continue;
		/* If the sb is being destroyed, just bail */
		if (!nfs_sb_active(server->super))
			break;
		inode = pnfs_grab_inode_layout_hdr(lo);
		if (inode != NULL) {
			if (test_and_clear_bit(NFS_LAYOUT_HASHED, &lo->plh_flags))
				list_del_rcu(&lo->plh_layouts);
			/* Queued: keep the inode reference and keep scanning. */
			if (pnfs_layout_add_bulk_destroy_list(inode,
						layout_list))
				continue;
			/* Not queued: drop locks so iput() can sleep. */
			rcu_read_unlock();
			spin_unlock(&clp->cl_lock);
			iput(inode);
		} else {
			rcu_read_unlock();
			spin_unlock(&clp->cl_lock);
		}
		nfs_sb_deactive(server->super);
		/*
		 * We dropped and re-took the locks, so the list may have
		 * changed under us: tell the caller to restart the scan.
		 */
		spin_lock(&clp->cl_lock);
		rcu_read_lock();
		return -EAGAIN;
	}
	return 0;
}
|
|
|
|
|
|
|
|
/*
 * Destroy every layout queued on @layout_list, committing outstanding
 * data first.  Returns 0, or -EAGAIN if any layout still had segments
 * that could not be invalidated immediately.
 */
static int
pnfs_layout_free_bulk_destroy_list(struct list_head *layout_list,
		bool is_bulk_recall)
{
	struct pnfs_layout_hdr *lo;
	struct inode *inode;
	LIST_HEAD(lseg_list);
	int ret = 0;

	while (!list_empty(layout_list)) {
		lo = list_entry(layout_list->next, struct pnfs_layout_hdr,
				plh_bulk_destroy);
		dprintk("%s freeing layout for inode %lu\n", __func__,
			lo->plh_inode->i_ino);
		inode = lo->plh_inode;

		/* Flush layoutcommit state before throwing the layout away. */
		pnfs_layoutcommit_inode(inode, false);

		spin_lock(&inode->i_lock);
		list_del_init(&lo->plh_bulk_destroy);
		if (pnfs_mark_layout_stateid_invalid(lo, &lseg_list)) {
			if (is_bulk_recall)
				set_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags);
			ret = -EAGAIN;
		}
		spin_unlock(&inode->i_lock);
		pnfs_free_lseg_list(&lseg_list);
		/* Free all lsegs that are attached to commit buckets */
		nfs_commit_inode(inode, 0);
		/* Drop the references taken when the layout was queued. */
		pnfs_put_layout_hdr(lo);
		nfs_iput_and_deactive(inode);
	}
	return ret;
}
|
|
|
|
|
|
|
|
/*
 * Destroy all layouts belonging to servers of @clp whose fsid matches
 * @fsid.  Returns 0 on success or -EAGAIN if some layouts could not be
 * invalidated immediately.
 */
int
pnfs_destroy_layouts_byfsid(struct nfs_client *clp,
			    struct nfs_fsid *fsid,
			    bool is_recall)
{
	struct nfs_server *server;
	LIST_HEAD(layout_list);

	spin_lock(&clp->cl_lock);
	rcu_read_lock();
restart:
	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
		if (memcmp(&server->fsid, fsid, sizeof(*fsid)) != 0)
			continue;
		/*
		 * Non-zero means the helper dropped the locks mid-scan, so
		 * the server list must be walked again from the start.
		 */
		if (pnfs_layout_bulk_destroy_byserver_locked(clp,
							     server,
							     &layout_list) != 0)
			goto restart;
	}
	rcu_read_unlock();
	spin_unlock(&clp->cl_lock);

	if (list_empty(&layout_list))
		return 0;
	return pnfs_layout_free_bulk_destroy_list(&layout_list, is_recall);
}
|
|
|
|
|
|
|
|
/*
 * Destroy all layouts held under client @clp, across every superblock.
 * Returns 0 on success or -EAGAIN if some layouts could not be
 * invalidated immediately.
 */
int
pnfs_destroy_layouts_byclid(struct nfs_client *clp,
		bool is_recall)
{
	struct nfs_server *server;
	LIST_HEAD(layout_list);

	spin_lock(&clp->cl_lock);
	rcu_read_lock();
restart:
	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
		/* A non-zero return means the locks were dropped: rescan. */
		if (pnfs_layout_bulk_destroy_byserver_locked(clp,
							server,
							&layout_list) != 0)
			goto restart;
	}
	rcu_read_unlock();
	spin_unlock(&clp->cl_lock);

	if (list_empty(&layout_list))
		return 0;
	return pnfs_layout_free_bulk_destroy_list(&layout_list, is_recall);
}
|
|
|
|
|
|
|
|
/*
 * Called by the state manager to remove all layouts established under an
 * expired lease.
 */
void
pnfs_destroy_all_layouts(struct nfs_client *clp)
{
	/* Invalidate and purge cached deviceids before dropping layouts. */
	nfs4_deviceid_mark_client_invalid(clp);
	nfs4_deviceid_purge_client(clp);

	pnfs_destroy_layouts_byclid(clp, false);
}
|
|
|
|
|
2020-01-27 21:07:26 +03:00
|
|
|
static void
|
|
|
|
pnfs_set_layout_cred(struct pnfs_layout_hdr *lo, const struct cred *cred)
|
|
|
|
{
|
|
|
|
const struct cred *old;
|
|
|
|
|
|
|
|
if (cred && cred_fscmp(lo->plh_lc_cred, cred) != 0) {
|
|
|
|
old = xchg(&lo->plh_lc_cred, get_cred(cred));
|
|
|
|
put_cred(old);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2011-01-06 14:36:22 +03:00
|
|
|
/* update lo->plh_stateid with new if is more recent */
void
pnfs_set_layout_stateid(struct pnfs_layout_hdr *lo, const nfs4_stateid *new,
			const struct cred *cred, bool update_barrier)
{
	u32 oldseq = be32_to_cpu(lo->plh_stateid.seqid);
	u32 newseq = be32_to_cpu(new->seqid);

	if (!pnfs_layout_is_valid(lo)) {
		/* First (re)initialisation: adopt the stateid wholesale. */
		pnfs_set_layout_cred(lo, cred);
		nfs4_stateid_copy(&lo->plh_stateid, new);
		lo->plh_barrier = newseq;
		pnfs_clear_layoutreturn_info(lo);
		clear_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags);
		return;
	}

	/* Only advance the stateid, never move it backwards. */
	if (pnfs_seqid_is_newer(newseq, oldseq))
		nfs4_stateid_copy(&lo->plh_stateid, new);

	if (update_barrier) {
		pnfs_barrier_update(lo, newseq);
		return;
	}
	/*
	 * Because of wraparound, we want to keep the barrier
	 * "close" to the current seqids. We really only want to
	 * get here from a layoutget call.
	 */
	if (atomic_read(&lo->plh_outstanding) == 1)
		pnfs_barrier_update(lo, be32_to_cpu(lo->plh_stateid.seqid));
}
|
|
|
|
|
2011-01-06 14:36:25 +03:00
|
|
|
static bool
|
2012-10-06 03:56:58 +04:00
|
|
|
pnfs_layout_stateid_blocked(const struct pnfs_layout_hdr *lo,
|
|
|
|
const nfs4_stateid *stateid)
|
2011-01-06 14:36:30 +03:00
|
|
|
{
|
2012-10-06 03:56:58 +04:00
|
|
|
u32 seqid = be32_to_cpu(stateid->seqid);
|
2012-10-03 03:56:49 +04:00
|
|
|
|
2021-07-23 15:57:21 +03:00
|
|
|
return lo->plh_barrier && pnfs_seqid_is_newer(lo->plh_barrier, seqid);
|
2012-10-06 03:56:58 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
/* lget is set to 1 if called from inside send_layoutget call chain */
|
|
|
|
static bool
|
2015-08-04 23:40:08 +03:00
|
|
|
pnfs_layoutgets_blocked(const struct pnfs_layout_hdr *lo)
|
2012-10-06 03:56:58 +04:00
|
|
|
{
|
2011-01-06 14:36:32 +03:00
|
|
|
return lo->plh_block_lgets ||
|
2015-08-04 23:40:08 +03:00
|
|
|
test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags);
|
2011-01-06 14:36:25 +03:00
|
|
|
}
|
|
|
|
|
2016-10-06 19:08:51 +03:00
|
|
|
static struct nfs_server *
|
|
|
|
pnfs_find_server(struct inode *inode, struct nfs_open_context *ctx)
|
|
|
|
{
|
|
|
|
struct nfs_server *server;
|
|
|
|
|
2016-09-22 19:30:20 +03:00
|
|
|
if (inode) {
|
2016-10-06 19:08:51 +03:00
|
|
|
server = NFS_SERVER(inode);
|
2016-09-22 19:30:20 +03:00
|
|
|
} else {
|
2016-10-06 19:08:51 +03:00
|
|
|
struct dentry *parent_dir = dget_parent(ctx->dentry);
|
|
|
|
server = NFS_SERVER(parent_dir->d_inode);
|
|
|
|
dput(parent_dir);
|
|
|
|
}
|
|
|
|
return server;
|
|
|
|
}
|
|
|
|
|
2018-05-31 00:16:20 +03:00
|
|
|
/*
 * Free a NULL-terminated-by-convention page array of up to @size entries
 * allocated by nfs4_alloc_pages(), then free the array itself.  Stops at
 * the first NULL slot so partially-filled arrays are handled safely.
 *
 * Fix: the loop index was a signed int compared against the size_t @size,
 * which is a sign/width mismatch (-Wsign-compare; misbehaves for sizes
 * above INT_MAX).  Use size_t for the index.
 */
static void nfs4_free_pages(struct page **pages, size_t size)
{
	size_t i;

	if (!pages)
		return;

	for (i = 0; i < size; i++) {
		if (!pages[i])
			break;
		__free_page(pages[i]);
	}
	kfree(pages);
}
|
|
|
|
|
|
|
|
/*
 * Allocate an array of @size pages for a layoutget reply buffer.  On
 * failure everything allocated so far is released and NULL is returned.
 *
 * Fix: the loop index was a signed int compared against the size_t @size
 * (sign/width mismatch); use size_t, which also makes the partial-free
 * call nfs4_free_pages(pages, i) type-correct.
 */
static struct page **nfs4_alloc_pages(size_t size, gfp_t gfp_flags)
{
	struct page **pages;
	size_t i;

	/* kmalloc_array checks size * sizeof(struct page *) for overflow. */
	pages = kmalloc_array(size, sizeof(struct page *), gfp_flags);
	if (!pages) {
		dprintk("%s: can't alloc array of %zu pages\n", __func__, size);
		return NULL;
	}

	for (i = 0; i < size; i++) {
		pages[i] = alloc_page(gfp_flags);
		if (!pages[i]) {
			dprintk("%s: failed to allocate page\n", __func__);
			/* Free only the i pages allocated so far. */
			nfs4_free_pages(pages, i);
			return NULL;
		}
	}

	return pages;
}
|
|
|
|
|
2016-09-21 12:14:28 +03:00
|
|
|
/*
 * Allocate and fill in a nfs4_layoutget request for @range on @ino (or,
 * pre-open, on the server derived from @ctx).  Returns NULL on allocation
 * failure.  The caller owns the returned structure and the references it
 * holds (open context, reply pages).
 */
static struct nfs4_layoutget *
pnfs_alloc_init_layoutget_args(struct inode *ino,
	   struct nfs_open_context *ctx,
	   const nfs4_stateid *stateid,
	   const struct pnfs_layout_range *range,
	   gfp_t gfp_flags)
{
	struct nfs_server *server = pnfs_find_server(ino, ctx);
	size_t max_reply_sz = server->pnfs_curr_ld->max_layoutget_response;
	size_t max_pages = max_response_pages(server);
	struct nfs4_layoutget *lgp;

	dprintk("--> %s\n", __func__);

	lgp = kzalloc(sizeof(*lgp), gfp_flags);
	if (lgp == NULL)
		return NULL;

	/* Honour a layout driver's cap on the reply size, if it set one. */
	if (max_reply_sz) {
		size_t npages = (max_reply_sz + PAGE_SIZE - 1) >> PAGE_SHIFT;
		if (npages < max_pages)
			max_pages = npages;
	}

	lgp->args.layout.pages = nfs4_alloc_pages(max_pages, gfp_flags);
	if (!lgp->args.layout.pages) {
		kfree(lgp);
		return NULL;
	}
	lgp->args.layout.pglen = max_pages * PAGE_SIZE;
	lgp->res.layoutp = &lgp->args.layout;

	/* Don't confuse uninitialised result and success */
	lgp->res.status = -NFS4ERR_DELAY;

	/* Never ask for less than a page, but clamp to the request range. */
	lgp->args.minlength = PAGE_SIZE;
	if (lgp->args.minlength > range->length)
		lgp->args.minlength = range->length;
	if (ino) {
		loff_t i_size = i_size_read(ino);

		/* For reads, don't ask for layout beyond current EOF. */
		if (range->iomode == IOMODE_READ) {
			if (range->offset >= i_size)
				lgp->args.minlength = 0;
			else if (i_size - range->offset < lgp->args.minlength)
				lgp->args.minlength = i_size - range->offset;
		}
	}
	lgp->args.maxcount = PNFS_LAYOUT_MAXSIZE;
	pnfs_copy_range(&lgp->args.range, range);
	lgp->args.type = server->pnfs_curr_ld->id;
	lgp->args.inode = ino;
	lgp->args.ctx = get_nfs_open_context(ctx);
	nfs4_stateid_copy(&lgp->args.stateid, stateid);
	lgp->gfp_flags = gfp_flags;
	lgp->cred = ctx->cred;
	return lgp;
}
|
|
|
|
|
2018-05-31 00:16:20 +03:00
|
|
|
/*
 * Release everything owned by a layoutget request: the reply page array,
 * the layout header reference, the open context, and the structure itself.
 */
void pnfs_layoutget_free(struct nfs4_layoutget *lgp)
{
	size_t max_pages = lgp->args.layout.pglen / PAGE_SIZE;

	nfs4_free_pages(lgp->args.layout.pages, max_pages);
	pnfs_put_layout_hdr(lgp->lo);
	put_nfs_open_context(lgp->args.ctx);
	kfree(lgp);
}
|
|
|
|
|
2013-03-20 21:03:00 +04:00
|
|
|
static void pnfs_clear_layoutcommit(struct inode *inode,
|
|
|
|
struct list_head *head)
|
|
|
|
{
|
|
|
|
struct nfs_inode *nfsi = NFS_I(inode);
|
|
|
|
struct pnfs_layout_segment *lseg, *tmp;
|
|
|
|
|
|
|
|
if (!test_and_clear_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags))
|
|
|
|
return;
|
|
|
|
list_for_each_entry_safe(lseg, tmp, &nfsi->layout->plh_segs, pls_list) {
|
|
|
|
if (!test_and_clear_bit(NFS_LSEG_LAYOUTCOMMIT, &lseg->pls_flags))
|
|
|
|
continue;
|
|
|
|
pnfs_lseg_dec_and_remove_zero(lseg, head);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-10-13 02:50:54 +03:00
|
|
|
/*
 * Complete a layoutreturn: free the segments covered by the returned
 * @range/@arg_stateid and either install the server's new @stateid or,
 * if none was returned, invalidate the whole layout.  Wakes anyone
 * waiting on the layoutreturn bit.
 */
void pnfs_layoutreturn_free_lsegs(struct pnfs_layout_hdr *lo,
		const nfs4_stateid *arg_stateid,
		const struct pnfs_layout_range *range,
		const nfs4_stateid *stateid)
{
	struct inode *inode = lo->plh_inode;
	LIST_HEAD(freeme);

	spin_lock(&inode->i_lock);
	/* Ignore stale completions for a different/invalidated stateid. */
	if (!pnfs_layout_is_valid(lo) ||
	    !nfs4_stateid_match_other(&lo->plh_stateid, arg_stateid))
		goto out_unlock;
	if (stateid) {
		u32 seq = be32_to_cpu(arg_stateid->seqid);

		pnfs_mark_matching_lsegs_invalid(lo, &freeme, range, seq);
		pnfs_free_returned_lsegs(lo, &freeme, range, seq);
		pnfs_set_layout_stateid(lo, stateid, NULL, true);
	} else
		pnfs_mark_layout_stateid_invalid(lo, &freeme);
out_unlock:
	pnfs_clear_layoutreturn_waitbit(lo);
	spin_unlock(&inode->i_lock);
	/* Actual freeing must happen outside i_lock. */
	pnfs_free_lseg_list(&freeme);
}
|
|
|
|
|
2016-01-27 07:12:11 +03:00
|
|
|
/*
 * Try to take ownership of the layoutreturn path for @lo (caller holds
 * i_lock).  On success, returns true with a layout header reference taken,
 * *stateid/*cred filled in for the RPC, and *iomode (if non-NULL) set to
 * the range being returned.  Returns false if a layoutget or another
 * layoutreturn is already in flight.
 */
static bool
pnfs_prepare_layoutreturn(struct pnfs_layout_hdr *lo,
		nfs4_stateid *stateid,
		const struct cred **cred,
		enum pnfs_iomode *iomode)
{
	/* Serialise LAYOUTGET/LAYOUTRETURN */
	if (atomic_read(&lo->plh_outstanding) != 0)
		return false;
	if (test_and_set_bit(NFS_LAYOUT_RETURN_LOCK, &lo->plh_flags))
		return false;
	set_bit(NFS_LAYOUT_RETURN, &lo->plh_flags);
	/* Reference and cred are handed off to pnfs_send_layoutreturn(). */
	pnfs_get_layout_hdr(lo);
	nfs4_stateid_copy(stateid, &lo->plh_stateid);
	*cred = get_cred(lo->plh_lc_cred);
	if (test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags)) {
		/* A specific return was requested: use its seqid/iomode. */
		if (lo->plh_return_seq != 0)
			stateid->seqid = cpu_to_be32(lo->plh_return_seq);
		if (iomode != NULL)
			*iomode = lo->plh_return_iomode;
		pnfs_clear_layoutreturn_info(lo);
	} else if (iomode != NULL)
		*iomode = IOMODE_ANY;
	pnfs_barrier_update(lo, be32_to_cpu(stateid->seqid));
	return true;
}
|
|
|
|
|
2016-11-16 05:47:27 +03:00
|
|
|
static void
|
|
|
|
pnfs_init_layoutreturn_args(struct nfs4_layoutreturn_args *args,
|
|
|
|
struct pnfs_layout_hdr *lo,
|
|
|
|
const nfs4_stateid *stateid,
|
|
|
|
enum pnfs_iomode iomode)
|
|
|
|
{
|
|
|
|
struct inode *inode = lo->plh_inode;
|
|
|
|
|
|
|
|
args->layout_type = NFS_SERVER(inode)->pnfs_curr_ld->id;
|
|
|
|
args->inode = inode;
|
|
|
|
args->range.iomode = iomode;
|
|
|
|
args->range.offset = 0;
|
|
|
|
args->range.length = NFS4_MAX_UINT64;
|
|
|
|
args->layout = lo;
|
|
|
|
nfs4_stateid_copy(&args->stateid, stateid);
|
|
|
|
}
|
|
|
|
|
2014-09-05 20:53:22 +04:00
|
|
|
/*
 * Issue a LAYOUTRETURN for @lo.  Consumes the cred in *pcred and the
 * layout header reference taken by pnfs_prepare_layoutreturn(); on
 * allocation failure both are released here and the waitbit is cleared.
 * @sync selects synchronous vs asynchronous RPC execution.
 */
static int
pnfs_send_layoutreturn(struct pnfs_layout_hdr *lo,
		       const nfs4_stateid *stateid,
		       const struct cred **pcred,
		       enum pnfs_iomode iomode,
		       bool sync)
{
	struct inode *ino = lo->plh_inode;
	struct pnfs_layoutdriver_type *ld = NFS_SERVER(ino)->pnfs_curr_ld;
	struct nfs4_layoutreturn *lrp;
	const struct cred *cred = *pcred;
	int status = 0;

	/* Ownership of the cred moves to this function. */
	*pcred = NULL;
	lrp = kzalloc(sizeof(*lrp), GFP_NOFS);
	if (unlikely(lrp == NULL)) {
		status = -ENOMEM;
		spin_lock(&ino->i_lock);
		/* Unblock waiters since no RPC will run. */
		pnfs_clear_layoutreturn_waitbit(lo);
		spin_unlock(&ino->i_lock);
		put_cred(cred);
		pnfs_put_layout_hdr(lo);
		goto out;
	}

	pnfs_init_layoutreturn_args(&lrp->args, lo, stateid, iomode);
	lrp->args.ld_private = &lrp->ld_private;
	lrp->clp = NFS_SERVER(ino)->nfs_client;
	lrp->cred = cred;
	/* Give the layout driver a chance to add private return data. */
	if (ld->prepare_layoutreturn)
		ld->prepare_layoutreturn(&lrp->args);

	status = nfs4_proc_layoutreturn(lrp, sync);
out:
	dprintk("<-- %s status: %d\n", __func__, status);
	return status;
}
|
|
|
|
|
2020-08-05 16:03:56 +03:00
|
|
|
/*
 * Check whether the segments matching @iomode/@seq can be returned now,
 * moving returnable ones onto lo->plh_return_segs.  -EBUSY from
 * pnfs_mark_matching_lsegs_return() means some are still in use.
 */
static bool
pnfs_layout_segments_returnable(struct pnfs_layout_hdr *lo,
				enum pnfs_iomode iomode,
				u32 seq)
{
	struct pnfs_layout_range recall_range = {
		.length = NFS4_MAX_UINT64,
		.iomode = iomode,
	};
	return pnfs_mark_matching_lsegs_return(lo, &lo->plh_return_segs,
					       &recall_range, seq) != -EBUSY;
}
|
|
|
|
|
2016-01-27 07:12:11 +03:00
|
|
|
/* Return true if layoutreturn is needed */
|
|
|
|
static bool
|
|
|
|
pnfs_layout_need_return(struct pnfs_layout_hdr *lo)
|
|
|
|
{
|
2016-01-28 04:32:50 +03:00
|
|
|
if (!test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags))
|
2016-01-27 07:12:11 +03:00
|
|
|
return false;
|
2020-08-05 16:03:56 +03:00
|
|
|
return pnfs_layout_segments_returnable(lo, lo->plh_return_iomode,
|
|
|
|
lo->plh_return_seq);
|
2016-01-27 07:12:11 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * If a layoutreturn has been requested for @lo and its segments are now
 * returnable, fire off an asynchronous LAYOUTRETURN before the layout
 * header's last reference is dropped.
 */
static void pnfs_layoutreturn_before_put_layout_hdr(struct pnfs_layout_hdr *lo)
{
	struct inode *inode= lo->plh_inode;

	/* Cheap unlocked check first; re-evaluated under i_lock below. */
	if (!test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags))
		return;
	spin_lock(&inode->i_lock);
	if (pnfs_layout_need_return(lo)) {
		const struct cred *cred;
		nfs4_stateid stateid;
		enum pnfs_iomode iomode;
		bool send;

		send = pnfs_prepare_layoutreturn(lo, &stateid, &cred, &iomode);
		spin_unlock(&inode->i_lock);
		if (send) {
			/* Send an async layoutreturn so we dont deadlock */
			pnfs_send_layoutreturn(lo, &stateid, &cred, iomode, false);
		}
	} else
		spin_unlock(&inode->i_lock);
}
|
|
|
|
|
2012-06-20 23:03:34 +04:00
|
|
|
/*
 * Initiates a LAYOUTRETURN(FILE), and removes the pnfs_layout_hdr
 * when the layout segment list is empty.
 *
 * Note that a pnfs_layout_hdr can exist with an empty layout segment
 * list when LAYOUTGET has failed, or when LAYOUTGET succeeded, but the
 * deviceid is marked invalid.
 */
int
_pnfs_return_layout(struct inode *ino)
{
	struct pnfs_layout_hdr *lo = NULL;
	struct nfs_inode *nfsi = NFS_I(ino);
	struct pnfs_layout_range range = {
		.iomode		= IOMODE_ANY,
		.offset		= 0,
		.length		= NFS4_MAX_UINT64,
	};
	LIST_HEAD(tmp_list);
	const struct cred *cred;
	nfs4_stateid stateid;
	int status = 0;
	bool send, valid_layout;

	dprintk("NFS: %s for inode %lu\n", __func__, ino->i_ino);

	spin_lock(&ino->i_lock);
	lo = nfsi->layout;
	if (!lo) {
		spin_unlock(&ino->i_lock);
		dprintk("NFS: %s no layout to return\n", __func__);
		goto out;
	}
	/* Reference matched in nfs4_layoutreturn_release */
	pnfs_get_layout_hdr(lo);
	/* Is there an outstanding layoutreturn ? */
	if (test_bit(NFS_LAYOUT_RETURN_LOCK, &lo->plh_flags)) {
		/* Wait for it to finish; drop i_lock since waiting sleeps. */
		spin_unlock(&ino->i_lock);
		if (wait_on_bit(&lo->plh_flags, NFS_LAYOUT_RETURN,
					TASK_UNINTERRUPTIBLE))
			goto out_put_layout_hdr;
		spin_lock(&ino->i_lock);
	}
	valid_layout = pnfs_layout_is_valid(lo);
	pnfs_clear_layoutcommit(ino, &tmp_list);
	pnfs_mark_matching_lsegs_return(lo, &tmp_list, &range, 0);

	/* Let the layout driver veto or narrow the range being returned. */
	if (NFS_SERVER(ino)->pnfs_curr_ld->return_range)
		NFS_SERVER(ino)->pnfs_curr_ld->return_range(lo, &range);

	/* Don't send a LAYOUTRETURN if list was initially empty */
	if (!test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags) ||
			!valid_layout) {
		spin_unlock(&ino->i_lock);
		dprintk("NFS: %s no layout segments to return\n", __func__);
		goto out_wait_layoutreturn;
	}

	send = pnfs_prepare_layoutreturn(lo, &stateid, &cred, NULL);
	spin_unlock(&ino->i_lock);
	if (send)
		status = pnfs_send_layoutreturn(lo, &stateid, &cred, IOMODE_ANY, true);
out_wait_layoutreturn:
	/* Don't return to the caller while a layoutreturn is in flight. */
	wait_on_bit(&lo->plh_flags, NFS_LAYOUT_RETURN, TASK_UNINTERRUPTIBLE);
out_put_layout_hdr:
	pnfs_free_lseg_list(&tmp_list);
	pnfs_put_layout_hdr(lo);
out:
	dprintk("<-- %s status: %d\n", __func__, status);
	return status;
}
|
|
|
|
|
2013-03-20 21:23:33 +04:00
|
|
|
/*
 * Flush dirty data, layoutcommit, and then return the inode's layout.
 * New layoutgets are blocked for the duration via plh_block_lgets.
 * Returns 0 on success or a negative errno.
 */
int
pnfs_commit_and_return_layout(struct inode *inode)
{
	struct pnfs_layout_hdr *lo;
	int ret;

	spin_lock(&inode->i_lock);
	lo = NFS_I(inode)->layout;
	if (lo == NULL) {
		/* Nothing to commit or return. */
		spin_unlock(&inode->i_lock);
		return 0;
	}
	pnfs_get_layout_hdr(lo);
	/* Block new layoutgets and read/write to ds */
	lo->plh_block_lgets++;
	spin_unlock(&inode->i_lock);
	filemap_fdatawait(inode->i_mapping);
	ret = pnfs_layoutcommit_inode(inode, true);
	if (ret == 0)
		ret = _pnfs_return_layout(inode);
	spin_lock(&inode->i_lock);
	lo->plh_block_lgets--;
	spin_unlock(&inode->i_lock);
	pnfs_put_layout_hdr(lo);
	return ret;
}
|
|
|
|
|
2016-11-16 09:11:25 +03:00
|
|
|
/*
 * Decide whether a layout can be returned-on-close (ROC), and if so set
 * up @args/@res so the LAYOUTRETURN can be compounded with the CLOSE.
 *
 * Returns true when the caller should send the compounded layoutreturn
 * (pnfs_roc_release() will then drop the layout header reference taken
 * here).  Returns false when no return is needed, or when the return was
 * issued separately because the credentials did not match.
 */
bool pnfs_roc(struct inode *ino,
		struct nfs4_layoutreturn_args *args,
		struct nfs4_layoutreturn_res *res,
		const struct cred *cred)
{
	struct nfs_inode *nfsi = NFS_I(ino);
	struct nfs_open_context *ctx;
	struct nfs4_state *state;
	struct pnfs_layout_hdr *lo;
	struct pnfs_layout_segment *lseg, *next;
	const struct cred *lc_cred;
	nfs4_stateid stateid;
	enum pnfs_iomode iomode = 0;
	bool layoutreturn = false, roc = false;
	bool skip_read = false;

	if (!nfs_have_layout(ino))
		return false;
retry:
	rcu_read_lock();
	spin_lock(&ino->i_lock);
	lo = nfsi->layout;
	if (!lo || !pnfs_layout_is_valid(lo) ||
	    test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) {
		/* No usable layout; a bulk recall takes precedence */
		lo = NULL;
		goto out_noroc;
	}
	pnfs_get_layout_hdr(lo);
	if (test_bit(NFS_LAYOUT_RETURN_LOCK, &lo->plh_flags)) {
		/* Another layoutreturn is in flight: wait and retry */
		spin_unlock(&ino->i_lock);
		rcu_read_unlock();
		wait_on_bit(&lo->plh_flags, NFS_LAYOUT_RETURN,
				TASK_UNINTERRUPTIBLE);
		pnfs_put_layout_hdr(lo);
		goto retry;
	}

	/* no roc if we hold a delegation */
	if (nfs4_check_delegation(ino, FMODE_READ)) {
		if (nfs4_check_delegation(ino, FMODE_WRITE))
			goto out_noroc;
		/* Read delegation: keep read lsegs, only return RW ones */
		skip_read = true;
	}

	list_for_each_entry_rcu(ctx, &nfsi->open_files, list) {
		state = ctx->state;
		if (state == NULL)
			continue;
		/* Don't return layout if there is open file state */
		if (state->state & FMODE_WRITE)
			goto out_noroc;
		if (state->state & FMODE_READ)
			skip_read = true;
	}

	list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list) {
		if (skip_read && lseg->pls_range.iomode == IOMODE_READ)
			continue;
		/* If we are sending layoutreturn, invalidate all valid lsegs */
		if (!test_and_clear_bit(NFS_LSEG_ROC, &lseg->pls_flags))
			continue;
		/*
		 * Note: mark lseg for return so pnfs_layout_remove_lseg
		 * doesn't invalidate the layout for us.
		 */
		set_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags);
		if (!mark_lseg_invalid(lseg, &lo->plh_return_segs))
			continue;
		pnfs_set_plh_return_info(lo, lseg->pls_range.iomode, 0);
	}

	if (!test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags))
		goto out_noroc;

	/* ROC in two conditions:
	 * 1. there are ROC lsegs
	 * 2. we don't send layoutreturn
	 */
	/* lo ref dropped in pnfs_roc_release() */
	layoutreturn = pnfs_prepare_layoutreturn(lo, &stateid, &lc_cred, &iomode);
	/* If the creds don't match, we can't compound the layoutreturn */
	if (!layoutreturn || cred_fscmp(cred, lc_cred) != 0)
		goto out_noroc;

	roc = layoutreturn;
	pnfs_init_layoutreturn_args(args, lo, &stateid, iomode);
	res->lrs_present = 0;
	/* The compounded CLOSE carries the return; no standalone send */
	layoutreturn = false;
	put_cred(lc_cred);

out_noroc:
	spin_unlock(&ino->i_lock);
	rcu_read_unlock();
	pnfs_layoutcommit_inode(ino, true);
	if (roc) {
		struct pnfs_layoutdriver_type *ld = NFS_SERVER(ino)->pnfs_curr_ld;
		if (ld->prepare_layoutreturn)
			ld->prepare_layoutreturn(args);
		pnfs_put_layout_hdr(lo);
		return true;
	}
	if (layoutreturn)
		pnfs_send_layoutreturn(lo, &stateid, &lc_cred, iomode, true);
	/* NOTE(review): lo may be NULL on the early out_noroc path —
	 * assumes pnfs_put_layout_hdr() tolerates NULL; confirm. */
	pnfs_put_layout_hdr(lo);
	return false;
}
|
|
|
|
|
2021-01-04 21:18:03 +03:00
|
|
|
/*
 * Post-process a layoutreturn that was compounded with CLOSE/DELEGRETURN.
 *
 * Examines the layoutreturn status in *ret and decides whether the
 * operation is complete (returns 0, clearing *argpp/*respp) or must be
 * retried (-EAGAIN, leaving the args in place).  *ret may be rewritten
 * so the caller's generic error handling does the right thing.
 */
int pnfs_roc_done(struct rpc_task *task, struct nfs4_layoutreturn_args **argpp,
		  struct nfs4_layoutreturn_res **respp, int *ret)
{
	struct nfs4_layoutreturn_args *arg = *argpp;
	int retval = -EAGAIN;

	if (!arg)
		return 0;
	/* Handle Layoutreturn errors */
	switch (*ret) {
	case 0:
		retval = 0;
		break;
	case -NFS4ERR_NOMATCHING_LAYOUT:
		/* Was there an RPC level error? If not, retry */
		if (task->tk_rpc_status == 0)
			break;
		/* If the call was not sent, let caller handle it */
		if (!RPC_WAS_SENT(task))
			return 0;
		/*
		 * Otherwise, assume the call succeeded and
		 * that we need to release the layout
		 */
		*ret = 0;
		(*respp)->lrs_present = 0;
		retval = 0;
		break;
	case -NFS4ERR_DELAY:
		/* Let the caller handle the retry */
		*ret = -NFS4ERR_NOMATCHING_LAYOUT;
		return 0;
	case -NFS4ERR_OLD_STATEID:
		/* Refresh the stateid and retry the return immediately */
		if (!nfs4_layout_refresh_old_stateid(&arg->stateid,
						     &arg->range, arg->inode))
			break;
		*ret = -NFS4ERR_NOMATCHING_LAYOUT;
		return -EAGAIN;
	}
	/* Done with this layoutreturn: detach args/res from the caller */
	*argpp = NULL;
	*respp = NULL;
	return retval;
}
|
|
|
|
|
2016-11-16 09:11:25 +03:00
|
|
|
/*
 * Release state after a return-on-close layoutreturn completes.
 *
 * @ret is the final layoutreturn status.  Frees the returned layout
 * segments (keeping the layout stateid the server handed back, if any),
 * releases the layout-driver private data, and drops the layout header
 * reference taken by pnfs_roc().
 */
void pnfs_roc_release(struct nfs4_layoutreturn_args *args,
		struct nfs4_layoutreturn_res *res,
		int ret)
{
	struct pnfs_layout_hdr *lo = args->layout;
	struct inode *inode = args->inode;
	const nfs4_stateid *res_stateid = NULL;
	struct nfs4_xdr_opaque_data *ld_private = args->ld_private;

	switch (ret) {
	case -NFS4ERR_NOMATCHING_LAYOUT:
		/* Server no longer knows this layout: schedule a fresh
		 * return if our stateid still matches, and wake waiters. */
		spin_lock(&inode->i_lock);
		if (pnfs_layout_is_valid(lo) &&
		    nfs4_stateid_match_other(&args->stateid, &lo->plh_stateid))
			pnfs_set_plh_return_info(lo, args->range.iomode, 0);
		pnfs_clear_layoutreturn_waitbit(lo);
		spin_unlock(&inode->i_lock);
		break;
	case 0:
		/* Success: adopt the returned stateid if present */
		if (res->lrs_present)
			res_stateid = &res->stateid;
		fallthrough;
	default:
		/* Free the lsegs that were queued for return */
		pnfs_layoutreturn_free_lsegs(lo, &args->stateid, &args->range,
					     res_stateid);
	}
	trace_nfs4_layoutreturn_on_close(args->inode, &args->stateid, ret);
	if (ld_private && ld_private->ops && ld_private->ops->free)
		ld_private->ops->free(ld_private);
	/* Drop the reference taken in pnfs_roc() */
	pnfs_put_layout_hdr(lo);
}
|
|
|
|
|
2015-09-22 06:35:22 +03:00
|
|
|
bool pnfs_wait_on_layoutreturn(struct inode *ino, struct rpc_task *task)
|
|
|
|
{
|
|
|
|
struct nfs_inode *nfsi = NFS_I(ino);
|
|
|
|
struct pnfs_layout_hdr *lo;
|
|
|
|
bool sleep = false;
|
|
|
|
|
|
|
|
/* we might not have grabbed lo reference. so need to check under
|
|
|
|
* i_lock */
|
|
|
|
spin_lock(&ino->i_lock);
|
|
|
|
lo = nfsi->layout;
|
2016-11-18 23:21:30 +03:00
|
|
|
if (lo && test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags)) {
|
|
|
|
rpc_sleep_on(&NFS_SERVER(ino)->roc_rpcwaitq, task, NULL);
|
2015-09-22 06:35:22 +03:00
|
|
|
sleep = true;
|
2016-11-18 23:21:30 +03:00
|
|
|
}
|
2015-09-22 06:35:22 +03:00
|
|
|
spin_unlock(&ino->i_lock);
|
|
|
|
return sleep;
|
|
|
|
}
|
|
|
|
|
2010-10-20 08:18:03 +04:00
|
|
|
/*
|
|
|
|
* Compare two layout segments for sorting into layout cache.
|
|
|
|
* We want to preferentially return RW over RO layouts, so ensure those
|
|
|
|
* are seen first.
|
|
|
|
*/
|
|
|
|
static s64
|
2013-06-03 19:30:24 +04:00
|
|
|
pnfs_lseg_range_cmp(const struct pnfs_layout_range *l1,
|
2013-06-03 19:24:36 +04:00
|
|
|
const struct pnfs_layout_range *l2)
|
2010-10-20 08:18:03 +04:00
|
|
|
{
|
2011-05-22 20:47:26 +04:00
|
|
|
s64 d;
|
|
|
|
|
|
|
|
/* high offset > low offset */
|
|
|
|
d = l1->offset - l2->offset;
|
|
|
|
if (d)
|
|
|
|
return d;
|
|
|
|
|
|
|
|
/* short length > long length */
|
|
|
|
d = l2->length - l1->length;
|
|
|
|
if (d)
|
|
|
|
return d;
|
|
|
|
|
2010-10-20 08:18:03 +04:00
|
|
|
/* read > read/write */
|
2011-05-22 20:47:26 +04:00
|
|
|
return (int)(l1->iomode == IOMODE_READ) - (int)(l2->iomode == IOMODE_READ);
|
2010-10-20 08:18:03 +04:00
|
|
|
}
|
|
|
|
|
2015-08-25 15:54:17 +03:00
|
|
|
static bool
|
|
|
|
pnfs_lseg_range_is_after(const struct pnfs_layout_range *l1,
|
|
|
|
const struct pnfs_layout_range *l2)
|
|
|
|
{
|
|
|
|
return pnfs_lseg_range_cmp(l1, l2) > 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Default merge callback for pnfs_generic_layout_insert_lseg(): generic
 * layout segments are never merged, so always report "no merge".
 */
static bool
pnfs_lseg_no_merge(struct pnfs_layout_segment *lseg,
		struct pnfs_layout_segment *old)
{
	return false;
}
|
|
|
|
|
|
|
|
/*
 * Insert @lseg into @lo's sorted segment list.
 *
 * Walks the existing valid segments: any segment for which @do_merge
 * returns true is invalidated (moved onto @free_me for the caller to
 * free); @lseg is inserted immediately before the first valid segment
 * that does not sort after it, or appended at the tail.  Takes a
 * reference on the layout header on behalf of the inserted segment.
 */
void
pnfs_generic_layout_insert_lseg(struct pnfs_layout_hdr *lo,
		   struct pnfs_layout_segment *lseg,
		   bool (*is_after)(const struct pnfs_layout_range *,
			   const struct pnfs_layout_range *),
		   bool (*do_merge)(struct pnfs_layout_segment *,
			   struct pnfs_layout_segment *),
		   struct list_head *free_me)
{
	struct pnfs_layout_segment *lp, *tmp;

	dprintk("%s:Begin\n", __func__);

	list_for_each_entry_safe(lp, tmp, &lo->plh_segs, pls_list) {
		/* Only valid segments participate in ordering/merging */
		if (test_bit(NFS_LSEG_VALID, &lp->pls_flags) == 0)
			continue;
		if (do_merge(lseg, lp)) {
			/* @lseg subsumes lp: retire lp onto free_me */
			mark_lseg_invalid(lp, free_me);
			continue;
		}
		if (is_after(&lseg->pls_range, &lp->pls_range))
			continue;
		/* Found the insertion point: link in before lp */
		list_add_tail(&lseg->pls_list, &lp->pls_list);
		dprintk("%s: inserted lseg %p "
			"iomode %d offset %llu length %llu before "
			"lp %p iomode %d offset %llu length %llu\n",
			__func__, lseg, lseg->pls_range.iomode,
			lseg->pls_range.offset, lseg->pls_range.length,
			lp, lp->pls_range.iomode, lp->pls_range.offset,
			lp->pls_range.length);
		goto out;
	}
	/* Sorts after everything present: append at the tail */
	list_add_tail(&lseg->pls_list, &lo->plh_segs);
	dprintk("%s: inserted lseg %p "
		"iomode %d offset %llu length %llu at tail\n",
		__func__, lseg, lseg->pls_range.iomode,
		lseg->pls_range.offset, lseg->pls_range.length);
out:
	/* The segment holds a reference on its layout header */
	pnfs_get_layout_hdr(lo);

	dprintk("%s:Return\n", __func__);
}
EXPORT_SYMBOL_GPL(pnfs_generic_layout_insert_lseg);
|
|
|
|
|
|
|
|
static void
|
|
|
|
pnfs_layout_insert_lseg(struct pnfs_layout_hdr *lo,
|
|
|
|
struct pnfs_layout_segment *lseg,
|
|
|
|
struct list_head *free_me)
|
|
|
|
{
|
|
|
|
struct inode *inode = lo->plh_inode;
|
|
|
|
struct pnfs_layoutdriver_type *ld = NFS_SERVER(inode)->pnfs_curr_ld;
|
|
|
|
|
|
|
|
if (ld->add_lseg != NULL)
|
|
|
|
ld->add_lseg(lo, lseg, free_me);
|
|
|
|
else
|
|
|
|
pnfs_generic_layout_insert_lseg(lo, lseg,
|
|
|
|
pnfs_lseg_range_is_after,
|
|
|
|
pnfs_lseg_no_merge,
|
|
|
|
free_me);
|
|
|
|
}
|
2010-10-20 08:18:01 +04:00
|
|
|
|
|
|
|
/*
 * Allocate and initialize a new layout header for @ino.
 *
 * The header starts with a single reference, empty segment/bulk-destroy
 * lists, the open context's credential, and the INVALID_STID flag set
 * (no layout stateid has been obtained from the server yet).
 *
 * Returns NULL on allocation failure.
 */
static struct pnfs_layout_hdr *
alloc_init_layout_hdr(struct inode *ino,
		      struct nfs_open_context *ctx,
		      gfp_t gfp_flags)
{
	struct pnfs_layout_hdr *lo;

	lo = pnfs_alloc_layout_hdr(ino, gfp_flags);
	if (!lo)
		return NULL;
	refcount_set(&lo->plh_refcount, 1);
	INIT_LIST_HEAD(&lo->plh_layouts);
	INIT_LIST_HEAD(&lo->plh_segs);
	INIT_LIST_HEAD(&lo->plh_return_segs);
	INIT_LIST_HEAD(&lo->plh_bulk_destroy);
	lo->plh_inode = ino;
	/* Pin the credential used to establish the layout */
	lo->plh_lc_cred = get_cred(ctx->cred);
	/* No layout stateid from the server yet */
	lo->plh_flags |= 1 << NFS_LAYOUT_INVALID_STID;
	return lo;
}
|
|
|
|
|
|
|
|
/*
 * Find the inode's layout header, allocating one if none exists yet.
 *
 * Called with i_lock held; the lock is dropped around the allocation and
 * reacquired, so another task may install a header in the meantime — in
 * that case our freshly allocated one is discarded.  Always returns with
 * i_lock held and a reference taken on the returned header (NULL only if
 * allocation failed and no header appeared concurrently).
 */
static struct pnfs_layout_hdr *
pnfs_find_alloc_layout(struct inode *ino,
		       struct nfs_open_context *ctx,
		       gfp_t gfp_flags)
	__releases(&ino->i_lock)
	__acquires(&ino->i_lock)
{
	struct nfs_inode *nfsi = NFS_I(ino);
	struct pnfs_layout_hdr *new = NULL;

	dprintk("%s Begin ino=%p layout=%p\n", __func__, ino, nfsi->layout);

	if (nfsi->layout != NULL)
		goto out_existing;
	/* Must not allocate while holding i_lock */
	spin_unlock(&ino->i_lock);
	new = alloc_init_layout_hdr(ino, ctx, gfp_flags);
	spin_lock(&ino->i_lock);

	if (likely(nfsi->layout == NULL)) {	/* Won the race? */
		/* new already carries its initial reference */
		nfsi->layout = new;
		return new;
	} else if (new != NULL)
		/* Lost the race: discard our allocation */
		pnfs_free_layout_hdr(new);
out_existing:
	pnfs_get_layout_hdr(nfsi->layout);
	return nfsi->layout;
}
|
|
|
|
|
2010-10-20 08:18:03 +04:00
|
|
|
/*
|
|
|
|
* iomode matching rules:
|
2016-05-25 17:31:14 +03:00
|
|
|
* iomode lseg strict match
|
|
|
|
* iomode
|
|
|
|
* ----- ----- ------ -----
|
|
|
|
* ANY READ N/A true
|
|
|
|
* ANY RW N/A true
|
|
|
|
* RW READ N/A false
|
|
|
|
* RW RW N/A true
|
|
|
|
* READ READ N/A true
|
|
|
|
* READ RW true false
|
|
|
|
* READ RW false true
|
2010-10-20 08:18:03 +04:00
|
|
|
*/
|
2013-06-03 19:24:36 +04:00
|
|
|
static bool
|
2013-06-03 19:30:24 +04:00
|
|
|
pnfs_lseg_range_match(const struct pnfs_layout_range *ls_range,
|
2016-05-25 17:31:14 +03:00
|
|
|
const struct pnfs_layout_range *range,
|
|
|
|
bool strict_iomode)
|
2010-10-20 08:18:03 +04:00
|
|
|
{
|
2011-05-22 20:47:26 +04:00
|
|
|
struct pnfs_layout_range range1;
|
|
|
|
|
|
|
|
if ((range->iomode == IOMODE_RW &&
|
|
|
|
ls_range->iomode != IOMODE_RW) ||
|
2016-05-25 17:31:14 +03:00
|
|
|
(range->iomode != ls_range->iomode &&
|
2017-10-07 17:02:21 +03:00
|
|
|
strict_iomode) ||
|
2013-06-03 19:30:24 +04:00
|
|
|
!pnfs_lseg_range_intersecting(ls_range, range))
|
2018-08-02 03:55:51 +03:00
|
|
|
return false;
|
2011-05-22 20:47:26 +04:00
|
|
|
|
|
|
|
/* range1 covers only the first byte in the range */
|
|
|
|
range1 = *range;
|
|
|
|
range1.length = 1;
|
2013-06-03 19:30:24 +04:00
|
|
|
return pnfs_lseg_range_contained(ls_range, &range1);
|
2010-10-20 08:18:03 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* lookup range in layout
|
|
|
|
*/
|
2010-10-20 08:18:01 +04:00
|
|
|
/*
 * Search @lo's segment cache for a valid segment matching @range.
 *
 * Returns the first matching segment with a reference taken
 * (pnfs_get_lseg), or NULL if none matches.  Callers are expected to
 * hold i_lock to keep the list stable — TODO confirm against callers.
 */
static struct pnfs_layout_segment *
pnfs_find_lseg(struct pnfs_layout_hdr *lo,
		struct pnfs_layout_range *range,
		bool strict_iomode)
{
	struct pnfs_layout_segment *lseg, *ret = NULL;

	dprintk("%s:Begin\n", __func__);

	list_for_each_entry(lseg, &lo->plh_segs, pls_list) {
		if (test_bit(NFS_LSEG_VALID, &lseg->pls_flags) &&
		    pnfs_lseg_range_match(&lseg->pls_range, range,
					  strict_iomode)) {
			/* Pin the segment for the caller */
			ret = pnfs_get_lseg(lseg);
			break;
		}
	}

	dprintk("%s:Return lseg %p ref %d\n",
		__func__, ret, ret ? refcount_read(&ret->pls_refcount) : 0);
	return ret;
}
|
|
|
|
|
2012-05-23 13:02:37 +04:00
|
|
|
/*
|
|
|
|
* Use mdsthreshold hints set at each OPEN to determine if I/O should go
|
|
|
|
* to the MDS or over pNFS
|
|
|
|
*
|
|
|
|
* The nfs_inode read_io and write_io fields are cumulative counters reset
|
|
|
|
* when there are no layout segments. Note that in pnfs_update_layout iomode
|
|
|
|
* is set to IOMODE_READ for a READ request, and set to IOMODE_RW for a
|
|
|
|
* WRITE request.
|
|
|
|
*
|
|
|
|
* A return of true means use MDS I/O.
|
|
|
|
*
|
|
|
|
* From rfc 5661:
|
|
|
|
* If a file's size is smaller than the file size threshold, data accesses
|
|
|
|
* SHOULD be sent to the metadata server. If an I/O request has a length that
|
|
|
|
* is below the I/O size threshold, the I/O SHOULD be sent to the metadata
|
|
|
|
* server. If both file size and I/O size are provided, the client SHOULD
|
|
|
|
* reach or exceed both thresholds before sending its read or write
|
|
|
|
* requests to the data server.
|
|
|
|
*/
|
|
|
|
/*
 * Evaluate the mdsthreshold hints for this open context (see the block
 * comment above for the RFC 5661 semantics).
 *
 * Returns true when I/O should go to the MDS instead of over pNFS:
 * i.e. when every threshold the server set for this iomode is unmet.
 */
static bool pnfs_within_mdsthreshold(struct nfs_open_context *ctx,
				     struct inode *ino, int iomode)
{
	struct nfs4_threshold *t = ctx->mdsthreshold;
	struct nfs_inode *nfsi = NFS_I(ino);
	loff_t fsize = i_size_read(ino);
	/* size/io: "below threshold"; *_set: threshold was supplied */
	bool size = false, size_set = false, io = false, io_set = false, ret = false;

	if (t == NULL)
		/* No hints: use pNFS */
		return ret;

	dprintk("%s bm=0x%x rd_sz=%llu wr_sz=%llu rd_io=%llu wr_io=%llu\n",
		__func__, t->bm, t->rd_sz, t->wr_sz, t->rd_io_sz, t->wr_io_sz);

	switch (iomode) {
	case IOMODE_READ:
		if (t->bm & THRESHOLD_RD) {
			dprintk("%s fsize %llu\n", __func__, fsize);
			size_set = true;
			if (fsize < t->rd_sz)
				size = true;
		}
		if (t->bm & THRESHOLD_RD_IO) {
			dprintk("%s nfsi->read_io %llu\n", __func__,
				nfsi->read_io);
			io_set = true;
			if (nfsi->read_io < t->rd_io_sz)
				io = true;
		}
		break;
	case IOMODE_RW:
		if (t->bm & THRESHOLD_WR) {
			dprintk("%s fsize %llu\n", __func__, fsize);
			size_set = true;
			if (fsize < t->wr_sz)
				size = true;
		}
		if (t->bm & THRESHOLD_WR_IO) {
			dprintk("%s nfsi->write_io %llu\n", __func__,
				nfsi->write_io);
			io_set = true;
			if (nfsi->write_io < t->wr_io_sz)
				io = true;
		}
		break;
	}
	/* Both thresholds set: both must be unmet to prefer the MDS;
	 * otherwise a single unmet threshold suffices. */
	if (size_set && io_set) {
		if (size && io)
			ret = true;
	} else if (size || io)
		ret = true;

	dprintk("<-- %s size %d io %d ret %d\n", __func__, size, io, ret);
	return ret;
}
|
|
|
|
|
2018-09-05 21:07:12 +03:00
|
|
|
/*
 * Wait for an outstanding layoutreturn to finish before retrying a
 * LAYOUTGET.  Returns 0 once NFS_LAYOUT_RETURN clears, or a negative
 * error if the (killable) wait is interrupted.
 */
static int pnfs_prepare_to_retry_layoutget(struct pnfs_layout_hdr *lo)
{
	/*
	 * send layoutcommit as it can hold up layoutreturn due to lseg
	 * reference
	 */
	pnfs_layoutcommit_inode(lo->plh_inode, false);
	return wait_on_bit_action(&lo->plh_flags, NFS_LAYOUT_RETURN,
				  nfs_wait_bit_killable,
				  TASK_KILLABLE);
}
|
|
|
|
|
2018-06-24 05:54:33 +03:00
|
|
|
/* Account the start of a LAYOUTGET against the layout header. */
static void nfs_layoutget_begin(struct pnfs_layout_hdr *lo)
{
	atomic_inc(&lo->plh_outstanding);
}
|
|
|
|
|
|
|
|
/*
 * Account the completion of a LAYOUTGET; wakes anyone waiting for the
 * outstanding count to drain (wait_var_event on plh_outstanding).
 */
static void nfs_layoutget_end(struct pnfs_layout_hdr *lo)
{
	if (atomic_dec_and_test(&lo->plh_outstanding))
		wake_up_var(&lo->plh_outstanding);
}
|
|
|
|
|
2021-01-22 18:05:51 +03:00
|
|
|
/* True while the initial LAYOUTGET for this header is still in flight. */
static bool pnfs_is_first_layoutget(struct pnfs_layout_hdr *lo)
{
	return test_bit(NFS_LAYOUT_FIRST_LAYOUTGET, &lo->plh_flags);
}
|
|
|
|
|
2014-12-12 01:02:04 +03:00
|
|
|
/*
 * Clear the FIRST_LAYOUTGET flag and wake bit-waiters.
 * clear_bit_unlock + the barrier ensure the flag update is visible
 * before any sleeping waiter is woken.
 */
static void pnfs_clear_first_layoutget(struct pnfs_layout_hdr *lo)
{
	unsigned long *bitlock = &lo->plh_flags;

	clear_bit_unlock(NFS_LAYOUT_FIRST_LAYOUTGET, bitlock);
	smp_mb__after_atomic();
	wake_up_bit(bitlock, NFS_LAYOUT_FIRST_LAYOUTGET);
}
|
|
|
|
|
2016-09-22 19:30:20 +03:00
|
|
|
/*
 * Hash @lo onto the server's layout list exactly once (guarded by the
 * NFS_LAYOUT_HASHED flag), so layout-recall callbacks can find it.
 */
static void _add_to_server_list(struct pnfs_layout_hdr *lo,
				struct nfs_server *server)
{
	if (!test_and_set_bit(NFS_LAYOUT_HASHED, &lo->plh_flags)) {
		struct nfs_client *clp = server->nfs_client;

		/* The lo must be on the clp list if there is any
		 * chance of a CB_LAYOUTRECALL(FILE) coming in.
		 */
		spin_lock(&clp->cl_lock);
		list_add_tail_rcu(&lo->plh_layouts, &server->layouts);
		spin_unlock(&clp->cl_lock);
	}
}
|
|
|
|
|
2010-10-20 08:18:01 +04:00
|
|
|
/*
|
|
|
|
* Layout segment is retreived from the server if not cached.
|
|
|
|
* The appropriate layout segment is referenced and returned to the caller.
|
|
|
|
*/
|
2011-06-14 02:22:38 +04:00
|
|
|
struct pnfs_layout_segment *
|
2010-10-20 08:18:01 +04:00
|
|
|
pnfs_update_layout(struct inode *ino,
|
|
|
|
struct nfs_open_context *ctx,
|
2011-05-22 20:47:26 +04:00
|
|
|
loff_t pos,
|
|
|
|
u64 count,
|
2011-05-12 02:00:51 +04:00
|
|
|
enum pnfs_iomode iomode,
|
2016-05-25 17:31:14 +03:00
|
|
|
bool strict_iomode,
|
2011-05-12 02:00:51 +04:00
|
|
|
gfp_t gfp_flags)
|
2010-10-20 08:18:01 +04:00
|
|
|
{
|
2011-05-22 20:47:26 +04:00
|
|
|
struct pnfs_layout_range arg = {
|
|
|
|
.iomode = iomode,
|
|
|
|
.offset = pos,
|
|
|
|
.length = count,
|
|
|
|
};
|
2017-09-11 20:09:37 +03:00
|
|
|
unsigned pg_offset;
|
2011-06-02 00:44:44 +04:00
|
|
|
struct nfs_server *server = NFS_SERVER(ino);
|
|
|
|
struct nfs_client *clp = server->nfs_client;
|
pnfs: rework LAYOUTGET retry handling
There are several problems in the way a stateid is selected for a
LAYOUTGET operation:
We pick a stateid to use in the RPC prepare op, but that makes
it difficult to serialize LAYOUTGETs that use the open stateid. That
serialization is done in pnfs_update_layout, which occurs well before
the rpc_prepare operation.
Between those two events, the i_lock is dropped and reacquired.
pnfs_update_layout can find that the list has lsegs in it and not do any
serialization, but then later pnfs_choose_layoutget_stateid ends up
choosing the open stateid.
This patch changes the client to select the stateid to use in the
LAYOUTGET earlier, when we're searching for a usable layout segment.
This way we can do it all while holding the i_lock the first time, and
ensure that we serialize any LAYOUTGET call that uses a non-layout
stateid.
This also means a rework of how LAYOUTGET replies are handled, as we
must now get the latest stateid if we want to retransmit in response
to a retryable error.
Most of those errors boil down to the fact that the layout state has
changed in some fashion. Thus, what we really want to do is to re-search
for a layout when it fails with a retryable error, so that we can avoid
reissuing the RPC at all if possible.
While the LAYOUTGET RPC is async, the initiating thread always waits for
it to complete, so it's effectively synchronous anyway. Currently, when
we need to retry a LAYOUTGET because of an error, we drive that retry
via the rpc state machine.
This means that once the call has been submitted, it runs until it
completes. So, we must move the error handling for this RPC out of the
rpc_call_done operation and into the caller.
In order to handle errors like NFS4ERR_DELAY properly, we must also
pass a pointer to the sliding timeout, which is now moved to the stack
in pnfs_update_layout.
The complicating errors are -NFS4ERR_RECALLCONFLICT and
-NFS4ERR_LAYOUTTRYLATER, as those involve a timeout after which we give
up and return NULL back to the caller. So, there is some special
handling for those errors to ensure that the layers driving the retries
can handle that appropriately.
Signed-off-by: Jeff Layton <jeff.layton@primarydata.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
2016-05-17 19:28:47 +03:00
|
|
|
struct pnfs_layout_hdr *lo = NULL;
|
2010-10-20 08:18:01 +04:00
|
|
|
struct pnfs_layout_segment *lseg = NULL;
|
2016-09-21 12:14:28 +03:00
|
|
|
struct nfs4_layoutget *lgp;
|
pnfs: rework LAYOUTGET retry handling
There are several problems in the way a stateid is selected for a
LAYOUTGET operation:
We pick a stateid to use in the RPC prepare op, but that makes
it difficult to serialize LAYOUTGETs that use the open stateid. That
serialization is done in pnfs_update_layout, which occurs well before
the rpc_prepare operation.
Between those two events, the i_lock is dropped and reacquired.
pnfs_update_layout can find that the list has lsegs in it and not do any
serialization, but then later pnfs_choose_layoutget_stateid ends up
choosing the open stateid.
This patch changes the client to select the stateid to use in the
LAYOUTGET earlier, when we're searching for a usable layout segment.
This way we can do it all while holding the i_lock the first time, and
ensure that we serialize any LAYOUTGET call that uses a non-layout
stateid.
This also means a rework of how LAYOUTGET replies are handled, as we
must now get the latest stateid if we want to retransmit in response
to a retryable error.
Most of those errors boil down to the fact that the layout state has
changed in some fashion. Thus, what we really want to do is to re-search
for a layout when it fails with a retryable error, so that we can avoid
reissuing the RPC at all if possible.
While the LAYOUTGET RPC is async, the initiating thread always waits for
it to complete, so it's effectively synchronous anyway. Currently, when
we need to retry a LAYOUTGET because of an error, we drive that retry
via the rpc state machine.
This means that once the call has been submitted, it runs until it
completes. So, we must move the error handling for this RPC out of the
rpc_call_done operation and into the caller.
In order to handle errors like NFS4ERR_DELAY properly, we must also
pass a pointer to the sliding timeout, which is now moved to the stack
in pnfs_update_layout.
The complicating errors are -NFS4ERR_RECALLCONFLICT and
-NFS4ERR_LAYOUTTRYLATER, as those involve a timeout after which we give
up and return NULL back to the caller. So, there is some special
handling for those errors to ensure that the layers driving the retries
can handle that appropriately.
Signed-off-by: Jeff Layton <jeff.layton@primarydata.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
2016-05-17 19:28:47 +03:00
|
|
|
nfs4_stateid stateid;
|
|
|
|
long timeout = 0;
|
2016-07-14 21:28:31 +03:00
|
|
|
unsigned long giveup = jiffies + (clp->cl_lease_time << 1);
|
2013-03-01 05:30:10 +04:00
|
|
|
bool first;
|
2010-10-20 08:18:01 +04:00
|
|
|
|
2015-12-10 18:41:58 +03:00
|
|
|
if (!pnfs_enabled_sb(NFS_SERVER(ino))) {
|
pnfs: rework LAYOUTGET retry handling
There are several problems in the way a stateid is selected for a
LAYOUTGET operation:
We pick a stateid to use in the RPC prepare op, but that makes
it difficult to serialize LAYOUTGETs that use the open stateid. That
serialization is done in pnfs_update_layout, which occurs well before
the rpc_prepare operation.
Between those two events, the i_lock is dropped and reacquired.
pnfs_update_layout can find that the list has lsegs in it and not do any
serialization, but then later pnfs_choose_layoutget_stateid ends up
choosing the open stateid.
This patch changes the client to select the stateid to use in the
LAYOUTGET earlier, when we're searching for a usable layout segment.
This way we can do it all while holding the i_lock the first time, and
ensure that we serialize any LAYOUTGET call that uses a non-layout
stateid.
This also means a rework of how LAYOUTGET replies are handled, as we
must now get the latest stateid if we want to retransmit in response
to a retryable error.
Most of those errors boil down to the fact that the layout state has
changed in some fashion. Thus, what we really want to do is to re-search
for a layout when it fails with a retryable error, so that we can avoid
reissuing the RPC at all if possible.
While the LAYOUTGET RPC is async, the initiating thread always waits for
it to complete, so it's effectively synchronous anyway. Currently, when
we need to retry a LAYOUTGET because of an error, we drive that retry
via the rpc state machine.
This means that once the call has been submitted, it runs until it
completes. So, we must move the error handling for this RPC out of the
rpc_call_done operation and into the caller.
In order to handle errors like NFS4ERR_DELAY properly, we must also
pass a pointer to the sliding timeout, which is now moved to the stack
in pnfs_update_layout.
The complicating errors are -NFS4ERR_RECALLCONFLICT and
-NFS4ERR_LAYOUTTRYLATER, as those involve a timeout after which we give
up and return NULL back to the caller. So, there is some special
handling for those errors to ensure that the layers driving the retries
can handle that appropriately.
Signed-off-by: Jeff Layton <jeff.layton@primarydata.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
2016-05-17 19:28:47 +03:00
|
|
|
trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
|
2015-12-10 18:41:58 +03:00
|
|
|
PNFS_UPDATE_LAYOUT_NO_PNFS);
|
2012-09-26 19:21:40 +04:00
|
|
|
goto out;
|
2015-12-10 18:41:58 +03:00
|
|
|
}
|
2012-05-23 13:02:37 +04:00
|
|
|
|
2015-12-10 18:41:58 +03:00
|
|
|
if (pnfs_within_mdsthreshold(ctx, ino, iomode)) {
|
pnfs: rework LAYOUTGET retry handling
There are several problems in the way a stateid is selected for a
LAYOUTGET operation:
We pick a stateid to use in the RPC prepare op, but that makes
it difficult to serialize LAYOUTGETs that use the open stateid. That
serialization is done in pnfs_update_layout, which occurs well before
the rpc_prepare operation.
Between those two events, the i_lock is dropped and reacquired.
pnfs_update_layout can find that the list has lsegs in it and not do any
serialization, but then later pnfs_choose_layoutget_stateid ends up
choosing the open stateid.
This patch changes the client to select the stateid to use in the
LAYOUTGET earlier, when we're searching for a usable layout segment.
This way we can do it all while holding the i_lock the first time, and
ensure that we serialize any LAYOUTGET call that uses a non-layout
stateid.
This also means a rework of how LAYOUTGET replies are handled, as we
must now get the latest stateid if we want to retransmit in response
to a retryable error.
Most of those errors boil down to the fact that the layout state has
changed in some fashion. Thus, what we really want to do is to re-search
for a layout when it fails with a retryable error, so that we can avoid
reissuing the RPC at all if possible.
While the LAYOUTGET RPC is async, the initiating thread always waits for
it to complete, so it's effectively synchronous anyway. Currently, when
we need to retry a LAYOUTGET because of an error, we drive that retry
via the rpc state machine.
This means that once the call has been submitted, it runs until it
completes. So, we must move the error handling for this RPC out of the
rpc_call_done operation and into the caller.
In order to handle errors like NFS4ERR_DELAY properly, we must also
pass a pointer to the sliding timeout, which is now moved to the stack
in pnfs_update_layout.
The complicating errors are -NFS4ERR_RECALLCONFLICT and
-NFS4ERR_LAYOUTTRYLATER, as those involve a timeout after which we give
up and return NULL back to the caller. So, there is some special
handling for those errors to ensure that the layers driving the retries
can handle that appropriately.
Signed-off-by: Jeff Layton <jeff.layton@primarydata.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
2016-05-17 19:28:47 +03:00
|
|
|
trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
|
2015-12-10 18:41:58 +03:00
|
|
|
PNFS_UPDATE_LAYOUT_MDSTHRESH);
|
2012-09-26 19:21:40 +04:00
|
|
|
goto out;
|
2015-12-10 18:41:58 +03:00
|
|
|
}
|
2012-05-23 13:02:37 +04:00
|
|
|
|
2014-08-22 13:37:41 +04:00
|
|
|
lookup_again:
|
2018-09-05 21:07:12 +03:00
|
|
|
lseg = ERR_PTR(nfs4_client_recover_expired_lease(clp));
|
|
|
|
if (IS_ERR(lseg))
|
|
|
|
goto out;
|
2014-08-22 13:37:41 +04:00
|
|
|
first = false;
|
2010-10-20 08:18:01 +04:00
|
|
|
spin_lock(&ino->i_lock);
|
2011-07-31 04:52:32 +04:00
|
|
|
lo = pnfs_find_alloc_layout(ino, ctx, gfp_flags);
|
2012-09-21 05:25:19 +04:00
|
|
|
if (lo == NULL) {
|
|
|
|
spin_unlock(&ino->i_lock);
|
pnfs: rework LAYOUTGET retry handling
There are several problems in the way a stateid is selected for a
LAYOUTGET operation:
We pick a stateid to use in the RPC prepare op, but that makes
it difficult to serialize LAYOUTGETs that use the open stateid. That
serialization is done in pnfs_update_layout, which occurs well before
the rpc_prepare operation.
Between those two events, the i_lock is dropped and reacquired.
pnfs_update_layout can find that the list has lsegs in it and not do any
serialization, but then later pnfs_choose_layoutget_stateid ends up
choosing the open stateid.
This patch changes the client to select the stateid to use in the
LAYOUTGET earlier, when we're searching for a usable layout segment.
This way we can do it all while holding the i_lock the first time, and
ensure that we serialize any LAYOUTGET call that uses a non-layout
stateid.
This also means a rework of how LAYOUTGET replies are handled, as we
must now get the latest stateid if we want to retransmit in response
to a retryable error.
Most of those errors boil down to the fact that the layout state has
changed in some fashion. Thus, what we really want to do is to re-search
for a layout when it fails with a retryable error, so that we can avoid
reissuing the RPC at all if possible.
While the LAYOUTGET RPC is async, the initiating thread always waits for
it to complete, so it's effectively synchronous anyway. Currently, when
we need to retry a LAYOUTGET because of an error, we drive that retry
via the rpc state machine.
This means that once the call has been submitted, it runs until it
completes. So, we must move the error handling for this RPC out of the
rpc_call_done operation and into the caller.
In order to handle errors like NFS4ERR_DELAY properly, we must also
pass a pointer to the sliding timeout, which is now moved to the stack
in pnfs_update_layout.
The complicating errors are -NFS4ERR_RECALLCONFLICT and
-NFS4ERR_LAYOUTTRYLATER, as those involve a timeout after which we give
up and return NULL back to the caller. So, there is some special
handling for those errors to ensure that the layers driving the retries
can handle that appropriately.
Signed-off-by: Jeff Layton <jeff.layton@primarydata.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
2016-05-17 19:28:47 +03:00
|
|
|
trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
|
2015-12-10 18:41:58 +03:00
|
|
|
PNFS_UPDATE_LAYOUT_NOMEM);
|
2012-09-21 05:25:19 +04:00
|
|
|
goto out;
|
|
|
|
}
|
2010-10-20 08:18:01 +04:00
|
|
|
|
2011-01-06 14:36:30 +03:00
|
|
|
/* Do we even need to bother with this? */
|
2012-03-01 20:17:47 +04:00
|
|
|
if (test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) {
|
pnfs: rework LAYOUTGET retry handling
There are several problems in the way a stateid is selected for a
LAYOUTGET operation:
We pick a stateid to use in the RPC prepare op, but that makes
it difficult to serialize LAYOUTGETs that use the open stateid. That
serialization is done in pnfs_update_layout, which occurs well before
the rpc_prepare operation.
Between those two events, the i_lock is dropped and reacquired.
pnfs_update_layout can find that the list has lsegs in it and not do any
serialization, but then later pnfs_choose_layoutget_stateid ends up
choosing the open stateid.
This patch changes the client to select the stateid to use in the
LAYOUTGET earlier, when we're searching for a usable layout segment.
This way we can do it all while holding the i_lock the first time, and
ensure that we serialize any LAYOUTGET call that uses a non-layout
stateid.
This also means a rework of how LAYOUTGET replies are handled, as we
must now get the latest stateid if we want to retransmit in response
to a retryable error.
Most of those errors boil down to the fact that the layout state has
changed in some fashion. Thus, what we really want to do is to re-search
for a layout when it fails with a retryable error, so that we can avoid
reissuing the RPC at all if possible.
While the LAYOUTGET RPC is async, the initiating thread always waits for
it to complete, so it's effectively synchronous anyway. Currently, when
we need to retry a LAYOUTGET because of an error, we drive that retry
via the rpc state machine.
This means that once the call has been submitted, it runs until it
completes. So, we must move the error handling for this RPC out of the
rpc_call_done operation and into the caller.
In order to handle errors like NFS4ERR_DELAY properly, we must also
pass a pointer to the sliding timeout, which is now moved to the stack
in pnfs_update_layout.
The complicating errors are -NFS4ERR_RECALLCONFLICT and
-NFS4ERR_LAYOUTTRYLATER, as those involve a timeout after which we give
up and return NULL back to the caller. So, there is some special
handling for those errors to ensure that the layers driving the retries
can handle that appropriately.
Signed-off-by: Jeff Layton <jeff.layton@primarydata.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
2016-05-17 19:28:47 +03:00
|
|
|
trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
|
2015-12-10 18:41:58 +03:00
|
|
|
PNFS_UPDATE_LAYOUT_BULK_RECALL);
|
2011-01-06 14:36:30 +03:00
|
|
|
dprintk("%s matches recall, use MDS\n", __func__);
|
2010-10-20 08:18:01 +04:00
|
|
|
goto out_unlock;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* if LAYOUTGET already failed once we don't try again */
|
2015-12-15 00:25:11 +03:00
|
|
|
if (pnfs_layout_io_test_failed(lo, iomode)) {
|
pnfs: rework LAYOUTGET retry handling
There are several problems in the way a stateid is selected for a
LAYOUTGET operation:
We pick a stateid to use in the RPC prepare op, but that makes
it difficult to serialize LAYOUTGETs that use the open stateid. That
serialization is done in pnfs_update_layout, which occurs well before
the rpc_prepare operation.
Between those two events, the i_lock is dropped and reacquired.
pnfs_update_layout can find that the list has lsegs in it and not do any
serialization, but then later pnfs_choose_layoutget_stateid ends up
choosing the open stateid.
This patch changes the client to select the stateid to use in the
LAYOUTGET earlier, when we're searching for a usable layout segment.
This way we can do it all while holding the i_lock the first time, and
ensure that we serialize any LAYOUTGET call that uses a non-layout
stateid.
This also means a rework of how LAYOUTGET replies are handled, as we
must now get the latest stateid if we want to retransmit in response
to a retryable error.
Most of those errors boil down to the fact that the layout state has
changed in some fashion. Thus, what we really want to do is to re-search
for a layout when it fails with a retryable error, so that we can avoid
reissuing the RPC at all if possible.
While the LAYOUTGET RPC is async, the initiating thread always waits for
it to complete, so it's effectively synchronous anyway. Currently, when
we need to retry a LAYOUTGET because of an error, we drive that retry
via the rpc state machine.
This means that once the call has been submitted, it runs until it
completes. So, we must move the error handling for this RPC out of the
rpc_call_done operation and into the caller.
In order to handle errors like NFS4ERR_DELAY properly, we must also
pass a pointer to the sliding timeout, which is now moved to the stack
in pnfs_update_layout.
The complicating errors are -NFS4ERR_RECALLCONFLICT and
-NFS4ERR_LAYOUTTRYLATER, as those involve a timeout after which we give
up and return NULL back to the caller. So, there is some special
handling for those errors to ensure that the layers driving the retries
can handle that appropriately.
Signed-off-by: Jeff Layton <jeff.layton@primarydata.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
2016-05-17 19:28:47 +03:00
|
|
|
trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
|
2015-12-10 18:41:58 +03:00
|
|
|
PNFS_UPDATE_LAYOUT_IO_TEST_FAIL);
|
2010-10-20 08:18:01 +04:00
|
|
|
goto out_unlock;
|
2015-12-10 18:41:58 +03:00
|
|
|
}
|
2010-10-20 08:18:01 +04:00
|
|
|
|
2018-06-24 05:54:33 +03:00
|
|
|
/*
|
|
|
|
* If the layout segment list is empty, but there are outstanding
|
|
|
|
* layoutget calls, then they might be subject to a layoutrecall.
|
|
|
|
*/
|
2021-07-03 02:48:41 +03:00
|
|
|
if ((list_empty(&lo->plh_segs) || !pnfs_layout_is_valid(lo)) &&
|
2018-06-24 05:54:33 +03:00
|
|
|
atomic_read(&lo->plh_outstanding) != 0) {
|
|
|
|
spin_unlock(&ino->i_lock);
|
2018-09-05 21:07:12 +03:00
|
|
|
lseg = ERR_PTR(wait_var_event_killable(&lo->plh_outstanding,
|
2019-03-12 23:04:51 +03:00
|
|
|
!atomic_read(&lo->plh_outstanding)));
|
2019-07-18 22:33:42 +03:00
|
|
|
if (IS_ERR(lseg))
|
2018-06-24 05:54:33 +03:00
|
|
|
goto out_put_layout_hdr;
|
|
|
|
pnfs_put_layout_hdr(lo);
|
|
|
|
goto lookup_again;
|
|
|
|
}
|
|
|
|
|
2021-01-05 14:43:45 +03:00
|
|
|
/*
|
|
|
|
* Because we free lsegs when sending LAYOUTRETURN, we need to wait
|
|
|
|
* for LAYOUTRETURN.
|
|
|
|
*/
|
|
|
|
if (test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags)) {
|
|
|
|
spin_unlock(&ino->i_lock);
|
|
|
|
dprintk("%s wait for layoutreturn\n", __func__);
|
|
|
|
lseg = ERR_PTR(pnfs_prepare_to_retry_layoutget(lo));
|
|
|
|
if (!IS_ERR(lseg)) {
|
|
|
|
pnfs_put_layout_hdr(lo);
|
|
|
|
dprintk("%s retrying\n", __func__);
|
|
|
|
trace_pnfs_update_layout(ino, pos, count, iomode, lo,
|
|
|
|
lseg,
|
|
|
|
PNFS_UPDATE_LAYOUT_RETRY);
|
|
|
|
goto lookup_again;
|
|
|
|
}
|
|
|
|
trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
|
|
|
|
PNFS_UPDATE_LAYOUT_RETURN);
|
|
|
|
goto out_put_layout_hdr;
|
|
|
|
}
|
|
|
|
|
2016-05-25 17:31:14 +03:00
|
|
|
lseg = pnfs_find_lseg(lo, &arg, strict_iomode);
|
pnfs: rework LAYOUTGET retry handling
There are several problems in the way a stateid is selected for a
LAYOUTGET operation:
We pick a stateid to use in the RPC prepare op, but that makes
it difficult to serialize LAYOUTGETs that use the open stateid. That
serialization is done in pnfs_update_layout, which occurs well before
the rpc_prepare operation.
Between those two events, the i_lock is dropped and reacquired.
pnfs_update_layout can find that the list has lsegs in it and not do any
serialization, but then later pnfs_choose_layoutget_stateid ends up
choosing the open stateid.
This patch changes the client to select the stateid to use in the
LAYOUTGET earlier, when we're searching for a usable layout segment.
This way we can do it all while holding the i_lock the first time, and
ensure that we serialize any LAYOUTGET call that uses a non-layout
stateid.
This also means a rework of how LAYOUTGET replies are handled, as we
must now get the latest stateid if we want to retransmit in response
to a retryable error.
Most of those errors boil down to the fact that the layout state has
changed in some fashion. Thus, what we really want to do is to re-search
for a layout when it fails with a retryable error, so that we can avoid
reissuing the RPC at all if possible.
While the LAYOUTGET RPC is async, the initiating thread always waits for
it to complete, so it's effectively synchronous anyway. Currently, when
we need to retry a LAYOUTGET because of an error, we drive that retry
via the rpc state machine.
This means that once the call has been submitted, it runs until it
completes. So, we must move the error handling for this RPC out of the
rpc_call_done operation and into the caller.
In order to handle errors like NFS4ERR_DELAY properly, we must also
pass a pointer to the sliding timeout, which is now moved to the stack
in pnfs_update_layout.
The complicating errors are -NFS4ERR_RECALLCONFLICT and
-NFS4ERR_LAYOUTTRYLATER, as those involve a timeout after which we give
up and return NULL back to the caller. So, there is some special
handling for those errors to ensure that the layers driving the retries
can handle that appropriately.
Signed-off-by: Jeff Layton <jeff.layton@primarydata.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
2016-05-17 19:28:47 +03:00
|
|
|
if (lseg) {
|
|
|
|
trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
|
|
|
|
PNFS_UPDATE_LAYOUT_FOUND_CACHED);
|
|
|
|
goto out_unlock;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Choose a stateid for the LAYOUTGET. If we don't have a layout
|
|
|
|
* stateid, or it has been invalidated, then we must use the open
|
|
|
|
* stateid.
|
|
|
|
*/
|
2016-06-17 23:48:19 +03:00
|
|
|
if (test_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags)) {
|
2019-07-16 22:38:28 +03:00
|
|
|
int status;
|
pnfs: rework LAYOUTGET retry handling
There are several problems in the way a stateid is selected for a
LAYOUTGET operation:
We pick a stateid to use in the RPC prepare op, but that makes
it difficult to serialize LAYOUTGETs that use the open stateid. That
serialization is done in pnfs_update_layout, which occurs well before
the rpc_prepare operation.
Between those two events, the i_lock is dropped and reacquired.
pnfs_update_layout can find that the list has lsegs in it and not do any
serialization, but then later pnfs_choose_layoutget_stateid ends up
choosing the open stateid.
This patch changes the client to select the stateid to use in the
LAYOUTGET earlier, when we're searching for a usable layout segment.
This way we can do it all while holding the i_lock the first time, and
ensure that we serialize any LAYOUTGET call that uses a non-layout
stateid.
This also means a rework of how LAYOUTGET replies are handled, as we
must now get the latest stateid if we want to retransmit in response
to a retryable error.
Most of those errors boil down to the fact that the layout state has
changed in some fashion. Thus, what we really want to do is to re-search
for a layout when it fails with a retryable error, so that we can avoid
reissuing the RPC at all if possible.
While the LAYOUTGET RPC is async, the initiating thread always waits for
it to complete, so it's effectively synchronous anyway. Currently, when
we need to retry a LAYOUTGET because of an error, we drive that retry
via the rpc state machine.
This means that once the call has been submitted, it runs until it
completes. So, we must move the error handling for this RPC out of the
rpc_call_done operation and into the caller.
In order to handle errors like NFS4ERR_DELAY properly, we must also
pass a pointer to the sliding timeout, which is now moved to the stack
in pnfs_update_layout.
The complicating errors are -NFS4ERR_RECALLCONFLICT and
-NFS4ERR_LAYOUTTRYLATER, as those involve a timeout after which we give
up and return NULL back to the caller. So, there is some special
handling for those errors to ensure that the layers driving the retries
can handle that appropriately.
Signed-off-by: Jeff Layton <jeff.layton@primarydata.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
2016-05-17 19:28:47 +03:00
|
|
|
|
|
|
|
/*
|
|
|
|
* The first layoutget for the file. Need to serialize per
|
2014-08-22 13:37:41 +04:00
|
|
|
* RFC 5661 Errata 3208.
|
|
|
|
*/
|
|
|
|
if (test_and_set_bit(NFS_LAYOUT_FIRST_LAYOUTGET,
|
|
|
|
&lo->plh_flags)) {
|
|
|
|
spin_unlock(&ino->i_lock);
|
2018-09-05 21:07:12 +03:00
|
|
|
lseg = ERR_PTR(wait_on_bit(&lo->plh_flags,
|
|
|
|
NFS_LAYOUT_FIRST_LAYOUTGET,
|
|
|
|
TASK_KILLABLE));
|
|
|
|
if (IS_ERR(lseg))
|
|
|
|
goto out_put_layout_hdr;
|
2014-08-22 13:37:41 +04:00
|
|
|
pnfs_put_layout_hdr(lo);
|
pnfs: rework LAYOUTGET retry handling
There are several problems in the way a stateid is selected for a
LAYOUTGET operation:
We pick a stateid to use in the RPC prepare op, but that makes
it difficult to serialize LAYOUTGETs that use the open stateid. That
serialization is done in pnfs_update_layout, which occurs well before
the rpc_prepare operation.
Between those two events, the i_lock is dropped and reacquired.
pnfs_update_layout can find that the list has lsegs in it and not do any
serialization, but then later pnfs_choose_layoutget_stateid ends up
choosing the open stateid.
This patch changes the client to select the stateid to use in the
LAYOUTGET earlier, when we're searching for a usable layout segment.
This way we can do it all while holding the i_lock the first time, and
ensure that we serialize any LAYOUTGET call that uses a non-layout
stateid.
This also means a rework of how LAYOUTGET replies are handled, as we
must now get the latest stateid if we want to retransmit in response
to a retryable error.
Most of those errors boil down to the fact that the layout state has
changed in some fashion. Thus, what we really want to do is to re-search
for a layout when it fails with a retryable error, so that we can avoid
reissuing the RPC at all if possible.
While the LAYOUTGET RPC is async, the initiating thread always waits for
it to complete, so it's effectively synchronous anyway. Currently, when
we need to retry a LAYOUTGET because of an error, we drive that retry
via the rpc state machine.
This means that once the call has been submitted, it runs until it
completes. So, we must move the error handling for this RPC out of the
rpc_call_done operation and into the caller.
In order to handle errors like NFS4ERR_DELAY properly, we must also
pass a pointer to the sliding timeout, which is now moved to the stack
in pnfs_update_layout.
The complicating errors are -NFS4ERR_RECALLCONFLICT and
-NFS4ERR_LAYOUTTRYLATER, as those involve a timeout after which we give
up and return NULL back to the caller. So, there is some special
handling for those errors to ensure that the layers driving the retries
can handle that appropriately.
Signed-off-by: Jeff Layton <jeff.layton@primarydata.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
2016-05-17 19:28:47 +03:00
|
|
|
dprintk("%s retrying\n", __func__);
|
2014-08-22 13:37:41 +04:00
|
|
|
goto lookup_again;
|
|
|
|
}
|
pnfs: rework LAYOUTGET retry handling
There are several problems in the way a stateid is selected for a
LAYOUTGET operation:
We pick a stateid to use in the RPC prepare op, but that makes
it difficult to serialize LAYOUTGETs that use the open stateid. That
serialization is done in pnfs_update_layout, which occurs well before
the rpc_prepare operation.
Between those two events, the i_lock is dropped and reacquired.
pnfs_update_layout can find that the list has lsegs in it and not do any
serialization, but then later pnfs_choose_layoutget_stateid ends up
choosing the open stateid.
This patch changes the client to select the stateid to use in the
LAYOUTGET earlier, when we're searching for a usable layout segment.
This way we can do it all while holding the i_lock the first time, and
ensure that we serialize any LAYOUTGET call that uses a non-layout
stateid.
This also means a rework of how LAYOUTGET replies are handled, as we
must now get the latest stateid if we want to retransmit in response
to a retryable error.
Most of those errors boil down to the fact that the layout state has
changed in some fashion. Thus, what we really want to do is to re-search
for a layout when it fails with a retryable error, so that we can avoid
reissuing the RPC at all if possible.
While the LAYOUTGET RPC is async, the initiating thread always waits for
it to complete, so it's effectively synchronous anyway. Currently, when
we need to retry a LAYOUTGET because of an error, we drive that retry
via the rpc state machine.
This means that once the call has been submitted, it runs until it
completes. So, we must move the error handling for this RPC out of the
rpc_call_done operation and into the caller.
In order to handle errors like NFS4ERR_DELAY properly, we must also
pass a pointer to the sliding timeout, which is now moved to the stack
in pnfs_update_layout.
The complicating errors are -NFS4ERR_RECALLCONFLICT and
-NFS4ERR_LAYOUTTRYLATER, as those involve a timeout after which we give
up and return NULL back to the caller. So, there is some special
handling for those errors to ensure that the layers driving the retries
can handle that appropriately.
Signed-off-by: Jeff Layton <jeff.layton@primarydata.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
2016-05-17 19:28:47 +03:00
|
|
|
|
2020-04-13 22:55:21 +03:00
|
|
|
spin_unlock(&ino->i_lock);
|
pnfs: rework LAYOUTGET retry handling
There are several problems in the way a stateid is selected for a
LAYOUTGET operation:
We pick a stateid to use in the RPC prepare op, but that makes
it difficult to serialize LAYOUTGETs that use the open stateid. That
serialization is done in pnfs_update_layout, which occurs well before
the rpc_prepare operation.
Between those two events, the i_lock is dropped and reacquired.
pnfs_update_layout can find that the list has lsegs in it and not do any
serialization, but then later pnfs_choose_layoutget_stateid ends up
choosing the open stateid.
This patch changes the client to select the stateid to use in the
LAYOUTGET earlier, when we're searching for a usable layout segment.
This way we can do it all while holding the i_lock the first time, and
ensure that we serialize any LAYOUTGET call that uses a non-layout
stateid.
This also means a rework of how LAYOUTGET replies are handled, as we
must now get the latest stateid if we want to retransmit in response
to a retryable error.
Most of those errors boil down to the fact that the layout state has
changed in some fashion. Thus, what we really want to do is to re-search
for a layout when it fails with a retryable error, so that we can avoid
reissuing the RPC at all if possible.
While the LAYOUTGET RPC is async, the initiating thread always waits for
it to complete, so it's effectively synchronous anyway. Currently, when
we need to retry a LAYOUTGET because of an error, we drive that retry
via the rpc state machine.
This means that once the call has been submitted, it runs until it
completes. So, we must move the error handling for this RPC out of the
rpc_call_done operation and into the caller.
In order to handle errors like NFS4ERR_DELAY properly, we must also
pass a pointer to the sliding timeout, which is now moved to the stack
in pnfs_update_layout.
The complicating errors are -NFS4ERR_RECALLCONFLICT and
-NFS4ERR_LAYOUTTRYLATER, as those involve a timeout after which we give
up and return NULL back to the caller. So, there is some special
handling for those errors to ensure that the layers driving the retries
can handle that appropriately.
Signed-off-by: Jeff Layton <jeff.layton@primarydata.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
2016-05-17 19:28:47 +03:00
|
|
|
first = true;
|
2019-07-16 22:38:28 +03:00
|
|
|
status = nfs4_select_rw_stateid(ctx->state,
|
2017-09-11 20:09:37 +03:00
|
|
|
iomode == IOMODE_RW ? FMODE_WRITE : FMODE_READ,
|
2019-07-16 22:38:28 +03:00
|
|
|
NULL, &stateid, NULL);
|
|
|
|
if (status != 0) {
|
2019-07-22 20:06:17 +03:00
|
|
|
lseg = ERR_PTR(status);
|
2017-09-11 20:09:37 +03:00
|
|
|
trace_pnfs_update_layout(ino, pos, count,
|
|
|
|
iomode, lo, lseg,
|
|
|
|
PNFS_UPDATE_LAYOUT_INVALID_OPEN);
|
2019-07-16 22:38:28 +03:00
|
|
|
nfs4_schedule_stateid_recovery(server, ctx->state);
|
|
|
|
pnfs_clear_first_layoutget(lo);
|
|
|
|
pnfs_put_layout_hdr(lo);
|
|
|
|
goto lookup_again;
|
2017-09-11 20:09:37 +03:00
|
|
|
}
|
2020-04-13 22:55:21 +03:00
|
|
|
spin_lock(&ino->i_lock);
|
2014-08-22 13:37:41 +04:00
|
|
|
} else {
|
pnfs: rework LAYOUTGET retry handling
There are several problems in the way a stateid is selected for a
LAYOUTGET operation:
We pick a stateid to use in the RPC prepare op, but that makes
it difficult to serialize LAYOUTGETs that use the open stateid. That
serialization is done in pnfs_update_layout, which occurs well before
the rpc_prepare operation.
Between those two events, the i_lock is dropped and reacquired.
pnfs_update_layout can find that the list has lsegs in it and not do any
serialization, but then later pnfs_choose_layoutget_stateid ends up
choosing the open stateid.
This patch changes the client to select the stateid to use in the
LAYOUTGET earlier, when we're searching for a usable layout segment.
This way we can do it all while holding the i_lock the first time, and
ensure that we serialize any LAYOUTGET call that uses a non-layout
stateid.
This also means a rework of how LAYOUTGET replies are handled, as we
must now get the latest stateid if we want to retransmit in response
to a retryable error.
Most of those errors boil down to the fact that the layout state has
changed in some fashion. Thus, what we really want to do is to re-search
for a layout when it fails with a retryable error, so that we can avoid
reissuing the RPC at all if possible.
While the LAYOUTGET RPC is async, the initiating thread always waits for
it to complete, so it's effectively synchronous anyway. Currently, when
we need to retry a LAYOUTGET because of an error, we drive that retry
via the rpc state machine.
This means that once the call has been submitted, it runs until it
completes. So, we must move the error handling for this RPC out of the
rpc_call_done operation and into the caller.
In order to handle errors like NFS4ERR_DELAY properly, we must also
pass a pointer to the sliding timeout, which is now moved to the stack
in pnfs_update_layout.
The complicating errors are -NFS4ERR_RECALLCONFLICT and
-NFS4ERR_LAYOUTTRYLATER, as those involve a timeout after which we give
up and return NULL back to the caller. So, there is some special
handling for those errors to ensure that the layers driving the retries
can handle that appropriately.
Signed-off-by: Jeff Layton <jeff.layton@primarydata.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
2016-05-17 19:28:47 +03:00
|
|
|
nfs4_stateid_copy(&stateid, &lo->plh_stateid);
|
2014-08-22 13:37:41 +04:00
|
|
|
}
|
2011-03-01 04:34:22 +03:00
|
|
|
|
2015-12-10 18:41:58 +03:00
|
|
|
if (pnfs_layoutgets_blocked(lo)) {
|
pnfs: rework LAYOUTGET retry handling
There are several problems in the way a stateid is selected for a
LAYOUTGET operation:
We pick a stateid to use in the RPC prepare op, but that makes
it difficult to serialize LAYOUTGETs that use the open stateid. That
serialization is done in pnfs_update_layout, which occurs well before
the rpc_prepare operation.
Between those two events, the i_lock is dropped and reacquired.
pnfs_update_layout can find that the list has lsegs in it and not do any
serialization, but then later pnfs_choose_layoutget_stateid ends up
choosing the open stateid.
This patch changes the client to select the stateid to use in the
LAYOUTGET earlier, when we're searching for a usable layout segment.
This way we can do it all while holding the i_lock the first time, and
ensure that we serialize any LAYOUTGET call that uses a non-layout
stateid.
This also means a rework of how LAYOUTGET replies are handled, as we
must now get the latest stateid if we want to retransmit in response
to a retryable error.
Most of those errors boil down to the fact that the layout state has
changed in some fashion. Thus, what we really want to do is to re-search
for a layout when it fails with a retryable error, so that we can avoid
reissuing the RPC at all if possible.
While the LAYOUTGET RPC is async, the initiating thread always waits for
it to complete, so it's effectively synchronous anyway. Currently, when
we need to retry a LAYOUTGET because of an error, we drive that retry
via the rpc state machine.
This means that once the call has been submitted, it runs until it
completes. So, we must move the error handling for this RPC out of the
rpc_call_done operation and into the caller.
In order to handle errors like NFS4ERR_DELAY properly, we must also
pass a pointer to the sliding timeout, which is now moved to the stack
in pnfs_update_layout.
The complicating errors are -NFS4ERR_RECALLCONFLICT and
-NFS4ERR_LAYOUTTRYLATER, as those involve a timeout after which we give
up and return NULL back to the caller. So, there is some special
handling for those errors to ensure that the layers driving the retries
can handle that appropriately.
Signed-off-by: Jeff Layton <jeff.layton@primarydata.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
2016-05-17 19:28:47 +03:00
|
|
|
trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
|
2015-12-10 18:41:58 +03:00
|
|
|
PNFS_UPDATE_LAYOUT_BLOCKED);
|
2011-01-06 14:36:25 +03:00
|
|
|
goto out_unlock;
|
2015-12-10 18:41:58 +03:00
|
|
|
}
|
2018-06-24 05:54:33 +03:00
|
|
|
nfs_layoutget_begin(lo);
|
2011-02-03 21:28:52 +03:00
|
|
|
spin_unlock(&ino->i_lock);
|
2013-03-01 05:30:10 +04:00
|
|
|
|
2016-09-22 19:30:20 +03:00
|
|
|
_add_to_server_list(lo, server);
|
2010-10-20 08:18:01 +04:00
|
|
|
|
mm, fs: get rid of PAGE_CACHE_* and page_cache_{get,release} macros
PAGE_CACHE_{SIZE,SHIFT,MASK,ALIGN} macros were introduced *long* time
ago with promise that one day it will be possible to implement page
cache with bigger chunks than PAGE_SIZE.
This promise never materialized. And unlikely will.
We have many places where PAGE_CACHE_SIZE assumed to be equal to
PAGE_SIZE. And it's constant source of confusion on whether
PAGE_CACHE_* or PAGE_* constant should be used in a particular case,
especially on the border between fs and mm.
Global switching to PAGE_CACHE_SIZE != PAGE_SIZE would cause to much
breakage to be doable.
Let's stop pretending that pages in page cache are special. They are
not.
The changes are pretty straight-forward:
- <foo> << (PAGE_CACHE_SHIFT - PAGE_SHIFT) -> <foo>;
- <foo> >> (PAGE_CACHE_SHIFT - PAGE_SHIFT) -> <foo>;
- PAGE_CACHE_{SIZE,SHIFT,MASK,ALIGN} -> PAGE_{SIZE,SHIFT,MASK,ALIGN};
- page_cache_get() -> get_page();
- page_cache_release() -> put_page();
This patch contains automated changes generated with coccinelle using
script below. For some reason, coccinelle doesn't patch header files.
I've called spatch for them manually.
The only adjustment after coccinelle is revert of changes to
PAGE_CAHCE_ALIGN definition: we are going to drop it later.
There are few places in the code where coccinelle didn't reach. I'll
fix them manually in a separate patch. Comments and documentation also
will be addressed with the separate patch.
virtual patch
@@
expression E;
@@
- E << (PAGE_CACHE_SHIFT - PAGE_SHIFT)
+ E
@@
expression E;
@@
- E >> (PAGE_CACHE_SHIFT - PAGE_SHIFT)
+ E
@@
@@
- PAGE_CACHE_SHIFT
+ PAGE_SHIFT
@@
@@
- PAGE_CACHE_SIZE
+ PAGE_SIZE
@@
@@
- PAGE_CACHE_MASK
+ PAGE_MASK
@@
expression E;
@@
- PAGE_CACHE_ALIGN(E)
+ PAGE_ALIGN(E)
@@
expression E;
@@
- page_cache_get(E)
+ get_page(E)
@@
expression E;
@@
- page_cache_release(E)
+ put_page(E)
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2016-04-01 15:29:47 +03:00
|
|
|
pg_offset = arg.offset & ~PAGE_MASK;
|
2011-05-22 20:47:46 +04:00
|
|
|
if (pg_offset) {
|
|
|
|
arg.offset -= pg_offset;
|
|
|
|
arg.length += pg_offset;
|
|
|
|
}
|
2011-06-14 02:22:38 +04:00
|
|
|
if (arg.length != NFS4_MAX_UINT64)
|
mm, fs: get rid of PAGE_CACHE_* and page_cache_{get,release} macros
PAGE_CACHE_{SIZE,SHIFT,MASK,ALIGN} macros were introduced *long* time
ago with promise that one day it will be possible to implement page
cache with bigger chunks than PAGE_SIZE.
This promise never materialized. And unlikely will.
We have many places where PAGE_CACHE_SIZE assumed to be equal to
PAGE_SIZE. And it's constant source of confusion on whether
PAGE_CACHE_* or PAGE_* constant should be used in a particular case,
especially on the border between fs and mm.
Global switching to PAGE_CACHE_SIZE != PAGE_SIZE would cause to much
breakage to be doable.
Let's stop pretending that pages in page cache are special. They are
not.
The changes are pretty straight-forward:
- <foo> << (PAGE_CACHE_SHIFT - PAGE_SHIFT) -> <foo>;
- <foo> >> (PAGE_CACHE_SHIFT - PAGE_SHIFT) -> <foo>;
- PAGE_CACHE_{SIZE,SHIFT,MASK,ALIGN} -> PAGE_{SIZE,SHIFT,MASK,ALIGN};
- page_cache_get() -> get_page();
- page_cache_release() -> put_page();
This patch contains automated changes generated with coccinelle using
script below. For some reason, coccinelle doesn't patch header files.
I've called spatch for them manually.
The only adjustment after coccinelle is revert of changes to
PAGE_CAHCE_ALIGN definition: we are going to drop it later.
There are few places in the code where coccinelle didn't reach. I'll
fix them manually in a separate patch. Comments and documentation also
will be addressed with the separate patch.
virtual patch
@@
expression E;
@@
- E << (PAGE_CACHE_SHIFT - PAGE_SHIFT)
+ E
@@
expression E;
@@
- E >> (PAGE_CACHE_SHIFT - PAGE_SHIFT)
+ E
@@
@@
- PAGE_CACHE_SHIFT
+ PAGE_SHIFT
@@
@@
- PAGE_CACHE_SIZE
+ PAGE_SIZE
@@
@@
- PAGE_CACHE_MASK
+ PAGE_MASK
@@
expression E;
@@
- PAGE_CACHE_ALIGN(E)
+ PAGE_ALIGN(E)
@@
expression E;
@@
- page_cache_get(E)
+ get_page(E)
@@
expression E;
@@
- page_cache_release(E)
+ put_page(E)
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2016-04-01 15:29:47 +03:00
|
|
|
arg.length = PAGE_ALIGN(arg.length);
|
2011-05-22 20:47:46 +04:00
|
|
|
|
2016-10-06 19:08:51 +03:00
|
|
|
lgp = pnfs_alloc_init_layoutget_args(ino, ctx, &stateid, &arg, gfp_flags);
|
2016-09-21 12:14:28 +03:00
|
|
|
if (!lgp) {
|
|
|
|
trace_pnfs_update_layout(ino, pos, count, iomode, lo, NULL,
|
|
|
|
PNFS_UPDATE_LAYOUT_NOMEM);
|
2018-06-24 05:54:33 +03:00
|
|
|
nfs_layoutget_end(lo);
|
2016-09-21 12:14:28 +03:00
|
|
|
goto out_put_layout_hdr;
|
|
|
|
}
|
|
|
|
|
2021-07-03 00:24:22 +03:00
|
|
|
lgp->lo = lo;
|
|
|
|
pnfs_get_layout_hdr(lo);
|
|
|
|
|
2016-09-20 00:47:09 +03:00
|
|
|
lseg = nfs4_proc_layoutget(lgp, &timeout);
|
pnfs: rework LAYOUTGET retry handling
There are several problems in the way a stateid is selected for a
LAYOUTGET operation:
We pick a stateid to use in the RPC prepare op, but that makes
it difficult to serialize LAYOUTGETs that use the open stateid. That
serialization is done in pnfs_update_layout, which occurs well before
the rpc_prepare operation.
Between those two events, the i_lock is dropped and reacquired.
pnfs_update_layout can find that the list has lsegs in it and not do any
serialization, but then later pnfs_choose_layoutget_stateid ends up
choosing the open stateid.
This patch changes the client to select the stateid to use in the
LAYOUTGET earlier, when we're searching for a usable layout segment.
This way we can do it all while holding the i_lock the first time, and
ensure that we serialize any LAYOUTGET call that uses a non-layout
stateid.
This also means a rework of how LAYOUTGET replies are handled, as we
must now get the latest stateid if we want to retransmit in response
to a retryable error.
Most of those errors boil down to the fact that the layout state has
changed in some fashion. Thus, what we really want to do is to re-search
for a layout when it fails with a retryable error, so that we can avoid
reissuing the RPC at all if possible.
While the LAYOUTGET RPC is async, the initiating thread always waits for
it to complete, so it's effectively synchronous anyway. Currently, when
we need to retry a LAYOUTGET because of an error, we drive that retry
via the rpc state machine.
This means that once the call has been submitted, it runs until it
completes. So, we must move the error handling for this RPC out of the
rpc_call_done operation and into the caller.
In order to handle errors like NFS4ERR_DELAY properly, we must also
pass a pointer to the sliding timeout, which is now moved to the stack
in pnfs_update_layout.
The complicating errors are -NFS4ERR_RECALLCONFLICT and
-NFS4ERR_LAYOUTTRYLATER, as those involve a timeout after which we give
up and return NULL back to the caller. So, there is some special
handling for those errors to ensure that the layers driving the retries
can handle that appropriately.
Signed-off-by: Jeff Layton <jeff.layton@primarydata.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
2016-05-17 19:28:47 +03:00
|
|
|
trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
|
|
|
|
PNFS_UPDATE_LAYOUT_SEND_LAYOUTGET);
|
2018-06-24 05:54:33 +03:00
|
|
|
nfs_layoutget_end(lo);
|
2016-05-17 19:28:46 +03:00
|
|
|
if (IS_ERR(lseg)) {
|
pnfs: rework LAYOUTGET retry handling
There are several problems in the way a stateid is selected for a
LAYOUTGET operation:
We pick a stateid to use in the RPC prepare op, but that makes
it difficult to serialize LAYOUTGETs that use the open stateid. That
serialization is done in pnfs_update_layout, which occurs well before
the rpc_prepare operation.
Between those two events, the i_lock is dropped and reacquired.
pnfs_update_layout can find that the list has lsegs in it and not do any
serialization, but then later pnfs_choose_layoutget_stateid ends up
choosing the open stateid.
This patch changes the client to select the stateid to use in the
LAYOUTGET earlier, when we're searching for a usable layout segment.
This way we can do it all while holding the i_lock the first time, and
ensure that we serialize any LAYOUTGET call that uses a non-layout
stateid.
This also means a rework of how LAYOUTGET replies are handled, as we
must now get the latest stateid if we want to retransmit in response
to a retryable error.
Most of those errors boil down to the fact that the layout state has
changed in some fashion. Thus, what we really want to do is to re-search
for a layout when it fails with a retryable error, so that we can avoid
reissuing the RPC at all if possible.
While the LAYOUTGET RPC is async, the initiating thread always waits for
it to complete, so it's effectively synchronous anyway. Currently, when
we need to retry a LAYOUTGET because of an error, we drive that retry
via the rpc state machine.
This means that once the call has been submitted, it runs until it
completes. So, we must move the error handling for this RPC out of the
rpc_call_done operation and into the caller.
In order to handle errors like NFS4ERR_DELAY properly, we must also
pass a pointer to the sliding timeout, which is now moved to the stack
in pnfs_update_layout.
The complicating errors are -NFS4ERR_RECALLCONFLICT and
-NFS4ERR_LAYOUTTRYLATER, as those involve a timeout after which we give
up and return NULL back to the caller. So, there is some special
handling for those errors to ensure that the layers driving the retries
can handle that appropriately.
Signed-off-by: Jeff Layton <jeff.layton@primarydata.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
2016-05-17 19:28:47 +03:00
|
|
|
switch(PTR_ERR(lseg)) {
|
2016-07-15 01:46:24 +03:00
|
|
|
case -EBUSY:
|
pnfs: rework LAYOUTGET retry handling
There are several problems in the way a stateid is selected for a
LAYOUTGET operation:
We pick a stateid to use in the RPC prepare op, but that makes
it difficult to serialize LAYOUTGETs that use the open stateid. That
serialization is done in pnfs_update_layout, which occurs well before
the rpc_prepare operation.
Between those two events, the i_lock is dropped and reacquired.
pnfs_update_layout can find that the list has lsegs in it and not do any
serialization, but then later pnfs_choose_layoutget_stateid ends up
choosing the open stateid.
This patch changes the client to select the stateid to use in the
LAYOUTGET earlier, when we're searching for a usable layout segment.
This way we can do it all while holding the i_lock the first time, and
ensure that we serialize any LAYOUTGET call that uses a non-layout
stateid.
This also means a rework of how LAYOUTGET replies are handled, as we
must now get the latest stateid if we want to retransmit in response
to a retryable error.
Most of those errors boil down to the fact that the layout state has
changed in some fashion. Thus, what we really want to do is to re-search
for a layout when it fails with a retryable error, so that we can avoid
reissuing the RPC at all if possible.
While the LAYOUTGET RPC is async, the initiating thread always waits for
it to complete, so it's effectively synchronous anyway. Currently, when
we need to retry a LAYOUTGET because of an error, we drive that retry
via the rpc state machine.
This means that once the call has been submitted, it runs until it
completes. So, we must move the error handling for this RPC out of the
rpc_call_done operation and into the caller.
In order to handle errors like NFS4ERR_DELAY properly, we must also
pass a pointer to the sliding timeout, which is now moved to the stack
in pnfs_update_layout.
The complicating errors are -NFS4ERR_RECALLCONFLICT and
-NFS4ERR_LAYOUTTRYLATER, as those involve a timeout after which we give
up and return NULL back to the caller. So, there is some special
handling for those errors to ensure that the layers driving the retries
can handle that appropriately.
Signed-off-by: Jeff Layton <jeff.layton@primarydata.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
2016-05-17 19:28:47 +03:00
|
|
|
if (time_after(jiffies, giveup))
|
|
|
|
lseg = NULL;
|
2016-07-14 21:28:31 +03:00
|
|
|
break;
|
|
|
|
case -ERECALLCONFLICT:
|
pnfs: rework LAYOUTGET retry handling
There are several problems in the way a stateid is selected for a
LAYOUTGET operation:
We pick a stateid to use in the RPC prepare op, but that makes
it difficult to serialize LAYOUTGETs that use the open stateid. That
serialization is done in pnfs_update_layout, which occurs well before
the rpc_prepare operation.
Between those two events, the i_lock is dropped and reacquired.
pnfs_update_layout can find that the list has lsegs in it and not do any
serialization, but then later pnfs_choose_layoutget_stateid ends up
choosing the open stateid.
This patch changes the client to select the stateid to use in the
LAYOUTGET earlier, when we're searching for a usable layout segment.
This way we can do it all while holding the i_lock the first time, and
ensure that we serialize any LAYOUTGET call that uses a non-layout
stateid.
This also means a rework of how LAYOUTGET replies are handled, as we
must now get the latest stateid if we want to retransmit in response
to a retryable error.
Most of those errors boil down to the fact that the layout state has
changed in some fashion. Thus, what we really want to do is to re-search
for a layout when it fails with a retryable error, so that we can avoid
reissuing the RPC at all if possible.
While the LAYOUTGET RPC is async, the initiating thread always waits for
it to complete, so it's effectively synchronous anyway. Currently, when
we need to retry a LAYOUTGET because of an error, we drive that retry
via the rpc state machine.
This means that once the call has been submitted, it runs until it
completes. So, we must move the error handling for this RPC out of the
rpc_call_done operation and into the caller.
In order to handle errors like NFS4ERR_DELAY properly, we must also
pass a pointer to the sliding timeout, which is now moved to the stack
in pnfs_update_layout.
The complicating errors are -NFS4ERR_RECALLCONFLICT and
-NFS4ERR_LAYOUTTRYLATER, as those involve a timeout after which we give
up and return NULL back to the caller. So, there is some special
handling for those errors to ensure that the layers driving the retries
can handle that appropriately.
Signed-off-by: Jeff Layton <jeff.layton@primarydata.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
2016-05-17 19:28:47 +03:00
|
|
|
case -EAGAIN:
|
2016-07-15 01:34:12 +03:00
|
|
|
break;
|
pnfs: rework LAYOUTGET retry handling
There are several problems in the way a stateid is selected for a
LAYOUTGET operation:
We pick a stateid to use in the RPC prepare op, but that makes
it difficult to serialize LAYOUTGETs that use the open stateid. That
serialization is done in pnfs_update_layout, which occurs well before
the rpc_prepare operation.
Between those two events, the i_lock is dropped and reacquired.
pnfs_update_layout can find that the list has lsegs in it and not do any
serialization, but then later pnfs_choose_layoutget_stateid ends up
choosing the open stateid.
This patch changes the client to select the stateid to use in the
LAYOUTGET earlier, when we're searching for a usable layout segment.
This way we can do it all while holding the i_lock the first time, and
ensure that we serialize any LAYOUTGET call that uses a non-layout
stateid.
This also means a rework of how LAYOUTGET replies are handled, as we
must now get the latest stateid if we want to retransmit in response
to a retryable error.
Most of those errors boil down to the fact that the layout state has
changed in some fashion. Thus, what we really want to do is to re-search
for a layout when it fails with a retryable error, so that we can avoid
reissuing the RPC at all if possible.
While the LAYOUTGET RPC is async, the initiating thread always waits for
it to complete, so it's effectively synchronous anyway. Currently, when
we need to retry a LAYOUTGET because of an error, we drive that retry
via the rpc state machine.
This means that once the call has been submitted, it runs until it
completes. So, we must move the error handling for this RPC out of the
rpc_call_done operation and into the caller.
In order to handle errors like NFS4ERR_DELAY properly, we must also
pass a pointer to the sliding timeout, which is now moved to the stack
in pnfs_update_layout.
The complicating errors are -NFS4ERR_RECALLCONFLICT and
-NFS4ERR_LAYOUTTRYLATER, as those involve a timeout after which we give
up and return NULL back to the caller. So, there is some special
handling for those errors to ensure that the layers driving the retries
can handle that appropriately.
Signed-off-by: Jeff Layton <jeff.layton@primarydata.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
2016-05-17 19:28:47 +03:00
|
|
|
default:
|
|
|
|
if (!nfs_error_is_fatal(PTR_ERR(lseg))) {
|
|
|
|
pnfs_layout_clear_fail_bit(lo, pnfs_iomode_to_fail_bit(iomode));
|
|
|
|
lseg = NULL;
|
|
|
|
}
|
2016-07-15 01:34:12 +03:00
|
|
|
goto out_put_layout_hdr;
|
|
|
|
}
|
|
|
|
if (lseg) {
|
|
|
|
if (first)
|
|
|
|
pnfs_clear_first_layoutget(lo);
|
|
|
|
trace_pnfs_update_layout(ino, pos, count,
|
|
|
|
iomode, lo, lseg, PNFS_UPDATE_LAYOUT_RETRY);
|
|
|
|
pnfs_put_layout_hdr(lo);
|
|
|
|
goto lookup_again;
|
2016-05-17 19:28:46 +03:00
|
|
|
}
|
|
|
|
} else {
|
|
|
|
pnfs_layout_clear_fail_bit(lo, pnfs_iomode_to_fail_bit(iomode));
|
|
|
|
}
|
|
|
|
|
2012-09-21 05:25:19 +04:00
|
|
|
out_put_layout_hdr:
|
2014-12-12 01:02:04 +03:00
|
|
|
if (first)
|
|
|
|
pnfs_clear_first_layoutget(lo);
|
2019-07-18 16:32:17 +03:00
|
|
|
trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
|
|
|
|
PNFS_UPDATE_LAYOUT_EXIT);
|
2012-09-19 04:51:13 +04:00
|
|
|
pnfs_put_layout_hdr(lo);
|
2010-10-20 08:18:01 +04:00
|
|
|
out:
|
2012-09-26 19:21:40 +04:00
|
|
|
dprintk("%s: inode %s/%llu pNFS layout segment %s for "
|
|
|
|
"(%s, offset: %llu, length: %llu)\n",
|
|
|
|
__func__, ino->i_sb->s_id,
|
|
|
|
(unsigned long long)NFS_FILEID(ino),
|
2015-12-03 21:57:48 +03:00
|
|
|
IS_ERR_OR_NULL(lseg) ? "not found" : "found",
|
2012-09-26 19:21:40 +04:00
|
|
|
iomode==IOMODE_RW ? "read/write" : "read-only",
|
|
|
|
(unsigned long long)pos,
|
|
|
|
(unsigned long long)count);
|
2010-10-20 08:18:01 +04:00
|
|
|
return lseg;
|
|
|
|
out_unlock:
|
|
|
|
spin_unlock(&ino->i_lock);
|
2012-09-21 05:25:19 +04:00
|
|
|
goto out_put_layout_hdr;
|
2010-10-20 08:18:01 +04:00
|
|
|
}
|
2011-06-14 02:22:38 +04:00
|
|
|
EXPORT_SYMBOL_GPL(pnfs_update_layout);
|
2010-10-20 08:18:03 +04:00
|
|
|
|
2015-08-25 18:16:13 +03:00
|
|
|
static bool
|
|
|
|
pnfs_sanity_check_layout_range(struct pnfs_layout_range *range)
|
|
|
|
{
|
|
|
|
switch (range->iomode) {
|
|
|
|
case IOMODE_READ:
|
|
|
|
case IOMODE_RW:
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
if (range->offset == NFS4_MAX_UINT64)
|
|
|
|
return false;
|
|
|
|
if (range->length == 0)
|
|
|
|
return false;
|
|
|
|
if (range->length != NFS4_MAX_UINT64 &&
|
|
|
|
range->length > NFS4_MAX_UINT64 - range->offset)
|
|
|
|
return false;
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2016-09-22 19:30:20 +03:00
|
|
|
static struct pnfs_layout_hdr *
|
|
|
|
_pnfs_grab_empty_layout(struct inode *ino, struct nfs_open_context *ctx)
|
|
|
|
{
|
|
|
|
struct pnfs_layout_hdr *lo;
|
|
|
|
|
|
|
|
spin_lock(&ino->i_lock);
|
|
|
|
lo = pnfs_find_alloc_layout(ino, ctx, GFP_KERNEL);
|
|
|
|
if (!lo)
|
|
|
|
goto out_unlock;
|
|
|
|
if (!test_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags))
|
|
|
|
goto out_unlock;
|
|
|
|
if (test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags))
|
|
|
|
goto out_unlock;
|
|
|
|
if (pnfs_layoutgets_blocked(lo))
|
|
|
|
goto out_unlock;
|
|
|
|
if (test_and_set_bit(NFS_LAYOUT_FIRST_LAYOUTGET, &lo->plh_flags))
|
|
|
|
goto out_unlock;
|
2018-06-24 05:54:33 +03:00
|
|
|
nfs_layoutget_begin(lo);
|
2016-09-22 19:30:20 +03:00
|
|
|
spin_unlock(&ino->i_lock);
|
|
|
|
_add_to_server_list(lo, NFS_SERVER(ino));
|
|
|
|
return lo;
|
|
|
|
|
|
|
|
out_unlock:
|
|
|
|
spin_unlock(&ino->i_lock);
|
|
|
|
pnfs_put_layout_hdr(lo);
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2016-10-06 19:11:21 +03:00
|
|
|
static void _lgopen_prepare_attached(struct nfs4_opendata *data,
|
|
|
|
struct nfs_open_context *ctx)
|
|
|
|
{
|
2016-09-22 19:30:20 +03:00
|
|
|
struct inode *ino = data->dentry->d_inode;
|
|
|
|
struct pnfs_layout_range rng = {
|
|
|
|
.iomode = (data->o_arg.fmode & FMODE_WRITE) ?
|
|
|
|
IOMODE_RW: IOMODE_READ,
|
|
|
|
.offset = 0,
|
|
|
|
.length = NFS4_MAX_UINT64,
|
|
|
|
};
|
|
|
|
struct nfs4_layoutget *lgp;
|
|
|
|
struct pnfs_layout_hdr *lo;
|
|
|
|
|
2017-02-02 20:26:38 +03:00
|
|
|
/* Heuristic: don't send layoutget if we have cached data */
|
|
|
|
if (rng.iomode == IOMODE_READ &&
|
|
|
|
(i_size_read(ino) == 0 || ino->i_mapping->nrpages != 0))
|
|
|
|
return;
|
|
|
|
|
2016-09-22 19:30:20 +03:00
|
|
|
lo = _pnfs_grab_empty_layout(ino, ctx);
|
|
|
|
if (!lo)
|
|
|
|
return;
|
|
|
|
lgp = pnfs_alloc_init_layoutget_args(ino, ctx, ¤t_stateid,
|
|
|
|
&rng, GFP_KERNEL);
|
|
|
|
if (!lgp) {
|
|
|
|
pnfs_clear_first_layoutget(lo);
|
2021-01-06 22:13:22 +03:00
|
|
|
nfs_layoutget_end(lo);
|
2016-09-22 19:30:20 +03:00
|
|
|
pnfs_put_layout_hdr(lo);
|
|
|
|
return;
|
|
|
|
}
|
2021-07-03 00:24:22 +03:00
|
|
|
lgp->lo = lo;
|
2016-09-22 19:30:20 +03:00
|
|
|
data->lgp = lgp;
|
|
|
|
data->o_arg.lg_args = &lgp->args;
|
|
|
|
data->o_res.lg_res = &lgp->res;
|
2016-10-06 19:11:21 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
static void _lgopen_prepare_floating(struct nfs4_opendata *data,
|
|
|
|
struct nfs_open_context *ctx)
|
|
|
|
{
|
2021-07-03 00:24:22 +03:00
|
|
|
struct inode *ino = data->dentry->d_inode;
|
2016-10-06 19:11:21 +03:00
|
|
|
struct pnfs_layout_range rng = {
|
|
|
|
.iomode = (data->o_arg.fmode & FMODE_WRITE) ?
|
|
|
|
IOMODE_RW: IOMODE_READ,
|
|
|
|
.offset = 0,
|
|
|
|
.length = NFS4_MAX_UINT64,
|
|
|
|
};
|
|
|
|
struct nfs4_layoutget *lgp;
|
|
|
|
|
2021-07-03 00:24:22 +03:00
|
|
|
lgp = pnfs_alloc_init_layoutget_args(ino, ctx, ¤t_stateid,
|
2016-10-06 19:11:21 +03:00
|
|
|
&rng, GFP_KERNEL);
|
|
|
|
if (!lgp)
|
|
|
|
return;
|
|
|
|
data->lgp = lgp;
|
|
|
|
data->o_arg.lg_args = &lgp->args;
|
|
|
|
data->o_res.lg_res = &lgp->res;
|
|
|
|
}
|
|
|
|
|
|
|
|
void pnfs_lgopen_prepare(struct nfs4_opendata *data,
|
|
|
|
struct nfs_open_context *ctx)
|
|
|
|
{
|
|
|
|
struct nfs_server *server = NFS_SERVER(data->dir->d_inode);
|
|
|
|
|
|
|
|
if (!(pnfs_enabled_sb(server) &&
|
|
|
|
server->pnfs_curr_ld->flags & PNFS_LAYOUTGET_ON_OPEN))
|
|
|
|
return;
|
|
|
|
/* Could check on max_ops, but currently hardcoded high enough */
|
2016-10-04 22:26:41 +03:00
|
|
|
if (!nfs_server_capable(data->dir->d_inode, NFS_CAP_LGOPEN))
|
|
|
|
return;
|
2021-07-03 00:24:22 +03:00
|
|
|
if (data->lgp)
|
|
|
|
return;
|
2016-10-06 19:11:21 +03:00
|
|
|
if (data->state)
|
|
|
|
_lgopen_prepare_attached(data, ctx);
|
|
|
|
else
|
|
|
|
_lgopen_prepare_floating(data, ctx);
|
|
|
|
}
|
|
|
|
|
|
|
|
void pnfs_parse_lgopen(struct inode *ino, struct nfs4_layoutget *lgp,
|
|
|
|
struct nfs_open_context *ctx)
|
|
|
|
{
|
|
|
|
struct pnfs_layout_hdr *lo;
|
|
|
|
struct pnfs_layout_segment *lseg;
|
2016-10-05 16:37:12 +03:00
|
|
|
struct nfs_server *srv = NFS_SERVER(ino);
|
2016-10-06 19:11:21 +03:00
|
|
|
u32 iomode;
|
|
|
|
|
2016-10-04 22:26:41 +03:00
|
|
|
if (!lgp)
|
2016-10-06 19:11:21 +03:00
|
|
|
return;
|
2016-10-04 22:26:41 +03:00
|
|
|
dprintk("%s: entered with status %i\n", __func__, lgp->res.status);
|
|
|
|
if (lgp->res.status) {
|
|
|
|
switch (lgp->res.status) {
|
|
|
|
default:
|
2017-02-02 05:02:07 +03:00
|
|
|
break;
|
|
|
|
/*
|
|
|
|
* Halt lgopen attempts if the server doesn't recognise
|
|
|
|
* the "current stateid" value, the layout type, or the
|
|
|
|
* layoutget operation as being valid.
|
|
|
|
* Also if it complains about too many ops in the compound
|
|
|
|
* or of the request/reply being too big.
|
|
|
|
*/
|
|
|
|
case -NFS4ERR_BAD_STATEID:
|
|
|
|
case -NFS4ERR_NOTSUPP:
|
|
|
|
case -NFS4ERR_REP_TOO_BIG:
|
|
|
|
case -NFS4ERR_REP_TOO_BIG_TO_CACHE:
|
|
|
|
case -NFS4ERR_REQ_TOO_BIG:
|
|
|
|
case -NFS4ERR_TOO_MANY_OPS:
|
|
|
|
case -NFS4ERR_UNKNOWN_LAYOUTTYPE:
|
2016-10-05 16:37:12 +03:00
|
|
|
srv->caps &= ~NFS_CAP_LGOPEN;
|
2016-10-04 22:26:41 +03:00
|
|
|
}
|
|
|
|
return;
|
|
|
|
}
|
2021-07-03 00:24:22 +03:00
|
|
|
if (!lgp->lo) {
|
2016-09-22 19:30:20 +03:00
|
|
|
lo = _pnfs_grab_empty_layout(ino, ctx);
|
|
|
|
if (!lo)
|
|
|
|
return;
|
2021-07-03 00:24:22 +03:00
|
|
|
lgp->lo = lo;
|
2016-10-06 19:11:21 +03:00
|
|
|
} else
|
2021-07-03 00:24:22 +03:00
|
|
|
lo = lgp->lo;
|
2016-10-06 19:11:21 +03:00
|
|
|
|
|
|
|
lseg = pnfs_layout_process(lgp);
|
2018-05-22 18:15:32 +03:00
|
|
|
if (!IS_ERR(lseg)) {
|
2016-10-06 19:11:21 +03:00
|
|
|
iomode = lgp->args.range.iomode;
|
|
|
|
pnfs_layout_clear_fail_bit(lo, pnfs_iomode_to_fail_bit(iomode));
|
|
|
|
pnfs_put_lseg(lseg);
|
|
|
|
}
|
2016-10-18 20:39:51 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
void nfs4_lgopen_release(struct nfs4_layoutget *lgp)
|
|
|
|
{
|
|
|
|
if (lgp != NULL) {
|
2021-07-03 00:24:22 +03:00
|
|
|
if (lgp->lo) {
|
|
|
|
pnfs_clear_first_layoutget(lgp->lo);
|
|
|
|
nfs_layoutget_end(lgp->lo);
|
2016-10-18 20:39:51 +03:00
|
|
|
}
|
|
|
|
pnfs_layoutget_free(lgp);
|
|
|
|
}
|
2016-10-06 19:11:21 +03:00
|
|
|
}
|
|
|
|
|
2012-09-18 01:12:15 +04:00
|
|
|
struct pnfs_layout_segment *
|
2010-10-20 08:18:03 +04:00
|
|
|
pnfs_layout_process(struct nfs4_layoutget *lgp)
|
|
|
|
{
|
2021-07-03 00:24:22 +03:00
|
|
|
struct pnfs_layout_hdr *lo = lgp->lo;
|
2010-10-20 08:18:03 +04:00
|
|
|
struct nfs4_layoutget_res *res = &lgp->res;
|
|
|
|
struct pnfs_layout_segment *lseg;
|
2011-01-06 14:36:21 +03:00
|
|
|
struct inode *ino = lo->plh_inode;
|
2014-02-12 19:02:27 +04:00
|
|
|
LIST_HEAD(free_me);
|
2015-08-25 18:16:13 +03:00
|
|
|
|
|
|
|
if (!pnfs_sanity_check_layout_range(&res->range))
|
2016-05-17 19:28:48 +03:00
|
|
|
return ERR_PTR(-EINVAL);
|
2010-10-20 08:18:03 +04:00
|
|
|
|
|
|
|
/* Inject layout blob into I/O device driver */
|
2011-05-12 02:00:51 +04:00
|
|
|
lseg = NFS_SERVER(ino)->pnfs_curr_ld->alloc_lseg(lo, res, lgp->gfp_flags);
|
2016-05-17 19:28:48 +03:00
|
|
|
if (IS_ERR_OR_NULL(lseg)) {
|
2010-10-20 08:18:03 +04:00
|
|
|
if (!lseg)
|
2016-05-17 19:28:48 +03:00
|
|
|
lseg = ERR_PTR(-ENOMEM);
|
|
|
|
|
|
|
|
dprintk("%s: Could not allocate layout: error %ld\n",
|
|
|
|
__func__, PTR_ERR(lseg));
|
|
|
|
return lseg;
|
2010-10-20 08:18:03 +04:00
|
|
|
}
|
|
|
|
|
2016-07-24 22:10:12 +03:00
|
|
|
pnfs_init_lseg(lo, lseg, &res->range, &res->stateid);
|
2014-08-21 20:09:18 +04:00
|
|
|
|
2010-10-20 08:18:03 +04:00
|
|
|
spin_lock(&ino->i_lock);
|
2015-08-04 23:40:08 +03:00
|
|
|
if (pnfs_layoutgets_blocked(lo)) {
|
2011-01-06 14:36:30 +03:00
|
|
|
dprintk("%s forget reply due to state\n", __func__);
|
2016-05-17 19:28:48 +03:00
|
|
|
goto out_forget;
|
2011-01-06 14:36:30 +03:00
|
|
|
}
|
2012-10-03 03:38:41 +04:00
|
|
|
|
2021-07-03 02:48:41 +03:00
|
|
|
if (!pnfs_layout_is_valid(lo) && !pnfs_is_first_layoutget(lo))
|
|
|
|
goto out_forget;
|
|
|
|
|
2021-01-22 18:05:51 +03:00
|
|
|
if (nfs4_stateid_match_other(&lo->plh_stateid, &res->stateid)) {
|
2014-08-21 20:09:20 +04:00
|
|
|
/* existing state ID, make sure the sequence number matches. */
|
|
|
|
if (pnfs_layout_stateid_blocked(lo, &res->stateid)) {
|
2021-07-03 02:48:41 +03:00
|
|
|
if (!pnfs_layout_is_valid(lo))
|
2021-01-22 18:05:51 +03:00
|
|
|
lo->plh_barrier = 0;
|
2014-08-21 20:09:20 +04:00
|
|
|
dprintk("%s forget reply due to sequence\n", __func__);
|
2016-05-17 19:28:48 +03:00
|
|
|
goto out_forget;
|
2014-08-21 20:09:20 +04:00
|
|
|
}
|
2020-01-27 21:07:26 +03:00
|
|
|
pnfs_set_layout_stateid(lo, &res->stateid, lgp->cred, false);
|
2021-01-22 18:05:51 +03:00
|
|
|
} else if (pnfs_layout_is_valid(lo)) {
|
2014-08-21 20:09:20 +04:00
|
|
|
/*
|
|
|
|
* We got an entirely new state ID. Mark all segments for the
|
2016-11-23 20:36:04 +03:00
|
|
|
* inode invalid, and retry the layoutget
|
2014-08-21 20:09:20 +04:00
|
|
|
*/
|
2021-01-22 01:11:42 +03:00
|
|
|
struct pnfs_layout_range range = {
|
|
|
|
.iomode = IOMODE_ANY,
|
|
|
|
.length = NFS4_MAX_UINT64,
|
|
|
|
};
|
2021-04-16 00:33:07 +03:00
|
|
|
pnfs_mark_matching_lsegs_return(lo, &free_me, &range, 0);
|
2016-11-23 20:36:04 +03:00
|
|
|
goto out_forget;
|
2021-01-22 18:05:51 +03:00
|
|
|
} else {
|
|
|
|
/* We have a completely new layout */
|
|
|
|
pnfs_set_layout_stateid(lo, &res->stateid, lgp->cred, true);
|
2014-08-21 20:09:20 +04:00
|
|
|
}
|
2012-10-03 03:38:41 +04:00
|
|
|
|
2012-09-19 04:57:08 +04:00
|
|
|
pnfs_get_lseg(lseg);
|
2015-08-25 15:54:17 +03:00
|
|
|
pnfs_layout_insert_lseg(lo, lseg, &free_me);
|
2016-07-21 18:53:29 +03:00
|
|
|
|
2010-10-20 08:18:03 +04:00
|
|
|
|
2015-08-21 07:49:44 +03:00
|
|
|
if (res->return_on_close)
|
2011-01-06 14:36:32 +03:00
|
|
|
set_bit(NFS_LSEG_ROC, &lseg->pls_flags);
|
|
|
|
|
2010-10-20 08:18:03 +04:00
|
|
|
spin_unlock(&ino->i_lock);
|
2014-02-12 19:02:27 +04:00
|
|
|
pnfs_free_lseg_list(&free_me);
|
2012-09-18 01:12:15 +04:00
|
|
|
return lseg;
|
2011-01-06 14:36:30 +03:00
|
|
|
|
2016-05-17 19:28:48 +03:00
|
|
|
out_forget:
|
2011-01-06 14:36:30 +03:00
|
|
|
spin_unlock(&ino->i_lock);
|
|
|
|
lseg->pls_layout = lo;
|
|
|
|
NFS_SERVER(ino)->pnfs_curr_ld->free_lseg(lseg);
|
2016-05-17 19:28:48 +03:00
|
|
|
return ERR_PTR(-EAGAIN);
|
2010-10-20 08:18:03 +04:00
|
|
|
}
|
|
|
|
|
2016-02-15 20:36:04 +03:00
|
|
|
/**
|
|
|
|
* pnfs_mark_matching_lsegs_return - Free or return matching layout segments
|
|
|
|
* @lo: pointer to layout header
|
|
|
|
* @tmp_list: list header to be used with pnfs_free_lseg_list()
|
|
|
|
* @return_range: describe layout segment ranges to be returned
|
2018-06-23 17:28:40 +03:00
|
|
|
* @seq: stateid seqid to match
|
2016-02-15 20:36:04 +03:00
|
|
|
*
|
|
|
|
* This function is mainly intended for use by layoutrecall. It attempts
|
|
|
|
* to free the layout segment immediately, or else to mark it for return
|
|
|
|
* as soon as its reference count drops to zero.
|
2018-06-23 17:28:40 +03:00
|
|
|
*
|
|
|
|
* Returns
|
|
|
|
* - 0: a layoutreturn needs to be scheduled.
|
|
|
|
* - EBUSY: there are layout segment that are still in use.
|
|
|
|
* - ENOENT: there are no layout segments that need to be returned.
|
2016-02-15 20:36:04 +03:00
|
|
|
*/
|
2016-01-04 19:23:52 +03:00
|
|
|
int
|
2014-09-05 20:53:23 +04:00
|
|
|
pnfs_mark_matching_lsegs_return(struct pnfs_layout_hdr *lo,
|
|
|
|
struct list_head *tmp_list,
|
2016-05-17 19:28:42 +03:00
|
|
|
const struct pnfs_layout_range *return_range,
|
|
|
|
u32 seq)
|
2014-09-05 20:53:23 +04:00
|
|
|
{
|
|
|
|
struct pnfs_layout_segment *lseg, *next;
|
2016-01-04 19:23:52 +03:00
|
|
|
int remaining = 0;
|
2014-09-05 20:53:23 +04:00
|
|
|
|
|
|
|
dprintk("%s:Begin lo %p\n", __func__, lo);
|
|
|
|
|
2015-12-28 18:28:59 +03:00
|
|
|
assert_spin_locked(&lo->plh_inode->i_lock);
|
2014-09-05 20:53:23 +04:00
|
|
|
|
2021-04-15 22:41:57 +03:00
|
|
|
if (test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags))
|
|
|
|
tmp_list = &lo->plh_return_segs;
|
|
|
|
|
2014-09-05 20:53:23 +04:00
|
|
|
list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list)
|
2016-07-22 18:13:22 +03:00
|
|
|
if (pnfs_match_lseg_recall(lseg, return_range, seq)) {
|
2014-09-05 20:53:23 +04:00
|
|
|
dprintk("%s: marking lseg %p iomode %d "
|
|
|
|
"offset %llu length %llu\n", __func__,
|
|
|
|
lseg, lseg->pls_range.iomode,
|
|
|
|
lseg->pls_range.offset,
|
|
|
|
lseg->pls_range.length);
|
2021-04-15 22:41:57 +03:00
|
|
|
if (test_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags))
|
|
|
|
tmp_list = &lo->plh_return_segs;
|
2020-08-04 23:30:30 +03:00
|
|
|
if (mark_lseg_invalid(lseg, tmp_list))
|
2016-02-15 20:36:04 +03:00
|
|
|
continue;
|
|
|
|
remaining++;
|
2014-09-05 20:53:23 +04:00
|
|
|
set_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags);
|
|
|
|
}
|
2016-05-17 19:28:42 +03:00
|
|
|
|
2018-06-23 17:28:40 +03:00
|
|
|
if (remaining) {
|
2016-05-17 19:28:42 +03:00
|
|
|
pnfs_set_plh_return_info(lo, return_range->iomode, seq);
|
2018-06-23 17:28:40 +03:00
|
|
|
return -EBUSY;
|
|
|
|
}
|
2016-05-17 19:28:42 +03:00
|
|
|
|
2018-06-23 17:28:40 +03:00
|
|
|
if (!list_empty(&lo->plh_return_segs)) {
|
|
|
|
pnfs_set_plh_return_info(lo, return_range->iomode, seq);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
return -ENOENT;
|
2014-09-05 20:53:23 +04:00
|
|
|
}
|
|
|
|
|
2020-02-18 23:58:31 +03:00
|
|
|
/*
 * Mark the layout covering @range on @inode for return to the server.
 * If no segment is still in use, the LAYOUTRETURN is sent immediately;
 * otherwise outstanding writes are committed so the remaining segments
 * can be released and the return completed later.
 */
static void
pnfs_mark_layout_for_return(struct inode *inode,
		const struct pnfs_layout_range *range)
{
	struct pnfs_layout_hdr *lo;
	bool return_now = false;

	spin_lock(&inode->i_lock);
	lo = NFS_I(inode)->layout;
	/* Nothing to do if the layout is already gone or invalid. */
	if (!pnfs_layout_is_valid(lo)) {
		spin_unlock(&inode->i_lock);
		return;
	}
	pnfs_set_plh_return_info(lo, range->iomode, 0);
	/*
	 * mark all matching lsegs so that we are sure to have no live
	 * segments at hand when sending layoutreturn. See pnfs_put_lseg()
	 * for how it works.
	 */
	if (pnfs_mark_matching_lsegs_return(lo, &lo->plh_return_segs, range, 0) != -EBUSY) {
		const struct cred *cred;
		nfs4_stateid stateid;
		enum pnfs_iomode iomode;

		/* No segment is busy: try to send the LAYOUTRETURN now. */
		return_now = pnfs_prepare_layoutreturn(lo, &stateid, &cred, &iomode);
		spin_unlock(&inode->i_lock);
		if (return_now)
			pnfs_send_layoutreturn(lo, &stateid, &cred, iomode, false);
	} else {
		/*
		 * -EBUSY: some segments are still referenced.  Kick off a
		 * commit so they get released and the return can proceed.
		 */
		spin_unlock(&inode->i_lock);
		nfs_commit_inode(inode, 0);
	}
}
|
2020-02-18 23:58:31 +03:00
|
|
|
|
|
|
|
void pnfs_error_mark_layout_for_return(struct inode *inode,
|
|
|
|
struct pnfs_layout_segment *lseg)
|
|
|
|
{
|
|
|
|
struct pnfs_layout_range range = {
|
|
|
|
.iomode = lseg->pls_range.iomode,
|
|
|
|
.offset = 0,
|
|
|
|
.length = NFS4_MAX_UINT64,
|
|
|
|
};
|
|
|
|
|
|
|
|
pnfs_mark_layout_for_return(inode, &range);
|
|
|
|
}
|
2014-09-05 20:53:23 +04:00
|
|
|
EXPORT_SYMBOL_GPL(pnfs_error_mark_layout_for_return);
|
|
|
|
|
2020-02-18 23:58:31 +03:00
|
|
|
static bool
|
|
|
|
pnfs_layout_can_be_returned(struct pnfs_layout_hdr *lo)
|
|
|
|
{
|
|
|
|
return pnfs_layout_is_valid(lo) &&
|
|
|
|
!test_bit(NFS_LAYOUT_INODE_FREEING, &lo->plh_flags) &&
|
|
|
|
!test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags);
|
|
|
|
}
|
|
|
|
|
|
|
|
static struct pnfs_layout_segment *
|
|
|
|
pnfs_find_first_lseg(struct pnfs_layout_hdr *lo,
|
|
|
|
const struct pnfs_layout_range *range,
|
|
|
|
enum pnfs_iomode iomode)
|
|
|
|
{
|
|
|
|
struct pnfs_layout_segment *lseg;
|
|
|
|
|
|
|
|
list_for_each_entry(lseg, &lo->plh_segs, pls_list) {
|
|
|
|
if (!test_bit(NFS_LSEG_VALID, &lseg->pls_flags))
|
|
|
|
continue;
|
|
|
|
if (test_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags))
|
|
|
|
continue;
|
|
|
|
if (lseg->pls_range.iomode != iomode && iomode != IOMODE_ANY)
|
|
|
|
continue;
|
|
|
|
if (pnfs_lseg_range_intersecting(&lseg->pls_range, range))
|
|
|
|
return lseg;
|
|
|
|
}
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Find open file states whose mode matches that of the range */
|
|
|
|
static bool
pnfs_should_return_unused_layout(struct pnfs_layout_hdr *lo,
				 const struct pnfs_layout_range *range)
{
	struct list_head *head;
	struct nfs_open_context *ctx;
	fmode_t mode = 0;

	/* Must be returnable and actually hold a segment matching @range. */
	if (!pnfs_layout_can_be_returned(lo) ||
	    !pnfs_find_first_lseg(lo, range, range->iomode))
		return false;

	/* Accumulate the open modes of all open contexts on the inode. */
	head = &NFS_I(lo->plh_inode)->open_files;
	list_for_each_entry_rcu(ctx, head, list) {
		if (ctx->state)
			mode |= ctx->state->state & (FMODE_READ|FMODE_WRITE);
	}

	/*
	 * Mask out modes that cannot use a segment of this iomode; the
	 * layout is "unused" when no remaining open mode needs it.
	 */
	switch (range->iomode) {
	default:
		break;
	case IOMODE_READ:
		/* A read-only layout cannot serve writers. */
		mode &= ~FMODE_WRITE;
		break;
	case IOMODE_RW:
		/* Readers may be covered by a separate READ segment. */
		if (pnfs_find_first_lseg(lo, range, IOMODE_READ))
			mode &= ~FMODE_READ;
	}
	return mode == 0;
}
|
|
|
|
|
|
|
|
/*
 * Walk all layouts of @server and return those that are no longer in
 * use by any open context.  @data points to the pnfs_layout_range to
 * match.  Always returns 0 (callback for nfs_client_for_each_server).
 */
static int
pnfs_layout_return_unused_byserver(struct nfs_server *server, void *data)
{
	const struct pnfs_layout_range *range = data;
	struct pnfs_layout_hdr *lo;
	struct inode *inode;
restart:
	rcu_read_lock();
	list_for_each_entry_rcu(lo, &server->layouts, plh_layouts) {
		/* Cheap unlocked pre-checks before taking the inode lock. */
		if (!pnfs_layout_can_be_returned(lo) ||
		    test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags))
			continue;
		inode = lo->plh_inode;
		spin_lock(&inode->i_lock);
		if (!pnfs_should_return_unused_layout(lo, range)) {
			spin_unlock(&inode->i_lock);
			continue;
		}
		spin_unlock(&inode->i_lock);
		/* Pin the inode so it survives outside the RCU section. */
		inode = pnfs_grab_inode_layout_hdr(lo);
		if (!inode)
			continue;
		rcu_read_unlock();
		pnfs_mark_layout_for_return(inode, range);
		iput(inode);
		cond_resched();
		/* List may have changed while unlocked: rescan from the top. */
		goto restart;
	}
	rcu_read_unlock();
	return 0;
}
|
|
|
|
|
|
|
|
void
|
|
|
|
pnfs_layout_return_unused_byclid(struct nfs_client *clp,
|
|
|
|
enum pnfs_iomode iomode)
|
|
|
|
{
|
|
|
|
struct pnfs_layout_range range = {
|
|
|
|
.iomode = iomode,
|
|
|
|
.offset = 0,
|
|
|
|
.length = NFS4_MAX_UINT64,
|
|
|
|
};
|
|
|
|
|
|
|
|
nfs_client_for_each_server(clp, pnfs_layout_return_unused_byserver,
|
|
|
|
&range);
|
|
|
|
}
|
|
|
|
|
2017-04-25 17:56:19 +03:00
|
|
|
void
|
|
|
|
pnfs_generic_pg_check_layout(struct nfs_pageio_descriptor *pgio)
|
|
|
|
{
|
|
|
|
if (pgio->pg_lseg == NULL ||
|
|
|
|
test_bit(NFS_LSEG_VALID, &pgio->pg_lseg->pls_flags))
|
|
|
|
return;
|
|
|
|
pnfs_put_lseg(pgio->pg_lseg);
|
|
|
|
pgio->pg_lseg = NULL;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(pnfs_generic_pg_check_layout);
|
|
|
|
|
2017-05-23 03:20:23 +03:00
|
|
|
/*
|
|
|
|
* Check for any intersection between the request and the pgio->pg_lseg,
|
|
|
|
* and if none, put this pgio->pg_lseg away.
|
|
|
|
*/
|
2020-03-23 21:48:23 +03:00
|
|
|
void
|
2017-05-23 03:20:23 +03:00
|
|
|
pnfs_generic_pg_check_range(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
|
|
|
|
{
|
|
|
|
if (pgio->pg_lseg && !pnfs_lseg_request_intersecting(pgio->pg_lseg, req)) {
|
|
|
|
pnfs_put_lseg(pgio->pg_lseg);
|
|
|
|
pgio->pg_lseg = NULL;
|
|
|
|
}
|
|
|
|
}
|
2020-03-23 21:48:23 +03:00
|
|
|
EXPORT_SYMBOL_GPL(pnfs_generic_pg_check_range);
|
2017-05-23 03:20:23 +03:00
|
|
|
|
2011-06-10 21:30:23 +04:00
|
|
|
/*
 * Generic pg_init hook for pNFS reads: attach a READ layout segment to
 * the pageio descriptor, falling back to I/O through the MDS when no
 * segment can be obtained.
 */
void
pnfs_generic_pg_init_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
{
	u64 rd_size;

	/* Discard any stale or non-intersecting cached segment first. */
	pnfs_generic_pg_check_layout(pgio);
	pnfs_generic_pg_check_range(pgio, req);
	if (pgio->pg_lseg == NULL) {
		/* Buffered read: ask up to EOF; direct read: remaining bytes. */
		if (pgio->pg_dreq == NULL)
			rd_size = i_size_read(pgio->pg_inode) - req_offset(req);
		else
			rd_size = nfs_dreq_bytes_left(pgio->pg_dreq);

		pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
						   nfs_req_openctx(req),
						   req_offset(req),
						   rd_size,
						   IOMODE_READ,
						   false,
						   GFP_KERNEL);
		if (IS_ERR(pgio->pg_lseg)) {
			pgio->pg_error = PTR_ERR(pgio->pg_lseg);
			pgio->pg_lseg = NULL;
			return;
		}
	}
	/* If no lseg, fall back to read through mds */
	if (pgio->pg_lseg == NULL)
		nfs_pageio_reset_read_mds(pgio);

}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_init_read);
|
|
|
|
|
|
|
|
/*
 * Generic pg_init hook for pNFS writes: attach an RW layout segment
 * covering @wb_size bytes from the request offset, falling back to
 * writing through the MDS when no segment can be obtained.
 */
void
pnfs_generic_pg_init_write(struct nfs_pageio_descriptor *pgio,
			   struct nfs_page *req, u64 wb_size)
{
	/* Discard any stale or non-intersecting cached segment first. */
	pnfs_generic_pg_check_layout(pgio);
	pnfs_generic_pg_check_range(pgio, req);
	if (pgio->pg_lseg == NULL) {
		pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
						   nfs_req_openctx(req),
						   req_offset(req),
						   wb_size,
						   IOMODE_RW,
						   false,
						   GFP_KERNEL);
		if (IS_ERR(pgio->pg_lseg)) {
			pgio->pg_error = PTR_ERR(pgio->pg_lseg);
			pgio->pg_lseg = NULL;
			return;
		}
	}
	/* If no lseg, fall back to write through mds */
	if (pgio->pg_lseg == NULL)
		nfs_pageio_reset_write_mds(pgio);
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_init_write);
|
|
|
|
|
2014-09-10 23:48:01 +04:00
|
|
|
void
|
|
|
|
pnfs_generic_pg_cleanup(struct nfs_pageio_descriptor *desc)
|
|
|
|
{
|
|
|
|
if (desc->pg_lseg) {
|
|
|
|
pnfs_put_lseg(desc->pg_lseg);
|
|
|
|
desc->pg_lseg = NULL;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(pnfs_generic_pg_cleanup);
|
|
|
|
|
2014-05-15 19:56:43 +04:00
|
|
|
/*
|
|
|
|
* Return 0 if @req cannot be coalesced into @pgio, otherwise return the number
|
|
|
|
* of bytes (maximum @req->wb_bytes) that can be coalesced.
|
|
|
|
*/
|
|
|
|
/*
 * Coalescing test for pNFS: clamp the generic result so a request never
 * crosses the end of the current layout segment.  Returns 0 if @req
 * cannot be coalesced into @pgio at all.
 */
size_t
pnfs_generic_pg_test(struct nfs_pageio_descriptor *pgio,
		     struct nfs_page *prev, struct nfs_page *req)
{
	unsigned int size;
	u64 seg_end, req_start, seg_left;

	size = nfs_generic_pg_test(pgio, prev, req);
	if (!size)
		return 0;

	/*
	 * 'size' contains the number of bytes left in the current page (up
	 * to the original size asked for in @req->wb_bytes).
	 *
	 * Calculate how many bytes are left in the layout segment
	 * and if there are less bytes than 'size', return that instead.
	 *
	 * Please also note that 'end_offset' is actually the offset of the
	 * first byte that lies outside the pnfs_layout_range. FIXME?
	 *
	 */
	if (pgio->pg_lseg) {
		seg_end = pnfs_end_offset(pgio->pg_lseg->pls_range.offset,
					  pgio->pg_lseg->pls_range.length);
		req_start = req_offset(req);

		/* start of request is past the last byte of this segment */
		if (req_start >= seg_end)
			return 0;

		/* adjust 'size' iff there are fewer bytes left in the
		 * segment than what nfs_generic_pg_test returned */
		seg_left = seg_end - req_start;
		if (seg_left < size)
			size = (unsigned int)seg_left;
	}

	return size;
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_test);
|
2011-03-01 04:34:14 +03:00
|
|
|
|
2014-06-09 19:48:38 +04:00
|
|
|
/*
 * Resend a failed pNFS write through the metadata server.  Returns the
 * status of nfs_pageio_resend().
 */
int pnfs_write_done_resend_to_mds(struct nfs_pgio_header *hdr)
{
	struct nfs_pageio_descriptor pgio;

	/* Resend all requests through the MDS */
	nfs_pageio_init_write(&pgio, hdr->inode, FLUSH_STABLE, true,
			      hdr->completion_ops);
	/* Flag the open context so future writes also go to the MDS. */
	set_bit(NFS_CONTEXT_RESEND_WRITES, &hdr->args.context->flags);
	return nfs_pageio_resend(&pgio, hdr);
}
EXPORT_SYMBOL_GPL(pnfs_write_done_resend_to_mds);
|
2012-01-06 17:57:46 +04:00
|
|
|
|
2014-06-09 19:48:35 +04:00
|
|
|
static void pnfs_ld_handle_write_error(struct nfs_pgio_header *hdr)
|
2012-04-20 22:47:37 +04:00
|
|
|
{
|
2012-04-20 22:47:44 +04:00
|
|
|
|
|
|
|
dprintk("pnfs write error = %d\n", hdr->pnfs_error);
|
|
|
|
if (NFS_SERVER(hdr->inode)->pnfs_curr_ld->flags &
|
2012-04-20 22:47:37 +04:00
|
|
|
PNFS_LAYOUTRET_ON_ERROR) {
|
2012-04-20 22:47:44 +04:00
|
|
|
pnfs_return_layout(hdr->inode);
|
2012-04-20 22:47:37 +04:00
|
|
|
}
|
2012-04-20 22:47:47 +04:00
|
|
|
if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags))
|
2014-06-09 19:48:38 +04:00
|
|
|
hdr->task.tk_status = pnfs_write_done_resend_to_mds(hdr);
|
2012-04-20 22:47:37 +04:00
|
|
|
}
|
|
|
|
|
2011-05-22 20:52:03 +04:00
|
|
|
/*
|
|
|
|
* Called by non rpc-based layout drivers
|
|
|
|
*/
|
2014-06-09 19:48:35 +04:00
|
|
|
void pnfs_ld_write_done(struct nfs_pgio_header *hdr)
{
	/* On success, record the new layoutcommit range and complete the RPC. */
	if (likely(!hdr->pnfs_error)) {
		pnfs_set_layoutcommit(hdr->inode, hdr->lseg,
				hdr->mds_offset + hdr->res.count);
		hdr->mds_ops->rpc_call_done(&hdr->task, hdr);
	}
	trace_nfs4_pnfs_write(hdr, hdr->pnfs_error);
	/* On error, let the error path decide about layoutreturn/resend. */
	if (unlikely(hdr->pnfs_error))
		pnfs_ld_handle_write_error(hdr);
	hdr->mds_ops->rpc_release(hdr);
}
EXPORT_SYMBOL_GPL(pnfs_ld_write_done);
|
2011-03-03 18:13:44 +03:00
|
|
|
|
2011-07-13 23:59:19 +04:00
|
|
|
/*
 * Redirect the requests of @hdr back through the MDS write path.  The
 * NFS_IOHDR_REDO bit guards against splicing the page list twice.
 */
static void
pnfs_write_through_mds(struct nfs_pageio_descriptor *desc,
		struct nfs_pgio_header *hdr)
{
	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);

	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
		/* Re-queue the pages and force the descriptor to recoalesce. */
		list_splice_tail_init(&hdr->pages, &mirror->pg_list);
		nfs_pageio_reset_write_mds(desc);
		mirror->pg_recoalesce = 1;
	}
	hdr->completion_ops->completion(hdr);
}
|
|
|
|
|
|
|
|
static enum pnfs_try_status
|
2014-06-09 19:48:35 +04:00
|
|
|
pnfs_try_to_write_data(struct nfs_pgio_header *hdr,
|
2011-07-13 23:59:19 +04:00
|
|
|
const struct rpc_call_ops *call_ops,
|
|
|
|
struct pnfs_layout_segment *lseg,
|
|
|
|
int how)
|
2011-03-03 18:13:45 +03:00
|
|
|
{
|
2012-04-20 22:47:44 +04:00
|
|
|
struct inode *inode = hdr->inode;
|
2011-03-03 18:13:45 +03:00
|
|
|
enum pnfs_try_status trypnfs;
|
|
|
|
struct nfs_server *nfss = NFS_SERVER(inode);
|
|
|
|
|
2012-04-20 22:47:44 +04:00
|
|
|
hdr->mds_ops = call_ops;
|
2011-03-03 18:13:45 +03:00
|
|
|
|
|
|
|
dprintk("%s: Writing ino:%lu %u@%llu (how %d)\n", __func__,
|
2014-06-09 19:48:35 +04:00
|
|
|
inode->i_ino, hdr->args.count, hdr->args.offset, how);
|
|
|
|
trypnfs = nfss->pnfs_curr_ld->write_pagelist(hdr, how);
|
2012-04-20 22:47:47 +04:00
|
|
|
if (trypnfs != PNFS_NOT_ATTEMPTED)
|
2011-03-03 18:13:45 +03:00
|
|
|
nfs_inc_stats(inode, NFSIOS_PNFS_WRITE);
|
|
|
|
dprintk("%s End (trypnfs:%d)\n", __func__, trypnfs);
|
|
|
|
return trypnfs;
|
|
|
|
}
|
|
|
|
|
2011-07-13 23:59:19 +04:00
|
|
|
/*
 * Dispatch a write via the layout driver and handle the three possible
 * outcomes: success, fall back to the MDS, or retry through pNFS.
 */
static void
pnfs_do_write(struct nfs_pageio_descriptor *desc,
	      struct nfs_pgio_header *hdr, int how)
{
	const struct rpc_call_ops *call_ops = desc->pg_rpc_callops;
	struct pnfs_layout_segment *lseg = desc->pg_lseg;
	enum pnfs_try_status trypnfs;

	trypnfs = pnfs_try_to_write_data(hdr, call_ops, lseg, how);
	switch (trypnfs) {
	case PNFS_NOT_ATTEMPTED:
		pnfs_write_through_mds(desc, hdr);
		break;
	case PNFS_ATTEMPTED:
		break;
	case PNFS_TRY_AGAIN:
		/* cleanup hdr and prepare to redo pnfs */
		if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
			struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);
			list_splice_init(&hdr->pages, &mirror->pg_list);
			mirror->pg_recoalesce = 1;
		}
		hdr->mds_ops->rpc_release(hdr);
	}
}
|
|
|
|
|
2012-04-20 22:47:47 +04:00
|
|
|
/* Release the write header and its layout segment reference. */
static void pnfs_writehdr_free(struct nfs_pgio_header *hdr)
{
	pnfs_put_lseg(hdr->lseg);
	nfs_pgio_header_free(hdr);
}
|
|
|
|
|
2011-07-13 23:59:19 +04:00
|
|
|
/*
 * Generic pg_doio hook for pNFS writes: allocate a pgio header, pin the
 * descriptor's layout segment on it, and issue the write.  Returns 0 on
 * success or a negative errno (also stored in desc->pg_error on OOM).
 */
int
pnfs_generic_pg_writepages(struct nfs_pageio_descriptor *desc)
{
	struct nfs_pgio_header *hdr;
	int ret;

	hdr = nfs_pgio_header_alloc(desc->pg_rw_ops);
	if (!hdr) {
		desc->pg_error = -ENOMEM;
		return desc->pg_error;
	}
	nfs_pgheader_init(desc, hdr, pnfs_writehdr_free);

	/* Reference dropped in pnfs_writehdr_free(). */
	hdr->lseg = pnfs_get_lseg(desc->pg_lseg);
	ret = nfs_generic_pgio(desc, hdr);
	if (!ret)
		pnfs_do_write(desc, hdr, desc->pg_ioflags);

	return ret;
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_writepages);
|
|
|
|
|
2014-06-09 19:48:38 +04:00
|
|
|
/*
 * Resend a failed pNFS read through the metadata server.  Returns the
 * status of nfs_pageio_resend().
 */
int pnfs_read_done_resend_to_mds(struct nfs_pgio_header *hdr)
{
	struct nfs_pageio_descriptor pgio;

	/* Resend all requests through the MDS */
	nfs_pageio_init_read(&pgio, hdr->inode, true, hdr->completion_ops);
	return nfs_pageio_resend(&pgio, hdr);
}
EXPORT_SYMBOL_GPL(pnfs_read_done_resend_to_mds);
|
2012-04-20 22:47:37 +04:00
|
|
|
|
2014-06-09 19:48:35 +04:00
|
|
|
static void pnfs_ld_handle_read_error(struct nfs_pgio_header *hdr)
|
2012-04-20 22:47:37 +04:00
|
|
|
{
|
2012-04-20 22:47:44 +04:00
|
|
|
dprintk("pnfs read error = %d\n", hdr->pnfs_error);
|
|
|
|
if (NFS_SERVER(hdr->inode)->pnfs_curr_ld->flags &
|
2012-04-20 22:47:37 +04:00
|
|
|
PNFS_LAYOUTRET_ON_ERROR) {
|
2012-04-20 22:47:44 +04:00
|
|
|
pnfs_return_layout(hdr->inode);
|
2012-04-20 22:47:37 +04:00
|
|
|
}
|
2012-04-20 22:47:46 +04:00
|
|
|
if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags))
|
2014-06-09 19:48:38 +04:00
|
|
|
hdr->task.tk_status = pnfs_read_done_resend_to_mds(hdr);
|
2011-11-10 23:30:37 +04:00
|
|
|
}
|
|
|
|
|
2011-05-22 20:52:03 +04:00
|
|
|
/*
|
|
|
|
* Called by non rpc-based layout drivers
|
|
|
|
*/
|
2014-06-09 19:48:35 +04:00
|
|
|
void pnfs_ld_read_done(struct nfs_pgio_header *hdr)
{
	/* On success just complete the RPC; no layoutcommit for reads. */
	if (likely(!hdr->pnfs_error))
		hdr->mds_ops->rpc_call_done(&hdr->task, hdr);
	trace_nfs4_pnfs_read(hdr, hdr->pnfs_error);
	/* On error, let the error path decide about layoutreturn/resend. */
	if (unlikely(hdr->pnfs_error))
		pnfs_ld_handle_read_error(hdr);
	hdr->mds_ops->rpc_release(hdr);
}
EXPORT_SYMBOL_GPL(pnfs_ld_read_done);
|
|
|
|
|
2011-07-13 23:58:28 +04:00
|
|
|
/*
 * Redirect the requests of @hdr back through the MDS read path.  The
 * NFS_IOHDR_REDO bit guards against splicing the page list twice.
 */
static void
pnfs_read_through_mds(struct nfs_pageio_descriptor *desc,
		struct nfs_pgio_header *hdr)
{
	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);

	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
		/* Re-queue the pages and force the descriptor to recoalesce. */
		list_splice_tail_init(&hdr->pages, &mirror->pg_list);
		nfs_pageio_reset_read_mds(desc);
		mirror->pg_recoalesce = 1;
	}
	hdr->completion_ops->completion(hdr);
}
|
|
|
|
|
2011-03-01 04:34:16 +03:00
|
|
|
/*
|
|
|
|
* Call the appropriate parallel I/O subsystem read function.
|
|
|
|
*/
|
2011-07-13 23:58:28 +04:00
|
|
|
static enum pnfs_try_status
|
2014-06-09 19:48:35 +04:00
|
|
|
pnfs_try_to_read_data(struct nfs_pgio_header *hdr,
|
2011-07-13 23:58:28 +04:00
|
|
|
const struct rpc_call_ops *call_ops,
|
|
|
|
struct pnfs_layout_segment *lseg)
|
2011-03-01 04:34:16 +03:00
|
|
|
{
|
2012-04-20 22:47:44 +04:00
|
|
|
struct inode *inode = hdr->inode;
|
2011-03-01 04:34:16 +03:00
|
|
|
struct nfs_server *nfss = NFS_SERVER(inode);
|
|
|
|
enum pnfs_try_status trypnfs;
|
|
|
|
|
2012-04-20 22:47:44 +04:00
|
|
|
hdr->mds_ops = call_ops;
|
2011-03-01 04:34:16 +03:00
|
|
|
|
|
|
|
dprintk("%s: Reading ino:%lu %u@%llu\n",
|
2014-06-09 19:48:35 +04:00
|
|
|
__func__, inode->i_ino, hdr->args.count, hdr->args.offset);
|
2011-03-01 04:34:16 +03:00
|
|
|
|
2014-06-09 19:48:35 +04:00
|
|
|
trypnfs = nfss->pnfs_curr_ld->read_pagelist(hdr);
|
2012-04-20 22:47:46 +04:00
|
|
|
if (trypnfs != PNFS_NOT_ATTEMPTED)
|
2011-03-01 04:34:16 +03:00
|
|
|
nfs_inc_stats(inode, NFSIOS_PNFS_READ);
|
|
|
|
dprintk("%s End (trypnfs:%d)\n", __func__, trypnfs);
|
|
|
|
return trypnfs;
|
|
|
|
}
|
2011-03-23 16:27:54 +03:00
|
|
|
|
2014-11-10 03:35:38 +03:00
|
|
|
/* Resend all requests through pnfs. */
|
2020-08-11 20:36:32 +03:00
|
|
|
void pnfs_read_resend_pnfs(struct nfs_pgio_header *hdr,
			   unsigned int mirror_idx)
{
	struct nfs_pageio_descriptor pgio;

	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
		/* Prevent deadlocks with layoutreturn! */
		pnfs_put_lseg(hdr->lseg);
		hdr->lseg = NULL;

		/* Resend through pNFS (not the MDS) on the given mirror. */
		nfs_pageio_init_read(&pgio, hdr->inode, false,
					hdr->completion_ops);
		pgio.pg_mirror_idx = mirror_idx;
		hdr->task.tk_status = nfs_pageio_resend(&pgio, hdr);
	}
}
EXPORT_SYMBOL_GPL(pnfs_read_resend_pnfs);
|
|
|
|
|
2011-07-13 23:58:28 +04:00
|
|
|
/*
 * Dispatch a read via the layout driver and handle the three possible
 * outcomes: success, fall back to the MDS, or retry through pNFS.
 */
static void
pnfs_do_read(struct nfs_pageio_descriptor *desc, struct nfs_pgio_header *hdr)
{
	const struct rpc_call_ops *call_ops = desc->pg_rpc_callops;
	struct pnfs_layout_segment *lseg = desc->pg_lseg;
	enum pnfs_try_status trypnfs;

	trypnfs = pnfs_try_to_read_data(hdr, call_ops, lseg);
	switch (trypnfs) {
	case PNFS_NOT_ATTEMPTED:
		pnfs_read_through_mds(desc, hdr);
		break;
	case PNFS_ATTEMPTED:
		break;
	case PNFS_TRY_AGAIN:
		/* cleanup hdr and prepare to redo pnfs */
		if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
			struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);
			list_splice_init(&hdr->pages, &mirror->pg_list);
			mirror->pg_recoalesce = 1;
		}
		hdr->mds_ops->rpc_release(hdr);
	}
}
|
|
|
|
|
2012-04-20 22:47:46 +04:00
|
|
|
/* Release the read header and its layout segment reference. */
static void pnfs_readhdr_free(struct nfs_pgio_header *hdr)
{
	pnfs_put_lseg(hdr->lseg);
	nfs_pgio_header_free(hdr);
}
|
|
|
|
|
2011-07-13 23:58:28 +04:00
|
|
|
/*
 * Generic pg_doio hook for pNFS reads: allocate a pgio header, pin the
 * descriptor's layout segment on it, and issue the read.  Returns 0 on
 * success or a negative errno (also stored in desc->pg_error on OOM).
 */
int
pnfs_generic_pg_readpages(struct nfs_pageio_descriptor *desc)
{
	struct nfs_pgio_header *hdr;
	int ret;

	hdr = nfs_pgio_header_alloc(desc->pg_rw_ops);
	if (!hdr) {
		desc->pg_error = -ENOMEM;
		return desc->pg_error;
	}
	nfs_pgheader_init(desc, hdr, pnfs_readhdr_free);
	/* Reference dropped in pnfs_readhdr_free(). */
	hdr->lseg = pnfs_get_lseg(desc->pg_lseg);
	ret = nfs_generic_pgio(desc, hdr);
	if (!ret)
		pnfs_do_read(desc, hdr);
	return ret;
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_readpages);
|
|
|
|
|
2014-01-13 22:34:36 +04:00
|
|
|
/*
 * Clear the LAYOUTCOMMITTING flag and wake anyone waiting on it.  The
 * barrier after the atomic clear orders the flag update before the
 * waitqueue check in wake_up_bit().
 */
static void pnfs_clear_layoutcommitting(struct inode *inode)
{
	unsigned long *bitlock = &NFS_I(inode)->flags;

	clear_bit_unlock(NFS_INO_LAYOUTCOMMITTING, bitlock);
	smp_mb__after_atomic();
	wake_up_bit(bitlock, NFS_INO_LAYOUTCOMMITTING);
}
|
|
|
|
|
2011-03-23 16:27:54 +03:00
|
|
|
/*
|
2011-07-31 04:52:33 +04:00
|
|
|
* There can be multiple RW segments.
|
2011-03-23 16:27:54 +03:00
|
|
|
*/
|
2011-07-31 04:52:33 +04:00
|
|
|
static void pnfs_list_write_lseg(struct inode *inode, struct list_head *listp)
{
	struct pnfs_layout_segment *lseg;

	/*
	 * Collect every RW segment still flagged for layoutcommit onto
	 * @listp, clearing the flag as we go (test_and_clear keeps flag
	 * handling atomic; short-circuit order matters so the bit is only
	 * touched on RW segments).
	 */
	list_for_each_entry(lseg, &NFS_I(inode)->layout->plh_segs, pls_list) {
		if (lseg->pls_range.iomode == IOMODE_RW &&
		    test_and_clear_bit(NFS_LSEG_LAYOUTCOMMIT, &lseg->pls_flags))
			list_add(&lseg->pls_lc_list, listp);
	}
}
|
|
|
|
|
2013-03-20 20:34:32 +04:00
|
|
|
/*
 * Drop the per-segment references taken for a layoutcommit and allow
 * the next layoutcommit to start.
 */
static void pnfs_list_write_lseg_done(struct inode *inode, struct list_head *listp)
{
	struct pnfs_layout_segment *lseg, *tmp;

	/* Matched by references in pnfs_set_layoutcommit */
	list_for_each_entry_safe(lseg, tmp, listp, pls_lc_list) {
		list_del_init(&lseg->pls_lc_list);
		pnfs_put_lseg(lseg);
	}

	pnfs_clear_layoutcommitting(inode);
}
|
|
|
|
|
2011-09-23 05:50:12 +04:00
|
|
|
/* Mark @lseg's iomode as failed on its layout header. */
void pnfs_set_lo_fail(struct pnfs_layout_segment *lseg)
{
	pnfs_layout_io_set_failed(lseg->pls_layout, lseg->pls_range.iomode);
}
EXPORT_SYMBOL_GPL(pnfs_set_lo_fail);
|
|
|
|
|
2011-03-23 16:27:54 +03:00
|
|
|
/*
 * Record that data up to @end_pos was written under @lseg and must be
 * covered by a future LAYOUTCOMMIT.  Takes a segment reference the
 * first time a segment is flagged (dropped in nfs4_layoutcommit_release
 * via pnfs_list_write_lseg_done).
 */
void
pnfs_set_layoutcommit(struct inode *inode, struct pnfs_layout_segment *lseg,
		loff_t end_pos)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	bool mark_as_dirty = false;

	spin_lock(&inode->i_lock);
	if (!test_and_set_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags)) {
		/* First write since last layoutcommit: start a new range. */
		nfsi->layout->plh_lwb = end_pos;
		mark_as_dirty = true;
		dprintk("%s: Set layoutcommit for inode %lu ",
			__func__, inode->i_ino);
	} else if (end_pos > nfsi->layout->plh_lwb)
		/* Extend the last-write-byte watermark. */
		nfsi->layout->plh_lwb = end_pos;
	if (!test_and_set_bit(NFS_LSEG_LAYOUTCOMMIT, &lseg->pls_flags)) {
		/* references matched in nfs4_layoutcommit_release */
		pnfs_get_lseg(lseg);
	}
	spin_unlock(&inode->i_lock);
	dprintk("%s: lseg %p end_pos %llu\n",
		__func__, lseg, nfsi->layout->plh_lwb);

	/* if pnfs_layoutcommit_inode() runs between inode locks, the next one
	 * will be a noop because NFS_INO_LAYOUTCOMMIT will not be set */
	if (mark_as_dirty)
		mark_inode_dirty_sync(inode);
}
EXPORT_SYMBOL_GPL(pnfs_set_layoutcommit);
|
|
|
|
|
2011-07-31 04:52:38 +04:00
|
|
|
void pnfs_cleanup_layoutcommit(struct nfs4_layoutcommit_data *data)
|
|
|
|
{
|
|
|
|
struct nfs_server *nfss = NFS_SERVER(data->args.inode);
|
|
|
|
|
|
|
|
if (nfss->pnfs_curr_ld->cleanup_layoutcommit)
|
|
|
|
nfss->pnfs_curr_ld->cleanup_layoutcommit(data);
|
2013-03-20 20:34:32 +04:00
|
|
|
pnfs_list_write_lseg_done(data->args.inode, &data->lseg_list);
|
2011-07-31 04:52:38 +04:00
|
|
|
}
|
|
|
|
|
2011-03-12 10:58:09 +03:00
|
|
|
/*
|
|
|
|
* For the LAYOUT4_NFSV4_1_FILES layout type, NFS_DATA_SYNC WRITEs and
|
|
|
|
* NFS_UNSTABLE WRITEs with a COMMIT to data servers must store enough
|
|
|
|
* data to disk to allow the server to recover the data if it crashes.
|
|
|
|
* LAYOUTCOMMIT is only needed when the NFL4_UFLG_COMMIT_THRU_MDS flag
|
|
|
|
* is off, and a COMMIT is sent to a data server, or
|
|
|
|
* if WRITEs to a data server return NFS_DATA_SYNC.
|
|
|
|
*/
|
2011-03-23 16:27:54 +03:00
|
|
|
int
|
2011-03-12 10:58:10 +03:00
|
|
|
pnfs_layoutcommit_inode(struct inode *inode, bool sync)
|
2011-03-23 16:27:54 +03:00
|
|
|
{
|
2014-08-21 20:09:25 +04:00
|
|
|
struct pnfs_layoutdriver_type *ld = NFS_SERVER(inode)->pnfs_curr_ld;
|
2011-03-23 16:27:54 +03:00
|
|
|
struct nfs4_layoutcommit_data *data;
|
|
|
|
struct nfs_inode *nfsi = NFS_I(inode);
|
|
|
|
loff_t end_pos;
|
2014-01-13 22:34:36 +04:00
|
|
|
int status;
|
2011-03-23 16:27:54 +03:00
|
|
|
|
2014-01-13 22:34:36 +04:00
|
|
|
if (!pnfs_layoutcommit_outstanding(inode))
|
2011-03-12 10:58:09 +03:00
|
|
|
return 0;
|
|
|
|
|
2014-01-13 22:34:36 +04:00
|
|
|
dprintk("--> %s inode %lu\n", __func__, inode->i_ino);
|
2011-10-24 07:21:17 +04:00
|
|
|
|
2014-01-13 22:34:36 +04:00
|
|
|
status = -EAGAIN;
|
2011-10-24 07:21:17 +04:00
|
|
|
if (test_and_set_bit(NFS_INO_LAYOUTCOMMITTING, &nfsi->flags)) {
|
2014-01-13 22:34:36 +04:00
|
|
|
if (!sync)
|
|
|
|
goto out;
|
sched: Remove proliferation of wait_on_bit() action functions
The current "wait_on_bit" interface requires an 'action'
function to be provided which does the actual waiting.
There are over 20 such functions, many of them identical.
Most cases can be satisfied by one of just two functions, one
which uses io_schedule() and one which just uses schedule().
So:
Rename wait_on_bit and wait_on_bit_lock to
wait_on_bit_action and wait_on_bit_lock_action
to make it explicit that they need an action function.
Introduce new wait_on_bit{,_lock} and wait_on_bit{,_lock}_io
which are *not* given an action function but implicitly use
a standard one.
The decision to error-out if a signal is pending is now made
based on the 'mode' argument rather than being encoded in the action
function.
All instances of the old wait_on_bit and wait_on_bit_lock which
can use the new version have been changed accordingly and their
action functions have been discarded.
wait_on_bit{_lock} does not return any specific error code in the
event of a signal so the caller must check for non-zero and
interpolate their own error code as appropriate.
The wait_on_bit() call in __fscache_wait_on_invalidate() was
ambiguous as it specified TASK_UNINTERRUPTIBLE but used
fscache_wait_bit_interruptible as an action function.
David Howells confirms this should be uniformly
"uninterruptible"
The main remaining user of wait_on_bit{,_lock}_action is NFS
which needs to use a freezer-aware schedule() call.
A comment in fs/gfs2/glock.c notes that having multiple 'action'
functions is useful as they display differently in the 'wchan'
field of 'ps'. (and /proc/$PID/wchan).
As the new bit_wait{,_io} functions are tagged "__sched", they
will not show up at all, but something higher in the stack. So
the distinction will still be visible, only with different
function names (gds2_glock_wait versus gfs2_glock_dq_wait in the
gfs2/glock.c case).
Since first version of this patch (against 3.15) two new action
functions appeared, on in NFS and one in CIFS. CIFS also now
uses an action function that makes the same freezer aware
schedule call as NFS.
Signed-off-by: NeilBrown <neilb@suse.de>
Acked-by: David Howells <dhowells@redhat.com> (fscache, keys)
Acked-by: Steven Whitehouse <swhiteho@redhat.com> (gfs2)
Acked-by: Peter Zijlstra <peterz@infradead.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Steve French <sfrench@samba.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Link: http://lkml.kernel.org/r/20140707051603.28027.72349.stgit@notabene.brown
Signed-off-by: Ingo Molnar <mingo@kernel.org>
2014-07-07 09:16:04 +04:00
|
|
|
status = wait_on_bit_lock_action(&nfsi->flags,
|
2014-01-13 22:34:36 +04:00
|
|
|
NFS_INO_LAYOUTCOMMITTING,
|
|
|
|
nfs_wait_bit_killable,
|
|
|
|
TASK_KILLABLE);
|
2011-10-24 07:21:17 +04:00
|
|
|
if (status)
|
2014-01-13 22:34:36 +04:00
|
|
|
goto out;
|
2011-10-24 07:21:17 +04:00
|
|
|
}
|
|
|
|
|
2014-01-13 22:34:36 +04:00
|
|
|
status = -ENOMEM;
|
|
|
|
/* Note kzalloc ensures data->res.seq_res.sr_slot == NULL */
|
|
|
|
data = kzalloc(sizeof(*data), GFP_NOFS);
|
|
|
|
if (!data)
|
|
|
|
goto clear_layoutcommitting;
|
|
|
|
|
|
|
|
status = 0;
|
2011-03-12 10:58:09 +03:00
|
|
|
spin_lock(&inode->i_lock);
|
2014-01-13 22:34:36 +04:00
|
|
|
if (!test_and_clear_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags))
|
|
|
|
goto out_unlock;
|
2011-07-31 04:52:33 +04:00
|
|
|
|
2014-01-13 22:34:36 +04:00
|
|
|
INIT_LIST_HEAD(&data->lseg_list);
|
2011-07-31 04:52:33 +04:00
|
|
|
pnfs_list_write_lseg(inode, &data->lseg_list);
|
2011-03-23 16:27:54 +03:00
|
|
|
|
2011-07-31 04:52:31 +04:00
|
|
|
end_pos = nfsi->layout->plh_lwb;
|
2011-03-23 16:27:54 +03:00
|
|
|
|
2012-03-05 03:13:56 +04:00
|
|
|
nfs4_stateid_copy(&data->args.stateid, &nfsi->layout->plh_stateid);
|
2020-04-02 22:47:08 +03:00
|
|
|
data->cred = get_cred(nfsi->layout->plh_lc_cred);
|
2011-03-23 16:27:54 +03:00
|
|
|
spin_unlock(&inode->i_lock);
|
|
|
|
|
|
|
|
data->args.inode = inode;
|
|
|
|
nfs_fattr_init(&data->fattr);
|
|
|
|
data->args.bitmask = NFS_SERVER(inode)->cache_consistency_bitmask;
|
|
|
|
data->res.fattr = &data->fattr;
|
2016-06-27 01:54:58 +03:00
|
|
|
if (end_pos != 0)
|
|
|
|
data->args.lastbytewritten = end_pos - 1;
|
|
|
|
else
|
|
|
|
data->args.lastbytewritten = U64_MAX;
|
2011-03-23 16:27:54 +03:00
|
|
|
data->res.server = NFS_SERVER(inode);
|
|
|
|
|
2014-08-21 20:09:25 +04:00
|
|
|
if (ld->prepare_layoutcommit) {
|
|
|
|
status = ld->prepare_layoutcommit(&data->args);
|
|
|
|
if (status) {
|
2018-12-03 03:30:31 +03:00
|
|
|
put_cred(data->cred);
|
2014-08-21 20:09:25 +04:00
|
|
|
spin_lock(&inode->i_lock);
|
2015-03-26 03:10:20 +03:00
|
|
|
set_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags);
|
|
|
|
if (end_pos > nfsi->layout->plh_lwb)
|
2014-08-21 20:09:25 +04:00
|
|
|
nfsi->layout->plh_lwb = end_pos;
|
2015-07-10 22:58:42 +03:00
|
|
|
goto out_unlock;
|
2014-08-21 20:09:25 +04:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2011-03-23 16:27:54 +03:00
|
|
|
status = nfs4_proc_layoutcommit(data, sync);
|
|
|
|
out:
|
2011-10-24 07:21:17 +04:00
|
|
|
if (status)
|
|
|
|
mark_inode_dirty_sync(inode);
|
2011-03-23 16:27:54 +03:00
|
|
|
dprintk("<-- %s status %d\n", __func__, status);
|
|
|
|
return status;
|
2014-01-13 22:34:36 +04:00
|
|
|
out_unlock:
|
|
|
|
spin_unlock(&inode->i_lock);
|
2011-10-24 07:21:17 +04:00
|
|
|
kfree(data);
|
2014-01-13 22:34:36 +04:00
|
|
|
clear_layoutcommitting:
|
|
|
|
pnfs_clear_layoutcommitting(inode);
|
2011-10-24 07:21:17 +04:00
|
|
|
goto out;
|
2011-03-23 16:27:54 +03:00
|
|
|
}
|
2014-08-07 06:12:38 +04:00
|
|
|
EXPORT_SYMBOL_GPL(pnfs_layoutcommit_inode);
|
2012-05-23 13:02:35 +04:00
|
|
|
|
2015-03-25 21:14:42 +03:00
|
|
|
int
|
|
|
|
pnfs_generic_sync(struct inode *inode, bool datasync)
|
|
|
|
{
|
|
|
|
return pnfs_layoutcommit_inode(inode, true);
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(pnfs_generic_sync);
|
|
|
|
|
2012-05-23 13:02:35 +04:00
|
|
|
struct nfs4_threshold *pnfs_mdsthreshold_alloc(void)
|
|
|
|
{
|
|
|
|
struct nfs4_threshold *thp;
|
|
|
|
|
|
|
|
thp = kzalloc(sizeof(*thp), GFP_NOFS);
|
|
|
|
if (!thp) {
|
|
|
|
dprintk("%s mdsthreshold allocation failed\n", __func__);
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
return thp;
|
|
|
|
}
|
2015-06-23 14:51:57 +03:00
|
|
|
|
2015-06-25 13:19:32 +03:00
|
|
|
#if IS_ENABLED(CONFIG_NFS_V4_2)
|
2015-06-23 14:51:57 +03:00
|
|
|
/*
 * pnfs_report_layoutstat - send a LAYOUTSTATS report for @inode
 * @inode: inode whose per-layout I/O statistics should be reported
 * @gfp_flags: allocation mode for the layoutstat call data
 *
 * Issues the NFSv4.2 LAYOUTSTATS operation to the server.  At most one
 * report per inode may be in flight at a time, serialized by the
 * NFS_INO_LAYOUTSTATS flag bit.
 *
 * Returns 0 on success or when the report is skipped (pNFS not enabled,
 * the layout driver cannot prepare layoutstats, the server lacks the
 * LAYOUTSTATS capability, a report is already in flight, or the inode
 * has no layout); otherwise a negative errno.
 */
int
pnfs_report_layoutstat(struct inode *inode, gfp_t gfp_flags)
{
	struct pnfs_layoutdriver_type *ld = NFS_SERVER(inode)->pnfs_curr_ld;
	struct nfs_server *server = NFS_SERVER(inode);
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs42_layoutstat_data *data;
	struct pnfs_layout_hdr *hdr;
	int status = 0;

	/* Nothing to report unless pNFS is active and the layout driver
	 * knows how to fill in its driver-specific statistics. */
	if (!pnfs_enabled_sb(server) || !ld->prepare_layoutstats)
		goto out;

	if (!nfs_server_capable(inode, NFS_CAP_LAYOUTSTATS))
		goto out;

	/* Only one LAYOUTSTATS RPC per inode at a time; if the bit was
	 * already set a report is in flight, so skip silently. */
	if (test_and_set_bit(NFS_INO_LAYOUTSTATS, &nfsi->flags))
		goto out;

	spin_lock(&inode->i_lock);
	if (!NFS_I(inode)->layout) {
		spin_unlock(&inode->i_lock);
		goto out_clear_layoutstats;
	}
	/* Take a reference on the layout header under i_lock so it cannot
	 * be freed while the RPC is being prepared/sent. */
	hdr = NFS_I(inode)->layout;
	pnfs_get_layout_hdr(hdr);
	spin_unlock(&inode->i_lock);

	data = kzalloc(sizeof(*data), gfp_flags);
	if (!data) {
		status = -ENOMEM;
		goto out_put;
	}

	data->args.fh = NFS_FH(inode);
	data->args.inode = inode;
	/* Let the layout driver populate the driver-specific stats args. */
	status = ld->prepare_layoutstats(&data->args);
	if (status)
		goto out_free;

	/* On success, ownership of @data (and the hdr reference) passes to
	 * the async RPC machinery; the in-flight bit is presumably cleared
	 * on RPC completion — NOTE(review): confirm in nfs42 proc code,
	 * which is not visible here. */
	status = nfs42_proc_layoutstats_generic(NFS_SERVER(inode), data);

out:
	dprintk("%s returns %d\n", __func__, status);
	return status;

out_free:
	kfree(data);
out_put:
	pnfs_put_layout_hdr(hdr);
out_clear_layoutstats:
	/* Error path: drop the in-flight marker.  Barriers order prior
	 * stores against the bit clear so another CPU starting a new
	 * report sees a consistent state. */
	smp_mb__before_atomic();
	clear_bit(NFS_INO_LAYOUTSTATS, &nfsi->flags);
	smp_mb__after_atomic();
	goto out;
}
EXPORT_SYMBOL_GPL(pnfs_report_layoutstat);
|
2015-06-25 13:19:32 +03:00
|
|
|
#endif
|
2015-08-25 03:39:18 +03:00
|
|
|
|
|
|
|
/*
 * Tunable controlling the layoutstats reporting interval.
 * NOTE(review): units are presumably seconds, with 0 selecting the
 * built-in default — confirm against the nfs42 layoutstats scheduling
 * code, which is not visible in this file section.
 * Adjustable at runtime via the "layoutstats_timer" module parameter
 * (mode 0644) and exported for use by the NFSv4.2 code.
 */
unsigned int layoutstats_timer;
module_param(layoutstats_timer, uint, 0644);
EXPORT_SYMBOL_GPL(layoutstats_timer);
|