// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/fs/nfs/delegation.c
 *
 * Copyright (C) 2004 Trond Myklebust
 *
 * NFS file delegation management
 *
 */
#include <linux/completion.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/iversion.h>

#include <linux/nfs4.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_xdr.h>

#include "nfs4_fs.h"
#include "nfs4session.h"
#include "delegation.h"
#include "internal.h"
#include "nfs4trace.h"

#define NFS_DEFAULT_DELEGATION_WATERMARK (5000U)

static atomic_long_t nfs_active_delegations;
static unsigned nfs_delegation_watermark = NFS_DEFAULT_DELEGATION_WATERMARK;

static void __nfs_free_delegation(struct nfs_delegation *delegation)
{
	put_cred(delegation->cred);
	delegation->cred = NULL;
	kfree_rcu(delegation, rcu);
}

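/*
 * Mark a delegation as revoked: invalidate its stateid, drop it from the
 * active delegation count, and clear the delegated cache verifier unless a
 * return is already in progress.
 */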
static void nfs_mark_delegation_revoked(struct nfs_delegation *delegation)
{
	if (!test_and_set_bit(NFS_DELEGATION_REVOKED, &delegation->flags)) {
		delegation->stateid.type = NFS4_INVALID_STATEID_TYPE;
		atomic_long_dec(&nfs_active_delegations);
		if (!test_bit(NFS_DELEGATION_RETURNING, &delegation->flags))
			nfs_clear_verifier_delegated(delegation->inode);
	}
}

static struct nfs_delegation *nfs_get_delegation(struct nfs_delegation *delegation)
{
	refcount_inc(&delegation->refcount);
	return delegation;
}

static void nfs_put_delegation(struct nfs_delegation *delegation)
{
	if (refcount_dec_and_test(&delegation->refcount))
		__nfs_free_delegation(delegation);
}

static void nfs_free_delegation(struct nfs_delegation *delegation)
{
	nfs_mark_delegation_revoked(delegation);
	nfs_put_delegation(delegation);
}

/**
 * nfs_mark_delegation_referenced - set delegation's REFERENCED flag
 * @delegation: delegation to process
 *
 */
void nfs_mark_delegation_referenced(struct nfs_delegation *delegation)
{
	set_bit(NFS_DELEGATION_REFERENCED, &delegation->flags);
}

static void nfs_mark_return_delegation(struct nfs_server *server,
				       struct nfs_delegation *delegation)
{
	set_bit(NFS_DELEGATION_RETURN, &delegation->flags);
	set_bit(NFS4CLNT_DELEGRETURN, &server->nfs_client->cl_state);
}

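/*
 * A delegation is only usable if it covers the requested open mode(s) and
 * has been neither revoked nor scheduled for return.
 */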
static bool
nfs4_is_valid_delegation(const struct nfs_delegation *delegation,
		fmode_t flags)
{
	if (delegation != NULL && (delegation->type & flags) == flags &&
	    !test_bit(NFS_DELEGATION_REVOKED, &delegation->flags) &&
	    !test_bit(NFS_DELEGATION_RETURNING, &delegation->flags))
		return true;
	return false;
}

struct nfs_delegation *nfs4_get_valid_delegation(const struct inode *inode)
{
	struct nfs_delegation *delegation;

	delegation = rcu_dereference(NFS_I(inode)->delegation);
	if (nfs4_is_valid_delegation(delegation, 0))
		return delegation;
	return NULL;
}

static int
nfs4_do_check_delegation(struct inode *inode, fmode_t flags, bool mark)
{
	struct nfs_delegation *delegation;
	int ret = 0;

	flags &= FMODE_READ|FMODE_WRITE;
	rcu_read_lock();
	delegation = rcu_dereference(NFS_I(inode)->delegation);
	if (nfs4_is_valid_delegation(delegation, flags)) {
		if (mark)
			nfs_mark_delegation_referenced(delegation);
		ret = 1;
	}
	rcu_read_unlock();
	return ret;
}
/**
 * nfs4_have_delegation - check if inode has a delegation, mark it
 * NFS_DELEGATION_REFERENCED if there is one.
 * @inode: inode to check
 * @flags: delegation types to check for
 *
 * Returns one if inode has the indicated delegation, otherwise zero.
 */
int nfs4_have_delegation(struct inode *inode, fmode_t flags)
{
	return nfs4_do_check_delegation(inode, flags, true);
}

/*
 * nfs4_check_delegation - check if inode has a delegation, do not mark
 * NFS_DELEGATION_REFERENCED if it has one.
 */
int nfs4_check_delegation(struct inode *inode, fmode_t flags)
{
	return nfs4_do_check_delegation(inode, flags, false);
}

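/*
 * Reclaim with the server all POSIX and flock locks that were held under
 * the delegation stateid for this open state.
 */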
static int nfs_delegation_claim_locks(struct nfs4_state *state, const nfs4_stateid *stateid)
{
	struct inode *inode = state->inode;
	struct file_lock *fl;
	struct file_lock_context *flctx = inode->i_flctx;
	struct list_head *list;
	int status = 0;

	if (flctx == NULL)
		goto out;

	list = &flctx->flc_posix;
	spin_lock(&flctx->flc_lock);
restart:
	list_for_each_entry(fl, list, fl_list) {
		if (nfs_file_open_context(fl->fl_file)->state != state)
			continue;
		spin_unlock(&flctx->flc_lock);
		status = nfs4_lock_delegation_recall(fl, state, stateid);
		if (status < 0)
			goto out;
		spin_lock(&flctx->flc_lock);
	}
	if (list == &flctx->flc_posix) {
		list = &flctx->flc_flock;
		goto restart;
	}
	spin_unlock(&flctx->flc_lock);
out:
	return status;
}

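/*
 * Convert open state that was covered by the delegation identified by
 * @stateid back into regular open stateids, restarting the scan of
 * nfsi->open_files after each context is processed.
 */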
static int nfs_delegation_claim_opens(struct inode *inode,
		const nfs4_stateid *stateid, fmode_t type)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_open_context *ctx;
	struct nfs4_state_owner *sp;
	struct nfs4_state *state;
	unsigned int seq;
	int err;

again:
	rcu_read_lock();
	list_for_each_entry_rcu(ctx, &nfsi->open_files, list) {
		state = ctx->state;
		if (state == NULL)
			continue;
		if (!test_bit(NFS_DELEGATED_STATE, &state->flags))
			continue;
		if (!nfs4_valid_open_stateid(state))
			continue;
		if (!nfs4_stateid_match(&state->stateid, stateid))
			continue;
		if (!get_nfs_open_context(ctx))
			continue;
		rcu_read_unlock();
		sp = state->owner;
		/* Block nfs4_proc_unlck */
		mutex_lock(&sp->so_delegreturn_mutex);
		seq = raw_seqcount_begin(&sp->so_reclaim_seqcount);
		err = nfs4_open_delegation_recall(ctx, state, stateid);
		if (!err)
			err = nfs_delegation_claim_locks(state, stateid);
		if (!err && read_seqcount_retry(&sp->so_reclaim_seqcount, seq))
			err = -EAGAIN;
		mutex_unlock(&sp->so_delegreturn_mutex);
		put_nfs_open_context(ctx);
		if (err != 0)
			return err;
		goto again;
	}
	rcu_read_unlock();
	return 0;
}

/**
 * nfs_inode_reclaim_delegation - process a delegation reclaim request
 * @inode: inode to process
 * @cred: credential to use for request
 * @type: delegation type
 * @stateid: delegation stateid
 * @pagemod_limit: write delegation "space_limit"
 *
 */
void nfs_inode_reclaim_delegation(struct inode *inode, const struct cred *cred,
				  fmode_t type,
				  const nfs4_stateid *stateid,
				  unsigned long pagemod_limit)
{
	struct nfs_delegation *delegation;
	const struct cred *oldcred = NULL;

	rcu_read_lock();
	delegation = rcu_dereference(NFS_I(inode)->delegation);
	if (delegation != NULL) {
		spin_lock(&delegation->lock);
		if (nfs4_is_valid_delegation(delegation, 0)) {
			nfs4_stateid_copy(&delegation->stateid, stateid);
			delegation->type = type;
			delegation->pagemod_limit = pagemod_limit;
			oldcred = delegation->cred;
			delegation->cred = get_cred(cred);
			clear_bit(NFS_DELEGATION_NEED_RECLAIM,
				  &delegation->flags);
			spin_unlock(&delegation->lock);
			rcu_read_unlock();
			put_cred(oldcred);
			trace_nfs4_reclaim_delegation(inode, type);
			return;
		}
		/* We appear to have raced with a delegation return. */
		spin_unlock(&delegation->lock);
	}
	rcu_read_unlock();
	nfs_inode_set_delegation(inode, cred, type, stateid, pagemod_limit);
}

static int nfs_do_return_delegation(struct inode *inode, struct nfs_delegation *delegation, int issync)
{
	const struct cred *cred;
	int res = 0;

	if (!test_bit(NFS_DELEGATION_REVOKED, &delegation->flags)) {
		spin_lock(&delegation->lock);
		cred = get_cred(delegation->cred);
		spin_unlock(&delegation->lock);
		res = nfs4_proc_delegreturn(inode, cred,
				&delegation->stateid,
				issync);
		put_cred(cred);
	}
	return res;
}

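/*
 * Take a reference to the delegation's inode. If the inode is going away,
 * mark the delegation so that the state manager skips it.
 */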
static struct inode *nfs_delegation_grab_inode(struct nfs_delegation *delegation)
{
	struct inode *inode = NULL;

	spin_lock(&delegation->lock);
	if (delegation->inode != NULL)
		inode = igrab(delegation->inode);
	if (!inode)
		set_bit(NFS_DELEGATION_INODE_FREEING, &delegation->flags);
	spin_unlock(&delegation->lock);
	return inode;
}

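/*
 * Begin returning a delegation: set NFS_DELEGATION_RETURNING and take a
 * reference that nfs_end_delegation_return() drops. Returns NULL if there
 * is no delegation or a return is already in progress.
 */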
static struct nfs_delegation *
nfs_start_delegation_return_locked(struct nfs_inode *nfsi)
{
	struct nfs_delegation *ret = NULL;
	struct nfs_delegation *delegation = rcu_dereference(nfsi->delegation);

	if (delegation == NULL)
		goto out;
	spin_lock(&delegation->lock);
	if (!test_and_set_bit(NFS_DELEGATION_RETURNING, &delegation->flags)) {
		clear_bit(NFS_DELEGATION_RETURN_DELAYED, &delegation->flags);
		/* Refcount matched in nfs_end_delegation_return() */
		ret = nfs_get_delegation(delegation);
	}
	spin_unlock(&delegation->lock);
	if (ret)
		nfs_clear_verifier_delegated(&nfsi->vfs_inode);
out:
	return ret;
}

static struct nfs_delegation *
nfs_start_delegation_return(struct nfs_inode *nfsi)
{
	struct nfs_delegation *delegation;

	rcu_read_lock();
	delegation = nfs_start_delegation_return_locked(nfsi);
	rcu_read_unlock();
	return delegation;
}

static void nfs_abort_delegation_return(struct nfs_delegation *delegation,
					struct nfs_client *clp, int err)
{
	spin_lock(&delegation->lock);
	clear_bit(NFS_DELEGATION_RETURNING, &delegation->flags);
	if (err == -EAGAIN) {
		set_bit(NFS_DELEGATION_RETURN_DELAYED, &delegation->flags);
		set_bit(NFS4CLNT_DELEGRETURN_DELAYED, &clp->cl_state);
	}
	spin_unlock(&delegation->lock);
}

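/*
 * Unlink the delegation from the inode and the per-server list.
 * The caller must hold clp->cl_lock. Returns the detached delegation
 * for disposal, or NULL if it was already detached.
 */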
static struct nfs_delegation *
nfs_detach_delegation_locked(struct nfs_inode *nfsi,
		struct nfs_delegation *delegation,
		struct nfs_client *clp)
{
	struct nfs_delegation *deleg_cur =
		rcu_dereference_protected(nfsi->delegation,
				lockdep_is_held(&clp->cl_lock));

	if (deleg_cur == NULL || delegation != deleg_cur)
		return NULL;

	spin_lock(&delegation->lock);
	if (!delegation->inode) {
		spin_unlock(&delegation->lock);
		return NULL;
	}
	list_del_rcu(&delegation->super_list);
	delegation->inode = NULL;
	rcu_assign_pointer(nfsi->delegation, NULL);
	spin_unlock(&delegation->lock);
	return delegation;
}

static struct nfs_delegation *nfs_detach_delegation(struct nfs_inode *nfsi,
		struct nfs_delegation *delegation,
		struct nfs_server *server)
{
	struct nfs_client *clp = server->nfs_client;

	spin_lock(&clp->cl_lock);
	delegation = nfs_detach_delegation_locked(nfsi, delegation, clp);
	spin_unlock(&clp->cl_lock);
	return delegation;
}

static struct nfs_delegation *
nfs_inode_detach_delegation(struct inode *inode)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_server *server = NFS_SERVER(inode);
	struct nfs_delegation *delegation;

	rcu_read_lock();
	delegation = rcu_dereference(nfsi->delegation);
	if (delegation != NULL)
		delegation = nfs_detach_delegation(nfsi, delegation, server);
	rcu_read_unlock();
	return delegation;
}

static void
nfs_update_delegation_cred(struct nfs_delegation *delegation,
		const struct cred *cred)
{
	const struct cred *old;

	if (cred_fscmp(delegation->cred, cred) != 0) {
		old = xchg(&delegation->cred, get_cred(cred));
		put_cred(old);
	}
}

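/*
 * Update an existing delegation in place when the server hands out a newer
 * stateid for the same delegation, reviving it if it had been revoked.
 */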
static void
nfs_update_inplace_delegation(struct nfs_delegation *delegation,
		const struct nfs_delegation *update)
{
	if (nfs4_stateid_is_newer(&update->stateid, &delegation->stateid)) {
		delegation->stateid.seqid = update->stateid.seqid;
		smp_wmb();
		delegation->type = update->type;
		delegation->pagemod_limit = update->pagemod_limit;
		if (test_bit(NFS_DELEGATION_REVOKED, &delegation->flags)) {
			delegation->change_attr = update->change_attr;
			nfs_update_delegation_cred(delegation, update->cred);
			/* smp_mb__before_atomic() is implicit due to xchg() */
			clear_bit(NFS_DELEGATION_REVOKED, &delegation->flags);
			atomic_long_inc(&nfs_active_delegations);
		}
	}
}

/**
 * nfs_inode_set_delegation - set up a delegation on an inode
 * @inode: inode to which delegation applies
 * @cred: cred to use for subsequent delegation processing
 * @type: delegation type
 * @stateid: delegation stateid
 * @pagemod_limit: write delegation "space_limit"
 *
 * Returns zero on success, or a negative errno value.
 */
int nfs_inode_set_delegation(struct inode *inode, const struct cred *cred,
				  fmode_t type,
				  const nfs4_stateid *stateid,
				  unsigned long pagemod_limit)
{
	struct nfs_server *server = NFS_SERVER(inode);
	struct nfs_client *clp = server->nfs_client;
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_delegation *delegation, *old_delegation;
	struct nfs_delegation *freeme = NULL;
	int status = 0;

	delegation = kmalloc(sizeof(*delegation), GFP_NOFS);
	if (delegation == NULL)
		return -ENOMEM;
	nfs4_stateid_copy(&delegation->stateid, stateid);
	refcount_set(&delegation->refcount, 1);
	delegation->type = type;
	delegation->pagemod_limit = pagemod_limit;
	delegation->change_attr = inode_peek_iversion_raw(inode);
	delegation->cred = get_cred(cred);
	delegation->inode = inode;
	delegation->flags = 1<<NFS_DELEGATION_REFERENCED;
	spin_lock_init(&delegation->lock);

	spin_lock(&clp->cl_lock);
	old_delegation = rcu_dereference_protected(nfsi->delegation,
					lockdep_is_held(&clp->cl_lock));
	if (old_delegation == NULL)
		goto add_new;
	/* Is this an update of the existing delegation? */
	if (nfs4_stateid_match_other(&old_delegation->stateid,
				&delegation->stateid)) {
		spin_lock(&old_delegation->lock);
		nfs_update_inplace_delegation(old_delegation,
				delegation);
		spin_unlock(&old_delegation->lock);
		goto out;
	}
	if (!test_bit(NFS_DELEGATION_REVOKED, &old_delegation->flags)) {
		/*
		 * Deal with broken servers that hand out two
		 * delegations for the same file.
		 * Allow for upgrades to a WRITE delegation, but
		 * nothing else.
		 */
		dfprintk(FILE, "%s: server %s handed out "
				"a duplicate delegation!\n",
				__func__, clp->cl_hostname);
		if (delegation->type == old_delegation->type ||
		    !(delegation->type & FMODE_WRITE)) {
			freeme = delegation;
			delegation = NULL;
			goto out;
		}
		if (test_and_set_bit(NFS_DELEGATION_RETURNING,
					&old_delegation->flags))
			goto out;
	}
	freeme = nfs_detach_delegation_locked(nfsi, old_delegation, clp);
	if (freeme == NULL)
		goto out;
add_new:
	/*
	 * If we didn't revalidate the change attribute before setting
	 * the delegation, then pre-emptively ask for a full attribute
	 * cache revalidation.
	 */
	spin_lock(&inode->i_lock);
	if (NFS_I(inode)->cache_validity & NFS_INO_INVALID_CHANGE)
		nfs_set_cache_invalid(inode,
			NFS_INO_INVALID_ATIME | NFS_INO_INVALID_CTIME |
			NFS_INO_INVALID_MTIME | NFS_INO_INVALID_SIZE |
			NFS_INO_INVALID_BLOCKS | NFS_INO_INVALID_NLINK |
			NFS_INO_INVALID_OTHER | NFS_INO_INVALID_DATA |
			NFS_INO_INVALID_ACCESS | NFS_INO_INVALID_ACL |
			NFS_INO_INVALID_XATTR);
	spin_unlock(&inode->i_lock);

	list_add_tail_rcu(&delegation->super_list, &server->delegations);
	rcu_assign_pointer(nfsi->delegation, delegation);
	delegation = NULL;

	atomic_long_inc(&nfs_active_delegations);

	trace_nfs4_set_delegation(inode, type);
out:
	spin_unlock(&clp->cl_lock);
	if (delegation != NULL)
		__nfs_free_delegation(delegation);
	if (freeme != NULL) {
		nfs_do_return_delegation(inode, freeme, 0);
		nfs_free_delegation(freeme);
	}
	return status;
}

/*
 * Basic procedure for returning a delegation to the server
 */
static int nfs_end_delegation_return(struct inode *inode, struct nfs_delegation *delegation, int issync)
{
	struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
	unsigned int mode = O_WRONLY | O_RDWR;
	int err = 0;

	if (delegation == NULL)
		return 0;

	if (!issync)
		mode |= O_NONBLOCK;
	/* Recall of any remaining application leases */
	err = break_lease(inode, mode);

	while (err == 0) {
		if (test_bit(NFS_DELEGATION_REVOKED, &delegation->flags))
			break;
		err = nfs_delegation_claim_opens(inode, &delegation->stateid,
				delegation->type);
		if (!issync || err != -EAGAIN)
			break;
		/*
		 * Guard against state recovery
		 */
		err = nfs4_wait_clnt_recover(clp);
	}

	if (err) {
		nfs_abort_delegation_return(delegation, clp, err);
		goto out;
	}

	err = nfs_do_return_delegation(inode, delegation, issync);
out:
	/* Refcount matched in nfs_start_delegation_return_locked() */
	nfs_put_delegation(delegation);
	return err;
}

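/*
 * A delegation needs returning if it is explicitly marked for return, or is
 * marked return-if-closed and the inode has no remaining opens. Delegations
 * that are already being returned, delayed or revoked are skipped.
 */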
static bool nfs_delegation_need_return(struct nfs_delegation *delegation)
{
	bool ret = false;

	if (test_and_clear_bit(NFS_DELEGATION_RETURN, &delegation->flags))
		ret = true;
	else if (test_bit(NFS_DELEGATION_RETURN_IF_CLOSED, &delegation->flags)) {
		struct inode *inode;

		spin_lock(&delegation->lock);
		inode = delegation->inode;
		if (inode && list_empty(&NFS_I(inode)->open_files))
			ret = true;
		spin_unlock(&delegation->lock);
	}
	if (ret)
		clear_bit(NFS_DELEGATION_RETURN_IF_CLOSED, &delegation->flags);
	if (test_bit(NFS_DELEGATION_RETURNING, &delegation->flags) ||
	    test_bit(NFS_DELEGATION_RETURN_DELAYED, &delegation->flags) ||
	    test_bit(NFS_DELEGATION_REVOKED, &delegation->flags))
		ret = false;

	return ret;
}

static int nfs_server_return_marked_delegations(struct nfs_server *server,
		void __always_unused *data)
{
	struct nfs_delegation *delegation;
	struct nfs_delegation *prev;
	struct inode *inode;
	struct inode *place_holder = NULL;
	struct nfs_delegation *place_holder_deleg = NULL;
	int err = 0;

restart:
	/*
	 * To avoid quadratic looping we hold a reference
	 * to an inode place_holder.  Each time we restart, we
	 * list delegation in the server from the delegations
	 * of that inode.
	 * prev is an RCU-protected pointer to a delegation which
	 * wasn't marked for return and might be a good choice for
	 * the next place_holder.
	 */
	prev = NULL;
	delegation = NULL;
	rcu_read_lock();
	if (place_holder)
		delegation = rcu_dereference(NFS_I(place_holder)->delegation);
	if (!delegation || delegation != place_holder_deleg)
		delegation = list_entry_rcu(server->delegations.next,
					    struct nfs_delegation, super_list);
	list_for_each_entry_from_rcu(delegation, &server->delegations, super_list) {
		struct inode *to_put = NULL;

		if (test_bit(NFS_DELEGATION_INODE_FREEING, &delegation->flags))
			continue;
		if (!nfs_delegation_need_return(delegation)) {
			if (nfs4_is_valid_delegation(delegation, 0))
				prev = delegation;
			continue;
		}
2018-04-30 07:31:30 +03:00
|
|
|
|
2020-02-27 16:29:02 +03:00
|
|
|
if (prev) {
|
|
|
|
struct inode *tmp = nfs_delegation_grab_inode(prev);
|
|
|
|
if (tmp) {
|
|
|
|
to_put = place_holder;
|
|
|
|
place_holder = tmp;
|
|
|
|
place_holder_deleg = prev;
|
2015-02-26 17:57:34 +03:00
|
|
|
}
|
2020-02-27 16:29:02 +03:00
|
|
|
}
|
2010-12-24 04:33:04 +03:00
|
|
|
|
2020-02-27 16:29:02 +03:00
|
|
|
inode = nfs_delegation_grab_inode(delegation);
|
|
|
|
if (inode == NULL) {
|
|
|
|
rcu_read_unlock();
|
|
|
|
iput(to_put);
|
|
|
|
goto restart;
|
2009-12-03 16:10:17 +03:00
|
|
|
}
|
2020-02-27 16:29:02 +03:00
|
|
|
delegation = nfs_start_delegation_return_locked(NFS_I(inode));
|
|
|
|
rcu_read_unlock();
|
|
|
|
|
|
|
|
iput(to_put);
|
|
|
|
|
|
|
|
err = nfs_end_delegation_return(inode, delegation, 0);
|
|
|
|
iput(inode);
|
|
|
|
cond_resched();
|
|
|
|
if (!err)
|
|
|
|
goto restart;
|
|
|
|
set_bit(NFS4CLNT_DELEGRETURN, &server->nfs_client->cl_state);
|
|
|
|
goto out;
|
2008-12-23 23:21:46 +03:00
|
|
|
}
|
|
|
|
rcu_read_unlock();
|
2020-02-27 16:29:02 +03:00
|
|
|
out:
|
|
|
|
iput(place_holder);
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2021-05-08 17:01:32 +03:00
|
|
|
static bool nfs_server_clear_delayed_delegations(struct nfs_server *server)
|
|
|
|
{
|
|
|
|
struct nfs_delegation *d;
|
|
|
|
bool ret = false;
|
|
|
|
|
|
|
|
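/*
 * Requeue delegations whose return was delayed so that the state
 * manager will retry returning them.
 */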
list_for_each_entry_rcu(d, &server->delegations, super_list) {
|
|
|
|
if (!test_bit(NFS_DELEGATION_RETURN_DELAYED, &d->flags))
|
|
|
|
continue;
|
|
|
|
nfs_mark_return_delegation(server, d);
|
|
|
|
clear_bit(NFS_DELEGATION_RETURN_DELAYED, &d->flags);
|
|
|
|
ret = true;
|
|
|
|
}
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool nfs_client_clear_delayed_delegations(struct nfs_client *clp)
|
|
|
|
{
|
|
|
|
struct nfs_server *server;
|
|
|
|
bool ret = false;
|
|
|
|
|
|
|
|
if (!test_and_clear_bit(NFS4CLNT_DELEGRETURN_DELAYED, &clp->cl_state))
|
|
|
|
goto out;
|
|
|
|
rcu_read_lock();
|
|
|
|
list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
|
|
|
|
if (nfs_server_clear_delayed_delegations(server))
|
|
|
|
ret = true;
|
|
|
|
}
|
|
|
|
rcu_read_unlock();
|
|
|
|
out:
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2020-02-27 16:29:02 +03:00
|
|
|
/**
|
|
|
|
* nfs_client_return_marked_delegations - return previously marked delegations
|
|
|
|
* @clp: nfs_client to process
|
|
|
|
*
|
|
|
|
* Note that this function is designed to be called by the state
|
|
|
|
* manager thread. For this reason, it cannot flush the dirty data,
|
|
|
|
* since that could deadlock in case of a state recovery error.
|
|
|
|
*
|
|
|
|
* Returns zero on success, or a negative errno value.
|
|
|
|
*/
|
|
|
|
int nfs_client_return_marked_delegations(struct nfs_client *clp)
|
|
|
|
{
|
2021-05-08 17:01:32 +03:00
|
|
|
int err = nfs_client_for_each_server(
|
|
|
|
clp, nfs_server_return_marked_delegations, NULL);
|
|
|
|
if (err)
|
|
|
|
return err;
|
|
|
|
/* If a return was delayed, sleep to prevent hard looping */
|
|
|
|
if (nfs_client_clear_delayed_delegations(clp))
|
|
|
|
ssleep(1);
|
|
|
|
return 0;
|
2008-12-23 23:21:46 +03:00
|
|
|
}
|
|
|
|
|
2010-12-24 04:33:04 +03:00
|
|
|
/**
|
2019-10-21 21:04:00 +03:00
|
|
|
* nfs_inode_evict_delegation - return delegation, don't reclaim opens
|
2010-12-24 04:33:04 +03:00
|
|
|
* @inode: inode to process
|
|
|
|
*
|
|
|
|
* Does not protect against delegation reclaims, therefore really only safe
|
2019-10-21 21:04:00 +03:00
|
|
|
* to be called from nfs4_clear_inode(). Guaranteed to always free
|
|
|
|
* the delegation structure.
|
2008-01-25 02:14:34 +03:00
|
|
|
*/
|
2019-10-21 21:04:00 +03:00
|
|
|
void nfs_inode_evict_delegation(struct inode *inode)
|
2008-01-25 02:14:34 +03:00
|
|
|
{
|
|
|
|
struct nfs_delegation *delegation;
|
|
|
|
|
2013-02-05 20:43:28 +04:00
|
|
|
delegation = nfs_inode_detach_delegation(inode);
|
2019-10-21 21:04:00 +03:00
|
|
|
if (delegation != NULL) {
|
2020-01-27 17:58:15 +03:00
|
|
|
set_bit(NFS_DELEGATION_RETURNING, &delegation->flags);
|
2019-10-21 21:04:00 +03:00
|
|
|
set_bit(NFS_DELEGATION_INODE_FREEING, &delegation->flags);
|
2015-03-25 20:19:42 +03:00
|
|
|
nfs_do_return_delegation(inode, delegation, 1);
|
2019-10-21 20:56:59 +03:00
|
|
|
nfs_free_delegation(delegation);
|
2019-10-21 21:04:00 +03:00
|
|
|
}
|
2008-01-25 02:14:34 +03:00
|
|
|
}
|
|
|
|
|
2010-12-24 04:33:04 +03:00
|
|
|
/**
|
2021-03-05 04:29:50 +03:00
|
|
|
* nfs4_inode_return_delegation - synchronously return a delegation
|
2010-12-24 04:33:04 +03:00
|
|
|
* @inode: inode to process
|
|
|
|
*
|
2012-05-07 03:34:17 +04:00
|
|
|
* This routine will always flush any dirty data to disk on the
|
|
|
|
* assumption that if we need to return the delegation, then
|
|
|
|
* we should stop caching.
|
|
|
|
*
|
2010-12-24 04:33:04 +03:00
|
|
|
* Returns zero on success, or a negative errno value.
|
|
|
|
*/
|
2012-06-20 23:53:44 +04:00
|
|
|
int nfs4_inode_return_delegation(struct inode *inode)
|
2007-07-05 22:55:18 +04:00
|
|
|
{
|
|
|
|
struct nfs_inode *nfsi = NFS_I(inode);
|
|
|
|
struct nfs_delegation *delegation;
|
|
|
|
|
2013-02-05 20:43:28 +04:00
|
|
|
delegation = nfs_start_delegation_return(nfsi);
|
2021-05-07 16:14:37 +03:00
|
|
|
/* Synchronous recall of any application leases */
|
|
|
|
break_lease(inode, O_WRONLY | O_RDWR);
|
|
|
|
nfs_wb_all(inode);
|
2013-02-05 20:43:28 +04:00
|
|
|
if (delegation != NULL)
|
2021-05-07 16:14:37 +03:00
|
|
|
return nfs_end_delegation_return(inode, delegation, 1);
|
|
|
|
return 0;
|
2005-04-17 02:20:36 +04:00
|
|
|
}
|
|
|
|
|
2020-01-27 17:58:17 +03:00
|
|
|
/**
|
2021-03-05 04:29:50 +03:00
|
|
|
* nfs4_inode_return_delegation_on_close - asynchronously return a delegation
|
2020-01-27 17:58:17 +03:00
|
|
|
* @inode: inode to process
|
|
|
|
*
|
|
|
|
* This routine is called on file close in order to determine if the
|
|
|
|
* inode delegation needs to be returned immediately.
|
|
|
|
*/
|
|
|
|
void nfs4_inode_return_delegation_on_close(struct inode *inode)
|
|
|
|
{
|
|
|
|
struct nfs_delegation *delegation;
|
|
|
|
struct nfs_delegation *ret = NULL;
|
|
|
|
|
|
|
|
if (!inode)
|
|
|
|
return;
|
|
|
|
rcu_read_lock();
|
|
|
|
delegation = nfs4_get_valid_delegation(inode);
|
|
|
|
if (!delegation)
|
|
|
|
goto out;
|
2020-01-27 17:58:19 +03:00
|
|
|
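/*
 * Return on close if the delegation was explicitly flagged for it, or
 * if the client is holding more delegations than the watermark allows.
 */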
if (test_bit(NFS_DELEGATION_RETURN_IF_CLOSED, &delegation->flags) ||
|
|
|
|
atomic_long_read(&nfs_active_delegations) >= nfs_delegation_watermark) {
|
2020-01-27 17:58:17 +03:00
|
|
|
spin_lock(&delegation->lock);
|
|
|
|
if (delegation->inode &&
|
|
|
|
list_empty(&NFS_I(inode)->open_files) &&
|
|
|
|
!test_and_set_bit(NFS_DELEGATION_RETURNING, &delegation->flags)) {
|
|
|
|
clear_bit(NFS_DELEGATION_RETURN_IF_CLOSED, &delegation->flags);
|
2020-02-13 22:51:06 +03:00
|
|
|
/* Refcount matched in nfs_end_delegation_return() */
|
|
|
|
ret = nfs_get_delegation(delegation);
|
2020-01-27 17:58:17 +03:00
|
|
|
}
|
|
|
|
spin_unlock(&delegation->lock);
|
2020-02-05 17:01:54 +03:00
|
|
|
if (ret)
|
|
|
|
nfs_clear_verifier_delegated(inode);
|
2020-01-27 17:58:17 +03:00
|
|
|
}
|
|
|
|
out:
|
|
|
|
rcu_read_unlock();
|
|
|
|
nfs_end_delegation_return(inode, ret, 0);
|
|
|
|
}
|
|
|
|
|
2018-03-20 23:43:20 +03:00
|
|
|
/**
|
|
|
|
* nfs4_inode_make_writeable
|
|
|
|
* @inode: pointer to inode
|
|
|
|
*
|
|
|
|
* Make the inode writeable by returning the delegation if necessary
|
|
|
|
*
|
|
|
|
* Returns zero on success, or a negative errno value.
|
|
|
|
*/
|
|
|
|
int nfs4_inode_make_writeable(struct inode *inode)
|
|
|
|
{
|
2019-10-27 20:48:18 +03:00
|
|
|
struct nfs_delegation *delegation;
|
|
|
|
|
|
|
|
rcu_read_lock();
|
|
|
|
delegation = nfs4_get_valid_delegation(inode);
|
|
|
|
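/*
 * Nothing to do if there is no delegation, or if the server supports
 * sessions and we already hold a write delegation.
 */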
if (delegation == NULL ||
|
|
|
|
(nfs4_has_session(NFS_SERVER(inode)->nfs_client) &&
|
|
|
|
(delegation->type & FMODE_WRITE))) {
|
|
|
|
rcu_read_unlock();
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
rcu_read_unlock();
|
|
|
|
return nfs4_inode_return_delegation(inode);
|
2018-03-20 23:43:20 +03:00
|
|
|
}
|
|
|
|
|
2013-04-03 22:33:49 +04:00
|
|
|
static void nfs_mark_return_if_closed_delegation(struct nfs_server *server,
|
|
|
|
struct nfs_delegation *delegation)
|
|
|
|
{
|
|
|
|
set_bit(NFS_DELEGATION_RETURN_IF_CLOSED, &delegation->flags);
|
|
|
|
set_bit(NFS4CLNT_DELEGRETURN, &server->nfs_client->cl_state);
|
2008-12-23 23:21:51 +03:00
|
|
|
}
|
|
|
|
|
2013-04-04 03:04:58 +04:00
|
|
|
static bool nfs_server_mark_return_all_delegations(struct nfs_server *server)
|
|
|
|
{
|
|
|
|
struct nfs_delegation *delegation;
|
|
|
|
bool ret = false;
|
|
|
|
|
|
|
|
list_for_each_entry_rcu(delegation, &server->delegations, super_list) {
|
|
|
|
nfs_mark_return_delegation(server, delegation);
|
|
|
|
ret = true;
|
|
|
|
}
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2013-04-04 03:23:58 +04:00
|
|
|
static void nfs_client_mark_return_all_delegations(struct nfs_client *clp)
|
|
|
|
{
|
|
|
|
struct nfs_server *server;
|
|
|
|
|
|
|
|
rcu_read_lock();
|
|
|
|
list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link)
|
|
|
|
nfs_server_mark_return_all_delegations(server);
|
|
|
|
rcu_read_unlock();
|
|
|
|
}
|
|
|
|
|
|
|
|
static void nfs_delegation_run_state_manager(struct nfs_client *clp)
|
|
|
|
{
|
|
|
|
if (test_bit(NFS4CLNT_DELEGRETURN, &clp->cl_state))
|
|
|
|
nfs4_schedule_state_manager(clp);
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* nfs_expire_all_delegations
|
|
|
|
* @clp: client to process
|
|
|
|
*
|
|
|
|
*/
|
|
|
|
void nfs_expire_all_delegations(struct nfs_client *clp)
|
|
|
|
{
|
|
|
|
nfs_client_mark_return_all_delegations(clp);
|
|
|
|
nfs_delegation_run_state_manager(clp);
|
|
|
|
}
|
|
|
|
|
2010-12-24 04:33:04 +03:00
|
|
|
/**
|
2021-03-05 04:29:50 +03:00
|
|
|
* nfs_server_return_all_delegations - return delegations for one superblock
|
2019-02-18 21:32:38 +03:00
|
|
|
* @server: pointer to nfs_server to process
|
2010-12-24 04:33:04 +03:00
|
|
|
*
|
2005-04-17 02:20:36 +04:00
|
|
|
*/
|
2012-06-20 23:53:41 +04:00
|
|
|
void nfs_server_return_all_delegations(struct nfs_server *server)
|
2005-04-17 02:20:36 +04:00
|
|
|
{
|
2010-12-24 04:33:04 +03:00
|
|
|
struct nfs_client *clp = server->nfs_client;
|
2013-04-04 03:04:58 +04:00
|
|
|
bool need_wait;
|
2005-04-17 02:20:36 +04:00
|
|
|
|
|
|
|
if (clp == NULL)
|
|
|
|
return;
|
2010-12-24 04:33:04 +03:00
|
|
|
|
2007-07-06 23:12:04 +04:00
|
|
|
rcu_read_lock();
|
2013-04-04 03:04:58 +04:00
|
|
|
need_wait = nfs_server_mark_return_all_delegations(server);
|
2007-07-06 23:12:04 +04:00
|
|
|
rcu_read_unlock();
|
2010-12-24 04:33:04 +03:00
|
|
|
|
2013-04-04 03:04:58 +04:00
|
|
|
if (need_wait) {
|
2009-12-03 16:10:17 +03:00
|
|
|
nfs4_schedule_state_manager(clp);
|
2013-04-04 03:04:58 +04:00
|
|
|
nfs4_wait_clnt_recover(clp);
|
|
|
|
}
|
2008-12-23 23:21:46 +03:00
|
|
|
}
|
|
|
|
|
2013-04-04 03:27:52 +04:00
|
|
|
static void nfs_mark_return_unused_delegation_types(struct nfs_server *server,
|
2010-12-24 04:33:04 +03:00
|
|
|
fmode_t flags)
|
2008-12-23 23:21:46 +03:00
|
|
|
{
|
|
|
|
struct nfs_delegation *delegation;
|
|
|
|
|
2010-12-24 04:33:04 +03:00
|
|
|
list_for_each_entry_rcu(delegation, &server->delegations, super_list) {
|
2009-12-05 21:20:52 +03:00
|
|
|
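/* Only return a read-write delegation if FMODE_WRITE is being expired */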
if ((delegation->type == (FMODE_READ|FMODE_WRITE)) && !(flags & FMODE_WRITE))
|
|
|
|
continue;
|
|
|
|
if (delegation->type & flags)
|
2013-04-04 03:27:52 +04:00
|
|
|
nfs_mark_return_if_closed_delegation(server, delegation);
|
2008-12-23 23:21:47 +03:00
|
|
|
}
|
2010-12-24 04:33:04 +03:00
|
|
|
}
|
|
|
|
|
2013-04-04 03:27:52 +04:00
|
|
|
static void nfs_client_mark_return_unused_delegation_types(struct nfs_client *clp,
|
2010-12-24 04:33:04 +03:00
|
|
|
fmode_t flags)
|
|
|
|
{
|
|
|
|
struct nfs_server *server;
|
|
|
|
|
|
|
|
rcu_read_lock();
|
|
|
|
list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link)
|
2013-04-04 03:27:52 +04:00
|
|
|
nfs_mark_return_unused_delegation_types(server, flags);
|
2008-12-23 23:21:46 +03:00
|
|
|
rcu_read_unlock();
|
2005-04-17 02:20:36 +04:00
|
|
|
}
|
|
|
|
|
2019-10-21 20:56:59 +03:00
|
|
|
static void nfs_revoke_delegation(struct inode *inode,
|
2016-09-22 20:38:58 +03:00
|
|
|
const nfs4_stateid *stateid)
|
2014-11-11 02:43:56 +03:00
|
|
|
{
|
|
|
|
struct nfs_delegation *delegation;
|
2016-09-22 20:39:14 +03:00
|
|
|
nfs4_stateid tmp;
|
2016-09-22 20:38:58 +03:00
|
|
|
bool ret = false;
|
|
|
|
|
2014-11-11 02:43:56 +03:00
|
|
|
rcu_read_lock();
|
|
|
|
delegation = rcu_dereference(NFS_I(inode)->delegation);
|
2016-09-22 20:38:58 +03:00
|
|
|
if (delegation == NULL)
|
|
|
|
goto out;
|
2016-09-22 20:39:14 +03:00
|
|
|
if (stateid == NULL) {
|
|
|
|
nfs4_stateid_copy(&tmp, &delegation->stateid);
|
|
|
|
stateid = &tmp;
|
2019-10-21 21:15:32 +03:00
|
|
|
} else {
|
|
|
|
if (!nfs4_stateid_match_other(stateid, &delegation->stateid))
|
|
|
|
goto out;
|
|
|
|
spin_lock(&delegation->lock);
|
|
|
|
if (stateid->seqid) {
|
|
|
|
if (nfs4_stateid_is_newer(&delegation->stateid, stateid)) {
|
|
|
|
spin_unlock(&delegation->lock);
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
delegation->stateid.seqid = stateid->seqid;
|
|
|
|
}
|
|
|
|
spin_unlock(&delegation->lock);
|
|
|
|
}
|
2020-01-27 17:58:18 +03:00
|
|
|
nfs_mark_delegation_revoked(delegation);
|
2016-09-22 20:38:58 +03:00
|
|
|
ret = true;
|
|
|
|
out:
|
2014-11-11 02:43:56 +03:00
|
|
|
rcu_read_unlock();
|
2016-09-22 20:39:14 +03:00
|
|
|
if (ret)
|
|
|
|
nfs_inode_find_state_and_recover(inode, stateid);
|
2014-11-11 02:43:56 +03:00
|
|
|
}
|
|
|
|
|
2016-09-22 20:38:58 +03:00
|
|
|
void nfs_remove_bad_delegation(struct inode *inode,
|
|
|
|
const nfs4_stateid *stateid)
|
2012-03-06 04:56:44 +04:00
|
|
|
{
|
2019-10-21 20:56:59 +03:00
|
|
|
nfs_revoke_delegation(inode, stateid);
|
2012-03-06 04:56:44 +04:00
|
|
|
}
|
2012-03-07 19:49:41 +04:00
|
|
|
EXPORT_SYMBOL_GPL(nfs_remove_bad_delegation);
|
2012-03-06 04:56:44 +04:00
|
|
|
|
2019-10-21 21:22:14 +03:00
|
|
|
void nfs_delegation_mark_returned(struct inode *inode,
|
|
|
|
const nfs4_stateid *stateid)
|
|
|
|
{
|
|
|
|
struct nfs_delegation *delegation;
|
|
|
|
|
|
|
|
if (!inode)
|
|
|
|
return;
|
|
|
|
|
|
|
|
rcu_read_lock();
|
|
|
|
delegation = rcu_dereference(NFS_I(inode)->delegation);
|
|
|
|
if (!delegation)
|
|
|
|
goto out_rcu_unlock;
|
|
|
|
|
|
|
|
spin_lock(&delegation->lock);
|
|
|
|
if (!nfs4_stateid_match_other(stateid, &delegation->stateid))
|
|
|
|
goto out_spin_unlock;
|
|
|
|
if (stateid->seqid) {
|
|
|
|
/* If delegation->stateid is newer, don't mark as returned */
|
|
|
|
if (nfs4_stateid_is_newer(&delegation->stateid, stateid))
|
|
|
|
goto out_clear_returning;
|
|
|
|
if (delegation->stateid.seqid != stateid->seqid)
|
|
|
|
delegation->stateid.seqid = stateid->seqid;
|
|
|
|
}
|
|
|
|
|
2020-01-27 17:58:18 +03:00
|
|
|
nfs_mark_delegation_revoked(delegation);
|
2019-10-21 21:22:14 +03:00
|
|
|
|
|
|
|
out_clear_returning:
|
|
|
|
clear_bit(NFS_DELEGATION_RETURNING, &delegation->flags);
|
|
|
|
out_spin_unlock:
|
|
|
|
spin_unlock(&delegation->lock);
|
|
|
|
out_rcu_unlock:
|
|
|
|
rcu_read_unlock();
|
|
|
|
|
|
|
|
nfs_inode_find_state_and_recover(inode, stateid);
|
|
|
|
}
|
|
|
|
|
2010-12-24 04:33:04 +03:00
|
|
|
/**
|
2013-04-04 03:27:52 +04:00
|
|
|
* nfs_expire_unused_delegation_types
|
2010-12-24 04:33:04 +03:00
|
|
|
* @clp: client to process
|
|
|
|
* @flags: delegation types to expire
|
|
|
|
*
|
|
|
|
*/
|
2013-04-04 03:27:52 +04:00
|
|
|
void nfs_expire_unused_delegation_types(struct nfs_client *clp, fmode_t flags)
|
2006-01-03 11:55:24 +03:00
|
|
|
{
|
2013-04-04 03:27:52 +04:00
|
|
|
nfs_client_mark_return_unused_delegation_types(clp, flags);
|
2008-12-23 23:21:50 +03:00
|
|
|
nfs_delegation_run_state_manager(clp);
|
2006-01-03 11:55:24 +03:00
|
|
|
}
|
|
|
|
|
2010-12-24 04:33:04 +03:00
|
|
|
static void nfs_mark_return_unreferenced_delegations(struct nfs_server *server)
|
2008-12-23 23:21:52 +03:00
|
|
|
{
|
|
|
|
struct nfs_delegation *delegation;
|
|
|
|
|
2010-12-24 04:33:04 +03:00
|
|
|
list_for_each_entry_rcu(delegation, &server->delegations, super_list) {
|
2008-12-23 23:21:52 +03:00
|
|
|
if (test_and_clear_bit(NFS_DELEGATION_REFERENCED, &delegation->flags))
|
|
|
|
continue;
|
2013-04-03 22:33:49 +04:00
|
|
|
nfs_mark_return_if_closed_delegation(server, delegation);
|
2008-12-23 23:21:52 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2010-12-24 04:33:04 +03:00
|
|
|
/**
|
|
|
|
* nfs_expire_unreferenced_delegations - Eliminate unused delegations
|
|
|
|
* @clp: nfs_client to process
|
|
|
|
*
|
|
|
|
*/
|
2008-12-23 23:21:52 +03:00
|
|
|
void nfs_expire_unreferenced_delegations(struct nfs_client *clp)
|
|
|
|
{
|
2010-12-24 04:33:04 +03:00
|
|
|
struct nfs_server *server;
|
|
|
|
|
|
|
|
rcu_read_lock();
|
|
|
|
list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link)
|
|
|
|
nfs_mark_return_unreferenced_delegations(server);
|
|
|
|
rcu_read_unlock();
|
|
|
|
|
2008-12-23 23:21:52 +03:00
|
|
|
nfs_delegation_run_state_manager(clp);
|
|
|
|
}
|
|
|
|
|
2010-12-24 04:33:04 +03:00
|
|
|
/**
|
|
|
|
* nfs_async_inode_return_delegation - asynchronously return a delegation
|
|
|
|
* @inode: inode to process
|
2012-03-05 03:13:56 +04:00
|
|
|
* @stateid: state ID information
|
2010-12-24 04:33:04 +03:00
|
|
|
*
|
|
|
|
* Returns zero on success, or a negative errno value.
|
2005-04-17 02:20:36 +04:00
|
|
|
*/
|
2010-12-24 04:33:04 +03:00
|
|
|
int nfs_async_inode_return_delegation(struct inode *inode,
|
|
|
|
const nfs4_stateid *stateid)
|
2005-04-17 02:20:36 +04:00
|
|
|
{
|
2011-07-25 23:37:29 +04:00
|
|
|
struct nfs_server *server = NFS_SERVER(inode);
|
|
|
|
struct nfs_client *clp = server->nfs_client;
|
2008-12-23 23:21:51 +03:00
|
|
|
struct nfs_delegation *delegation;
|
2005-04-17 02:20:36 +04:00
|
|
|
|
2008-12-23 23:21:51 +03:00
|
|
|
rcu_read_lock();
|
2019-10-22 15:46:06 +03:00
|
|
|
delegation = nfs4_get_valid_delegation(inode);
|
2014-03-03 07:03:12 +04:00
|
|
|
if (delegation == NULL)
|
|
|
|
goto out_enoent;
|
2015-09-20 21:58:42 +03:00
|
|
|
if (stateid != NULL &&
|
|
|
|
!clp->cl_mvops->match_stateid(&delegation->stateid, stateid))
|
2014-03-03 07:03:12 +04:00
|
|
|
goto out_enoent;
|
2011-07-25 23:37:29 +04:00
|
|
|
nfs_mark_return_delegation(server, delegation);
|
2008-12-23 23:21:51 +03:00
|
|
|
rcu_read_unlock();
|
2010-12-24 04:33:04 +03:00
|
|
|
|
2021-05-07 16:14:37 +03:00
|
|
|
/* If there are any application leases or delegations, recall them */
|
|
|
|
break_lease(inode, O_WRONLY | O_RDWR | O_NONBLOCK);
|
|
|
|
|
2008-12-23 23:21:51 +03:00
|
|
|
nfs_delegation_run_state_manager(clp);
|
|
|
|
return 0;
|
2014-03-03 07:03:12 +04:00
|
|
|
out_enoent:
|
|
|
|
rcu_read_unlock();
|
|
|
|
return -ENOENT;
|
2005-04-17 02:20:36 +04:00
|
|
|
}
|
|
|
|
|
2010-12-24 04:33:04 +03:00
|
|
|
static struct inode *
|
|
|
|
nfs_delegation_find_inode_server(struct nfs_server *server,
|
|
|
|
const struct nfs_fh *fhandle)
|
2005-04-17 02:20:36 +04:00
|
|
|
{
|
|
|
|
struct nfs_delegation *delegation;
|
2021-01-10 23:46:06 +03:00
|
|
|
struct super_block *freeme = NULL;
|
|
|
|
struct inode *res = NULL;
|
2010-12-24 04:33:04 +03:00
|
|
|
|
|
|
|
list_for_each_entry_rcu(delegation, &server->delegations, super_list) {
|
2008-12-23 23:21:39 +03:00
|
|
|
spin_lock(&delegation->lock);
|
|
|
|
if (delegation->inode != NULL &&
|
2019-10-22 15:46:06 +03:00
|
|
|
!test_bit(NFS_DELEGATION_REVOKED, &delegation->flags) &&
|
2008-12-23 23:21:39 +03:00
|
|
|
nfs_compare_fh(fhandle, &NFS_I(delegation->inode)->fh) == 0) {
|
2021-01-10 23:46:06 +03:00
|
|
|
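/* Pin the superblock before taking a reference to the inode */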
if (nfs_sb_active(server->super)) {
|
|
|
|
freeme = server->super;
|
|
|
|
res = igrab(delegation->inode);
|
|
|
|
}
|
2018-06-07 21:22:00 +03:00
|
|
|
spin_unlock(&delegation->lock);
|
|
|
|
if (res != NULL)
|
|
|
|
return res;
|
2018-11-14 00:37:54 +03:00
|
|
|
if (freeme) {
|
|
|
|
rcu_read_unlock();
|
2021-01-10 23:46:06 +03:00
|
|
|
nfs_sb_deactive(freeme);
|
2018-11-14 00:37:54 +03:00
|
|
|
rcu_read_lock();
|
|
|
|
}
|
2018-06-07 21:22:00 +03:00
|
|
|
return ERR_PTR(-EAGAIN);
|
2005-04-17 02:20:36 +04:00
|
|
|
}
|
2008-12-23 23:21:39 +03:00
|
|
|
spin_unlock(&delegation->lock);
|
2005-04-17 02:20:36 +04:00
|
|
|
}
|
2018-06-07 21:22:00 +03:00
|
|
|
return ERR_PTR(-ENOENT);
|
2010-12-24 04:33:04 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* nfs_delegation_find_inode - retrieve the inode associated with a delegation
|
|
|
|
* @clp: client state handle
|
|
|
|
* @fhandle: filehandle from a delegation recall
|
|
|
|
*
|
|
|
|
* Returns a pointer to the inode matching "fhandle," or an ERR_PTR if a matching inode
|
|
|
|
* cannot be found.
|
|
|
|
*/
|
|
|
|
struct inode *nfs_delegation_find_inode(struct nfs_client *clp,
|
|
|
|
const struct nfs_fh *fhandle)
|
|
|
|
{
|
|
|
|
struct nfs_server *server;
|
2018-06-07 21:22:00 +03:00
|
|
|
struct inode *res;
|
2010-12-24 04:33:04 +03:00
|
|
|
|
|
|
|
rcu_read_lock();
|
|
|
|
list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
|
|
|
|
res = nfs_delegation_find_inode_server(server, fhandle);
|
2018-06-14 16:39:17 +03:00
|
|
|
if (res != ERR_PTR(-ENOENT)) {
|
|
|
|
rcu_read_unlock();
|
2018-06-07 21:22:00 +03:00
|
|
|
return res;
|
2018-06-14 16:39:17 +03:00
|
|
|
}
|
2010-12-24 04:33:04 +03:00
|
|
|
}
|
2007-07-06 23:12:04 +04:00
|
|
|
rcu_read_unlock();
|
2018-06-07 21:22:00 +03:00
|
|
|
return ERR_PTR(-ENOENT);
|
2005-04-17 02:20:36 +04:00
|
|
|
}
|
|
|
|
|
2010-12-24 04:33:04 +03:00
|
|
|
static void nfs_delegation_mark_reclaim_server(struct nfs_server *server)
|
|
|
|
{
|
|
|
|
struct nfs_delegation *delegation;
|
|
|
|
|
2016-09-22 20:38:59 +03:00
|
|
|
list_for_each_entry_rcu(delegation, &server->delegations, super_list) {
|
|
|
|
/*
|
|
|
|
* If the delegation may have been admin revoked, then we
|
|
|
|
* cannot reclaim it.
|
|
|
|
*/
|
|
|
|
if (test_bit(NFS_DELEGATION_TEST_EXPIRED, &delegation->flags))
|
|
|
|
continue;
|
2010-12-24 04:33:04 +03:00
|
|
|
set_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags);
|
2016-09-22 20:38:59 +03:00
|
|
|
}
|
2010-12-24 04:33:04 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* nfs_delegation_mark_reclaim - mark all delegations as needing to be reclaimed
|
|
|
|
* @clp: nfs_client to process
|
|
|
|
*
|
2005-04-17 02:20:36 +04:00
|
|
|
*/
|
2006-08-23 04:06:08 +04:00
|
|
|
void nfs_delegation_mark_reclaim(struct nfs_client *clp)
|
2005-04-17 02:20:36 +04:00
|
|
|
{
|
2010-12-24 04:33:04 +03:00
|
|
|
struct nfs_server *server;
|
|
|
|
|
2007-07-06 23:12:04 +04:00
|
|
|
rcu_read_lock();
|
2010-12-24 04:33:04 +03:00
|
|
|
list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link)
|
|
|
|
nfs_delegation_mark_reclaim_server(server);
|
2007-07-06 23:12:04 +04:00
|
|
|
rcu_read_unlock();
|
2005-04-17 02:20:36 +04:00
|
|
|
}
|
|
|
|
|
2020-02-27 17:08:25 +03:00
|
|
|
static int nfs_server_reap_unclaimed_delegations(struct nfs_server *server,
|
|
|
|
void __always_unused *data)
|
2005-04-17 02:20:36 +04:00
|
|
|
{
|
2007-07-06 23:12:04 +04:00
|
|
|
struct nfs_delegation *delegation;
|
2008-12-23 23:21:39 +03:00
|
|
|
struct inode *inode;
|
2007-07-06 23:12:04 +04:00
|
|
|
restart:
|
|
|
|
rcu_read_lock();
|
2020-02-27 17:08:25 +03:00
|
|
|
restart_locked:
|
|
|
|
list_for_each_entry_rcu(delegation, &server->delegations, super_list) {
|
|
|
|
if (test_bit(NFS_DELEGATION_INODE_FREEING,
|
|
|
|
&delegation->flags) ||
|
|
|
|
test_bit(NFS_DELEGATION_RETURNING,
|
|
|
|
&delegation->flags) ||
|
|
|
|
test_bit(NFS_DELEGATION_NEED_RECLAIM,
|
|
|
|
&delegation->flags) == 0)
|
|
|
|
continue;
|
|
|
|
inode = nfs_delegation_grab_inode(delegation);
|
|
|
|
if (inode == NULL)
|
|
|
|
goto restart_locked;
|
|
|
|
delegation = nfs_start_delegation_return_locked(NFS_I(inode));
|
|
|
|
rcu_read_unlock();
|
|
|
|
if (delegation != NULL) {
|
|
|
|
if (nfs_detach_delegation(NFS_I(inode), delegation,
|
|
|
|
server) != NULL)
|
|
|
|
nfs_free_delegation(delegation);
|
|
|
|
/* Match nfs_start_delegation_return_locked */
|
|
|
|
nfs_put_delegation(delegation);
|
2010-12-24 04:33:04 +03:00
|
|
|
}
|
2020-02-27 17:08:25 +03:00
|
|
|
iput(inode);
|
|
|
|
cond_resched();
|
|
|
|
goto restart;
|
2005-04-17 02:20:36 +04:00
|
|
|
}
|
2007-07-06 23:12:04 +04:00
|
|
|
rcu_read_unlock();
|
2020-02-27 17:08:25 +03:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* nfs_delegation_reap_unclaimed - reap unclaimed delegations after reboot recovery is done
|
|
|
|
* @clp: nfs_client to process
|
|
|
|
*
|
|
|
|
*/
|
|
|
|
void nfs_delegation_reap_unclaimed(struct nfs_client *clp)
|
|
|
|
{
|
|
|
|
nfs_client_for_each_server(clp, nfs_server_reap_unclaimed_delegations,
|
|
|
|
NULL);
|
2005-04-17 02:20:36 +04:00
|
|
|
}
|
2006-03-20 21:44:46 +03:00
|
|
|
|
2016-09-22 20:39:00 +03:00
|
|
|
static inline bool nfs4_server_rebooted(const struct nfs_client *clp)
|
|
|
|
{
|
|
|
|
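/* True if a lease check, lease recovery or session reset is pending */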
return (clp->cl_state & (BIT(NFS4CLNT_CHECK_LEASE) |
|
|
|
|
BIT(NFS4CLNT_LEASE_EXPIRED) |
|
|
|
|
BIT(NFS4CLNT_SESSION_RESET))) != 0;
|
|
|
|
}
|
|
|
|
|
2016-09-22 20:38:59 +03:00
|
|
|
static void nfs_mark_test_expired_delegation(struct nfs_server *server,
|
|
|
|
struct nfs_delegation *delegation)
|
|
|
|
{
|
2016-09-22 20:39:06 +03:00
|
|
|
if (delegation->stateid.type == NFS4_INVALID_STATEID_TYPE)
|
|
|
|
return;
|
2016-09-22 20:38:59 +03:00
|
|
|
clear_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags);
|
|
|
|
set_bit(NFS_DELEGATION_TEST_EXPIRED, &delegation->flags);
|
|
|
|
set_bit(NFS4CLNT_DELEGATION_EXPIRED, &server->nfs_client->cl_state);
|
|
|
|
}
|
|
|
|
|
2016-09-22 20:39:00 +03:00
|
|
|
static void nfs_inode_mark_test_expired_delegation(struct nfs_server *server,
|
|
|
|
struct inode *inode)
|
|
|
|
{
|
|
|
|
struct nfs_delegation *delegation;
|
|
|
|
|
|
|
|
rcu_read_lock();
|
|
|
|
delegation = rcu_dereference(NFS_I(inode)->delegation);
|
|
|
|
if (delegation)
|
|
|
|
nfs_mark_test_expired_delegation(server, delegation);
|
|
|
|
rcu_read_unlock();
|
|
|
|
|
|
|
|
}
|
|
|
|
|
2016-09-22 20:38:59 +03:00
|
|
|
static void nfs_delegation_mark_test_expired_server(struct nfs_server *server)
|
|
|
|
{
|
|
|
|
struct nfs_delegation *delegation;
|
|
|
|
|
|
|
|
list_for_each_entry_rcu(delegation, &server->delegations, super_list)
|
|
|
|
nfs_mark_test_expired_delegation(server, delegation);
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* nfs_mark_test_expired_all_delegations - mark all delegations for testing
|
|
|
|
* @clp: nfs_client to process
|
|
|
|
*
|
|
|
|
* Iterates through all the delegations associated with this server and
|
|
|
|
* marks them as needing to be checked for validity.
|
|
|
|
*/
|
|
|
|
void nfs_mark_test_expired_all_delegations(struct nfs_client *clp)
|
|
|
|
{
|
|
|
|
struct nfs_server *server;
|
|
|
|
|
|
|
|
rcu_read_lock();
|
|
|
|
list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link)
|
|
|
|
nfs_delegation_mark_test_expired_server(server);
|
|
|
|
rcu_read_unlock();
|
|
|
|
}
|
|
|
|
|
2019-05-06 18:59:05 +03:00
|
|
|
/**
|
|
|
|
* nfs_test_expired_all_delegations - test all delegations for a client
|
|
|
|
* @clp: nfs_client to process
|
|
|
|
*
|
|
|
|
* Helper for handling "recallable state revoked" status from server.
|
|
|
|
*/
|
|
|
|
void nfs_test_expired_all_delegations(struct nfs_client *clp)
|
|
|
|
{
|
|
|
|
nfs_mark_test_expired_all_delegations(clp);
|
|
|
|
nfs4_schedule_state_manager(clp);
|
|
|
|
}
|
|
|
|
|
2019-07-26 16:40:53 +03:00
|
|
|
static void
|
|
|
|
nfs_delegation_test_free_expired(struct inode *inode,
|
|
|
|
nfs4_stateid *stateid,
|
|
|
|
const struct cred *cred)
|
|
|
|
{
|
|
|
|
struct nfs_server *server = NFS_SERVER(inode);
|
|
|
|
const struct nfs4_minor_version_ops *ops = server->nfs_client->cl_mvops;
|
|
|
|
int status;
|
|
|
|
|
|
|
|
if (!cred)
|
|
|
|
return;
|
|
|
|
status = ops->test_and_free_expired(server, stateid, cred);
|
|
|
|
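/* The server no longer recognizes the stateid: treat the delegation as revoked */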
if (status == -NFS4ERR_EXPIRED || status == -NFS4ERR_BAD_STATEID)
|
|
|
|
nfs_remove_bad_delegation(inode, stateid);
|
|
|
|
}
|
|
|
|
|
2020-02-27 17:15:19 +03:00
|
|
|
static int nfs_server_reap_expired_delegations(struct nfs_server *server,
|
|
|
|
void __always_unused *data)
|
2016-09-22 20:38:59 +03:00
|
|
|
{
|
|
|
|
struct nfs_delegation *delegation;
|
|
|
|
struct inode *inode;
|
2018-12-03 03:30:31 +03:00
|
|
|
const struct cred *cred;
|
2016-09-22 20:38:59 +03:00
|
|
|
nfs4_stateid stateid;
|
|
|
|
restart:
|
|
|
|
rcu_read_lock();
|
2020-02-27 17:15:19 +03:00
|
|
|
restart_locked:
|
|
|
|
list_for_each_entry_rcu(delegation, &server->delegations, super_list) {
|
|
|
|
if (test_bit(NFS_DELEGATION_INODE_FREEING,
|
|
|
|
&delegation->flags) ||
|
|
|
|
test_bit(NFS_DELEGATION_RETURNING,
|
|
|
|
&delegation->flags) ||
|
|
|
|
test_bit(NFS_DELEGATION_TEST_EXPIRED,
|
|
|
|
&delegation->flags) == 0)
|
|
|
|
continue;
|
|
|
|
inode = nfs_delegation_grab_inode(delegation);
|
|
|
|
if (inode == NULL)
|
|
|
|
goto restart_locked;
|
2020-04-02 22:27:09 +03:00
|
|
|
spin_lock(&delegation->lock);
|
2020-02-27 17:15:19 +03:00
|
|
|
cred = get_cred_rcu(delegation->cred);
|
|
|
|
nfs4_stateid_copy(&stateid, &delegation->stateid);
|
2020-04-02 22:27:09 +03:00
|
|
|
spin_unlock(&delegation->lock);
|
2020-02-27 17:15:19 +03:00
|
|
|
clear_bit(NFS_DELEGATION_TEST_EXPIRED, &delegation->flags);
|
|
|
|
rcu_read_unlock();
|
|
|
|
nfs_delegation_test_free_expired(inode, &stateid, cred);
|
|
|
|
put_cred(cred);
|
|
|
|
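/* If recovery has not started, keep scanning; otherwise re-mark the delegation and retry later */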
if (!nfs4_server_rebooted(server->nfs_client)) {
|
2016-09-22 20:38:59 +03:00
|
|
|
iput(inode);
|
2018-04-30 07:31:30 +03:00
|
|
|
cond_resched();
|
2016-09-22 20:38:59 +03:00
|
|
|
goto restart;
|
|
|
|
}
|
2020-02-27 17:15:19 +03:00
|
|
|
nfs_inode_mark_test_expired_delegation(server, inode);
|
|
|
|
iput(inode);
|
|
|
|
return -EAGAIN;
|
2016-09-22 20:38:59 +03:00
|
|
|
}
|
|
|
|
rcu_read_unlock();
|
2020-02-27 17:15:19 +03:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* nfs_reap_expired_delegations - reap expired delegations
|
|
|
|
* @clp: nfs_client to process
|
|
|
|
*
|
|
|
|
* Iterates through all the delegations associated with this server and
|
|
|
|
* checks if they may have been revoked. This function is usually
|
|
|
|
* expected to be called in cases where the server may have lost its
|
|
|
|
* lease.
|
|
|
|
*/
|
|
|
|
void nfs_reap_expired_delegations(struct nfs_client *clp)
|
|
|
|
{
|
|
|
|
nfs_client_for_each_server(clp, nfs_server_reap_expired_delegations,
|
|
|
|
NULL);
|
2016-09-22 20:38:59 +03:00
|
|
|
}
|
|
|
|
|
2016-09-22 20:39:07 +03:00
|
|
|
void nfs_inode_find_delegation_state_and_recover(struct inode *inode,
|
|
|
|
const nfs4_stateid *stateid)
|
|
|
|
{
|
|
|
|
struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
|
|
|
|
struct nfs_delegation *delegation;
|
|
|
|
bool found = false;
|
|
|
|
|
|
|
|
rcu_read_lock();
|
|
|
|
delegation = rcu_dereference(NFS_I(inode)->delegation);
|
|
|
|
if (delegation &&
|
2019-10-26 17:16:15 +03:00
|
|
|
nfs4_stateid_match_or_older(&delegation->stateid, stateid) &&
|
|
|
|
!test_bit(NFS_DELEGATION_REVOKED, &delegation->flags)) {
|
2016-09-22 20:39:07 +03:00
|
|
|
nfs_mark_test_expired_delegation(NFS_SERVER(inode), delegation);
|
|
|
|
found = true;
|
|
|
|
}
|
|
|
|
rcu_read_unlock();
|
|
|
|
if (found)
|
|
|
|
nfs4_schedule_state_manager(clp);
|
|
|
|
}
|
|
|
|
|
2010-12-24 04:33:04 +03:00
|
|
|
/**
|
|
|
|
* nfs_delegations_present - check for existence of delegations
|
|
|
|
* @clp: client state handle
|
|
|
|
*
|
|
|
|
* Returns one if there are any nfs_delegation structures attached
|
|
|
|
* to this nfs_client.
|
|
|
|
*/
|
|
|
|
int nfs_delegations_present(struct nfs_client *clp)
|
|
|
|
{
|
|
|
|
struct nfs_server *server;
|
|
|
|
int ret = 0;
|
|
|
|
|
|
|
|
rcu_read_lock();
|
|
|
|
list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link)
|
|
|
|
if (!list_empty(&server->delegations)) {
|
|
|
|
ret = 1;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
rcu_read_unlock();
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2017-11-06 23:28:05 +03:00
|
|
|
/**
|
|
|
|
* nfs4_refresh_delegation_stateid - Update delegation stateid seqid
|
|
|
|
* @dst: stateid to refresh
|
|
|
|
* @inode: inode to check
|
|
|
|
*
|
|
|
|
* Returns "true" and updates "dst->seqid" * if inode had a delegation
|
|
|
|
* that matches our delegation stateid. Otherwise "false" is returned.
|
|
|
|
*/
|
|
|
|
bool nfs4_refresh_delegation_stateid(nfs4_stateid *dst, struct inode *inode)
|
|
|
|
{
|
|
|
|
struct nfs_delegation *delegation;
|
|
|
|
bool ret = false;
|
|
|
|
if (!inode)
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
rcu_read_lock();
|
|
|
|
delegation = rcu_dereference(NFS_I(inode)->delegation);
|
|
|
|
if (delegation != NULL &&
|
2019-10-22 20:34:06 +03:00
|
|
|
nfs4_stateid_match_other(dst, &delegation->stateid) &&
|
2019-10-25 01:00:35 +03:00
|
|
|
nfs4_stateid_is_newer(&delegation->stateid, dst) &&
|
2019-10-22 20:34:06 +03:00
|
|
|
!test_bit(NFS_DELEGATION_REVOKED, &delegation->flags)) {
|
2017-11-06 23:28:05 +03:00
|
|
|
dst->seqid = delegation->stateid.seqid;
|
2019-11-01 01:40:33 +03:00
|
|
|
ret = true;
|
2017-11-06 23:28:05 +03:00
|
|
|
}
|
|
|
|
rcu_read_unlock();
|
|
|
|
out:
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2010-12-24 04:33:04 +03:00
|
|
|
/**
|
|
|
|
* nfs4_copy_delegation_stateid - Copy inode's state ID information
|
|
|
|
* @inode: inode to check
|
2012-03-09 02:16:12 +04:00
|
|
|
* @flags: delegation type requirement
|
2016-05-17 00:42:44 +03:00
|
|
|
* @dst: stateid data structure to fill in
|
|
|
|
* @cred: optional argument to retrieve credential
|
2010-12-24 04:33:04 +03:00
|
|
|
*
|
2012-03-09 02:16:12 +04:00
|
|
|
* Returns "true" and fills in "dst->data" * if inode had a delegation,
|
|
|
|
* otherwise "false" is returned.
|
2010-12-24 04:33:04 +03:00
|
|
|
*/
|
2016-05-17 00:42:44 +03:00
|
|
|
bool nfs4_copy_delegation_stateid(struct inode *inode, fmode_t flags,
|
2018-12-03 03:30:31 +03:00
|
|
|
nfs4_stateid *dst, const struct cred **cred)
|
2006-03-20 21:44:46 +03:00
|
|
|
{
|
|
|
|
struct nfs_inode *nfsi = NFS_I(inode);
|
|
|
|
struct nfs_delegation *delegation;
|
2020-04-02 22:27:09 +03:00
|
|
|
bool ret = false;
|
2006-03-20 21:44:46 +03:00
|
|
|
|
2012-03-09 02:16:12 +04:00
|
|
|
flags &= FMODE_READ|FMODE_WRITE;
|
2007-07-06 23:12:04 +04:00
|
|
|
rcu_read_lock();
|
|
|
|
delegation = rcu_dereference(nfsi->delegation);
|
2020-04-02 22:27:09 +03:00
|
|
|
if (!delegation)
|
|
|
|
goto out;
|
|
|
|
spin_lock(&delegation->lock);
|
2016-09-22 20:38:54 +03:00
|
|
|
ret = nfs4_is_valid_delegation(delegation, flags);
|
2012-03-09 02:16:12 +04:00
|
|
|
if (ret) {
|
2012-03-05 03:13:56 +04:00
|
|
|
nfs4_stateid_copy(dst, &delegation->stateid);
|
2012-03-09 02:16:12 +04:00
|
|
|
nfs_mark_delegation_referenced(delegation);
|
2016-05-17 00:42:44 +03:00
|
|
|
if (cred)
|
2018-12-03 03:30:31 +03:00
|
|
|
*cred = get_cred(delegation->cred);
|
2006-03-20 21:44:46 +03:00
|
|
|
}
|
2020-04-02 22:27:09 +03:00
|
|
|
spin_unlock(&delegation->lock);
|
|
|
|
out:
|
2007-07-06 23:12:04 +04:00
|
|
|
rcu_read_unlock();
|
|
|
|
return ret;
|
2006-03-20 21:44:46 +03:00
|
|
|
}
|
2015-09-06 02:06:58 +03:00
|
|
|
|
|
|
|
/**
|
|
|
|
* nfs4_delegation_flush_on_close - Check if we must flush file on close
|
|
|
|
* @inode: inode to check
|
|
|
|
*
|
|
|
|
* This function checks the number of outstanding writes to the file
|
|
|
|
* against the delegation 'space_limit' field to see if
|
|
|
|
* the spec requires us to flush the file on close.
|
|
|
|
*/
|
|
|
|
bool nfs4_delegation_flush_on_close(const struct inode *inode)
|
|
|
|
{
|
|
|
|
struct nfs_inode *nfsi = NFS_I(inode);
|
|
|
|
struct nfs_delegation *delegation;
|
|
|
|
bool ret = true;
|
|
|
|
|
|
|
|
rcu_read_lock();
|
|
|
|
delegation = rcu_dereference(nfsi->delegation);
|
|
|
|
if (delegation == NULL || !(delegation->type & FMODE_WRITE))
|
|
|
|
goto out;
|
2017-08-01 22:39:46 +03:00
|
|
|
if (atomic_long_read(&nfsi->nrequests) < delegation->pagemod_limit)
|
2015-09-06 02:06:58 +03:00
|
|
|
ret = false;
|
|
|
|
out:
|
|
|
|
rcu_read_unlock();
|
|
|
|
return ret;
|
|
|
|
}
|
2020-01-27 17:58:19 +03:00
|
|
|
|
|
|
|
module_param_named(delegation_watermark, nfs_delegation_watermark, uint, 0644);
|