2005-04-16 22:20:36 +00:00
|
|
|
/*
|
|
|
|
* fs/nfs/nfs4proc.c
|
|
|
|
*
|
|
|
|
* Client-side procedure declarations for NFSv4.
|
|
|
|
*
|
|
|
|
* Copyright (c) 2002 The Regents of the University of Michigan.
|
|
|
|
* All rights reserved.
|
|
|
|
*
|
|
|
|
* Kendrick Smith <kmsmith@umich.edu>
|
|
|
|
* Andy Adamson <andros@umich.edu>
|
|
|
|
*
|
|
|
|
* Redistribution and use in source and binary forms, with or without
|
|
|
|
* modification, are permitted provided that the following conditions
|
|
|
|
* are met:
|
|
|
|
*
|
|
|
|
* 1. Redistributions of source code must retain the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer.
|
|
|
|
* 2. Redistributions in binary form must reproduce the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer in the
|
|
|
|
* documentation and/or other materials provided with the distribution.
|
|
|
|
* 3. Neither the name of the University nor the names of its
|
|
|
|
* contributors may be used to endorse or promote products derived
|
|
|
|
* from this software without specific prior written permission.
|
|
|
|
*
|
|
|
|
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
|
|
|
|
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
|
|
|
|
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
|
|
|
* DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
|
|
|
|
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
|
|
|
|
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
|
|
|
|
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
|
|
|
|
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
|
|
|
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
|
|
|
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
|
|
|
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include <linux/mm.h>
|
|
|
|
#include <linux/delay.h>
|
|
|
|
#include <linux/errno.h>
|
|
|
|
#include <linux/string.h>
|
2011-12-10 00:05:58 +00:00
|
|
|
#include <linux/ratelimit.h>
|
|
|
|
#include <linux/printk.h>
|
include cleanup: Update gfp.h and slab.h includes to prepare for breaking implicit slab.h inclusion from percpu.h
percpu.h is included by sched.h and module.h and thus ends up being
included when building most .c files. percpu.h includes slab.h which
in turn includes gfp.h making everything defined by the two files
universally available and complicating inclusion dependencies.
percpu.h -> slab.h dependency is about to be removed. Prepare for
this change by updating users of gfp and slab facilities to include
those headers directly instead of assuming availability. As this conversion
needs to touch large number of source files, the following script is
used as the basis of conversion.
http://userweb.kernel.org/~tj/misc/slabh-sweep.py
The script does the following.
* Scan files for gfp and slab usages and update includes such that
only the necessary includes are there. ie. if only gfp is used,
gfp.h, if slab is used, slab.h.
* When the script inserts a new include, it looks at the include
blocks and try to put the new include such that its order conforms
to its surrounding. It's put in the include block which contains
core kernel includes, in the same order that the rest are ordered -
alphabetical, Christmas tree, rev-Xmas-tree or at the end if there
doesn't seem to be any matching order.
* If the script can't find a place to put a new include (mostly
because the file doesn't have fitting include block), it prints out
an error message indicating which .h file needs to be added to the
file.
The conversion was done in the following steps.
1. The initial automatic conversion of all .c files updated slightly
over 4000 files, deleting around 700 includes and adding ~480 gfp.h
and ~3000 slab.h inclusions. The script emitted errors for ~400
files.
2. Each error was manually checked. Some didn't need the inclusion,
some needed manual addition while adding it to implementation .h or
embedding .c file was more appropriate for others. This step added
inclusions to around 150 files.
3. The script was run again and the output was compared to the edits
from #2 to make sure no file was left behind.
4. Several build tests were done and a couple of problems were fixed.
e.g. lib/decompress_*.c used malloc/free() wrappers around slab
APIs requiring slab.h to be added manually.
5. The script was run on all .h files but without automatically
editing them as sprinkling gfp.h and slab.h inclusions around .h
files could easily lead to inclusion dependency hell. Most gfp.h
inclusion directives were ignored as stuff from gfp.h was usually
wildly available and often used in preprocessor macros. Each
slab.h inclusion directive was examined and added manually as
necessary.
6. percpu.h was updated not to include slab.h.
7. Build tests were done on the following configurations and failures
were fixed. CONFIG_GCOV_KERNEL was turned off for all tests (as my
distributed build env didn't work with gcov compiles) and a few
more options had to be turned off depending on archs to make things
build (like ipr on powerpc/64 which failed due to missing writeq).
* x86 and x86_64 UP and SMP allmodconfig and a custom test config.
* powerpc and powerpc64 SMP allmodconfig
* sparc and sparc64 SMP allmodconfig
* ia64 SMP allmodconfig
* s390 SMP allmodconfig
* alpha SMP allmodconfig
* um on x86_64 SMP allmodconfig
8. percpu.h modifications were reverted so that it could be applied as
a separate patch and serve as bisection point.
Given the fact that I had only a couple of failures from tests on step
6, I'm fairly confident about the coverage of this conversion patch.
If there is a breakage, it's likely to be something in one of the arch
headers which should be easily discoverable on most builds of
the specific arch.
Signed-off-by: Tejun Heo <tj@kernel.org>
Guess-its-ok-by: Christoph Lameter <cl@linux-foundation.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Lee Schermerhorn <Lee.Schermerhorn@hp.com>
2010-03-24 08:04:11 +00:00
|
|
|
#include <linux/slab.h>
|
2005-04-16 22:20:36 +00:00
|
|
|
#include <linux/sunrpc/clnt.h>
|
|
|
|
#include <linux/nfs.h>
|
|
|
|
#include <linux/nfs4.h>
|
|
|
|
#include <linux/nfs_fs.h>
|
|
|
|
#include <linux/nfs_page.h>
|
2011-04-13 18:31:30 +00:00
|
|
|
#include <linux/nfs_mount.h>
|
2005-04-16 22:20:36 +00:00
|
|
|
#include <linux/namei.h>
|
2005-10-18 21:20:17 +00:00
|
|
|
#include <linux/mount.h>
|
2009-04-01 13:22:29 +00:00
|
|
|
#include <linux/module.h>
|
2010-12-09 11:35:25 +00:00
|
|
|
#include <linux/xattr.h>
|
2011-01-26 00:15:32 +00:00
|
|
|
#include <linux/utsname.h>
|
2011-12-01 21:44:39 +00:00
|
|
|
#include <linux/freezer.h>
|
2018-01-09 13:21:17 +00:00
|
|
|
#include <linux/iversion.h>
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2005-06-22 17:16:21 +00:00
|
|
|
#include "nfs4_fs.h"
|
2005-04-16 22:20:36 +00:00
|
|
|
#include "delegation.h"
|
2008-02-20 01:04:23 +00:00
|
|
|
#include "internal.h"
|
2006-03-20 18:44:14 +00:00
|
|
|
#include "iostat.h"
|
2009-04-01 13:22:31 +00:00
|
|
|
#include "callback.h"
|
2010-10-20 04:18:03 +00:00
|
|
|
#include "pnfs.h"
|
NFS: Always use the same SETCLIENTID boot verifier
Currently our NFS client assigns a unique SETCLIENTID boot verifier
for each server IP address it knows about. It's set to CURRENT_TIME
when the struct nfs_client for that server IP is created.
During the SETCLIENTID operation, our client also presents an
nfs_client_id4 string to servers, as an identifier on which the server
can hang all of this client's NFSv4 state. Our client's
nfs_client_id4 string is unique for each server IP address.
An NFSv4 server is obligated to wipe all NFSv4 state associated with
an nfs_client_id4 string when the client presents the same
nfs_client_id4 string along with a changed SETCLIENTID boot verifier.
When our client unmounts the last of a server's shares, it destroys
that server's struct nfs_client. The next time the client mounts that
NFS server, it creates a fresh struct nfs_client with a fresh boot
verifier. On seeing the fresh verifer, the server wipes any previous
NFSv4 state associated with that nfs_client_id4.
However, NFSv4.1 clients are supposed to present the same
nfs_client_id4 string to all servers. And, to support Transparent
State Migration, the same nfs_client_id4 string should be presented
to all NFSv4.0 servers so they recognize that migrated state for this
client belongs with state a server may already have for this client.
(This is known as the Uniform Client String model).
If the nfs_client_id4 string is the same but the boot verifier changes
for each server IP address, SETCLIENTID and EXCHANGE_ID operations
from such a client could unintentionally result in a server wiping a
client's previously obtained lease.
Thus, if our NFS client is going to use a fixed nfs_client_id4 string,
either for NFSv4.0 or NFSv4.1 mounts, our NFS client should use a
boot verifier that does not change depending on server IP address.
Replace our current per-nfs_client boot verifier with a per-nfs_net
boot verifier.
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
2012-05-22 02:45:41 +00:00
|
|
|
#include "netns.h"
|
2020-10-07 22:24:18 +00:00
|
|
|
#include "sysfs.h"
|
2015-04-15 17:00:05 +00:00
|
|
|
#include "nfs4idmap.h"
|
2012-11-26 17:49:34 +00:00
|
|
|
#include "nfs4session.h"
|
2012-12-20 21:52:38 +00:00
|
|
|
#include "fscache.h"
|
2020-06-23 22:39:03 +00:00
|
|
|
#include "nfs42.h"
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2013-08-09 15:51:26 +00:00
|
|
|
#include "nfs4trace.h"
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
#define NFSDBG_FACILITY NFSDBG_PROC
|
|
|
|
|
2018-04-07 17:44:28 +00:00
|
|
|
#define NFS4_BITMASK_SZ 3
|
|
|
|
|
2006-09-15 12:30:46 +00:00
|
|
|
#define NFS4_POLL_RETRY_MIN (HZ/10)
|
2005-04-16 22:20:36 +00:00
|
|
|
#define NFS4_POLL_RETRY_MAX (15*HZ)
|
|
|
|
|
2016-05-12 09:16:38 +00:00
|
|
|
/* file attributes which can be mapped to nfs attributes */
|
|
|
|
#define NFS4_VALID_ATTRS (ATTR_MODE \
|
|
|
|
| ATTR_UID \
|
|
|
|
| ATTR_GID \
|
|
|
|
| ATTR_SIZE \
|
|
|
|
| ATTR_ATIME \
|
|
|
|
| ATTR_MTIME \
|
|
|
|
| ATTR_CTIME \
|
|
|
|
| ATTR_ATIME_SET \
|
|
|
|
| ATTR_MTIME_SET)
|
|
|
|
|
2006-01-03 08:55:12 +00:00
|
|
|
struct nfs4_opendata;
|
2009-12-15 05:27:57 +00:00
|
|
|
static int _nfs4_recover_proc_open(struct nfs4_opendata *data);
|
2005-04-16 22:20:36 +00:00
|
|
|
static int nfs4_do_fsinfo(struct nfs_server *, struct nfs_fh *, struct nfs_fsinfo *);
|
2012-03-01 22:01:57 +00:00
|
|
|
static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr);
|
2021-10-22 17:11:07 +00:00
|
|
|
static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle,
|
|
|
|
struct nfs_fattr *fattr, struct inode *inode);
|
2018-12-03 00:30:31 +00:00
|
|
|
static int nfs4_do_setattr(struct inode *inode, const struct cred *cred,
|
2010-04-16 20:22:51 +00:00
|
|
|
struct nfs_fattr *fattr, struct iattr *sattr,
|
2021-10-22 17:11:08 +00:00
|
|
|
struct nfs_open_context *ctx, struct nfs4_label *ilabel);
|
2011-06-02 18:59:10 +00:00
|
|
|
#ifdef CONFIG_NFS_V4_1
|
2017-10-19 19:46:45 +00:00
|
|
|
static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp,
|
2018-12-03 00:30:31 +00:00
|
|
|
const struct cred *cred,
|
2017-10-19 19:46:45 +00:00
|
|
|
struct nfs4_slot *slot,
|
|
|
|
bool is_privileged);
|
2013-05-20 15:20:27 +00:00
|
|
|
static int nfs41_test_stateid(struct nfs_server *, nfs4_stateid *,
|
2018-12-03 00:30:31 +00:00
|
|
|
const struct cred *);
|
2016-09-22 17:39:04 +00:00
|
|
|
static int nfs41_free_stateid(struct nfs_server *, const nfs4_stateid *,
|
2018-12-03 00:30:31 +00:00
|
|
|
const struct cred *, bool);
|
2011-06-02 18:59:10 +00:00
|
|
|
#endif
|
2013-05-22 16:50:44 +00:00
|
|
|
|
|
|
|
#ifdef CONFIG_NFS_V4_SECURITY_LABEL
/*
 * Ask the LSM for the security label that the file about to be created
 * under @dir should carry.
 *
 * Returns @label (filled in) on success, or NULL when no label should
 * be sent: the caller passed no label, the server does not advertise
 * security label support, or the LSM declined to supply a context.
 */
static inline struct nfs4_label *
nfs4_label_init_security(struct inode *dir, struct dentry *dentry,
			 struct iattr *sattr, struct nfs4_label *label)
{
	int err;

	if (label == NULL)
		return NULL;

	/* Only bother if the server supports security labels */
	if (nfs_server_capable(dir, NFS_CAP_SECURITY_LABEL) == 0)
		return NULL;

	/* Start from a clean label; the LSM only fills in label/len */
	label->lfs = 0;
	label->pi = 0;
	label->len = 0;
	label->label = NULL;

	err = security_dentry_init_security(dentry, sattr->ia_mode,
				&dentry->d_name, NULL,
				(void **)&label->label, &label->len);
	if (err == 0)
		return label;

	return NULL;
}

/* Release the security context obtained via nfs4_label_init_security() */
static inline void
nfs4_label_release_security(struct nfs4_label *label)
{
	if (label)
		security_release_secctx(label->label, label->len);
}

/*
 * Select the GETATTR bitmask to use: the full mask when a security
 * label was requested, otherwise the "no label" variant.
 */
static inline u32 *nfs4_bitmask(struct nfs_server *server, struct nfs4_label *label)
{
	if (label)
		return server->attr_bitmask;

	return server->attr_bitmask_nl;
}
#else
/* Stub implementations used when security labels are compiled out */
static inline struct nfs4_label *
nfs4_label_init_security(struct inode *dir, struct dentry *dentry,
			 struct iattr *sattr, struct nfs4_label *l)
{ return NULL; }
static inline void
nfs4_label_release_security(struct nfs4_label *label)
{ return; }
static inline u32 *
nfs4_bitmask(struct nfs_server *server, struct nfs4_label *label)
{ return server->attr_bitmask; }
#endif
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
/* Prevent leaks of NFSv4 errors into userland */
static int nfs4_map_errors(int err)
{
	/* Success and ordinary errno values pass straight through;
	 * only protocol-level NFS4ERR_* codes (< -1000) need mapping. */
	if (err >= -1000)
		return err;
	switch (err) {
	case -NFS4ERR_RESOURCE:
	case -NFS4ERR_LAYOUTTRYLATER:
	case -NFS4ERR_RECALLCONFLICT:
	case -NFS4ERR_RETURNCONFLICT:
		return -EREMOTEIO;
	case -NFS4ERR_WRONGSEC:
	case -NFS4ERR_WRONG_CRED:
		return -EPERM;
	case -NFS4ERR_BADOWNER:
	case -NFS4ERR_BADNAME:
		return -EINVAL;
	case -NFS4ERR_SHARE_DENIED:
		return -EACCES;
	case -NFS4ERR_MINOR_VERS_MISMATCH:
		return -EPROTONOSUPPORT;
	case -NFS4ERR_FILE_OPEN:
		return -EBUSY;
	case -NFS4ERR_NOT_SAME:
		return -ENOTSYNC;
	default:
		dprintk("%s could not handle NFSv4 error %d\n",
				__func__, -err);
		break;
	}
	/* Unknown protocol error: report a generic I/O error */
	return -EIO;
}
|
|
|
|
|
|
|
|
/*
 * This is our standard bitmap for GETATTR requests.
 */
const u32 nfs4_fattr_bitmap[3] = {
	FATTR4_WORD0_TYPE
	| FATTR4_WORD0_CHANGE
	| FATTR4_WORD0_SIZE
	| FATTR4_WORD0_FSID
	| FATTR4_WORD0_FILEID,
	FATTR4_WORD1_MODE
	| FATTR4_WORD1_NUMLINKS
	| FATTR4_WORD1_OWNER
	| FATTR4_WORD1_OWNER_GROUP
	| FATTR4_WORD1_RAWDEV
	| FATTR4_WORD1_SPACE_USED
	| FATTR4_WORD1_TIME_ACCESS
	| FATTR4_WORD1_TIME_METADATA
	| FATTR4_WORD1_TIME_MODIFY
	/* mounted-on fileid is needed to detect NFSv4 junction crossings */
	| FATTR4_WORD1_MOUNTED_ON_FILEID,
#ifdef CONFIG_NFS_V4_SECURITY_LABEL
	FATTR4_WORD2_SECURITY_LABEL
#endif
};
|
|
|
|
|
2012-06-05 13:16:47 +00:00
|
|
|
/*
 * Attribute bitmap used on OPEN: the standard attribute set plus
 * FATTR4_WORD2_MDSTHRESHOLD (used by pNFS layout handling).
 */
static const u32 nfs4_pnfs_open_bitmap[3] = {
	FATTR4_WORD0_TYPE
	| FATTR4_WORD0_CHANGE
	| FATTR4_WORD0_SIZE
	| FATTR4_WORD0_FSID
	| FATTR4_WORD0_FILEID,
	FATTR4_WORD1_MODE
	| FATTR4_WORD1_NUMLINKS
	| FATTR4_WORD1_OWNER
	| FATTR4_WORD1_OWNER_GROUP
	| FATTR4_WORD1_RAWDEV
	| FATTR4_WORD1_SPACE_USED
	| FATTR4_WORD1_TIME_ACCESS
	| FATTR4_WORD1_TIME_METADATA
	| FATTR4_WORD1_TIME_MODIFY,
	FATTR4_WORD2_MDSTHRESHOLD
#ifdef CONFIG_NFS_V4_SECURITY_LABEL
	| FATTR4_WORD2_SECURITY_LABEL
#endif
};
|
|
|
|
|
2012-10-03 01:07:32 +00:00
|
|
|
/* Minimal attribute set requested on OPEN when full attributes are not needed */
static const u32 nfs4_open_noattr_bitmap[3] = {
	FATTR4_WORD0_TYPE
	| FATTR4_WORD0_FILEID,
};
|
|
|
|
|
2013-05-22 16:50:41 +00:00
|
|
|
/* Attributes requested when gathering filesystem statistics (statfs) */
const u32 nfs4_statfs_bitmap[3] = {
	FATTR4_WORD0_FILES_AVAIL
	| FATTR4_WORD0_FILES_FREE
	| FATTR4_WORD0_FILES_TOTAL,
	FATTR4_WORD1_SPACE_AVAIL
	| FATTR4_WORD1_SPACE_FREE
	| FATTR4_WORD1_SPACE_TOTAL
};
|
|
|
|
|
2013-05-22 16:50:41 +00:00
|
|
|
/* Attributes requested when answering pathconf queries */
const u32 nfs4_pathconf_bitmap[3] = {
	FATTR4_WORD0_MAXLINK
	| FATTR4_WORD0_MAXNAME,
	0
};
|
|
|
|
|
2011-07-31 00:52:37 +00:00
|
|
|
/* Attributes requested when fetching filesystem information (fsinfo) */
const u32 nfs4_fsinfo_bitmap[3] = { FATTR4_WORD0_MAXFILESIZE
			| FATTR4_WORD0_MAXREAD
			| FATTR4_WORD0_MAXWRITE
			| FATTR4_WORD0_LEASE_TIME,
			FATTR4_WORD1_TIME_DELTA
			| FATTR4_WORD1_FS_LAYOUT_TYPES,
			FATTR4_WORD2_LAYOUT_BLKSIZE
			| FATTR4_WORD2_CLONE_BLKSIZE
			| FATTR4_WORD2_CHANGE_ATTR_TYPE
			| FATTR4_WORD2_XATTR_SUPPORT
};
|
|
|
|
|
2013-05-22 16:50:41 +00:00
|
|
|
/* Attributes requested when querying fs_locations (referrals/migration) */
const u32 nfs4_fs_locations_bitmap[3] = {
	FATTR4_WORD0_CHANGE
	| FATTR4_WORD0_SIZE
	| FATTR4_WORD0_FSID
	| FATTR4_WORD0_FILEID
	| FATTR4_WORD0_FS_LOCATIONS,
	FATTR4_WORD1_OWNER
	| FATTR4_WORD1_OWNER_GROUP
	| FATTR4_WORD1_RAWDEV
	| FATTR4_WORD1_SPACE_USED
	| FATTR4_WORD1_TIME_ACCESS
	| FATTR4_WORD1_TIME_METADATA
	| FATTR4_WORD1_TIME_MODIFY
	| FATTR4_WORD1_MOUNTED_ON_FILEID,
};
|
|
|
|
|
2018-04-07 17:44:28 +00:00
|
|
|
/*
 * Copy the attribute bitmask @src into @dst, then trim out attributes
 * that the client can serve from its own cache: when we hold a read
 * delegation for @inode, attributes whose cached values are still valid
 * need not be requested from the server.
 */
static void nfs4_bitmap_copy_adjust(__u32 *dst, const __u32 *src,
		struct inode *inode, unsigned long flags)
{
	unsigned long cache_validity;

	memcpy(dst, src, NFS4_BITMASK_SZ*sizeof(*dst));
	/* Without a read delegation we must ask the server for everything */
	if (!inode || !nfs4_have_delegation(inode, FMODE_READ))
		return;

	/* @flags lets callers force specific attributes to be refetched */
	cache_validity = READ_ONCE(NFS_I(inode)->cache_validity) | flags;

	/* Remove the attributes over which we have full control */
	dst[1] &= ~FATTR4_WORD1_RAWDEV;
	if (!(cache_validity & NFS_INO_INVALID_SIZE))
		dst[0] &= ~FATTR4_WORD0_SIZE;

	if (!(cache_validity & NFS_INO_INVALID_CHANGE))
		dst[0] &= ~FATTR4_WORD0_CHANGE;

	if (!(cache_validity & NFS_INO_INVALID_MODE))
		dst[1] &= ~FATTR4_WORD1_MODE;
	if (!(cache_validity & NFS_INO_INVALID_OTHER))
		dst[1] &= ~(FATTR4_WORD1_OWNER | FATTR4_WORD1_OWNER_GROUP);
}
|
|
|
|
|
2006-10-20 06:28:51 +00:00
|
|
|
/*
 * Prepare the READDIR arguments for @cookie.  For cookies > 2 the
 * request is forwarded to the server unchanged.  Cookies 0-2 are
 * reserved for the synthetic '.' and '..' entries, which are XDR
 * encoded by hand into the first reply page below.
 */
static void nfs4_setup_readdir(u64 cookie, __be32 *verifier, struct dentry *dentry,
		struct nfs4_readdir_arg *readdir)
{
	unsigned int attrs = FATTR4_WORD0_FILEID | FATTR4_WORD0_TYPE;
	__be32 *start, *p;

	if (cookie > 2) {
		readdir->cookie = cookie;
		memcpy(&readdir->verifier, verifier, sizeof(readdir->verifier));
		return;
	}

	/* Cookies 0-2 restart the listing from the server's beginning */
	readdir->cookie = 0;
	memset(&readdir->verifier, 0, sizeof(readdir->verifier));
	if (cookie == 2)
		return;

	/*
	 * NFSv4 servers do not return entries for '.' and '..'
	 * Therefore, we fake these entries here.  We let '.'
	 * have cookie 0 and '..' have cookie 1.  Note that
	 * when talking to the server, we always send cookie 0
	 * instead of 1 or 2.
	 */
	start = p = kmap_atomic(*readdir->pages);

	if (cookie == 0) {
		*p++ = xdr_one;				/* next */
		*p++ = xdr_zero;			/* cookie, first word */
		*p++ = xdr_one;				/* cookie, second word */
		*p++ = xdr_one;				/* entry len */
		memcpy(p, ".\0\0\0", 4);		/* entry */
		p++;
		*p++ = xdr_one;				/* bitmap length */
		*p++ = htonl(attrs);			/* bitmap */
		*p++ = htonl(12);			/* attribute buffer length */
		*p++ = htonl(NF4DIR);
		p = xdr_encode_hyper(p, NFS_FILEID(d_inode(dentry)));
	}

	*p++ = xdr_one;					/* next */
	*p++ = xdr_zero;				/* cookie, first word */
	*p++ = xdr_two;					/* cookie, second word */
	*p++ = xdr_two;					/* entry len */
	memcpy(p, "..\0\0", 4);				/* entry */
	p++;
	*p++ = xdr_one;					/* bitmap length */
	*p++ = htonl(attrs);				/* bitmap */
	*p++ = htonl(12);				/* attribute buffer length */
	*p++ = htonl(NF4DIR);
	p = xdr_encode_hyper(p, NFS_FILEID(d_inode(dentry->d_parent)));

	/* Record how much of the page the fake entries consumed */
	readdir->pgbase = (char *)p - (char *)start;
	readdir->count -= readdir->pgbase;
	kunmap_atomic(start);
}
|
|
|
|
|
2022-04-25 22:04:27 +00:00
|
|
|
/*
 * Record @version as the pre-operation change attribute in @fattr,
 * unless a pre-change value has already been set.
 */
static void nfs4_fattr_set_prechange(struct nfs_fattr *fattr, u64 version)
{
	if (fattr->valid & NFS_ATTR_FATTR_PRECHANGE)
		return;
	fattr->pre_change_attr = version;
	fattr->valid |= NFS_ATTR_FATTR_PRECHANGE;
}
|
|
|
|
|
2016-09-22 17:39:05 +00:00
|
|
|
/*
 * Ask the minor-version specific code to test @stateid on the server,
 * and to free it there if it has expired or been revoked.
 */
static void nfs4_test_and_free_stateid(struct nfs_server *server,
		nfs4_stateid *stateid,
		const struct cred *cred)
{
	const struct nfs4_minor_version_ops *ops = server->nfs_client->cl_mvops;

	ops->test_and_free_expired(server, stateid, cred);
}

/* Mark @stateid as revoked, then test/free it on the server.
 * Note: modifies @stateid in place. */
static void __nfs4_free_revoked_stateid(struct nfs_server *server,
		nfs4_stateid *stateid,
		const struct cred *cred)
{
	stateid->type = NFS4_REVOKED_STATEID_TYPE;
	nfs4_test_and_free_stateid(server, stateid, cred);
}

/* Const-safe wrapper: copies @stateid before marking the copy revoked */
static void nfs4_free_revoked_stateid(struct nfs_server *server,
		const nfs4_stateid *stateid,
		const struct cred *cred)
{
	nfs4_stateid tmp;

	nfs4_stateid_copy(&tmp, stateid);
	__nfs4_free_revoked_stateid(server, &tmp, cred);
}
|
|
|
|
|
2014-09-18 06:09:27 +00:00
|
|
|
static long nfs4_update_delay(long *timeout)
|
|
|
|
{
|
|
|
|
long ret;
|
|
|
|
if (!timeout)
|
|
|
|
return NFS4_POLL_RETRY_MAX;
|
|
|
|
if (*timeout <= 0)
|
|
|
|
*timeout = NFS4_POLL_RETRY_MIN;
|
|
|
|
if (*timeout > NFS4_POLL_RETRY_MAX)
|
|
|
|
*timeout = NFS4_POLL_RETRY_MAX;
|
|
|
|
ret = *timeout;
|
|
|
|
*timeout <<= 1;
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2019-04-07 17:59:09 +00:00
|
|
|
/*
 * Sleep for the backoff period computed by nfs4_update_delay().
 * Only a fatal (SIGKILL) signal terminates the wait early.
 * Returns 0 after the sleep, or -EINTR on a fatal signal.
 */
static int nfs4_delay_killable(long *timeout)
{
	might_sleep();

	/* TASK_FREEZABLE_UNSAFE lets the freezer treat this sleep as frozen */
	__set_current_state(TASK_KILLABLE|TASK_FREEZABLE_UNSAFE);
	schedule_timeout(nfs4_update_delay(timeout));
	if (!__fatal_signal_pending(current))
		return 0;
	return -EINTR;
}

/*
 * As nfs4_delay_killable(), but any pending signal interrupts the wait.
 * Returns 0 after the sleep, -EINTR on a fatal signal, or -ERESTARTSYS
 * on a non-fatal one.
 */
static int nfs4_delay_interruptible(long *timeout)
{
	might_sleep();

	__set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE_UNSAFE);
	schedule_timeout(nfs4_update_delay(timeout));
	if (!signal_pending(current))
		return 0;
	return __fatal_signal_pending(current) ? -EINTR :-ERESTARTSYS;
}

/* Back off before retrying an NFSv4 operation, killable or interruptible */
static int nfs4_delay(long *timeout, bool interruptible)
{
	if (interruptible)
		return nfs4_delay_interruptible(timeout);
	return nfs4_delay_killable(timeout);
}
|
|
|
|
|
2019-07-11 23:02:18 +00:00
|
|
|
static const nfs4_stateid *
|
|
|
|
nfs4_recoverable_stateid(const nfs4_stateid *stateid)
|
|
|
|
{
|
|
|
|
if (!stateid)
|
|
|
|
return NULL;
|
|
|
|
switch (stateid->type) {
|
|
|
|
case NFS4_OPEN_STATEID_TYPE:
|
|
|
|
case NFS4_LOCK_STATEID_TYPE:
|
|
|
|
case NFS4_DELEGATION_STATEID_TYPE:
|
|
|
|
return stateid;
|
|
|
|
default:
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2008-12-23 20:21:44 +00:00
|
|
|
/* This is the error handling routine for processes that are allowed
|
|
|
|
* to sleep.
|
|
|
|
*/
|
2015-09-20 18:32:45 +00:00
|
|
|
static int nfs4_do_handle_exception(struct nfs_server *server,
|
|
|
|
int errorcode, struct nfs4_exception *exception)
|
2008-12-23 20:21:44 +00:00
|
|
|
{
|
|
|
|
struct nfs_client *clp = server->nfs_client;
|
2008-12-23 20:21:46 +00:00
|
|
|
struct nfs4_state *state = exception->state;
|
2019-07-11 23:02:18 +00:00
|
|
|
const nfs4_stateid *stateid;
|
2012-03-07 21:39:06 +00:00
|
|
|
struct inode *inode = exception->inode;
|
2008-12-23 20:21:44 +00:00
|
|
|
int ret = errorcode;
|
|
|
|
|
2015-09-20 18:32:45 +00:00
|
|
|
exception->delay = 0;
|
|
|
|
exception->recovering = 0;
|
2008-12-23 20:21:44 +00:00
|
|
|
exception->retry = 0;
|
2016-09-22 17:39:15 +00:00
|
|
|
|
2019-07-11 23:02:18 +00:00
|
|
|
stateid = nfs4_recoverable_stateid(exception->stateid);
|
2016-09-22 17:39:15 +00:00
|
|
|
if (stateid == NULL && state != NULL)
|
2019-07-11 23:02:18 +00:00
|
|
|
stateid = nfs4_recoverable_stateid(&state->stateid);
|
2016-09-22 17:39:15 +00:00
|
|
|
|
2008-12-23 20:21:44 +00:00
|
|
|
switch(errorcode) {
|
|
|
|
case 0:
|
|
|
|
return 0;
|
2018-05-30 02:06:08 +00:00
|
|
|
case -NFS4ERR_BADHANDLE:
|
|
|
|
case -ESTALE:
|
|
|
|
if (inode != NULL && S_ISREG(inode->i_mode))
|
|
|
|
pnfs_destroy_layout(NFS_I(inode));
|
|
|
|
break;
|
2015-06-16 15:26:35 +00:00
|
|
|
case -NFS4ERR_DELEG_REVOKED:
|
|
|
|
case -NFS4ERR_ADMIN_REVOKED:
|
2016-09-22 17:39:15 +00:00
|
|
|
case -NFS4ERR_EXPIRED:
|
2015-06-16 15:26:35 +00:00
|
|
|
case -NFS4ERR_BAD_STATEID:
|
2019-06-14 18:22:12 +00:00
|
|
|
case -NFS4ERR_PARTNER_NO_AUTH:
|
2016-09-22 17:39:15 +00:00
|
|
|
if (inode != NULL && stateid != NULL) {
|
|
|
|
nfs_inode_find_state_and_recover(inode,
|
|
|
|
stateid);
|
|
|
|
goto wait_on_recovery;
|
|
|
|
}
|
2020-08-23 22:36:59 +00:00
|
|
|
fallthrough;
|
2016-09-22 17:39:15 +00:00
|
|
|
case -NFS4ERR_OPENMODE:
|
2016-06-26 12:44:35 +00:00
|
|
|
if (inode) {
|
|
|
|
int err;
|
|
|
|
|
|
|
|
err = nfs_async_inode_return_delegation(inode,
|
|
|
|
stateid);
|
|
|
|
if (err == 0)
|
|
|
|
goto wait_on_recovery;
|
|
|
|
if (stateid != NULL && stateid->type == NFS4_DELEGATION_STATEID_TYPE) {
|
|
|
|
exception->retry = 1;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
2012-03-07 21:39:06 +00:00
|
|
|
if (state == NULL)
|
|
|
|
break;
|
2013-03-14 20:57:48 +00:00
|
|
|
ret = nfs4_schedule_stateid_recovery(server, state);
|
|
|
|
if (ret < 0)
|
|
|
|
break;
|
2012-03-07 21:39:06 +00:00
|
|
|
goto wait_on_recovery;
|
2008-12-23 20:21:44 +00:00
|
|
|
case -NFS4ERR_STALE_STATEID:
|
2010-01-26 20:42:47 +00:00
|
|
|
case -NFS4ERR_STALE_CLIENTID:
|
2011-03-09 21:00:53 +00:00
|
|
|
nfs4_schedule_lease_recovery(clp);
|
|
|
|
goto wait_on_recovery;
|
2013-10-17 18:13:19 +00:00
|
|
|
case -NFS4ERR_MOVED:
|
|
|
|
ret = nfs4_schedule_migration_recovery(server);
|
|
|
|
if (ret < 0)
|
|
|
|
break;
|
|
|
|
goto wait_on_recovery;
|
2013-10-17 18:13:41 +00:00
|
|
|
case -NFS4ERR_LEASE_MOVED:
|
|
|
|
nfs4_schedule_lease_moved_recovery(clp);
|
|
|
|
goto wait_on_recovery;
|
2010-01-26 20:42:38 +00:00
|
|
|
#if defined(CONFIG_NFS_V4_1)
|
nfs41: kick start nfs41 session recovery when handling errors
Remove checking for any errors that the SEQUENCE operation does not return.
-NFS4ERR_STALE_CLIENTID, NFS4ERR_EXPIRED, NFS4ERR_CB_PATH_DOWN, NFS4ERR_BACK_CHAN_BUSY, NFS4ERR_OP_NOT_IN_SESSION.
SEQUENCE operation error recovery is very primative, we only reset the session.
Remove checking for any errors that are returned by the SEQUENCE operation, but
that resetting the session won't address.
NFS4ERR_RETRY_UNCACHED_REP, NFS4ERR_SEQUENCE_POS,NFS4ERR_TOO_MANY_OPS.
Add error checking for missing SEQUENCE errors that a session reset will
address.
NFS4ERR_BAD_HIGH_SLOT, NFS4ERR_DEADSESSION, NFS4ERR_SEQ_FALSE_RETRY.
A reset of the session is currently our only response to a SEQUENCE operation
error. Don't reset the session on errors where a new session won't help.
Don't reset the session on errors where a new session won't help.
[nfs41: nfs4_async_handle_error update error checking]
Signed-off-by: Andy Adamson <andros@netapp.com>
Signed-off-by: Benny Halevy <bhalevy@panasas.com>
[nfs41: trigger the state manager for session reset]
Replace session state bit with nfs_client state bit. Set the
NFS4CLNT_SESSION_SETUP bit upon a session related error in the sync/async
error handlers.
[nfs41: _nfs4_async_handle_error fix session reset error list]
Sequence operation errors that session reset could help.
NFS4ERR_BADSESSION
NFS4ERR_BADSLOT
NFS4ERR_BAD_HIGH_SLOT
NFS4ERR_DEADSESSION
NFS4ERR_CONN_NOT_BOUND_TO_SESSION
NFS4ERR_SEQ_FALSE_RETRY
NFS4ERR_SEQ_MISORDERED
Sequence operation errors that a session reset would not help
NFS4ERR_BADXDR
NFS4ERR_DELAY
NFS4ERR_REP_TOO_BIG
NFS4ERR_REP_TOO_BIG_TO_CACHE
NFS4ERR_REQ_TOO_BIG
NFS4ERR_RETRY_UNCACHED_REP
NFS4ERR_SEQUENCE_POS
NFS4ERR_TOO_MANY_OPS
Signed-off-by: Andy Adamson <andros@netapp.com>
[nfs41 nfs4_handle_exception fix session reset error list]
Signed-off-by: Andy Adamson <andros@netapp.com>
Signed-off-by: Benny Halevy <bhalevy@panasas.com>
[moved nfs41_sequece_call_done code to nfs41: sequence operation]
Signed-off-by: Andy Adamson <andros@netapp.com>
Signed-off-by: Benny Halevy <bhalevy@panasas.com>
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
2009-04-01 13:22:42 +00:00
|
|
|
case -NFS4ERR_BADSESSION:
|
|
|
|
case -NFS4ERR_BADSLOT:
|
|
|
|
case -NFS4ERR_BAD_HIGH_SLOT:
|
|
|
|
case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
|
|
|
|
case -NFS4ERR_DEADSESSION:
|
|
|
|
case -NFS4ERR_SEQ_FALSE_RETRY:
|
|
|
|
case -NFS4ERR_SEQ_MISORDERED:
|
2019-11-13 07:34:00 +00:00
|
|
|
/* Handled in nfs41_sequence_process() */
|
2012-10-30 20:06:35 +00:00
|
|
|
goto wait_on_recovery;
|
2010-01-26 20:42:38 +00:00
|
|
|
#endif /* defined(CONFIG_NFS_V4_1) */
|
2008-12-23 20:21:44 +00:00
|
|
|
case -NFS4ERR_FILE_OPEN:
|
2009-12-03 20:58:56 +00:00
|
|
|
if (exception->timeout > HZ) {
|
|
|
|
/* We have retried a decent amount, time to
|
|
|
|
* fail
|
|
|
|
*/
|
|
|
|
ret = -EBUSY;
|
|
|
|
break;
|
|
|
|
}
|
2020-08-23 22:36:59 +00:00
|
|
|
fallthrough;
|
2008-12-23 20:21:44 +00:00
|
|
|
case -NFS4ERR_DELAY:
|
2015-09-20 20:10:18 +00:00
|
|
|
nfs_inc_server_stats(server, NFSIOS_DELAY);
|
2020-08-23 22:36:59 +00:00
|
|
|
fallthrough;
|
2015-09-20 20:10:18 +00:00
|
|
|
case -NFS4ERR_GRACE:
|
2016-07-14 22:46:24 +00:00
|
|
|
case -NFS4ERR_LAYOUTTRYLATER:
|
pnfs: rework LAYOUTGET retry handling
There are several problems in the way a stateid is selected for a
LAYOUTGET operation:
We pick a stateid to use in the RPC prepare op, but that makes
it difficult to serialize LAYOUTGETs that use the open stateid. That
serialization is done in pnfs_update_layout, which occurs well before
the rpc_prepare operation.
Between those two events, the i_lock is dropped and reacquired.
pnfs_update_layout can find that the list has lsegs in it and not do any
serialization, but then later pnfs_choose_layoutget_stateid ends up
choosing the open stateid.
This patch changes the client to select the stateid to use in the
LAYOUTGET earlier, when we're searching for a usable layout segment.
This way we can do it all while holding the i_lock the first time, and
ensure that we serialize any LAYOUTGET call that uses a non-layout
stateid.
This also means a rework of how LAYOUTGET replies are handled, as we
must now get the latest stateid if we want to retransmit in response
to a retryable error.
Most of those errors boil down to the fact that the layout state has
changed in some fashion. Thus, what we really want to do is to re-search
for a layout when it fails with a retryable error, so that we can avoid
reissuing the RPC at all if possible.
While the LAYOUTGET RPC is async, the initiating thread always waits for
it to complete, so it's effectively synchronous anyway. Currently, when
we need to retry a LAYOUTGET because of an error, we drive that retry
via the rpc state machine.
This means that once the call has been submitted, it runs until it
completes. So, we must move the error handling for this RPC out of the
rpc_call_done operation and into the caller.
In order to handle errors like NFS4ERR_DELAY properly, we must also
pass a pointer to the sliding timeout, which is now moved to the stack
in pnfs_update_layout.
The complicating errors are -NFS4ERR_RECALLCONFLICT and
-NFS4ERR_LAYOUTTRYLATER, as those involve a timeout after which we give
up and return NULL back to the caller. So, there is some special
handling for those errors to ensure that the layers driving the retries
can handle that appropriately.
Signed-off-by: Jeff Layton <jeff.layton@primarydata.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
2016-05-17 16:28:47 +00:00
|
|
|
case -NFS4ERR_RECALLCONFLICT:
|
2023-11-15 18:55:29 +00:00
|
|
|
case -NFS4ERR_RETURNCONFLICT:
|
2015-09-20 18:32:45 +00:00
|
|
|
exception->delay = 1;
|
|
|
|
return 0;
|
|
|
|
|
2011-05-03 17:43:03 +00:00
|
|
|
case -NFS4ERR_RETRY_UNCACHED_REP:
|
2008-12-23 20:21:44 +00:00
|
|
|
case -NFS4ERR_OLD_STATEID:
|
|
|
|
exception->retry = 1;
|
2011-02-22 23:44:32 +00:00
|
|
|
break;
|
|
|
|
case -NFS4ERR_BADOWNER:
|
|
|
|
/* The following works around a Linux server bug! */
|
|
|
|
case -NFS4ERR_BADNAME:
|
|
|
|
if (server->caps & NFS_CAP_UIDGID_NOMAP) {
|
|
|
|
server->caps &= ~NFS_CAP_UIDGID_NOMAP;
|
|
|
|
exception->retry = 1;
|
|
|
|
printk(KERN_WARNING "NFS: v4 server %s "
|
|
|
|
"does not accept raw "
|
|
|
|
"uid/gids. "
|
|
|
|
"Reenabling the idmapper.\n",
|
|
|
|
server->nfs_client->cl_hostname);
|
|
|
|
}
|
2008-12-23 20:21:44 +00:00
|
|
|
}
|
|
|
|
/* We failed to handle the error */
|
|
|
|
return nfs4_map_errors(ret);
|
2011-03-09 21:00:53 +00:00
|
|
|
wait_on_recovery:
|
2015-09-20 18:32:45 +00:00
|
|
|
exception->recovering = 1;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2023-09-09 16:23:01 +00:00
|
|
|
/*
|
|
|
|
* Track the number of NFS4ERR_DELAY related retransmissions and return
|
|
|
|
* EAGAIN if the 'softerr' mount option is set, and we've exceeded the limit
|
|
|
|
* set by 'nfs_delay_retrans'.
|
|
|
|
*/
|
|
|
|
static int nfs4_exception_should_retrans(const struct nfs_server *server,
|
|
|
|
struct nfs4_exception *exception)
|
|
|
|
{
|
|
|
|
if (server->flags & NFS_MOUNT_SOFTERR && nfs_delay_retrans >= 0) {
|
|
|
|
if (exception->retrans++ >= (unsigned short)nfs_delay_retrans)
|
|
|
|
return -EAGAIN;
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2015-09-20 18:32:45 +00:00
|
|
|
/* This is the error handling routine for processes that are allowed
|
|
|
|
* to sleep.
|
|
|
|
*/
|
|
|
|
int nfs4_handle_exception(struct nfs_server *server, int errorcode, struct nfs4_exception *exception)
|
|
|
|
{
|
|
|
|
struct nfs_client *clp = server->nfs_client;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
ret = nfs4_do_handle_exception(server, errorcode, exception);
|
|
|
|
if (exception->delay) {
|
2023-09-09 16:23:01 +00:00
|
|
|
int ret2 = nfs4_exception_should_retrans(server, exception);
|
|
|
|
if (ret2 < 0) {
|
|
|
|
exception->retry = 0;
|
|
|
|
return ret2;
|
|
|
|
}
|
2019-04-07 17:59:09 +00:00
|
|
|
ret = nfs4_delay(&exception->timeout,
|
|
|
|
exception->interruptible);
|
2015-09-20 18:32:45 +00:00
|
|
|
goto out_retry;
|
|
|
|
}
|
|
|
|
if (exception->recovering) {
|
2021-06-01 15:10:05 +00:00
|
|
|
if (exception->task_is_privileged)
|
|
|
|
return -EDEADLOCK;
|
2015-09-20 18:32:45 +00:00
|
|
|
ret = nfs4_wait_clnt_recover(clp);
|
|
|
|
if (test_bit(NFS_MIG_FAILED, &server->mig_status))
|
|
|
|
return -EIO;
|
|
|
|
goto out_retry;
|
|
|
|
}
|
|
|
|
return ret;
|
|
|
|
out_retry:
|
2010-01-26 20:42:47 +00:00
|
|
|
if (ret == 0)
|
|
|
|
exception->retry = 1;
|
|
|
|
return ret;
|
2008-12-23 20:21:44 +00:00
|
|
|
}
|
|
|
|
|
2015-09-20 19:51:00 +00:00
|
|
|
static int
|
|
|
|
nfs4_async_handle_exception(struct rpc_task *task, struct nfs_server *server,
|
|
|
|
int errorcode, struct nfs4_exception *exception)
|
|
|
|
{
|
|
|
|
struct nfs_client *clp = server->nfs_client;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
ret = nfs4_do_handle_exception(server, errorcode, exception);
|
|
|
|
if (exception->delay) {
|
2023-09-09 16:23:01 +00:00
|
|
|
int ret2 = nfs4_exception_should_retrans(server, exception);
|
|
|
|
if (ret2 < 0) {
|
|
|
|
exception->retry = 0;
|
|
|
|
return ret2;
|
|
|
|
}
|
2015-09-20 19:51:00 +00:00
|
|
|
rpc_delay(task, nfs4_update_delay(&exception->timeout));
|
|
|
|
goto out_retry;
|
|
|
|
}
|
|
|
|
if (exception->recovering) {
|
2021-06-01 15:10:05 +00:00
|
|
|
if (exception->task_is_privileged)
|
|
|
|
return -EDEADLOCK;
|
2015-09-20 19:51:00 +00:00
|
|
|
rpc_sleep_on(&clp->cl_rpcwaitq, task, NULL);
|
|
|
|
if (test_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) == 0)
|
|
|
|
rpc_wake_up_queued_task(&clp->cl_rpcwaitq, task);
|
|
|
|
goto out_retry;
|
|
|
|
}
|
2013-10-17 18:13:19 +00:00
|
|
|
if (test_bit(NFS_MIG_FAILED, &server->mig_status))
|
2015-09-20 19:51:00 +00:00
|
|
|
ret = -EIO;
|
|
|
|
return ret;
|
|
|
|
out_retry:
|
2018-06-19 21:24:58 +00:00
|
|
|
if (ret == 0) {
|
2010-01-26 20:42:47 +00:00
|
|
|
exception->retry = 1;
|
2018-06-19 21:24:58 +00:00
|
|
|
/*
|
|
|
|
* For NFS4ERR_MOVED, the client transport will need to
|
|
|
|
* be recomputed after migration recovery has completed.
|
|
|
|
*/
|
|
|
|
if (errorcode == -NFS4ERR_MOVED)
|
|
|
|
rpc_task_release_transport(task);
|
|
|
|
}
|
2010-01-26 20:42:47 +00:00
|
|
|
return ret;
|
2008-12-23 20:21:44 +00:00
|
|
|
}
|
|
|
|
|
2018-07-09 19:13:33 +00:00
|
|
|
int
|
2015-09-20 19:51:00 +00:00
|
|
|
nfs4_async_handle_error(struct rpc_task *task, struct nfs_server *server,
|
|
|
|
struct nfs4_state *state, long *timeout)
|
|
|
|
{
|
|
|
|
struct nfs4_exception exception = {
|
|
|
|
.state = state,
|
|
|
|
};
|
|
|
|
|
|
|
|
if (task->tk_status >= 0)
|
|
|
|
return 0;
|
|
|
|
if (timeout)
|
|
|
|
exception.timeout = *timeout;
|
|
|
|
task->tk_status = nfs4_async_handle_exception(task, server,
|
|
|
|
task->tk_status,
|
|
|
|
&exception);
|
|
|
|
if (exception.delay && timeout)
|
|
|
|
*timeout = exception.timeout;
|
|
|
|
if (exception.retry)
|
|
|
|
return -EAGAIN;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2013-09-03 19:18:49 +00:00
|
|
|
/*
|
|
|
|
* Return 'true' if 'clp' is using an rpc_client that is integrity protected
|
|
|
|
* or 'false' otherwise.
|
|
|
|
*/
|
|
|
|
static bool _nfs4_is_integrity_protected(struct nfs_client *clp)
|
|
|
|
{
|
|
|
|
rpc_authflavor_t flavor = clp->cl_rpcclient->cl_auth->au_flavor;
|
2017-01-11 21:01:21 +00:00
|
|
|
return (flavor == RPC_AUTH_GSS_KRB5I) || (flavor == RPC_AUTH_GSS_KRB5P);
|
2013-09-03 19:18:49 +00:00
|
|
|
}
|
2008-12-23 20:21:44 +00:00
|
|
|
|
2010-07-31 18:29:06 +00:00
|
|
|
static void do_renew_lease(struct nfs_client *clp, unsigned long timestamp)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
|
|
|
spin_lock(&clp->cl_lock);
|
|
|
|
if (time_before(clp->cl_last_renewal,timestamp))
|
|
|
|
clp->cl_last_renewal = timestamp;
|
|
|
|
spin_unlock(&clp->cl_lock);
|
|
|
|
}
|
|
|
|
|
2010-07-31 18:29:06 +00:00
|
|
|
static void renew_lease(const struct nfs_server *server, unsigned long timestamp)
|
|
|
|
{
|
2015-07-05 18:50:46 +00:00
|
|
|
struct nfs_client *clp = server->nfs_client;
|
|
|
|
|
|
|
|
if (!nfs4_has_session(clp))
|
|
|
|
do_renew_lease(clp, timestamp);
|
2010-07-31 18:29:06 +00:00
|
|
|
}
|
|
|
|
|
2013-08-09 16:48:00 +00:00
|
|
|
struct nfs4_call_sync_data {
|
|
|
|
const struct nfs_server *seq_server;
|
|
|
|
struct nfs4_sequence_args *seq_args;
|
|
|
|
struct nfs4_sequence_res *seq_res;
|
|
|
|
};
|
|
|
|
|
2015-06-23 11:51:55 +00:00
|
|
|
void nfs4_init_sequence(struct nfs4_sequence_args *args,
|
2018-05-04 20:22:50 +00:00
|
|
|
struct nfs4_sequence_res *res, int cache_reply,
|
|
|
|
int privileged)
|
2013-08-09 16:48:18 +00:00
|
|
|
{
|
|
|
|
args->sa_slot = NULL;
|
|
|
|
args->sa_cache_this = cache_reply;
|
2018-05-04 20:22:50 +00:00
|
|
|
args->sa_privileged = privileged;
|
2013-08-09 16:48:18 +00:00
|
|
|
|
|
|
|
res->sr_slot = NULL;
|
|
|
|
}
|
|
|
|
|
2016-08-28 15:50:26 +00:00
|
|
|
static void nfs40_sequence_free_slot(struct nfs4_sequence_res *res)
|
2013-08-09 16:49:19 +00:00
|
|
|
{
|
|
|
|
struct nfs4_slot *slot = res->sr_slot;
|
|
|
|
struct nfs4_slot_table *tbl;
|
|
|
|
|
|
|
|
tbl = slot->table;
|
|
|
|
spin_lock(&tbl->slot_tbl_lock);
|
|
|
|
if (!nfs41_wake_and_assign_slot(tbl, slot))
|
|
|
|
nfs4_free_slot(tbl, slot);
|
|
|
|
spin_unlock(&tbl->slot_tbl_lock);
|
|
|
|
|
|
|
|
res->sr_slot = NULL;
|
2016-08-28 15:50:26 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static int nfs40_sequence_done(struct rpc_task *task,
|
|
|
|
struct nfs4_sequence_res *res)
|
|
|
|
{
|
|
|
|
if (res->sr_slot != NULL)
|
|
|
|
nfs40_sequence_free_slot(res);
|
2013-08-09 16:49:19 +00:00
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
2009-04-01 13:22:03 +00:00
|
|
|
#if defined(CONFIG_NFS_V4_1)
|
|
|
|
|
2017-10-19 19:46:45 +00:00
|
|
|
static void nfs41_release_slot(struct nfs4_slot *slot)
|
2009-04-01 13:22:17 +00:00
|
|
|
{
|
2012-11-16 17:25:01 +00:00
|
|
|
struct nfs4_session *session;
|
2009-04-01 13:22:17 +00:00
|
|
|
struct nfs4_slot_table *tbl;
|
2012-11-26 21:16:54 +00:00
|
|
|
bool send_new_highest_used_slotid = false;
|
2009-04-01 13:22:17 +00:00
|
|
|
|
2017-10-19 19:46:45 +00:00
|
|
|
if (!slot)
|
|
|
|
return;
|
2014-01-29 17:24:03 +00:00
|
|
|
tbl = slot->table;
|
2012-11-16 17:25:01 +00:00
|
|
|
session = tbl->session;
|
2009-12-04 20:55:38 +00:00
|
|
|
|
2016-08-28 14:28:25 +00:00
|
|
|
/* Bump the slot sequence number */
|
|
|
|
if (slot->seq_done)
|
|
|
|
slot->seq_nr++;
|
|
|
|
slot->seq_done = 0;
|
|
|
|
|
2009-12-06 00:32:19 +00:00
|
|
|
spin_lock(&tbl->slot_tbl_lock);
|
2012-11-26 21:16:54 +00:00
|
|
|
/* Be nice to the server: try to ensure that the last transmitted
|
|
|
|
* value for highest_user_slotid <= target_highest_slotid
|
|
|
|
*/
|
|
|
|
if (tbl->highest_used_slotid > tbl->target_highest_slotid)
|
|
|
|
send_new_highest_used_slotid = true;
|
|
|
|
|
2014-01-29 17:24:03 +00:00
|
|
|
if (nfs41_wake_and_assign_slot(tbl, slot)) {
|
2012-11-29 22:27:47 +00:00
|
|
|
send_new_highest_used_slotid = false;
|
|
|
|
goto out_unlock;
|
|
|
|
}
|
2014-01-29 17:24:03 +00:00
|
|
|
nfs4_free_slot(tbl, slot);
|
2012-11-26 21:16:54 +00:00
|
|
|
|
|
|
|
if (tbl->highest_used_slotid != NFS4_NO_SLOT)
|
|
|
|
send_new_highest_used_slotid = false;
|
2012-11-29 22:27:47 +00:00
|
|
|
out_unlock:
|
2009-12-06 00:32:19 +00:00
|
|
|
spin_unlock(&tbl->slot_tbl_lock);
|
2012-11-26 21:16:54 +00:00
|
|
|
if (send_new_highest_used_slotid)
|
2015-07-13 18:01:31 +00:00
|
|
|
nfs41_notify_server(session->clp);
|
2016-08-28 17:25:43 +00:00
|
|
|
if (waitqueue_active(&tbl->slot_waitq))
|
|
|
|
wake_up_all(&tbl->slot_waitq);
|
2009-04-01 13:22:17 +00:00
|
|
|
}
|
|
|
|
|
2017-10-19 19:46:45 +00:00
|
|
|
static void nfs41_sequence_free_slot(struct nfs4_sequence_res *res)
|
|
|
|
{
|
|
|
|
nfs41_release_slot(res->sr_slot);
|
|
|
|
res->sr_slot = NULL;
|
|
|
|
}
|
|
|
|
|
2018-06-20 21:53:34 +00:00
|
|
|
static void nfs4_slot_sequence_record_sent(struct nfs4_slot *slot,
|
|
|
|
u32 seqnr)
|
|
|
|
{
|
|
|
|
if ((s32)(seqnr - slot->seq_nr_highest_sent) > 0)
|
|
|
|
slot->seq_nr_highest_sent = seqnr;
|
|
|
|
}
|
2022-07-12 13:16:04 +00:00
|
|
|
static void nfs4_slot_sequence_acked(struct nfs4_slot *slot, u32 seqnr)
|
2018-06-20 21:53:34 +00:00
|
|
|
{
|
2022-07-12 13:16:04 +00:00
|
|
|
nfs4_slot_sequence_record_sent(slot, seqnr);
|
2018-06-20 21:53:34 +00:00
|
|
|
slot->seq_nr_last_acked = seqnr;
|
|
|
|
}
|
|
|
|
|
2020-07-08 14:33:40 +00:00
|
|
|
static void nfs4_probe_sequence(struct nfs_client *client, const struct cred *cred,
|
|
|
|
struct nfs4_slot *slot)
|
|
|
|
{
|
|
|
|
struct rpc_task *task = _nfs41_proc_sequence(client, cred, slot, true);
|
|
|
|
if (!IS_ERR(task))
|
|
|
|
rpc_put_task_async(task);
|
|
|
|
}
|
|
|
|
|
2016-08-28 15:50:26 +00:00
|
|
|
static int nfs41_sequence_process(struct rpc_task *task,
|
|
|
|
struct nfs4_sequence_res *res)
|
2009-04-01 13:22:18 +00:00
|
|
|
{
|
2012-11-16 17:25:01 +00:00
|
|
|
struct nfs4_session *session;
|
2014-01-29 17:24:03 +00:00
|
|
|
struct nfs4_slot *slot = res->sr_slot;
|
2010-07-31 18:29:06 +00:00
|
|
|
struct nfs_client *clp;
|
2019-11-13 07:34:00 +00:00
|
|
|
int status;
|
2012-12-11 15:31:12 +00:00
|
|
|
int ret = 1;
|
2009-04-01 13:22:18 +00:00
|
|
|
|
2014-01-29 17:24:03 +00:00
|
|
|
if (slot == NULL)
|
|
|
|
goto out_noaction;
|
2011-04-18 19:57:32 +00:00
|
|
|
/* don't increment the sequence number if the task wasn't sent */
|
2019-03-01 16:40:05 +00:00
|
|
|
if (!RPC_WAS_SENT(task) || slot->seq_done)
|
2009-04-01 13:22:18 +00:00
|
|
|
goto out;
|
|
|
|
|
2012-11-16 17:25:01 +00:00
|
|
|
session = slot->table->session;
|
2020-07-08 14:33:40 +00:00
|
|
|
clp = session->clp;
|
2012-11-16 17:12:38 +00:00
|
|
|
|
2013-08-14 21:58:28 +00:00
|
|
|
trace_nfs4_sequence_done(session, res);
|
2019-11-13 07:34:00 +00:00
|
|
|
|
|
|
|
status = res->sr_status;
|
|
|
|
if (task->tk_status == -NFS4ERR_DEADSESSION)
|
|
|
|
status = -NFS4ERR_DEADSESSION;
|
|
|
|
|
2009-12-04 20:55:39 +00:00
|
|
|
/* Check the SEQUENCE operation status */
|
2019-11-13 07:34:00 +00:00
|
|
|
switch (status) {
|
2010-07-31 18:29:06 +00:00
|
|
|
case 0:
|
2018-06-20 21:53:34 +00:00
|
|
|
/* Mark this sequence number as having been acked */
|
|
|
|
nfs4_slot_sequence_acked(slot, slot->seq_nr);
|
2009-04-01 13:22:18 +00:00
|
|
|
/* Update the slot's sequence and clientid lease timer */
|
2016-08-28 14:28:25 +00:00
|
|
|
slot->seq_done = 1;
|
2012-12-15 20:21:52 +00:00
|
|
|
do_renew_lease(clp, res->sr_timestamp);
|
2009-12-05 18:46:14 +00:00
|
|
|
/* Check sequence flags */
|
2016-09-22 17:38:51 +00:00
|
|
|
nfs41_handle_sequence_flag_errors(clp, res->sr_status_flags,
|
|
|
|
!!slot->privileged);
|
2012-11-20 17:49:27 +00:00
|
|
|
nfs41_update_target_slotid(slot->table, slot, res);
|
2010-07-31 18:29:06 +00:00
|
|
|
break;
|
2012-12-15 20:36:07 +00:00
|
|
|
case 1:
|
|
|
|
/*
|
|
|
|
* sr_status remains 1 if an RPC level error occurred.
|
|
|
|
* The server may or may not have processed the sequence
|
|
|
|
* operation..
|
|
|
|
*/
|
2018-06-20 21:53:34 +00:00
|
|
|
nfs4_slot_sequence_record_sent(slot, slot->seq_nr);
|
|
|
|
slot->seq_done = 1;
|
2012-12-15 20:36:07 +00:00
|
|
|
goto out;
|
2010-07-31 18:29:06 +00:00
|
|
|
case -NFS4ERR_DELAY:
|
|
|
|
/* The server detected a resend of the RPC call and
|
|
|
|
* returned NFS4ERR_DELAY as per Section 2.10.6.2
|
|
|
|
* of RFC5661.
|
|
|
|
*/
|
2012-11-16 17:45:06 +00:00
|
|
|
dprintk("%s: slot=%u seq=%u: Operation in progress\n",
|
2010-09-24 13:17:01 +00:00
|
|
|
__func__,
|
2012-11-16 17:45:06 +00:00
|
|
|
slot->slot_nr,
|
2012-11-16 17:12:38 +00:00
|
|
|
slot->seq_nr);
|
2010-07-31 18:29:06 +00:00
|
|
|
goto out_retry;
|
2018-06-09 23:10:31 +00:00
|
|
|
case -NFS4ERR_RETRY_UNCACHED_REP:
|
|
|
|
case -NFS4ERR_SEQ_FALSE_RETRY:
|
|
|
|
/*
|
|
|
|
* The server thinks we tried to replay a request.
|
|
|
|
* Retry the call after bumping the sequence ID.
|
|
|
|
*/
|
2018-06-20 21:53:34 +00:00
|
|
|
nfs4_slot_sequence_acked(slot, slot->seq_nr);
|
2018-06-09 23:10:31 +00:00
|
|
|
goto retry_new_seq;
|
2012-12-11 15:31:12 +00:00
|
|
|
case -NFS4ERR_BADSLOT:
|
|
|
|
/*
|
|
|
|
* The slot id we used was probably retired. Try again
|
|
|
|
* using a different slot id.
|
|
|
|
*/
|
2018-06-09 16:50:50 +00:00
|
|
|
if (slot->slot_nr < slot->table->target_highest_slotid)
|
2017-10-19 19:46:45 +00:00
|
|
|
goto session_recover;
|
2012-12-15 18:56:18 +00:00
|
|
|
goto retry_nowait;
|
|
|
|
case -NFS4ERR_SEQ_MISORDERED:
|
2018-06-20 21:53:34 +00:00
|
|
|
nfs4_slot_sequence_record_sent(slot, slot->seq_nr);
|
2012-12-15 20:36:07 +00:00
|
|
|
/*
|
2018-06-20 21:53:34 +00:00
|
|
|
* Were one or more calls using this slot interrupted?
|
|
|
|
* If the server never received the request, then our
|
2020-07-08 14:33:40 +00:00
|
|
|
* transmitted slot sequence number may be too high. However,
|
|
|
|
* if the server did receive the request then it might
|
|
|
|
* accidentally give us a reply with a mismatched operation.
|
|
|
|
* We can sort this out by sending a lone sequence operation
|
|
|
|
* to the server on the same slot.
|
2012-12-15 18:56:18 +00:00
|
|
|
*/
|
2018-06-20 21:53:34 +00:00
|
|
|
if ((s32)(slot->seq_nr - slot->seq_nr_last_acked) > 1) {
|
|
|
|
slot->seq_nr--;
|
2020-07-08 14:33:40 +00:00
|
|
|
if (task->tk_msg.rpc_proc != &nfs4_procedures[NFSPROC4_CLNT_SEQUENCE]) {
|
|
|
|
nfs4_probe_sequence(clp, task->tk_msg.rpc_cred, slot);
|
|
|
|
res->sr_slot = NULL;
|
|
|
|
}
|
2012-12-15 20:21:52 +00:00
|
|
|
goto retry_nowait;
|
|
|
|
}
|
2018-06-20 21:53:34 +00:00
|
|
|
/*
|
|
|
|
* RFC5661:
|
|
|
|
* A retry might be sent while the original request is
|
|
|
|
* still in progress on the replier. The replier SHOULD
|
|
|
|
* deal with the issue by returning NFS4ERR_DELAY as the
|
|
|
|
* reply to SEQUENCE or CB_SEQUENCE operation, but
|
|
|
|
* implementations MAY return NFS4ERR_SEQ_MISORDERED.
|
|
|
|
*
|
|
|
|
* Restart the search after a delay.
|
|
|
|
*/
|
|
|
|
slot->seq_nr = slot->seq_nr_highest_sent;
|
|
|
|
goto out_retry;
|
2019-11-13 07:34:00 +00:00
|
|
|
case -NFS4ERR_BADSESSION:
|
|
|
|
case -NFS4ERR_DEADSESSION:
|
|
|
|
case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
|
|
|
|
goto session_recover;
|
2010-07-31 18:29:06 +00:00
|
|
|
default:
|
|
|
|
/* Just update the slot sequence no. */
|
2016-08-28 14:28:25 +00:00
|
|
|
slot->seq_done = 1;
|
2009-04-01 13:22:18 +00:00
|
|
|
}
|
|
|
|
out:
|
|
|
|
/* The session may be reset by one of the error handlers. */
|
|
|
|
dprintk("%s: Error %d free the slot \n", __func__, res->sr_status);
|
2014-01-29 17:24:03 +00:00
|
|
|
out_noaction:
|
2012-12-11 15:31:12 +00:00
|
|
|
return ret;
|
2017-10-19 19:46:45 +00:00
|
|
|
session_recover:
|
2023-06-18 21:32:25 +00:00
|
|
|
set_bit(NFS4_SLOT_TBL_DRAINING, &session->fc_slot_table.slot_tbl_state);
|
2019-11-13 07:34:00 +00:00
|
|
|
nfs4_schedule_session_recovery(session, status);
|
|
|
|
dprintk("%s ERROR: %d Reset session\n", __func__, status);
|
|
|
|
nfs41_sequence_free_slot(res);
|
|
|
|
goto out;
|
2017-10-19 19:46:45 +00:00
|
|
|
retry_new_seq:
|
|
|
|
++slot->seq_nr;
|
2012-12-15 18:56:18 +00:00
|
|
|
retry_nowait:
|
|
|
|
if (rpc_restart_call_prepare(task)) {
|
2016-08-28 15:50:26 +00:00
|
|
|
nfs41_sequence_free_slot(res);
|
2012-12-15 18:56:18 +00:00
|
|
|
task->tk_status = 0;
|
|
|
|
ret = 0;
|
|
|
|
}
|
|
|
|
goto out;
|
2010-07-31 18:29:06 +00:00
|
|
|
out_retry:
|
2010-07-31 18:29:07 +00:00
|
|
|
if (!rpc_restart_call(task))
|
2010-07-31 18:29:06 +00:00
|
|
|
goto out;
|
|
|
|
rpc_delay(task, NFS4_POLL_RETRY_MAX);
|
|
|
|
return 0;
|
2009-04-01 13:22:18 +00:00
|
|
|
}
|
2016-08-28 15:50:26 +00:00
|
|
|
|
|
|
|
int nfs41_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
|
|
|
|
{
|
|
|
|
if (!nfs41_sequence_process(task, res))
|
|
|
|
return 0;
|
|
|
|
if (res->sr_slot != NULL)
|
|
|
|
nfs41_sequence_free_slot(res);
|
|
|
|
return 1;
|
|
|
|
|
|
|
|
}
|
2014-01-29 16:34:38 +00:00
|
|
|
EXPORT_SYMBOL_GPL(nfs41_sequence_done);
|
2009-04-01 13:22:18 +00:00
|
|
|
|
2016-08-28 15:50:26 +00:00
|
|
|
static int nfs4_sequence_process(struct rpc_task *task, struct nfs4_sequence_res *res)
|
|
|
|
{
|
|
|
|
if (res->sr_slot == NULL)
|
|
|
|
return 1;
|
|
|
|
if (res->sr_slot->table->session != NULL)
|
|
|
|
return nfs41_sequence_process(task, res);
|
|
|
|
return nfs40_sequence_done(task, res);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void nfs4_sequence_free_slot(struct nfs4_sequence_res *res)
|
|
|
|
{
|
|
|
|
if (res->sr_slot != NULL) {
|
|
|
|
if (res->sr_slot->table->session != NULL)
|
|
|
|
nfs41_sequence_free_slot(res);
|
|
|
|
else
|
|
|
|
nfs40_sequence_free_slot(res);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-06-10 21:24:16 +00:00
|
|
|
int nfs4_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
|
2010-06-16 13:52:26 +00:00
|
|
|
{
|
2012-11-16 17:25:01 +00:00
|
|
|
if (res->sr_slot == NULL)
|
2010-07-31 18:29:06 +00:00
|
|
|
return 1;
|
2013-08-09 16:49:19 +00:00
|
|
|
if (!res->sr_slot->table->session)
|
|
|
|
return nfs40_sequence_done(task, res);
|
2010-07-31 18:29:06 +00:00
|
|
|
return nfs41_sequence_done(task, res);
|
2010-06-16 13:52:26 +00:00
|
|
|
}
|
2014-06-10 21:24:16 +00:00
|
|
|
EXPORT_SYMBOL_GPL(nfs4_sequence_done);
|
2010-06-16 13:52:26 +00:00
|
|
|
|
2009-04-01 13:22:13 +00:00
|
|
|
static void nfs41_call_sync_prepare(struct rpc_task *task, void *calldata)
|
|
|
|
{
|
2013-08-09 16:48:00 +00:00
|
|
|
struct nfs4_call_sync_data *data = calldata;
|
2009-04-01 13:22:13 +00:00
|
|
|
|
2010-06-16 13:52:26 +00:00
|
|
|
dprintk("--> %s data->seq_server %p\n", __func__, data->seq_server);
|
|
|
|
|
2017-01-10 16:39:53 +00:00
|
|
|
nfs4_setup_sequence(data->seq_server->nfs_client,
|
|
|
|
data->seq_args, data->seq_res, task);
|
2009-04-01 13:22:13 +00:00
|
|
|
}
|
|
|
|
|
2009-04-01 13:22:19 +00:00
|
|
|
static void nfs41_call_sync_done(struct rpc_task *task, void *calldata)
|
|
|
|
{
|
2013-08-09 16:48:00 +00:00
|
|
|
struct nfs4_call_sync_data *data = calldata;
|
2009-04-01 13:22:19 +00:00
|
|
|
|
2010-07-31 18:29:06 +00:00
|
|
|
nfs41_sequence_done(task, data->seq_res);
|
2009-04-01 13:22:19 +00:00
|
|
|
}
|
|
|
|
|
2012-03-11 17:11:00 +00:00
|
|
|
static const struct rpc_call_ops nfs41_call_sync_ops = {
|
2009-04-01 13:22:13 +00:00
|
|
|
.rpc_call_prepare = nfs41_call_sync_prepare,
|
2009-04-01 13:22:19 +00:00
|
|
|
.rpc_call_done = nfs41_call_sync_done,
|
2009-04-01 13:22:13 +00:00
|
|
|
};
|
|
|
|
|
2013-08-09 16:49:19 +00:00
|
|
|
#else /* !CONFIG_NFS_V4_1 */
|
|
|
|
|
2016-08-28 15:50:26 +00:00
|
|
|
static int nfs4_sequence_process(struct rpc_task *task, struct nfs4_sequence_res *res)
|
|
|
|
{
|
|
|
|
return nfs40_sequence_done(task, res);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void nfs4_sequence_free_slot(struct nfs4_sequence_res *res)
|
|
|
|
{
|
|
|
|
if (res->sr_slot != NULL)
|
|
|
|
nfs40_sequence_free_slot(res);
|
|
|
|
}
|
|
|
|
|
2014-06-10 21:24:16 +00:00
|
|
|
int nfs4_sequence_done(struct rpc_task *task,
|
|
|
|
struct nfs4_sequence_res *res)
|
2013-08-09 16:48:27 +00:00
|
|
|
{
|
2013-08-09 16:49:19 +00:00
|
|
|
return nfs40_sequence_done(task, res);
|
2013-08-09 16:48:27 +00:00
|
|
|
}
|
2014-06-10 21:24:16 +00:00
|
|
|
EXPORT_SYMBOL_GPL(nfs4_sequence_done);
|
2013-08-09 16:49:19 +00:00
|
|
|
|
|
|
|
#endif /* !CONFIG_NFS_V4_1 */
|
2013-08-09 16:48:27 +00:00
|
|
|
|
2019-03-01 17:13:34 +00:00
|
|
|
static void nfs41_sequence_res_init(struct nfs4_sequence_res *res)
|
|
|
|
{
|
|
|
|
res->sr_timestamp = jiffies;
|
|
|
|
res->sr_status_flags = 0;
|
|
|
|
res->sr_status = 1;
|
|
|
|
}
|
|
|
|
|
2017-10-19 19:46:45 +00:00
|
|
|
static
|
|
|
|
void nfs4_sequence_attach_slot(struct nfs4_sequence_args *args,
|
|
|
|
struct nfs4_sequence_res *res,
|
|
|
|
struct nfs4_slot *slot)
|
|
|
|
{
|
|
|
|
if (!slot)
|
|
|
|
return;
|
|
|
|
slot->privileged = args->sa_privileged ? 1 : 0;
|
|
|
|
args->sa_slot = slot;
|
|
|
|
|
|
|
|
res->sr_slot = slot;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * nfs4_setup_sequence - allocate a slot and start transmission of an RPC
 * @client: client whose slot table is used
 * @args:   sequence arguments; receives the allocated slot
 * @res:    sequence results; primed for this transmission
 * @task:   the RPC task to start, or to put to sleep on the waitq
 *
 * Returns 0 with @task started, or -EAGAIN with @task queued on the
 * slot table's waitq when no slot is currently available.
 */
int nfs4_setup_sequence(struct nfs_client *client,
			struct nfs4_sequence_args *args,
			struct nfs4_sequence_res *res,
			struct rpc_task *task)
{
	struct nfs4_session *session = nfs4_get_session(client);
	struct nfs4_slot_table *tbl = client->cl_slot_tbl;
	struct nfs4_slot *slot;

	/* slot already allocated? (e.g. a retransmission) */
	if (res->sr_slot != NULL)
		goto out_start;

	/* A session supplies its own slot table */
	if (session)
		tbl = &session->fc_slot_table;

	spin_lock(&tbl->slot_tbl_lock);
	/* The state manager will wait until the slot table is empty */
	if (nfs4_slot_tbl_draining(tbl) && !args->sa_privileged)
		goto out_sleep;

	slot = nfs4_alloc_slot(tbl);
	if (IS_ERR(slot)) {
		/* -ENOMEM is transient: retry on a timeout rather than
		 * waiting for another slot to be freed */
		if (slot == ERR_PTR(-ENOMEM))
			goto out_sleep_timeout;
		goto out_sleep;
	}
	spin_unlock(&tbl->slot_tbl_lock);

	nfs4_sequence_attach_slot(args, res, slot);

	trace_nfs4_setup_sequence(session, args);
out_start:
	nfs41_sequence_res_init(res);
	rpc_call_start(task);
	return 0;
out_sleep_timeout:
	/* Try again in 1/4 second */
	if (args->sa_privileged)
		rpc_sleep_on_priority_timeout(&tbl->slot_tbl_waitq, task,
				jiffies + (HZ >> 2), RPC_PRIORITY_PRIVILEGED);
	else
		rpc_sleep_on_timeout(&tbl->slot_tbl_waitq, task,
				NULL, jiffies + (HZ >> 2));
	spin_unlock(&tbl->slot_tbl_lock);
	return -EAGAIN;
out_sleep:
	/* Wait (indefinitely) for a slot to become available */
	if (args->sa_privileged)
		rpc_sleep_on_priority(&tbl->slot_tbl_waitq, task,
				RPC_PRIORITY_PRIVILEGED);
	else
		rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
	spin_unlock(&tbl->slot_tbl_lock);
	return -EAGAIN;
}
EXPORT_SYMBOL_GPL(nfs4_setup_sequence);
|
|
|
|
|
2013-08-09 16:48:27 +00:00
|
|
|
static void nfs40_call_sync_prepare(struct rpc_task *task, void *calldata)
|
|
|
|
{
|
|
|
|
struct nfs4_call_sync_data *data = calldata;
|
2017-01-09 20:48:22 +00:00
|
|
|
nfs4_setup_sequence(data->seq_server->nfs_client,
|
2013-08-09 16:48:27 +00:00
|
|
|
data->seq_args, data->seq_res, task);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void nfs40_call_sync_done(struct rpc_task *task, void *calldata)
|
|
|
|
{
|
|
|
|
struct nfs4_call_sync_data *data = calldata;
|
|
|
|
nfs4_sequence_done(task, data->seq_res);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* rpc_call_ops for NFSv4.0 synchronous calls: set up sequencing before
 * transmission and complete it when the reply arrives. */
static const struct rpc_call_ops nfs40_call_sync_ops = {
	.rpc_call_prepare = nfs40_call_sync_prepare,
	.rpc_call_done = nfs40_call_sync_done,
};
|
|
|
|
|
2019-08-14 19:27:00 +00:00
|
|
|
static int nfs4_call_sync_custom(struct rpc_task_setup *task_setup)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
struct rpc_task *task;
|
|
|
|
|
|
|
|
task = rpc_run_task(task_setup);
|
|
|
|
if (IS_ERR(task))
|
|
|
|
return PTR_ERR(task);
|
|
|
|
|
|
|
|
ret = task->tk_status;
|
|
|
|
rpc_put_task(task);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2020-01-06 20:39:37 +00:00
|
|
|
/*
 * nfs4_do_call_sync - issue a synchronous, sequenced NFSv4 RPC
 * Wires @args/@res into per-call data, selects the minor-version
 * call_sync_ops, and runs the task to completion.
 * Returns the task's final status.
 */
static int nfs4_do_call_sync(struct rpc_clnt *clnt,
			     struct nfs_server *server,
			     struct rpc_message *msg,
			     struct nfs4_sequence_args *args,
			     struct nfs4_sequence_res *res,
			     unsigned short task_flags)
{
	struct nfs_client *clp = server->nfs_client;
	struct nfs4_call_sync_data data = {
		.seq_server = server,
		.seq_args = args,
		.seq_res = res,
	};
	struct rpc_task_setup task_setup = {
		.rpc_client = clnt,
		.rpc_message = msg,
		/* minor-version specific prepare/done callbacks */
		.callback_ops = clp->cl_mvops->call_sync_ops,
		.callback_data = &data,
		.flags = task_flags,
	};

	return nfs4_call_sync_custom(&task_setup);
}
|
|
|
|
|
2020-01-06 20:39:37 +00:00
|
|
|
static int nfs4_call_sync_sequence(struct rpc_clnt *clnt,
|
|
|
|
struct nfs_server *server,
|
|
|
|
struct rpc_message *msg,
|
|
|
|
struct nfs4_sequence_args *args,
|
|
|
|
struct nfs4_sequence_res *res)
|
|
|
|
{
|
2021-06-24 03:28:51 +00:00
|
|
|
unsigned short task_flags = 0;
|
|
|
|
|
2022-05-25 16:12:59 +00:00
|
|
|
if (server->caps & NFS_CAP_MOVEABLE)
|
2021-06-24 03:28:51 +00:00
|
|
|
task_flags = RPC_TASK_MOVEABLE;
|
|
|
|
return nfs4_do_call_sync(clnt, server, msg, args, res, task_flags);
|
2020-01-06 20:39:37 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2011-03-24 17:12:24 +00:00
|
|
|
/* Public helper: initialise the sequence arguments and issue the
 * synchronous call. */
int nfs4_call_sync(struct rpc_clnt *clnt,
		   struct nfs_server *server,
		   struct rpc_message *msg,
		   struct nfs4_sequence_args *args,
		   struct nfs4_sequence_res *res,
		   int cache_reply)
{
	nfs4_init_sequence(args, res, cache_reply, 0);
	return nfs4_call_sync_sequence(clnt, server, msg, args, res);
}
|
2009-04-01 13:22:03 +00:00
|
|
|
|
2018-07-31 19:54:10 +00:00
|
|
|
static void
|
|
|
|
nfs4_inc_nlink_locked(struct inode *inode)
|
|
|
|
{
|
2021-04-01 18:59:59 +00:00
|
|
|
nfs_set_cache_invalid(inode, NFS_INO_INVALID_CHANGE |
|
|
|
|
NFS_INO_INVALID_CTIME |
|
|
|
|
NFS_INO_INVALID_NLINK);
|
2018-07-31 19:54:10 +00:00
|
|
|
inc_nlink(inode);
|
|
|
|
}
|
|
|
|
|
2021-04-01 18:57:56 +00:00
|
|
|
/* Locking wrapper for nfs4_inc_nlink_locked(). */
static void
nfs4_inc_nlink(struct inode *inode)
{
	spin_lock(&inode->i_lock);
	nfs4_inc_nlink_locked(inode);
	spin_unlock(&inode->i_lock);
}
|
|
|
|
|
2018-07-31 19:54:10 +00:00
|
|
|
static void
|
|
|
|
nfs4_dec_nlink_locked(struct inode *inode)
|
|
|
|
{
|
2021-04-01 18:59:59 +00:00
|
|
|
nfs_set_cache_invalid(inode, NFS_INO_INVALID_CHANGE |
|
|
|
|
NFS_INO_INVALID_CTIME |
|
|
|
|
NFS_INO_INVALID_NLINK);
|
2018-07-31 19:54:10 +00:00
|
|
|
drop_nlink(inode);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * nfs4_update_changeattr_locked - apply a change_info4 to the inode
 * @inode:          the (directory) inode being updated
 * @cinfo:          before/after change attribute values from the server
 * @timestamp:      time the request was sent, for cache timing
 * @cache_validity: extra invalidity flags to record
 *
 * Caller must hold inode->i_lock.
 */
static void
nfs4_update_changeattr_locked(struct inode *inode,
		struct nfs4_change_info *cinfo,
		unsigned long timestamp, unsigned long cache_validity)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	u64 change_attr = inode_peek_iversion_raw(inode);

	cache_validity |= NFS_INO_INVALID_CTIME | NFS_INO_INVALID_MTIME;
	if (S_ISDIR(inode->i_mode))
		cache_validity |= NFS_INO_INVALID_DATA;

	switch (NFS_SERVER(inode)->change_attr_type) {
	case NFS4_CHANGE_TYPE_IS_UNDEFINED:
		/* Undefined change attributes can only be compared for
		 * equality: no update needed if nothing changed */
		if (cinfo->after == change_attr)
			goto out;
		break;
	default:
		/* Signed serial-number comparison: skip the update if our
		 * cached value is already at least as recent */
		if ((s64)(change_attr - cinfo->after) >= 0)
			goto out;
	}

	inode_set_iversion_raw(inode, cinfo->after);
	/* A non-atomic update, or one whose 'before' does not match our
	 * cached value, means we may have missed intermediate changes */
	if (!cinfo->atomic || cinfo->before != change_attr) {
		if (S_ISDIR(inode->i_mode))
			nfs_force_lookup_revalidate(inode);

		if (!NFS_PROTO(inode)->have_delegation(inode, FMODE_READ))
			cache_validity |=
				NFS_INO_INVALID_ACCESS | NFS_INO_INVALID_ACL |
				NFS_INO_INVALID_SIZE | NFS_INO_INVALID_OTHER |
				NFS_INO_INVALID_BLOCKS | NFS_INO_INVALID_NLINK |
				NFS_INO_INVALID_MODE | NFS_INO_INVALID_XATTR;
		nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
	}
	nfsi->attrtimeo_timestamp = jiffies;
	nfsi->read_cache_jiffies = timestamp;
	nfsi->attr_gencount = nfs_inc_attr_generation_counter();
	/* The change attribute itself is now known to be up to date */
	nfsi->cache_validity &= ~NFS_INO_INVALID_CHANGE;
out:
	nfs_set_cache_invalid(inode, cache_validity);
}
|
|
|
|
|
2020-06-23 22:38:59 +00:00
|
|
|
/* Locking wrapper for nfs4_update_changeattr_locked(). */
void
nfs4_update_changeattr(struct inode *dir, struct nfs4_change_info *cinfo,
		unsigned long timestamp, unsigned long cache_validity)
{
	spin_lock(&dir->i_lock);
	nfs4_update_changeattr_locked(dir, cinfo, timestamp, cache_validity);
	spin_unlock(&dir->i_lock);
}
|
|
|
|
|
2017-11-06 20:28:03 +00:00
|
|
|
/* Attributes supplied with an OPEN that creates the file. */
struct nfs4_open_createattrs {
	struct nfs4_label *label;	/* security label to apply */
	struct iattr *sattr;		/* requested file attributes */
	const __u32 verf[2];		/* exclusive-create verifier */
};
|
|
|
|
|
2013-03-15 20:44:28 +00:00
|
|
|
static bool nfs4_clear_cap_atomic_open_v1(struct nfs_server *server,
|
|
|
|
int err, struct nfs4_exception *exception)
|
|
|
|
{
|
|
|
|
if (err != -EINVAL)
|
|
|
|
return false;
|
|
|
|
if (!(server->caps & NFS_CAP_ATOMIC_OPEN_V1))
|
|
|
|
return false;
|
|
|
|
server->caps &= ~NFS_CAP_ATOMIC_OPEN_V1;
|
|
|
|
exception->retry = 1;
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2019-06-27 10:30:48 +00:00
|
|
|
static fmode_t _nfs4_ctx_to_accessmode(const struct nfs_open_context *ctx)
|
|
|
|
{
|
|
|
|
return ctx->mode & (FMODE_READ|FMODE_WRITE|FMODE_EXEC);
|
|
|
|
}
|
|
|
|
|
|
|
|
static fmode_t _nfs4_ctx_to_openmode(const struct nfs_open_context *ctx)
|
|
|
|
{
|
|
|
|
fmode_t ret = ctx->mode & (FMODE_READ|FMODE_WRITE);
|
|
|
|
|
|
|
|
return (ctx->mode & FMODE_EXEC) ? FMODE_READ | ret : ret;
|
|
|
|
}
|
|
|
|
|
2015-01-30 19:21:14 +00:00
|
|
|
static u32
|
|
|
|
nfs4_map_atomic_open_share(struct nfs_server *server,
|
|
|
|
fmode_t fmode, int openflags)
|
|
|
|
{
|
|
|
|
u32 res = 0;
|
|
|
|
|
|
|
|
switch (fmode & (FMODE_READ | FMODE_WRITE)) {
|
|
|
|
case FMODE_READ:
|
|
|
|
res = NFS4_SHARE_ACCESS_READ;
|
|
|
|
break;
|
|
|
|
case FMODE_WRITE:
|
|
|
|
res = NFS4_SHARE_ACCESS_WRITE;
|
|
|
|
break;
|
|
|
|
case FMODE_READ|FMODE_WRITE:
|
|
|
|
res = NFS4_SHARE_ACCESS_BOTH;
|
|
|
|
}
|
|
|
|
if (!(server->caps & NFS_CAP_ATOMIC_OPEN_V1))
|
|
|
|
goto out;
|
|
|
|
/* Want no delegation if we're using O_DIRECT */
|
|
|
|
if (openflags & O_DIRECT)
|
|
|
|
res |= NFS4_SHARE_WANT_NO_DELEG;
|
|
|
|
out:
|
|
|
|
return res;
|
|
|
|
}
|
|
|
|
|
2013-03-15 20:44:28 +00:00
|
|
|
static enum open_claim_type4
|
|
|
|
nfs4_map_atomic_open_claim(struct nfs_server *server,
|
|
|
|
enum open_claim_type4 claim)
|
|
|
|
{
|
|
|
|
if (server->caps & NFS_CAP_ATOMIC_OPEN_V1)
|
|
|
|
return claim;
|
|
|
|
switch (claim) {
|
|
|
|
default:
|
|
|
|
return claim;
|
|
|
|
case NFS4_OPEN_CLAIM_FH:
|
|
|
|
return NFS4_OPEN_CLAIM_NULL;
|
|
|
|
case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
|
|
|
|
return NFS4_OPEN_CLAIM_DELEGATE_CUR;
|
|
|
|
case NFS4_OPEN_CLAIM_DELEG_PREV_FH:
|
|
|
|
return NFS4_OPEN_CLAIM_DELEGATE_PREV;
|
|
|
|
}
|
|
|
|
}
|
2007-07-04 03:48:13 +00:00
|
|
|
|
|
|
|
/* Point the OPEN/OPEN_CONFIRM result structures at the opendata's own
 * storage and (re)initialise the cached attributes. */
static void nfs4_init_opendata_res(struct nfs4_opendata *p)
{
	p->o_res.f_attr = &p->f_attr;
	p->o_res.seqid = p->o_arg.seqid;
	p->c_res.seqid = p->c_arg.seqid;
	p->o_res.server = p->o_arg.server;
	p->o_res.access_request = p->o_arg.access;
	nfs_fattr_init(&p->f_attr);
	nfs_fattr_init_names(&p->f_attr, &p->owner_name, &p->group_name);
}
|
|
|
|
|
2011-06-22 22:30:55 +00:00
|
|
|
/*
 * nfs4_opendata_alloc - allocate and initialise the state for an OPEN call
 * Returns a refcounted nfs4_opendata (release via nfs4_opendata_put()),
 * or NULL on allocation failure.
 *
 * NOTE(review): when @flags contains O_CREAT, @c is dereferenced and so
 * must be non-NULL; only the non-create path tolerates a NULL @c.
 * Verify against callers.
 */
static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry,
		struct nfs4_state_owner *sp, fmode_t fmode, int flags,
		const struct nfs4_open_createattrs *c,
		enum open_claim_type4 claim,
		gfp_t gfp_mask)
{
	struct dentry *parent = dget_parent(dentry);
	struct inode *dir = d_inode(parent);
	struct nfs_server *server = NFS_SERVER(dir);
	struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t);
	struct nfs4_label *label = (c != NULL) ? c->label : NULL;
	struct nfs4_opendata *p;

	p = kzalloc(sizeof(*p), gfp_mask);
	if (p == NULL)
		goto err;

	p->f_attr.label = nfs4_label_alloc(server, gfp_mask);
	if (IS_ERR(p->f_attr.label))
		goto err_free_p;

	p->a_label = nfs4_label_alloc(server, gfp_mask);
	if (IS_ERR(p->a_label))
		goto err_free_f;

	/* seqid allocation is minor-version specific */
	alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid;
	p->o_arg.seqid = alloc_seqid(&sp->so_seqid, gfp_mask);
	if (IS_ERR(p->o_arg.seqid))
		goto err_free_label;
	/* Pin the superblock, dentry, parent and state owner for the
	 * lifetime of the opendata */
	nfs_sb_active(dentry->d_sb);
	p->dentry = dget(dentry);
	p->dir = parent;
	p->owner = sp;
	atomic_inc(&sp->so_count);
	p->o_arg.open_flags = flags;
	p->o_arg.fmode = fmode & (FMODE_READ|FMODE_WRITE);
	p->o_arg.claim = nfs4_map_atomic_open_claim(server, claim);
	p->o_arg.share_access = nfs4_map_atomic_open_share(server,
							   fmode, flags);
	if (flags & O_CREAT) {
		p->o_arg.umask = current_umask();
		p->o_arg.label = nfs4_label_copy(p->a_label, label);
		if (c->sattr != NULL && c->sattr->ia_valid != 0) {
			p->o_arg.u.attrs = &p->attrs;
			memcpy(&p->attrs, c->sattr, sizeof(p->attrs));

			memcpy(p->o_arg.u.verifier.data, c->verf,
					sizeof(p->o_arg.u.verifier.data));
		}
	}
	/* ask server to check for all possible rights as results
	 * are cached */
	switch (p->o_arg.claim) {
	default:
		break;
	case NFS4_OPEN_CLAIM_NULL:
	case NFS4_OPEN_CLAIM_FH:
		p->o_arg.access = NFS4_ACCESS_READ | NFS4_ACCESS_MODIFY |
				  NFS4_ACCESS_EXTEND | NFS4_ACCESS_DELETE |
				  NFS4_ACCESS_EXECUTE |
				  nfs_access_xattr_mask(server);
	}
	p->o_arg.clientid = server->nfs_client->cl_clientid;
	p->o_arg.id.create_time = ktime_to_ns(sp->so_seqid.create_time);
	p->o_arg.id.uniquifier = sp->so_seqid.owner_id;
	p->o_arg.name = &dentry->d_name;
	p->o_arg.server = server;
	p->o_arg.bitmask = nfs4_bitmask(server, label);
	p->o_arg.open_bitmap = &nfs4_fattr_bitmap[0];
	/* Claim types differ in whether they name the parent directory
	 * or the file itself */
	switch (p->o_arg.claim) {
	case NFS4_OPEN_CLAIM_NULL:
	case NFS4_OPEN_CLAIM_DELEGATE_CUR:
	case NFS4_OPEN_CLAIM_DELEGATE_PREV:
		p->o_arg.fh = NFS_FH(dir);
		break;
	case NFS4_OPEN_CLAIM_PREVIOUS:
	case NFS4_OPEN_CLAIM_FH:
	case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
	case NFS4_OPEN_CLAIM_DELEG_PREV_FH:
		p->o_arg.fh = NFS_FH(d_inode(dentry));
	}
	/* OPEN_CONFIRM arguments reference the OPEN results */
	p->c_arg.fh = &p->o_res.fh;
	p->c_arg.stateid = &p->o_res.stateid;
	p->c_arg.seqid = p->o_arg.seqid;
	nfs4_init_opendata_res(p);
	kref_init(&p->kref);
	return p;

err_free_label:
	nfs4_label_free(p->a_label);
err_free_f:
	nfs4_label_free(p->f_attr.label);
err_free_p:
	kfree(p);
err:
	dput(parent);
	return NULL;
}
|
|
|
|
|
2007-06-17 20:02:44 +00:00
|
|
|
/* kref release callback: tear down everything nfs4_opendata_alloc()
 * set up, in reverse dependency order. */
static void nfs4_opendata_free(struct kref *kref)
{
	struct nfs4_opendata *p = container_of(kref,
			struct nfs4_opendata, kref);
	/* grab sb before the dentry references are dropped */
	struct super_block *sb = p->dentry->d_sb;

	nfs4_lgopen_release(p->lgp);
	nfs_free_seqid(p->o_arg.seqid);
	nfs4_sequence_free_slot(&p->o_res.seq_res);
	if (p->state != NULL)
		nfs4_put_open_state(p->state);
	nfs4_put_state_owner(p->owner);

	nfs4_label_free(p->a_label);
	nfs4_label_free(p->f_attr.label);

	dput(p->dir);
	dput(p->dentry);
	nfs_sb_deactive(sb);
	nfs_fattr_free_names(&p->f_attr);
	kfree(p->f_attr.mdsthreshold);
	kfree(p);
}
|
|
|
|
|
|
|
|
static void nfs4_opendata_put(struct nfs4_opendata *p)
|
|
|
|
{
|
|
|
|
if (p != NULL)
|
|
|
|
kref_put(&p->kref, nfs4_opendata_free);
|
2006-01-03 08:55:08 +00:00
|
|
|
}
|
|
|
|
|
2015-09-20 14:50:17 +00:00
|
|
|
static bool nfs4_mode_match_open_stateid(struct nfs4_state *state,
|
|
|
|
fmode_t fmode)
|
|
|
|
{
|
|
|
|
switch(fmode & (FMODE_READ|FMODE_WRITE)) {
|
|
|
|
case FMODE_READ|FMODE_WRITE:
|
|
|
|
return state->n_rdwr != 0;
|
|
|
|
case FMODE_WRITE:
|
|
|
|
return state->n_wronly != 0;
|
|
|
|
case FMODE_READ:
|
|
|
|
return state->n_rdonly != 0;
|
|
|
|
}
|
|
|
|
WARN_ON_ONCE(1);
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2018-09-27 21:12:33 +00:00
|
|
|
/*
 * can_open_cached - may this OPEN be satisfied from cached open state?
 * Returns non-zero when open state of a compatible mode is already held
 * and the request neither truncates nor demands exclusive create.
 */
static int can_open_cached(struct nfs4_state *state, fmode_t mode,
		int open_mode, enum open_claim_type4 claim)
{
	if (open_mode & (O_EXCL | O_TRUNC))
		return 0;
	switch (claim) {
	case NFS4_OPEN_CLAIM_NULL:
	case NFS4_OPEN_CLAIM_FH:
		/* name-based opens must always go to the server */
		return 0;
	default:
		break;
	}
	switch (mode & (FMODE_READ | FMODE_WRITE)) {
	case FMODE_READ:
		return test_bit(NFS_O_RDONLY_STATE, &state->flags) != 0 &&
		       state->n_rdonly != 0;
	case FMODE_WRITE:
		return test_bit(NFS_O_WRONLY_STATE, &state->flags) != 0 &&
		       state->n_wronly != 0;
	case FMODE_READ | FMODE_WRITE:
		return test_bit(NFS_O_RDWR_STATE, &state->flags) != 0 &&
		       state->n_rdwr != 0;
	}
	return 0;
}
|
|
|
|
|
2015-08-20 03:30:00 +00:00
|
|
|
/* Returns 1 if the held delegation covers @fmode for this claim type
 * (marking it referenced), 0 otherwise. */
static int can_open_delegated(struct nfs_delegation *delegation, fmode_t fmode,
		enum open_claim_type4 claim)
{
	if (delegation == NULL)
		return 0;
	if ((delegation->type & fmode) != fmode)
		return 0;
	switch (claim) {
	case NFS4_OPEN_CLAIM_NULL:
	case NFS4_OPEN_CLAIM_FH:
		break;
	case NFS4_OPEN_CLAIM_PREVIOUS:
		/* a delegation pending reclaim cannot be used */
		if (!test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags))
			break;
		fallthrough;
	default:
		return 0;
	}
	nfs_mark_delegation_referenced(delegation);
	return 1;
}
|
|
|
|
|
2008-12-23 20:21:56 +00:00
|
|
|
/* Account a newly granted open of mode @fmode on @state and fold the
 * mode into the state's aggregate open mode. */
static void update_open_stateflags(struct nfs4_state *state, fmode_t fmode)
{
	if (fmode == FMODE_WRITE)
		state->n_wronly++;
	else if (fmode == FMODE_READ)
		state->n_rdonly++;
	else if (fmode == (FMODE_READ | FMODE_WRITE))
		state->n_rdwr++;
	nfs4_state_set_mode_locked(state, state->state | fmode);
}
|
|
|
|
|
2016-09-22 17:39:21 +00:00
|
|
|
#ifdef CONFIG_NFS_V4_1
/* True if some open mode is in use but its corresponding state flag has
 * been lost, i.e. the open stateid needs recovery for that mode. */
static bool nfs_open_stateid_recover_openmode(struct nfs4_state *state)
{
	return (state->n_rdonly && !test_bit(NFS_O_RDONLY_STATE, &state->flags)) ||
	       (state->n_wronly && !test_bit(NFS_O_WRONLY_STATE, &state->flags)) ||
	       (state->n_rdwr && !test_bit(NFS_O_RDWR_STATE, &state->flags));
}
#endif /* CONFIG_NFS_V4_1 */
|
|
|
|
|
NFSv4: Fix OPEN / CLOSE race
Ben Coddington has noted the following race between OPEN and CLOSE
on a single client.
Process 1 Process 2 Server
========= ========= ======
1) OPEN file
2) OPEN file
3) Process OPEN (1) seqid=1
4) Process OPEN (2) seqid=2
5) Reply OPEN (2)
6) Receive reply (2)
7) new stateid, seqid=2
8) CLOSE file, using
stateid w/ seqid=2
9) Reply OPEN (1)
10) Process CLOSE (8)
11) Reply CLOSE (8)
12) Forget stateid
file closed
13) Receive reply (7)
14) Forget stateid
file closed.
15) Receive reply (1).
16) New stateid seqid=1
is really the same
stateid that was
closed.
IOW: the reply to the first OPEN is delayed. Since "Process 2" does
not wait before closing the file, and it does not cache the closed
stateid, then when the delayed reply is finally received, it is treated
as setting up a new stateid by the client.
The fix is to ensure that the client processes the OPEN and CLOSE calls
in the same order in which the server processed them.
This commit ensures that we examine the seqid of the stateid
returned by OPEN. If it is a new stateid, we assume the seqid
must be equal to the value 1, and that each state transition
increments the seqid value by 1 (See RFC7530, Section 9.1.4.2,
and RFC5661, Section 8.2.2).
If the tracker sees that an OPEN returns with a seqid that is greater
than the cached seqid + 1, then it bumps a flag to ensure that the
caller waits for the RPCs carrying the missing seqids to complete.
Note that there can still be pathologies where the server crashes before
it can even send us the missing seqids. Since the OPEN call is still
holding a slot when it waits here, that could cause the recovery to
stall forever. To avoid that, we time out after a 5 second wait.
Reported-by: Benjamin Coddington <bcodding@redhat.com>
Signed-off-by: Trond Myklebust <trond.myklebust@primarydata.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
2017-11-06 20:28:01 +00:00
|
|
|
/* The open stateid has advanced: wake any threads waiting for missing
 * seqid transitions to be processed. */
static void nfs_state_log_update_open_stateid(struct nfs4_state *state)
{
	if (test_and_clear_bit(NFS_STATE_CHANGE_WAIT, &state->flags))
		wake_up_all(&state->waitq);
}
|
|
|
|
|
2014-02-13 00:15:06 +00:00
|
|
|
static void nfs_test_and_clear_all_open_stateid(struct nfs4_state *state)
|
|
|
|
{
|
|
|
|
struct nfs_client *clp = state->owner->so_server->nfs_client;
|
|
|
|
bool need_recover = false;
|
|
|
|
|
|
|
|
if (test_and_clear_bit(NFS_O_RDONLY_STATE, &state->flags) && state->n_rdonly)
|
|
|
|
need_recover = true;
|
|
|
|
if (test_and_clear_bit(NFS_O_WRONLY_STATE, &state->flags) && state->n_wronly)
|
|
|
|
need_recover = true;
|
|
|
|
if (test_and_clear_bit(NFS_O_RDWR_STATE, &state->flags) && state->n_rdwr)
|
|
|
|
need_recover = true;
|
|
|
|
if (need_recover)
|
|
|
|
nfs4_state_mark_reclaim_nograce(clp, state);
|
|
|
|
}
|
|
|
|
|
NFSv4: Fix OPEN / CLOSE race
Ben Coddington has noted the following race between OPEN and CLOSE
on a single client.
Process 1 Process 2 Server
========= ========= ======
1) OPEN file
2) OPEN file
3) Process OPEN (1) seqid=1
4) Process OPEN (2) seqid=2
5) Reply OPEN (2)
6) Receive reply (2)
7) new stateid, seqid=2
8) CLOSE file, using
stateid w/ seqid=2
9) Reply OPEN (1)
10) Process CLOSE (8)
11) Reply CLOSE (8)
12) Forget stateid
file closed
13) Receive reply (7)
14) Forget stateid
file closed.
15) Receive reply (1).
16) New stateid seqid=1
is really the same
stateid that was
closed.
IOW: the reply to the first OPEN is delayed. Since "Process 2" does
not wait before closing the file, and it does not cache the closed
stateid, then when the delayed reply is finally received, it is treated
as setting up a new stateid by the client.
The fix is to ensure that the client processes the OPEN and CLOSE calls
in the same order in which the server processed them.
This commit ensures that we examine the seqid of the stateid
returned by OPEN. If it is a new stateid, we assume the seqid
must be equal to the value 1, and that each state transition
increments the seqid value by 1 (See RFC7530, Section 9.1.4.2,
and RFC5661, Section 8.2.2).
If the tracker sees that an OPEN returns with a seqid that is greater
than the cached seqid + 1, then it bumps a flag to ensure that the
caller waits for the RPCs carrying the missing seqids to complete.
Note that there can still be pathologies where the server crashes before
it can even send us the missing seqids. Since the OPEN call is still
holding a slot when it waits here, that could cause the recovery to
stall forever. To avoid that, we time out after a 5 second wait.
Reported-by: Benjamin Coddington <bcodding@redhat.com>
Signed-off-by: Trond Myklebust <trond.myklebust@primarydata.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
2017-11-06 20:28:01 +00:00
|
|
|
/*
 * Check for whether or not the caller may update the open stateid
 * to the value passed in by stateid.
 *
 * Note: This function relies heavily on the server implementing
 * RFC7530 Section 9.1.4.2, and RFC5661 Section 8.2.2
 * correctly.
 * i.e. The stateid seqids have to be initialised to 1, and
 * are then incremented on every state transition.
 */
static bool nfs_stateid_is_sequential(struct nfs4_state *state,
		const nfs4_stateid *stateid)
{
	if (test_bit(NFS_OPEN_STATE, &state->flags)) {
		/* The common case - we're updating to a new sequence number */
		if (nfs4_stateid_match_other(stateid, &state->open_stateid)) {
			/* Accept only the immediately following seqid */
			if (nfs4_stateid_is_next(&state->open_stateid, stateid))
				return true;
			return false;
		}
		/* The server returned a new stateid */
	}
	/* This is the first OPEN in this generation */
	if (stateid->seqid == cpu_to_be32(1))
		return true;
	return false;
}
|
|
|
|
|
2015-01-23 23:06:09 +00:00
|
|
|
static void nfs_resync_open_stateid_locked(struct nfs4_state *state)
|
|
|
|
{
|
2015-07-22 17:46:13 +00:00
|
|
|
if (!(state->n_wronly || state->n_rdonly || state->n_rdwr))
|
|
|
|
return;
|
2015-01-23 23:06:09 +00:00
|
|
|
if (state->n_wronly)
|
|
|
|
set_bit(NFS_O_WRONLY_STATE, &state->flags);
|
|
|
|
if (state->n_rdonly)
|
|
|
|
set_bit(NFS_O_RDONLY_STATE, &state->flags);
|
|
|
|
if (state->n_rdwr)
|
|
|
|
set_bit(NFS_O_RDWR_STATE, &state->flags);
|
2015-07-22 17:46:13 +00:00
|
|
|
set_bit(NFS_OPEN_STATE, &state->flags);
|
2015-01-23 23:06:09 +00:00
|
|
|
}
|
|
|
|
|
2014-02-11 15:41:07 +00:00
|
|
|
/*
 * Update the open state after the server replied to a CLOSE or
 * OPEN_DOWNGRADE, clearing the share-mode flags that no longer apply
 * and recording the returned stateid.
 *
 * @state:   the nfs4_state being downgraded/closed
 * @stateid: the stateid returned by the server, or NULL
 * @fmode:   the open mode(s) that remain after the call
 *
 * Caller must hold the write side of state->seqlock (see
 * nfs_clear_open_stateid()).
 */
static void nfs_clear_open_stateid_locked(struct nfs4_state *state,
	nfs4_stateid *stateid, fmode_t fmode)
{
	clear_bit(NFS_O_RDWR_STATE, &state->flags);
	/* Keep only the flag matching the mode that survives the downgrade. */
	switch (fmode & (FMODE_READ|FMODE_WRITE)) {
	case FMODE_WRITE:
		clear_bit(NFS_O_RDONLY_STATE, &state->flags);
		break;
	case FMODE_READ:
		clear_bit(NFS_O_WRONLY_STATE, &state->flags);
		break;
	case 0:
		/* Fully closed: no open modes remain at all. */
		clear_bit(NFS_O_RDONLY_STATE, &state->flags);
		clear_bit(NFS_O_WRONLY_STATE, &state->flags);
		clear_bit(NFS_OPEN_STATE, &state->flags);
	}
	if (stateid == NULL)
		return;
	/* Handle OPEN+OPEN_DOWNGRADE races */
	if (nfs4_stateid_match_other(stateid, &state->open_stateid) &&
	    !nfs4_stateid_is_newer(stateid, &state->open_stateid)) {
		/*
		 * The reply carries a stale seqid: a racing OPEN has already
		 * advanced open_stateid past it, so re-derive the flags from
		 * the open counts instead of going backwards.
		 */
		nfs_resync_open_stateid_locked(state);
		goto out;
	}
	/* Don't overwrite a delegation stateid with the open stateid. */
	if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
		nfs4_stateid_copy(&state->stateid, stateid);
	nfs4_stateid_copy(&state->open_stateid, stateid);
	trace_nfs4_open_stateid_update(state->inode, stateid, 0);
out:
	nfs_state_log_update_open_stateid(state);
}
|
|
|
|
|
2015-08-31 01:37:59 +00:00
|
|
|
/*
 * Process the result of a CLOSE/OPEN_DOWNGRADE, serialised against other
 * stateid updates via state->seqlock.
 *
 * @state:       the nfs4_state being updated
 * @arg_stateid: the stateid that was sent in the CLOSE arguments
 * @stateid:     the stateid returned by the server, or NULL
 * @fmode:       the open mode(s) that remain after the call
 */
static void nfs_clear_open_stateid(struct nfs4_state *state,
	nfs4_stateid *arg_stateid,
	nfs4_stateid *stateid, fmode_t fmode)
{
	write_seqlock(&state->seqlock);
	/* Ignore, if the CLOSE argument doesn't match the current stateid */
	if (nfs4_state_match_open_stateid_other(state, arg_stateid))
		nfs_clear_open_stateid_locked(state, stateid, fmode);
	write_sequnlock(&state->seqlock);
	/* Kick off recovery if this state was marked for reclaim. */
	if (test_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags))
		nfs4_schedule_state_manager(state->owner->so_server->nfs_client);
}
|
|
|
|
|
2016-09-22 17:39:13 +00:00
|
|
|
/*
 * Record the stateid returned by an OPEN, making sure stateid updates are
 * applied in the same order in which the server generated them.
 *
 * If the incoming seqid is not the next one in sequence, sleep (up to 5
 * seconds, killable) waiting for the RPCs carrying the missing seqids to
 * complete.  While sleeping we must drop all three locks the caller holds
 * (RCU, so_lock, seqlock) and retake them afterwards, hence the
 * __must_hold annotations.
 *
 * @state:   the nfs4_state being updated
 * @stateid: the stateid returned by the OPEN
 * @freeme:  on return, holds a superseded open stateid that the caller
 *           should release back to the server (if one was replaced)
 */
static void nfs_set_open_stateid_locked(struct nfs4_state *state,
	const nfs4_stateid *stateid, nfs4_stateid *freeme)
	__must_hold(&state->owner->so_lock)
	__must_hold(&state->seqlock)
	__must_hold(RCU)

{
	DEFINE_WAIT(wait);
	int status = 0;
	for (;;) {

		/* In sequence (or a brand-new stateid): safe to apply now. */
		if (nfs_stateid_is_sequential(state, stateid))
			break;

		/* Timed out or interrupted on a previous iteration: give up
		 * waiting and apply the update anyway. */
		if (status)
			break;
		/* Rely on seqids for serialisation with NFSv4.0 */
		if (!nfs4_has_session(NFS_SERVER(state->inode)->nfs_client))
			break;

		set_bit(NFS_STATE_CHANGE_WAIT, &state->flags);
		prepare_to_wait(&state->waitq, &wait, TASK_KILLABLE);
		/*
		 * Ensure we process the state changes in the same order
		 * in which the server processed them by delaying the
		 * update of the stateid until we are in sequence.
		 */
		write_sequnlock(&state->seqlock);
		spin_unlock(&state->owner->so_lock);
		rcu_read_unlock();
		trace_nfs4_open_stateid_update_wait(state->inode, stateid, 0);

		if (!fatal_signal_pending(current)) {
			/* Bounded wait: the sender of the missing seqid may
			 * never reply (e.g. server crash). */
			if (schedule_timeout(5*HZ) == 0)
				status = -EAGAIN;
			else
				status = 0;
		} else
			status = -EINTR;
		finish_wait(&state->waitq, &wait);
		/* Retake the locks in the same order the caller acquired
		 * them before re-checking the sequence. */
		rcu_read_lock();
		spin_lock(&state->owner->so_lock);
		write_seqlock(&state->seqlock);
	}

	/* A different stateid "other" means the old open stateid was
	 * superseded: hand it to the caller to free, and reset the flags. */
	if (test_bit(NFS_OPEN_STATE, &state->flags) &&
	    !nfs4_stateid_match_other(stateid, &state->open_stateid)) {
		nfs4_stateid_copy(freeme, &state->open_stateid);
		nfs_test_and_clear_all_open_stateid(state);
	}

	/* Don't overwrite a delegation stateid with the open stateid. */
	if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
		nfs4_stateid_copy(&state->stateid, stateid);
	nfs4_stateid_copy(&state->open_stateid, stateid);
	trace_nfs4_open_stateid_update(state->inode, stateid, status);
	nfs_state_log_update_open_stateid(state);
}
|
|
|
|
|
NFSv4: Fix OPEN / CLOSE race
Ben Coddington has noted the following race between OPEN and CLOSE
on a single client.
Process 1 Process 2 Server
========= ========= ======
1) OPEN file
2) OPEN file
3) Process OPEN (1) seqid=1
4) Process OPEN (2) seqid=2
5) Reply OPEN (2)
6) Receive reply (2)
7) new stateid, seqid=2
8) CLOSE file, using
stateid w/ seqid=2
9) Reply OPEN (1)
10( Process CLOSE (8)
11) Reply CLOSE (8)
12) Forget stateid
file closed
13) Receive reply (7)
14) Forget stateid
file closed.
15) Receive reply (1).
16) New stateid seqid=1
is really the same
stateid that was
closed.
IOW: the reply to the first OPEN is delayed. Since "Process 2" does
not wait before closing the file, and it does not cache the closed
stateid, then when the delayed reply is finally received, it is treated
as setting up a new stateid by the client.
The fix is to ensure that the client processes the OPEN and CLOSE calls
in the same order in which the server processed them.
This commit ensures that we examine the seqid of the stateid
returned by OPEN. If it is a new stateid, we assume the seqid
must be equal to the value 1, and that each state transition
increments the seqid value by 1 (See RFC7530, Section 9.1.4.2,
and RFC5661, Section 8.2.2).
If the tracker sees that an OPEN returns with a seqid that is greater
than the cached seqid + 1, then it bumps a flag to ensure that the
caller waits for the RPCs carrying the missing seqids to complete.
Note that there can still be pathologies where the server crashes before
it can even send us the missing seqids. Since the OPEN call is still
holding a slot when it waits here, that could cause the recovery to
stall forever. To avoid that, we time out after a 5 second wait.
Reported-by: Benjamin Coddington <bcodding@redhat.com>
Signed-off-by: Trond Myklebust <trond.myklebust@primarydata.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
2017-11-06 20:28:01 +00:00
|
|
|
static void nfs_state_set_open_stateid(struct nfs4_state *state,
|
2016-09-22 17:39:13 +00:00
|
|
|
const nfs4_stateid *open_stateid,
|
|
|
|
fmode_t fmode,
|
|
|
|
nfs4_stateid *freeme)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
2007-07-09 14:45:42 +00:00
|
|
|
/*
|
|
|
|
* Protect the call to nfs4_state_set_mode_locked and
|
|
|
|
* serialise the stateid update
|
|
|
|
*/
|
|
|
|
write_seqlock(&state->seqlock);
|
NFSv4: Fix OPEN / CLOSE race
Ben Coddington has noted the following race between OPEN and CLOSE
on a single client.
Process 1 Process 2 Server
========= ========= ======
1) OPEN file
2) OPEN file
3) Process OPEN (1) seqid=1
4) Process OPEN (2) seqid=2
5) Reply OPEN (2)
6) Receive reply (2)
7) new stateid, seqid=2
8) CLOSE file, using
stateid w/ seqid=2
9) Reply OPEN (1)
10( Process CLOSE (8)
11) Reply CLOSE (8)
12) Forget stateid
file closed
13) Receive reply (7)
14) Forget stateid
file closed.
15) Receive reply (1).
16) New stateid seqid=1
is really the same
stateid that was
closed.
IOW: the reply to the first OPEN is delayed. Since "Process 2" does
not wait before closing the file, and it does not cache the closed
stateid, then when the delayed reply is finally received, it is treated
as setting up a new stateid by the client.
The fix is to ensure that the client processes the OPEN and CLOSE calls
in the same order in which the server processed them.
This commit ensures that we examine the seqid of the stateid
returned by OPEN. If it is a new stateid, we assume the seqid
must be equal to the value 1, and that each state transition
increments the seqid value by 1 (See RFC7530, Section 9.1.4.2,
and RFC5661, Section 8.2.2).
If the tracker sees that an OPEN returns with a seqid that is greater
than the cached seqid + 1, then it bumps a flag to ensure that the
caller waits for the RPCs carrying the missing seqids to complete.
Note that there can still be pathologies where the server crashes before
it can even send us the missing seqids. Since the OPEN call is still
holding a slot when it waits here, that could cause the recovery to
stall forever. To avoid that, we time out after a 5 second wait.
Reported-by: Benjamin Coddington <bcodding@redhat.com>
Signed-off-by: Trond Myklebust <trond.myklebust@primarydata.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
2017-11-06 20:28:01 +00:00
|
|
|
nfs_set_open_stateid_locked(state, open_stateid, freeme);
|
|
|
|
switch (fmode) {
|
|
|
|
case FMODE_READ:
|
|
|
|
set_bit(NFS_O_RDONLY_STATE, &state->flags);
|
|
|
|
break;
|
|
|
|
case FMODE_WRITE:
|
|
|
|
set_bit(NFS_O_WRONLY_STATE, &state->flags);
|
|
|
|
break;
|
|
|
|
case FMODE_READ|FMODE_WRITE:
|
|
|
|
set_bit(NFS_O_RDWR_STATE, &state->flags);
|
2007-07-05 22:07:55 +00:00
|
|
|
}
|
NFSv4: Fix OPEN / CLOSE race
Ben Coddington has noted the following race between OPEN and CLOSE
on a single client.
Process 1 Process 2 Server
========= ========= ======
1) OPEN file
2) OPEN file
3) Process OPEN (1) seqid=1
4) Process OPEN (2) seqid=2
5) Reply OPEN (2)
6) Receive reply (2)
7) new stateid, seqid=2
8) CLOSE file, using
stateid w/ seqid=2
9) Reply OPEN (1)
10( Process CLOSE (8)
11) Reply CLOSE (8)
12) Forget stateid
file closed
13) Receive reply (7)
14) Forget stateid
file closed.
15) Receive reply (1).
16) New stateid seqid=1
is really the same
stateid that was
closed.
IOW: the reply to the first OPEN is delayed. Since "Process 2" does
not wait before closing the file, and it does not cache the closed
stateid, then when the delayed reply is finally received, it is treated
as setting up a new stateid by the client.
The fix is to ensure that the client processes the OPEN and CLOSE calls
in the same order in which the server processed them.
This commit ensures that we examine the seqid of the stateid
returned by OPEN. If it is a new stateid, we assume the seqid
must be equal to the value 1, and that each state transition
increments the seqid value by 1 (See RFC7530, Section 9.1.4.2,
and RFC5661, Section 8.2.2).
If the tracker sees that an OPEN returns with a seqid that is greater
than the cached seqid + 1, then it bumps a flag to ensure that the
caller waits for the RPCs carrying the missing seqids to complete.
Note that there can still be pathologies where the server crashes before
it can even send us the missing seqids. Since the OPEN call is still
holding a slot when it waits here, that could cause the recovery to
stall forever. To avoid that, we time out after a 5 second wait.
Reported-by: Benjamin Coddington <bcodding@redhat.com>
Signed-off-by: Trond Myklebust <trond.myklebust@primarydata.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
2017-11-06 20:28:01 +00:00
|
|
|
set_bit(NFS_OPEN_STATE, &state->flags);
|
|
|
|
write_sequnlock(&state->seqlock);
|
|
|
|
}
|
|
|
|
|
2019-07-22 17:32:59 +00:00
|
|
|
/*
 * Atomically clear all open-mode tracking bits on @state, including the
 * NFS_OPEN_STATE bit itself.  Used when the open stateid must be
 * re-established (e.g. state recovery after a server reboot).
 */
static void nfs_state_clear_open_state_flags(struct nfs4_state *state)
{
	/* Drop the per-share-mode bits first ... */
	clear_bit(NFS_O_RDWR_STATE, &state->flags);
	clear_bit(NFS_O_WRONLY_STATE, &state->flags);
	clear_bit(NFS_O_RDONLY_STATE, &state->flags);
	/* ... then mark the stateid itself as no longer valid. */
	clear_bit(NFS_OPEN_STATE, &state->flags);
}
|
|
|
|
|
NFSv4: Fix OPEN / CLOSE race
Ben Coddington has noted the following race between OPEN and CLOSE
on a single client.
Process 1 Process 2 Server
========= ========= ======
1) OPEN file
2) OPEN file
3) Process OPEN (1) seqid=1
4) Process OPEN (2) seqid=2
5) Reply OPEN (2)
6) Receive reply (2)
7) new stateid, seqid=2
8) CLOSE file, using
stateid w/ seqid=2
9) Reply OPEN (1)
10) Process CLOSE (8)
11) Reply CLOSE (8)
12) Forget stateid
file closed
13) Receive reply (7)
14) Forget stateid
file closed.
15) Receive reply (1).
16) New stateid seqid=1
is really the same
stateid that was
closed.
IOW: the reply to the first OPEN is delayed. Since "Process 2" does
not wait before closing the file, and it does not cache the closed
stateid, then when the delayed reply is finally received, it is treated
as setting up a new stateid by the client.
The fix is to ensure that the client processes the OPEN and CLOSE calls
in the same order in which the server processed them.
This commit ensures that we examine the seqid of the stateid
returned by OPEN. If it is a new stateid, we assume the seqid
must be equal to the value 1, and that each state transition
increments the seqid value by 1 (See RFC7530, Section 9.1.4.2,
and RFC5661, Section 8.2.2).
If the tracker sees that an OPEN returns with a seqid that is greater
than the cached seqid + 1, then it bumps a flag to ensure that the
caller waits for the RPCs carrying the missing seqids to complete.
Note that there can still be pathologies where the server crashes before
it can even send us the missing seqids. Since the OPEN call is still
holding a slot when it waits here, that could cause the recovery to
stall forever. To avoid that, we time out after a 5 second wait.
Reported-by: Benjamin Coddington <bcodding@redhat.com>
Signed-off-by: Trond Myklebust <trond.myklebust@primarydata.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
2017-11-06 20:28:01 +00:00
|
|
|
/*
 * Record a delegation stateid in @state->stateid and flag the state as
 * delegated.  The seqlock serialises against readers of the stateid and
 * against concurrent stateid updates.
 */
static void nfs_state_set_delegation(struct nfs4_state *state,
		const nfs4_stateid *deleg_stateid,
		fmode_t fmode)
{
	/*
	 * Protect the call to nfs4_state_set_mode_locked and
	 * serialise the stateid update
	 */
	write_seqlock(&state->seqlock);
	nfs4_stateid_copy(&state->stateid, deleg_stateid);
	set_bit(NFS_DELEGATED_STATE, &state->flags);
	write_sequnlock(&state->seqlock);
}
|
|
|
|
|
2018-09-05 18:07:15 +00:00
|
|
|
/*
 * Revert @state->stateid to the plain open stateid and drop the
 * delegated flag.  Called when a delegation is being returned or lost.
 * The seqlock write section keeps the stateid/flag pair consistent for
 * seqlock readers.
 */
static void nfs_state_clear_delegation(struct nfs4_state *state)
{
	write_seqlock(&state->seqlock);
	nfs4_stateid_copy(&state->stateid, &state->open_stateid);
	clear_bit(NFS_DELEGATED_STATE, &state->flags);
	write_sequnlock(&state->seqlock);
}
|
|
|
|
|
2019-10-08 20:33:53 +00:00
|
|
|
/*
 * Update the open and/or delegation stateid cached in @state.
 *
 * @open_stateid: new open stateid returned by the server, or NULL.
 * @delegation:   delegation stateid to validate against the cached
 *                delegation, or NULL to accept whatever is cached.
 * @fmode:        open mode(s) this update covers (read/write bits only).
 *
 * Returns 1 if the state was updated (caller's open is accounted via
 * update_open_stateflags()), 0 otherwise.
 *
 * Locking: takes rcu_read_lock for the delegation lookup, then the
 * state owner's so_lock, and the delegation's own spinlock while the
 * delegation fields are examined.
 */
int update_open_stateid(struct nfs4_state *state,
		const nfs4_stateid *open_stateid,
		const nfs4_stateid *delegation,
		fmode_t fmode)
{
	struct nfs_server *server = NFS_SERVER(state->inode);
	struct nfs_client *clp = server->nfs_client;
	struct nfs_inode *nfsi = NFS_I(state->inode);
	struct nfs_delegation *deleg_cur;
	nfs4_stateid freeme = { };	/* stateid to FREE_STATEID after unlock */
	int ret = 0;

	fmode &= (FMODE_READ|FMODE_WRITE);

	rcu_read_lock();
	spin_lock(&state->owner->so_lock);
	if (open_stateid != NULL) {
		/* May stash a superseded stateid in 'freeme' for later release */
		nfs_state_set_open_stateid(state, open_stateid, fmode, &freeme);
		ret = 1;
	}

	deleg_cur = nfs4_get_valid_delegation(state->inode);
	if (deleg_cur == NULL)
		goto no_delegation;

	spin_lock(&deleg_cur->lock);
	/*
	 * Only use the delegation if it is still current, not being
	 * returned, and covers the requested open mode.
	 */
	if (rcu_dereference(nfsi->delegation) != deleg_cur ||
	   test_bit(NFS_DELEGATION_RETURNING, &deleg_cur->flags) ||
	    (deleg_cur->type & fmode) != fmode)
		goto no_delegation_unlock;

	if (delegation == NULL)
		delegation = &deleg_cur->stateid;
	else if (!nfs4_stateid_match_other(&deleg_cur->stateid, delegation))
		goto no_delegation_unlock;

	nfs_mark_delegation_referenced(deleg_cur);
	nfs_state_set_delegation(state, &deleg_cur->stateid, fmode);
	ret = 1;
no_delegation_unlock:
	spin_unlock(&deleg_cur->lock);
no_delegation:
	if (ret)
		update_open_stateflags(state, fmode);
	spin_unlock(&state->owner->so_lock);
	rcu_read_unlock();

	/* Kick off recovery if this stateid was marked for reclaim */
	if (test_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags))
		nfs4_schedule_state_manager(clp);
	/* Release any stateid that was displaced above */
	if (freeme.type != 0)
		nfs4_test_and_free_stateid(server, &freeme,
				state->owner->so_cred);

	return ret;
}
|
|
|
|
|
2015-01-24 20:07:56 +00:00
|
|
|
static bool nfs4_update_lock_stateid(struct nfs4_lock_state *lsp,
|
|
|
|
const nfs4_stateid *stateid)
|
|
|
|
{
|
|
|
|
struct nfs4_state *state = lsp->ls_state;
|
|
|
|
bool ret = false;
|
|
|
|
|
|
|
|
spin_lock(&state->state_lock);
|
|
|
|
if (!nfs4_stateid_match_other(stateid, &lsp->ls_stateid))
|
|
|
|
goto out_noupdate;
|
|
|
|
if (!nfs4_stateid_is_newer(stateid, &lsp->ls_stateid))
|
|
|
|
goto out_noupdate;
|
|
|
|
nfs4_stateid_copy(&lsp->ls_stateid, stateid);
|
|
|
|
ret = true;
|
|
|
|
out_noupdate:
|
|
|
|
spin_unlock(&state->state_lock);
|
|
|
|
return ret;
|
|
|
|
}
|
2008-12-23 20:21:38 +00:00
|
|
|
|
2008-12-23 20:21:56 +00:00
|
|
|
/*
 * If the inode holds a delegation that does not cover the requested
 * open mode, return it to the server before proceeding with the open.
 * No-op when there is no valid delegation or it already covers @fmode.
 */
static void nfs4_return_incompatible_delegation(struct inode *inode, fmode_t fmode)
{
	struct nfs_delegation *delegation;
	bool incompatible;

	fmode &= FMODE_READ|FMODE_WRITE;
	rcu_read_lock();
	delegation = nfs4_get_valid_delegation(inode);
	/* Incompatible iff a delegation exists but lacks a requested mode */
	incompatible = delegation != NULL &&
		       (delegation->type & fmode) != fmode;
	rcu_read_unlock();
	if (incompatible)
		nfs4_inode_return_delegation(inode);
}
|
|
|
|
|
2007-07-08 18:11:36 +00:00
|
|
|
/*
 * Try to satisfy an OPEN without going to the server, either from
 * already-cached open state or by claiming a cached delegation.
 *
 * Returns the (referenced) nfs4_state on success, or ERR_PTR(-EAGAIN)
 * (or another errno from the permission check) when an on-the-wire
 * OPEN is required.
 *
 * The loop re-checks after each attempt because the delegation may be
 * returned or the cached state may change between the unlocked steps.
 */
static struct nfs4_state *nfs4_try_open_cached(struct nfs4_opendata *opendata)
{
	struct nfs4_state *state = opendata->state;
	struct nfs_delegation *delegation;
	int open_mode = opendata->o_arg.open_flags;
	fmode_t fmode = opendata->o_arg.fmode;
	enum open_claim_type4 claim = opendata->o_arg.claim;
	nfs4_stateid stateid;
	int ret = -EAGAIN;

	for (;;) {
		spin_lock(&state->owner->so_lock);
		if (can_open_cached(state, fmode, open_mode, claim)) {
			/* Existing open state covers this request */
			update_open_stateflags(state, fmode);
			spin_unlock(&state->owner->so_lock);
			goto out_return_state;
		}
		spin_unlock(&state->owner->so_lock);
		rcu_read_lock();
		delegation = nfs4_get_valid_delegation(state->inode);
		if (!can_open_delegated(delegation, fmode, claim)) {
			rcu_read_unlock();
			break;
		}
		/* Save the delegation */
		nfs4_stateid_copy(&stateid, &delegation->stateid);
		rcu_read_unlock();
		/* No OPEN will go on the wire; release the reserved seqid */
		nfs_release_seqid(opendata->o_arg.seqid);
		if (!opendata->is_recover) {
			/* Enforce local permission checks the server would do */
			ret = nfs_may_open(state->inode, state->owner->so_cred, open_mode);
			if (ret != 0)
				goto out;
		}
		ret = -EAGAIN;

		/* Try to update the stateid using the delegation */
		if (update_open_stateid(state, NULL, &stateid, fmode))
			goto out_return_state;
	}
out:
	return ERR_PTR(ret);
out_return_state:
	refcount_inc(&state->count);
	return state;
}
|
|
|
|
|
2012-10-03 01:07:32 +00:00
|
|
|
/*
 * Process a delegation returned in an OPEN reply: record it on the
 * inode (or reclaim it after a server reboot), and immediately start
 * returning it if the server asked for a recall via do_recall.
 *
 * A delegation handed out on a CLAIM_DELEGATE_CUR-style open is a
 * protocol violation, so it is logged and ignored.
 */
static void
nfs4_opendata_check_deleg(struct nfs4_opendata *data, struct nfs4_state *state)
{
	struct nfs_client *clp = NFS_SERVER(state->inode)->nfs_client;
	struct nfs_delegation *delegation;
	int delegation_flags = 0;

	/* Snapshot the current delegation's flags under RCU */
	rcu_read_lock();
	delegation = rcu_dereference(NFS_I(state->inode)->delegation);
	if (delegation)
		delegation_flags = delegation->flags;
	rcu_read_unlock();
	switch (data->o_arg.claim) {
	default:
		break;
	case NFS4_OPEN_CLAIM_DELEGATE_CUR:
	case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
		/* Server must not grant a delegation on these claim types */
		pr_err_ratelimited("NFS: Broken NFSv4 server %s is "
				"returning a delegation for "
				"OPEN(CLAIM_DELEGATE_CUR)\n",
				clp->cl_hostname);
		return;
	}
	if ((delegation_flags & 1UL<<NFS_DELEGATION_NEED_RECLAIM) == 0)
		nfs_inode_set_delegation(state->inode,
				data->owner->so_cred,
				data->o_res.delegation_type,
				&data->o_res.delegation,
				data->o_res.pagemod_limit);
	else
		nfs_inode_reclaim_delegation(state->inode,
				data->owner->so_cred,
				data->o_res.delegation_type,
				&data->o_res.delegation,
				data->o_res.pagemod_limit);

	/* Server requested an immediate recall of this delegation */
	if (data->o_res.do_recall)
		nfs_async_inode_return_delegation(state->inode,
				&data->o_res.delegation);
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Check the inode attributes against the CLAIM_PREVIOUS returned attributes
|
|
|
|
* and update the nfs4_state.
|
|
|
|
*/
|
|
|
|
/*
 * Convert the result of a CLAIM_PREVIOUS (reboot-reclaim) OPEN into an
 * nfs4_state.  Unlike the normal path, the inode already exists, so we
 * only refresh its attributes and update the cached stateid.
 *
 * Returns a referenced nfs4_state, or an ERR_PTR on failure
 * (-EAGAIN if the stateid could not be updated).
 */
static struct nfs4_state *
_nfs4_opendata_reclaim_to_nfs4_state(struct nfs4_opendata *data)
{
	struct inode *inode = data->state->inode;
	struct nfs4_state *state = data->state;
	int ret;

	if (!data->rpc_done) {
		/* RPC never completed: report its error or fall back to cache */
		if (data->rpc_status)
			return ERR_PTR(data->rpc_status);
		return nfs4_try_open_cached(data);
	}

	ret = nfs_refresh_inode(inode, &data->f_attr);
	if (ret)
		return ERR_PTR(ret);

	if (data->o_res.delegation_type != 0)
		nfs4_opendata_check_deleg(data, state);

	if (!update_open_stateid(state, &data->o_res.stateid,
				NULL, data->o_arg.fmode))
		return ERR_PTR(-EAGAIN);
	refcount_inc(&state->count);

	return state;
}
|
|
|
|
|
2017-08-08 13:06:18 +00:00
|
|
|
static struct inode *
|
|
|
|
nfs4_opendata_get_inode(struct nfs4_opendata *data)
|
|
|
|
{
|
|
|
|
struct inode *inode;
|
|
|
|
|
|
|
|
switch (data->o_arg.claim) {
|
|
|
|
case NFS4_OPEN_CLAIM_NULL:
|
|
|
|
case NFS4_OPEN_CLAIM_DELEGATE_CUR:
|
|
|
|
case NFS4_OPEN_CLAIM_DELEGATE_PREV:
|
|
|
|
if (!(data->f_attr.valid & NFS_ATTR_FATTR))
|
|
|
|
return ERR_PTR(-EAGAIN);
|
|
|
|
inode = nfs_fhget(data->dir->d_sb, &data->o_res.fh,
|
2021-10-22 17:11:11 +00:00
|
|
|
&data->f_attr);
|
2017-08-08 13:06:18 +00:00
|
|
|
break;
|
|
|
|
default:
|
|
|
|
inode = d_inode(data->dentry);
|
|
|
|
ihold(inode);
|
|
|
|
nfs_refresh_inode(inode, &data->f_attr);
|
|
|
|
}
|
|
|
|
return inode;
|
|
|
|
}
|
|
|
|
|
2012-10-03 01:07:32 +00:00
|
|
|
/*
 * Find (or create) the nfs4_state corresponding to an OPEN reply.
 * Reuses data->state when it matches the resolved inode; otherwise
 * looks up/creates open state for this owner.  Always drops the inode
 * reference taken by nfs4_opendata_get_inode().
 *
 * Returns a referenced nfs4_state or an ERR_PTR.
 */
static struct nfs4_state *
nfs4_opendata_find_nfs4_state(struct nfs4_opendata *data)
{
	struct nfs4_state *state;
	struct inode *inode;

	inode = nfs4_opendata_get_inode(data);
	if (IS_ERR(inode))
		return ERR_CAST(inode);
	if (data->state != NULL && data->state->inode == inode) {
		state = data->state;
		refcount_inc(&state->count);
	} else
		state = nfs4_get_open_state(inode, data->owner);
	iput(inode);
	if (state == NULL)
		state = ERR_PTR(-ENOMEM);
	return state;
}
|
|
|
|
|
|
|
|
/*
 * Convert the result of a (non-reclaim) OPEN RPC into an nfs4_state.
 * If the RPC did not complete, fall back to the cached-open path.
 * On success the new open stateid (and any delegation) is recorded.
 * The reserved open seqid is always released before returning.
 *
 * Returns a referenced nfs4_state or an ERR_PTR.
 */
static struct nfs4_state *
_nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data)
{
	struct nfs4_state *state;

	if (!data->rpc_done) {
		state = nfs4_try_open_cached(data);
		trace_nfs4_cached_open(data->state);
		goto out;
	}

	state = nfs4_opendata_find_nfs4_state(data);
	if (IS_ERR(state))
		goto out;

	if (data->o_res.delegation_type != 0)
		nfs4_opendata_check_deleg(data, state);
	if (!update_open_stateid(state, &data->o_res.stateid,
				NULL, data->o_arg.fmode)) {
		/* Stateid update raced/failed: drop our reference and retry */
		nfs4_put_open_state(state);
		state = ERR_PTR(-EAGAIN);
	}
out:
	nfs_release_seqid(data->o_arg.seqid);
	return state;
}
|
|
|
|
|
2012-10-03 01:07:32 +00:00
|
|
|
static struct nfs4_state *
|
|
|
|
nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data)
|
|
|
|
{
|
2016-08-28 15:50:26 +00:00
|
|
|
struct nfs4_state *ret;
|
|
|
|
|
2012-10-03 01:07:32 +00:00
|
|
|
if (data->o_arg.claim == NFS4_OPEN_CLAIM_PREVIOUS)
|
2016-08-28 15:50:26 +00:00
|
|
|
ret =_nfs4_opendata_reclaim_to_nfs4_state(data);
|
|
|
|
else
|
|
|
|
ret = _nfs4_opendata_to_nfs4_state(data);
|
|
|
|
nfs4_sequence_free_slot(&data->o_res.seq_res);
|
|
|
|
return ret;
|
2012-10-03 01:07:32 +00:00
|
|
|
}
|
|
|
|
|
2018-09-02 19:57:01 +00:00
|
|
|
/*
 * Find an open context attached to @state whose mode covers @mode.
 * Walks the inode's open-file list under RCU; a reference is taken on
 * the returned context (get_nfs_open_context() can fail for a context
 * that is being torn down, in which case the walk continues).
 *
 * Returns a referenced context or ERR_PTR(-ENOENT).
 */
static struct nfs_open_context *
nfs4_state_find_open_context_mode(struct nfs4_state *state, fmode_t mode)
{
	struct nfs_inode *nfsi = NFS_I(state->inode);
	struct nfs_open_context *ctx;

	rcu_read_lock();
	list_for_each_entry_rcu(ctx, &nfsi->open_files, list) {
		if (ctx->state != state)
			continue;
		if ((ctx->mode & mode) != mode)
			continue;
		if (!get_nfs_open_context(ctx))
			continue;
		rcu_read_unlock();
		return ctx;
	}
	rcu_read_unlock();
	return ERR_PTR(-ENOENT);
}
|
|
|
|
|
2018-09-02 19:57:01 +00:00
|
|
|
static struct nfs_open_context *
|
|
|
|
nfs4_state_find_open_context(struct nfs4_state *state)
|
|
|
|
{
|
|
|
|
struct nfs_open_context *ctx;
|
|
|
|
|
|
|
|
ctx = nfs4_state_find_open_context_mode(state, FMODE_READ|FMODE_WRITE);
|
|
|
|
if (!IS_ERR(ctx))
|
|
|
|
return ctx;
|
|
|
|
ctx = nfs4_state_find_open_context_mode(state, FMODE_WRITE);
|
|
|
|
if (!IS_ERR(ctx))
|
|
|
|
return ctx;
|
|
|
|
return nfs4_state_find_open_context_mode(state, FMODE_READ);
|
|
|
|
}
|
|
|
|
|
2013-03-15 18:57:33 +00:00
|
|
|
static struct nfs4_opendata *nfs4_open_recoverdata_alloc(struct nfs_open_context *ctx,
|
|
|
|
struct nfs4_state *state, enum open_claim_type4 claim)
|
2007-07-18 01:50:45 +00:00
|
|
|
{
|
|
|
|
struct nfs4_opendata *opendata;
|
|
|
|
|
2013-03-15 18:57:33 +00:00
|
|
|
opendata = nfs4_opendata_alloc(ctx->dentry, state->owner, 0, 0,
|
2017-11-06 20:28:03 +00:00
|
|
|
NULL, claim, GFP_NOFS);
|
2007-07-18 01:50:45 +00:00
|
|
|
if (opendata == NULL)
|
|
|
|
return ERR_PTR(-ENOMEM);
|
|
|
|
opendata->state = state;
|
2018-09-02 23:19:07 +00:00
|
|
|
refcount_inc(&state->count);
|
2007-07-18 01:50:45 +00:00
|
|
|
return opendata;
|
|
|
|
}
|
|
|
|
|
2015-09-20 14:50:17 +00:00
|
|
|
/*
 * Re-send an OPEN for a single access mode as part of state recovery.
 * Skips modes the current open stateid does not cover (nothing to
 * reclaim).  Returns 0 on success (or skip), -ESTALE if the recovered
 * state does not match the original, or a negative errno.
 */
static int nfs4_open_recover_helper(struct nfs4_opendata *opendata,
		fmode_t fmode)
{
	struct nfs4_state *newstate;
	struct nfs_server *server = NFS_SB(opendata->dentry->d_sb);
	int openflags = opendata->o_arg.open_flags;
	int ret;

	/* Nothing to recover if we never held this mode */
	if (!nfs4_mode_match_open_stateid(opendata->state, fmode))
		return 0;
	opendata->o_arg.fmode = fmode;
	opendata->o_arg.share_access =
		nfs4_map_atomic_open_share(server, fmode, openflags);
	/* Reset result structures so the opendata can be reused */
	memset(&opendata->o_res, 0, sizeof(opendata->o_res));
	memset(&opendata->c_res, 0, sizeof(opendata->c_res));
	nfs4_init_opendata_res(opendata);
	ret = _nfs4_recover_proc_open(opendata);
	if (ret != 0)
		return ret;
	newstate = nfs4_opendata_to_nfs4_state(opendata);
	if (IS_ERR(newstate))
		return PTR_ERR(newstate);
	/* Recovery must land on the very state we were recovering */
	if (newstate != opendata->state)
		ret = -ESTALE;
	/* Drop the mode count/reference taken by the conversion above */
	nfs4_close_state(newstate, fmode);
	return ret;
}
|
|
|
|
|
|
|
|
/*
 * Recover all open modes held on @state by replaying OPENs for
 * read+write, write-only and read-only in turn, then resynchronise the
 * current stateid with the (possibly refreshed) open stateid.
 * Returns 0 on success or the first helper error.
 */
static int nfs4_open_recover(struct nfs4_opendata *opendata, struct nfs4_state *state)
{
	int ret;

	/* memory barrier prior to reading state->n_* */
	smp_rmb();
	ret = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE);
	if (ret != 0)
		return ret;
	ret = nfs4_open_recover_helper(opendata, FMODE_WRITE);
	if (ret != 0)
		return ret;
	ret = nfs4_open_recover_helper(opendata, FMODE_READ);
	if (ret != 0)
		return ret;
	/*
	 * We may have performed cached opens for all three recoveries.
	 * Check if we need to update the current stateid.
	 */
	if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0 &&
	    !nfs4_stateid_match(&state->stateid, &state->open_stateid)) {
		write_seqlock(&state->seqlock);
		/* Re-check under the seqlock to avoid racing a new delegation */
		if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
			nfs4_stateid_copy(&state->stateid, &state->open_stateid);
		write_sequnlock(&state->seqlock);
	}
	return 0;
}
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
/*
|
|
|
|
* OPEN_RECLAIM:
|
|
|
|
* reclaim state on the server after a reboot.
|
|
|
|
*/
|
2007-06-05 15:46:42 +00:00
|
|
|
/*
 * One attempt at reclaiming open state after a server reboot
 * (OPEN with CLAIM_PREVIOUS).  If the inode holds a delegation marked
 * for reclaim, its type is sent so the server re-establishes it too.
 * Returns 0 on success or a negative errno / NFS4ERR value.
 */
static int _nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state)
{
	struct nfs_delegation *delegation;
	struct nfs4_opendata *opendata;
	fmode_t delegation_type = 0;
	int status;

	opendata = nfs4_open_recoverdata_alloc(ctx, state,
			NFS4_OPEN_CLAIM_PREVIOUS);
	if (IS_ERR(opendata))
		return PTR_ERR(opendata);
	/* Snapshot the delegation type to reclaim, if any, under RCU */
	rcu_read_lock();
	delegation = rcu_dereference(NFS_I(state->inode)->delegation);
	if (delegation != NULL && test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags) != 0)
		delegation_type = delegation->type;
	rcu_read_unlock();
	opendata->o_arg.u.delegation_type = delegation_type;
	status = nfs4_open_recover(opendata, state);
	nfs4_opendata_put(opendata);
	return status;
}
|
|
|
|
|
2007-06-05 15:46:42 +00:00
|
|
|
/*
 * Retry wrapper around _nfs4_do_open_reclaim(): handles servers that
 * lack atomic-open v1 support and retries on NFS4ERR_DELAY via the
 * standard exception machinery.  Returns the final status.
 */
static int nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state)
{
	struct nfs_server *server = NFS_SERVER(state->inode);
	struct nfs4_exception exception = { };
	int err;
	do {
		err = _nfs4_do_open_reclaim(ctx, state);
		trace_nfs4_open_reclaim(ctx, 0, err);
		/* Retry immediately if the server rejected atomic-open v1 */
		if (nfs4_clear_cap_atomic_open_v1(server, err, &exception))
			continue;
		/* Only NFS4ERR_DELAY is retried via the exception handler */
		if (err != -NFS4ERR_DELAY)
			break;
		nfs4_handle_exception(server, err, &exception);
	} while (exception.retry);
	return err;
}
|
|
|
|
|
2006-01-03 08:55:15 +00:00
|
|
|
static int nfs4_open_reclaim(struct nfs4_state_owner *sp, struct nfs4_state *state)
|
|
|
|
{
|
|
|
|
struct nfs_open_context *ctx;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
ctx = nfs4_state_find_open_context(state);
|
|
|
|
if (IS_ERR(ctx))
|
2013-03-28 18:01:33 +00:00
|
|
|
return -EAGAIN;
|
2019-07-22 17:32:59 +00:00
|
|
|
clear_bit(NFS_DELEGATED_STATE, &state->flags);
|
|
|
|
nfs_state_clear_open_state_flags(state);
|
2007-06-05 15:46:42 +00:00
|
|
|
ret = nfs4_do_open_reclaim(ctx, state);
|
2006-01-03 08:55:15 +00:00
|
|
|
put_nfs_open_context(ctx);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2017-12-12 22:57:09 +00:00
|
|
|
/*
 * Map an error from delegation-recall recovery onto the appropriate
 * recovery action.  Schedules lease/migration/stateid recovery as
 * needed; returns -EAGAIN when the caller should retry, 0 when the
 * error is terminal but handled (e.g. a lost lock), or the original
 * error for unrecoverable cases.  @fl, when non-NULL, identifies a
 * posix lock whose state may need to be marked lost.
 */
static int nfs4_handle_delegation_recall_error(struct nfs_server *server, struct nfs4_state *state, const nfs4_stateid *stateid, struct file_lock *fl, int err)
{
	switch (err) {
	default:
		printk(KERN_ERR "NFS: %s: unhandled error "
				"%d.\n", __func__, err);
		fallthrough;
	case 0:
	case -ENOENT:
	case -EAGAIN:
	case -ESTALE:
	case -ETIMEDOUT:
		break;
	case -NFS4ERR_BADSESSION:
	case -NFS4ERR_BADSLOT:
	case -NFS4ERR_BAD_HIGH_SLOT:
	case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
	case -NFS4ERR_DEADSESSION:
		/* Session-level failure: caller retries after recovery */
		return -EAGAIN;
	case -NFS4ERR_STALE_CLIENTID:
	case -NFS4ERR_STALE_STATEID:
		/* Don't recall a delegation if it was lost */
		nfs4_schedule_lease_recovery(server->nfs_client);
		return -EAGAIN;
	case -NFS4ERR_MOVED:
		nfs4_schedule_migration_recovery(server);
		return -EAGAIN;
	case -NFS4ERR_LEASE_MOVED:
		nfs4_schedule_lease_moved_recovery(server->nfs_client);
		return -EAGAIN;
	case -NFS4ERR_DELEG_REVOKED:
	case -NFS4ERR_ADMIN_REVOKED:
	case -NFS4ERR_EXPIRED:
	case -NFS4ERR_BAD_STATEID:
	case -NFS4ERR_OPENMODE:
		/* The stateid itself is bad: recover it, then retry */
		nfs_inode_find_state_and_recover(state->inode,
				stateid);
		nfs4_schedule_stateid_recovery(server, state);
		return -EAGAIN;
	case -NFS4ERR_DELAY:
	case -NFS4ERR_GRACE:
		ssleep(1);
		return -EAGAIN;
	case -ENOMEM:
	case -NFS4ERR_DENIED:
		/* The lock could not be reclaimed: mark it lost */
		if (fl) {
			struct nfs4_lock_state *lsp = fl->fl_u.nfs4_fl.owner;
			if (lsp)
				set_bit(NFS_LOCK_LOST, &lsp->ls_flags);
		}
		return 0;
	}
	return err;
}
|
|
|
|
|
2015-09-20 14:50:17 +00:00
|
|
|
/*
 * Recover the open state covered by a delegation that is being recalled.
 *
 * Re-sends OPEN with claim type NFS4_OPEN_CLAIM_DELEG_CUR_FH for each open
 * mode (RDWR, WRONLY, RDONLY) that is not already confirmed in
 * state->flags, then drops the cached delegation on success.  Any error
 * is post-processed by nfs4_handle_delegation_recall_error().
 */
int nfs4_open_delegation_recall(struct nfs_open_context *ctx,
                struct nfs4_state *state, const nfs4_stateid *stateid)
{
        struct nfs_server *server = NFS_SERVER(state->inode);
        struct nfs4_opendata *opendata;
        int err = 0;

        opendata = nfs4_open_recoverdata_alloc(ctx, state,
                        NFS4_OPEN_CLAIM_DELEG_CUR_FH);
        if (IS_ERR(opendata))
                return PTR_ERR(opendata);
        /* Tell the server which delegation we are reclaiming from */
        nfs4_stateid_copy(&opendata->o_arg.u.delegation, stateid);
        if (!test_bit(NFS_O_RDWR_STATE, &state->flags)) {
                err = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE);
                if (err)
                        goto out;
        }
        if (!test_bit(NFS_O_WRONLY_STATE, &state->flags)) {
                err = nfs4_open_recover_helper(opendata, FMODE_WRITE);
                if (err)
                        goto out;
        }
        if (!test_bit(NFS_O_RDONLY_STATE, &state->flags)) {
                err = nfs4_open_recover_helper(opendata, FMODE_READ);
                if (err)
                        goto out;
        }
        /* All modes recovered: the delegation is no longer needed */
        nfs_state_clear_delegation(state);
out:
        nfs4_opendata_put(opendata);
        return nfs4_handle_delegation_recall_error(server, state, stateid, NULL, err);
}
|
|
|
|
|
2013-08-09 16:49:47 +00:00
|
|
|
static void nfs4_open_confirm_prepare(struct rpc_task *task, void *calldata)
|
|
|
|
{
|
|
|
|
struct nfs4_opendata *data = calldata;
|
|
|
|
|
2017-01-10 16:39:53 +00:00
|
|
|
nfs4_setup_sequence(data->o_arg.server->nfs_client,
|
|
|
|
&data->c_arg.seq_args, &data->c_res.seq_res, task);
|
2013-08-09 16:49:47 +00:00
|
|
|
}
|
|
|
|
|
2006-01-03 08:55:12 +00:00
|
|
|
/*
 * rpc_call_done callback for OPEN_CONFIRM: record the RPC status and, on
 * success, install the confirmed stateid, confirm the open-owner seqid and
 * renew the lease.
 */
static void nfs4_open_confirm_done(struct rpc_task *task, void *calldata)
{
        struct nfs4_opendata *data = calldata;

        nfs40_sequence_done(task, &data->c_res.seq_res);

        data->rpc_status = task->tk_status;
        if (data->rpc_status == 0) {
                /* The confirmed stateid supersedes the one from OPEN */
                nfs4_stateid_copy(&data->o_res.stateid, &data->c_res.stateid);
                nfs_confirm_seqid(&data->owner->so_seqid, 0);
                renew_lease(data->o_res.server, data->timestamp);
                /* rpc_done is what the release callback keys cleanup off */
                data->rpc_done = true;
        }
}
|
|
|
|
|
|
|
|
/*
 * rpc_release callback for OPEN_CONFIRM.  If the waiting thread gave up
 * (data->cancelled) but the RPC actually completed, nobody will consume
 * the resulting open state, so close it here to avoid leaking it on the
 * server.
 */
static void nfs4_open_confirm_release(void *calldata)
{
        struct nfs4_opendata *data = calldata;
        struct nfs4_state *state = NULL;

        /* If this request hasn't been cancelled, do nothing */
        if (!data->cancelled)
                goto out_free;
        /* In case of error, no cleanup! */
        if (!data->rpc_done)
                goto out_free;
        state = nfs4_opendata_to_nfs4_state(data);
        if (!IS_ERR(state))
                nfs4_close_state(state, data->o_arg.fmode);
out_free:
        nfs4_opendata_put(data);
}
|
|
|
|
|
|
|
|
/* RPC lifecycle callbacks for the OPEN_CONFIRM operation (NFSv4.0 only) */
static const struct rpc_call_ops nfs4_open_confirm_ops = {
        .rpc_call_prepare = nfs4_open_confirm_prepare,
        .rpc_call_done = nfs4_open_confirm_done,
        .rpc_release = nfs4_open_confirm_release,
};
|
|
|
|
|
|
|
|
/*
 * Note: On error, nfs4_proc_open_confirm will free the struct nfs4_opendata
 */
/*
 * Run an OPEN_CONFIRM RPC asynchronously and wait for its completion.
 *
 * Takes an extra kref on @data for the RPC callbacks.  If the wait is
 * interrupted, data->cancelled is set so nfs4_open_confirm_release() can
 * clean up the orphaned state; otherwise the RPC's own status is returned.
 */
static int _nfs4_proc_open_confirm(struct nfs4_opendata *data)
{
        struct nfs_server *server = NFS_SERVER(d_inode(data->dir));
        struct rpc_task *task;
        struct rpc_message msg = {
                .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_CONFIRM],
                .rpc_argp = &data->c_arg,
                .rpc_resp = &data->c_res,
                .rpc_cred = data->owner->so_cred,
        };
        struct rpc_task_setup task_setup_data = {
                .rpc_client = server->client,
                .rpc_message = &msg,
                .callback_ops = &nfs4_open_confirm_ops,
                .callback_data = data,
                .workqueue = nfsiod_workqueue,
                .flags = RPC_TASK_ASYNC | RPC_TASK_CRED_NOREF,
        };
        int status;

        nfs4_init_sequence(&data->c_arg.seq_args, &data->c_res.seq_res, 1,
                                data->is_recover);
        /* Extra reference for the RPC callbacks; dropped in rpc_release */
        kref_get(&data->kref);
        data->rpc_done = false;
        data->rpc_status = 0;
        data->timestamp = jiffies;
        task = rpc_run_task(&task_setup_data);
        if (IS_ERR(task))
                return PTR_ERR(task);
        status = rpc_wait_for_completion_task(task);
        if (status != 0) {
                /* Interrupted: hand cleanup over to the release callback */
                data->cancelled = true;
                smp_wmb();
        } else
                status = data->rpc_status;
        rpc_put_task(task);
        return status;
}
|
|
|
|
|
2006-01-03 08:55:11 +00:00
|
|
|
/*
 * rpc_call_prepare callback for OPEN.  Serialises on the open-owner seqid,
 * short-circuits the RPC when a cached open or a valid delegation can be
 * used instead, selects the OPEN variant/claim-specific bitmaps, and
 * chooses the create mode (which depends on the minor version and session
 * persistence).
 */
static void nfs4_open_prepare(struct rpc_task *task, void *calldata)
{
        struct nfs4_opendata *data = calldata;
        struct nfs4_state_owner *sp = data->owner;
        struct nfs_client *clp = sp->so_server->nfs_client;
        enum open_claim_type4 claim = data->o_arg.claim;

        if (nfs_wait_on_sequence(data->o_arg.seqid, task) != 0)
                goto out_wait;
        /*
         * Check if we still need to send an OPEN call, or if we can use
         * a delegation instead.
         */
        if (data->state != NULL) {
                struct nfs_delegation *delegation;

                if (can_open_cached(data->state, data->o_arg.fmode,
                                        data->o_arg.open_flags, claim))
                        goto out_no_action;
                rcu_read_lock();
                delegation = nfs4_get_valid_delegation(data->state->inode);
                if (can_open_delegated(delegation, data->o_arg.fmode, claim))
                        goto unlock_no_action;
                rcu_read_unlock();
        }
        /* Update client id. */
        data->o_arg.clientid = clp->cl_clientid;
        switch (claim) {
        default:
                break;
        case NFS4_OPEN_CLAIM_PREVIOUS:
        case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
        case NFS4_OPEN_CLAIM_DELEG_PREV_FH:
                /* Recovery-style claims don't fetch attributes in OPEN */
                data->o_arg.open_bitmap = &nfs4_open_noattr_bitmap[0];
                fallthrough;
        case NFS4_OPEN_CLAIM_FH:
                task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_NOATTR];
        }
        data->timestamp = jiffies;
        if (nfs4_setup_sequence(data->o_arg.server->nfs_client,
                                &data->o_arg.seq_args,
                                &data->o_res.seq_res,
                                task) != 0)
                /* No slot: give the seqid back so others can make progress */
                nfs_release_seqid(data->o_arg.seqid);

        /* Set the create mode (note dependency on the session type) */
        data->o_arg.createmode = NFS4_CREATE_UNCHECKED;
        if (data->o_arg.open_flags & O_EXCL) {
                data->o_arg.createmode = NFS4_CREATE_EXCLUSIVE4_1;
                if (clp->cl_mvops->minor_version == 0) {
                        data->o_arg.createmode = NFS4_CREATE_EXCLUSIVE;
                        /* don't put an ACCESS op in OPEN compound if O_EXCL,
                         * because ACCESS will return permission denied for
                         * all bits until close */
                        data->o_res.access_request = data->o_arg.access = 0;
                } else if (nfs4_has_persistent_session(clp))
                        data->o_arg.createmode = NFS4_CREATE_GUARDED;
        }
        return;
unlock_no_action:
        trace_nfs4_cached_open(data->state);
        rcu_read_unlock();
out_no_action:
        task->tk_action = NULL;
out_wait:
        nfs4_sequence_done(task, &data->o_res.seq_res);
}
|
|
|
|
|
2006-01-03 08:55:11 +00:00
|
|
|
/*
 * rpc_call_done callback for OPEN.  Validates that the opened object is a
 * regular file (rewriting the status for symlinks/directories/others),
 * renews the lease, and confirms the open-owner seqid when the server did
 * not request an explicit OPEN_CONFIRM.
 */
static void nfs4_open_done(struct rpc_task *task, void *calldata)
{
        struct nfs4_opendata *data = calldata;

        data->rpc_status = task->tk_status;

        /* May re-queue the task for a session retransmit; bail if so */
        if (!nfs4_sequence_process(task, &data->o_res.seq_res))
                return;

        if (task->tk_status == 0) {
                if (data->o_res.f_attr->valid & NFS_ATTR_FATTR_TYPE) {
                        switch (data->o_res.f_attr->mode & S_IFMT) {
                        case S_IFREG:
                                break;
                        case S_IFLNK:
                                data->rpc_status = -ELOOP;
                                break;
                        case S_IFDIR:
                                data->rpc_status = -EISDIR;
                                break;
                        default:
                                data->rpc_status = -ENOTDIR;
                        }
                }
                renew_lease(data->o_res.server, data->timestamp);
                if (!(data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM))
                        nfs_confirm_seqid(&data->owner->so_seqid, 0);
        }
        data->rpc_done = true;
}
|
2005-10-18 21:20:18 +00:00
|
|
|
|
2006-01-03 08:55:11 +00:00
|
|
|
/*
 * rpc_release callback for OPEN.  If the waiting thread abandoned the call
 * (data->cancelled) but the OPEN succeeded and needs no OPEN_CONFIRM,
 * close the now-unwanted state so it is not leaked on the server.
 */
static void nfs4_open_release(void *calldata)
{
        struct nfs4_opendata *data = calldata;
        struct nfs4_state *state = NULL;

        /* If this request hasn't been cancelled, do nothing */
        if (!data->cancelled)
                goto out_free;
        /* In case of error, no cleanup! */
        if (data->rpc_status != 0 || !data->rpc_done)
                goto out_free;
        /* In case we need an open_confirm, no cleanup! */
        if (data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM)
                goto out_free;
        state = nfs4_opendata_to_nfs4_state(data);
        if (!IS_ERR(state))
                nfs4_close_state(state, data->o_arg.fmode);
out_free:
        nfs4_opendata_put(data);
}
|
|
|
|
|
|
|
|
/* RPC lifecycle callbacks for the OPEN operation */
static const struct rpc_call_ops nfs4_open_ops = {
        .rpc_call_prepare = nfs4_open_prepare,
        .rpc_call_done = nfs4_open_done,
        .rpc_release = nfs4_open_release,
};
|
|
|
|
|
2016-09-19 14:06:49 +00:00
|
|
|
/*
 * Launch an asynchronous OPEN RPC and wait for it to complete.
 *
 * @ctx == NULL marks a state-recovery open: the sequence is set up in
 * privileged mode, is_recover is set and the task gets RPC_TASK_TIMEOUT.
 * Otherwise a LAYOUTGET may be piggy-backed via pnfs_lgopen_prepare().
 * If the wait is interrupted, data->cancelled hands cleanup to
 * nfs4_open_release().
 */
static int nfs4_run_open_task(struct nfs4_opendata *data,
                              struct nfs_open_context *ctx)
{
        struct inode *dir = d_inode(data->dir);
        struct nfs_server *server = NFS_SERVER(dir);
        struct nfs_openargs *o_arg = &data->o_arg;
        struct nfs_openres *o_res = &data->o_res;
        struct rpc_task *task;
        struct rpc_message msg = {
                .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN],
                .rpc_argp = o_arg,
                .rpc_resp = o_res,
                .rpc_cred = data->owner->so_cred,
        };
        struct rpc_task_setup task_setup_data = {
                .rpc_client = server->client,
                .rpc_message = &msg,
                .callback_ops = &nfs4_open_ops,
                .callback_data = data,
                .workqueue = nfsiod_workqueue,
                .flags = RPC_TASK_ASYNC | RPC_TASK_CRED_NOREF,
        };
        int status;

        if (nfs_server_capable(dir, NFS_CAP_MOVEABLE))
                task_setup_data.flags |= RPC_TASK_MOVEABLE;

        /* Extra reference for the RPC callbacks; dropped in rpc_release */
        kref_get(&data->kref);
        data->rpc_done = false;
        data->rpc_status = 0;
        data->cancelled = false;
        data->is_recover = false;
        if (!ctx) {
                /* Recovery open: privileged sequence, bounded by a timeout */
                nfs4_init_sequence(&o_arg->seq_args, &o_res->seq_res, 1, 1);
                data->is_recover = true;
                task_setup_data.flags |= RPC_TASK_TIMEOUT;
        } else {
                nfs4_init_sequence(&o_arg->seq_args, &o_res->seq_res, 1, 0);
                pnfs_lgopen_prepare(data, ctx);
        }
        task = rpc_run_task(&task_setup_data);
        if (IS_ERR(task))
                return PTR_ERR(task);
        status = rpc_wait_for_completion_task(task);
        if (status != 0) {
                /* Interrupted: the release callback must clean up */
                data->cancelled = true;
                smp_wmb();
        } else
                status = data->rpc_status;
        rpc_put_task(task);

        return status;
}
|
|
|
|
|
|
|
|
/*
 * Recovery-mode OPEN: run the OPEN RPC without an open context (privileged
 * sequencing), then issue OPEN_CONFIRM if the server requested one.
 */
static int _nfs4_recover_proc_open(struct nfs4_opendata *data)
{
        struct inode *dir = d_inode(data->dir);
        struct nfs_openres *o_res = &data->o_res;
        int status;

        status = nfs4_run_open_task(data, NULL);
        if (status != 0 || !data->rpc_done)
                return status;

        nfs_fattr_map_and_free_names(NFS_SERVER(dir), &data->f_attr);

        if (o_res->rflags & NFS4_OPEN_RESULT_CONFIRM)
                status = _nfs4_proc_open_confirm(data);

        return status;
}
|
|
|
|
|
2014-07-10 12:54:32 +00:00
|
|
|
/*
 * Additional permission checks in order to distinguish between an
 * open for read, and an open for execute. This works around the
 * fact that NFSv4 OPEN treats read and execute permissions as being
 * the same.
 * Note that in the non-execute case, we want to turn off permission
 * checking if we just created a new file (POSIX open() semantics).
 *
 * Returns 0 when the required access bits are granted (or when the check
 * must be deferred), -EACCES otherwise.  Always populates the access
 * cache with the server's ACCESS reply when one was returned.
 */
static int nfs4_opendata_access(const struct cred *cred,
                                struct nfs4_opendata *opendata,
                                struct nfs4_state *state, fmode_t fmode)
{
        struct nfs_access_entry cache;
        u32 mask, flags;

        /* access call failed or for some reason the server doesn't
         * support any access modes -- defer access call until later */
        if (opendata->o_res.access_supported == 0)
                return 0;

        mask = 0;
        if (fmode & FMODE_EXEC) {
                /* ONLY check for exec rights */
                if (S_ISDIR(state->inode->i_mode))
                        mask = NFS4_ACCESS_LOOKUP;
                else
                        mask = NFS4_ACCESS_EXECUTE;
        } else if ((fmode & FMODE_READ) && !opendata->file_created)
                mask = NFS4_ACCESS_READ;

        /* Cache the server's ACCESS result regardless of the outcome */
        nfs_access_set_mask(&cache, opendata->o_res.access_result);
        nfs_access_add_cache(state->inode, &cache, cred);

        flags = NFS4_ACCESS_READ | NFS4_ACCESS_EXECUTE | NFS4_ACCESS_LOOKUP;
        if ((mask & ~cache.mask & flags) == 0)
                return 0;

        return -EACCES;
}
|
|
|
|
|
2009-12-15 05:27:57 +00:00
|
|
|
/*
 * Note: On error, nfs4_proc_open will free the struct nfs4_opendata
 */
/*
 * Core OPEN path: run the OPEN RPC, post-process the result (file_created
 * detection, directory change-attribute update, POSIX-lock capability,
 * OPEN_CONFIRM when requested) and fetch attributes with a separate
 * GETATTR if the OPEN reply carried none.
 */
static int _nfs4_proc_open(struct nfs4_opendata *data,
                           struct nfs_open_context *ctx)
{
        struct inode *dir = d_inode(data->dir);
        struct nfs_server *server = NFS_SERVER(dir);
        struct nfs_openargs *o_arg = &data->o_arg;
        struct nfs_openres *o_res = &data->o_res;
        int status;

        status = nfs4_run_open_task(data, ctx);
        if (!data->rpc_done)
                return status;
        if (status != 0) {
                /* A plain lookup-style open of a bad name maps to -ENOENT */
                if (status == -NFS4ERR_BADNAME &&
                    !(o_arg->open_flags & O_CREAT))
                        return -ENOENT;
                return status;
        }

        nfs_fattr_map_and_free_names(server, &data->f_attr);

        if (o_arg->open_flags & O_CREAT) {
                if (o_arg->open_flags & O_EXCL)
                        data->file_created = true;
                else if (o_res->cinfo.before != o_res->cinfo.after)
                        data->file_created = true;
                if (data->file_created ||
                    inode_peek_iversion_raw(dir) != o_res->cinfo.after)
                        nfs4_update_changeattr(dir, &o_res->cinfo,
                                        o_res->f_attr->time_start,
                                        NFS_INO_INVALID_DATA);
        }
        if ((o_res->rflags & NFS4_OPEN_RESULT_LOCKTYPE_POSIX) == 0)
                server->caps &= ~NFS_CAP_POSIX_LOCK;
        if (o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) {
                status = _nfs4_proc_open_confirm(data);
                if (status != 0)
                        return status;
        }
        if (!(o_res->f_attr->valid & NFS_ATTR_FATTR)) {
                struct nfs_fh *fh = &o_res->fh;

                /* Free the session slot before the synchronous GETATTR */
                nfs4_sequence_free_slot(&o_res->seq_res);
                if (o_arg->claim == NFS4_OPEN_CLAIM_FH)
                        fh = NFS_FH(d_inode(data->dentry));
                nfs4_proc_getattr(server, fh, o_res->f_attr, NULL);
        }
        return 0;
}
|
|
|
|
|
|
|
|
/*
 * OPEN_EXPIRED:
 *      reclaim state on the server after a network partition.
 *      Assumes caller holds the appropriate lock
 */
static int _nfs4_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state)
{
        struct nfs4_opendata *opendata;
        int ret;

        opendata = nfs4_open_recoverdata_alloc(ctx, state, NFS4_OPEN_CLAIM_FH);
        if (IS_ERR(opendata))
                return PTR_ERR(opendata);
        /*
         * We're not recovering a delegation, so ask for no delegation.
         * Otherwise the recovery thread could deadlock with an outstanding
         * delegation return.
         */
        opendata->o_arg.open_flags = O_DIRECT;
        ret = nfs4_open_recover(opendata, state);
        if (ret == -ESTALE)
                d_drop(ctx->dentry);
        nfs4_opendata_put(opendata);
        return ret;
}
|
|
|
|
|
2009-12-03 20:53:21 +00:00
|
|
|
/*
 * Retry loop around _nfs4_open_expired().  GRACE/DELAY errors are absorbed
 * via nfs4_handle_exception() (which arranges the retry); any other error
 * terminates the loop and is returned to the caller.
 */
static int nfs4_do_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state)
{
        struct nfs_server *server = NFS_SERVER(state->inode);
        struct nfs4_exception exception = { };
        int err;

        do {
                err = _nfs4_open_expired(ctx, state);
                trace_nfs4_open_expired(ctx, 0, err);
                /* Retry without atomic-open-v1 if the server choked on it */
                if (nfs4_clear_cap_atomic_open_v1(server, err, &exception))
                        continue;
                switch (err) {
                default:
                        goto out;
                case -NFS4ERR_GRACE:
                case -NFS4ERR_DELAY:
                        nfs4_handle_exception(server, err, &exception);
                        err = 0;
                }
        } while (exception.retry);
out:
        return err;
}
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
static int nfs4_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state)
|
|
|
|
{
|
|
|
|
struct nfs_open_context *ctx;
|
2006-01-03 08:55:15 +00:00
|
|
|
int ret;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2006-01-03 08:55:15 +00:00
|
|
|
ctx = nfs4_state_find_open_context(state);
|
|
|
|
if (IS_ERR(ctx))
|
2013-03-28 18:01:33 +00:00
|
|
|
return -EAGAIN;
|
2007-06-05 15:46:42 +00:00
|
|
|
ret = nfs4_do_open_expired(ctx, state);
|
2006-01-03 08:55:15 +00:00
|
|
|
put_nfs_open_context(ctx);
|
|
|
|
return ret;
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
2016-09-22 17:38:58 +00:00
|
|
|
/*
 * Drop a delegation that has been found to be bad/revoked: scrub it from
 * the inode (optionally matching @stateid; may be NULL) and clear the
 * cached delegation state.
 */
static void nfs_finish_clear_delegation_stateid(struct nfs4_state *state,
                const nfs4_stateid *stateid)
{
        nfs_remove_bad_delegation(state->inode, stateid);
        nfs_state_clear_delegation(state);
}
|
|
|
|
|
|
|
|
/*
 * NFSv4.0: unconditionally discard any delegation held for @state's inode.
 */
static void nfs40_clear_delegation_stateid(struct nfs4_state *state)
{
        if (rcu_access_pointer(NFS_I(state->inode)->delegation) != NULL)
                nfs_finish_clear_delegation_stateid(state, NULL);
}
|
|
|
|
|
|
|
|
/*
 * NFSv4.0 flavour of open-expired recovery: drop the delegation and the
 * open-state flags, then reclaim via nfs4_open_expired().
 */
static int nfs40_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state)
{
        /* NFSv4.0 doesn't allow for delegation recovery on open expire */
        nfs40_clear_delegation_stateid(state);
        nfs_state_clear_open_state_flags(state);
        return nfs4_open_expired(sp, state);
}
|
|
|
|
|
2016-09-22 17:38:59 +00:00
|
|
|
/*
 * NFSv4.0 stub: the protocol has no TEST_STATEID/FREE_STATEID operations
 * (those were introduced in v4.1), so every stateid is simply reported as
 * bad and recovered via full reclaim.
 */
static int nfs40_test_and_free_expired_stateid(struct nfs_server *server,
                nfs4_stateid *stateid,
                const struct cred *cred)
{
        return -NFS4ERR_BAD_STATEID;
}
|
|
|
|
|
2011-06-02 18:59:10 +00:00
|
|
|
#if defined(CONFIG_NFS_V4_1)
|
2016-09-22 17:38:57 +00:00
|
|
|
/*
 * NFSv4.1: check whether @stateid has been revoked/expired on the server
 * and, if so, acknowledge that with FREE_STATEID.
 *
 * Returns -NFS4ERR_EXPIRED after freeing a revoked stateid,
 * -NFS4ERR_BAD_STATEID for invalid/special stateids, or the raw
 * TEST_STATEID status for anything else.
 */
static int nfs41_test_and_free_expired_stateid(struct nfs_server *server,
                nfs4_stateid *stateid,
                const struct cred *cred)
{
        int status;

        switch (stateid->type) {
        default:
                break;
        case NFS4_INVALID_STATEID_TYPE:
        case NFS4_SPECIAL_STATEID_TYPE:
                /* Never put these on the wire */
                return -NFS4ERR_BAD_STATEID;
        case NFS4_REVOKED_STATEID_TYPE:
                /* Already known revoked: just ack it */
                goto out_free;
        }

        status = nfs41_test_stateid(server, stateid, cred);
        switch (status) {
        case -NFS4ERR_EXPIRED:
        case -NFS4ERR_ADMIN_REVOKED:
        case -NFS4ERR_DELEG_REVOKED:
                break;
        default:
                return status;
        }
out_free:
        /* Ack the revoked state to the server */
        nfs41_free_stateid(server, stateid, cred, true);
        return -NFS4ERR_EXPIRED;
}
|
|
|
|
|
2019-07-22 17:32:59 +00:00
|
|
|
/*
 * NFSv4.1: verify the delegation stateid held for @state's inode.
 *
 * Only delegations flagged NFS_DELEGATION_TEST_EXPIRED are checked; the
 * stateid and credential are copied out under delegation->lock (inside
 * the RCU read section) so TEST/FREE_STATEID can run unlocked.  An
 * expired/bad stateid causes the delegation to be dropped.  Returns
 * NFS_OK when nothing (more) needs doing, otherwise the test status.
 */
static int nfs41_check_delegation_stateid(struct nfs4_state *state)
{
        struct nfs_server *server = NFS_SERVER(state->inode);
        nfs4_stateid stateid;
        struct nfs_delegation *delegation;
        const struct cred *cred = NULL;
        int status, ret = NFS_OK;

        /* Get the delegation credential for use by test/free_stateid */
        rcu_read_lock();
        delegation = rcu_dereference(NFS_I(state->inode)->delegation);
        if (delegation == NULL) {
                rcu_read_unlock();
                nfs_state_clear_delegation(state);
                return NFS_OK;
        }

        spin_lock(&delegation->lock);
        nfs4_stateid_copy(&stateid, &delegation->stateid);

        if (!test_and_clear_bit(NFS_DELEGATION_TEST_EXPIRED,
                                &delegation->flags)) {
                /* Nobody asked for this delegation to be tested */
                spin_unlock(&delegation->lock);
                rcu_read_unlock();
                return NFS_OK;
        }

        if (delegation->cred)
                cred = get_cred(delegation->cred);
        spin_unlock(&delegation->lock);
        rcu_read_unlock();
        status = nfs41_test_and_free_expired_stateid(server, &stateid, cred);
        trace_nfs4_test_delegation_stateid(state, NULL, status);
        if (status == -NFS4ERR_EXPIRED || status == -NFS4ERR_BAD_STATEID)
                nfs_finish_clear_delegation_stateid(state, &stateid);
        else
                ret = status;

        put_cred(cred);
        return ret;
}
|
|
|
|
|
|
|
|
/*
 * After testing the delegation, re-validate @state's use of it: if the
 * open state still points at the (surviving) delegation stateid, refresh
 * it; otherwise drop the cached delegation state.
 */
static void nfs41_delegation_recover_stateid(struct nfs4_state *state)
{
        nfs4_stateid tmp;

        if (test_bit(NFS_DELEGATED_STATE, &state->flags) &&
            nfs4_copy_delegation_stateid(state->inode, state->state,
                                &tmp, NULL) &&
            nfs4_stateid_match_other(&state->stateid, &tmp))
                nfs_state_set_delegation(state, &tmp, state->state);
        else
                nfs_state_clear_delegation(state);
}
|
|
|
|
|
2016-09-22 17:39:03 +00:00
|
|
|
/**
 * nfs41_check_expired_locks - possibly free a lock stateid
 *
 * @state: NFSv4 state for an inode
 *
 * Returns NFS_OK if recovery for this stateid is now finished.
 * Otherwise a negative NFS4ERR value is returned.
 */
static int nfs41_check_expired_locks(struct nfs4_state *state)
{
        int status, ret = NFS_OK;
        struct nfs4_lock_state *lsp, *prev = NULL;
        struct nfs_server *server = NFS_SERVER(state->inode);

        if (!test_bit(LK_STATE_IN_USE, &state->flags))
                goto out;

        spin_lock(&state->state_lock);
        list_for_each_entry(lsp, &state->lock_states, ls_locks) {
                if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags)) {
                        const struct cred *cred = lsp->ls_state->owner->so_cred;

                        /* Pin the current entry, then drop the lock so the
                         * TEST/FREE_STATEID RPC can sleep.  'prev' keeps the
                         * previous entry alive until we're past it, so the
                         * list walk stays valid across the unlocked window. */
                        refcount_inc(&lsp->ls_count);
                        spin_unlock(&state->state_lock);

                        nfs4_put_lock_state(prev);
                        prev = lsp;

                        status = nfs41_test_and_free_expired_stateid(server,
                                        &lsp->ls_stateid,
                                        cred);
                        trace_nfs4_test_lock_stateid(state, lsp, status);
                        if (status == -NFS4ERR_EXPIRED ||
                            status == -NFS4ERR_BAD_STATEID) {
                                clear_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags);
                                lsp->ls_stateid.type = NFS4_INVALID_STATEID_TYPE;
                                if (!recover_lost_locks)
                                        set_bit(NFS_LOCK_LOST, &lsp->ls_flags);
                        } else if (status != NFS_OK) {
                                ret = status;
                                nfs4_put_lock_state(prev);
                                goto out;
                        }
                        spin_lock(&state->state_lock);
                }
        }
        spin_unlock(&state->state_lock);
        nfs4_put_lock_state(prev);
out:
        return ret;
}
|
|
|
|
|
2012-07-11 20:30:14 +00:00
|
|
|
/**
 * nfs41_check_open_stateid - possibly free an open stateid
 *
 * @state: NFSv4 state for an inode
 *
 * Returns NFS_OK if recovery for this stateid is now finished.
 * Otherwise a negative NFS4ERR value is returned.
 */
static int nfs41_check_open_stateid(struct nfs4_state *state)
{
        struct nfs_server *server = NFS_SERVER(state->inode);
        nfs4_stateid *stateid = &state->open_stateid;
        const struct cred *cred = state->owner->so_cred;
        int status;

        if (test_bit(NFS_OPEN_STATE, &state->flags) == 0)
                return -NFS4ERR_BAD_STATEID;
        status = nfs41_test_and_free_expired_stateid(server, stateid, cred);
        trace_nfs4_test_open_stateid(state, NULL, status);
        if (status == -NFS4ERR_EXPIRED || status == -NFS4ERR_BAD_STATEID) {
                /* Stateid is gone: invalidate it and force a fresh OPEN */
                nfs_state_clear_open_state_flags(state);
                stateid->type = NFS4_INVALID_STATEID_TYPE;
                return status;
        }
        if (nfs_open_stateid_recover_openmode(state))
                return -NFS4ERR_OPENMODE;
        return NFS_OK;
}
|
|
|
|
|
|
|
|
/*
 * NFSv4.1 variant of open-state recovery after lease expiry.
 * Checks (in order) the delegation, lock, and open stateids; falls back
 * to a full nfs4_open_expired() reclaim if the open stateid is bad.
 */
static int nfs41_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state)
{
	int status;

	/* Delegation stateid must be validated before locks and opens */
	status = nfs41_check_delegation_stateid(state);
	if (status != NFS_OK)
		return status;
	nfs41_delegation_recover_stateid(state);

	status = nfs41_check_expired_locks(state);
	if (status != NFS_OK)
		return status;
	status = nfs41_check_open_stateid(state);
	if (status != NFS_OK)
		/* Open stateid is gone: re-open the file from scratch */
		status = nfs4_open_expired(sp, state);
	return status;
}
|
|
|
|
#endif
|
|
|
|
|
2007-06-05 18:49:03 +00:00
|
|
|
/*
|
|
|
|
* on an EXCLUSIVE create, the server should send back a bitmask with FATTR4-*
|
|
|
|
* fields corresponding to attributes that were used to store the verifier.
|
|
|
|
* Make sure we clobber those fields in the later setattr call
|
|
|
|
*/
|
2018-03-28 20:18:17 +00:00
|
|
|
static unsigned nfs4_exclusive_attrset(struct nfs4_opendata *opendata,
|
2015-08-26 13:13:37 +00:00
|
|
|
struct iattr *sattr, struct nfs4_label **label)
|
2007-06-05 18:49:03 +00:00
|
|
|
{
|
2018-03-28 20:18:17 +00:00
|
|
|
const __u32 *bitmask = opendata->o_arg.server->exclcreat_bitmask;
|
|
|
|
__u32 attrset[3];
|
|
|
|
unsigned ret;
|
|
|
|
unsigned i;
|
2015-08-26 13:13:37 +00:00
|
|
|
|
2018-03-28 20:18:17 +00:00
|
|
|
for (i = 0; i < ARRAY_SIZE(attrset); i++) {
|
|
|
|
attrset[i] = opendata->o_res.attrset[i];
|
|
|
|
if (opendata->o_arg.createmode == NFS4_CREATE_EXCLUSIVE4_1)
|
|
|
|
attrset[i] &= ~bitmask[i];
|
|
|
|
}
|
|
|
|
|
|
|
|
ret = (opendata->o_arg.createmode == NFS4_CREATE_EXCLUSIVE) ?
|
|
|
|
sattr->ia_valid : 0;
|
2007-06-05 18:49:03 +00:00
|
|
|
|
2018-03-28 20:18:17 +00:00
|
|
|
if ((attrset[1] & (FATTR4_WORD1_TIME_ACCESS|FATTR4_WORD1_TIME_ACCESS_SET))) {
|
|
|
|
if (sattr->ia_valid & ATTR_ATIME_SET)
|
|
|
|
ret |= ATTR_ATIME_SET;
|
|
|
|
else
|
|
|
|
ret |= ATTR_ATIME;
|
|
|
|
}
|
2015-08-26 13:13:37 +00:00
|
|
|
|
2018-03-28 20:18:17 +00:00
|
|
|
if ((attrset[1] & (FATTR4_WORD1_TIME_MODIFY|FATTR4_WORD1_TIME_MODIFY_SET))) {
|
|
|
|
if (sattr->ia_valid & ATTR_MTIME_SET)
|
|
|
|
ret |= ATTR_MTIME_SET;
|
|
|
|
else
|
|
|
|
ret |= ATTR_MTIME;
|
|
|
|
}
|
2015-08-26 13:13:37 +00:00
|
|
|
|
2018-03-28 20:18:17 +00:00
|
|
|
if (!(attrset[2] & FATTR4_WORD2_SECURITY_LABEL))
|
2015-08-26 13:13:37 +00:00
|
|
|
*label = NULL;
|
2018-03-28 20:18:17 +00:00
|
|
|
return ret;
|
2007-06-05 18:49:03 +00:00
|
|
|
}
|
|
|
|
|
2013-02-07 19:26:21 +00:00
|
|
|
/*
 * Issue the OPEN RPC and bind the resulting nfs4_state to @ctx.
 * Also splices a negative dentry onto the new inode, updates the dentry
 * verifier, parses any piggy-backed LAYOUTGET, and verifies access.
 */
static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata,
		struct nfs_open_context *ctx)
{
	struct nfs4_state_owner *sp = opendata->owner;
	struct nfs_server *server = sp->so_server;
	struct dentry *dentry;
	struct nfs4_state *state;
	fmode_t acc_mode = _nfs4_ctx_to_accessmode(ctx);
	struct inode *dir = d_inode(opendata->dir);
	unsigned long dir_verifier;
	int ret;

	/* Sample the directory change attribute before the OPEN */
	dir_verifier = nfs_save_change_attribute(dir);

	ret = _nfs4_proc_open(opendata, ctx);
	if (ret != 0)
		goto out;

	state = _nfs4_opendata_to_nfs4_state(opendata);
	ret = PTR_ERR(state);
	if (IS_ERR(state))
		goto out;
	ctx->state = state;
	if (server->caps & NFS_CAP_POSIX_LOCK)
		set_bit(NFS_STATE_POSIX_LOCKS, &state->flags);
	if (opendata->o_res.rflags & NFS4_OPEN_RESULT_MAY_NOTIFY_LOCK)
		set_bit(NFS_STATE_MAY_NOTIFY_LOCK, &state->flags);
	if (opendata->o_res.rflags & NFS4_OPEN_RESULT_PRESERVE_UNLINKED)
		set_bit(NFS_INO_PRESERVE_UNLINKED, &NFS_I(state->inode)->flags);

	/* A negative dentry needs to be attached to the new inode */
	dentry = opendata->dentry;
	if (d_really_is_negative(dentry)) {
		struct dentry *alias;
		d_drop(dentry);
		alias = d_exact_alias(dentry, state->inode);
		if (!alias)
			alias = d_splice_alias(igrab(state->inode), dentry);
		/* d_splice_alias() can't fail here - it's a non-directory */
		if (alias) {
			dput(ctx->dentry);
			ctx->dentry = dentry = alias;
		}
	}

	switch(opendata->o_arg.claim) {
	default:
		break;
	case NFS4_OPEN_CLAIM_NULL:
	case NFS4_OPEN_CLAIM_DELEGATE_CUR:
	case NFS4_OPEN_CLAIM_DELEGATE_PREV:
		if (!opendata->rpc_done)
			break;
		/* With a delegation, our OPEN changed the directory, so
		 * re-sample the change attribute after the RPC. */
		if (opendata->o_res.delegation_type != 0)
			dir_verifier = nfs_save_change_attribute(dir);
		nfs_set_verifier(dentry, dir_verifier);
	}

	/* Parse layoutget results before we check for access */
	pnfs_parse_lgopen(state->inode, opendata->lgp, ctx);

	ret = nfs4_opendata_access(sp->so_cred, opendata, state, acc_mode);
	if (ret != 0)
		goto out;

	if (d_inode(dentry) == state->inode)
		nfs_inode_attach_open_context(ctx);

out:
	/* Unless the open was handed off asynchronously, release the
	 * piggy-backed LAYOUTGET and the session slot. */
	if (!opendata->cancelled) {
		if (opendata->lgp) {
			nfs4_lgopen_release(opendata->lgp);
			opendata->lgp = NULL;
		}
		nfs4_sequence_free_slot(&opendata->o_res.seq_res);
	}
	return ret;
}
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
/*
 * Returns a referenced nfs4_state
 */
static int _nfs4_do_open(struct inode *dir,
			struct nfs_open_context *ctx,
			int flags,
			const struct nfs4_open_createattrs *c,
			int *opened)
{
	struct nfs4_state_owner *sp;
	struct nfs4_state *state = NULL;
	struct nfs_server *server = NFS_SERVER(dir);
	struct nfs4_opendata *opendata;
	struct dentry *dentry = ctx->dentry;
	const struct cred *cred = ctx->cred;
	struct nfs4_threshold **ctx_th = &ctx->mdsthreshold;
	fmode_t fmode = _nfs4_ctx_to_openmode(ctx);
	enum open_claim_type4 claim = NFS4_OPEN_CLAIM_NULL;
	struct iattr *sattr = c->sattr;
	struct nfs4_label *label = c->label;
	int status;

	/* Protect against reboot recovery conflicts */
	status = -ENOMEM;
	sp = nfs4_get_state_owner(server, cred, GFP_KERNEL);
	if (sp == NULL) {
		dprintk("nfs4_do_open: nfs4_get_state_owner failed!\n");
		goto out_err;
	}
	status = nfs4_client_recover_expired_lease(server->nfs_client);
	if (status != 0)
		goto err_put_state_owner;
	/* A conflicting delegation on an existing inode must be returned
	 * before we attempt the OPEN. */
	if (d_really_is_positive(dentry))
		nfs4_return_incompatible_delegation(d_inode(dentry), fmode);
	status = -ENOMEM;
	if (d_really_is_positive(dentry))
		claim = NFS4_OPEN_CLAIM_FH;
	opendata = nfs4_opendata_alloc(dentry, sp, fmode, flags,
			c, claim, GFP_KERNEL);
	if (opendata == NULL)
		goto err_put_state_owner;

	/* Request pNFS mdsthreshold attributes if the server supports them */
	if (server->attr_bitmask[2] & FATTR4_WORD2_MDSTHRESHOLD) {
		if (!opendata->f_attr.mdsthreshold) {
			opendata->f_attr.mdsthreshold = pnfs_mdsthreshold_alloc();
			if (!opendata->f_attr.mdsthreshold)
				goto err_opendata_put;
		}
		opendata->o_arg.open_bitmap = &nfs4_pnfs_open_bitmap[0];
	}
	if (d_really_is_positive(dentry))
		opendata->state = nfs4_get_open_state(d_inode(dentry), sp);

	status = _nfs4_open_and_get_state(opendata, ctx);
	if (status != 0)
		goto err_opendata_put;
	state = ctx->state;

	if ((opendata->o_arg.open_flags & (O_CREAT|O_EXCL)) == (O_CREAT|O_EXCL) &&
	    (opendata->o_arg.createmode != NFS4_CREATE_GUARDED)) {
		unsigned attrs = nfs4_exclusive_attrset(opendata, sattr, &label);
		/*
		 * send create attributes which was not set by open
		 * with an extra setattr.
		 */
		if (attrs || label) {
			unsigned ia_old = sattr->ia_valid;

			/* Temporarily restrict ia_valid to the attributes
			 * that must be resent; restored below. */
			sattr->ia_valid = attrs;
			nfs_fattr_init(opendata->o_res.f_attr);
			status = nfs4_do_setattr(state->inode, cred,
					opendata->o_res.f_attr, sattr,
					ctx, label);
			if (status == 0) {
				nfs_setattr_update_inode(state->inode, sattr,
						opendata->o_res.f_attr);
				nfs_setsecurity(state->inode, opendata->o_res.f_attr);
			}
			sattr->ia_valid = ia_old;
		}
	}
	if (opened && opendata->file_created)
		*opened = 1;

	/* Transfer ownership of the mdsthreshold to the open context */
	if (pnfs_use_threshold(ctx_th, opendata->f_attr.mdsthreshold, server)) {
		*ctx_th = opendata->f_attr.mdsthreshold;
		opendata->f_attr.mdsthreshold = NULL;
	}

	nfs4_opendata_put(opendata);
	nfs4_put_state_owner(sp);
	return 0;
err_opendata_put:
	nfs4_opendata_put(opendata);
err_put_state_owner:
	nfs4_put_state_owner(sp);
out_err:
	return status;
}
|
|
|
|
|
|
|
|
|
2012-05-23 09:02:35 +00:00
|
|
|
/*
 * Wrapper around _nfs4_do_open() that handles retryable NFSv4 errors.
 * On success returns the referenced nfs4_state now held by @ctx; on
 * failure returns an ERR_PTR.
 */
static struct nfs4_state *nfs4_do_open(struct inode *dir,
					struct nfs_open_context *ctx,
					int flags,
					struct iattr *sattr,
					struct nfs4_label *label,
					int *opened)
{
	struct nfs_server *server = NFS_SERVER(dir);
	struct nfs4_exception exception = {
		.interruptible = true,
	};
	struct nfs4_state *res;
	struct nfs4_open_createattrs c = {
		.label = label,
		.sattr = sattr,
		/* Exclusive-create verifier: time + pid is "unique enough" */
		.verf = {
			[0] = (__u32)jiffies,
			[1] = (__u32)current->pid,
		},
	};
	int status;

	do {
		status = _nfs4_do_open(dir, ctx, flags, &c, opened);
		res = ctx->state;
		trace_nfs4_open_file(ctx, flags, status);
		if (status == 0)
			break;
		/* NOTE: BAD_SEQID means the server and client disagree about the
		 * book-keeping w.r.t. state-changing operations
		 * (OPEN/CLOSE/LOCK/LOCKU...)
		 * It is actually a sign of a bug on the client or on the server.
		 *
		 * If we receive a BAD_SEQID error in the particular case of
		 * doing an OPEN, we assume that nfs_increment_open_seqid() will
		 * have unhashed the old state_owner for us, and that we can
		 * therefore safely retry using a new one. We should still warn
		 * the user though...
		 */
		if (status == -NFS4ERR_BAD_SEQID) {
			pr_warn_ratelimited("NFS: v4 server %s "
					" returned a bad sequence-id error!\n",
					NFS_SERVER(dir)->nfs_client->cl_hostname);
			exception.retry = 1;
			continue;
		}
		/*
		 * BAD_STATEID on OPEN means that the server cancelled our
		 * state before it received the OPEN_CONFIRM.
		 * Recover by retrying the request as per the discussion
		 * on Page 181 of RFC3530.
		 */
		if (status == -NFS4ERR_BAD_STATEID) {
			exception.retry = 1;
			continue;
		}
		if (status == -NFS4ERR_EXPIRED) {
			/* Lease expired: kick off recovery and retry */
			nfs4_schedule_lease_recovery(server->nfs_client);
			exception.retry = 1;
			continue;
		}
		if (status == -EAGAIN) {
			/* We must have found a delegation */
			exception.retry = 1;
			continue;
		}
		if (nfs4_clear_cap_atomic_open_v1(server, status, &exception))
			continue;
		res = ERR_PTR(nfs4_handle_exception(server,
					status, &exception));
	} while (exception.retry);
	return res;
}
|
|
|
|
|
2016-06-26 12:44:35 +00:00
|
|
|
/*
 * Issue a single SETATTR RPC, selecting the most appropriate stateid:
 * a write delegation stateid, an open/lock stateid from @ctx, or the
 * zero stateid when no open mode check should apply.
 */
static int _nfs4_do_setattr(struct inode *inode,
			    struct nfs_setattrargs *arg,
			    struct nfs_setattrres *res,
			    const struct cred *cred,
			    struct nfs_open_context *ctx)
{
	struct nfs_server *server = NFS_SERVER(inode);
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETATTR],
		.rpc_argp = arg,
		.rpc_resp = res,
		.rpc_cred = cred,
	};
	const struct cred *delegation_cred = NULL;
	unsigned long timestamp = jiffies;
	bool truncate;
	int status;

	nfs_fattr_init(res->fattr);

	/* Servers should only apply open mode checks for file size changes */
	truncate = (arg->iap->ia_valid & ATTR_SIZE) ? true : false;
	if (!truncate) {
		/* Not a size change: return any delegation and use the
		 * zero stateid so no open mode check is triggered. */
		nfs4_inode_make_writeable(inode);
		goto zero_stateid;
	}

	if (nfs4_copy_delegation_stateid(inode, FMODE_WRITE, &arg->stateid, &delegation_cred)) {
		/* Use that stateid */
	} else if (ctx != NULL && ctx->state) {
		struct nfs_lock_context *l_ctx;
		if (!nfs4_valid_open_stateid(ctx->state))
			return -EBADF;
		l_ctx = nfs_get_lock_context(ctx);
		if (IS_ERR(l_ctx))
			return PTR_ERR(l_ctx);
		status = nfs4_select_rw_stateid(ctx->state, FMODE_WRITE, l_ctx,
				&arg->stateid, &delegation_cred);
		nfs_put_lock_context(l_ctx);
		if (status == -EIO)
			return -EBADF;
		else if (status == -EAGAIN)
			goto zero_stateid;
	} else {
zero_stateid:
		nfs4_stateid_copy(&arg->stateid, &zero_stateid);
	}
	/* A delegation stateid must be used with the delegation's cred */
	if (delegation_cred)
		msg.rpc_cred = delegation_cred;

	status = nfs4_call_sync(server->client, server, &msg, &arg->seq_args, &res->seq_res, 1);

	put_cred(delegation_cred);
	if (status == 0 && ctx != NULL)
		renew_lease(server, timestamp);
	trace_nfs4_setattr(inode, &arg->stateid, status);
	return status;
}
|
|
|
|
|
2018-12-03 00:30:31 +00:00
|
|
|
/*
 * Retry wrapper around _nfs4_do_setattr(): rebuilds the attribute
 * bitmap on each attempt and handles recoverable NFSv4 errors,
 * with special treatment of bogus NFS4ERR_OPENMODE responses.
 */
static int nfs4_do_setattr(struct inode *inode, const struct cred *cred,
			   struct nfs_fattr *fattr, struct iattr *sattr,
			   struct nfs_open_context *ctx, struct nfs4_label *ilabel)
{
	struct nfs_server *server = NFS_SERVER(inode);
	__u32 bitmask[NFS4_BITMASK_SZ];
	struct nfs4_state *state = ctx ? ctx->state : NULL;
	struct nfs_setattrargs arg = {
		.fh = NFS_FH(inode),
		.iap = sattr,
		.server = server,
		.bitmask = bitmask,
		.label = ilabel,
	};
	struct nfs_setattrres res = {
		.fattr = fattr,
		.server = server,
	};
	struct nfs4_exception exception = {
		.state = state,
		.inode = inode,
		.stateid = &arg.stateid,
	};
	unsigned long adjust_flags = NFS_INO_INVALID_CHANGE;
	int err;

	/* Widen the cache invalidation to cover what this SETATTR changes */
	if (sattr->ia_valid & (ATTR_MODE | ATTR_KILL_SUID | ATTR_KILL_SGID))
		adjust_flags |= NFS_INO_INVALID_MODE;
	if (sattr->ia_valid & (ATTR_UID | ATTR_GID))
		adjust_flags |= NFS_INO_INVALID_OTHER;

	do {
		nfs4_bitmap_copy_adjust(bitmask, nfs4_bitmask(server, fattr->label),
				inode, adjust_flags);

		err = _nfs4_do_setattr(inode, &arg, &res, cred, ctx);
		switch (err) {
		case -NFS4ERR_OPENMODE:
			/* Per RFC, OPENMODE is only valid for size changes */
			if (!(sattr->ia_valid & ATTR_SIZE)) {
				pr_warn_once("NFSv4: server %s is incorrectly "
						"applying open mode checks to "
						"a SETATTR that is not "
						"changing file size.\n",
						server->nfs_client->cl_hostname);
			}
			if (state && !(state->state & FMODE_WRITE)) {
				err = -EBADF;
				if (sattr->ia_valid & ATTR_OPEN)
					err = -EACCES;
				goto out;
			}
		}
		err = nfs4_handle_exception(server, err, &exception);
	} while (exception.retry);
out:
	return err;
}
|
|
|
|
|
2015-09-22 03:35:22 +00:00
|
|
|
static bool
|
|
|
|
nfs4_wait_on_layoutreturn(struct inode *inode, struct rpc_task *task)
|
|
|
|
{
|
|
|
|
if (inode == NULL || !nfs_have_layout(inode))
|
|
|
|
return false;
|
|
|
|
|
|
|
|
return pnfs_wait_on_layoutreturn(inode, task);
|
|
|
|
}
|
|
|
|
|
2019-09-20 11:23:47 +00:00
|
|
|
/*
 * Update the seqid of an open stateid
 *
 * Lockless read of state->open_stateid under the state seqlock,
 * retrying whenever a concurrent writer invalidates the read.
 */
static void nfs4_sync_open_stateid(nfs4_stateid *dst,
		struct nfs4_state *state)
{
	__be32 seqid_open;
	u32 dst_seqid;
	int seq;

	for (;;) {
		if (!nfs4_valid_open_stateid(state))
			break;
		seq = read_seqbegin(&state->seqlock);
		if (!nfs4_state_match_open_stateid_other(state, dst)) {
			/* "other" part differs: take the full current stateid */
			nfs4_stateid_copy(dst, &state->open_stateid);
			if (read_seqretry(&state->seqlock, seq))
				continue;
			break;
		}
		seqid_open = state->open_stateid.seqid;
		if (read_seqretry(&state->seqlock, seq))
			continue;

		/* Same stateid; only bump dst's seqid if it is behind */
		dst_seqid = be32_to_cpu(dst->seqid);
		if ((s32)(dst_seqid - be32_to_cpu(seqid_open)) < 0)
			dst->seqid = seqid_open;
		break;
	}
}
|
|
|
|
|
|
|
|
/*
 * Update the seqid of an open stateid after receiving
 * NFS4ERR_OLD_STATEID
 *
 * Returns true if @dst was updated and the caller should retry the
 * operation, false if the state is no longer valid or we were
 * interrupted while waiting for a newer stateid to arrive.
 */
static bool nfs4_refresh_open_old_stateid(nfs4_stateid *dst,
		struct nfs4_state *state)
{
	__be32 seqid_open;
	u32 dst_seqid;
	bool ret;
	int seq, status = -EAGAIN;
	DEFINE_WAIT(wait);

	for (;;) {
		ret = false;
		if (!nfs4_valid_open_stateid(state))
			break;
		seq = read_seqbegin(&state->seqlock);
		if (!nfs4_state_match_open_stateid_other(state, dst)) {
			/* Stateid "other" changed entirely: nothing to refresh */
			if (read_seqretry(&state->seqlock, seq))
				continue;
			break;
		}

		write_seqlock(&state->seqlock);
		seqid_open = state->open_stateid.seqid;

		dst_seqid = be32_to_cpu(dst->seqid);

		/* Did another OPEN bump the state's seqid? try again: */
		if ((s32)(be32_to_cpu(seqid_open) - dst_seqid) > 0) {
			dst->seqid = seqid_open;
			write_sequnlock(&state->seqlock);
			ret = true;
			break;
		}

		/* server says we're behind but we haven't seen the update yet */
		set_bit(NFS_STATE_CHANGE_WAIT, &state->flags);
		prepare_to_wait(&state->waitq, &wait, TASK_KILLABLE);
		write_sequnlock(&state->seqlock);
		trace_nfs4_close_stateid_update_wait(state->inode, dst, 0);

		if (fatal_signal_pending(current))
			status = -EINTR;
		else
			if (schedule_timeout(5*HZ) != 0)
				status = 0;

		finish_wait(&state->waitq, &wait);

		/* Woken by a stateid update: re-evaluate from the top */
		if (!status)
			continue;
		if (status == -EINTR)
			break;

		/* we slept the whole 5 seconds, we must have lost a seqid */
		dst->seqid = cpu_to_be32(dst_seqid + 1);
		ret = true;
		break;
	}

	return ret;
}
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
/*
 * Per-CLOSE/OPEN_DOWNGRADE RPC call data; freed by nfs4_free_closedata().
 */
struct nfs4_closedata {
	struct inode *inode;		/* inode being closed */
	struct nfs4_state *state;	/* referenced open state */
	struct nfs_closeargs arg;	/* CLOSE RPC arguments */
	struct nfs_closeres res;	/* CLOSE RPC results */
	/* piggy-backed layoutreturn (return-on-close) bookkeeping */
	struct {
		struct nfs4_layoutreturn_args arg;
		struct nfs4_layoutreturn_res res;
		struct nfs4_xdr_opaque_data ld_private;
		u32 roc_barrier;
		bool roc;		/* true if return-on-close is in effect */
	} lr;
	struct nfs_fattr fattr;		/* post-op attributes */
	unsigned long timestamp;	/* for lease renewal on success */
};
|
|
|
|
|
2006-01-03 08:55:04 +00:00
|
|
|
/*
 * rpc_call_done/release callback: tear down a struct nfs4_closedata,
 * completing any return-on-close and dropping all held references.
 */
static void nfs4_free_closedata(void *data)
{
	struct nfs4_closedata *calldata = data;
	struct nfs4_state_owner *sp = calldata->state->owner;
	struct super_block *sb = calldata->state->inode->i_sb;

	if (calldata->lr.roc)
		pnfs_roc_release(&calldata->lr.arg, &calldata->lr.res,
				calldata->res.lr_ret);
	/* Drop references in order: state, seqid, owner, superblock */
	nfs4_put_open_state(calldata->state);
	nfs_free_seqid(calldata->arg.seqid);
	nfs4_put_state_owner(sp);
	nfs_sb_deactive(sb);
	kfree(calldata);
}
|
|
|
|
|
2006-01-03 08:55:04 +00:00
|
|
|
/*
 * rpc_call_done callback for CLOSE/OPEN_DOWNGRADE: process the server's
 * reply, handling stateid races and recoverable errors by restarting
 * the RPC where appropriate.
 */
static void nfs4_close_done(struct rpc_task *task, void *data)
{
	struct nfs4_closedata *calldata = data;
	struct nfs4_state *state = calldata->state;
	struct nfs_server *server = NFS_SERVER(calldata->inode);
	nfs4_stateid *res_stateid = NULL;
	struct nfs4_exception exception = {
		.state = state,
		.inode = calldata->inode,
		.stateid = &calldata->arg.stateid,
	};

	if (!nfs4_sequence_done(task, &calldata->res.seq_res))
		return;
	trace_nfs4_close(state, &calldata->arg, &calldata->res, task->tk_status);

	/* Handle Layoutreturn errors */
	if (pnfs_roc_done(task, &calldata->arg.lr_args, &calldata->res.lr_res,
			  &calldata->res.lr_ret) == -EAGAIN)
		goto out_restart;

	/* hmm. we are done with the inode, and in the process of freeing
	 * the state_owner. we keep this around to process errors
	 */
	switch (task->tk_status) {
	case 0:
		res_stateid = &calldata->res.stateid;
		renew_lease(server, calldata->timestamp);
		break;
	case -NFS4ERR_ACCESS:
		/* Retry without requesting post-op attributes */
		if (calldata->arg.bitmask != NULL) {
			calldata->arg.bitmask = NULL;
			calldata->res.fattr = NULL;
			goto out_restart;
		}
		break;
	case -NFS4ERR_OLD_STATEID:
		/* Did we race with OPEN? */
		if (nfs4_refresh_open_old_stateid(&calldata->arg.stateid,
					state))
			goto out_restart;
		goto out_release;
	case -NFS4ERR_ADMIN_REVOKED:
	case -NFS4ERR_STALE_STATEID:
	case -NFS4ERR_EXPIRED:
		nfs4_free_revoked_stateid(server,
				&calldata->arg.stateid,
				task->tk_msg.rpc_cred);
		fallthrough;
	case -NFS4ERR_BAD_STATEID:
		/* A downgrade (fmode != 0) with a bad stateid must go
		 * through full exception handling. */
		if (calldata->arg.fmode == 0)
			break;
		fallthrough;
	default:
		task->tk_status = nfs4_async_handle_exception(task,
				server, task->tk_status, &exception);
		if (exception.retry)
			goto out_restart;
	}
	nfs_clear_open_stateid(state, &calldata->arg.stateid,
			res_stateid, calldata->arg.fmode);
out_release:
	task->tk_status = 0;
	nfs_release_seqid(calldata->arg.seqid);
	nfs_refresh_inode(calldata->inode, &calldata->fattr);
	dprintk("%s: ret = %d\n", __func__, task->tk_status);
	return;
out_restart:
	task->tk_status = 0;
	rpc_restart_call_prepare(task);
	goto out_release;
}
|
|
|
|
|
2006-01-03 08:55:05 +00:00
|
|
|
/*
 * Prepare an asynchronous CLOSE or OPEN_DOWNGRADE RPC.
 *
 * Decides, under the state owner's lock, whether the open state should be
 * fully closed or merely downgraded (when some share modes are still in
 * use), selects the matching RPC procedure, and sets up the slot/sequence
 * for the call.  @data is the struct nfs4_closedata attached to the task.
 */
static void nfs4_close_prepare(struct rpc_task *task, void *data)
{
	struct nfs4_closedata *calldata = data;
	struct nfs4_state *state = calldata->state;
	struct inode *inode = calldata->inode;
	struct nfs_server *server = NFS_SERVER(inode);
	struct pnfs_layout_hdr *lo;
	bool is_rdonly, is_wronly, is_rdwr;
	int call_close = 0;

	/* Serialize on the open seqid; the task is queued if it must wait. */
	if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0)
		goto out_wait;

	/* Default to OPEN_DOWNGRADE; switched to CLOSE below if fmode == 0. */
	task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_DOWNGRADE];
	spin_lock(&state->owner->so_lock);
	is_rdwr = test_bit(NFS_O_RDWR_STATE, &state->flags);
	is_rdonly = test_bit(NFS_O_RDONLY_STATE, &state->flags);
	is_wronly = test_bit(NFS_O_WRONLY_STATE, &state->flags);
	/* Calculate the change in open mode */
	calldata->arg.fmode = 0;
	if (state->n_rdwr == 0) {
		if (state->n_rdonly == 0)
			call_close |= is_rdonly;
		else if (is_rdonly)
			calldata->arg.fmode |= FMODE_READ;
		if (state->n_wronly == 0)
			call_close |= is_wronly;
		else if (is_wronly)
			calldata->arg.fmode |= FMODE_WRITE;
		if (calldata->arg.fmode != (FMODE_READ|FMODE_WRITE))
			call_close |= is_rdwr;
	} else if (is_rdwr)
		calldata->arg.fmode |= FMODE_READ|FMODE_WRITE;

	nfs4_sync_open_stateid(&calldata->arg.stateid, state);
	/* A revoked/invalid stateid means there is nothing to close. */
	if (!nfs4_valid_open_stateid(state))
		call_close = 0;
	spin_unlock(&state->owner->so_lock);

	if (!call_close) {
		/* Note: exit _without_ calling nfs4_close_done */
		goto out_no_action;
	}

	/* Wait for any outstanding layoutreturn unless we carry one (roc). */
	if (!calldata->lr.roc && nfs4_wait_on_layoutreturn(inode, task)) {
		nfs_release_seqid(calldata->arg.seqid);
		goto out_wait;
	}

	/* Drop the embedded layoutreturn if the layout went invalid. */
	lo = calldata->arg.lr_args ? calldata->arg.lr_args->layout : NULL;
	if (lo && !pnfs_layout_is_valid(lo)) {
		calldata->arg.lr_args = NULL;
		calldata->res.lr_res = NULL;
	}

	if (calldata->arg.fmode == 0)
		task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE];

	if (calldata->arg.fmode == 0 || calldata->arg.fmode == FMODE_READ) {
		/* Close-to-open cache consistency revalidation */
		if (!nfs4_have_delegation(inode, FMODE_READ)) {
			nfs4_bitmask_set(calldata->arg.bitmask_store,
					 server->cache_consistency_bitmask,
					 inode, 0);
			calldata->arg.bitmask = calldata->arg.bitmask_store;
		} else
			calldata->arg.bitmask = NULL;
	}

	calldata->arg.share_access =
		nfs4_map_atomic_open_share(NFS_SERVER(inode),
				calldata->arg.fmode, 0);

	/* Attribute request and fattr buffer must agree: both or neither. */
	if (calldata->res.fattr == NULL)
		calldata->arg.bitmask = NULL;
	else if (calldata->arg.bitmask == NULL)
		calldata->res.fattr = NULL;
	calldata->timestamp = jiffies;
	if (nfs4_setup_sequence(NFS_SERVER(inode)->nfs_client,
				&calldata->arg.seq_args,
				&calldata->res.seq_res,
				task) != 0)
		nfs_release_seqid(calldata->arg.seqid);
	return;
out_no_action:
	task->tk_action = NULL;
out_wait:
	nfs4_sequence_done(task, &calldata->res.seq_res);
}
|
|
|
|
|
2006-01-03 08:55:04 +00:00
|
|
|
/* RPC callbacks driving the asynchronous CLOSE/OPEN_DOWNGRADE task. */
static const struct rpc_call_ops nfs4_close_ops = {
	.rpc_call_prepare = nfs4_close_prepare,
	.rpc_call_done = nfs4_close_done,
	.rpc_release = nfs4_free_closedata,
};
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
/*
 * It is possible for data to be read/written from a mem-mapped file
 * after the sys_close call (which hits the vfs layer as a flush).
 * This means that we can't safely call nfsv4 close on a file until
 * the inode is cleared. This in turn means that we are not good
 * NFSv4 citizens - we do not indicate to the server to update the file's
 * share state even when we are done with one of the three share
 * stateid's in the inode.
 *
 * NOTE: Caller must be holding the sp->so_owner semaphore!
 */
int nfs4_do_close(struct nfs4_state *state, gfp_t gfp_mask, int wait)
{
	struct nfs_server *server = NFS_SERVER(state->inode);
	struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t);
	struct nfs4_closedata *calldata;
	struct nfs4_state_owner *sp = state->owner;
	struct rpc_task *task;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE],
		.rpc_cred = state->owner->so_cred,
	};
	struct rpc_task_setup task_setup_data = {
		.rpc_client = server->client,
		.rpc_message = &msg,
		.callback_ops = &nfs4_close_ops,
		.workqueue = nfsiod_workqueue,
		.flags = RPC_TASK_ASYNC | RPC_TASK_CRED_NOREF,
	};
	int status = -ENOMEM;

	if (nfs_server_capable(state->inode, NFS_CAP_MOVEABLE))
		task_setup_data.flags |= RPC_TASK_MOVEABLE;

	/* May substitute the machine cred/clnt for SP4_MACH_CRED cleanup. */
	nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_CLEANUP,
		&task_setup_data.rpc_client, &msg);

	calldata = kzalloc(sizeof(*calldata), gfp_mask);
	if (calldata == NULL)
		goto out;
	nfs4_init_sequence(&calldata->arg.seq_args, &calldata->res.seq_res, 1, 0);
	calldata->inode = state->inode;
	calldata->state = state;
	calldata->arg.fh = NFS_FH(state->inode);
	if (!nfs4_copy_open_stateid(&calldata->arg.stateid, state))
		goto out_free_calldata;
	/* Serialization for the sequence id */
	alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid;
	calldata->arg.seqid = alloc_seqid(&state->owner->so_seqid, gfp_mask);
	if (IS_ERR(calldata->arg.seqid))
		goto out_free_calldata;
	nfs_fattr_init(&calldata->fattr);
	calldata->arg.fmode = 0;
	calldata->lr.arg.ld_private = &calldata->lr.ld_private;
	calldata->res.fattr = &calldata->fattr;
	calldata->res.seqid = calldata->arg.seqid;
	calldata->res.server = server;
	calldata->res.lr_ret = -NFS4ERR_NOMATCHING_LAYOUT;
	/* Piggy-back a return-on-close layoutreturn if one is pending. */
	calldata->lr.roc = pnfs_roc(state->inode,
			&calldata->lr.arg, &calldata->lr.res, msg.rpc_cred);
	if (calldata->lr.roc) {
		calldata->arg.lr_args = &calldata->lr.arg;
		calldata->res.lr_res = &calldata->lr.res;
	}
	/* Pin the superblock until the task's rpc_release runs. */
	nfs_sb_active(calldata->inode->i_sb);

	msg.rpc_argp = &calldata->arg;
	msg.rpc_resp = &calldata->res;
	task_setup_data.callback_data = calldata;
	task = rpc_run_task(&task_setup_data);
	if (IS_ERR(task))
		return PTR_ERR(task);
	status = 0;
	if (wait)
		status = rpc_wait_for_completion_task(task);
	rpc_put_task(task);
	return status;
out_free_calldata:
	kfree(calldata);
out:
	/* On the failure path we drop the references the task would have. */
	nfs4_put_open_state(state);
	nfs4_put_state_owner(sp);
	return status;
}
|
|
|
|
|
2010-09-17 14:56:51 +00:00
|
|
|
static struct inode *
|
2013-09-23 22:01:28 +00:00
|
|
|
nfs4_atomic_open(struct inode *dir, struct nfs_open_context *ctx,
|
|
|
|
int open_flags, struct iattr *attr, int *opened)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
|
|
|
struct nfs4_state *state;
|
2022-10-19 17:12:11 +00:00
|
|
|
struct nfs4_label l, *label;
|
2013-05-22 16:50:44 +00:00
|
|
|
|
|
|
|
label = nfs4_label_init_security(dir, ctx->dentry, attr, &l);
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2007-10-15 22:17:53 +00:00
|
|
|
/* Protect against concurrent sillydeletes */
|
2013-09-23 22:01:28 +00:00
|
|
|
state = nfs4_do_open(dir, ctx, open_flags, attr, label, opened);
|
2013-05-22 16:50:44 +00:00
|
|
|
|
|
|
|
nfs4_label_release_security(label);
|
|
|
|
|
2010-09-17 14:56:50 +00:00
|
|
|
if (IS_ERR(state))
|
|
|
|
return ERR_CAST(state);
|
2013-05-29 17:11:28 +00:00
|
|
|
return state->inode;
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
2009-12-03 20:54:02 +00:00
|
|
|
static void nfs4_close_context(struct nfs_open_context *ctx, int is_sync)
|
2009-03-19 19:35:50 +00:00
|
|
|
{
|
|
|
|
if (ctx->state == NULL)
|
|
|
|
return;
|
|
|
|
if (is_sync)
|
2019-06-27 10:30:48 +00:00
|
|
|
nfs4_close_sync(ctx->state, _nfs4_ctx_to_openmode(ctx));
|
2009-03-19 19:35:50 +00:00
|
|
|
else
|
2019-06-27 10:30:48 +00:00
|
|
|
nfs4_close_state(ctx->state, _nfs4_ctx_to_openmode(ctx));
|
2009-03-19 19:35:50 +00:00
|
|
|
}
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2013-11-04 20:20:20 +00:00
|
|
|
/*
 * Masks covering every attribute bit up to and including the highest
 * attribute defined by each NFSv4 minor version (2*BIT - 1 sets all
 * lower bits).  Used to sanitize attribute bitmasks returned by the
 * server in _nfs4_server_capabilities().
 */
#define FATTR4_WORD1_NFS40_MASK (2*FATTR4_WORD1_MOUNTED_ON_FILEID - 1UL)
#define FATTR4_WORD2_NFS41_MASK (2*FATTR4_WORD2_SUPPATTR_EXCLCREAT - 1UL)
#define FATTR4_WORD2_NFS42_MASK (2*FATTR4_WORD2_XATTR_SUPPORT - 1UL)
|
2013-11-04 20:20:20 +00:00
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
/*
 * Query the server's supported attributes and capabilities (SERVER_CAPS)
 * and record the results in @server: the attribute bitmasks, ACL/hardlink/
 * symlink/case-sensitivity capabilities, which fattr fields the server can
 * supply, and the bitmasks used for cache consistency and exclusive create.
 *
 * Returns 0 on success or a negative NFS4ERR/errno value.
 */
static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle)
{
	u32 bitmask[3] = {}, minorversion = server->nfs_client->cl_minorversion;
	struct nfs4_server_caps_arg args = {
		.fhandle = fhandle,
		.bitmask = bitmask,
	};
	struct nfs4_server_caps_res res = {};
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SERVER_CAPS],
		.rpc_argp = &args,
		.rpc_resp = &res,
	};
	int status;
	int i;

	/* Attributes we ask the server to report support for. */
	bitmask[0] = FATTR4_WORD0_SUPPORTED_ATTRS |
		     FATTR4_WORD0_FH_EXPIRE_TYPE |
		     FATTR4_WORD0_LINK_SUPPORT |
		     FATTR4_WORD0_SYMLINK_SUPPORT |
		     FATTR4_WORD0_ACLSUPPORT |
		     FATTR4_WORD0_CASE_INSENSITIVE |
		     FATTR4_WORD0_CASE_PRESERVING;
	if (minorversion)
		bitmask[2] = FATTR4_WORD2_SUPPATTR_EXCLCREAT;

	status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
	if (status == 0) {
		/* Sanity check the server answers */
		switch (minorversion) {
		case 0:
			/* v4.0 defines no word-2 attributes at all. */
			res.attr_bitmask[1] &= FATTR4_WORD1_NFS40_MASK;
			res.attr_bitmask[2] = 0;
			break;
		case 1:
			res.attr_bitmask[2] &= FATTR4_WORD2_NFS41_MASK;
			break;
		case 2:
			res.attr_bitmask[2] &= FATTR4_WORD2_NFS42_MASK;
		}
		memcpy(server->attr_bitmask, res.attr_bitmask, sizeof(server->attr_bitmask));
		/* Reset caps before re-deriving them from the response. */
		server->caps &= ~(NFS_CAP_ACLS | NFS_CAP_HARDLINKS |
				  NFS_CAP_SYMLINKS| NFS_CAP_SECURITY_LABEL);
		server->fattr_valid = NFS_ATTR_FATTR_V4;
		if (res.attr_bitmask[0] & FATTR4_WORD0_ACL &&
				res.acl_bitmask & ACL4_SUPPORT_ALLOW_ACL)
			server->caps |= NFS_CAP_ACLS;
		if (res.has_links != 0)
			server->caps |= NFS_CAP_HARDLINKS;
		if (res.has_symlinks != 0)
			server->caps |= NFS_CAP_SYMLINKS;
		if (res.case_insensitive)
			server->caps |= NFS_CAP_CASE_INSENSITIVE;
		if (res.case_preserving)
			server->caps |= NFS_CAP_CASE_PRESERVING;
#ifdef CONFIG_NFS_V4_SECURITY_LABEL
		if (res.attr_bitmask[2] & FATTR4_WORD2_SECURITY_LABEL)
			server->caps |= NFS_CAP_SECURITY_LABEL;
#endif
		if (res.attr_bitmask[0] & FATTR4_WORD0_FS_LOCATIONS)
			server->caps |= NFS_CAP_FS_LOCATIONS;
		/* Clear fattr validity bits the server cannot supply. */
		if (!(res.attr_bitmask[0] & FATTR4_WORD0_FILEID))
			server->fattr_valid &= ~NFS_ATTR_FATTR_FILEID;
		if (!(res.attr_bitmask[1] & FATTR4_WORD1_MODE))
			server->fattr_valid &= ~NFS_ATTR_FATTR_MODE;
		if (!(res.attr_bitmask[1] & FATTR4_WORD1_NUMLINKS))
			server->fattr_valid &= ~NFS_ATTR_FATTR_NLINK;
		if (!(res.attr_bitmask[1] & FATTR4_WORD1_OWNER))
			server->fattr_valid &= ~(NFS_ATTR_FATTR_OWNER |
				NFS_ATTR_FATTR_OWNER_NAME);
		if (!(res.attr_bitmask[1] & FATTR4_WORD1_OWNER_GROUP))
			server->fattr_valid &= ~(NFS_ATTR_FATTR_GROUP |
				NFS_ATTR_FATTR_GROUP_NAME);
		if (!(res.attr_bitmask[1] & FATTR4_WORD1_SPACE_USED))
			server->fattr_valid &= ~NFS_ATTR_FATTR_SPACE_USED;
		if (!(res.attr_bitmask[1] & FATTR4_WORD1_TIME_ACCESS))
			server->fattr_valid &= ~NFS_ATTR_FATTR_ATIME;
		if (!(res.attr_bitmask[1] & FATTR4_WORD1_TIME_METADATA))
			server->fattr_valid &= ~NFS_ATTR_FATTR_CTIME;
		if (!(res.attr_bitmask[1] & FATTR4_WORD1_TIME_MODIFY))
			server->fattr_valid &= ~NFS_ATTR_FATTR_MTIME;
		/* Bitmask variant used when no security label is wanted. */
		memcpy(server->attr_bitmask_nl, res.attr_bitmask,
				sizeof(server->attr_bitmask));
		server->attr_bitmask_nl[2] &= ~FATTR4_WORD2_SECURITY_LABEL;

		/* Attributes consulted for close-to-open cache consistency. */
		memcpy(server->cache_consistency_bitmask, res.attr_bitmask, sizeof(server->cache_consistency_bitmask));
		server->cache_consistency_bitmask[0] &= FATTR4_WORD0_CHANGE|FATTR4_WORD0_SIZE;
		server->cache_consistency_bitmask[1] &= FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY;
		server->cache_consistency_bitmask[2] = 0;

		/* Avoid a regression due to buggy server */
		for (i = 0; i < ARRAY_SIZE(res.exclcreat_bitmask); i++)
			res.exclcreat_bitmask[i] &= res.attr_bitmask[i];
		memcpy(server->exclcreat_bitmask, res.exclcreat_bitmask,
			sizeof(server->exclcreat_bitmask));

		server->acl_bitmask = res.acl_bitmask;
		server->fh_expire_type = res.fh_expire_type;
	}

	return status;
}
|
|
|
|
|
2006-06-09 13:34:19 +00:00
|
|
|
int nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
2019-04-07 17:59:09 +00:00
|
|
|
struct nfs4_exception exception = {
|
|
|
|
.interruptible = true,
|
|
|
|
};
|
2005-04-16 22:20:36 +00:00
|
|
|
int err;
|
2021-10-14 17:55:04 +00:00
|
|
|
|
|
|
|
nfs4_server_set_init_caps(server);
|
2005-04-16 22:20:36 +00:00
|
|
|
do {
|
|
|
|
err = nfs4_handle_exception(server,
|
|
|
|
_nfs4_server_capabilities(server, fhandle),
|
|
|
|
&exception);
|
|
|
|
} while (exception.retry);
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2021-12-09 19:53:35 +00:00
|
|
|
/*
 * Probe each server name in an fs_locations entry as a potential trunked
 * transport: resolve the name to an address and, via rpc_clnt_add_xprt()
 * with the minor version's session_trunk test, add it to the client's
 * RPC transport list if the server proves to be the same instance.
 */
static void test_fs_location_for_trunking(struct nfs4_fs_location *location,
					  struct nfs_client *clp,
					  struct nfs_server *server)
{
	int i;

	for (i = 0; i < location->nservers; i++) {
		struct nfs4_string *srv_loc = &location->servers[i];
		struct sockaddr_storage addr;
		size_t addrlen;
		struct xprt_create xprt_args = {
			.ident = 0,
			.net = clp->cl_net,
		};
		struct nfs4_add_xprt_data xprtdata = {
			.clp = clp,
		};
		struct rpc_add_xprt_test rpcdata = {
			.add_xprt_test = clp->cl_mvops->session_trunk,
			.data = &xprtdata,
		};
		char *servername = NULL;

		if (!srv_loc->len)
			continue;

		addrlen = nfs_parse_server_name(srv_loc->data, srv_loc->len,
						&addr, sizeof(addr),
						clp->cl_net, server->port);
		/* Unresolvable name: give up on this location entirely. */
		if (!addrlen)
			return;
		xprt_args.dstaddr = (struct sockaddr *)&addr;
		xprt_args.addrlen = addrlen;
		/* NUL-terminated copy; nfs4_string data is not terminated. */
		servername = kmalloc(srv_loc->len + 1, GFP_KERNEL);
		if (!servername)
			return;
		memcpy(servername, srv_loc->data, srv_loc->len);
		servername[srv_loc->len] = '\0';
		xprt_args.servername = servername;

		xprtdata.cred = nfs4_get_clid_cred(clp);
		rpc_clnt_add_xprt(clp->cl_rpcclient, &xprt_args,
				  rpc_clnt_setup_test_and_add_xprt,
				  &rpcdata);
		if (xprtdata.cred)
			put_cred(xprtdata.cred);
		kfree(servername);
	}
}
|
|
|
|
|
2022-01-12 15:27:38 +00:00
|
|
|
/*
 * Discover additional transports to the same server (session trunking)
 * by fetching the filesystem's fs_locations and testing each listed
 * location.  Resources are released in reverse order via the goto chain.
 *
 * Returns 0 on success or a negative errno/NFS4ERR value.
 */
static int _nfs4_discover_trunking(struct nfs_server *server,
				   struct nfs_fh *fhandle)
{
	struct nfs4_fs_locations *locations = NULL;
	struct page *page;
	const struct cred *cred;
	struct nfs_client *clp = server->nfs_client;
	const struct nfs4_state_maintenance_ops *ops =
		clp->cl_mvops->state_renewal_ops;
	int status = -ENOMEM, i;

	/* Prefer the state-renewal cred, fall back to the clientid cred. */
	cred = ops->get_state_renewal_cred(clp);
	if (cred == NULL) {
		cred = nfs4_get_clid_cred(clp);
		if (cred == NULL)
			return -ENOKEY;
	}

	page = alloc_page(GFP_KERNEL);
	if (!page)
		goto out_put_cred;
	locations = kmalloc(sizeof(struct nfs4_fs_locations), GFP_KERNEL);
	if (!locations)
		goto out_free;
	locations->fattr = nfs_alloc_fattr();
	if (!locations->fattr)
		goto out_free_2;

	status = nfs4_proc_get_locations(server, fhandle, locations, page,
					 cred);
	if (status)
		goto out_free_3;

	for (i = 0; i < locations->nlocations; i++)
		test_fs_location_for_trunking(&locations->locations[i], clp,
					      server);
out_free_3:
	kfree(locations->fattr);
out_free_2:
	kfree(locations);
out_free:
	__free_page(page);
out_put_cred:
	put_cred(cred);
	return status;
}
|
|
|
|
|
|
|
|
static int nfs4_discover_trunking(struct nfs_server *server,
|
|
|
|
struct nfs_fh *fhandle)
|
|
|
|
{
|
|
|
|
struct nfs4_exception exception = {
|
|
|
|
.interruptible = true,
|
|
|
|
};
|
|
|
|
struct nfs_client *clp = server->nfs_client;
|
|
|
|
int err = 0;
|
|
|
|
|
|
|
|
if (!nfs4_has_session(clp))
|
|
|
|
goto out;
|
|
|
|
do {
|
|
|
|
err = nfs4_handle_exception(server,
|
|
|
|
_nfs4_discover_trunking(server, fhandle),
|
|
|
|
&exception);
|
|
|
|
} while (exception.retry);
|
|
|
|
out:
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
/*
 * Issue a synchronous LOOKUP_ROOT (PUTROOTFH+GETFH+GETATTR) to obtain the
 * server's pseudo-fs root filehandle and attributes into @fhandle and
 * @info->fattr.  Returns 0 or a negative NFS4ERR/errno value.
 */
static int _nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle,
		struct nfs_fsinfo *info)
{
	u32 bitmask[3];
	struct nfs4_lookup_root_arg args = {
		.bitmask = bitmask,
	};
	struct nfs4_lookup_res res = {
		.server = server,
		.fattr = info->fattr,
		.fh = fhandle,
	};
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP_ROOT],
		.rpc_argp = &args,
		.rpc_resp = &res,
	};

	bitmask[0] = nfs4_fattr_bitmap[0];
	bitmask[1] = nfs4_fattr_bitmap[1];
	/*
	 * Process the label in the upcoming getfattr
	 */
	bitmask[2] = nfs4_fattr_bitmap[2] & ~FATTR4_WORD2_SECURITY_LABEL;

	nfs_fattr_init(info->fattr);
	return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
}
|
|
|
|
|
|
|
|
static int nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle,
|
|
|
|
struct nfs_fsinfo *info)
|
|
|
|
{
|
2019-04-07 17:59:09 +00:00
|
|
|
struct nfs4_exception exception = {
|
|
|
|
.interruptible = true,
|
|
|
|
};
|
2005-04-16 22:20:36 +00:00
|
|
|
int err;
|
|
|
|
do {
|
2011-04-18 20:52:25 +00:00
|
|
|
err = _nfs4_lookup_root(server, fhandle, info);
|
2013-08-13 17:01:39 +00:00
|
|
|
trace_nfs4_lookup_root(server, fhandle, info->fattr, err);
|
2011-04-18 20:52:25 +00:00
|
|
|
switch (err) {
|
|
|
|
case 0:
|
|
|
|
case -NFS4ERR_WRONGSEC:
|
2012-03-27 22:13:02 +00:00
|
|
|
goto out;
|
2011-04-18 20:52:25 +00:00
|
|
|
default:
|
|
|
|
err = nfs4_handle_exception(server, err, &exception);
|
|
|
|
}
|
2005-04-16 22:20:36 +00:00
|
|
|
} while (exception.retry);
|
2012-03-27 22:13:02 +00:00
|
|
|
out:
|
2005-04-16 22:20:36 +00:00
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2011-03-24 17:12:31 +00:00
|
|
|
/*
 * Retry the root filehandle lookup using the given security flavor.
 * Binds an RPC auth of that flavor to the server's client before
 * performing the lookup; returns -EACCES if the flavor is unusable.
 */
static int nfs4_lookup_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
				struct nfs_fsinfo *info, rpc_authflavor_t flavor)
{
	struct rpc_auth_create_args auth_args = {
		.pseudoflavor = flavor,
	};
	struct rpc_auth *auth = rpcauth_create(&auth_args, server->client);

	return IS_ERR(auth) ? -EACCES
			    : nfs4_lookup_root(server, fhandle, info);
}
|
|
|
|
|
NFS: Use static list of security flavors during root FH lookup recovery
If the Linux NFS client receives an NFS4ERR_WRONGSEC error while
trying to look up an NFS server's root file handle, it retries the
lookup operation with various security flavors to see what flavor
the NFS server will accept for pseudo-fs access.
The list of flavors the client uses during retry consists only of
flavors that are currently registered in the kernel RPC client.
This list may not include any GSS pseudoflavors if auth_rpcgss.ko
has not yet been loaded.
Let's instead use a static list of security flavors that the NFS
standard requires the server to implement (RFC 3530bis, section
3.2.1). The RPC client should now be able to load support for
these dynamically; if not, they are skipped.
Recovery behavior here is prescribed by RFC 3530bis, section
15.33.5:
> For LOOKUPP, PUTROOTFH and PUTPUBFH, the client will be unable to
> use the SECINFO operation since SECINFO requires a current
> filehandle and none exist for these two [sic] operations. Therefore,
> the client must iterate through the security triples available at
> the client and reattempt the PUTROOTFH or PUTPUBFH operation. In
> the unfortunate event none of the MANDATORY security triples are
> supported by the client and server, the client SHOULD try using
> others that support integrity. Failing that, the client can try
> using AUTH_NONE, but because such forms lack integrity checks,
> this puts the client at risk.
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Cc: Bryan Schumaker <bjschuma@netapp.com>
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
2013-03-16 19:56:02 +00:00
|
|
|
/*
|
|
|
|
* Retry pseudoroot lookup with various security flavors. We do this when:
|
|
|
|
*
|
|
|
|
* NFSv4.0: the PUTROOTFH operation returns NFS4ERR_WRONGSEC
|
|
|
|
* NFSv4.1: the server does not support the SECINFO_NO_NAME operation
|
|
|
|
*
|
|
|
|
* Returns zero on success, or a negative NFS4ERR value, or a
|
|
|
|
* negative errno value.
|
|
|
|
*/
|
2011-04-13 18:31:30 +00:00
|
|
|
static int nfs4_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
|
NFS: Share NFS superblocks per-protocol per-server per-FSID
The attached patch makes NFS share superblocks between mounts from the same
server and FSID over the same protocol.
It does this by creating each superblock with a false root and returning the
real root dentry in the vfsmount presented by get_sb(). The root dentry set
starts off as an anonymous dentry if we don't already have the dentry for its
inode, otherwise it simply returns the dentry we already have.
We may thus end up with several trees of dentries in the superblock, and if at
some later point one of anonymous tree roots is discovered by normal filesystem
activity to be located in another tree within the superblock, the anonymous
root is named and materialises attached to the second tree at the appropriate
point.
Why do it this way? Why not pass an extra argument to the mount() syscall to
indicate the subpath and then pathwalk from the server root to the desired
directory? You can't guarantee this will work for two reasons:
(1) The root and intervening nodes may not be accessible to the client.
With NFS2 and NFS3, for instance, mountd is called on the server to get
the filehandle for the tip of a path. mountd won't give us handles for
anything we don't have permission to access, and so we can't set up NFS
inodes for such nodes, and so can't easily set up dentries (we'd have to
have ghost inodes or something).
With this patch we don't actually create dentries until we get handles
from the server that we can use to set up their inodes, and we don't
actually bind them into the tree until we know for sure where they go.
(2) Inaccessible symbolic links.
If we're asked to mount two exports from the server, eg:
mount warthog:/warthog/aaa/xxx /mmm
mount warthog:/warthog/bbb/yyy /nnn
We may not be able to access anything nearer the root than xxx and yyy,
but we may find out later that /mmm/www/yyy, say, is actually the same
directory as the one mounted on /nnn. What we might then find out, for
example, is that /warthog/bbb was actually a symbolic link to
/warthog/aaa/xxx/www, but we can't actually determine that by talking to
the server until /warthog is made available by NFS.
This would lead to having constructed an errneous dentry tree which we
can't easily fix. We can end up with a dentry marked as a directory when
it should actually be a symlink, or we could end up with an apparently
hardlinked directory.
With this patch we need not make assumptions about the type of a dentry
for which we can't retrieve information, nor need we assume we know its
place in the grand scheme of things until we actually see that place.
This patch reduces the possibility of aliasing in the inode and page caches for
inodes that may be accessed by more than one NFS export. It also reduces the
number of superblocks required for NFS where there are many NFS exports being
used from a server (home directory server + autofs for example).
This in turn makes it simpler to do local caching of network filesystems, as it
can then be guaranteed that there won't be links from multiple inodes in
separate superblocks to the same cache file.
Obviously, cache aliasing between different levels of NFS protocol could still
be a problem, but at least that gives us another key to use when indexing the
cache.
This patch makes the following changes:
(1) The server record construction/destruction has been abstracted out into
its own set of functions to make things easier to get right. These have
been moved into fs/nfs/client.c.
All the code in fs/nfs/client.c has to do with the management of
connections to servers, and doesn't touch superblocks in any way; the
remaining code in fs/nfs/super.c has to do with VFS superblock management.
(2) The sequence of events undertaken by NFS mount is now reordered:
(a) A volume representation (struct nfs_server) is allocated.
(b) A server representation (struct nfs_client) is acquired. This may be
allocated or shared, and is keyed on server address, port and NFS
version.
(c) If allocated, the client representation is initialised. The state
member variable of nfs_client is used to prevent a race during
initialisation from two mounts.
(d) For NFS4 a simple pathwalk is performed, walking from FH to FH to find
the root filehandle for the mount (fs/nfs/getroot.c). For NFS2/3 we
are given the root FH in advance.
(e) The volume FSID is probed for on the root FH.
(f) The volume representation is initialised from the FSINFO record
retrieved on the root FH.
(g) sget() is called to acquire a superblock. This may be allocated or
shared, keyed on client pointer and FSID.
(h) If allocated, the superblock is initialised.
(i) If the superblock is shared, then the new nfs_server record is
discarded.
(j) The root dentry for this mount is looked up from the root FH.
(k) The root dentry for this mount is assigned to the vfsmount.
(3) nfs_readdir_lookup() creates dentries for each of the entries readdir()
returns; this function now attaches disconnected trees from alternate
roots that happen to be discovered attached to a directory being read (in
the same way nfs_lookup() is made to do for lookup ops).
The new d_materialise_unique() function is now used to do this, thus
permitting the whole thing to be done under one set of locks, and thus
avoiding any race between mount and lookup operations on the same
directory.
(4) The client management code uses a new debug facility: NFSDBG_CLIENT which
is set by echoing 1024 to /proc/net/sunrpc/nfs_debug.
(5) Clone mounts are now called xdev mounts.
(6) Use the dentry passed to the statfs() op as the handle for retrieving fs
statistics rather than the root dentry of the superblock (which is now a
dummy).
Signed-Off-By: David Howells <dhowells@redhat.com>
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
2006-08-23 00:06:13 +00:00
|
|
|
struct nfs_fsinfo *info)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
NFS: Use static list of security flavors during root FH lookup recovery
If the Linux NFS client receives an NFS4ERR_WRONGSEC error while
trying to look up an NFS server's root file handle, it retries the
lookup operation with various security flavors to see what flavor
the NFS server will accept for pseudo-fs access.
The list of flavors the client uses during retry consists only of
flavors that are currently registered in the kernel RPC client.
This list may not include any GSS pseudoflavors if auth_rpcgss.ko
has not yet been loaded.
Let's instead use a static list of security flavors that the NFS
standard requires the server to implement (RFC 3530bis, section
3.2.1). The RPC client should now be able to load support for
these dynamically; if not, they are skipped.
Recovery behavior here is prescribed by RFC 3530bis, section
15.33.5:
> For LOOKUPP, PUTROOTFH and PUTPUBFH, the client will be unable to
> use the SECINFO operation since SECINFO requires a current
> filehandle and none exist for these two [sic] operations. Therefore,
> the client must iterate through the security triples available at
> the client and reattempt the PUTROOTFH or PUTPUBFH operation. In
> the unfortunate event none of the MANDATORY security triples are
> supported by the client and server, the client SHOULD try using
> others that support integrity. Failing that, the client can try
> using AUTH_NONE, but because such forms lack integrity checks,
> this puts the client at risk.
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Cc: Bryan Schumaker <bjschuma@netapp.com>
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
2013-03-16 19:56:02 +00:00
|
|
|
/* Per 3530bis 15.33.5 */
|
|
|
|
static const rpc_authflavor_t flav_array[] = {
|
|
|
|
RPC_AUTH_GSS_KRB5P,
|
|
|
|
RPC_AUTH_GSS_KRB5I,
|
|
|
|
RPC_AUTH_GSS_KRB5,
|
2013-03-16 19:56:11 +00:00
|
|
|
RPC_AUTH_UNIX, /* courtesy */
|
NFS: Use static list of security flavors during root FH lookup recovery
If the Linux NFS client receives an NFS4ERR_WRONGSEC error while
trying to look up an NFS server's root file handle, it retries the
lookup operation with various security flavors to see what flavor
the NFS server will accept for pseudo-fs access.
The list of flavors the client uses during retry consists only of
flavors that are currently registered in the kernel RPC client.
This list may not include any GSS pseudoflavors if auth_rpcgss.ko
has not yet been loaded.
Let's instead use a static list of security flavors that the NFS
standard requires the server to implement (RFC 3530bis, section
3.2.1). The RPC client should now be able to load support for
these dynamically; if not, they are skipped.
Recovery behavior here is prescribed by RFC 3530bis, section
15.33.5:
> For LOOKUPP, PUTROOTFH and PUTPUBFH, the client will be unable to
> use the SECINFO operation since SECINFO requires a current
> filehandle and none exist for these two [sic] operations. Therefore,
> the client must iterate through the security triples available at
> the client and reattempt the PUTROOTFH or PUTPUBFH operation. In
> the unfortunate event none of the MANDATORY security triples are
> supported by the client and server, the client SHOULD try using
> others that support integrity. Failing that, the client can try
> using AUTH_NONE, but because such forms lack integrity checks,
> this puts the client at risk.
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Cc: Bryan Schumaker <bjschuma@netapp.com>
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
2013-03-16 19:56:02 +00:00
|
|
|
RPC_AUTH_NULL,
|
|
|
|
};
|
|
|
|
int status = -EPERM;
|
|
|
|
size_t i;
|
2012-07-11 20:31:08 +00:00
|
|
|
|
2013-10-18 19:15:19 +00:00
|
|
|
if (server->auth_info.flavor_len > 0) {
|
|
|
|
/* try each flavor specified by user */
|
|
|
|
for (i = 0; i < server->auth_info.flavor_len; i++) {
|
|
|
|
status = nfs4_lookup_root_sec(server, fhandle, info,
|
|
|
|
server->auth_info.flavors[i]);
|
|
|
|
if (status == -NFS4ERR_WRONGSEC || status == -EACCES)
|
|
|
|
continue;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
/* no flavors specified by user, try default list */
|
|
|
|
for (i = 0; i < ARRAY_SIZE(flav_array); i++) {
|
|
|
|
status = nfs4_lookup_root_sec(server, fhandle, info,
|
|
|
|
flav_array[i]);
|
|
|
|
if (status == -NFS4ERR_WRONGSEC || status == -EACCES)
|
|
|
|
continue;
|
|
|
|
break;
|
|
|
|
}
|
2011-03-24 17:12:31 +00:00
|
|
|
}
|
NFS: Use static list of security flavors during root FH lookup recovery
If the Linux NFS client receives an NFS4ERR_WRONGSEC error while
trying to look up an NFS server's root file handle, it retries the
lookup operation with various security flavors to see what flavor
the NFS server will accept for pseudo-fs access.
The list of flavors the client uses during retry consists only of
flavors that are currently registered in the kernel RPC client.
This list may not include any GSS pseudoflavors if auth_rpcgss.ko
has not yet been loaded.
Let's instead use a static list of security flavors that the NFS
standard requires the server to implement (RFC 3530bis, section
3.2.1). The RPC client should now be able to load support for
these dynamically; if not, they are skipped.
Recovery behavior here is prescribed by RFC 3530bis, section
15.33.5:
> For LOOKUPP, PUTROOTFH and PUTPUBFH, the client will be unable to
> use the SECINFO operation since SECINFO requires a current
> filehandle and none exist for these two [sic] operations. Therefore,
> the client must iterate through the security triples available at
> the client and reattempt the PUTROOTFH or PUTPUBFH operation. In
> the unfortunate event none of the MANDATORY security triples are
> supported by the client and server, the client SHOULD try using
> others that support integrity. Failing that, the client can try
> using AUTH_NONE, but because such forms lack integrity checks,
> this puts the client at risk.
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Cc: Bryan Schumaker <bjschuma@netapp.com>
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
2013-03-16 19:56:02 +00:00
|
|
|
|
2011-04-18 20:52:25 +00:00
|
|
|
/*
|
2018-10-26 18:10:31 +00:00
|
|
|
* -EACCES could mean that the user doesn't have correct permissions
|
2011-04-18 20:52:25 +00:00
|
|
|
* to access the mount. It could also mean that we tried to mount
|
|
|
|
* with a gss auth flavor, but rpc.gssd isn't running. Either way,
|
|
|
|
* existing mount programs don't handle -EACCES very well so it should
|
|
|
|
* be mapped to -EPERM instead.
|
|
|
|
*/
|
|
|
|
if (status == -EACCES)
|
|
|
|
status = -EPERM;
|
2011-04-13 18:31:30 +00:00
|
|
|
return status;
|
|
|
|
}
|
|
|
|
|
2013-03-16 19:55:45 +00:00
|
|
|
/**
 * nfs4_proc_get_rootfh - get file handle for server's pseudoroot
 * @server: initialized nfs_server handle
 * @fhandle: we fill in the pseudo-fs root file handle
 * @info: we fill in an FSINFO struct
 * @auth_probe: if true, skip the plain lookup and go straight to
 *              negotiating a security flavour via find_root_sec()
 *
 * On success, also probes the server capabilities and FSINFO for the
 * root file handle.
 *
 * Returns zero on success, or a negative errno.
 */
int nfs4_proc_get_rootfh(struct nfs_server *server, struct nfs_fh *fhandle,
			 struct nfs_fsinfo *info,
			 bool auth_probe)
{
	int status = 0;

	if (!auth_probe)
		status = nfs4_lookup_root(server, fhandle, info);

	/*
	 * NOTE(review): the constant here is positive, while every other
	 * comparison in this file uses the negated form (-NFS4ERR_WRONGSEC).
	 * If nfs4_lookup_root() only ever returns negative error codes this
	 * branch can only be entered via auth_probe — confirm intent.
	 */
	if (auth_probe || status == NFS4ERR_WRONGSEC)
		status = server->nfs_client->cl_mvops->find_root_sec(server,
				fhandle, info);

	/* Follow up a successful root lookup with capability/FSINFO probes */
	if (status == 0)
		status = nfs4_server_capabilities(server, fhandle);
	if (status == 0)
		status = nfs4_do_fsinfo(server, fhandle, info);

	/* Translate NFSv4 error codes into errnos the VFS understands */
	return nfs4_map_errors(status);
}
|
|
|
|
|
2012-05-10 19:07:31 +00:00
|
|
|
/*
 * Probe capabilities and attributes for @mntfh, and copy the fattr's
 * fsid into the nfs_server if it differs.  Used when setting up the
 * root of a mount.  Returns 0 or a negative errno.
 */
static int nfs4_proc_get_root(struct nfs_server *server, struct nfs_fh *mntfh,
			      struct nfs_fsinfo *info)
{
	int error;
	struct nfs_fattr *fattr = info->fattr;

	error = nfs4_server_capabilities(server, mntfh);
	if (error < 0) {
		dprintk("nfs4_get_root: getcaps error = %d\n", -error);
		return error;
	}

	error = nfs4_proc_getattr(server, mntfh, fattr, NULL);
	if (error < 0) {
		dprintk("nfs4_get_root: getattr error = %d\n", -error);
		goto out;
	}

	/* Adopt the fsid the server reported if it differs from ours */
	if (fattr->valid & NFS_ATTR_FATTR_FSID &&
	    !nfs_fsid_equal(&server->fsid, &fattr->fsid))
		memcpy(&server->fsid, &fattr->fsid, sizeof(server->fsid));

out:
	return error;
}
|
|
|
|
|
2006-06-09 13:34:29 +00:00
|
|
|
/*
 * Get locations and (maybe) other attributes of a referral.
 * Note that we'll actually follow the referral later when
 * we detect fsid mismatch in inode revalidation.
 *
 * Returns 0 on success (with @fattr filled in and @fhandle zeroed),
 * -ENOMEM on allocation failure, -NFS4ERR_MOVED if the fsid did not
 * change (i.e. this is a migration, not a referral), or another
 * negative error from the FS_LOCATIONS call.
 */
static int nfs4_get_referral(struct rpc_clnt *client, struct inode *dir,
			     const struct qstr *name, struct nfs_fattr *fattr,
			     struct nfs_fh *fhandle)
{
	int status = -ENOMEM;
	struct page *page = NULL;
	struct nfs4_fs_locations *locations = NULL;

	page = alloc_page(GFP_KERNEL);
	if (page == NULL)
		goto out;
	locations = kmalloc(sizeof(struct nfs4_fs_locations), GFP_KERNEL);
	if (locations == NULL)
		goto out;

	/* The locations result decodes straight into the caller's fattr */
	locations->fattr = fattr;

	status = nfs4_proc_fs_locations(client, dir, name, locations, page);
	if (status != 0)
		goto out;

	/*
	 * If the fsid didn't change, this is a migration event, not a
	 * referral.  Cause us to drop into the exception handler, which
	 * will kick off migration recovery.
	 */
	if (nfs_fsid_equal(&NFS_SERVER(dir)->fsid, &fattr->fsid)) {
		dprintk("%s: server did not return a different fsid for"
			" a referral at %s\n", __func__, name->name);
		status = -NFS4ERR_MOVED;
		goto out;
	}
	/* Fixup attributes for the nfs_lookup() call to nfs_fhget() */
	nfs_fixup_referral_attributes(fattr);
	/* Referral targets have no usable filehandle yet */
	memset(fhandle, 0, sizeof(struct nfs_fh));
out:
	if (page)
		__free_page(page);
	kfree(locations);
	return status;
}
|
|
|
|
|
2013-05-22 16:50:42 +00:00
|
|
|
/*
 * One synchronous GETATTR round trip for @fhandle, decoding into @fattr.
 * @inode may be NULL; when non-NULL it marks this as an attribute
 * revalidation, which enables softreval timeout behaviour and adjusts
 * the requested attribute bitmap.  Returns 0 or a negative error.
 */
static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle,
			      struct nfs_fattr *fattr, struct inode *inode)
{
	__u32 bitmask[NFS4_BITMASK_SZ];
	struct nfs4_getattr_arg args = {
		.fh = fhandle,
		.bitmask = bitmask,
	};
	struct nfs4_getattr_res res = {
		.fattr = fattr,
		.server = server,
	};
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETATTR],
		.rpc_argp = &args,
		.rpc_resp = &res,
	};
	unsigned short task_flags = 0;

	/* Sessions (v4.1+) allow the task to migrate between transports */
	if (nfs4_has_session(server->nfs_client))
		task_flags = RPC_TASK_MOVEABLE;

	/* Is this an attribute revalidation, subject to softreval? */
	if (inode && (server->flags & NFS_MOUNT_SOFTREVAL))
		task_flags |= RPC_TASK_TIMEOUT;

	nfs4_bitmap_copy_adjust(bitmask, nfs4_bitmask(server, fattr->label), inode, 0);
	nfs_fattr_init(fattr);
	nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 0);
	return nfs4_do_call_sync(server->client, server, &msg,
				 &args.seq_args, &res.seq_res, task_flags);
}
|
|
|
|
|
2019-10-08 20:33:53 +00:00
|
|
|
int nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle,
|
2021-10-22 17:11:07 +00:00
|
|
|
struct nfs_fattr *fattr, struct inode *inode)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
2019-04-07 17:59:09 +00:00
|
|
|
struct nfs4_exception exception = {
|
|
|
|
.interruptible = true,
|
|
|
|
};
|
2005-04-16 22:20:36 +00:00
|
|
|
int err;
|
|
|
|
do {
|
2021-10-22 17:11:07 +00:00
|
|
|
err = _nfs4_proc_getattr(server, fhandle, fattr, inode);
|
2013-08-13 17:01:39 +00:00
|
|
|
trace_nfs4_getattr(server, fhandle, fattr, err);
|
|
|
|
err = nfs4_handle_exception(server, err,
|
2005-04-16 22:20:36 +00:00
|
|
|
&exception);
|
|
|
|
} while (exception.retry);
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* The file is not closed if it is opened due to the a request to change
|
|
|
|
* the size of the file. The open call will not be needed once the
|
|
|
|
* VFS layer lookup-intents are implemented.
|
|
|
|
*
|
|
|
|
* Close is called when the inode is destroyed.
|
|
|
|
* If we haven't opened the file for O_WRONLY, we
|
|
|
|
* need to in the size_change case to obtain a stateid.
|
|
|
|
*
|
|
|
|
* Got race?
|
|
|
|
* Because OPEN is always done by name in nfsv4, it is
|
|
|
|
* possible that we opened a different file by the same
|
|
|
|
* name. We can recognize this race condition, but we
|
|
|
|
* can't do anything about it besides returning an error.
|
|
|
|
*
|
|
|
|
* This will be fixed with VFS changes (lookup-intent).
|
|
|
|
*/
|
|
|
|
/*
 * Apply @sattr to the file behind @dentry via SETATTR, updating the
 * inode and its security attributes on success.  May borrow the cred
 * and open context of an already-open file (ATTR_FILE), and returns
 * a layout first when a pNFS layout driver requires it for truncates.
 */
static int
nfs4_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr,
		  struct iattr *sattr)
{
	struct inode *inode = d_inode(dentry);
	const struct cred *cred = NULL;
	struct nfs_open_context *ctx = NULL;
	int status;

	/* Shrinking truncate: some layout drivers want the layout back first */
	if (pnfs_ld_layoutret_on_setattr(inode) &&
	    sattr->ia_valid & ATTR_SIZE &&
	    sattr->ia_size < i_size_read(inode))
		pnfs_commit_and_return_layout(inode);

	nfs_fattr_init(fattr);

	/* Deal with open(O_TRUNC) */
	if (sattr->ia_valid & ATTR_OPEN)
		sattr->ia_valid &= ~(ATTR_MTIME|ATTR_CTIME);

	/* Optimization: if the end result is no change, don't RPC */
	if ((sattr->ia_valid & ~(ATTR_FILE|ATTR_OPEN)) == 0)
		return 0;

	/* Search for an existing open(O_WRITE) file */
	if (sattr->ia_valid & ATTR_FILE) {

		ctx = nfs_file_open_context(sattr->ia_file);
		if (ctx)
			cred = ctx->cred;
	}

	/* Return any delegations if we're going to change ACLs */
	if ((sattr->ia_valid & (ATTR_MODE|ATTR_UID|ATTR_GID)) != 0)
		nfs4_inode_make_writeable(inode);

	status = nfs4_do_setattr(inode, cred, fattr, sattr, ctx, NULL);
	if (status == 0) {
		nfs_setattr_update_inode(inode, sattr, fattr);
		nfs_setsecurity(inode, fattr);
	}
	return status;
}
|
|
|
|
|
2011-10-18 23:11:22 +00:00
|
|
|
/*
 * One synchronous LOOKUP of @dentry's name in @dir over @clnt, filling
 * in @fhandle and @fattr from the reply.  Returns 0 or a negative
 * NFSv4 error (e.g. -NFS4ERR_MOVED, -NFS4ERR_WRONGSEC) for the caller
 * to translate.
 */
static int _nfs4_proc_lookup(struct rpc_clnt *clnt, struct inode *dir,
			     struct dentry *dentry, struct nfs_fh *fhandle,
			     struct nfs_fattr *fattr)
{
	struct nfs_server *server = NFS_SERVER(dir);
	int status;
	struct nfs4_lookup_arg args = {
		.bitmask = server->attr_bitmask,
		.dir_fh = NFS_FH(dir),
		.name = &dentry->d_name,
	};
	struct nfs4_lookup_res res = {
		.server = server,
		.fattr = fattr,
		.fh = fhandle,
	};
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP],
		.rpc_argp = &args,
		.rpc_resp = &res,
	};
	unsigned short task_flags = 0;

	if (nfs_server_capable(dir, NFS_CAP_MOVEABLE))
		task_flags = RPC_TASK_MOVEABLE;

	/* Is this an attribute revalidation, subject to softreval? */
	if (nfs_lookup_is_soft_revalidate(dentry))
		task_flags |= RPC_TASK_TIMEOUT;

	/* Overrides the initializer: request the label-aware bitmap */
	args.bitmask = nfs4_bitmask(server, fattr->label);

	nfs_fattr_init(fattr);

	dprintk("NFS call lookup %pd2\n", dentry);
	nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 0);
	status = nfs4_do_call_sync(clnt, server, &msg,
				   &args.seq_args, &res.seq_res, task_flags);
	dprintk("NFS reply lookup: %d\n", status);
	return status;
}
|
|
|
|
|
2012-04-27 17:27:40 +00:00
|
|
|
static void nfs_fixup_secinfo_attributes(struct nfs_fattr *fattr)
|
2011-03-24 17:12:30 +00:00
|
|
|
{
|
|
|
|
fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE |
|
2012-04-27 17:27:40 +00:00
|
|
|
NFS_ATTR_FATTR_NLINK | NFS_ATTR_FATTR_MOUNTPOINT;
|
2011-03-24 17:12:30 +00:00
|
|
|
fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO;
|
|
|
|
fattr->nlink = 2;
|
|
|
|
}
|
|
|
|
|
2012-04-27 17:27:40 +00:00
|
|
|
/*
 * LOOKUP with full error recovery.  Translates -NFS4ERR_BADNAME to
 * -ENOENT, follows -NFS4ERR_MOVED via nfs4_get_referral(), and on
 * -NFS4ERR_WRONGSEC negotiates a new security flavour (at most once:
 * a second WRONGSEC on the negotiated client yields -EPERM).
 *
 * Client ownership: on success *clnt is updated to the client actually
 * used; on failure a client we created during negotiation is shut down
 * here, so the caller never sees a leaked clone.
 */
static int nfs4_proc_lookup_common(struct rpc_clnt **clnt, struct inode *dir,
				   struct dentry *dentry, struct nfs_fh *fhandle,
				   struct nfs_fattr *fattr)
{
	struct nfs4_exception exception = {
		.interruptible = true,
	};
	struct rpc_clnt *client = *clnt;
	const struct qstr *name = &dentry->d_name;
	int err;
	do {
		err = _nfs4_proc_lookup(client, dir, dentry, fhandle, fattr);
		trace_nfs4_lookup(dir, name, err);
		switch (err) {
		case -NFS4ERR_BADNAME:
			err = -ENOENT;
			goto out;
		case -NFS4ERR_MOVED:
			err = nfs4_get_referral(client, dir, name, fattr, fhandle);
			/* Same fsid => migration, not a referral: recover */
			if (err == -NFS4ERR_MOVED)
				err = nfs4_handle_exception(NFS_SERVER(dir), err, &exception);
			goto out;
		case -NFS4ERR_WRONGSEC:
			err = -EPERM;
			/* Already negotiated once; don't loop forever */
			if (client != *clnt)
				goto out;
			client = nfs4_negotiate_security(client, dir, name);
			if (IS_ERR(client))
				return PTR_ERR(client);

			exception.retry = 1;
			break;
		default:
			err = nfs4_handle_exception(NFS_SERVER(dir), err, &exception);
		}
	} while (exception.retry);

out:
	if (err == 0)
		*clnt = client;
	else if (client != *clnt)
		rpc_shutdown_client(client);

	return err;
}
|
|
|
|
|
2020-01-14 17:06:34 +00:00
|
|
|
/*
 * Standard LOOKUP entry point.  If security negotiation produced a
 * private rpc_clnt, we have crossed a security boundary: drop the
 * temporary client and substitute synthesized mountpoint attributes.
 */
static int nfs4_proc_lookup(struct inode *dir, struct dentry *dentry,
			    struct nfs_fh *fhandle, struct nfs_fattr *fattr)
{
	struct rpc_clnt *client = NFS_CLIENT(dir);
	int status = nfs4_proc_lookup_common(&client, dir, dentry, fhandle,
					     fattr);

	if (client == NFS_CLIENT(dir))
		return status;

	rpc_shutdown_client(client);
	nfs_fixup_secinfo_attributes(fattr);
	return status;
}
|
|
|
|
|
2012-04-27 17:27:41 +00:00
|
|
|
/*
 * LOOKUP variant used when crossing a mountpoint.  Always hands the
 * caller an rpc_clnt reference it owns: either the client produced by
 * security negotiation, or a clone of the directory's client.
 * Returns ERR_PTR() on lookup failure.
 */
struct rpc_clnt *
nfs4_proc_lookup_mountpoint(struct inode *dir, struct dentry *dentry,
			    struct nfs_fh *fhandle, struct nfs_fattr *fattr)
{
	struct rpc_clnt *client = NFS_CLIENT(dir);
	int status = nfs4_proc_lookup_common(&client, dir, dentry, fhandle,
					     fattr);

	if (status < 0)
		return ERR_PTR(status);
	if (client == NFS_CLIENT(dir))
		return rpc_clone_client(client);
	return client;
}
|
|
|
|
|
2017-06-29 13:34:52 +00:00
|
|
|
/*
 * One synchronous LOOKUPP (lookup parent) for @inode, filling in the
 * parent's @fhandle and @fattr.  Returns 0 or a negative error.
 */
static int _nfs4_proc_lookupp(struct inode *inode,
		struct nfs_fh *fhandle, struct nfs_fattr *fattr)
{
	struct rpc_clnt *clnt = NFS_CLIENT(inode);
	struct nfs_server *server = NFS_SERVER(inode);
	int status;
	struct nfs4_lookupp_arg args = {
		.bitmask = server->attr_bitmask,
		.fh = NFS_FH(inode),
	};
	struct nfs4_lookupp_res res = {
		.server = server,
		.fattr = fattr,
		.fh = fhandle,
	};
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUPP],
		.rpc_argp = &args,
		.rpc_resp = &res,
	};
	unsigned short task_flags = 0;

	if (NFS_SERVER(inode)->flags & NFS_MOUNT_SOFTREVAL)
		task_flags |= RPC_TASK_TIMEOUT;

	/* Overrides the initializer: request the label-aware bitmap */
	args.bitmask = nfs4_bitmask(server, fattr->label);

	nfs_fattr_init(fattr);

	dprintk("NFS call lookupp ino=0x%lx\n", inode->i_ino);
	status = nfs4_call_sync(clnt, server, &msg, &args.seq_args,
				&res.seq_res, task_flags);
	dprintk("NFS reply lookupp: %d\n", status);
	return status;
}
|
|
|
|
|
|
|
|
static int nfs4_proc_lookupp(struct inode *inode, struct nfs_fh *fhandle,
|
2021-10-22 17:11:05 +00:00
|
|
|
struct nfs_fattr *fattr)
|
2017-06-29 13:34:52 +00:00
|
|
|
{
|
2019-04-07 17:59:09 +00:00
|
|
|
struct nfs4_exception exception = {
|
|
|
|
.interruptible = true,
|
|
|
|
};
|
2017-06-29 13:34:52 +00:00
|
|
|
int err;
|
|
|
|
do {
|
2021-10-22 17:11:05 +00:00
|
|
|
err = _nfs4_proc_lookupp(inode, fhandle, fattr);
|
2017-06-29 13:34:52 +00:00
|
|
|
trace_nfs4_lookupp(inode, err);
|
|
|
|
err = nfs4_handle_exception(NFS_SERVER(inode), err,
|
|
|
|
&exception);
|
|
|
|
} while (exception.retry);
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2021-09-27 23:47:57 +00:00
|
|
|
/*
 * One synchronous ACCESS call for @inode with @cred, asking about the
 * bits in entry->mask and writing the granted set back via
 * nfs_access_set_mask().  When we hold no read delegation, we also
 * piggyback a cache-consistency GETATTR and refresh the inode from it.
 * Returns 0, -ENOMEM, or a negative RPC/NFS error.
 */
static int _nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry,
			     const struct cred *cred)
{
	struct nfs_server *server = NFS_SERVER(inode);
	struct nfs4_accessargs args = {
		.fh = NFS_FH(inode),
		.access = entry->mask,
	};
	struct nfs4_accessres res = {
		.server = server,
	};
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_ACCESS],
		.rpc_argp = &args,
		.rpc_resp = &res,
	};
	int status = 0;

	/* No delegation: fetch post-op attributes for cache consistency */
	if (!nfs4_have_delegation(inode, FMODE_READ)) {
		res.fattr = nfs_alloc_fattr();
		if (res.fattr == NULL)
			return -ENOMEM;
		args.bitmask = server->cache_consistency_bitmask;
	}
	status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
	if (!status) {
		nfs_access_set_mask(entry, res.access);
		if (res.fattr)
			nfs_refresh_inode(inode, res.fattr);
	}
	/* nfs_free_fattr(NULL) is safe, covering the delegation path */
	nfs_free_fattr(res.fattr);
	return status;
}
|
|
|
|
|
2021-09-27 23:47:57 +00:00
|
|
|
static int nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry,
|
|
|
|
const struct cred *cred)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
2019-04-07 17:59:09 +00:00
|
|
|
struct nfs4_exception exception = {
|
|
|
|
.interruptible = true,
|
|
|
|
};
|
2005-04-16 22:20:36 +00:00
|
|
|
int err;
|
|
|
|
do {
|
2021-09-27 23:47:57 +00:00
|
|
|
err = _nfs4_proc_access(inode, entry, cred);
|
2013-08-12 20:58:42 +00:00
|
|
|
trace_nfs4_access(inode, err);
|
|
|
|
err = nfs4_handle_exception(NFS_SERVER(inode), err,
|
2005-04-16 22:20:36 +00:00
|
|
|
&exception);
|
|
|
|
} while (exception.retry);
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* TODO: For the time being, we don't try to get any attributes
|
|
|
|
* along with any of the zero-copy operations READ, READDIR,
|
|
|
|
* READLINK, WRITE.
|
|
|
|
*
|
|
|
|
* In the case of the first three, we want to put the GETATTR
|
|
|
|
* after the read-type operation -- this is because it is hard
|
|
|
|
* to predict the length of a GETATTR response in v4, and thus
|
|
|
|
* align the READ data correctly. This means that the GETATTR
|
|
|
|
* may end up partially falling into the page cache, and we should
|
|
|
|
* shift it into the 'tail' of the xdr_buf before processing.
|
|
|
|
* To do this efficiently, we need to know the total length
|
|
|
|
* of data received, which doesn't seem to be available outside
|
|
|
|
* of the RPC layer.
|
|
|
|
*
|
|
|
|
* In the case of WRITE, we also want to put the GETATTR after
|
|
|
|
* the operation -- in this case because we want to make sure
|
2012-06-05 19:20:25 +00:00
|
|
|
* we get the post-operation mtime and size.
|
2005-04-16 22:20:36 +00:00
|
|
|
*
|
|
|
|
* Both of these changes to the XDR layer would in fact be quite
|
|
|
|
* minor, but I decided to leave them for a subsequent patch.
|
|
|
|
*/
|
|
|
|
/*
 * One synchronous READLINK for @inode, decoding up to @pglen bytes of
 * the link target into @page at offset @pgbase (zero-copy; see the
 * TODO comment above about GETATTR placement).  Returns 0 or a
 * negative error.
 */
static int _nfs4_proc_readlink(struct inode *inode, struct page *page,
		unsigned int pgbase, unsigned int pglen)
{
	struct nfs4_readlink args = {
		.fh = NFS_FH(inode),
		.pgbase = pgbase,
		.pglen = pglen,
		.pages = &page,
	};
	struct nfs4_readlink_res res;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READLINK],
		.rpc_argp = &args,
		.rpc_resp = &res,
	};

	return nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode), &msg, &args.seq_args, &res.seq_res, 0);
}
|
|
|
|
|
|
|
|
static int nfs4_proc_readlink(struct inode *inode, struct page *page,
|
|
|
|
unsigned int pgbase, unsigned int pglen)
|
|
|
|
{
|
2019-04-07 17:59:09 +00:00
|
|
|
struct nfs4_exception exception = {
|
|
|
|
.interruptible = true,
|
|
|
|
};
|
2005-04-16 22:20:36 +00:00
|
|
|
int err;
|
|
|
|
do {
|
2013-08-12 20:58:42 +00:00
|
|
|
err = _nfs4_proc_readlink(inode, page, pgbase, pglen);
|
|
|
|
trace_nfs4_readlink(inode, err);
|
|
|
|
err = nfs4_handle_exception(NFS_SERVER(inode), err,
|
2005-04-16 22:20:36 +00:00
|
|
|
&exception);
|
|
|
|
} while (exception.retry);
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2012-06-05 13:10:19 +00:00
|
|
|
* This is just for mknod. open(O_CREAT) will always do ->open_context().
|
2005-04-16 22:20:36 +00:00
|
|
|
*/
|
|
|
|
/*
 * Create via OPEN(CREATE); this path is only used by mknod-style
 * callers — open(O_CREAT) goes through ->open_context() instead (see
 * comment above).  Applies the process umask locally unless the server
 * advertises MODE_UMASK support, and sets up security labels.
 * Returns 0 or a negative errno.
 */
static int
nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
		 int flags)
{
	struct nfs_server *server = NFS_SERVER(dir);
	struct nfs4_label l, *ilabel;
	struct nfs_open_context *ctx;
	struct nfs4_state *state;
	int status = 0;

	ctx = alloc_nfs_open_context(dentry, FMODE_READ, NULL);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ilabel = nfs4_label_init_security(dir, dentry, sattr, &l);

	/* Server handles umask itself when MODE_UMASK is supported */
	if (!(server->attr_bitmask[2] & FATTR4_WORD2_MODE_UMASK))
		sattr->ia_mode &= ~current_umask();
	state = nfs4_do_open(dir, ctx, flags, sattr, ilabel, NULL);
	if (IS_ERR(state)) {
		status = PTR_ERR(state);
		/* NOTE: goto is redundant (falls through to out:), kept as-is */
		goto out;
	}
out:
	nfs4_label_release_security(ilabel);
	put_nfs_open_context(ctx);
	return status;
}
|
|
|
|
|
2018-07-31 19:54:10 +00:00
|
|
|
/*
 * One synchronous REMOVE of @name from @dir.  @ftype tells us whether
 * the victim was a directory so we can maintain the parent's nlink
 * locally.  On success, updates the directory's change attribute and
 * invalidates its cached data under i_lock.  Returns 0 or a negative
 * error.
 */
static int
_nfs4_proc_remove(struct inode *dir, const struct qstr *name, u32 ftype)
{
	struct nfs_server *server = NFS_SERVER(dir);
	struct nfs_removeargs args = {
		.fh = NFS_FH(dir),
		.name = *name,
	};
	struct nfs_removeres res = {
		.server = server,
	};
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE],
		.rpc_argp = &args,
		.rpc_resp = &res,
	};
	/* Snapshot before the RPC so cache timestamps predate the reply */
	unsigned long timestamp = jiffies;
	int status;

	status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 1);
	if (status == 0) {
		spin_lock(&dir->i_lock);
		/* Removing a directory decrements nlink in the parent */
		if (ftype == NF4DIR && dir->i_nlink > 2)
			nfs4_dec_nlink_locked(dir);
		nfs4_update_changeattr_locked(dir, &res.cinfo, timestamp,
					      NFS_INO_INVALID_DATA);
		spin_unlock(&dir->i_lock);
	}
	return status;
}
|
|
|
|
|
2018-03-20 20:43:15 +00:00
|
|
|
/*
 * nfs4_proc_remove - remove a regular file, with delegation handling and
 * the standard NFSv4 exception/retry loop around _nfs4_proc_remove().
 *
 * Before issuing REMOVE: if this is the last link, return any delegation
 * (the file is going away); otherwise just make the inode writeable so
 * cached state is flushed first.
 */
static int nfs4_proc_remove(struct inode *dir, struct dentry *dentry)
{
	struct nfs4_exception exception = {
		.interruptible = true,
	};
	struct inode *inode = d_inode(dentry);
	int err;

	if (inode) {
		if (inode->i_nlink == 1)
			nfs4_inode_return_delegation(inode);
		else
			nfs4_inode_make_writeable(inode);
	}
	do {
		err = _nfs4_proc_remove(dir, &dentry->d_name, NF4REG);
		trace_nfs4_remove(dir, &dentry->d_name, err);
		err = nfs4_handle_exception(NFS_SERVER(dir), err,
				&exception);
	} while (exception.retry);
	return err;
}
|
|
|
|
|
|
|
|
/*
 * nfs4_proc_rmdir - remove a directory: _nfs4_proc_remove(NF4DIR) wrapped
 * in the standard NFSv4 exception/retry loop.
 */
static int nfs4_proc_rmdir(struct inode *dir, const struct qstr *name)
{
	struct nfs4_exception exception = {
		.interruptible = true,
	};
	int err;

	do {
		err = _nfs4_proc_remove(dir, name, NF4DIR);
		trace_nfs4_remove(dir, name, err);
		err = nfs4_handle_exception(NFS_SERVER(dir), err,
				&exception);
	} while (exception.retry);
	return err;
}
|
|
|
|
|
2018-05-30 20:11:52 +00:00
|
|
|
/*
 * nfs4_proc_unlink_setup - prepare an asynchronous ("sillyrename" path)
 * REMOVE rpc_message: fill in the procedure, sequence args and result
 * server, and reset the directory attributes that the reply will update.
 * If the victim inode is known, return its delegation and prune any
 * case-insensitive aliases before the unlink goes out.
 */
static void nfs4_proc_unlink_setup(struct rpc_message *msg,
		struct dentry *dentry,
		struct inode *inode)
{
	struct nfs_removeargs *args = msg->rpc_argp;
	struct nfs_removeres *res = msg->rpc_resp;

	res->server = NFS_SB(dentry->d_sb);
	msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE];
	nfs4_init_sequence(&args->seq_args, &res->seq_res, 1, 0);

	nfs_fattr_init(res->dir_attr);

	if (inode) {
		nfs4_inode_return_delegation(inode);
		nfs_d_prune_case_insensitive_aliases(inode);
	}
}
|
|
|
|
|
2012-03-19 18:54:41 +00:00
|
|
|
/* rpc_prepare callback for async unlink: claim an NFSv4 session slot. */
static void nfs4_proc_unlink_rpc_prepare(struct rpc_task *task, struct nfs_unlinkdata *data)
{
	nfs4_setup_sequence(NFS_SB(data->dentry->d_sb)->nfs_client,
			&data->args.seq_args,
			&data->res.seq_res,
			task);
}
|
|
|
|
|
2007-07-14 19:39:58 +00:00
|
|
|
/*
 * nfs4_proc_unlink_done - completion callback for the async REMOVE.
 * Returns 0 when the task was restarted (sequence not done, or the
 * async error handler asked for a retry), 1 when processing is complete.
 * On success, folds the returned change_info into the parent directory.
 */
static int nfs4_proc_unlink_done(struct rpc_task *task, struct inode *dir)
{
	struct nfs_unlinkdata *data = task->tk_calldata;
	struct nfs_removeres *res = &data->res;

	if (!nfs4_sequence_done(task, &res->seq_res))
		return 0;
	if (nfs4_async_handle_error(task, res->server, NULL,
				    &data->timeout) == -EAGAIN)
		return 0;
	if (task->tk_status == 0)
		nfs4_update_changeattr(dir, &res->cinfo,
				res->dir_attr->time_start,
				NFS_INO_INVALID_DATA);
	return 1;
}
|
|
|
|
|
2018-03-20 20:43:16 +00:00
|
|
|
/*
 * nfs4_proc_rename_setup - prepare an asynchronous RENAME rpc_message.
 * The source inode is made writeable (flush cached writes) and any
 * delegation on the target being replaced is returned before the RPC.
 */
static void nfs4_proc_rename_setup(struct rpc_message *msg,
		struct dentry *old_dentry,
		struct dentry *new_dentry)
{
	struct nfs_renameargs *arg = msg->rpc_argp;
	struct nfs_renameres *res = msg->rpc_resp;
	struct inode *old_inode = d_inode(old_dentry);
	struct inode *new_inode = d_inode(new_dentry);

	if (old_inode)
		nfs4_inode_make_writeable(old_inode);
	if (new_inode)
		nfs4_inode_return_delegation(new_inode);
	msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENAME];
	res->server = NFS_SB(old_dentry->d_sb);
	nfs4_init_sequence(&arg->seq_args, &res->seq_res, 1, 0);
}
|
|
|
|
|
2012-03-19 18:54:42 +00:00
|
|
|
/* rpc_prepare callback for async rename: claim an NFSv4 session slot. */
static void nfs4_proc_rename_rpc_prepare(struct rpc_task *task, struct nfs_renamedata *data)
{
	nfs4_setup_sequence(NFS_SERVER(data->old_dir)->nfs_client,
			&data->args.seq_args,
			&data->res.seq_res,
			task);
}
|
|
|
|
|
|
|
|
/*
 * nfs4_proc_rename_done - completion callback for the async RENAME.
 * Returns 0 when the task was restarted, 1 when processing is complete.
 * On success, prunes case-insensitive aliases of the moved inode and
 * applies the old/new directory change_info.  A cross-directory move may
 * also change nlink on both parents (when a directory was moved), hence
 * the extra NFS_INO_INVALID_NLINK in that branch.
 */
static int nfs4_proc_rename_done(struct rpc_task *task, struct inode *old_dir,
		struct inode *new_dir)
{
	struct nfs_renamedata *data = task->tk_calldata;
	struct nfs_renameres *res = &data->res;

	if (!nfs4_sequence_done(task, &res->seq_res))
		return 0;
	if (nfs4_async_handle_error(task, res->server, NULL, &data->timeout) == -EAGAIN)
		return 0;

	if (task->tk_status == 0) {
		nfs_d_prune_case_insensitive_aliases(d_inode(data->old_dentry));
		if (new_dir != old_dir) {
			/* Note: If we moved a directory, nlink will change */
			nfs4_update_changeattr(old_dir, &res->old_cinfo,
					res->old_fattr->time_start,
					NFS_INO_INVALID_NLINK |
					    NFS_INO_INVALID_DATA);
			nfs4_update_changeattr(new_dir, &res->new_cinfo,
					res->new_fattr->time_start,
					NFS_INO_INVALID_NLINK |
					    NFS_INO_INVALID_DATA);
		} else
			nfs4_update_changeattr(old_dir, &res->old_cinfo,
					res->old_fattr->time_start,
					NFS_INO_INVALID_DATA);
	}
	return 1;
}
|
|
|
|
|
2016-07-20 20:34:42 +00:00
|
|
|
/*
 * _nfs4_proc_link - issue one synchronous NFSv4 LINK creating @name in
 * @dir referring to @inode.
 *
 * Allocates a fattr (with security label) for the reply; on success,
 * updates the directory change attribute, bumps the local link count and
 * pushes the returned attributes (and label) into the inode.  Returns 0
 * or a negative error (-ENOMEM if the fattr allocation failed); the
 * caller handles retries.
 */
static int _nfs4_proc_link(struct inode *inode, struct inode *dir, const struct qstr *name)
{
	struct nfs_server *server = NFS_SERVER(inode);
	__u32 bitmask[NFS4_BITMASK_SZ];
	struct nfs4_link_arg arg = {
		.fh     = NFS_FH(inode),
		.dir_fh = NFS_FH(dir),
		.name   = name,
		.bitmask = bitmask,
	};
	struct nfs4_link_res res = {
		.server = server,
	};
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LINK],
		.rpc_argp = &arg,
		.rpc_resp = &res,
	};
	int status = -ENOMEM;

	res.fattr = nfs_alloc_fattr_with_label(server);
	if (res.fattr == NULL)
		goto out;

	/* Flush cached writes so the server-side attributes are current */
	nfs4_inode_make_writeable(inode);
	nfs4_bitmap_copy_adjust(bitmask, nfs4_bitmask(server, res.fattr->label), inode,
				NFS_INO_INVALID_CHANGE);
	status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
	if (!status) {
		nfs4_update_changeattr(dir, &res.cinfo, res.fattr->time_start,
				       NFS_INO_INVALID_DATA);
		nfs4_inc_nlink(inode);
		status = nfs_post_op_update_inode(inode, res.fattr);
		if (!status)
			nfs_setsecurity(inode, res.fattr);
	}

out:
	nfs_free_fattr(res.fattr);
	return status;
}
|
|
|
|
|
2016-07-20 20:34:42 +00:00
|
|
|
static int nfs4_proc_link(struct inode *inode, struct inode *dir, const struct qstr *name)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
2019-04-07 17:59:09 +00:00
|
|
|
struct nfs4_exception exception = {
|
|
|
|
.interruptible = true,
|
|
|
|
};
|
2005-04-16 22:20:36 +00:00
|
|
|
int err;
|
|
|
|
do {
|
|
|
|
err = nfs4_handle_exception(NFS_SERVER(inode),
|
|
|
|
_nfs4_proc_link(inode, dir, name),
|
|
|
|
&exception);
|
|
|
|
} while (exception.retry);
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2008-06-20 19:35:32 +00:00
|
|
|
/*
 * Bundle of everything needed for one NFSv4 CREATE call (mkdir, symlink,
 * mknod).  Allocated by nfs4_alloc_createdata(), freed by
 * nfs4_free_createdata().
 */
struct nfs4_createdata {
	struct rpc_message msg;		/* points at arg/res below */
	struct nfs4_create_arg arg;	/* CREATE arguments */
	struct nfs4_create_res res;	/* CREATE results */
	struct nfs_fh fh;		/* filehandle returned by the server */
	struct nfs_fattr fattr;		/* attributes returned by the server */
};
|
|
|
|
|
|
|
|
/*
 * nfs4_alloc_createdata - allocate and initialise a CREATE request for
 * object @name of type @ftype in @dir, with attributes @sattr.
 *
 * Also allocates the security label carried in the reply fattr.  Returns
 * NULL on allocation failure.  The caller owns the result and must
 * release it with nfs4_free_createdata().
 */
static struct nfs4_createdata *nfs4_alloc_createdata(struct inode *dir,
		const struct qstr *name, struct iattr *sattr, u32 ftype)
{
	struct nfs4_createdata *data;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (data != NULL) {
		struct nfs_server *server = NFS_SERVER(dir);

		data->fattr.label = nfs4_label_alloc(server, GFP_KERNEL);
		if (IS_ERR(data->fattr.label))
			goto out_free;

		data->msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE];
		data->msg.rpc_argp = &data->arg;
		data->msg.rpc_resp = &data->res;
		data->arg.dir_fh = NFS_FH(dir);
		data->arg.server = server;
		data->arg.name = name;
		data->arg.attrs = sattr;
		data->arg.ftype = ftype;
		data->arg.bitmask = nfs4_bitmask(server, data->fattr.label);
		data->arg.umask = current_umask();
		data->res.server = server;
		data->res.fh = &data->fh;
		data->res.fattr = &data->fattr;
		nfs_fattr_init(data->res.fattr);
	}
	return data;
out_free:
	kfree(data);
	return NULL;
}
|
|
|
|
|
|
|
|
/*
 * nfs4_do_create - issue the prepared CREATE synchronously and, on
 * success, update the parent (change attribute, and nlink for a new
 * directory) under dir->i_lock before instantiating the new dentry.
 */
static int nfs4_do_create(struct inode *dir, struct dentry *dentry, struct nfs4_createdata *data)
{
	int status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &data->msg,
				    &data->arg.seq_args, &data->res.seq_res, 1);
	if (status == 0) {
		spin_lock(&dir->i_lock);
		/* Creating a directory bumps nlink in the parent */
		if (data->arg.ftype == NF4DIR)
			nfs4_inc_nlink_locked(dir);
		nfs4_update_changeattr_locked(dir, &data->res.dir_cinfo,
					      data->res.fattr->time_start,
					      NFS_INO_INVALID_DATA);
		spin_unlock(&dir->i_lock);
		status = nfs_instantiate(dentry, data->res.fh, data->res.fattr);
	}
	return status;
}
|
|
|
|
|
|
|
|
/* Release a CREATE request allocated by nfs4_alloc_createdata(). */
static void nfs4_free_createdata(struct nfs4_createdata *data)
{
	nfs4_label_free(data->fattr.label);
	kfree(data);
}
|
|
|
|
|
2006-08-23 00:06:22 +00:00
|
|
|
/*
 * _nfs4_proc_symlink - issue one NFSv4 CREATE(NF4LNK) for @dentry whose
 * target path (length @len) is held in @folio.
 *
 * Rejects targets longer than NFS4_MAXPATHLEN with -ENAMETOOLONG.
 * Returns 0 or a negative error; the caller handles retries.
 */
static int _nfs4_proc_symlink(struct inode *dir, struct dentry *dentry,
		struct folio *folio, unsigned int len, struct iattr *sattr,
		struct nfs4_label *label)
{
	struct page *page = &folio->page;
	struct nfs4_createdata *data;
	int status = -ENAMETOOLONG;

	if (len > NFS4_MAXPATHLEN)
		goto out;

	status = -ENOMEM;
	data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4LNK);
	if (data == NULL)
		goto out;

	data->msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SYMLINK];
	data->arg.u.symlink.pages = &page;
	data->arg.u.symlink.len = len;
	data->arg.label = label;

	status = nfs4_do_create(dir, dentry, data);

	nfs4_free_createdata(data);
out:
	return status;
}
|
|
|
|
|
2006-08-23 00:06:22 +00:00
|
|
|
/*
 * nfs4_proc_symlink - create a symlink: set up the security label, retry
 * _nfs4_proc_symlink() through the NFSv4 exception handler, then release
 * the label.
 */
static int nfs4_proc_symlink(struct inode *dir, struct dentry *dentry,
		struct folio *folio, unsigned int len, struct iattr *sattr)
{
	struct nfs4_exception exception = {
		.interruptible = true,
	};
	struct nfs4_label l, *label;
	int err;

	label = nfs4_label_init_security(dir, dentry, sattr, &l);

	do {
		err = _nfs4_proc_symlink(dir, dentry, folio, len, sattr, label);
		trace_nfs4_symlink(dir, &dentry->d_name, err);
		err = nfs4_handle_exception(NFS_SERVER(dir), err,
				&exception);
	} while (exception.retry);

	nfs4_label_release_security(label);
	return err;
}
|
|
|
|
|
|
|
|
/*
 * _nfs4_proc_mkdir - issue one NFSv4 CREATE(NF4DIR) for @dentry.
 * Returns 0 or a negative error; the caller handles retries.
 */
static int _nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry,
		struct iattr *sattr, struct nfs4_label *label)
{
	struct nfs4_createdata *data;
	int status = -ENOMEM;

	data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4DIR);
	if (data == NULL)
		goto out;

	data->arg.label = label;
	status = nfs4_do_create(dir, dentry, data);

	nfs4_free_createdata(data);
out:
	return status;
}
|
|
|
|
|
|
|
|
/*
 * nfs4_proc_mkdir - create a directory: set up the security label, apply
 * the umask locally when the server will not (no MODE_UMASK support),
 * then retry _nfs4_proc_mkdir() through the NFSv4 exception handler.
 */
static int nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry,
		struct iattr *sattr)
{
	struct nfs_server *server = NFS_SERVER(dir);
	struct nfs4_exception exception = {
		.interruptible = true,
	};
	struct nfs4_label l, *label;
	int err;

	label = nfs4_label_init_security(dir, dentry, sattr, &l);

	/* Server can't apply the umask itself: do it here */
	if (!(server->attr_bitmask[2] & FATTR4_WORD2_MODE_UMASK))
		sattr->ia_mode &= ~current_umask();
	do {
		err = _nfs4_proc_mkdir(dir, dentry, sattr, label);
		trace_nfs4_mkdir(dir, &dentry->d_name, err);
		err = nfs4_handle_exception(NFS_SERVER(dir), err,
				&exception);
	} while (exception.retry);
	nfs4_label_release_security(label);

	return err;
}
|
|
|
|
|
2020-11-02 22:34:23 +00:00
|
|
|
/*
 * _nfs4_proc_readdir - issue one synchronous NFSv4 READDIR.
 *
 * Chooses the attribute bitmask based on security-label support, sets up
 * the cookie/verifier via nfs4_setup_readdir(), and on success copies the
 * returned cookie verifier into @nr_res and returns the number of bytes
 * decoded (offset by the page base).  The directory's atime is always
 * invalidated since READDIR touches it on the server.
 */
static int _nfs4_proc_readdir(struct nfs_readdir_arg *nr_arg,
			      struct nfs_readdir_res *nr_res)
{
	struct inode *dir = d_inode(nr_arg->dentry);
	struct nfs_server *server = NFS_SERVER(dir);
	struct nfs4_readdir_arg args = {
		.fh = NFS_FH(dir),
		.pages = nr_arg->pages,
		.pgbase = 0,
		.count = nr_arg->page_len,
		.plus = nr_arg->plus,
	};
	struct nfs4_readdir_res res;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READDIR],
		.rpc_argp = &args,
		.rpc_resp = &res,
		.rpc_cred = nr_arg->cred,
	};
	int status;

	dprintk("%s: dentry = %pd2, cookie = %llu\n", __func__,
		nr_arg->dentry, (unsigned long long)nr_arg->cookie);
	/* Skip label attributes when the server doesn't support them */
	if (!(server->caps & NFS_CAP_SECURITY_LABEL))
		args.bitmask = server->attr_bitmask_nl;
	else
		args.bitmask = server->attr_bitmask;

	nfs4_setup_readdir(nr_arg->cookie, nr_arg->verf, nr_arg->dentry, &args);
	res.pgbase = args.pgbase;
	status = nfs4_call_sync(server->client, server, &msg, &args.seq_args,
			&res.seq_res, 0);
	if (status >= 0) {
		memcpy(nr_res->verf, res.verifier.data, NFS4_VERIFIER_SIZE);
		status += args.pgbase;
	}

	nfs_invalidate_atime(dir);

	dprintk("%s: returns %d\n", __func__, status);
	return status;
}
|
|
|
|
|
2020-11-02 22:34:23 +00:00
|
|
|
/*
 * nfs4_proc_readdir - READDIR with the standard NFSv4 exception/retry
 * loop around _nfs4_proc_readdir().
 */
static int nfs4_proc_readdir(struct nfs_readdir_arg *arg,
			     struct nfs_readdir_res *res)
{
	struct nfs4_exception exception = {
		.interruptible = true,
	};
	int err;
	do {
		err = _nfs4_proc_readdir(arg, res);
		trace_nfs4_readdir(d_inode(arg->dentry), err);
		err = nfs4_handle_exception(NFS_SERVER(d_inode(arg->dentry)),
				err, &exception);
	} while (exception.retry);
	return err;
}
|
|
|
|
|
|
|
|
/*
 * _nfs4_proc_mknod - issue one NFSv4 CREATE for a special file.
 *
 * The createdata defaults to NF4SOCK and is overridden for FIFOs and
 * block/character devices (which also carry major/minor numbers); any
 * other mode is rejected with -EINVAL.  Returns 0 or a negative error;
 * the caller handles retries.
 */
static int _nfs4_proc_mknod(struct inode *dir, struct dentry *dentry,
		struct iattr *sattr, struct nfs4_label *label, dev_t rdev)
{
	struct nfs4_createdata *data;
	int mode = sattr->ia_mode;
	int status = -ENOMEM;

	data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4SOCK);
	if (data == NULL)
		goto out;

	if (S_ISFIFO(mode))
		data->arg.ftype = NF4FIFO;
	else if (S_ISBLK(mode)) {
		data->arg.ftype = NF4BLK;
		data->arg.u.device.specdata1 = MAJOR(rdev);
		data->arg.u.device.specdata2 = MINOR(rdev);
	}
	else if (S_ISCHR(mode)) {
		data->arg.ftype = NF4CHR;
		data->arg.u.device.specdata1 = MAJOR(rdev);
		data->arg.u.device.specdata2 = MINOR(rdev);
	} else if (!S_ISSOCK(mode)) {
		status = -EINVAL;
		goto out_free;
	}

	data->arg.label = label;
	status = nfs4_do_create(dir, dentry, data);
out_free:
	nfs4_free_createdata(data);
out:
	return status;
}
|
|
|
|
|
|
|
|
/*
 * nfs4_proc_mknod - create a special file: set up the security label,
 * apply the umask locally when the server will not, then retry
 * _nfs4_proc_mknod() through the NFSv4 exception handler.
 */
static int nfs4_proc_mknod(struct inode *dir, struct dentry *dentry,
		struct iattr *sattr, dev_t rdev)
{
	struct nfs_server *server = NFS_SERVER(dir);
	struct nfs4_exception exception = {
		.interruptible = true,
	};
	struct nfs4_label l, *label;
	int err;

	label = nfs4_label_init_security(dir, dentry, sattr, &l);

	/* Server can't apply the umask itself: do it here */
	if (!(server->attr_bitmask[2] & FATTR4_WORD2_MODE_UMASK))
		sattr->ia_mode &= ~current_umask();
	do {
		err = _nfs4_proc_mknod(dir, dentry, sattr, label, rdev);
		trace_nfs4_mknod(dir, &dentry->d_name, err);
		err = nfs4_handle_exception(NFS_SERVER(dir), err,
				&exception);
	} while (exception.retry);

	nfs4_label_release_security(label);

	return err;
}
|
|
|
|
|
|
|
|
/*
 * _nfs4_proc_statfs - issue one synchronous NFSv4 STATFS (GETATTR of the
 * filesystem statistics attributes) for @fhandle.
 */
static int _nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle,
		struct nfs_fsstat *fsstat)
{
	struct nfs4_statfs_arg args = {
		.fh = fhandle,
		.bitmask = server->attr_bitmask,
	};
	struct nfs4_statfs_res res = {
		.fsstat = fsstat,
	};
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_STATFS],
		.rpc_argp = &args,
		.rpc_resp = &res,
	};

	nfs_fattr_init(fsstat->fattr);
	return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
}
|
|
|
|
|
|
|
|
static int nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsstat *fsstat)
|
|
|
|
{
|
2019-04-07 17:59:09 +00:00
|
|
|
struct nfs4_exception exception = {
|
|
|
|
.interruptible = true,
|
|
|
|
};
|
2005-04-16 22:20:36 +00:00
|
|
|
int err;
|
|
|
|
do {
|
|
|
|
err = nfs4_handle_exception(server,
|
|
|
|
_nfs4_proc_statfs(server, fhandle, fsstat),
|
|
|
|
&exception);
|
|
|
|
} while (exception.retry);
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * _nfs4_do_fsinfo - issue one synchronous NFSv4 FSINFO for @fhandle.
 */
static int _nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle,
		struct nfs_fsinfo *fsinfo)
{
	struct nfs4_fsinfo_arg args = {
		.fh = fhandle,
		.bitmask = server->attr_bitmask,
	};
	struct nfs4_fsinfo_res res = {
		.fsinfo = fsinfo,
	};
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSINFO],
		.rpc_argp = &args,
		.rpc_resp = &res,
	};

	return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
}
|
|
|
|
|
|
|
|
/*
 * nfs4_do_fsinfo - FSINFO with the NFSv4 exception/retry loop.  On
 * success, propagates the server-advertised lease time to the client
 * state before returning.
 */
static int nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo)
{
	struct nfs4_exception exception = {
		.interruptible = true,
	};
	int err;

	do {
		err = _nfs4_do_fsinfo(server, fhandle, fsinfo);
		trace_nfs4_fsinfo(server, fhandle, fsinfo->fattr, err);
		if (err == 0) {
			/* Lease time is advertised in seconds; store jiffies */
			nfs4_set_lease_period(server->nfs_client, fsinfo->lease_time * HZ);
			break;
		}
		err = nfs4_handle_exception(server, err, &exception);
	} while (exception.retry);
	return err;
}
|
|
|
|
|
|
|
|
/*
 * nfs4_proc_fsinfo - FSINFO entry point.  On success also records the
 * preferred block size and initialises the pNFS layout driver for this
 * server/filehandle.
 */
static int nfs4_proc_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo)
{
	int error;

	nfs_fattr_init(fsinfo->fattr);
	error = nfs4_do_fsinfo(server, fhandle, fsinfo);
	if (error == 0) {
		/* block layout checks this! */
		server->pnfs_blksize = fsinfo->blksize;
		set_pnfs_layoutdriver(server, fhandle, fsinfo);
	}

	return error;
}
|
|
|
|
|
|
|
|
/*
 * _nfs4_proc_pathconf - issue one synchronous NFSv4 PATHCONF for
 * @fhandle.  If the server supports none of the pathconf attributes,
 * short-circuit with a zeroed result instead of going to the wire.
 */
static int _nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
		struct nfs_pathconf *pathconf)
{
	struct nfs4_pathconf_arg args = {
		.fh = fhandle,
		.bitmask = server->attr_bitmask,
	};
	struct nfs4_pathconf_res res = {
		.pathconf = pathconf,
	};
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_PATHCONF],
		.rpc_argp = &args,
		.rpc_resp = &res,
	};

	/* None of the pathconf attributes are mandatory to implement */
	if ((args.bitmask[0] & nfs4_pathconf_bitmap[0]) == 0) {
		memset(pathconf, 0, sizeof(*pathconf));
		return 0;
	}

	nfs_fattr_init(pathconf->fattr);
	return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
}
|
|
|
|
|
|
|
|
static int nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
|
|
|
|
struct nfs_pathconf *pathconf)
|
|
|
|
{
|
2019-04-07 17:59:09 +00:00
|
|
|
struct nfs4_exception exception = {
|
|
|
|
.interruptible = true,
|
|
|
|
};
|
2005-04-16 22:20:36 +00:00
|
|
|
int err;
|
|
|
|
|
|
|
|
do {
|
|
|
|
err = nfs4_handle_exception(server,
|
|
|
|
_nfs4_proc_pathconf(server, fhandle, pathconf),
|
|
|
|
&exception);
|
|
|
|
} while (exception.retry);
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2013-03-17 00:54:34 +00:00
|
|
|
/*
 * nfs4_set_rw_stateid - select the stateid to use for a READ/WRITE on
 * behalf of @ctx/@l_ctx with open mode @fmode.  Thin wrapper around
 * nfs4_select_rw_stateid(); exported for pNFS layout drivers.
 */
int nfs4_set_rw_stateid(nfs4_stateid *stateid,
		const struct nfs_open_context *ctx,
		const struct nfs_lock_context *l_ctx,
		fmode_t fmode)
{
	return nfs4_select_rw_stateid(ctx->state, fmode, l_ctx, stateid, NULL);
}
EXPORT_SYMBOL_GPL(nfs4_set_rw_stateid);
|
|
|
|
|
2013-03-17 00:54:34 +00:00
|
|
|
/*
 * nfs4_stateid_is_current - does @stateid still match the stateid we
 * would select today for this open/lock context and mode?
 * Used to decide whether a failed I/O should be retried with a fresh
 * stateid.  A lost lock (-EIO from nfs4_set_rw_stateid) is reported as
 * "current" so the caller does not retry pointlessly.
 */
static bool nfs4_stateid_is_current(nfs4_stateid *stateid,
		const struct nfs_open_context *ctx,
		const struct nfs_lock_context *l_ctx,
		fmode_t fmode)
{
	nfs4_stateid _current_stateid;

	/* If the current stateid represents a lost lock, then exit */
	if (nfs4_set_rw_stateid(&_current_stateid, ctx, l_ctx, fmode) == -EIO)
		return true;
	return nfs4_stateid_match(stateid, &_current_stateid);
}
|
|
|
|
|
|
|
|
static bool nfs4_error_stateid_expired(int err)
|
|
|
|
{
|
|
|
|
switch (err) {
|
|
|
|
case -NFS4ERR_DELEG_REVOKED:
|
|
|
|
case -NFS4ERR_ADMIN_REVOKED:
|
|
|
|
case -NFS4ERR_BAD_STATEID:
|
|
|
|
case -NFS4ERR_STALE_STATEID:
|
|
|
|
case -NFS4ERR_OLD_STATEID:
|
|
|
|
case -NFS4ERR_OPENMODE:
|
|
|
|
case -NFS4ERR_EXPIRED:
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2014-06-09 15:48:35 +00:00
|
|
|
/*
 * nfs4_read_done_cb - default completion handler for an NFSv4 READ.
 * Routes errors through the async exception machinery (restarting the
 * task and returning -EAGAIN when a retry is requested) and renews the
 * lease on a successful read.
 */
static int nfs4_read_done_cb(struct rpc_task *task, struct nfs_pgio_header *hdr)
{
	struct nfs_server *server = NFS_SERVER(hdr->inode);

	trace_nfs4_read(hdr, task->tk_status);
	if (task->tk_status < 0) {
		struct nfs4_exception exception = {
			.inode = hdr->inode,
			.state = hdr->args.context->state,
			.stateid = &hdr->args.stateid,
		};
		task->tk_status = nfs4_async_handle_exception(task,
				server, task->tk_status, &exception);
		if (exception.retry) {
			rpc_restart_call_prepare(task);
			return -EAGAIN;
		}
	}

	/* Any data received proves the lease is still good */
	if (task->tk_status > 0)
		renew_lease(server, hdr->timestamp);
	return 0;
}
|
|
|
|
|
2013-03-17 00:54:34 +00:00
|
|
|
/*
 * nfs4_read_stateid_changed - if the READ failed with a stateid-expiry
 * error and the stateid we used is stale, restart the task (which will
 * pick up a fresh stateid) and return true; otherwise return false.
 */
static bool nfs4_read_stateid_changed(struct rpc_task *task,
		struct nfs_pgio_args *args)
{

	if (!nfs4_error_stateid_expired(task->tk_status) ||
		nfs4_stateid_is_current(&args->stateid,
				args->context,
				args->lock_context,
				FMODE_READ))
		return false;
	rpc_restart_call_prepare(task);
	return true;
}
|
|
|
|
|
2014-05-28 17:41:22 +00:00
|
|
|
/*
 * nfs4_read_plus_not_supported - fallback when the server rejects
 * READ_PLUS with -ENOTSUPP: permanently drop the capability for this
 * server, rewrite the message to plain READ, restart the task and
 * return true.  Returns false when no fallback was needed.
 */
static bool nfs4_read_plus_not_supported(struct rpc_task *task,
					 struct nfs_pgio_header *hdr)
{
	struct nfs_server *server = NFS_SERVER(hdr->inode);
	struct rpc_message *msg = &task->tk_msg;

	if (msg->rpc_proc == &nfs4_procedures[NFSPROC4_CLNT_READ_PLUS] &&
	    server->caps & NFS_CAP_READ_PLUS && task->tk_status == -ENOTSUPP) {
		server->caps &= ~NFS_CAP_READ_PLUS;
		msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READ];
		rpc_restart_call_prepare(task);
		return true;
	}
	return false;
}
|
|
|
|
|
2023-05-16 15:19:25 +00:00
|
|
|
/*
 * Completion handler for an NFSv4 READ.  Returns -EAGAIN when the RPC has
 * been (or must be) restarted, otherwise defers to the per-layout done
 * callback or the generic nfs4_read_done_cb().
 */
static int nfs4_read_done(struct rpc_task *task, struct nfs_pgio_header *hdr)
{
	/* Session slot/sequence bookkeeping must complete first */
	if (!nfs4_sequence_done(task, &hdr->res.seq_res))
		return -EAGAIN;
	/* Stateid raced with open/lock state change: call was restarted */
	if (nfs4_read_stateid_changed(task, &hdr->args))
		return -EAGAIN;
	/* Server rejected READ_PLUS: call was restarted as plain READ */
	if (nfs4_read_plus_not_supported(task, hdr))
		return -EAGAIN;
	/* A successful read means the server's atime moved on */
	if (task->tk_status > 0)
		nfs_invalidate_atime(hdr->inode);
	return hdr->pgio_done_cb ? hdr->pgio_done_cb(task, hdr) :
				   nfs4_read_done_cb(task, hdr);
}
|
|
|
|
|
2020-12-03 20:18:39 +00:00
|
|
|
#if defined CONFIG_NFS_V4_2 && defined CONFIG_NFS_V4_2_READ_PLUS
/*
 * Try to set up a READ_PLUS request: switch the RPC procedure and allocate
 * the scratch buffer needed for decoding.  Returns true on success, false
 * if READ_PLUS cannot (or should not) be used for this request.
 */
static bool nfs42_read_plus_support(struct nfs_pgio_header *hdr,
				    struct rpc_message *msg)
{
	/* Note: We don't use READ_PLUS with pNFS yet */
	if (nfs_server_capable(hdr->inode, NFS_CAP_READ_PLUS) && !hdr->ds_clp) {
		msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READ_PLUS];
		/* false if the scratch allocation fails; caller falls
		 * back to plain READ in that case */
		return nfs_read_alloc_scratch(hdr, READ_PLUS_SCRATCH_SIZE);
	}
	return false;
}
#else
/* READ_PLUS compiled out: never use it */
static bool nfs42_read_plus_support(struct nfs_pgio_header *hdr,
				    struct rpc_message *msg)
{
	return false;
}
#endif /* CONFIG_NFS_V4_2 && CONFIG_NFS_V4_2_READ_PLUS */
|
|
|
|
|
2014-06-09 15:48:35 +00:00
|
|
|
/*
 * Prepare an NFSv4 READ: record the lease-renewal timestamp, install the
 * default done callback, pick READ vs READ_PLUS, and initialize the
 * session sequence arguments.
 */
static void nfs4_proc_read_setup(struct nfs_pgio_header *hdr,
				 struct rpc_message *msg)
{
	/* Timestamp used by renew_lease() on successful completion */
	hdr->timestamp = jiffies;
	if (!hdr->pgio_done_cb)
		hdr->pgio_done_cb = nfs4_read_done_cb;
	/* Fall back to plain READ when READ_PLUS can't be set up */
	if (!nfs42_read_plus_support(hdr, msg))
		msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READ];
	nfs4_init_sequence(&hdr->args.seq_args, &hdr->res.seq_res, 0, 0);
}
|
|
|
|
|
2014-06-09 15:48:35 +00:00
|
|
|
/*
 * rpc_prepare callback for paged I/O (READ/WRITE).  Acquires a session
 * slot, then refreshes the stateid from the open/lock context.  Returns 0
 * when the task may proceed (or is queued for a slot), -EIO when the
 * stateid or context is no longer usable.
 */
static int nfs4_proc_pgio_rpc_prepare(struct rpc_task *task,
				      struct nfs_pgio_header *hdr)
{
	/* Non-zero means the task was put to sleep waiting for a slot */
	if (nfs4_setup_sequence(NFS_SERVER(hdr->inode)->nfs_client,
				&hdr->args.seq_args,
				&hdr->res.seq_res,
				task))
		return 0;
	/* Re-derive the stateid in case open/lock state changed since
	 * the request was queued */
	if (nfs4_set_rw_stateid(&hdr->args.stateid, hdr->args.context,
				hdr->args.lock_context,
				hdr->rw_mode) == -EIO)
		return -EIO;
	/* Context marked bad by state recovery: fail the request */
	if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags)))
		return -EIO;
	return 0;
}
|
|
|
|
|
2014-06-09 15:48:35 +00:00
|
|
|
/*
 * Default completion callback for an NFSv4 WRITE.  Runs NFSv4 error
 * recovery on failure (possibly restarting the call), and on success
 * renews the lease and folds the post-op attributes into the inode.
 */
static int nfs4_write_done_cb(struct rpc_task *task,
			      struct nfs_pgio_header *hdr)
{
	struct inode *inode = hdr->inode;

	trace_nfs4_write(hdr, task->tk_status);
	if (task->tk_status < 0) {
		struct nfs4_exception exception = {
			.inode = hdr->inode,
			.state = hdr->args.context->state,
			.stateid = &hdr->args.stateid,
		};
		/* May map the error, schedule state recovery, and/or
		 * request a retry via exception.retry */
		task->tk_status = nfs4_async_handle_exception(task,
				NFS_SERVER(inode), task->tk_status,
				&exception);
		if (exception.retry) {
			rpc_restart_call_prepare(task);
			return -EAGAIN;
		}
	}
	if (task->tk_status >= 0) {
		renew_lease(NFS_SERVER(inode), hdr->timestamp);
		/* Apply WRITE verifier/attr results to the inode cache */
		nfs_writeback_update_inode(hdr);
	}
	return 0;
}
|
|
|
|
|
2013-03-17 00:54:34 +00:00
|
|
|
static bool nfs4_write_stateid_changed(struct rpc_task *task,
|
2014-05-06 13:12:24 +00:00
|
|
|
struct nfs_pgio_args *args)
|
2013-03-17 00:54:34 +00:00
|
|
|
{
|
|
|
|
|
|
|
|
if (!nfs4_error_stateid_expired(task->tk_status) ||
|
|
|
|
nfs4_stateid_is_current(&args->stateid,
|
|
|
|
args->context,
|
|
|
|
args->lock_context,
|
|
|
|
FMODE_WRITE))
|
|
|
|
return false;
|
|
|
|
rpc_restart_call_prepare(task);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2014-06-09 15:48:35 +00:00
|
|
|
/*
 * Completion handler for an NFSv4 WRITE.  Returns -EAGAIN when the RPC
 * has been (or must be) restarted, otherwise defers to the per-layout
 * done callback or the generic nfs4_write_done_cb().
 */
static int nfs4_write_done(struct rpc_task *task, struct nfs_pgio_header *hdr)
{
	/* Session slot/sequence bookkeeping must complete first */
	if (!nfs4_sequence_done(task, &hdr->res.seq_res))
		return -EAGAIN;
	/* Stateid raced with open/lock state change: call was restarted */
	if (nfs4_write_stateid_changed(task, &hdr->args))
		return -EAGAIN;
	return hdr->pgio_done_cb ? hdr->pgio_done_cb(task, hdr) :
				   nfs4_write_done_cb(task, hdr);
}
|
|
|
|
|
2012-04-28 18:55:16 +00:00
|
|
|
static
|
2014-06-09 15:48:35 +00:00
|
|
|
bool nfs4_write_need_cache_consistency_data(struct nfs_pgio_header *hdr)
|
2011-03-03 15:13:47 +00:00
|
|
|
{
|
2012-04-28 18:55:16 +00:00
|
|
|
/* Don't request attributes for pNFS or O_DIRECT writes */
|
2014-06-09 15:48:35 +00:00
|
|
|
if (hdr->ds_clp != NULL || hdr->dreq != NULL)
|
2012-04-28 18:55:16 +00:00
|
|
|
return false;
|
|
|
|
/* Otherwise, request attributes if and only if we don't hold
|
|
|
|
* a delegation
|
|
|
|
*/
|
2012-06-20 19:53:43 +00:00
|
|
|
return nfs4_have_delegation(hdr->inode, FMODE_READ) == 0;
|
2011-03-03 15:13:47 +00:00
|
|
|
}
|
|
|
|
|
2021-12-27 19:40:51 +00:00
|
|
|
/*
 * Build the attribute bitmask for a request: start from @src, add the
 * attributes corresponding to every invalid portion of the inode's cache
 * (the caller may force extra bits via @cache_validity), then trim the
 * result down to what the server actually supports.
 */
void nfs4_bitmask_set(__u32 bitmask[], const __u32 src[],
		      struct inode *inode, unsigned long cache_validity)
{
	/* cache-validity flag -> (bitmap word, attribute bits) mapping */
	static const struct {
		unsigned long validity;
		unsigned int word;
		__u32 attrs;
	} map[] = {
		{ NFS_INO_INVALID_CHANGE, 0, FATTR4_WORD0_CHANGE },
		{ NFS_INO_INVALID_SIZE,   0, FATTR4_WORD0_SIZE },
		{ NFS_INO_INVALID_ATIME,  1, FATTR4_WORD1_TIME_ACCESS },
		{ NFS_INO_INVALID_MODE,   1, FATTR4_WORD1_MODE },
		{ NFS_INO_INVALID_OTHER,  1,
		  FATTR4_WORD1_OWNER | FATTR4_WORD1_OWNER_GROUP },
		{ NFS_INO_INVALID_NLINK,  1, FATTR4_WORD1_NUMLINKS },
		{ NFS_INO_INVALID_CTIME,  1, FATTR4_WORD1_TIME_METADATA },
		{ NFS_INO_INVALID_MTIME,  1, FATTR4_WORD1_TIME_MODIFY },
		{ NFS_INO_INVALID_BLOCKS, 1, FATTR4_WORD1_SPACE_USED },
	};
	struct nfs_server *server = NFS_SERVER(inode);
	unsigned int i;

	memcpy(bitmask, src, sizeof(*bitmask) * NFS4_BITMASK_SZ);
	cache_validity |= READ_ONCE(NFS_I(inode)->cache_validity);

	for (i = 0; i < ARRAY_SIZE(map); i++) {
		if (cache_validity & map[i].validity)
			bitmask[map[i].word] |= map[i].attrs;
	}

	/* Never request attributes the server doesn't support */
	for (i = 0; i < NFS4_BITMASK_SZ; i++)
		bitmask[i] &= server->attr_bitmask[i];
}
|
|
|
|
|
2014-06-09 15:48:35 +00:00
|
|
|
/*
 * Prepare an NFSv4 WRITE: decide whether to request cache-consistency
 * attributes, install the default done callback, timestamp for lease
 * renewal, and apply any machine-credential state protection.
 */
static void nfs4_proc_write_setup(struct nfs_pgio_header *hdr,
				  struct rpc_message *msg,
				  struct rpc_clnt **clnt)
{
	struct nfs_server *server = NFS_SERVER(hdr->inode);

	if (!nfs4_write_need_cache_consistency_data(hdr)) {
		/* Skip post-op attributes entirely */
		hdr->args.bitmask = NULL;
		hdr->res.fattr = NULL;
	} else {
		/* Always refresh SPACE_USED in addition to whatever the
		 * cache currently considers invalid */
		nfs4_bitmask_set(hdr->args.bitmask_store,
				 server->cache_consistency_bitmask,
				 hdr->inode, NFS_INO_INVALID_BLOCKS);
		hdr->args.bitmask = hdr->args.bitmask_store;
	}

	if (!hdr->pgio_done_cb)
		hdr->pgio_done_cb = nfs4_write_done_cb;
	hdr->res.server = server;
	/* Timestamp used by renew_lease() on successful completion */
	hdr->timestamp = jiffies;

	msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_WRITE];
	nfs4_init_sequence(&hdr->args.seq_args, &hdr->res.seq_res, 0, 0);
	/* For pNFS data-server writes, protect against the DS client */
	nfs4_state_protect_write(hdr->ds_clp ? hdr->ds_clp : server->nfs_client, clnt, msg, hdr);
}
|
|
|
|
|
2012-04-20 18:47:39 +00:00
|
|
|
/* rpc_prepare callback for COMMIT: acquire a session slot/sequence. */
static void nfs4_proc_commit_rpc_prepare(struct rpc_task *task, struct nfs_commit_data *data)
{
	nfs4_setup_sequence(NFS_SERVER(data->inode)->nfs_client,
			    &data->args.seq_args,
			    &data->res.seq_res,
			    task);
}
|
|
|
|
|
2012-04-20 18:47:39 +00:00
|
|
|
static int nfs4_commit_done_cb(struct rpc_task *task, struct nfs_commit_data *data)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
|
|
|
struct inode *inode = data->inode;
|
2010-07-31 18:29:06 +00:00
|
|
|
|
2013-08-14 19:31:28 +00:00
|
|
|
trace_nfs4_commit(data, task->tk_status);
|
2014-09-18 06:09:27 +00:00
|
|
|
if (nfs4_async_handle_error(task, NFS_SERVER(inode),
|
|
|
|
NULL, NULL) == -EAGAIN) {
|
2011-10-19 19:17:29 +00:00
|
|
|
rpc_restart_call_prepare(task);
|
2006-03-20 18:44:27 +00:00
|
|
|
return -EAGAIN;
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
2006-03-20 18:44:27 +00:00
|
|
|
return 0;
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
2012-04-20 18:47:39 +00:00
|
|
|
/*
 * Completion handler for COMMIT: finish session sequencing, then defer to
 * the installed commit_done callback.
 */
static int nfs4_commit_done(struct rpc_task *task, struct nfs_commit_data *data)
{
	if (!nfs4_sequence_done(task, &data->res.seq_res))
		return -EAGAIN;
	return data->commit_done_cb(task, data);
}
|
|
|
|
|
2018-05-04 20:22:49 +00:00
|
|
|
/*
 * Prepare a COMMIT: install the default done callback, the result server
 * pointer, the RPC procedure, and any machine-credential protection.
 */
static void nfs4_proc_commit_setup(struct nfs_commit_data *data, struct rpc_message *msg,
				   struct rpc_clnt **clnt)
{
	struct nfs_server *server = NFS_SERVER(data->inode);

	if (data->commit_done_cb == NULL)
		data->commit_done_cb = nfs4_commit_done_cb;
	data->res.server = server;
	msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COMMIT];
	nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1, 0);
	/* For pNFS data-server commits, protect against the DS client */
	nfs4_state_protect(data->ds_clp ? data->ds_clp : server->nfs_client,
			   NFS_SP4_MACH_CRED_COMMIT, clnt, msg);
}
|
|
|
|
|
2018-07-09 19:13:36 +00:00
|
|
|
/*
 * Issue one synchronous COMMIT for @dst.  @args carries offset/count;
 * @res receives the verifier.  Returns 0 or a negative NFS error; the
 * caller handles retryable errors.
 */
static int _nfs4_proc_commit(struct file *dst, struct nfs_commitargs *args,
			     struct nfs_commitres *res)
{
	struct inode *dst_inode = file_inode(dst);
	struct nfs_server *server = NFS_SERVER(dst_inode);
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COMMIT],
		.rpc_argp = args,
		.rpc_resp = res,
	};

	args->fh = NFS_FH(dst_inode);
	return nfs4_call_sync(server->client, server, &msg,
			      &args->seq_args, &res->seq_res, 1);
}
|
|
|
|
|
|
|
|
/*
 * Synchronous COMMIT of [offset, offset+count) on @dst, with standard
 * NFSv4 exception handling (retries on recoverable errors).
 */
int nfs4_proc_commit(struct file *dst, __u64 offset, __u32 count, struct nfs_commitres *res)
{
	struct nfs_commitargs args = {
		.offset = offset,
		.count = count,
	};
	struct nfs_server *dst_server = NFS_SERVER(file_inode(dst));
	struct nfs4_exception exception = { };
	int status;

	do {
		status = _nfs4_proc_commit(dst, &args, res);
		/* May sleep/recover state and request a retry */
		status = nfs4_handle_exception(dst_server, status, &exception);
	} while (exception.retry);

	return status;
}
|
|
|
|
|
2010-05-07 17:34:17 +00:00
|
|
|
/* Per-call context passed from nfs4_proc_async_renew() to its callbacks. */
struct nfs4_renewdata {
	struct nfs_client *client;	/* client whose lease is renewed */
	unsigned long timestamp;	/* jiffies when RENEW was launched */
};
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
/*
|
|
|
|
* nfs4_proc_async_renew(): This is not one of the nfs_rpc_ops; it is a special
|
|
|
|
* standalone procedure for queueing an asynchronous RENEW.
|
|
|
|
*/
|
2010-05-07 17:34:17 +00:00
|
|
|
/*
 * rpc_release callback for the async RENEW: reschedule the next renewal
 * (unless we hold the last reference, i.e. the client is going away),
 * then drop our reference and free the call data.
 */
static void nfs4_renew_release(void *calldata)
{
	struct nfs4_renewdata *data = calldata;
	struct nfs_client *clp = data->client;

	/* refcount > 1 means someone besides us still uses the client */
	if (refcount_read(&clp->cl_count) > 1)
		nfs4_schedule_state_renewal(clp);
	nfs_put_client(clp);
	kfree(data);
}
|
|
|
|
|
2010-05-07 17:34:17 +00:00
|
|
|
/*
 * rpc_call_done callback for the async RENEW.  On success the lease
 * timestamp is refreshed; on failure the appropriate state recovery is
 * scheduled (unless the renewal machinery is shutting down).
 */
static void nfs4_renew_done(struct rpc_task *task, void *calldata)
{
	struct nfs4_renewdata *data = calldata;
	struct nfs_client *clp = data->client;
	unsigned long timestamp = data->timestamp;

	trace_nfs4_renew_async(clp, task->tk_status);
	switch (task->tk_status) {
	case 0:
		break;
	case -NFS4ERR_LEASE_MOVED:
		nfs4_schedule_lease_moved_recovery(clp);
		break;
	default:
		/* Unless we're shutting down, schedule state recovery! */
		if (test_bit(NFS_CS_RENEWD, &clp->cl_res_state) == 0)
			return;
		/* NOTE(review): tk_status carries negative NFS errors
		 * elsewhere in this file, yet this compares against the
		 * positive NFS4ERR_CB_PATH_DOWN constant — confirm this
		 * branch behaves as intended. */
		if (task->tk_status != NFS4ERR_CB_PATH_DOWN) {
			nfs4_schedule_lease_recovery(clp);
			return;
		}
		nfs4_schedule_path_down_recovery(clp);
	}
	do_renew_lease(clp, timestamp);
}
|
|
|
|
|
2006-01-03 08:55:04 +00:00
|
|
|
/* Callbacks for the asynchronous RENEW RPC. */
static const struct rpc_call_ops nfs4_renew_ops = {
	.rpc_call_done = nfs4_renew_done,
	.rpc_release = nfs4_renew_release,
};
|
|
|
|
|
2018-12-03 00:30:31 +00:00
|
|
|
/*
 * Queue an asynchronous RENEW for @clp using @cred.  Takes a client
 * reference that nfs4_renew_release() drops.  Returns 0 when nothing
 * needs doing (no renew_flags), -EIO when the client is being torn down,
 * -ENOMEM on allocation failure, else the rpc_call_async() result.
 */
static int nfs4_proc_async_renew(struct nfs_client *clp, const struct cred *cred, unsigned renew_flags)
{
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW],
		.rpc_argp = clp,
		.rpc_cred = cred,
	};
	struct nfs4_renewdata *data;

	if (renew_flags == 0)
		return 0;
	/* Refuse to pin a client whose refcount already hit zero */
	if (!refcount_inc_not_zero(&clp->cl_count))
		return -EIO;
	data = kmalloc(sizeof(*data), GFP_NOFS);
	if (data == NULL) {
		nfs_put_client(clp);
		return -ENOMEM;
	}
	data->client = clp;
	/* Recorded before launch so lease renewal is conservative */
	data->timestamp = jiffies;
	return rpc_call_async(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT,
			&nfs4_renew_ops, data);
}
|
|
|
|
|
2018-12-03 00:30:31 +00:00
|
|
|
/*
 * Synchronous RENEW for @clp.  On success the lease timestamp is updated
 * to the time the call was launched (conservative).
 */
static int nfs4_proc_renew(struct nfs_client *clp, const struct cred *cred)
{
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW],
		.rpc_argp = clp,
		.rpc_cred = cred,
	};
	unsigned long now = jiffies;	/* sampled before the RPC */
	int status;

	status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
	if (status < 0)
		return status;
	do_renew_lease(clp, now);
	return 0;
}
|
|
|
|
|
2022-05-14 14:36:58 +00:00
|
|
|
static bool nfs4_server_supports_acls(const struct nfs_server *server,
|
|
|
|
enum nfs4_acl_type type)
|
2005-06-22 17:16:22 +00:00
|
|
|
{
|
2022-05-14 14:36:58 +00:00
|
|
|
switch (type) {
|
|
|
|
default:
|
|
|
|
return server->attr_bitmask[0] & FATTR4_WORD0_ACL;
|
|
|
|
case NFS4ACL_DACL:
|
|
|
|
return server->attr_bitmask[1] & FATTR4_WORD1_DACL;
|
|
|
|
case NFS4ACL_SACL:
|
|
|
|
return server->attr_bitmask[1] & FATTR4_WORD1_SACL;
|
|
|
|
}
|
2005-06-22 17:16:22 +00:00
|
|
|
}
|
|
|
|
|
2012-08-24 14:59:25 +00:00
|
|
|
/* Assuming that XATTR_SIZE_MAX is a multiple of PAGE_SIZE, and that
 * it's OK to put sizeof(void *) * (XATTR_SIZE_MAX/PAGE_SIZE) bytes on
 * the stack.
 */
#define NFS4ACL_MAXPAGES DIV_ROUND_UP(XATTR_SIZE_MAX, PAGE_SIZE)
|
2005-06-22 17:16:22 +00:00
|
|
|
|
2020-06-23 22:39:01 +00:00
|
|
|
/*
 * Copy @buflen bytes from @buf into freshly allocated (non-slab) pages
 * stored in @pages.  Pages come from the buddy allocator so they can
 * safely end up on an skb frag list and be released with put_page().
 * Returns the number of pages filled, or -ENOMEM (with all pages freed).
 * Caller is responsible for releasing the pages on success.
 */
int nfs4_buf_to_pages_noslab(const void *buf, size_t buflen,
		struct page **pages)
{
	struct page *newpage, **spages;
	int rc = 0;
	size_t len;
	spages = pages;	/* remember the start for unwinding */

	do {
		len = min_t(size_t, PAGE_SIZE, buflen);
		newpage = alloc_page(GFP_KERNEL);

		if (newpage == NULL)
			goto unwind;
		memcpy(page_address(newpage), buf, len);
		buf += len;
		buflen -= len;
		*pages++ = newpage;
		rc++;
	} while (buflen != 0);

	return rc;

unwind:
	/* Free everything allocated so far, most-recent first */
	for(; rc > 0; rc--)
		__free_page(spages[rc-1]);
	return -ENOMEM;
}
|
|
|
|
|
2005-06-22 17:16:23 +00:00
|
|
|
/* Per-inode cached ACL.  When the ACL fits in a page, data[] holds the
 * body and cached is set; otherwise only the length is remembered. */
struct nfs4_cached_acl {
	enum nfs4_acl_type type;	/* ACL flavor this entry caches */
	int cached;		/* non-zero when data[] holds the body */
	size_t len;		/* ACL length in bytes */
	char data[];		/* flexible array: the ACL body, if cached */
};
|
|
|
|
|
|
|
|
/*
 * Install @acl (may be NULL to clear) as the inode's cached ACL under
 * i_lock, freeing any previous entry.  Ownership of @acl transfers here.
 */
static void nfs4_set_cached_acl(struct inode *inode, struct nfs4_cached_acl *acl)
{
	struct nfs_inode *nfsi = NFS_I(inode);

	spin_lock(&inode->i_lock);
	kfree(nfsi->nfs4_acl);
	nfsi->nfs4_acl = acl;
	spin_unlock(&inode->i_lock);
}
|
|
|
|
|
|
|
|
/* Drop the cached ACL for @inode (NULL clears and frees the old entry). */
static void nfs4_zap_acl_attr(struct inode *inode)
{
	nfs4_set_cached_acl(inode, NULL);
}
|
|
|
|
|
2022-05-14 14:36:58 +00:00
|
|
|
/*
 * Serve a getxattr-style ACL read from the inode's cache.  With @buf NULL
 * only the length is returned.  Returns the ACL length, -ENOENT when the
 * cache cannot satisfy the request (missing, wrong type, or body not
 * cached), or -ERANGE when @buflen is too small.
 */
static ssize_t nfs4_read_cached_acl(struct inode *inode, char *buf,
				    size_t buflen, enum nfs4_acl_type type)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs4_cached_acl *acl;
	int ret = -ENOENT;

	spin_lock(&inode->i_lock);
	acl = nfsi->nfs4_acl;
	if (acl == NULL)
		goto out;
	/* Cache holds a different ACL flavor */
	if (acl->type != type)
		goto out;
	if (buf == NULL) /* user is just asking for length */
		goto out_len;
	/* Only the length was cached; body must be refetched */
	if (acl->cached == 0)
		goto out;
	ret = -ERANGE; /* see getxattr(2) man page */
	if (acl->len > buflen)
		goto out;
	memcpy(buf, acl->data, acl->len);
out_len:
	ret = acl->len;
out:
	spin_unlock(&inode->i_lock);
	return ret;
}
|
|
|
|
|
2022-05-14 14:36:58 +00:00
|
|
|
/*
 * Cache an ACL just fetched from the server.  If entry + body fit in a
 * page, the body is copied out of @pages and cached; otherwise only the
 * length is cached.  On allocation failure the cache is cleared (a NULL
 * entry is installed), which is safe: later reads just miss.
 */
static void nfs4_write_cached_acl(struct inode *inode, struct page **pages,
				  size_t pgbase, size_t acl_len,
				  enum nfs4_acl_type type)
{
	struct nfs4_cached_acl *acl;
	size_t buflen = sizeof(*acl) + acl_len;

	if (buflen <= PAGE_SIZE) {
		acl = kmalloc(buflen, GFP_KERNEL);
		if (acl == NULL)
			goto out;
		acl->cached = 1;
		_copy_from_pages(acl->data, pages, pgbase, acl_len);
	} else {
		/* Too big to cache the body: remember only the length */
		acl = kmalloc(sizeof(*acl), GFP_KERNEL);
		if (acl == NULL)
			goto out;
		acl->cached = 0;
	}
	acl->type = type;
	acl->len = acl_len;
out:
	nfs4_set_cached_acl(inode, acl);
}
|
|
|
|
|
2011-12-07 16:55:27 +00:00
|
|
|
/*
 * The getxattr API returns the required buffer length when called with a
 * NULL buf. The NFSv4 acl tool then calls getxattr again after allocating
 * the required buf. On a NULL buf, we send a page of data to the server
 * guessing that the ACL request can be serviced by a page. If so, we cache
 * up to the page of ACL data, and the 2nd call to getxattr is serviced by
 * the cache. If not so, we throw away the page, and cache the required
 * length. The next getxattr call will then produce another round trip to
 * the server, this time with the input buf of the required size.
 */
static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf,
				       size_t buflen, enum nfs4_acl_type type)
{
	struct page **pages;
	struct nfs_getaclargs args = {
		.fh = NFS_FH(inode),
		.acl_type = type,
		.acl_len = buflen,
	};
	struct nfs_getaclres res = {
		.acl_type = type,
		.acl_len = buflen,
	};
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETACL],
		.rpc_argp = &args,
		.rpc_resp = &res,
	};
	unsigned int npages;
	int ret = -ENOMEM, i;
	struct nfs_server *server = NFS_SERVER(inode);

	/* buflen == 0 is a probe for the ACL length; size the request by
	 * the server's read transfer size instead. */
	if (buflen == 0)
		buflen = server->rsize;

	/* One extra page so XDR data that straddles a page boundary still
	 * fits (see also acl_scratch below). */
	npages = DIV_ROUND_UP(buflen, PAGE_SIZE) + 1;
	pages = kmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	args.acl_pages = pages;

	for (i = 0; i < npages; i++) {
		pages[i] = alloc_page(GFP_KERNEL);
		if (!pages[i])
			goto out_free;
	}

	/* for decoding across pages */
	res.acl_scratch = alloc_page(GFP_KERNEL);
	if (!res.acl_scratch)
		goto out_free;

	args.acl_len = npages * PAGE_SIZE;

	dprintk("%s buf %p buflen %zu npages %d args.acl_len %zu\n",
		__func__, buf, buflen, npages, args.acl_len);
	ret = nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode),
			     &msg, &args.seq_args, &res.seq_res, 0);
	if (ret)
		goto out_free;

	/* Handle the case where the passed-in buffer is too short */
	if (res.acl_flags & NFS4_ACL_TRUNC) {
		/* Did the user only issue a request for the acl length? */
		if (buf == NULL)
			goto out_ok;
		ret = -ERANGE;
		goto out_free;
	}
	nfs4_write_cached_acl(inode, pages, res.acl_data_offset, res.acl_len,
			      type);
	if (buf) {
		if (res.acl_len > buflen) {
			ret = -ERANGE;
			goto out_free;
		}
		_copy_from_pages(buf, pages, res.acl_data_offset, res.acl_len);
	}
out_ok:
	ret = res.acl_len;
out_free:
	/* On the error path i is the index of the first failed allocation,
	 * on success it equals npages, so this frees exactly the pages we
	 * actually obtained. */
	while (--i >= 0)
		__free_page(pages[i]);
	if (res.acl_scratch)
		__free_page(res.acl_scratch);
	kfree(pages);
	return ret;
}
|
|
|
|
|
2022-05-14 14:36:58 +00:00
|
|
|
static ssize_t nfs4_get_acl_uncached(struct inode *inode, void *buf,
|
|
|
|
size_t buflen, enum nfs4_acl_type type)
|
2006-08-24 16:27:15 +00:00
|
|
|
{
|
2019-04-07 17:59:09 +00:00
|
|
|
struct nfs4_exception exception = {
|
|
|
|
.interruptible = true,
|
|
|
|
};
|
2006-08-24 16:27:15 +00:00
|
|
|
ssize_t ret;
|
|
|
|
do {
|
2022-05-14 14:36:58 +00:00
|
|
|
ret = __nfs4_get_acl_uncached(inode, buf, buflen, type);
|
2013-08-12 20:58:42 +00:00
|
|
|
trace_nfs4_get_acl(inode, ret);
|
2006-08-24 16:27:15 +00:00
|
|
|
if (ret >= 0)
|
|
|
|
break;
|
|
|
|
ret = nfs4_handle_exception(NFS_SERVER(inode), ret, &exception);
|
|
|
|
} while (exception.retry);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2022-05-14 14:36:58 +00:00
|
|
|
/*
 * Read an ACL, preferring the client-side cache and falling back to a
 * GETACL round trip when the cache cannot satisfy the request.
 */
static ssize_t nfs4_proc_get_acl(struct inode *inode, void *buf, size_t buflen,
				 enum nfs4_acl_type type)
{
	struct nfs_server *server = NFS_SERVER(inode);
	int status;

	if (!nfs4_server_supports_acls(server, type))
		return -EOPNOTSUPP;

	status = nfs_revalidate_inode(inode, NFS_INO_INVALID_CHANGE);
	if (status < 0)
		return status;

	if (NFS_I(inode)->cache_validity & NFS_INO_INVALID_ACL)
		nfs_zap_acl_cache(inode);

	status = nfs4_read_cached_acl(inode, buf, buflen, type);
	if (status == -ENOENT) {
		/* -ENOENT means there is no ACL cached, or only the ACL
		 * length is cached without the data — ask the server. */
		return nfs4_get_acl_uncached(inode, buf, buflen, type);
	}
	return status;
}
|
|
|
|
|
2022-05-14 14:36:58 +00:00
|
|
|
/*
 * Send a SETACL to the server with @buflen bytes of ACL data from @buf.
 * Returns 0 on success or a negative errno.
 *
 * The caller's buffer is copied into pages freshly allocated by
 * nfs4_buf_to_pages_noslab() rather than mapped with virt_to_page():
 * the network stack releases transmitted pages with put_page(), which
 * is not safe on slab-backed memory.
 */
static int __nfs4_proc_set_acl(struct inode *inode, const void *buf,
			       size_t buflen, enum nfs4_acl_type type)
{
	struct nfs_server *server = NFS_SERVER(inode);
	struct page *pages[NFS4ACL_MAXPAGES];
	struct nfs_setaclargs arg = {
		.fh = NFS_FH(inode),
		.acl_type = type,
		.acl_len = buflen,
		.acl_pages = pages,
	};
	struct nfs_setaclres res;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETACL],
		.rpc_argp = &arg,
		.rpc_resp = &res,
	};
	unsigned int npages = DIV_ROUND_UP(buflen, PAGE_SIZE);
	int ret, i;

	/* You can't remove system.nfs4_acl: */
	if (buflen == 0)
		return -EINVAL;
	if (!nfs4_server_supports_acls(server, type))
		return -EOPNOTSUPP;
	if (npages > ARRAY_SIZE(pages))
		return -ERANGE;
	/* i is the number of pages successfully filled with ACL data */
	i = nfs4_buf_to_pages_noslab(buf, buflen, arg.acl_pages);
	if (i < 0)
		return i;
	nfs4_inode_make_writeable(inode);
	ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);

	/*
	 * Free each page after tx, so the only ref left is
	 * held by the network stack
	 */
	for (; i > 0; i--)
		put_page(pages[i-1]);

	/*
	 * Acl update can result in inode attribute update.
	 * so mark the attribute cache invalid.
	 */
	spin_lock(&inode->i_lock);
	nfs_set_cache_invalid(inode, NFS_INO_INVALID_CHANGE |
			      NFS_INO_INVALID_CTIME |
			      NFS_INO_REVAL_FORCED);
	spin_unlock(&inode->i_lock);
	nfs_access_zap_cache(inode);
	nfs_zap_acl_cache(inode);
	return ret;
}
|
|
|
|
|
2022-05-14 14:36:58 +00:00
|
|
|
static int nfs4_proc_set_acl(struct inode *inode, const void *buf,
|
|
|
|
size_t buflen, enum nfs4_acl_type type)
|
2006-08-24 16:27:15 +00:00
|
|
|
{
|
|
|
|
struct nfs4_exception exception = { };
|
|
|
|
int err;
|
|
|
|
do {
|
2022-05-14 14:36:58 +00:00
|
|
|
err = __nfs4_proc_set_acl(inode, buf, buflen, type);
|
2013-08-12 20:58:42 +00:00
|
|
|
trace_nfs4_set_acl(inode, err);
|
2021-05-19 21:15:10 +00:00
|
|
|
if (err == -NFS4ERR_BADOWNER || err == -NFS4ERR_BADNAME) {
|
|
|
|
/*
|
|
|
|
* no need to retry since the kernel
|
|
|
|
* isn't involved in encoding the ACEs.
|
|
|
|
*/
|
|
|
|
err = -EINVAL;
|
|
|
|
break;
|
|
|
|
}
|
2013-08-12 20:58:42 +00:00
|
|
|
err = nfs4_handle_exception(NFS_SERVER(inode), err,
|
2006-08-24 16:27:15 +00:00
|
|
|
&exception);
|
|
|
|
} while (exception.retry);
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2013-05-22 16:50:44 +00:00
|
|
|
#ifdef CONFIG_NFS_V4_SECURITY_LABEL
|
|
|
|
/*
 * Fetch the inode's security label via a GETATTR restricted to the
 * FATTR4_WORD2_SECURITY_LABEL bitmask, decoding it into @buf.
 * Returns the label length on success, -ENOENT if the server did not
 * return a label, or a negative RPC errno.
 */
static int _nfs4_get_security_label(struct inode *inode, void *buf,
					size_t buflen)
{
	struct nfs_server *server = NFS_SERVER(inode);
	/* label points at the caller's buffer; decode fills it in place */
	struct nfs4_label label = {0, 0, buflen, buf};

	u32 bitmask[3] = { 0, 0, FATTR4_WORD2_SECURITY_LABEL };
	struct nfs_fattr fattr = {
		.label = &label,
	};
	struct nfs4_getattr_arg arg = {
		.fh = NFS_FH(inode),
		.bitmask = bitmask,
	};
	struct nfs4_getattr_res res = {
		.fattr = &fattr,
		.server = server,
	};
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETATTR],
		.rpc_argp = &arg,
		.rpc_resp = &res,
	};
	int ret;

	nfs_fattr_init(&fattr);

	ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 0);
	if (ret)
		return ret;
	/* The server answered but omitted the label attribute */
	if (!(fattr.valid & NFS_ATTR_FATTR_V4_SECURITY_LABEL))
		return -ENOENT;
	return label.len;
}
|
|
|
|
|
|
|
|
static int nfs4_get_security_label(struct inode *inode, void *buf,
|
|
|
|
size_t buflen)
|
|
|
|
{
|
2019-04-07 17:59:09 +00:00
|
|
|
struct nfs4_exception exception = {
|
|
|
|
.interruptible = true,
|
|
|
|
};
|
2013-05-22 16:50:44 +00:00
|
|
|
int err;
|
|
|
|
|
|
|
|
if (!nfs_server_capable(inode, NFS_CAP_SECURITY_LABEL))
|
|
|
|
return -EOPNOTSUPP;
|
|
|
|
|
|
|
|
do {
|
2013-08-12 20:58:42 +00:00
|
|
|
err = _nfs4_get_security_label(inode, buf, buflen);
|
|
|
|
trace_nfs4_get_security_label(inode, err);
|
|
|
|
err = nfs4_handle_exception(NFS_SERVER(inode), err,
|
2013-05-22 16:50:44 +00:00
|
|
|
&exception);
|
|
|
|
} while (exception.retry);
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Push a new security label to the server with a SETATTR whose bitmask
 * requests only FATTR4_WORD2_SECURITY_LABEL; sattr is zeroed so no other
 * attribute is modified. The updated attributes are decoded into @fattr.
 * Returns 0 on success or a negative errno.
 */
static int _nfs4_do_set_security_label(struct inode *inode,
		struct nfs4_label *ilabel,
		struct nfs_fattr *fattr)
{

	struct iattr sattr = {0};
	struct nfs_server *server = NFS_SERVER(inode);
	const u32 bitmask[3] = { 0, 0, FATTR4_WORD2_SECURITY_LABEL };
	struct nfs_setattrargs arg = {
		.fh = NFS_FH(inode),
		.iap = &sattr,
		.server = server,
		.bitmask = bitmask,
		.label = ilabel,
	};
	struct nfs_setattrres res = {
		.fattr = fattr,
		.server = server,
	};
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETATTR],
		.rpc_argp = &arg,
		.rpc_resp = &res,
	};
	int status;

	/* SETATTR is sent with the zero stateid here — presumably because
	 * no open/lock state is tied to a label change; confirm against
	 * the NFSv4 stateid rules if modifying. */
	nfs4_stateid_copy(&arg.stateid, &zero_stateid);

	status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
	if (status)
		dprintk("%s failed: %d\n", __func__, status);

	return status;
}
|
|
|
|
|
|
|
|
static int nfs4_do_set_security_label(struct inode *inode,
|
|
|
|
struct nfs4_label *ilabel,
|
2021-10-22 17:11:08 +00:00
|
|
|
struct nfs_fattr *fattr)
|
2013-05-22 16:50:44 +00:00
|
|
|
{
|
|
|
|
struct nfs4_exception exception = { };
|
|
|
|
int err;
|
|
|
|
|
|
|
|
do {
|
2021-10-22 17:11:08 +00:00
|
|
|
err = _nfs4_do_set_security_label(inode, ilabel, fattr);
|
2013-08-12 20:58:42 +00:00
|
|
|
trace_nfs4_set_security_label(inode, err);
|
|
|
|
err = nfs4_handle_exception(NFS_SERVER(inode), err,
|
2013-05-22 16:50:44 +00:00
|
|
|
&exception);
|
|
|
|
} while (exception.retry);
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
2016-05-27 14:19:30 +00:00
|
|
|
nfs4_set_security_label(struct inode *inode, const void *buf, size_t buflen)
|
2013-05-22 16:50:44 +00:00
|
|
|
{
|
2021-10-22 17:11:08 +00:00
|
|
|
struct nfs4_label ilabel = {0, 0, buflen, (char *)buf };
|
|
|
|
struct nfs_fattr *fattr;
|
2013-05-22 16:50:44 +00:00
|
|
|
int status;
|
|
|
|
|
|
|
|
if (!nfs_server_capable(inode, NFS_CAP_SECURITY_LABEL))
|
|
|
|
return -EOPNOTSUPP;
|
|
|
|
|
2021-10-22 17:11:08 +00:00
|
|
|
fattr = nfs_alloc_fattr_with_label(NFS_SERVER(inode));
|
|
|
|
if (fattr == NULL)
|
|
|
|
return -ENOMEM;
|
2013-05-22 16:50:44 +00:00
|
|
|
|
2021-10-22 17:11:08 +00:00
|
|
|
status = nfs4_do_set_security_label(inode, &ilabel, fattr);
|
2013-05-22 16:50:44 +00:00
|
|
|
if (status == 0)
|
2021-10-22 17:11:12 +00:00
|
|
|
nfs_setsecurity(inode, fattr);
|
2013-05-22 16:50:44 +00:00
|
|
|
|
|
|
|
return status;
|
|
|
|
}
|
|
|
|
#endif /* CONFIG_NFS_V4_SECURITY_LABEL */
|
|
|
|
|
|
|
|
|
NFS: Always use the same SETCLIENTID boot verifier
Currently our NFS client assigns a unique SETCLIENTID boot verifier
for each server IP address it knows about. It's set to CURRENT_TIME
when the struct nfs_client for that server IP is created.
During the SETCLIENTID operation, our client also presents an
nfs_client_id4 string to servers, as an identifier on which the server
can hang all of this client's NFSv4 state. Our client's
nfs_client_id4 string is unique for each server IP address.
An NFSv4 server is obligated to wipe all NFSv4 state associated with
an nfs_client_id4 string when the client presents the same
nfs_client_id4 string along with a changed SETCLIENTID boot verifier.
When our client unmounts the last of a server's shares, it destroys
that server's struct nfs_client. The next time the client mounts that
NFS server, it creates a fresh struct nfs_client with a fresh boot
verifier. On seeing the fresh verifer, the server wipes any previous
NFSv4 state associated with that nfs_client_id4.
However, NFSv4.1 clients are supposed to present the same
nfs_client_id4 string to all servers. And, to support Transparent
State Migration, the same nfs_client_id4 string should be presented
to all NFSv4.0 servers so they recognize that migrated state for this
client belongs with state a server may already have for this client.
(This is known as the Uniform Client String model).
If the nfs_client_id4 string is the same but the boot verifier changes
for each server IP address, SETCLIENTID and EXCHANGE_ID operations
from such a client could unintentionally result in a server wiping a
client's previously obtained lease.
Thus, if our NFS client is going to use a fixed nfs_client_id4 string,
either for NFSv4.0 or NFSv4.1 mounts, our NFS client should use a
boot verifier that does not change depending on server IP address.
Replace our current per-nfs_client boot verifier with a per-nfs_net
boot verifier.
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
2012-05-22 02:45:41 +00:00
|
|
|
static void nfs4_init_boot_verifier(const struct nfs_client *clp,
|
|
|
|
nfs4_verifier *bootverf)
|
2012-03-02 22:14:31 +00:00
|
|
|
{
|
|
|
|
__be32 verf[2];
|
|
|
|
|
2012-05-22 02:45:33 +00:00
|
|
|
if (test_bit(NFS4CLNT_PURGE_STATE, &clp->cl_state)) {
|
|
|
|
/* An impossible timestamp guarantees this value
|
|
|
|
* will never match a generated boot time. */
|
2016-10-01 23:46:26 +00:00
|
|
|
verf[0] = cpu_to_be32(U32_MAX);
|
|
|
|
verf[1] = cpu_to_be32(U32_MAX);
|
2012-05-22 02:45:33 +00:00
|
|
|
} else {
|
NFS: Always use the same SETCLIENTID boot verifier
Currently our NFS client assigns a unique SETCLIENTID boot verifier
for each server IP address it knows about. It's set to CURRENT_TIME
when the struct nfs_client for that server IP is created.
During the SETCLIENTID operation, our client also presents an
nfs_client_id4 string to servers, as an identifier on which the server
can hang all of this client's NFSv4 state. Our client's
nfs_client_id4 string is unique for each server IP address.
An NFSv4 server is obligated to wipe all NFSv4 state associated with
an nfs_client_id4 string when the client presents the same
nfs_client_id4 string along with a changed SETCLIENTID boot verifier.
When our client unmounts the last of a server's shares, it destroys
that server's struct nfs_client. The next time the client mounts that
NFS server, it creates a fresh struct nfs_client with a fresh boot
verifier. On seeing the fresh verifer, the server wipes any previous
NFSv4 state associated with that nfs_client_id4.
However, NFSv4.1 clients are supposed to present the same
nfs_client_id4 string to all servers. And, to support Transparent
State Migration, the same nfs_client_id4 string should be presented
to all NFSv4.0 servers so they recognize that migrated state for this
client belongs with state a server may already have for this client.
(This is known as the Uniform Client String model).
If the nfs_client_id4 string is the same but the boot verifier changes
for each server IP address, SETCLIENTID and EXCHANGE_ID operations
from such a client could unintentionally result in a server wiping a
client's previously obtained lease.
Thus, if our NFS client is going to use a fixed nfs_client_id4 string,
either for NFSv4.0 or NFSv4.1 mounts, our NFS client should use a
boot verifier that does not change depending on server IP address.
Replace our current per-nfs_client boot verifier with a per-nfs_net
boot verifier.
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
2012-05-22 02:45:41 +00:00
|
|
|
struct nfs_net *nn = net_generic(clp->cl_net, nfs_net_id);
|
2016-10-01 23:46:26 +00:00
|
|
|
u64 ns = ktime_to_ns(nn->boot_time);
|
|
|
|
|
|
|
|
verf[0] = cpu_to_be32(ns >> 32);
|
|
|
|
verf[1] = cpu_to_be32(ns);
|
2012-05-22 02:45:33 +00:00
|
|
|
}
|
2012-03-02 22:14:31 +00:00
|
|
|
memcpy(bootverf->data, verf, sizeof(bootverf->data));
|
|
|
|
}
|
|
|
|
|
2020-10-07 22:24:17 +00:00
|
|
|
/*
 * Copy the client-identifier uniquifier into @buf (at most @buflen
 * bytes, always NUL-terminated by strscpy). The per-netns identifier
 * takes precedence; the module-parameter uniquifier is only used when
 * the netns one is absent or empty. Returns the resulting string length.
 */
static size_t
nfs4_get_uniquifier(struct nfs_client *clp, char *buf, size_t buflen)
{
	struct nfs_net *nn = net_generic(clp->cl_net, nfs_net_id);
	struct nfs_netns_client *nn_clp = nn->nfs_client;
	const char *id;

	buf[0] = '\0';

	if (nn_clp) {
		/* identifier is RCU-protected; copy it out under the lock */
		rcu_read_lock();
		id = rcu_dereference(nn_clp->identifier);
		if (id)
			strscpy(buf, id, buflen);
		rcu_read_unlock();
	}

	/* Fall back to the module parameter only if nothing was copied */
	if (nfs4_client_id_uniquifier[0] != '\0' && buf[0] == '\0')
		strscpy(buf, nfs4_client_id_uniquifier, buflen);

	return strlen(buf);
}
|
|
|
|
|
2015-06-09 23:43:59 +00:00
|
|
|
static int
|
|
|
|
nfs4_init_nonuniform_client_string(struct nfs_client *clp)
|
NFS: Use the same nfs_client_id4 for every server
Currently, when identifying itself to NFS servers, the Linux NFS
client uses a unique nfs_client_id4.id string for each server IP
address it talks with. For example, when client A talks to server X,
the client identifies itself using a string like "AX". The
requirements for these strings are specified in detail by RFC 3530
(and bis).
This form of client identification presents a problem for Transparent
State Migration. When client A's state on server X is migrated to
server Y, it continues to be associated with string "AX." But,
according to the rules of client string construction above, client
A will present string "AY" when communicating with server Y.
Server Y thus has no way to know that client A should be associated
with the state migrated from server X. "AX" is all but abandoned,
interfering with establishing fresh state for client A on server Y.
To support transparent state migration, then, NFSv4.0 clients must
instead use the same nfs_client_id4.id string to identify themselves
to every NFS server; something like "A".
Now a client identifies itself as "A" to server X. When a file
system on server X transitions to server Y, and client A identifies
itself as "A" to server Y, Y will know immediately that the state
associated with "A," whether it is native or migrated, is owned by
the client, and can merge both into a single lease.
As a pre-requisite to adding support for NFSv4 migration to the Linux
NFS client, this patch changes the way Linux identifies itself to NFS
servers via the SETCLIENTID (NFSv4 minor version 0) and EXCHANGE_ID
(NFSv4 minor version 1) operations.
In addition to removing the server's IP address from nfs_client_id4,
the Linux NFS client will also no longer use its own source IP address
as part of the nfs_client_id4 string. On multi-homed clients, the
value of this address depends on the address family and network
routing used to contact the server, thus it can be different for each
server.
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
2012-09-14 21:24:21 +00:00
|
|
|
{
|
2020-10-07 22:24:17 +00:00
|
|
|
char buf[NFS4_CLIENT_ID_UNIQ_LEN];
|
|
|
|
size_t buflen;
|
2015-06-09 23:43:59 +00:00
|
|
|
size_t len;
|
|
|
|
char *str;
|
NFS: Use the same nfs_client_id4 for every server
Currently, when identifying itself to NFS servers, the Linux NFS
client uses a unique nfs_client_id4.id string for each server IP
address it talks with. For example, when client A talks to server X,
the client identifies itself using a string like "AX". The
requirements for these strings are specified in detail by RFC 3530
(and bis).
This form of client identification presents a problem for Transparent
State Migration. When client A's state on server X is migrated to
server Y, it continues to be associated with string "AX." But,
according to the rules of client string construction above, client
A will present string "AY" when communicating with server Y.
Server Y thus has no way to know that client A should be associated
with the state migrated from server X. "AX" is all but abandoned,
interfering with establishing fresh state for client A on server Y.
To support transparent state migration, then, NFSv4.0 clients must
instead use the same nfs_client_id4.id string to identify themselves
to every NFS server; something like "A".
Now a client identifies itself as "A" to server X. When a file
system on server X transitions to server Y, and client A identifies
itself as "A" to server Y, Y will know immediately that the state
associated with "A," whether it is native or migrated, is owned by
the client, and can merge both into a single lease.
As a pre-requisite to adding support for NFSv4 migration to the Linux
NFS client, this patch changes the way Linux identifies itself to NFS
servers via the SETCLIENTID (NFSv4 minor version 0) and EXCHANGE_ID
(NFSv4 minor version 1) operations.
In addition to removing the server's IP address from nfs_client_id4,
the Linux NFS client will also no longer use its own source IP address
as part of the nfs_client_id4 string. On multi-homed clients, the
value of this address depends on the address family and network
routing used to contact the server, thus it can be different for each
server.
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
2012-09-14 21:24:21 +00:00
|
|
|
|
2015-01-03 20:16:04 +00:00
|
|
|
if (clp->cl_owner_id != NULL)
|
2015-06-09 23:43:59 +00:00
|
|
|
return 0;
|
2015-08-31 02:53:43 +00:00
|
|
|
|
2015-06-09 23:43:59 +00:00
|
|
|
rcu_read_lock();
|
NFSv4.0: Remove cl_ipaddr from non-UCS client ID
It is possible for two distinct clients to have the same cl_ipaddr:
- if the client admin disables callback with clientaddr=0.0.0.0 on
more than one client
- if two clients behind separate NATs use the same private subnet
number
- if the client admin specifies the same address via clientaddr=
mount option (pointing the server at the same NAT box, for
example)
Because of the way the Linux NFSv4.0 client constructs its client
ID string by default, such clients could interfere with each others'
lease state when mounting the same server:
scnprintf(str, len, "Linux NFSv4.0 %s/%s %s",
clp->cl_ipaddr,
rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_ADDR),
rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_PROTO));
cl_ipaddr is set to the value of the clientaddr= mount option. Two
clients whose addresses are 192.168.3.77 that mount the same server
(whose public IP address is, say, 3.4.5.6) would both generate the
same client ID string when sending a SETCLIENTID:
Linux NFSv4.0 192.168.3.77/3.4.5.6 tcp
and thus the server would not be able to distinguish the clients'
leases. If both clients are using AUTH_SYS when sending SETCLIENTID
then the server could possibly permit the two clients to interfere
with or purge each others' leases.
To better ensure that Linux's NFSv4.0 client ID strings are distinct
in these cases, remove cl_ipaddr from the client ID string and
replace it with something more likely to be unique. Note that the
replacement looks a lot like the uniform client ID string.
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
2018-06-04 14:53:29 +00:00
|
|
|
len = 14 +
|
|
|
|
strlen(clp->cl_rpcclient->cl_nodename) +
|
|
|
|
1 +
|
2015-06-09 23:43:59 +00:00
|
|
|
strlen(rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_ADDR)) +
|
|
|
|
1;
|
|
|
|
rcu_read_unlock();
|
2020-10-07 22:24:17 +00:00
|
|
|
|
2020-10-07 22:24:18 +00:00
|
|
|
buflen = nfs4_get_uniquifier(clp, buf, sizeof(buf));
|
2020-10-07 22:24:17 +00:00
|
|
|
if (buflen)
|
|
|
|
len += buflen + 1;
|
|
|
|
|
2015-06-09 23:43:59 +00:00
|
|
|
if (len > NFS4_OPAQUE_LIMIT + 1)
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Since this string is allocated at mount time, and held until the
|
|
|
|
* nfs_client is destroyed, we can use GFP_KERNEL here w/o worrying
|
|
|
|
* about a memory-reclaim deadlock.
|
|
|
|
*/
|
|
|
|
str = kmalloc(len, GFP_KERNEL);
|
|
|
|
if (!str)
|
|
|
|
return -ENOMEM;
|
2015-01-03 20:16:04 +00:00
|
|
|
|
NFS: Use the same nfs_client_id4 for every server
Currently, when identifying itself to NFS servers, the Linux NFS
client uses a unique nfs_client_id4.id string for each server IP
address it talks with. For example, when client A talks to server X,
the client identifies itself using a string like "AX". The
requirements for these strings are specified in detail by RFC 3530
(and bis).
This form of client identification presents a problem for Transparent
State Migration. When client A's state on server X is migrated to
server Y, it continues to be associated with string "AX." But,
according to the rules of client string construction above, client
A will present string "AY" when communicating with server Y.
Server Y thus has no way to know that client A should be associated
with the state migrated from server X. "AX" is all but abandoned,
interfering with establishing fresh state for client A on server Y.
To support transparent state migration, then, NFSv4.0 clients must
instead use the same nfs_client_id4.id string to identify themselves
to every NFS server; something like "A".
Now a client identifies itself as "A" to server X. When a file
system on server X transitions to server Y, and client A identifies
itself as "A" to server Y, Y will know immediately that the state
associated with "A," whether it is native or migrated, is owned by
the client, and can merge both into a single lease.
As a pre-requisite to adding support for NFSv4 migration to the Linux
NFS client, this patch changes the way Linux identifies itself to NFS
servers via the SETCLIENTID (NFSv4 minor version 0) and EXCHANGE_ID
(NFSv4 minor version 1) operations.
In addition to removing the server's IP address from nfs_client_id4,
the Linux NFS client will also no longer use its own source IP address
as part of the nfs_client_id4 string. On multi-homed clients, the
value of this address depends on the address family and network
routing used to contact the server, thus it can be different for each
server.
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
2012-09-14 21:24:21 +00:00
|
|
|
rcu_read_lock();
|
2020-10-07 22:24:17 +00:00
|
|
|
if (buflen)
|
2018-06-04 14:53:34 +00:00
|
|
|
scnprintf(str, len, "Linux NFSv4.0 %s/%s/%s",
|
2020-10-07 22:24:17 +00:00
|
|
|
clp->cl_rpcclient->cl_nodename, buf,
|
NFSv4.0: Remove cl_ipaddr from non-UCS client ID
It is possible for two distinct clients to have the same cl_ipaddr:
- if the client admin disables callback with clientaddr=0.0.0.0 on
more than one client
- if two clients behind separate NATs use the same private subnet
number
- if the client admin specifies the same address via clientaddr=
mount option (pointing the server at the same NAT box, for
example)
Because of the way the Linux NFSv4.0 client constructs its client
ID string by default, such clients could interfere with each others'
lease state when mounting the same server:
scnprintf(str, len, "Linux NFSv4.0 %s/%s %s",
clp->cl_ipaddr,
rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_ADDR),
rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_PROTO));
cl_ipaddr is set to the value of the clientaddr= mount option. Two
clients whose addresses are 192.168.3.77 that mount the same server
(whose public IP address is, say, 3.4.5.6) would both generate the
same client ID string when sending a SETCLIENTID:
Linux NFSv4.0 192.168.3.77/3.4.5.6 tcp
and thus the server would not be able to distinguish the clients'
leases. If both clients are using AUTH_SYS when sending SETCLIENTID
then the server could possibly permit the two clients to interfere
with or purge each others' leases.
To better ensure that Linux's NFSv4.0 client ID strings are distinct
in these cases, remove cl_ipaddr from the client ID string and
replace it with something more likely to be unique. Note that the
replacement looks a lot like the uniform client ID string.
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
2018-06-04 14:53:29 +00:00
|
|
|
rpc_peeraddr2str(clp->cl_rpcclient,
|
2018-06-04 14:53:34 +00:00
|
|
|
RPC_DISPLAY_ADDR));
|
NFSv4.0: Remove cl_ipaddr from non-UCS client ID
It is possible for two distinct clients to have the same cl_ipaddr:
- if the client admin disables callback with clientaddr=0.0.0.0 on
more than one client
- if two clients behind separate NATs use the same private subnet
number
- if the client admin specifies the same address via clientaddr=
mount option (pointing the server at the same NAT box, for
example)
Because of the way the Linux NFSv4.0 client constructs its client
ID string by default, such clients could interfere with each others'
lease state when mounting the same server:
scnprintf(str, len, "Linux NFSv4.0 %s/%s %s",
clp->cl_ipaddr,
rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_ADDR),
rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_PROTO));
cl_ipaddr is set to the value of the clientaddr= mount option. Two
clients whose addresses are 192.168.3.77 that mount the same server
(whose public IP address is, say, 3.4.5.6) would both generate the
same client ID string when sending a SETCLIENTID:
Linux NFSv4.0 192.168.3.77/3.4.5.6 tcp
and thus the server would not be able to distinguish the clients'
leases. If both clients are using AUTH_SYS when sending SETCLIENTID
then the server could possibly permit the two clients to interfere
with or purge each others' leases.
To better ensure that Linux's NFSv4.0 client ID strings are distinct
in these cases, remove cl_ipaddr from the client ID string and
replace it with something more likely to be unique. Note that the
replacement looks a lot like the uniform client ID string.
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
2018-06-04 14:53:29 +00:00
|
|
|
else
|
2018-06-04 14:53:34 +00:00
|
|
|
scnprintf(str, len, "Linux NFSv4.0 %s/%s",
|
NFSv4.0: Remove cl_ipaddr from non-UCS client ID
It is possible for two distinct clients to have the same cl_ipaddr:
- if the client admin disables callback with clientaddr=0.0.0.0 on
more than one client
- if two clients behind separate NATs use the same private subnet
number
- if the client admin specifies the same address via clientaddr=
mount option (pointing the server at the same NAT box, for
example)
Because of the way the Linux NFSv4.0 client constructs its client
ID string by default, such clients could interfere with each others'
lease state when mounting the same server:
scnprintf(str, len, "Linux NFSv4.0 %s/%s %s",
clp->cl_ipaddr,
rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_ADDR),
rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_PROTO));
cl_ipaddr is set to the value of the clientaddr= mount option. Two
clients whose addresses are 192.168.3.77 that mount the same server
(whose public IP address is, say, 3.4.5.6) would both generate the
same client ID string when sending a SETCLIENTID:
Linux NFSv4.0 192.168.3.77/3.4.5.6 tcp
and thus the server would not be able to distinguish the clients'
leases. If both clients are using AUTH_SYS when sending SETCLIENTID
then the server could possibly permit the two clients to interfere
with or purge each others' leases.
To better ensure that Linux's NFSv4.0 client ID strings are distinct
in these cases, remove cl_ipaddr from the client ID string and
replace it with something more likely to be unique. Note that the
replacement looks a lot like the uniform client ID string.
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
2018-06-04 14:53:29 +00:00
|
|
|
clp->cl_rpcclient->cl_nodename,
|
|
|
|
rpc_peeraddr2str(clp->cl_rpcclient,
|
2018-06-04 14:53:34 +00:00
|
|
|
RPC_DISPLAY_ADDR));
|
NFS: Use the same nfs_client_id4 for every server
Currently, when identifying itself to NFS servers, the Linux NFS
client uses a unique nfs_client_id4.id string for each server IP
address it talks with. For example, when client A talks to server X,
the client identifies itself using a string like "AX". The
requirements for these strings are specified in detail by RFC 3530
(and bis).
This form of client identification presents a problem for Transparent
State Migration. When client A's state on server X is migrated to
server Y, it continues to be associated with string "AX." But,
according to the rules of client string construction above, client
A will present string "AY" when communicating with server Y.
Server Y thus has no way to know that client A should be associated
with the state migrated from server X. "AX" is all but abandoned,
interfering with establishing fresh state for client A on server Y.
To support transparent state migration, then, NFSv4.0 clients must
instead use the same nfs_client_id4.id string to identify themselves
to every NFS server; something like "A".
Now a client identifies itself as "A" to server X. When a file
system on server X transitions to server Y, and client A identifies
itself as "A" to server Y, Y will know immediately that the state
associated with "A," whether it is native or migrated, is owned by
the client, and can merge both into a single lease.
As a pre-requisite to adding support for NFSv4 migration to the Linux
NFS client, this patch changes the way Linux identifies itself to NFS
servers via the SETCLIENTID (NFSv4 minor version 0) and EXCHANGE_ID
(NFSv4 minor version 1) operations.
In addition to removing the server's IP address from nfs_client_id4,
the Linux NFS client will also no longer use its own source IP address
as part of the nfs_client_id4 string. On multi-homed clients, the
value of this address depends on the address family and network
routing used to contact the server, thus it can be different for each
server.
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
2012-09-14 21:24:21 +00:00
|
|
|
rcu_read_unlock();
|
2015-06-09 23:43:59 +00:00
|
|
|
|
|
|
|
clp->cl_owner_id = str;
|
|
|
|
return 0;
|
NFS: Use the same nfs_client_id4 for every server
Currently, when identifying itself to NFS servers, the Linux NFS
client uses a unique nfs_client_id4.id string for each server IP
address it talks with. For example, when client A talks to server X,
the client identifies itself using a string like "AX". The
requirements for these strings are specified in detail by RFC 3530
(and bis).
This form of client identification presents a problem for Transparent
State Migration. When client A's state on server X is migrated to
server Y, it continues to be associated with string "AX." But,
according to the rules of client string construction above, client
A will present string "AY" when communicating with server Y.
Server Y thus has no way to know that client A should be associated
with the state migrated from server X. "AX" is all but abandoned,
interfering with establishing fresh state for client A on server Y.
To support transparent state migration, then, NFSv4.0 clients must
instead use the same nfs_client_id4.id string to identify themselves
to every NFS server; something like "A".
Now a client identifies itself as "A" to server X. When a file
system on server X transitions to server Y, and client A identifies
itself as "A" to server Y, Y will know immediately that the state
associated with "A," whether it is native or migrated, is owned by
the client, and can merge both into a single lease.
As a pre-requisite to adding support for NFSv4 migration to the Linux
NFS client, this patch changes the way Linux identifies itself to NFS
servers via the SETCLIENTID (NFSv4 minor version 0) and EXCHANGE_ID
(NFSv4 minor version 1) operations.
In addition to removing the server's IP address from nfs_client_id4,
the Linux NFS client will also no longer use its own source IP address
as part of the nfs_client_id4 string. On multi-homed clients, the
value of this address depends on the address family and network
routing used to contact the server, thus it can be different for each
server.
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
2012-09-14 21:24:21 +00:00
|
|
|
}
|
|
|
|
|
2015-06-09 23:44:00 +00:00
|
|
|
static int
|
|
|
|
nfs4_init_uniform_client_string(struct nfs_client *clp)
|
NFS: Use the same nfs_client_id4 for every server
Currently, when identifying itself to NFS servers, the Linux NFS
client uses a unique nfs_client_id4.id string for each server IP
address it talks with. For example, when client A talks to server X,
the client identifies itself using a string like "AX". The
requirements for these strings are specified in detail by RFC 3530
(and bis).
This form of client identification presents a problem for Transparent
State Migration. When client A's state on server X is migrated to
server Y, it continues to be associated with string "AX." But,
according to the rules of client string construction above, client
A will present string "AY" when communicating with server Y.
Server Y thus has no way to know that client A should be associated
with the state migrated from server X. "AX" is all but abandoned,
interfering with establishing fresh state for client A on server Y.
To support transparent state migration, then, NFSv4.0 clients must
instead use the same nfs_client_id4.id string to identify themselves
to every NFS server; something like "A".
Now a client identifies itself as "A" to server X. When a file
system on server X transitions to server Y, and client A identifies
itself as "A" to server Y, Y will know immediately that the state
associated with "A," whether it is native or migrated, is owned by
the client, and can merge both into a single lease.
As a pre-requisite to adding support for NFSv4 migration to the Linux
NFS client, this patch changes the way Linux identifies itself to NFS
servers via the SETCLIENTID (NFSv4 minor version 0) and EXCHANGE_ID
(NFSv4 minor version 1) operations.
In addition to removing the server's IP address from nfs_client_id4,
the Linux NFS client will also no longer use its own source IP address
as part of the nfs_client_id4 string. On multi-homed clients, the
value of this address depends on the address family and network
routing used to contact the server, thus it can be different for each
server.
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
2012-09-14 21:24:21 +00:00
|
|
|
{
|
2020-10-07 22:24:17 +00:00
|
|
|
char buf[NFS4_CLIENT_ID_UNIQ_LEN];
|
|
|
|
size_t buflen;
|
2015-06-09 23:44:00 +00:00
|
|
|
size_t len;
|
|
|
|
char *str;
|
2015-01-03 20:16:04 +00:00
|
|
|
|
|
|
|
if (clp->cl_owner_id != NULL)
|
2015-06-09 23:44:00 +00:00
|
|
|
return 0;
|
2012-09-14 21:24:41 +00:00
|
|
|
|
2015-06-09 23:44:00 +00:00
|
|
|
len = 10 + 10 + 1 + 10 + 1 +
|
|
|
|
strlen(clp->cl_rpcclient->cl_nodename) + 1;
|
|
|
|
|
2020-10-07 22:24:18 +00:00
|
|
|
buflen = nfs4_get_uniquifier(clp, buf, sizeof(buf));
|
2020-10-07 22:24:17 +00:00
|
|
|
if (buflen)
|
|
|
|
len += buflen + 1;
|
|
|
|
|
2015-06-09 23:44:00 +00:00
|
|
|
if (len > NFS4_OPAQUE_LIMIT + 1)
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Since this string is allocated at mount time, and held until the
|
|
|
|
* nfs_client is destroyed, we can use GFP_KERNEL here w/o worrying
|
|
|
|
* about a memory-reclaim deadlock.
|
|
|
|
*/
|
|
|
|
str = kmalloc(len, GFP_KERNEL);
|
|
|
|
if (!str)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
2020-10-07 22:24:17 +00:00
|
|
|
if (buflen)
|
|
|
|
scnprintf(str, len, "Linux NFSv%u.%u %s/%s",
|
|
|
|
clp->rpc_ops->version, clp->cl_minorversion,
|
|
|
|
buf, clp->cl_rpcclient->cl_nodename);
|
|
|
|
else
|
|
|
|
scnprintf(str, len, "Linux NFSv%u.%u %s",
|
|
|
|
clp->rpc_ops->version, clp->cl_minorversion,
|
|
|
|
clp->cl_rpcclient->cl_nodename);
|
2015-06-09 23:44:00 +00:00
|
|
|
clp->cl_owner_id = str;
|
|
|
|
return 0;
|
NFS: Use the same nfs_client_id4 for every server
Currently, when identifying itself to NFS servers, the Linux NFS
client uses a unique nfs_client_id4.id string for each server IP
address it talks with. For example, when client A talks to server X,
the client identifies itself using a string like "AX". The
requirements for these strings are specified in detail by RFC 3530
(and bis).
This form of client identification presents a problem for Transparent
State Migration. When client A's state on server X is migrated to
server Y, it continues to be associated with string "AX." But,
according to the rules of client string construction above, client
A will present string "AY" when communicating with server Y.
Server Y thus has no way to know that client A should be associated
with the state migrated from server X. "AX" is all but abandoned,
interfering with establishing fresh state for client A on server Y.
To support transparent state migration, then, NFSv4.0 clients must
instead use the same nfs_client_id4.id string to identify themselves
to every NFS server; something like "A".
Now a client identifies itself as "A" to server X. When a file
system on server X transitions to server Y, and client A identifies
itself as "A" to server Y, Y will know immediately that the state
associated with "A," whether it is native or migrated, is owned by
the client, and can merge both into a single lease.
As a pre-requisite to adding support for NFSv4 migration to the Linux
NFS client, this patch changes the way Linux identifies itself to NFS
servers via the SETCLIENTID (NFSv4 minor version 0) and EXCHANGE_ID
(NFSv4 minor version 1) operations.
In addition to removing the server's IP address from nfs_client_id4,
the Linux NFS client will also no longer use its own source IP address
as part of the nfs_client_id4 string. On multi-homed clients, the
value of this address depends on the address family and network
routing used to contact the server, thus it can be different for each
server.
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
2012-09-14 21:24:21 +00:00
|
|
|
}
|
|
|
|
|
2014-03-12 16:51:47 +00:00
|
|
|
/*
|
|
|
|
* nfs4_callback_up_net() starts only "tcp" and "tcp6" callback
|
|
|
|
* services. Advertise one based on the address family of the
|
|
|
|
* clientaddr.
|
|
|
|
*/
|
|
|
|
static unsigned int
|
|
|
|
nfs4_init_callback_netid(const struct nfs_client *clp, char *buf, size_t len)
|
|
|
|
{
|
|
|
|
if (strchr(clp->cl_ipaddr, ':') != NULL)
|
|
|
|
return scnprintf(buf, len, "tcp6");
|
|
|
|
else
|
|
|
|
return scnprintf(buf, len, "tcp");
|
|
|
|
}
|
|
|
|
|
2014-06-22 00:52:17 +00:00
|
|
|
static void nfs4_setclientid_done(struct rpc_task *task, void *calldata)
|
|
|
|
{
|
|
|
|
struct nfs4_setclientid *sc = calldata;
|
|
|
|
|
|
|
|
if (task->tk_status == 0)
|
|
|
|
sc->sc_cred = get_rpccred(task->tk_rqstp->rq_cred);
|
|
|
|
}
|
|
|
|
|
|
|
|
static const struct rpc_call_ops nfs4_setclientid_ops = {
|
|
|
|
.rpc_call_done = nfs4_setclientid_done,
|
|
|
|
};
|
|
|
|
|
2012-07-11 20:30:59 +00:00
|
|
|
/**
|
|
|
|
* nfs4_proc_setclientid - Negotiate client ID
|
|
|
|
* @clp: state data structure
|
|
|
|
* @program: RPC program for NFSv4 callback service
|
|
|
|
* @port: IP port number for NFS4 callback service
|
2018-12-03 00:30:31 +00:00
|
|
|
* @cred: credential to use for this call
|
2012-07-11 20:30:59 +00:00
|
|
|
* @res: where to place the result
|
|
|
|
*
|
|
|
|
* Returns zero, a negative errno, or a negative NFS4ERR status code.
|
|
|
|
*/
|
2010-04-16 20:43:06 +00:00
|
|
|
int nfs4_proc_setclientid(struct nfs_client *clp, u32 program,
|
2018-12-03 00:30:31 +00:00
|
|
|
unsigned short port, const struct cred *cred,
|
2010-04-16 20:43:06 +00:00
|
|
|
struct nfs4_setclientid_res *res)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
|
|
|
nfs4_verifier sc_verifier;
|
|
|
|
struct nfs4_setclientid setclientid = {
|
|
|
|
.sc_verifier = &sc_verifier,
|
|
|
|
.sc_prog = program,
|
2015-06-09 23:43:57 +00:00
|
|
|
.sc_clnt = clp,
|
2005-04-16 22:20:36 +00:00
|
|
|
};
|
|
|
|
struct rpc_message msg = {
|
|
|
|
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID],
|
|
|
|
.rpc_argp = &setclientid,
|
2010-04-16 20:43:06 +00:00
|
|
|
.rpc_resp = res,
|
2006-01-03 08:55:26 +00:00
|
|
|
.rpc_cred = cred,
|
2005-04-16 22:20:36 +00:00
|
|
|
};
|
2014-06-22 00:52:17 +00:00
|
|
|
struct rpc_task_setup task_setup_data = {
|
|
|
|
.rpc_client = clp->cl_rpcclient,
|
|
|
|
.rpc_message = &msg,
|
|
|
|
.callback_ops = &nfs4_setclientid_ops,
|
|
|
|
.callback_data = &setclientid,
|
2019-05-30 00:41:28 +00:00
|
|
|
.flags = RPC_TASK_TIMEOUT | RPC_TASK_NO_ROUND_ROBIN,
|
2014-06-22 00:52:17 +00:00
|
|
|
};
|
2020-01-30 09:43:25 +00:00
|
|
|
unsigned long now = jiffies;
|
2012-07-11 20:30:59 +00:00
|
|
|
int status;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2012-07-11 20:30:50 +00:00
|
|
|
/* nfs_client_id4 */
|
NFS: Always use the same SETCLIENTID boot verifier
Currently our NFS client assigns a unique SETCLIENTID boot verifier
for each server IP address it knows about. It's set to CURRENT_TIME
when the struct nfs_client for that server IP is created.
During the SETCLIENTID operation, our client also presents an
nfs_client_id4 string to servers, as an identifier on which the server
can hang all of this client's NFSv4 state. Our client's
nfs_client_id4 string is unique for each server IP address.
An NFSv4 server is obligated to wipe all NFSv4 state associated with
an nfs_client_id4 string when the client presents the same
nfs_client_id4 string along with a changed SETCLIENTID boot verifier.
When our client unmounts the last of a server's shares, it destroys
that server's struct nfs_client. The next time the client mounts that
NFS server, it creates a fresh struct nfs_client with a fresh boot
verifier. On seeing the fresh verifer, the server wipes any previous
NFSv4 state associated with that nfs_client_id4.
However, NFSv4.1 clients are supposed to present the same
nfs_client_id4 string to all servers. And, to support Transparent
State Migration, the same nfs_client_id4 string should be presented
to all NFSv4.0 servers so they recognize that migrated state for this
client belongs with state a server may already have for this client.
(This is known as the Uniform Client String model).
If the nfs_client_id4 string is the same but the boot verifier changes
for each server IP address, SETCLIENTID and EXCHANGE_ID operations
from such a client could unintentionally result in a server wiping a
client's previously obtained lease.
Thus, if our NFS client is going to use a fixed nfs_client_id4 string,
either for NFSv4.0 or NFSv4.1 mounts, our NFS client should use a
boot verifier that does not change depending on server IP address.
Replace our current per-nfs_client boot verifier with a per-nfs_net
boot verifier.
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
2012-05-22 02:45:41 +00:00
|
|
|
nfs4_init_boot_verifier(clp, &sc_verifier);
|
2015-06-09 23:44:00 +00:00
|
|
|
|
|
|
|
if (test_bit(NFS_CS_MIGRATION, &clp->cl_flags))
|
|
|
|
status = nfs4_init_uniform_client_string(clp);
|
|
|
|
else
|
2015-06-09 23:43:59 +00:00
|
|
|
status = nfs4_init_nonuniform_client_string(clp);
|
2015-06-09 23:44:00 +00:00
|
|
|
|
|
|
|
if (status)
|
|
|
|
goto out;
|
2015-06-09 23:43:57 +00:00
|
|
|
|
2012-07-11 20:30:50 +00:00
|
|
|
/* cb_client4 */
|
2014-03-12 16:51:47 +00:00
|
|
|
setclientid.sc_netid_len =
|
|
|
|
nfs4_init_callback_netid(clp,
|
|
|
|
setclientid.sc_netid,
|
|
|
|
sizeof(setclientid.sc_netid));
|
2012-07-11 20:30:50 +00:00
|
|
|
setclientid.sc_uaddr_len = scnprintf(setclientid.sc_uaddr,
|
2007-12-10 19:57:09 +00:00
|
|
|
sizeof(setclientid.sc_uaddr), "%s.%u.%u",
|
2005-04-16 22:20:36 +00:00
|
|
|
clp->cl_ipaddr, port >> 8, port & 255);
|
|
|
|
|
2015-06-09 23:43:57 +00:00
|
|
|
dprintk("NFS call setclientid auth=%s, '%s'\n",
|
2012-07-11 20:30:59 +00:00
|
|
|
clp->cl_rpcclient->cl_auth->au_ops->au_name,
|
2015-06-09 23:43:57 +00:00
|
|
|
clp->cl_owner_id);
|
2019-08-14 19:28:28 +00:00
|
|
|
|
|
|
|
status = nfs4_call_sync_custom(&task_setup_data);
|
2014-06-22 00:52:17 +00:00
|
|
|
if (setclientid.sc_cred) {
|
2019-10-04 13:58:54 +00:00
|
|
|
kfree(clp->cl_acceptor);
|
2014-06-22 00:52:17 +00:00
|
|
|
clp->cl_acceptor = rpcauth_stringify_acceptor(setclientid.sc_cred);
|
|
|
|
put_rpccred(setclientid.sc_cred);
|
|
|
|
}
|
2020-01-30 09:43:25 +00:00
|
|
|
|
|
|
|
if (status == 0)
|
|
|
|
do_renew_lease(clp, now);
|
2014-06-22 00:52:17 +00:00
|
|
|
out:
|
2013-08-09 15:51:26 +00:00
|
|
|
trace_nfs4_setclientid(clp, status);
|
2012-07-11 20:30:59 +00:00
|
|
|
dprintk("NFS reply setclientid: %d\n", status);
|
|
|
|
return status;
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
2012-07-11 20:30:59 +00:00
|
|
|
/**
|
|
|
|
* nfs4_proc_setclientid_confirm - Confirm client ID
|
|
|
|
* @clp: state data structure
|
2019-02-18 18:32:38 +00:00
|
|
|
* @arg: result of a previous SETCLIENTID
|
2018-12-03 00:30:31 +00:00
|
|
|
* @cred: credential to use for this call
|
2012-07-11 20:30:59 +00:00
|
|
|
*
|
|
|
|
* Returns zero, a negative errno, or a negative NFS4ERR status code.
|
|
|
|
*/
|
2011-04-24 18:28:18 +00:00
|
|
|
int nfs4_proc_setclientid_confirm(struct nfs_client *clp,
|
2010-04-16 20:43:06 +00:00
|
|
|
struct nfs4_setclientid_res *arg,
|
2018-12-03 00:30:31 +00:00
|
|
|
const struct cred *cred)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
|
|
|
struct rpc_message msg = {
|
|
|
|
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID_CONFIRM],
|
2010-04-16 20:43:06 +00:00
|
|
|
.rpc_argp = arg,
|
2006-01-03 08:55:26 +00:00
|
|
|
.rpc_cred = cred,
|
2005-04-16 22:20:36 +00:00
|
|
|
};
|
|
|
|
int status;
|
|
|
|
|
2012-07-11 20:30:59 +00:00
|
|
|
dprintk("NFS call setclientid_confirm auth=%s, (client ID %llx)\n",
|
|
|
|
clp->cl_rpcclient->cl_auth->au_ops->au_name,
|
|
|
|
clp->cl_clientid);
|
2019-05-30 00:41:28 +00:00
|
|
|
status = rpc_call_sync(clp->cl_rpcclient, &msg,
|
|
|
|
RPC_TASK_TIMEOUT | RPC_TASK_NO_ROUND_ROBIN);
|
2013-08-09 15:51:26 +00:00
|
|
|
trace_nfs4_setclientid_confirm(clp, status);
|
2012-07-11 20:30:59 +00:00
|
|
|
dprintk("NFS reply setclientid_confirm: %d\n", status);
|
2005-04-16 22:20:36 +00:00
|
|
|
return status;
|
|
|
|
}
|
|
|
|
|
2006-01-03 08:55:18 +00:00
|
|
|
struct nfs4_delegreturndata {
|
|
|
|
struct nfs4_delegreturnargs args;
|
2006-01-03 08:55:38 +00:00
|
|
|
struct nfs4_delegreturnres res;
|
2006-01-03 08:55:18 +00:00
|
|
|
struct nfs_fh fh;
|
|
|
|
nfs4_stateid stateid;
|
2006-01-03 08:55:21 +00:00
|
|
|
unsigned long timestamp;
|
2016-11-15 20:03:33 +00:00
|
|
|
struct {
|
|
|
|
struct nfs4_layoutreturn_args arg;
|
|
|
|
struct nfs4_layoutreturn_res res;
|
2016-09-23 15:38:08 +00:00
|
|
|
struct nfs4_xdr_opaque_data ld_private;
|
2016-11-15 20:03:33 +00:00
|
|
|
u32 roc_barrier;
|
|
|
|
bool roc;
|
|
|
|
} lr;
|
2006-01-03 08:55:38 +00:00
|
|
|
struct nfs_fattr fattr;
|
2006-01-03 08:55:18 +00:00
|
|
|
int rpc_status;
|
2014-07-03 05:05:02 +00:00
|
|
|
struct inode *inode;
|
2006-01-03 08:55:18 +00:00
|
|
|
};
|
|
|
|
|
|
|
|
/*
 * Completion callback for an asynchronous DELEGRETURN RPC.
 *
 * Handles stateid races and layoutreturn failures by restarting the call
 * (out_restart); stateids the server no longer knows about are treated as
 * a successful return.  On completion, the delegation is marked returned
 * and the final status is stored in data->rpc_status for any waiter.
 */
static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata)
{
	struct nfs4_delegreturndata *data = calldata;
	struct nfs4_exception exception = {
		.inode = data->inode,
		.stateid = &data->stateid,
		.task_is_privileged = data->args.seq_args.sa_privileged,
	};

	if (!nfs4_sequence_done(task, &data->res.seq_res))
		return;

	trace_nfs4_delegreturn_exit(&data->args, &data->res, task->tk_status);

	/* Handle Layoutreturn errors */
	if (pnfs_roc_done(task, &data->args.lr_args, &data->res.lr_res,
			  &data->res.lr_ret) == -EAGAIN)
		goto out_restart;

	switch (task->tk_status) {
	case 0:
		renew_lease(data->res.server, data->timestamp);
		break;
	case -NFS4ERR_ADMIN_REVOKED:
	case -NFS4ERR_DELEG_REVOKED:
	case -NFS4ERR_EXPIRED:
		/* Server revoked the stateid: free it locally, then treat
		 * the delegation as returned (fall through to status = 0). */
		nfs4_free_revoked_stateid(data->res.server,
				data->args.stateid,
				task->tk_msg.rpc_cred);
		fallthrough;
	case -NFS4ERR_BAD_STATEID:
	case -NFS4ERR_STALE_STATEID:
	case -ETIMEDOUT:
		task->tk_status = 0;
		break;
	case -NFS4ERR_OLD_STATEID:
		/* Bump to the current seqid and retry */
		if (!nfs4_refresh_delegation_stateid(&data->stateid, data->inode))
			nfs4_stateid_seqid_inc(&data->stateid);
		if (data->args.bitmask) {
			data->args.bitmask = NULL;
			data->res.fattr = NULL;
		}
		goto out_restart;
	case -NFS4ERR_ACCESS:
		/* Retry once without the GETATTR piggy-back */
		if (data->args.bitmask) {
			data->args.bitmask = NULL;
			data->res.fattr = NULL;
			goto out_restart;
		}
		fallthrough;
	default:
		task->tk_status = nfs4_async_handle_exception(task,
				data->res.server, task->tk_status,
				&exception);
		if (exception.retry)
			goto out_restart;
	}
	nfs_delegation_mark_returned(data->inode, data->args.stateid);
	data->rpc_status = task->tk_status;
	return;
out_restart:
	task->tk_status = 0;
	rpc_restart_call_prepare(task);
}
|
|
|
|
|
|
|
|
/*
 * Release callback for the DELEGRETURN task: finish any piggy-backed
 * layoutreturn, fold the returned attributes into the inode cache, drop
 * the pinned inode, and free the call data.
 */
static void nfs4_delegreturn_release(void *calldata)
{
	struct nfs4_delegreturndata *data = calldata;
	struct inode *inode = data->inode;

	if (data->lr.roc)
		pnfs_roc_release(&data->lr.arg, &data->lr.res,
				data->res.lr_ret);
	if (inode) {
		/* Stamp the fattr with the pre-op change attribute so
		 * nfs_refresh_inode() can detect whether it is stale. */
		nfs4_fattr_set_prechange(&data->fattr,
					 inode_peek_iversion_raw(inode));
		nfs_refresh_inode(inode, &data->fattr);
		nfs_iput_and_deactive(inode);
	}
	kfree(calldata);
}
|
|
|
|
|
2009-04-01 13:22:28 +00:00
|
|
|
/*
 * Prepare callback for the DELEGRETURN task: optionally wait for an
 * outstanding layoutreturn, drop the layoutreturn piggy-back if the
 * layout has been invalidated, then claim a session/sequence slot.
 */
static void nfs4_delegreturn_prepare(struct rpc_task *task, void *data)
{
	struct nfs4_delegreturndata *d_data;
	struct pnfs_layout_hdr *lo;

	d_data = data;

	if (!d_data->lr.roc && nfs4_wait_on_layoutreturn(d_data->inode, task)) {
		/* Task was put to sleep waiting for the layoutreturn;
		 * release the sequence slot and let it be re-prepared. */
		nfs4_sequence_done(task, &d_data->res.seq_res);
		return;
	}

	/* Don't send a LAYOUTRETURN for a layout that is no longer valid */
	lo = d_data->args.lr_args ? d_data->args.lr_args->layout : NULL;
	if (lo && !pnfs_layout_is_valid(lo)) {
		d_data->args.lr_args = NULL;
		d_data->res.lr_res = NULL;
	}

	nfs4_setup_sequence(d_data->res.server->nfs_client,
			&d_data->args.seq_args,
			&d_data->res.seq_res,
			task);
}
|
|
|
|
|
2006-03-20 18:44:07 +00:00
|
|
|
/* RPC callback table for the asynchronous DELEGRETURN task */
static const struct rpc_call_ops nfs4_delegreturn_ops = {
	.rpc_call_prepare = nfs4_delegreturn_prepare,
	.rpc_call_done = nfs4_delegreturn_done,
	.rpc_release = nfs4_delegreturn_release,
};
|
|
|
|
|
2018-12-03 00:30:31 +00:00
|
|
|
/*
 * Issue a single DELEGRETURN RPC for @stateid on @inode.
 *
 * The call is always launched asynchronously; when @issync is set, wait
 * for completion and return the RPC status, otherwise return immediately
 * after the task is queued.  A GETATTR is piggy-backed for cache
 * consistency, and a LAYOUTRETURN may be piggy-backed via pnfs_roc().
 * Returns 0 or a negative errno / NFS4ERR code.
 */
static int _nfs4_proc_delegreturn(struct inode *inode, const struct cred *cred, const nfs4_stateid *stateid, int issync)
{
	struct nfs4_delegreturndata *data;
	struct nfs_server *server = NFS_SERVER(inode);
	struct rpc_task *task;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DELEGRETURN],
		.rpc_cred = cred,
	};
	struct rpc_task_setup task_setup_data = {
		.rpc_client = server->client,
		.rpc_message = &msg,
		.callback_ops = &nfs4_delegreturn_ops,
		.flags = RPC_TASK_ASYNC | RPC_TASK_TIMEOUT,
	};
	int status = 0;

	if (nfs_server_capable(inode, NFS_CAP_MOVEABLE))
		task_setup_data.flags |= RPC_TASK_MOVEABLE;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (data == NULL)
		return -ENOMEM;

	/* May switch to the machine credential / state-management client */
	nfs4_state_protect(server->nfs_client,
			NFS_SP4_MACH_CRED_CLEANUP,
			&task_setup_data.rpc_client, &msg);

	/* Private copies of fh and stateid: the caller's may go away while
	 * the async task is still in flight. */
	data->args.fhandle = &data->fh;
	data->args.stateid = &data->stateid;
	nfs4_bitmask_set(data->args.bitmask_store,
			server->cache_consistency_bitmask, inode, 0);
	data->args.bitmask = data->args.bitmask_store;
	nfs_copy_fh(&data->fh, NFS_FH(inode));
	nfs4_stateid_copy(&data->stateid, stateid);
	data->res.fattr = &data->fattr;
	data->res.server = server;
	data->res.lr_ret = -NFS4ERR_NOMATCHING_LAYOUT;
	data->lr.arg.ld_private = &data->lr.ld_private;
	nfs_fattr_init(data->res.fattr);
	data->timestamp = jiffies;
	data->rpc_status = 0;
	data->inode = nfs_igrab_and_active(inode);
	if (data->inode || issync) {
		data->lr.roc = pnfs_roc(inode, &data->lr.arg, &data->lr.res,
					cred);
		if (data->lr.roc) {
			data->args.lr_args = &data->lr.arg;
			data->res.lr_res = &data->lr.res;
		}
	}

	/* If the inode could not be pinned, mark the call privileged */
	if (!data->inode)
		nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1,
				   1);
	else
		nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1,
				   0);
	task_setup_data.callback_data = data;
	msg.rpc_argp = &data->args;
	msg.rpc_resp = &data->res;
	task = rpc_run_task(&task_setup_data);
	if (IS_ERR(task))
		return PTR_ERR(task);
	if (!issync)
		goto out;
	status = rpc_wait_for_completion_task(task);
	if (status != 0)
		goto out;
	status = data->rpc_status;
out:
	rpc_put_task(task);
	return status;
}
|
|
|
|
|
2018-12-03 00:30:31 +00:00
|
|
|
/*
 * Return delegation @stateid for @inode, retrying through the standard
 * NFSv4 exception handler.  Stale/expired stateids are reported as
 * success, since the server has already forgotten the delegation.
 */
int nfs4_proc_delegreturn(struct inode *inode, const struct cred *cred, const nfs4_stateid *stateid, int issync)
{
	struct nfs_server *server = NFS_SERVER(inode);
	struct nfs4_exception exception = { };
	int err;
	do {
		err = _nfs4_proc_delegreturn(inode, cred, stateid, issync);
		trace_nfs4_delegreturn(inode, stateid, err);
		switch (err) {
		case -NFS4ERR_STALE_STATEID:
		case -NFS4ERR_EXPIRED:
		case 0:
			return 0;
		}
		err = nfs4_handle_exception(server, err, &exception);
	} while (exception.retry);
	return err;
}
|
|
|
|
|
|
|
|
/*
 * Single attempt at a LOCKT (test-lock) RPC for F_GETLK.
 *
 * On success the request is marked F_UNLCK (no conflicting lock);
 * -NFS4ERR_DENIED means a conflict was found and the server has filled
 * @request (via res.denied) with the conflicting lock's details, which
 * counts as success for F_GETLK.
 */
static int _nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request)
{
	struct inode *inode = state->inode;
	struct nfs_server *server = NFS_SERVER(inode);
	struct nfs_client *clp = server->nfs_client;
	struct nfs_lockt_args arg = {
		.fh = NFS_FH(inode),
		.fl = request,
	};
	struct nfs_lockt_res res = {
		.denied = request,
	};
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKT],
		.rpc_argp = &arg,
		.rpc_resp = &res,
		.rpc_cred = state->owner->so_cred,
	};
	struct nfs4_lock_state *lsp;
	int status;

	arg.lock_owner.clientid = clp->cl_clientid;
	status = nfs4_set_lock_state(state, request);
	if (status != 0)
		goto out;
	lsp = request->fl_u.nfs4_fl.owner;
	arg.lock_owner.id = lsp->ls_seqid.owner_id;
	arg.lock_owner.s_dev = server->s_dev;
	status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
	switch (status) {
	case 0:
		/* No conflicting lock found */
		request->c.flc_type = F_UNLCK;
		break;
	case -NFS4ERR_DENIED:
		status = 0;
	}
	/* Release the per-request lock ops installed by nfs4_set_lock_state() */
	request->fl_ops->fl_release_private(request);
	request->fl_ops = NULL;
out:
	return status;
}
|
|
|
|
|
|
|
|
/*
 * F_GETLK entry point: run _nfs4_proc_getlk() through the standard
 * NFSv4 exception/retry loop (interruptible by signals).
 */
static int nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request)
{
	struct nfs4_exception exception = {
		.interruptible = true,
	};
	int err;

	do {
		err = _nfs4_proc_getlk(state, cmd, request);
		trace_nfs4_get_lock(request, state, cmd, err);
		err = nfs4_handle_exception(NFS_SERVER(state->inode), err,
				&exception);
	} while (exception.retry);
	return err;
}
|
|
|
|
|
2019-09-20 11:23:48 +00:00
|
|
|
/*
 * Update the seqid of a lock stateid after receiving
 * NFS4ERR_OLD_STATEID
 *
 * Returns true if @dst was updated (and the caller should retry),
 * false if @dst no longer refers to the current lock stateid.
 */
static bool nfs4_refresh_lock_old_stateid(nfs4_stateid *dst,
		struct nfs4_lock_state *lsp)
{
	struct nfs4_state *state = lsp->ls_state;
	bool ret = false;

	spin_lock(&state->state_lock);
	if (!nfs4_stateid_match_other(dst, &lsp->ls_stateid))
		goto out;
	/* If our cached stateid isn't newer, just bump the seqid;
	 * otherwise adopt the cached (newer) seqid. */
	if (!nfs4_stateid_is_newer(&lsp->ls_stateid, dst))
		nfs4_stateid_seqid_inc(dst);
	else
		dst->seqid = lsp->ls_stateid.seqid;
	ret = true;
out:
	spin_unlock(&state->state_lock);
	return ret;
}
|
|
|
|
|
|
|
|
/*
 * Copy the current lock stateid from @lsp into @dst under the state lock.
 * Returns true if @dst previously referred to a different stateid
 * (i.e. it was out of date and the caller should retry).
 */
static bool nfs4_sync_lock_stateid(nfs4_stateid *dst,
		struct nfs4_lock_state *lsp)
{
	struct nfs4_state *state = lsp->ls_state;
	bool ret;

	spin_lock(&state->state_lock);
	ret = !nfs4_stateid_match_other(dst, &lsp->ls_stateid);
	nfs4_stateid_copy(dst, &lsp->ls_stateid);
	spin_unlock(&state->state_lock);
	return ret;
}
|
|
|
|
|
2005-10-18 21:20:15 +00:00
|
|
|
/* Per-call bookkeeping for an asynchronous LOCKU (unlock) RPC. */
struct nfs4_unlockdata {
	struct nfs_locku_args arg;	/* LOCKU arguments */
	struct nfs_locku_res res;	/* LOCKU results */
	struct nfs4_lock_state *lsp;	/* referenced lock state */
	struct nfs_open_context *ctx;	/* pinned open context (keeps file open) */
	struct nfs_lock_context *l_ctx;	/* pinned lock context, for io-counter wait */
	struct file_lock fl;		/* private copy of the unlock request */
	struct nfs_server *server;
	unsigned long timestamp;	/* jiffies at call setup, for renew_lease() */
};
|
|
|
|
|
2006-01-03 08:55:16 +00:00
|
|
|
/*
 * Allocate and initialise the call data for a LOCKU RPC.
 * Takes references on the open and lock contexts and on @seqid; the
 * current lock stateid is snapshotted under the state lock.
 * Returns NULL on allocation failure (caller keeps ownership of @seqid).
 */
static struct nfs4_unlockdata *nfs4_alloc_unlockdata(struct file_lock *fl,
		struct nfs_open_context *ctx,
		struct nfs4_lock_state *lsp,
		struct nfs_seqid *seqid)
{
	struct nfs4_unlockdata *p;
	struct nfs4_state *state = lsp->ls_state;
	struct inode *inode = state->inode;

	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (p == NULL)
		return NULL;
	p->arg.fh = NFS_FH(inode);
	p->arg.fl = &p->fl;
	p->arg.seqid = seqid;
	p->res.seqid = seqid;
	p->lsp = lsp;
	/* Ensure we don't close file until we're done freeing locks! */
	p->ctx = get_nfs_open_context(ctx);
	p->l_ctx = nfs_get_lock_context(ctx);
	locks_init_lock(&p->fl);
	locks_copy_lock(&p->fl, fl);
	p->server = NFS_SERVER(inode);
	/* Snapshot the lock stateid under the state lock */
	spin_lock(&state->state_lock);
	nfs4_stateid_copy(&p->arg.stateid, &lsp->ls_stateid);
	spin_unlock(&state->state_lock);
	return p;
}
|
|
|
|
|
2006-01-03 08:55:07 +00:00
|
|
|
/*
 * Release callback for the LOCKU task: drop all references taken in
 * nfs4_alloc_unlockdata() and free the call data.
 */
static void nfs4_locku_release_calldata(void *data)
{
	struct nfs4_unlockdata *calldata = data;
	nfs_free_seqid(calldata->arg.seqid);
	nfs4_put_lock_state(calldata->lsp);
	nfs_put_lock_context(calldata->l_ctx);
	put_nfs_open_context(calldata->ctx);
	kfree(calldata);
}
|
|
|
|
|
2006-01-03 08:55:04 +00:00
|
|
|
/*
 * Completion callback for the LOCKU task.
 *
 * On success, renews the lease, releases the VFS-level lock, and updates
 * the cached lock stateid.  Stateid races cause the call to be restarted
 * with a refreshed stateid; other errors go through the generic async
 * exception handler.
 */
static void nfs4_locku_done(struct rpc_task *task, void *data)
{
	struct nfs4_unlockdata *calldata = data;
	struct nfs4_exception exception = {
		.inode = calldata->lsp->ls_state->inode,
		.stateid = &calldata->arg.stateid,
	};

	if (!nfs4_sequence_done(task, &calldata->res.seq_res))
		return;
	switch (task->tk_status) {
	case 0:
		renew_lease(calldata->server, calldata->timestamp);
		locks_lock_inode_wait(calldata->lsp->ls_state->inode, &calldata->fl);
		if (nfs4_update_lock_stateid(calldata->lsp,
					&calldata->res.stateid))
			break;
		/* Stateid update raced with another call: resync and retry */
		fallthrough;
	case -NFS4ERR_ADMIN_REVOKED:
	case -NFS4ERR_EXPIRED:
		/* Server revoked the stateid: free our copy, then resync */
		nfs4_free_revoked_stateid(calldata->server,
				&calldata->arg.stateid,
				task->tk_msg.rpc_cred);
		fallthrough;
	case -NFS4ERR_BAD_STATEID:
	case -NFS4ERR_STALE_STATEID:
		if (nfs4_sync_lock_stateid(&calldata->arg.stateid,
						calldata->lsp))
			rpc_restart_call_prepare(task);
		break;
	case -NFS4ERR_OLD_STATEID:
		if (nfs4_refresh_lock_old_stateid(&calldata->arg.stateid,
						calldata->lsp))
			rpc_restart_call_prepare(task);
		break;
	default:
		task->tk_status = nfs4_async_handle_exception(task,
				calldata->server, task->tk_status,
				&exception);
		if (exception.retry)
			rpc_restart_call_prepare(task);
	}
	nfs_release_seqid(calldata->arg.seqid);
}
|
|
|
|
|
2006-01-03 08:55:05 +00:00
|
|
|
/*
 * Prepare callback for the LOCKU task: wait for outstanding I/O on a
 * closing file and for the seqid slot, skip the RPC entirely if the
 * lock was never confirmed on the server, then claim a sequence slot.
 */
static void nfs4_locku_prepare(struct rpc_task *task, void *data)
{
	struct nfs4_unlockdata *calldata = data;

	/* When unlocking at close time, wait for in-flight I/O to drain */
	if (test_bit(NFS_CONTEXT_UNLOCK, &calldata->l_ctx->open_context->flags) &&
	    nfs_async_iocounter_wait(task, calldata->l_ctx))
		return;

	if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0)
		goto out_wait;
	if (test_bit(NFS_LOCK_INITIALIZED, &calldata->lsp->ls_flags) == 0) {
		/* Note: exit _without_ running nfs4_locku_done */
		goto out_no_action;
	}
	calldata->timestamp = jiffies;
	if (nfs4_setup_sequence(calldata->server->nfs_client,
				&calldata->arg.seq_args,
				&calldata->res.seq_res,
				task) != 0)
		nfs_release_seqid(calldata->arg.seqid);
	return;
out_no_action:
	task->tk_action = NULL;
out_wait:
	nfs4_sequence_done(task, &calldata->res.seq_res);
}
|
|
|
|
|
2006-01-03 08:55:04 +00:00
|
|
|
/* RPC callback table for the asynchronous LOCKU task */
static const struct rpc_call_ops nfs4_locku_ops = {
	.rpc_call_prepare = nfs4_locku_prepare,
	.rpc_call_done = nfs4_locku_done,
	.rpc_release = nfs4_locku_release_calldata,
};
|
|
|
|
|
2006-01-03 08:55:17 +00:00
|
|
|
/*
 * Launch an asynchronous LOCKU RPC for @fl and return the running task
 * (or an ERR_PTR).  The request is forced to F_UNLCK so that cancelling
 * a pending lock cannot be misread as a lock request.  On allocation
 * failure, @seqid is freed here.
 */
static struct rpc_task *nfs4_do_unlck(struct file_lock *fl,
		struct nfs_open_context *ctx,
		struct nfs4_lock_state *lsp,
		struct nfs_seqid *seqid)
{
	struct nfs4_unlockdata *data;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKU],
		.rpc_cred = ctx->cred,
	};
	struct rpc_task_setup task_setup_data = {
		.rpc_client = NFS_CLIENT(lsp->ls_state->inode),
		.rpc_message = &msg,
		.callback_ops = &nfs4_locku_ops,
		.workqueue = nfsiod_workqueue,
		.flags = RPC_TASK_ASYNC,
	};

	if (nfs_server_capable(lsp->ls_state->inode, NFS_CAP_MOVEABLE))
		task_setup_data.flags |= RPC_TASK_MOVEABLE;

	/* May switch to the machine credential / state-management client */
	nfs4_state_protect(NFS_SERVER(lsp->ls_state->inode)->nfs_client,
		NFS_SP4_MACH_CRED_CLEANUP, &task_setup_data.rpc_client, &msg);

	/* Ensure this is an unlock - when canceling a lock, the
	 * canceled lock is passed in, and it won't be an unlock.
	 */
	fl->c.flc_type = F_UNLCK;
	if (fl->c.flc_flags & FL_CLOSE)
		set_bit(NFS_CONTEXT_UNLOCK, &ctx->flags);

	data = nfs4_alloc_unlockdata(fl, ctx, lsp, seqid);
	if (data == NULL) {
		nfs_free_seqid(seqid);
		return ERR_PTR(-ENOMEM);
	}

	nfs4_init_sequence(&data->arg.seq_args, &data->res.seq_res, 1, 0);
	msg.rpc_argp = &data->arg;
	msg.rpc_resp = &data->res;
	task_setup_data.callback_data = data;
	return rpc_run_task(&task_setup_data);
}
|
|
|
|
|
2005-10-18 21:20:15 +00:00
|
|
|
/*
 * Unlock entry point: release the lock locally first (under locks that
 * exclude delegation-return and state-reclaim races), then — unless the
 * lock was only held locally under a delegation — send a synchronous
 * LOCKU to the server.  @request's flags are restored before returning.
 */
static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock *request)
{
	struct inode *inode = state->inode;
	struct nfs4_state_owner *sp = state->owner;
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_seqid *seqid;
	struct nfs4_lock_state *lsp;
	struct rpc_task *task;
	struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t);
	int status = 0;
	unsigned char saved_flags = request->c.flc_flags;

	status = nfs4_set_lock_state(state, request);
	/* Unlock _before_ we do the RPC call */
	request->c.flc_flags |= FL_EXISTS;
	/* Exclude nfs_delegation_claim_locks() */
	mutex_lock(&sp->so_delegreturn_mutex);
	/* Exclude nfs4_reclaim_open_stateid() - note nesting! */
	down_read(&nfsi->rwsem);
	if (locks_lock_inode_wait(inode, request) == -ENOENT) {
		/* The lock was never held locally; nothing to tell the server */
		up_read(&nfsi->rwsem);
		mutex_unlock(&sp->so_delegreturn_mutex);
		goto out;
	}
	lsp = request->fl_u.nfs4_fl.owner;
	set_bit(NFS_LOCK_UNLOCKING, &lsp->ls_flags);
	up_read(&nfsi->rwsem);
	mutex_unlock(&sp->so_delegreturn_mutex);
	if (status != 0)
		goto out;
	/* Is this a delegated lock? */
	if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) == 0)
		goto out;
	alloc_seqid = NFS_SERVER(inode)->nfs_client->cl_mvops->alloc_seqid;
	seqid = alloc_seqid(&lsp->ls_seqid, GFP_KERNEL);
	status = -ENOMEM;
	if (IS_ERR(seqid))
		goto out;
	task = nfs4_do_unlck(request,
			     nfs_file_open_context(request->c.flc_file),
			     lsp, seqid);
	status = PTR_ERR(task);
	if (IS_ERR(task))
		goto out;
	status = rpc_wait_for_completion_task(task);
	rpc_put_task(task);
out:
	request->c.flc_flags = saved_flags;
	trace_nfs4_unlock(request, state, F_SETLK, status);
	return status;
}
|
|
|
|
|
2006-01-03 08:55:17 +00:00
|
|
|
/* Per-call bookkeeping for an asynchronous LOCK RPC. */
struct nfs4_lockdata {
	struct nfs_lock_args arg;	/* LOCK arguments */
	struct nfs_lock_res res;	/* LOCK results */
	struct nfs4_lock_state *lsp;	/* referenced lock state */
	struct nfs_open_context *ctx;	/* pinned open context */
	struct file_lock fl;		/* private copy of the lock request */
	unsigned long timestamp;	/* jiffies at call setup, for lease renewal */
	int rpc_status;			/* final RPC status */
	int cancelled;			/* set when the submitter stopped waiting —
					 * presumably triggers an unlock on release;
					 * confirm against nfs4_lock_release() */
	struct nfs_server *server;
};
|
|
|
|
|
|
|
|
/*
 * Allocate and initialise the call data for a LOCK RPC: both an open
 * seqid and a lock seqid are allocated, the lock_owner is filled in, and
 * the open context is pinned.  Returns NULL on any allocation failure
 * (partially-allocated seqids are freed via the error path).
 */
static struct nfs4_lockdata *nfs4_alloc_lockdata(struct file_lock *fl,
		struct nfs_open_context *ctx, struct nfs4_lock_state *lsp,
		gfp_t gfp_mask)
{
	struct nfs4_lockdata *p;
	struct inode *inode = lsp->ls_state->inode;
	struct nfs_server *server = NFS_SERVER(inode);
	struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t);

	p = kzalloc(sizeof(*p), gfp_mask);
	if (p == NULL)
		return NULL;

	p->arg.fh = NFS_FH(inode);
	p->arg.fl = &p->fl;
	p->arg.open_seqid = nfs_alloc_seqid(&lsp->ls_state->owner->so_seqid, gfp_mask);
	if (IS_ERR(p->arg.open_seqid))
		goto out_free;
	/* Lock seqid allocation is minor-version specific */
	alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid;
	p->arg.lock_seqid = alloc_seqid(&lsp->ls_seqid, gfp_mask);
	if (IS_ERR(p->arg.lock_seqid))
		goto out_free_seqid;
	p->arg.lock_owner.clientid = server->nfs_client->cl_clientid;
	p->arg.lock_owner.id = lsp->ls_seqid.owner_id;
	p->arg.lock_owner.s_dev = server->s_dev;
	p->res.lock_seqid = p->arg.lock_seqid;
	p->lsp = lsp;
	p->server = server;
	p->ctx = get_nfs_open_context(ctx);
	locks_init_lock(&p->fl);
	locks_copy_lock(&p->fl, fl);
	return p;
out_free_seqid:
	nfs_free_seqid(p->arg.open_seqid);
out_free:
	kfree(p);
	return NULL;
}
|
|
|
|
|
|
|
|
/*
 * rpc_call_prepare callback for the LOCK RPC.
 *
 * Serializes on the lock seqid (and, for a first lock by this owner, the
 * open seqid), selects between the open_to_lock_owner and exists_lock_owner
 * forms of the request, then sets up the session sequence.  On any abort
 * path the acquired seqids are released in reverse order of acquisition —
 * the label ordering below is load-bearing.
 */
static void nfs4_lock_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs4_lockdata *data = calldata;
	struct nfs4_state *state = data->lsp->ls_state;

	if (nfs_wait_on_sequence(data->arg.lock_seqid, task) != 0)
		goto out_wait;
	/* Do we need to do an open_to_lock_owner? */
	if (!test_bit(NFS_LOCK_INITIALIZED, &data->lsp->ls_flags)) {
		if (nfs_wait_on_sequence(data->arg.open_seqid, task) != 0) {
			goto out_release_lock_seqid;
		}
		nfs4_stateid_copy(&data->arg.open_stateid,
				&state->open_stateid);
		data->arg.new_lock_owner = 1;
		data->res.open_seqid = data->arg.open_seqid;
	} else {
		/* Lock owner already known to the server: send the lock stateid */
		data->arg.new_lock_owner = 0;
		nfs4_stateid_copy(&data->arg.lock_stateid,
				&data->lsp->ls_stateid);
	}
	if (!nfs4_valid_open_stateid(state)) {
		/* Open stateid was invalidated (e.g. by recovery): fail the call */
		data->rpc_status = -EBADF;
		task->tk_action = NULL;
		goto out_release_open_seqid;
	}
	data->timestamp = jiffies;
	if (nfs4_setup_sequence(data->server->nfs_client,
				&data->arg.seq_args,
				&data->res.seq_res,
				task) == 0)
		return;
out_release_open_seqid:
	nfs_release_seqid(data->arg.open_seqid);
out_release_lock_seqid:
	nfs_release_seqid(data->arg.lock_seqid);
out_wait:
	nfs4_sequence_done(task, &data->res.seq_res);
	dprintk("%s: ret = %d\n", __func__, data->rpc_status);
}
|
|
|
|
|
2006-01-03 08:55:17 +00:00
|
|
|
/*
 * rpc_call_done callback for the LOCK RPC.
 *
 * On success: renew the lease, record the lock locally (unless the waiter
 * already cancelled), and update the lock stateid.  On stateid-related
 * errors, try to refresh the stateid we sent and restart the call; if the
 * stateid has changed under us, restart unconditionally.  A cancelled call
 * is never restarted — cleanup then happens in nfs4_lock_release().
 */
static void nfs4_lock_done(struct rpc_task *task, void *calldata)
{
	struct nfs4_lockdata *data = calldata;
	struct nfs4_lock_state *lsp = data->lsp;

	if (!nfs4_sequence_done(task, &data->res.seq_res))
		return;

	data->rpc_status = task->tk_status;
	switch (task->tk_status) {
	case 0:
		renew_lease(NFS_SERVER(d_inode(data->ctx->dentry)),
				data->timestamp);
		if (data->arg.new_lock && !data->cancelled) {
			data->fl.c.flc_flags &= ~(FL_SLEEP | FL_ACCESS);
			/* Record the granted lock in the local lock tables */
			if (locks_lock_inode_wait(lsp->ls_state->inode, &data->fl) < 0)
				goto out_restart;
		}
		if (data->arg.new_lock_owner != 0) {
			nfs_confirm_seqid(&lsp->ls_seqid, 0);
			nfs4_stateid_copy(&lsp->ls_stateid, &data->res.stateid);
			set_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags);
		} else if (!nfs4_update_lock_stateid(lsp, &data->res.stateid))
			goto out_restart;
		break;
	case -NFS4ERR_OLD_STATEID:
		/* Refresh whichever stateid we sent and retry with the newer one */
		if (data->arg.new_lock_owner != 0 &&
		    nfs4_refresh_open_old_stateid(&data->arg.open_stateid,
					lsp->ls_state))
			goto out_restart;
		if (nfs4_refresh_lock_old_stateid(&data->arg.lock_stateid, lsp))
			goto out_restart;
		fallthrough;
	case -NFS4ERR_BAD_STATEID:
	case -NFS4ERR_STALE_STATEID:
	case -NFS4ERR_EXPIRED:
		/* If the stateid changed since we sent the call, just retry */
		if (data->arg.new_lock_owner != 0) {
			if (!nfs4_stateid_match(&data->arg.open_stateid,
						&lsp->ls_state->open_stateid))
				goto out_restart;
		} else if (!nfs4_stateid_match(&data->arg.lock_stateid,
						&lsp->ls_stateid))
			goto out_restart;
	}
out_done:
	dprintk("%s: ret = %d!\n", __func__, data->rpc_status);
	return;
out_restart:
	if (!data->cancelled)
		rpc_restart_call_prepare(task);
	goto out_done;
}
|
|
|
|
|
|
|
|
/*
 * rpc_release callback for the LOCK RPC: drop all resources held by the
 * call data.  If the waiter cancelled after the server actually granted
 * the lock (rpc_status == 0), fire off an asynchronous unlock so the
 * server-side lock does not leak; the lock seqid is then owned by that
 * unlock task instead of being freed here.
 */
static void nfs4_lock_release(void *calldata)
{
	struct nfs4_lockdata *data = calldata;

	nfs_free_seqid(data->arg.open_seqid);
	if (data->cancelled && data->rpc_status == 0) {
		struct rpc_task *task;
		task = nfs4_do_unlck(&data->fl, data->ctx, data->lsp,
				data->arg.lock_seqid);
		if (!IS_ERR(task))
			rpc_put_task_async(task);
		dprintk("%s: cancelling lock!\n", __func__);
	} else
		nfs_free_seqid(data->arg.lock_seqid);
	nfs4_put_lock_state(data->lsp);
	put_nfs_open_context(data->ctx);
	kfree(data);
}
|
|
|
|
|
|
|
|
/* RPC callbacks for the asynchronous LOCK operation */
static const struct rpc_call_ops nfs4_lock_ops = {
	.rpc_call_prepare = nfs4_lock_prepare,
	.rpc_call_done = nfs4_lock_done,
	.rpc_release = nfs4_lock_release,
};
|
|
|
|
|
2010-01-26 20:42:21 +00:00
|
|
|
/*
 * Map a failed SETLK to the appropriate recovery action.
 *
 * Stateid-level errors schedule stateid recovery for the affected open
 * state (but only if this lock owner was, or was about to become, known
 * to the server).  A stale stateid implies the whole clientid is gone,
 * so schedule full lease recovery instead.  In both cases the seqid is
 * marked unconfirmed again.
 */
static void nfs4_handle_setlk_error(struct nfs_server *server, struct nfs4_lock_state *lsp, int new_lock_owner, int error)
{
	switch (error) {
	case -NFS4ERR_ADMIN_REVOKED:
	case -NFS4ERR_EXPIRED:
	case -NFS4ERR_BAD_STATEID:
		lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED;
		if (new_lock_owner != 0 ||
		   test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) != 0)
			nfs4_schedule_stateid_recovery(server, lsp->ls_state);
		break;
	case -NFS4ERR_STALE_STATEID:
		lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED;
		nfs4_schedule_lease_recovery(server->nfs_client);
	}
}
|
|
|
|
|
2009-12-09 09:50:14 +00:00
|
|
|
/*
 * Issue a single LOCK RPC and wait for it to complete.
 *
 * @recovery_type selects between a brand-new lock (NFS_LOCK_NEW) and the
 * reclaim/expired recovery variants; recovery calls are marked privileged
 * in the sequence setup.  If the wait is interrupted, the call is flagged
 * as cancelled and nfs4_lock_release() takes care of undoing any lock the
 * server may still grant.  Call data ownership passes to the RPC layer
 * once rpc_run_task() succeeds.
 *
 * Returns 0 on success or a negative errno / NFS4ERR value.
 */
static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *fl, int recovery_type)
{
	struct nfs4_lockdata *data;
	struct rpc_task *task;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCK],
		.rpc_cred = state->owner->so_cred,
	};
	struct rpc_task_setup task_setup_data = {
		.rpc_client = NFS_CLIENT(state->inode),
		.rpc_message = &msg,
		.callback_ops = &nfs4_lock_ops,
		.workqueue = nfsiod_workqueue,
		.flags = RPC_TASK_ASYNC | RPC_TASK_CRED_NOREF,
	};
	int ret;

	if (nfs_server_capable(state->inode, NFS_CAP_MOVEABLE))
		task_setup_data.flags |= RPC_TASK_MOVEABLE;

	data = nfs4_alloc_lockdata(fl,
			nfs_file_open_context(fl->c.flc_file),
			fl->fl_u.nfs4_fl.owner, GFP_KERNEL);
	if (data == NULL)
		return -ENOMEM;
	if (IS_SETLKW(cmd))
		data->arg.block = 1;
	/* Recovery-type calls are privileged wrt. session draining */
	nfs4_init_sequence(&data->arg.seq_args, &data->res.seq_res, 1,
				recovery_type > NFS_LOCK_NEW);
	msg.rpc_argp = &data->arg;
	msg.rpc_resp = &data->res;
	task_setup_data.callback_data = data;
	if (recovery_type > NFS_LOCK_NEW) {
		if (recovery_type == NFS_LOCK_RECLAIM)
			data->arg.reclaim = NFS_LOCK_RECLAIM;
	} else
		data->arg.new_lock = 1;
	task = rpc_run_task(&task_setup_data);
	if (IS_ERR(task))
		return PTR_ERR(task);
	ret = rpc_wait_for_completion_task(task);
	if (ret == 0) {
		ret = data->rpc_status;
		if (ret)
			nfs4_handle_setlk_error(data->server, data->lsp,
					data->arg.new_lock_owner, ret);
	} else
		/* Interrupted: let nfs4_lock_release() undo a granted lock */
		data->cancelled = true;
	trace_nfs4_set_lock(fl, state, &data->res.stateid, cmd, ret);
	rpc_put_task(task);
	dprintk("%s: ret = %d\n", __func__, ret);
	return ret;
}
|
|
|
|
|
|
|
|
/*
 * Reclaim a lock after server reboot (grace-period recovery).
 * Retries only on NFS4ERR_DELAY; any other result ends the loop.
 * If we hold a delegation, the lock is already cached locally and
 * nothing needs to be sent.
 */
static int nfs4_lock_reclaim(struct nfs4_state *state, struct file_lock *request)
{
	struct nfs_server *server = NFS_SERVER(state->inode);
	struct nfs4_exception exception = {
		.inode = state->inode,
	};
	int err;

	do {
		/* Cache the lock if possible... */
		if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0)
			return 0;
		err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_RECLAIM);
		if (err != -NFS4ERR_DELAY)
			break;
		nfs4_handle_exception(server, err, &exception);
	} while (exception.retry);
	return err;
}
|
|
|
|
|
|
|
|
/*
 * Re-establish a lock whose state has expired on the server.
 *
 * If the recover_lost_locks module parameter is off, the lock is simply
 * marked lost (NFS_LOCK_LOST) and reported as recovered, since silently
 * re-acquiring it could hide a lost-lock data-integrity window.
 * Otherwise retry the LOCK on GRACE/DELAY, and skip the wire call
 * entirely while a delegation caches the lock locally.
 */
static int nfs4_lock_expired(struct nfs4_state *state, struct file_lock *request)
{
	struct nfs_server *server = NFS_SERVER(state->inode);
	struct nfs4_exception exception = {
		.inode = state->inode,
	};
	int err;

	err = nfs4_set_lock_state(state, request);
	if (err != 0)
		return err;
	if (!recover_lost_locks) {
		set_bit(NFS_LOCK_LOST, &request->fl_u.nfs4_fl.owner->ls_flags);
		return 0;
	}
	do {
		if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0)
			return 0;
		err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_EXPIRED);
		switch (err) {
		default:
			goto out;
		case -NFS4ERR_GRACE:
		case -NFS4ERR_DELAY:
			nfs4_handle_exception(server, err, &exception);
			err = 0;
		}
	} while (exception.retry);
out:
	return err;
}
|
|
|
|
|
2011-06-02 18:59:10 +00:00
|
|
|
#if defined(CONFIG_NFS_V4_1)
|
2012-01-31 15:39:30 +00:00
|
|
|
static int nfs41_lock_expired(struct nfs4_state *state, struct file_lock *request)
|
|
|
|
{
|
2016-09-22 17:39:03 +00:00
|
|
|
struct nfs4_lock_state *lsp;
|
|
|
|
int status;
|
2012-01-31 15:39:30 +00:00
|
|
|
|
2016-09-22 17:39:03 +00:00
|
|
|
status = nfs4_set_lock_state(state, request);
|
|
|
|
if (status != 0)
|
|
|
|
return status;
|
|
|
|
lsp = request->fl_u.nfs4_fl.owner;
|
|
|
|
if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) ||
|
|
|
|
test_bit(NFS_LOCK_LOST, &lsp->ls_flags))
|
|
|
|
return 0;
|
2017-01-11 21:41:34 +00:00
|
|
|
return nfs4_lock_expired(state, request);
|
2011-06-02 18:59:10 +00:00
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
/*
 * One attempt at setting a lock.
 *
 * First probes local lock conflicts with FL_ACCESS (no lock is actually
 * taken).  Then, under so_delegreturn_mutex and nfsi->rwsem to exclude
 * delegation return/recall, either caches the lock purely locally (when
 * we hold a delegation) or sends the LOCK over the wire.  The caller's
 * flc_flags are restored on every exit path.
 */
static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
{
	struct nfs_inode *nfsi = NFS_I(state->inode);
	struct nfs4_state_owner *sp = state->owner;
	unsigned char flags = request->c.flc_flags;
	int status;

	/* FL_ACCESS: test for local conflicts without actually locking */
	request->c.flc_flags |= FL_ACCESS;
	status = locks_lock_inode_wait(state->inode, request);
	if (status < 0)
		goto out;
	mutex_lock(&sp->so_delegreturn_mutex);
	down_read(&nfsi->rwsem);
	if (test_bit(NFS_DELEGATED_STATE, &state->flags)) {
		/* Yes: cache locks! */
		/* ...but avoid races with delegation recall... */
		request->c.flc_flags = flags & ~FL_SLEEP;
		status = locks_lock_inode_wait(state->inode, request);
		up_read(&nfsi->rwsem);
		mutex_unlock(&sp->so_delegreturn_mutex);
		goto out;
	}
	up_read(&nfsi->rwsem);
	mutex_unlock(&sp->so_delegreturn_mutex);
	status = _nfs4_do_setlk(state, cmd, request, NFS_LOCK_NEW);
out:
	request->c.flc_flags = flags;
	return status;
}
|
|
|
|
|
|
|
|
/*
 * Set a lock, retrying through the standard NFSv4 exception handler.
 * NFS4ERR_DENIED (lock held by someone else) is translated to -EAGAIN
 * before the handler runs, so callers can poll/wait for the lock.
 */
static int nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
{
	struct nfs4_exception exception = {
		.state = state,
		.inode = state->inode,
		.interruptible = true,
	};
	int err;

	do {
		err = _nfs4_proc_setlk(state, cmd, request);
		if (err == -NFS4ERR_DENIED)
			err = -EAGAIN;
		err = nfs4_handle_exception(NFS_SERVER(state->inode),
				err, &exception);
	} while (exception.retry);
	return err;
}
|
|
|
|
|
2016-09-17 22:17:38 +00:00
|
|
|
/* Polling interval bounds for blocking-lock retries */
#define NFS4_LOCK_MINTIMEOUT (1 * HZ)
#define NFS4_LOCK_MAXTIMEOUT (30 * HZ)

/*
 * Blocking-lock retry loop with exponential backoff (1s doubling up to
 * 30s), used when the server will not send a CB_NOTIFY_LOCK callback.
 * Only F_SETLKW requests loop; F_SETLK returns after the first attempt.
 * The sleep is freezable, and a pending signal ends the loop with
 * -ERESTARTSYS.
 */
static int
nfs4_retry_setlk_simple(struct nfs4_state *state, int cmd,
			struct file_lock *request)
{
	int status = -ERESTARTSYS;
	unsigned long timeout = NFS4_LOCK_MINTIMEOUT;

	while(!signalled()) {
		status = nfs4_proc_setlk(state, cmd, request);
		if ((status != -EAGAIN) || IS_SETLK(cmd))
			break;
		__set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE);
		schedule_timeout(timeout);
		timeout *= 2;
		timeout = min_t(unsigned long, NFS4_LOCK_MAXTIMEOUT, timeout);
		status = -ERESTARTSYS;
	}
	return status;
}
|
|
|
|
|
2016-09-17 22:17:39 +00:00
|
|
|
#ifdef CONFIG_NFS_V4_1
/*
 * Waitqueue entry used to wait for a CB_NOTIFY_LOCK callback telling us
 * a contended lock may now be available.
 */
struct nfs4_lock_waiter {
	struct inode *inode;		/* inode the lock request is against */
	struct nfs_lowner owner;	/* our lock owner, matched against the callback */
	wait_queue_entry_t wait;
};

/*
 * Wake function for cl_lock_waitq: wake this waiter only if the
 * CB_NOTIFY_LOCK callback (passed via @key) matches our lock owner and
 * file handle.  A NULL key wakes everyone.
 */
static int
nfs4_wake_lock_waiter(wait_queue_entry_t *wait, unsigned int mode, int flags, void *key)
{
	struct nfs4_lock_waiter *waiter =
		container_of(wait, struct nfs4_lock_waiter, wait);

	/* NULL key means to wake up everyone */
	if (key) {
		struct cb_notify_lock_args *cbnl = key;
		struct nfs_lowner *lowner = &cbnl->cbnl_owner,
				*wowner = &waiter->owner;

		/* Only wake if the callback was for the same owner. */
		if (lowner->id != wowner->id || lowner->s_dev != wowner->s_dev)
			return 0;

		/* Make sure it's for the right inode */
		if (nfs_compare_fh(NFS_FH(waiter->inode), &cbnl->cbnl_fh))
			return 0;
	}

	return woken_wake_function(wait, mode, flags, key);
}

/*
 * Blocking-lock retry loop for NFSv4.1+: instead of polling blindly,
 * register on the client's lock waitqueue and sleep (freezably, up to
 * NFS4_LOCK_MAXTIMEOUT per round) until a matching CB_NOTIFY_LOCK
 * callback wakes us.  Falls back to the simple polling loop when the
 * server has not indicated it may send such callbacks.
 */
static int
nfs4_retry_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
{
	struct nfs4_lock_state *lsp = request->fl_u.nfs4_fl.owner;
	struct nfs_server *server = NFS_SERVER(state->inode);
	struct nfs_client *clp = server->nfs_client;
	wait_queue_head_t *q = &clp->cl_lock_waitq;
	struct nfs4_lock_waiter waiter = {
		.inode = state->inode,
		.owner = { .clientid = clp->cl_clientid,
			   .id = lsp->ls_seqid.owner_id,
			   .s_dev = server->s_dev },
	};
	int status;

	/* Don't bother with waitqueue if we don't expect a callback */
	if (!test_bit(NFS_STATE_MAY_NOTIFY_LOCK, &state->flags))
		return nfs4_retry_setlk_simple(state, cmd, request);

	init_wait(&waiter.wait);
	waiter.wait.func = nfs4_wake_lock_waiter;
	add_wait_queue(q, &waiter.wait);

	do {
		status = nfs4_proc_setlk(state, cmd, request);
		if (status != -EAGAIN || IS_SETLK(cmd))
			break;

		status = -ERESTARTSYS;
		wait_woken(&waiter.wait, TASK_INTERRUPTIBLE|TASK_FREEZABLE,
			   NFS4_LOCK_MAXTIMEOUT);
	} while (!signalled());

	remove_wait_queue(q, &waiter.wait);

	return status;
}
#else /* !CONFIG_NFS_V4_1 */
static inline int
nfs4_retry_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
{
	return nfs4_retry_setlk_simple(state, cmd, request);
}
#endif
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
/*
 * Entry point for file locking on NFSv4 (->lock file operation).
 *
 * Dispatches GETLK, unlock, and set-lock requests.  GETLK and unlock on
 * a file with no open state succeed trivially (there can be nothing to
 * report or release).  POSIX locks are refused when the server did not
 * grant POSIX lock semantics for this open.  Open-mode checks are done
 * here explicitly because the VFS does not do them for flock() locks.
 */
static int
nfs4_proc_lock(struct file *filp, int cmd, struct file_lock *request)
{
	struct nfs_open_context *ctx;
	struct nfs4_state *state;
	int status;

	/* verify open state */
	ctx = nfs_file_open_context(filp);
	state = ctx->state;

	if (IS_GETLK(cmd)) {
		if (state != NULL)
			return nfs4_proc_getlk(state, F_GETLK, request);
		return 0;
	}

	if (!(IS_SETLK(cmd) || IS_SETLKW(cmd)))
		return -EINVAL;

	if (lock_is_unlock(request)) {
		if (state != NULL)
			return nfs4_proc_unlck(state, cmd, request);
		return 0;
	}

	if (state == NULL)
		return -ENOLCK;

	if ((request->c.flc_flags & FL_POSIX) &&
	    !test_bit(NFS_STATE_POSIX_LOCKS, &state->flags))
		return -ENOLCK;

	/*
	 * Don't rely on the VFS having checked the file open mode,
	 * since it won't do this for flock() locks.
	 */
	switch (request->c.flc_type) {
	case F_RDLCK:
		if (!(filp->f_mode & FMODE_READ))
			return -EBADF;
		break;
	case F_WRLCK:
		if (!(filp->f_mode & FMODE_WRITE))
			return -EBADF;
	}

	status = nfs4_set_lock_state(state, request);
	if (status != 0)
		return status;

	return nfs4_retry_setlk(state, cmd, request);
}
|
|
|
|
|
2021-05-07 14:06:13 +00:00
|
|
|
/* Remove a lease: delegate straight to the generic F_UNLCK setlease path */
static int nfs4_delete_lease(struct file *file, void **priv)
{
	return generic_setlease(file, F_UNLCK, NULL, priv);
}
|
|
|
|
|
2024-01-31 23:02:28 +00:00
|
|
|
/*
 * Install a lease, but only while we hold a delegation of matching mode —
 * the delegation is what lets us promise the lease semantics locally.
 * The delegation is rechecked after generic_setlease(); if it was
 * returned in the meantime, the just-installed lease is torn down again
 * and -EAGAIN is returned.
 */
static int nfs4_add_lease(struct file *file, int arg, struct file_lease **lease,
		void **priv)
{
	struct inode *inode = file_inode(file);
	fmode_t type = arg == F_RDLCK ? FMODE_READ : FMODE_WRITE;
	int ret;

	/* No delegation, no lease */
	if (!nfs4_have_delegation(inode, type))
		return -EAGAIN;
	ret = generic_setlease(file, arg, lease, priv);
	if (ret || nfs4_have_delegation(inode, type))
		return ret;
	/* We raced with a delegation return */
	nfs4_delete_lease(file, priv);
	return -EAGAIN;
}
|
|
|
|
|
2024-01-31 23:02:28 +00:00
|
|
|
int nfs4_proc_setlease(struct file *file, int arg, struct file_lease **lease,
|
2021-05-07 14:06:13 +00:00
|
|
|
void **priv)
|
|
|
|
{
|
|
|
|
switch (arg) {
|
|
|
|
case F_RDLCK:
|
|
|
|
case F_WRLCK:
|
|
|
|
return nfs4_add_lease(file, arg, lease, priv);
|
|
|
|
case F_UNLCK:
|
|
|
|
return nfs4_delete_lease(file, priv);
|
|
|
|
default:
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2013-04-01 19:56:46 +00:00
|
|
|
/*
 * Push a locally-cached lock to the server as part of returning a
 * delegation.  Retries once per second while the server answers
 * NFS4ERR_DELAY; any other outcome is fed to the common delegation
 * recall error handler for translation/recovery.
 */
int nfs4_lock_delegation_recall(struct file_lock *fl, struct nfs4_state *state, const nfs4_stateid *stateid)
{
	struct nfs_server *server = NFS_SERVER(state->inode);
	int err;

	err = nfs4_set_lock_state(state, fl);
	if (err != 0)
		return err;
	do {
		err = _nfs4_do_setlk(state, F_SETLK, fl, NFS_LOCK_NEW);
		if (err != -NFS4ERR_DELAY)
			break;
		ssleep(1);
	} while (err == -NFS4ERR_DELAY);
	return nfs4_handle_delegation_recall_error(server, state, stateid, fl, err);
}
|
2005-06-22 17:16:22 +00:00
|
|
|
|
2012-03-07 18:49:12 +00:00
|
|
|
/*
 * Call data for an asynchronous RELEASE_LOCKOWNER RPC, which tells the
 * server it may discard state for a lock owner we no longer use.
 */
struct nfs_release_lockowner_data {
	struct nfs4_lock_state *lsp;	/* lock state being released */
	struct nfs_server *server;	/* server holding the lock owner state */
	struct nfs_release_lockowner_args args;
	struct nfs_release_lockowner_res res;
	unsigned long timestamp;	/* jiffies at send time, for lease renewal */
};
|
|
|
|
|
2013-08-09 16:49:38 +00:00
|
|
|
static void nfs4_release_lockowner_prepare(struct rpc_task *task, void *calldata)
|
|
|
|
{
|
|
|
|
struct nfs_release_lockowner_data *data = calldata;
|
2014-08-04 08:18:16 +00:00
|
|
|
struct nfs_server *server = data->server;
|
2017-01-10 16:39:53 +00:00
|
|
|
nfs4_setup_sequence(server->nfs_client, &data->args.seq_args,
|
|
|
|
&data->res.seq_res, task);
|
2014-08-04 08:18:16 +00:00
|
|
|
data->args.lock_owner.clientid = server->nfs_client->cl_clientid;
|
NFS: Migration support for RELEASE_LOCKOWNER
Currently the Linux NFS client ignores the operation status code for
the RELEASE_LOCKOWNER operation. Like NFSv3's UMNT operation,
RELEASE_LOCKOWNER is a courtesy to help servers manage their
resources, and the outcome is not consequential for the client.
During a migration, a server may report NFS4ERR_LEASE_MOVED, in
which case the client really should retry, since typically
LEASE_MOVED has nothing to do with the current operation, but does
prevent it from going forward.
Also, it's important for a client to respond as soon as possible to
a moved lease condition, since the client's lease could expire on
the destination without further action by the client.
NFS4ERR_DELAY is not included in the list of valid status codes for
RELEASE_LOCKOWNER in RFC 3530bis. However, rfc3530-migration-update
does permit migration-capable servers to return DELAY to clients,
but only in the context of an ongoing migration. In this case the
server has frozen lock state in preparation for migration, and a
client retry would help the destination server purge unneeded state
once migration recovery is complete.
Interestly, NFS4ERR_MOVED is not valid for RELEASE_LOCKOWNER, even
though lock owners can be migrated with Transparent State Migration.
Note that RFC 3530bis section 9.5 includes RELEASE_LOCKOWNER in the
list of operations that renew a client's lease on the server if they
succeed. Now that our client pays attention to the operation's
status code, we can note that renewal appropriately.
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
2013-10-17 18:13:47 +00:00
|
|
|
data->timestamp = jiffies;
|
2013-08-09 16:49:38 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static void nfs4_release_lockowner_done(struct rpc_task *task, void *calldata)
|
|
|
|
{
|
|
|
|
struct nfs_release_lockowner_data *data = calldata;
|
NFS: Migration support for RELEASE_LOCKOWNER
Currently the Linux NFS client ignores the operation status code for
the RELEASE_LOCKOWNER operation. Like NFSv3's UMNT operation,
RELEASE_LOCKOWNER is a courtesy to help servers manage their
resources, and the outcome is not consequential for the client.
During a migration, a server may report NFS4ERR_LEASE_MOVED, in
which case the client really should retry, since typically
LEASE_MOVED has nothing to do with the current operation, but does
prevent it from going forward.
Also, it's important for a client to respond as soon as possible to
a moved lease condition, since the client's lease could expire on
the destination without further action by the client.
NFS4ERR_DELAY is not included in the list of valid status codes for
RELEASE_LOCKOWNER in RFC 3530bis. However, rfc3530-migration-update
does permit migration-capable servers to return DELAY to clients,
but only in the context of an ongoing migration. In this case the
server has frozen lock state in preparation for migration, and a
client retry would help the destination server purge unneeded state
once migration recovery is complete.
Interestly, NFS4ERR_MOVED is not valid for RELEASE_LOCKOWNER, even
though lock owners can be migrated with Transparent State Migration.
Note that RFC 3530bis section 9.5 includes RELEASE_LOCKOWNER in the
list of operations that renew a client's lease on the server if they
succeed. Now that our client pays attention to the operation's
status code, we can note that renewal appropriately.
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
2013-10-17 18:13:47 +00:00
|
|
|
struct nfs_server *server = data->server;
|
|
|
|
|
2014-02-26 19:19:14 +00:00
|
|
|
nfs40_sequence_done(task, &data->res.seq_res);
|
NFS: Migration support for RELEASE_LOCKOWNER
Currently the Linux NFS client ignores the operation status code for
the RELEASE_LOCKOWNER operation. Like NFSv3's UMNT operation,
RELEASE_LOCKOWNER is a courtesy to help servers manage their
resources, and the outcome is not consequential for the client.
During a migration, a server may report NFS4ERR_LEASE_MOVED, in
which case the client really should retry, since typically
LEASE_MOVED has nothing to do with the current operation, but does
prevent it from going forward.
Also, it's important for a client to respond as soon as possible to
a moved lease condition, since the client's lease could expire on
the destination without further action by the client.
NFS4ERR_DELAY is not included in the list of valid status codes for
RELEASE_LOCKOWNER in RFC 3530bis. However, rfc3530-migration-update
does permit migration-capable servers to return DELAY to clients,
but only in the context of an ongoing migration. In this case the
server has frozen lock state in preparation for migration, and a
client retry would help the destination server purge unneeded state
once migration recovery is complete.
Interestly, NFS4ERR_MOVED is not valid for RELEASE_LOCKOWNER, even
though lock owners can be migrated with Transparent State Migration.
Note that RFC 3530bis section 9.5 includes RELEASE_LOCKOWNER in the
list of operations that renew a client's lease on the server if they
succeed. Now that our client pays attention to the operation's
status code, we can note that renewal appropriately.
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
2013-10-17 18:13:47 +00:00
|
|
|
|
|
|
|
switch (task->tk_status) {
|
|
|
|
case 0:
|
|
|
|
renew_lease(server, data->timestamp);
|
|
|
|
break;
|
|
|
|
case -NFS4ERR_STALE_CLIENTID:
|
|
|
|
case -NFS4ERR_EXPIRED:
|
2014-08-04 08:18:16 +00:00
|
|
|
nfs4_schedule_lease_recovery(server->nfs_client);
|
|
|
|
break;
|
NFS: Migration support for RELEASE_LOCKOWNER
Currently the Linux NFS client ignores the operation status code for
the RELEASE_LOCKOWNER operation. Like NFSv3's UMNT operation,
RELEASE_LOCKOWNER is a courtesy to help servers manage their
resources, and the outcome is not consequential for the client.
During a migration, a server may report NFS4ERR_LEASE_MOVED, in
which case the client really should retry, since typically
LEASE_MOVED has nothing to do with the current operation, but does
prevent it from going forward.
Also, it's important for a client to respond as soon as possible to
a moved lease condition, since the client's lease could expire on
the destination without further action by the client.
NFS4ERR_DELAY is not included in the list of valid status codes for
RELEASE_LOCKOWNER in RFC 3530bis. However, rfc3530-migration-update
does permit migration-capable servers to return DELAY to clients,
but only in the context of an ongoing migration. In this case the
server has frozen lock state in preparation for migration, and a
client retry would help the destination server purge unneeded state
once migration recovery is complete.
Interestly, NFS4ERR_MOVED is not valid for RELEASE_LOCKOWNER, even
though lock owners can be migrated with Transparent State Migration.
Note that RFC 3530bis section 9.5 includes RELEASE_LOCKOWNER in the
list of operations that renew a client's lease on the server if they
succeed. Now that our client pays attention to the operation's
status code, we can note that renewal appropriately.
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
2013-10-17 18:13:47 +00:00
|
|
|
case -NFS4ERR_LEASE_MOVED:
|
|
|
|
case -NFS4ERR_DELAY:
|
2014-09-18 06:09:27 +00:00
|
|
|
if (nfs4_async_handle_error(task, server,
|
|
|
|
NULL, NULL) == -EAGAIN)
|
NFS: Migration support for RELEASE_LOCKOWNER
Currently the Linux NFS client ignores the operation status code for
the RELEASE_LOCKOWNER operation. Like NFSv3's UMNT operation,
RELEASE_LOCKOWNER is a courtesy to help servers manage their
resources, and the outcome is not consequential for the client.
During a migration, a server may report NFS4ERR_LEASE_MOVED, in
which case the client really should retry, since typically
LEASE_MOVED has nothing to do with the current operation, but does
prevent it from going forward.
Also, it's important for a client to respond as soon as possible to
a moved lease condition, since the client's lease could expire on
the destination without further action by the client.
NFS4ERR_DELAY is not included in the list of valid status codes for
RELEASE_LOCKOWNER in RFC 3530bis. However, rfc3530-migration-update
does permit migration-capable servers to return DELAY to clients,
but only in the context of an ongoing migration. In this case the
server has frozen lock state in preparation for migration, and a
client retry would help the destination server purge unneeded state
once migration recovery is complete.
Interestly, NFS4ERR_MOVED is not valid for RELEASE_LOCKOWNER, even
though lock owners can be migrated with Transparent State Migration.
Note that RFC 3530bis section 9.5 includes RELEASE_LOCKOWNER in the
list of operations that renew a client's lease on the server if they
succeed. Now that our client pays attention to the operation's
status code, we can note that renewal appropriately.
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
2013-10-17 18:13:47 +00:00
|
|
|
rpc_restart_call_prepare(task);
|
|
|
|
}
|
2013-08-09 16:49:38 +00:00
|
|
|
}
|
|
|
|
|
2010-07-01 16:49:01 +00:00
|
|
|
static void nfs4_release_lockowner_release(void *calldata)
|
|
|
|
{
|
2012-03-07 18:49:12 +00:00
|
|
|
struct nfs_release_lockowner_data *data = calldata;
|
2012-03-19 20:17:18 +00:00
|
|
|
nfs4_free_lock_state(data->server, data->lsp);
|
2010-07-01 16:49:01 +00:00
|
|
|
kfree(calldata);
|
|
|
|
}
|
|
|
|
|
2012-03-11 17:11:00 +00:00
|
|
|
static const struct rpc_call_ops nfs4_release_lockowner_ops = {
|
2013-08-09 16:49:38 +00:00
|
|
|
.rpc_call_prepare = nfs4_release_lockowner_prepare,
|
|
|
|
.rpc_call_done = nfs4_release_lockowner_done,
|
2010-07-01 16:49:01 +00:00
|
|
|
.rpc_release = nfs4_release_lockowner_release,
|
|
|
|
};
|
|
|
|
|
2014-05-01 10:28:47 +00:00
|
|
|
static void
|
|
|
|
nfs4_release_lockowner(struct nfs_server *server, struct nfs4_lock_state *lsp)
|
2010-07-01 16:49:01 +00:00
|
|
|
{
|
2012-03-07 18:49:12 +00:00
|
|
|
struct nfs_release_lockowner_data *data;
|
2010-07-01 16:49:01 +00:00
|
|
|
struct rpc_message msg = {
|
|
|
|
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RELEASE_LOCKOWNER],
|
|
|
|
};
|
|
|
|
|
|
|
|
if (server->nfs_client->cl_mvops->minor_version != 0)
|
2014-05-01 10:28:47 +00:00
|
|
|
return;
|
2013-08-09 16:49:38 +00:00
|
|
|
|
2022-01-29 18:57:38 +00:00
|
|
|
data = kmalloc(sizeof(*data), GFP_KERNEL);
|
2012-03-07 18:49:12 +00:00
|
|
|
if (!data)
|
2014-05-01 10:28:47 +00:00
|
|
|
return;
|
2012-03-07 18:49:12 +00:00
|
|
|
data->lsp = lsp;
|
2012-03-19 20:17:18 +00:00
|
|
|
data->server = server;
|
2012-03-07 18:49:12 +00:00
|
|
|
data->args.lock_owner.clientid = server->nfs_client->cl_clientid;
|
|
|
|
data->args.lock_owner.id = lsp->ls_seqid.owner_id;
|
|
|
|
data->args.lock_owner.s_dev = server->s_dev;
|
2013-08-09 16:49:38 +00:00
|
|
|
|
2012-03-07 18:49:12 +00:00
|
|
|
msg.rpc_argp = &data->args;
|
2014-02-26 19:19:14 +00:00
|
|
|
msg.rpc_resp = &data->res;
|
2018-05-04 20:22:50 +00:00
|
|
|
nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 0, 0);
|
2012-03-07 18:49:12 +00:00
|
|
|
rpc_call_async(server->client, &msg, 0, &nfs4_release_lockowner_ops, data);
|
2010-07-01 16:49:01 +00:00
|
|
|
}
|
|
|
|
|
2005-06-22 17:16:22 +00:00
|
|
|
#define XATTR_NAME_NFSV4_ACL "system.nfs4_acl"
|
|
|
|
|
2015-10-04 17:18:51 +00:00
|
|
|
static int nfs4_xattr_set_nfs4_acl(const struct xattr_handler *handler,
|
2023-01-13 11:49:23 +00:00
|
|
|
struct mnt_idmap *idmap,
|
2016-05-27 14:19:30 +00:00
|
|
|
struct dentry *unused, struct inode *inode,
|
|
|
|
const char *key, const void *buf,
|
|
|
|
size_t buflen, int flags)
|
2005-06-22 17:16:22 +00:00
|
|
|
{
|
2022-05-14 14:36:58 +00:00
|
|
|
return nfs4_proc_set_acl(inode, buf, buflen, NFS4ACL_ACL);
|
2005-06-22 17:16:22 +00:00
|
|
|
}
|
|
|
|
|
2015-10-04 17:18:51 +00:00
|
|
|
/* xattr handler: fetch the NFSv4 ACL for @inode into @buf. */
static int nfs4_xattr_get_nfs4_acl(const struct xattr_handler *handler,
				   struct dentry *unused, struct inode *inode,
				   const char *key, void *buf, size_t buflen)
{
	return nfs4_proc_get_acl(inode, buf, buflen, NFS4ACL_ACL);
}
|
|
|
|
|
2015-12-02 13:44:43 +00:00
|
|
|
static bool nfs4_xattr_list_nfs4_acl(struct dentry *dentry)
|
2005-06-22 17:16:22 +00:00
|
|
|
{
|
2022-05-14 14:36:58 +00:00
|
|
|
return nfs4_server_supports_acls(NFS_SB(dentry->d_sb), NFS4ACL_ACL);
|
2005-06-22 17:16:22 +00:00
|
|
|
}
|
|
|
|
|
2022-05-14 14:37:00 +00:00
|
|
|
#if defined(CONFIG_NFS_V4_1)
#define XATTR_NAME_NFSV4_DACL "system.nfs4_dacl"

/* xattr handler: store the NFSv4.1 discretionary ACL (DACL). */
static int nfs4_xattr_set_nfs4_dacl(const struct xattr_handler *handler,
				    struct mnt_idmap *idmap,
				    struct dentry *unused, struct inode *inode,
				    const char *key, const void *buf,
				    size_t buflen, int flags)
{
	return nfs4_proc_set_acl(inode, buf, buflen, NFS4ACL_DACL);
}
|
|
|
|
|
|
|
|
/* xattr handler: fetch the NFSv4.1 discretionary ACL (DACL). */
static int nfs4_xattr_get_nfs4_dacl(const struct xattr_handler *handler,
				    struct dentry *unused, struct inode *inode,
				    const char *key, void *buf, size_t buflen)
{
	return nfs4_proc_get_acl(inode, buf, buflen, NFS4ACL_DACL);
}
|
|
|
|
|
|
|
|
static bool nfs4_xattr_list_nfs4_dacl(struct dentry *dentry)
|
|
|
|
{
|
|
|
|
return nfs4_server_supports_acls(NFS_SB(dentry->d_sb), NFS4ACL_DACL);
|
|
|
|
}
|
|
|
|
|
|
|
|
#define XATTR_NAME_NFSV4_SACL "system.nfs4_sacl"
|
|
|
|
|
|
|
|
static int nfs4_xattr_set_nfs4_sacl(const struct xattr_handler *handler,
|
2023-01-13 11:49:23 +00:00
|
|
|
struct mnt_idmap *idmap,
|
2022-05-14 14:37:00 +00:00
|
|
|
struct dentry *unused, struct inode *inode,
|
|
|
|
const char *key, const void *buf,
|
|
|
|
size_t buflen, int flags)
|
|
|
|
{
|
|
|
|
return nfs4_proc_set_acl(inode, buf, buflen, NFS4ACL_SACL);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* xattr handler: fetch the NFSv4.1 system/audit ACL (SACL). */
static int nfs4_xattr_get_nfs4_sacl(const struct xattr_handler *handler,
				    struct dentry *unused, struct inode *inode,
				    const char *key, void *buf, size_t buflen)
{
	return nfs4_proc_get_acl(inode, buf, buflen, NFS4ACL_SACL);
}
|
|
|
|
|
|
|
|
static bool nfs4_xattr_list_nfs4_sacl(struct dentry *dentry)
|
|
|
|
{
|
|
|
|
return nfs4_server_supports_acls(NFS_SB(dentry->d_sb), NFS4ACL_SACL);
|
|
|
|
}
|
|
|
|
|
|
|
|
#endif
|
|
|
|
|
2013-05-22 16:50:45 +00:00
|
|
|
#ifdef CONFIG_NFS_V4_SECURITY_LABEL

/*
 * xattr handler: set a security label. Only MAC labels (as judged by the
 * LSM via security_ismaclabel()) are forwarded to the server.
 */
static int nfs4_xattr_set_nfs4_label(const struct xattr_handler *handler,
				     struct mnt_idmap *idmap,
				     struct dentry *unused, struct inode *inode,
				     const char *key, const void *buf,
				     size_t buflen, int flags)
{
	if (security_ismaclabel(key))
		return nfs4_set_security_label(inode, buf, buflen);

	return -EOPNOTSUPP;
}
|
|
|
|
|
2015-10-04 17:18:51 +00:00
|
|
|
/*
 * xattr handler: get a security label. Non-MAC keys under the security
 * prefix are rejected with -EOPNOTSUPP.
 */
static int nfs4_xattr_get_nfs4_label(const struct xattr_handler *handler,
				     struct dentry *unused, struct inode *inode,
				     const char *key, void *buf, size_t buflen)
{
	if (security_ismaclabel(key))
		return nfs4_get_security_label(inode, buf, buflen);
	return -EOPNOTSUPP;
}
|
|
|
|
|
2015-12-02 13:44:41 +00:00
|
|
|
static ssize_t
|
|
|
|
nfs4_listxattr_nfs4_label(struct inode *inode, char *list, size_t list_len)
|
2013-05-22 16:50:45 +00:00
|
|
|
{
|
2015-12-02 13:44:41 +00:00
|
|
|
int len = 0;
|
2013-05-22 16:50:45 +00:00
|
|
|
|
2015-12-02 13:44:41 +00:00
|
|
|
if (nfs_server_capable(inode, NFS_CAP_SECURITY_LABEL)) {
|
|
|
|
len = security_inode_listsecurity(inode, list, list_len);
|
2020-06-17 01:09:39 +00:00
|
|
|
if (len >= 0 && list_len && len > list_len)
|
2015-12-02 13:44:41 +00:00
|
|
|
return -ERANGE;
|
2013-05-22 16:50:45 +00:00
|
|
|
}
|
|
|
|
return len;
|
|
|
|
}
|
|
|
|
|
|
|
|
static const struct xattr_handler nfs4_xattr_nfs4_label_handler = {
|
|
|
|
.prefix = XATTR_SECURITY_PREFIX,
|
|
|
|
.get = nfs4_xattr_get_nfs4_label,
|
|
|
|
.set = nfs4_xattr_set_nfs4_label,
|
|
|
|
};
|
|
|
|
|
2015-12-02 13:44:41 +00:00
|
|
|
#else
|
|
|
|
|
|
|
|
static ssize_t
|
|
|
|
nfs4_listxattr_nfs4_label(struct inode *inode, char *list, size_t list_len)
|
|
|
|
{
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
#endif
|
2013-05-22 16:50:45 +00:00
|
|
|
|
2020-06-23 22:39:03 +00:00
|
|
|
#ifdef CONFIG_NFS_V4_2
/*
 * xattr handler: set (buf != NULL) or remove (buf == NULL) a "user."
 * extended attribute via the NFSv4.2 xattr protocol, keeping the local
 * xattr cache coherent on success.
 */
static int nfs4_xattr_set_nfs4_user(const struct xattr_handler *handler,
				    struct mnt_idmap *idmap,
				    struct dentry *unused, struct inode *inode,
				    const char *key, const void *buf,
				    size_t buflen, int flags)
{
	u32 mask;
	int ret;

	if (!nfs_server_capable(inode, NFS_CAP_XATTR))
		return -EOPNOTSUPP;

	/*
	 * There is no mapping from the MAY_* flags to the NFS_ACCESS_XA*
	 * flags right now. Handling of xattr operations use the normal
	 * file read/write permissions.
	 *
	 * Just in case the server has other ideas (which RFC 8276 allows),
	 * do a cached access check for the XA* flags to possibly avoid
	 * doing an RPC and getting EACCES back.
	 */
	if (!nfs_access_get_cached(inode, current_cred(), &mask, true)) {
		if (!(mask & NFS_ACCESS_XAWRITE))
			return -EACCES;
	}

	if (buf == NULL) {
		ret = nfs42_proc_removexattr(inode, key);
		if (!ret)
			nfs4_xattr_cache_remove(inode, key);
	} else {
		ret = nfs42_proc_setxattr(inode, key, buf, buflen, flags);
		if (!ret)
			nfs4_xattr_cache_add(inode, key, buf, NULL, buflen);
	}

	return ret;
}
|
|
|
|
|
|
|
|
/*
 * xattr handler: get a "user." extended attribute. Tries the local xattr
 * cache first (after revalidating the inode's change attribute) and only
 * falls back to a GETXATTR RPC on a cache miss (-ENOENT from the cache).
 */
static int nfs4_xattr_get_nfs4_user(const struct xattr_handler *handler,
				    struct dentry *unused, struct inode *inode,
				    const char *key, void *buf, size_t buflen)
{
	u32 mask;
	ssize_t ret;

	if (!nfs_server_capable(inode, NFS_CAP_XATTR))
		return -EOPNOTSUPP;

	/* Cheap cached access check; see nfs4_xattr_set_nfs4_user(). */
	if (!nfs_access_get_cached(inode, current_cred(), &mask, true)) {
		if (!(mask & NFS_ACCESS_XAREAD))
			return -EACCES;
	}

	ret = nfs_revalidate_inode(inode, NFS_INO_INVALID_CHANGE);
	if (ret)
		return ret;

	/* Any cache result other than "not cached" is authoritative. */
	ret = nfs4_xattr_cache_get(inode, key, buf, buflen);
	if (ret >= 0 || (ret < 0 && ret != -ENOENT))
		return ret;

	ret = nfs42_proc_getxattr(inode, key, buf, buflen);

	return ret;
}
|
|
|
|
|
|
|
|
static ssize_t
|
|
|
|
nfs4_listxattr_nfs4_user(struct inode *inode, char *list, size_t list_len)
|
|
|
|
{
|
|
|
|
u64 cookie;
|
|
|
|
bool eof;
|
2020-06-23 22:39:04 +00:00
|
|
|
ssize_t ret, size;
|
2020-06-23 22:39:03 +00:00
|
|
|
char *buf;
|
|
|
|
size_t buflen;
|
2021-09-27 23:47:57 +00:00
|
|
|
u32 mask;
|
2020-06-23 22:39:03 +00:00
|
|
|
|
|
|
|
if (!nfs_server_capable(inode, NFS_CAP_XATTR))
|
|
|
|
return 0;
|
|
|
|
|
2021-09-27 23:47:57 +00:00
|
|
|
if (!nfs_access_get_cached(inode, current_cred(), &mask, true)) {
|
|
|
|
if (!(mask & NFS_ACCESS_XALIST))
|
2020-06-23 22:39:03 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2021-03-25 15:04:34 +00:00
|
|
|
ret = nfs_revalidate_inode(inode, NFS_INO_INVALID_CHANGE);
|
2020-06-23 22:39:04 +00:00
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
|
|
|
|
ret = nfs4_xattr_cache_list(inode, list, list_len);
|
|
|
|
if (ret >= 0 || (ret < 0 && ret != -ENOENT))
|
|
|
|
return ret;
|
|
|
|
|
2020-06-23 22:39:03 +00:00
|
|
|
cookie = 0;
|
|
|
|
eof = false;
|
|
|
|
buflen = list_len ? list_len : XATTR_LIST_MAX;
|
|
|
|
buf = list_len ? list : NULL;
|
|
|
|
size = 0;
|
|
|
|
|
|
|
|
while (!eof) {
|
|
|
|
ret = nfs42_proc_listxattrs(inode, buf, buflen,
|
|
|
|
&cookie, &eof);
|
|
|
|
if (ret < 0)
|
|
|
|
return ret;
|
|
|
|
|
|
|
|
if (list_len) {
|
|
|
|
buf += ret;
|
|
|
|
buflen -= ret;
|
|
|
|
}
|
|
|
|
size += ret;
|
|
|
|
}
|
|
|
|
|
2020-06-23 22:39:04 +00:00
|
|
|
if (list_len)
|
|
|
|
nfs4_xattr_cache_set_list(inode, list, size);
|
|
|
|
|
2020-06-23 22:39:03 +00:00
|
|
|
return size;
|
|
|
|
}
|
|
|
|
|
|
|
|
#else
|
|
|
|
|
|
|
|
static ssize_t
|
|
|
|
nfs4_listxattr_nfs4_user(struct inode *inode, char *list, size_t list_len)
|
|
|
|
{
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
#endif /* CONFIG_NFS_V4_2 */
|
|
|
|
|
2011-06-13 22:25:56 +00:00
|
|
|
/*
|
|
|
|
* nfs_fhget will use either the mounted_on_fileid or the fileid
|
|
|
|
*/
|
2009-03-11 18:10:28 +00:00
|
|
|
static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr)
|
|
|
|
{
|
2011-06-13 22:25:56 +00:00
|
|
|
if (!(((fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID) ||
|
|
|
|
(fattr->valid & NFS_ATTR_FATTR_FILEID)) &&
|
|
|
|
(fattr->valid & NFS_ATTR_FATTR_FSID) &&
|
2012-03-01 22:01:57 +00:00
|
|
|
(fattr->valid & NFS_ATTR_FATTR_V4_LOCATIONS)))
|
2009-03-11 18:10:28 +00:00
|
|
|
return;
|
|
|
|
|
|
|
|
fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE |
|
2012-03-01 22:01:57 +00:00
|
|
|
NFS_ATTR_FATTR_NLINK | NFS_ATTR_FATTR_V4_REFERRAL;
|
2009-03-11 18:10:28 +00:00
|
|
|
fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO;
|
|
|
|
fattr->nlink = 2;
|
|
|
|
}
|
|
|
|
|
2012-04-27 17:27:41 +00:00
|
|
|
static int _nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir,
|
|
|
|
const struct qstr *name,
|
|
|
|
struct nfs4_fs_locations *fs_locations,
|
|
|
|
struct page *page)
|
2006-06-09 13:34:22 +00:00
|
|
|
{
|
|
|
|
struct nfs_server *server = NFS_SERVER(dir);
|
2017-11-05 20:45:22 +00:00
|
|
|
u32 bitmask[3];
|
2006-06-09 13:34:22 +00:00
|
|
|
struct nfs4_fs_locations_arg args = {
|
|
|
|
.dir_fh = NFS_FH(dir),
|
2007-01-13 07:28:11 +00:00
|
|
|
.name = name,
|
2006-06-09 13:34:22 +00:00
|
|
|
.page = page,
|
|
|
|
.bitmask = bitmask,
|
|
|
|
};
|
2009-04-01 13:22:02 +00:00
|
|
|
struct nfs4_fs_locations_res res = {
|
|
|
|
.fs_locations = fs_locations,
|
|
|
|
};
|
2006-06-09 13:34:22 +00:00
|
|
|
struct rpc_message msg = {
|
|
|
|
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS],
|
|
|
|
.rpc_argp = &args,
|
2009-04-01 13:22:02 +00:00
|
|
|
.rpc_resp = &res,
|
2006-06-09 13:34:22 +00:00
|
|
|
};
|
|
|
|
int status;
|
|
|
|
|
2008-05-02 20:42:44 +00:00
|
|
|
dprintk("%s: start\n", __func__);
|
2011-06-13 22:25:56 +00:00
|
|
|
|
2017-11-05 20:45:22 +00:00
|
|
|
bitmask[0] = nfs4_fattr_bitmap[0] | FATTR4_WORD0_FS_LOCATIONS;
|
|
|
|
bitmask[1] = nfs4_fattr_bitmap[1];
|
|
|
|
|
2011-06-13 22:25:56 +00:00
|
|
|
/* Ask for the fileid of the absent filesystem if mounted_on_fileid
|
|
|
|
* is not supported */
|
|
|
|
if (NFS_SERVER(dir)->attr_bitmask[1] & FATTR4_WORD1_MOUNTED_ON_FILEID)
|
2017-11-05 20:45:22 +00:00
|
|
|
bitmask[0] &= ~FATTR4_WORD0_FILEID;
|
2011-06-13 22:25:56 +00:00
|
|
|
else
|
2017-11-05 20:45:22 +00:00
|
|
|
bitmask[1] &= ~FATTR4_WORD1_MOUNTED_ON_FILEID;
|
2011-06-13 22:25:56 +00:00
|
|
|
|
2022-05-14 11:05:13 +00:00
|
|
|
nfs_fattr_init(fs_locations->fattr);
|
2006-06-09 13:34:22 +00:00
|
|
|
fs_locations->server = server;
|
2006-06-09 13:34:25 +00:00
|
|
|
fs_locations->nlocations = 0;
|
2012-04-27 17:27:41 +00:00
|
|
|
status = nfs4_call_sync(client, server, &msg, &args.seq_args, &res.seq_res, 0);
|
2008-05-02 20:42:44 +00:00
|
|
|
dprintk("%s: returned status = %d\n", __func__, status);
|
2006-06-09 13:34:22 +00:00
|
|
|
return status;
|
|
|
|
}
|
|
|
|
|
2012-04-27 17:27:41 +00:00
|
|
|
int nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir,
|
|
|
|
const struct qstr *name,
|
|
|
|
struct nfs4_fs_locations *fs_locations,
|
|
|
|
struct page *page)
|
2012-04-27 17:27:39 +00:00
|
|
|
{
|
2019-04-07 17:59:09 +00:00
|
|
|
struct nfs4_exception exception = {
|
|
|
|
.interruptible = true,
|
|
|
|
};
|
2012-04-27 17:27:39 +00:00
|
|
|
int err;
|
|
|
|
do {
|
2013-08-12 20:45:55 +00:00
|
|
|
err = _nfs4_proc_fs_locations(client, dir, name,
|
|
|
|
fs_locations, page);
|
|
|
|
trace_nfs4_get_fs_locations(dir, name, err);
|
|
|
|
err = nfs4_handle_exception(NFS_SERVER(dir), err,
|
2012-04-27 17:27:39 +00:00
|
|
|
&exception);
|
|
|
|
} while (exception.retry);
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2013-10-17 18:12:50 +00:00
|
|
|
/*
|
|
|
|
* This operation also signals the server that this client is
|
|
|
|
* performing migration recovery. The server can stop returning
|
|
|
|
* NFS4ERR_LEASE_MOVED to this client. A RENEW operation is
|
|
|
|
* appended to this compound to identify the client ID which is
|
|
|
|
* performing recovery.
|
|
|
|
*/
|
2022-01-12 15:27:38 +00:00
|
|
|
static int _nfs40_proc_get_locations(struct nfs_server *server,
|
|
|
|
struct nfs_fh *fhandle,
|
2013-10-17 18:12:50 +00:00
|
|
|
struct nfs4_fs_locations *locations,
|
2018-12-03 00:30:31 +00:00
|
|
|
struct page *page, const struct cred *cred)
|
2013-10-17 18:12:50 +00:00
|
|
|
{
|
|
|
|
struct rpc_clnt *clnt = server->client;
|
|
|
|
u32 bitmask[2] = {
|
|
|
|
[0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS,
|
|
|
|
};
|
|
|
|
struct nfs4_fs_locations_arg args = {
|
|
|
|
.clientid = server->nfs_client->cl_clientid,
|
2022-01-12 15:27:38 +00:00
|
|
|
.fh = fhandle,
|
2013-10-17 18:12:50 +00:00
|
|
|
.page = page,
|
|
|
|
.bitmask = bitmask,
|
|
|
|
.migration = 1, /* skip LOOKUP */
|
|
|
|
.renew = 1, /* append RENEW */
|
|
|
|
};
|
|
|
|
struct nfs4_fs_locations_res res = {
|
|
|
|
.fs_locations = locations,
|
|
|
|
.migration = 1,
|
|
|
|
.renew = 1,
|
|
|
|
};
|
|
|
|
struct rpc_message msg = {
|
|
|
|
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS],
|
|
|
|
.rpc_argp = &args,
|
|
|
|
.rpc_resp = &res,
|
|
|
|
.rpc_cred = cred,
|
|
|
|
};
|
|
|
|
unsigned long now = jiffies;
|
|
|
|
int status;
|
|
|
|
|
2022-05-14 11:05:13 +00:00
|
|
|
nfs_fattr_init(locations->fattr);
|
2013-10-17 18:12:50 +00:00
|
|
|
locations->server = server;
|
|
|
|
locations->nlocations = 0;
|
|
|
|
|
2018-05-04 20:22:50 +00:00
|
|
|
nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 1);
|
2013-10-17 18:12:50 +00:00
|
|
|
status = nfs4_call_sync_sequence(clnt, server, &msg,
|
|
|
|
&args.seq_args, &res.seq_res);
|
|
|
|
if (status)
|
|
|
|
return status;
|
|
|
|
|
|
|
|
renew_lease(server, now);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
#ifdef CONFIG_NFS_V4_1

/*
 * This operation also signals the server that this client is
 * performing migration recovery. The server can stop asserting
 * SEQ4_STATUS_LEASE_MOVED for this client. The client ID
 * performing this operation is identified in the SEQUENCE
 * operation in this compound.
 *
 * When the client supports GETATTR(fs_locations_info), it can
 * be plumbed in here.
 */
static int _nfs41_proc_get_locations(struct nfs_server *server,
				     struct nfs_fh *fhandle,
				     struct nfs4_fs_locations *locations,
				     struct page *page, const struct cred *cred)
{
	struct rpc_clnt *clnt = server->client;
	u32 bitmask[2] = {
		[0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS,
	};
	struct nfs4_fs_locations_arg args = {
		.fh = fhandle,
		.page = page,
		.bitmask = bitmask,
		.migration = 1,		/* skip LOOKUP */
	};
	struct nfs4_fs_locations_res res = {
		.fs_locations = locations,
		.migration = 1,
	};
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS],
		.rpc_argp = &args,
		.rpc_resp = &res,
		.rpc_cred = cred,
	};
	struct nfs4_call_sync_data data = {
		.seq_server = server,
		.seq_args = &args.seq_args,
		.seq_res = &res.seq_res,
	};
	struct rpc_task_setup task_setup_data = {
		.rpc_client = clnt,
		.rpc_message = &msg,
		.callback_ops = server->nfs_client->cl_mvops->call_sync_ops,
		.callback_data = &data,
		.flags = RPC_TASK_NO_ROUND_ROBIN,
	};
	int status;

	nfs_fattr_init(locations->fattr);
	locations->server = server;
	locations->nlocations = 0;

	nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 1);
	status = nfs4_call_sync_custom(&task_setup_data);
	/* The server may still be asserting a moved lease for this client;
	 * surface that as an error so the caller keeps recovering. */
	if (status == NFS4_OK &&
	    res.seq_res.sr_status_flags & SEQ4_STATUS_LEASE_MOVED)
		status = -NFS4ERR_LEASE_MOVED;
	return status;
}

#endif /* CONFIG_NFS_V4_1 */
|
|
|
|
|
|
|
|
/**
|
|
|
|
* nfs4_proc_get_locations - discover locations for a migrated FSID
|
2022-01-13 02:26:04 +00:00
|
|
|
* @server: pointer to nfs_server to process
|
|
|
|
* @fhandle: pointer to the kernel NFS client file handle
|
2013-10-17 18:12:50 +00:00
|
|
|
* @locations: result of query
|
|
|
|
* @page: buffer
|
|
|
|
* @cred: credential to use for this operation
|
|
|
|
*
|
|
|
|
* Returns NFS4_OK on success, a negative NFS4ERR status code if the
|
|
|
|
* operation failed, or a negative errno if a local error occurred.
|
|
|
|
*
|
|
|
|
* On success, "locations" is filled in, but if the server has
|
|
|
|
* no locations information, NFS_ATTR_FATTR_V4_LOCATIONS is not
|
|
|
|
* asserted.
|
|
|
|
*
|
|
|
|
* -NFS4ERR_LEASE_MOVED is returned if the server still has leases
|
|
|
|
* from this client that require migration recovery.
|
|
|
|
*/
|
2022-01-12 15:27:38 +00:00
|
|
|
int nfs4_proc_get_locations(struct nfs_server *server,
|
|
|
|
struct nfs_fh *fhandle,
|
2013-10-17 18:12:50 +00:00
|
|
|
struct nfs4_fs_locations *locations,
|
2018-12-03 00:30:31 +00:00
|
|
|
struct page *page, const struct cred *cred)
|
2013-10-17 18:12:50 +00:00
|
|
|
{
|
|
|
|
struct nfs_client *clp = server->nfs_client;
|
|
|
|
const struct nfs4_mig_recovery_ops *ops =
|
|
|
|
clp->cl_mvops->mig_recovery_ops;
|
2019-04-07 17:59:09 +00:00
|
|
|
struct nfs4_exception exception = {
|
|
|
|
.interruptible = true,
|
|
|
|
};
|
2013-10-17 18:12:50 +00:00
|
|
|
int status;
|
|
|
|
|
|
|
|
dprintk("%s: FSID %llx:%llx on \"%s\"\n", __func__,
|
|
|
|
(unsigned long long)server->fsid.major,
|
|
|
|
(unsigned long long)server->fsid.minor,
|
|
|
|
clp->cl_hostname);
|
2022-01-12 15:27:38 +00:00
|
|
|
nfs_display_fhandle(fhandle, __func__);
|
2013-10-17 18:12:50 +00:00
|
|
|
|
|
|
|
do {
|
2022-01-12 15:27:38 +00:00
|
|
|
status = ops->get_locations(server, fhandle, locations, page,
|
|
|
|
cred);
|
2013-10-17 18:12:50 +00:00
|
|
|
if (status != -NFS4ERR_DELAY)
|
|
|
|
break;
|
|
|
|
nfs4_handle_exception(server, status, &exception);
|
|
|
|
} while (exception.retry);
|
|
|
|
return status;
|
|
|
|
}
|
|
|
|
|
2013-10-17 18:13:30 +00:00
|
|
|
/*
 * This operation also signals the server that this client is
 * performing "lease moved" recovery. The server can stop
 * returning NFS4ERR_LEASE_MOVED to this client. A RENEW operation
 * is appended to this compound to identify the client ID which is
 * performing recovery.
 */
static int _nfs40_proc_fsid_present(struct inode *inode, const struct cred *cred)
{
	struct nfs_server *server = NFS_SERVER(inode);
	struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
	struct rpc_clnt *clnt = server->client;
	struct nfs4_fsid_present_arg args = {
		.fh = NFS_FH(inode),
		.clientid = clp->cl_clientid,
		.renew = 1,		/* append RENEW */
	};
	struct nfs4_fsid_present_res res = {
		.renew = 1,
	};
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSID_PRESENT],
		.rpc_argp = &args,
		.rpc_resp = &res,
		.rpc_cred = cred,
	};
	/* Sampled before the RPC so the lease renewal below is conservative */
	unsigned long now = jiffies;
	int status;

	/* Scratch filehandle to receive the decoded GETFH result */
	res.fh = nfs_alloc_fhandle();
	if (res.fh == NULL)
		return -ENOMEM;

	nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 1);
	status = nfs4_call_sync_sequence(clnt, server, &msg,
						&args.seq_args, &res.seq_res);
	nfs_free_fhandle(res.fh);
	if (status)
		return status;

	/* The appended RENEW succeeded: record the lease renewal time */
	do_renew_lease(clp, now);
	return 0;
}
|
|
|
|
|
|
|
|
#ifdef CONFIG_NFS_V4_1
|
|
|
|
|
|
|
|
/*
 * This operation also signals the server that this client is
 * performing "lease moved" recovery. The server can stop asserting
 * SEQ4_STATUS_LEASE_MOVED for this client. The client ID performing
 * this operation is identified in the SEQUENCE operation in this
 * compound.
 */
static int _nfs41_proc_fsid_present(struct inode *inode, const struct cred *cred)
{
	struct nfs_server *server = NFS_SERVER(inode);
	struct rpc_clnt *clnt = server->client;
	struct nfs4_fsid_present_arg args = {
		.fh = NFS_FH(inode),
	};
	struct nfs4_fsid_present_res res = {
	};
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSID_PRESENT],
		.rpc_argp = &args,
		.rpc_resp = &res,
		.rpc_cred = cred,
	};
	int status;

	/* Scratch filehandle to receive the decoded GETFH result */
	res.fh = nfs_alloc_fhandle();
	if (res.fh == NULL)
		return -ENOMEM;

	nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 1);
	status = nfs4_call_sync_sequence(clnt, server, &msg,
						&args.seq_args, &res.seq_res);
	nfs_free_fhandle(res.fh);
	/* A successful call that still reports LEASE_MOVED in the
	 * SEQUENCE status flags means migration recovery isn't done yet */
	if (status == NFS4_OK &&
	    res.seq_res.sr_status_flags & SEQ4_STATUS_LEASE_MOVED)
		status = -NFS4ERR_LEASE_MOVED;
	return status;
}
|
|
|
|
|
|
|
|
#endif /* CONFIG_NFS_V4_1 */
|
|
|
|
|
|
|
|
/**
|
|
|
|
* nfs4_proc_fsid_present - Is this FSID present or absent on server?
|
|
|
|
* @inode: inode on FSID to check
|
|
|
|
* @cred: credential to use for this operation
|
|
|
|
*
|
|
|
|
* Server indicates whether the FSID is present, moved, or not
|
|
|
|
* recognized. This operation is necessary to clear a LEASE_MOVED
|
|
|
|
* condition for this client ID.
|
|
|
|
*
|
|
|
|
* Returns NFS4_OK if the FSID is present on this server,
|
|
|
|
* -NFS4ERR_MOVED if the FSID is no longer present, a negative
|
|
|
|
* NFS4ERR code if some error occurred on the server, or a
|
|
|
|
* negative errno if a local failure occurred.
|
|
|
|
*/
|
2018-12-03 00:30:31 +00:00
|
|
|
int nfs4_proc_fsid_present(struct inode *inode, const struct cred *cred)
|
2013-10-17 18:13:30 +00:00
|
|
|
{
|
|
|
|
struct nfs_server *server = NFS_SERVER(inode);
|
|
|
|
struct nfs_client *clp = server->nfs_client;
|
|
|
|
const struct nfs4_mig_recovery_ops *ops =
|
|
|
|
clp->cl_mvops->mig_recovery_ops;
|
2019-04-07 17:59:09 +00:00
|
|
|
struct nfs4_exception exception = {
|
|
|
|
.interruptible = true,
|
|
|
|
};
|
2013-10-17 18:13:30 +00:00
|
|
|
int status;
|
|
|
|
|
|
|
|
dprintk("%s: FSID %llx:%llx on \"%s\"\n", __func__,
|
|
|
|
(unsigned long long)server->fsid.major,
|
|
|
|
(unsigned long long)server->fsid.minor,
|
|
|
|
clp->cl_hostname);
|
|
|
|
nfs_display_fhandle(NFS_FH(inode), __func__);
|
|
|
|
|
|
|
|
do {
|
|
|
|
status = ops->fsid_present(inode, cred);
|
|
|
|
if (status != -NFS4ERR_DELAY)
|
|
|
|
break;
|
|
|
|
nfs4_handle_exception(server, status, &exception);
|
|
|
|
} while (exception.retry);
|
|
|
|
return status;
|
|
|
|
}
|
|
|
|
|
2019-02-18 18:32:38 +00:00
|
|
|
/*
 * If 'use_integrity' is true and the state management nfs_client
 * cl_rpcclient is using krb5i/p, use the integrity protected cl_rpcclient
 * and the machine credential as per RFC3530bis and RFC5661 Security
 * Considerations sections. Otherwise, just use the user cred with the
 * filesystem's rpc_client.
 */
static int _nfs4_proc_secinfo(struct inode *dir, const struct qstr *name, struct nfs4_secinfo_flavors *flavors, bool use_integrity)
{
	int status;
	struct rpc_clnt *clnt = NFS_SERVER(dir)->client;
	struct nfs_client *clp = NFS_SERVER(dir)->nfs_client;
	struct nfs4_secinfo_arg args = {
		.dir_fh = NFS_FH(dir),
		.name   = name,
	};
	struct nfs4_secinfo_res res = {
		.flavors     = flavors,
	};
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SECINFO],
		.rpc_argp = &args,
		.rpc_resp = &res,
	};
	struct nfs4_call_sync_data data = {
		.seq_server = NFS_SERVER(dir),
		.seq_args = &args.seq_args,
		.seq_res = &res.seq_res,
	};
	struct rpc_task_setup task_setup = {
		.rpc_client = clnt,
		.rpc_message = &msg,
		.callback_ops = clp->cl_mvops->call_sync_ops,
		.callback_data = &data,
		.flags = RPC_TASK_NO_ROUND_ROBIN,
	};
	const struct cred *cred = NULL;

	if (use_integrity) {
		/* Switch to the krb5i/p state-management transport and
		 * the machine credential */
		clnt = clp->cl_rpcclient;
		task_setup.rpc_client = clnt;

		cred = nfs4_get_clid_cred(clp);
		msg.rpc_cred = cred;
	}

	dprintk("NFS call secinfo %s\n", name->name);

	/* May further override clnt/cred per the negotiated SP4_MACH_CRED
	 * secinfo mode */
	nfs4_state_protect(clp, NFS_SP4_MACH_CRED_SECINFO, &clnt, &msg);
	nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 0);
	status = nfs4_call_sync_custom(&task_setup);

	dprintk("NFS reply secinfo: %d\n", status);

	/* put_cred(NULL) is a no-op, so this is safe when !use_integrity */
	put_cred(cred);
	return status;
}
|
|
|
|
|
2012-04-27 17:27:40 +00:00
|
|
|
int nfs4_proc_secinfo(struct inode *dir, const struct qstr *name,
|
|
|
|
struct nfs4_secinfo_flavors *flavors)
|
2011-03-24 17:12:29 +00:00
|
|
|
{
|
2019-04-07 17:59:09 +00:00
|
|
|
struct nfs4_exception exception = {
|
|
|
|
.interruptible = true,
|
|
|
|
};
|
2011-03-24 17:12:29 +00:00
|
|
|
int err;
|
|
|
|
do {
|
2013-09-03 19:18:49 +00:00
|
|
|
err = -NFS4ERR_WRONGSEC;
|
|
|
|
|
|
|
|
/* try to use integrity protection with machine cred */
|
|
|
|
if (_nfs4_is_integrity_protected(NFS_SERVER(dir)->nfs_client))
|
|
|
|
err = _nfs4_proc_secinfo(dir, name, flavors, true);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* if unable to use integrity protection, or SECINFO with
|
|
|
|
* integrity protection returns NFS4ERR_WRONGSEC (which is
|
|
|
|
* disallowed by spec, but exists in deployed servers) use
|
|
|
|
* the current filesystem's rpc_client and the user cred.
|
|
|
|
*/
|
|
|
|
if (err == -NFS4ERR_WRONGSEC)
|
|
|
|
err = _nfs4_proc_secinfo(dir, name, flavors, false);
|
|
|
|
|
2013-08-12 20:45:55 +00:00
|
|
|
trace_nfs4_secinfo(dir, name, err);
|
|
|
|
err = nfs4_handle_exception(NFS_SERVER(dir), err,
|
2011-03-24 17:12:29 +00:00
|
|
|
&exception);
|
|
|
|
} while (exception.retry);
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2009-04-01 13:21:53 +00:00
|
|
|
#ifdef CONFIG_NFS_V4_1
|
2010-12-14 15:11:57 +00:00
|
|
|
/*
|
|
|
|
* Check the exchange flags returned by the server for invalid flags, having
|
|
|
|
* both PNFS and NON_PNFS flags set, and not having one of NON_PNFS, PNFS, or
|
|
|
|
* DS flags set.
|
|
|
|
*/
|
2020-10-16 13:25:45 +00:00
|
|
|
static int nfs4_check_cl_exchange_flags(u32 flags, u32 version)
|
2010-12-14 15:11:57 +00:00
|
|
|
{
|
2020-10-16 13:25:45 +00:00
|
|
|
if (version >= 2 && (flags & ~EXCHGID4_2_FLAG_MASK_R))
|
|
|
|
goto out_inval;
|
|
|
|
else if (version < 2 && (flags & ~EXCHGID4_FLAG_MASK_R))
|
2010-12-14 15:11:57 +00:00
|
|
|
goto out_inval;
|
|
|
|
if ((flags & EXCHGID4_FLAG_USE_PNFS_MDS) &&
|
|
|
|
(flags & EXCHGID4_FLAG_USE_NON_PNFS))
|
|
|
|
goto out_inval;
|
|
|
|
if (!(flags & (EXCHGID4_FLAG_MASK_PNFS)))
|
|
|
|
goto out_inval;
|
|
|
|
return NFS_OK;
|
|
|
|
out_inval:
|
|
|
|
return -NFS4ERR_INVAL;
|
|
|
|
}
|
|
|
|
|
2011-05-31 23:05:47 +00:00
|
|
|
static bool
|
2012-05-22 02:44:31 +00:00
|
|
|
nfs41_same_server_scope(struct nfs41_server_scope *a,
|
|
|
|
struct nfs41_server_scope *b)
|
2011-05-31 23:05:47 +00:00
|
|
|
{
|
2017-01-11 21:51:59 +00:00
|
|
|
if (a->server_scope_sz != b->server_scope_sz)
|
|
|
|
return false;
|
|
|
|
return memcmp(a->server_scope, b->server_scope, a->server_scope_sz) == 0;
|
2011-05-31 23:05:47 +00:00
|
|
|
}
|
|
|
|
|
2016-02-05 21:08:37 +00:00
|
|
|
/*
 * Completion callback for BIND_CONN_TO_SESSION on one transport.
 * Schedules session recovery when the session is bad or dead, and
 * retries on a fresh connection when the server granted fewer channel
 * directions than were requested.
 */
static void
nfs4_bind_one_conn_to_session_done(struct rpc_task *task, void *calldata)
{
	struct nfs41_bind_conn_to_session_args *args = task->tk_msg.rpc_argp;
	struct nfs41_bind_conn_to_session_res *res = task->tk_msg.rpc_resp;
	struct nfs_client *clp = args->client;

	switch (task->tk_status) {
	case -NFS4ERR_BADSESSION:
	case -NFS4ERR_DEADSESSION:
		nfs4_schedule_session_recovery(clp->cl_session,
				task->tk_status);
		return;
	}
	if (args->dir == NFS4_CDFC4_FORE_OR_BOTH &&
			res->dir != NFS4_CDFS4_BOTH) {
		/* Asked for both directions but did not get them: drop
		 * the connection and retry, up to
		 * MAX_BIND_CONN_TO_SESSION_RETRIES attempts */
		rpc_task_close_connection(task);
		if (args->retries++ < MAX_BIND_CONN_TO_SESSION_RETRIES)
			rpc_restart_call(task);
	}
}
|
|
|
|
|
|
|
|
/* Only a completion handler is needed; the caller waits synchronously
 * via rpc_run_task()/rpc_put_task() */
static const struct rpc_call_ops nfs4_bind_one_conn_to_session_ops = {
	.rpc_call_done = nfs4_bind_one_conn_to_session_done,
};
|
|
|
|
|
2012-05-24 17:22:50 +00:00
|
|
|
/*
 * nfs4_proc_bind_one_conn_to_session()
 *
 * Bind a single transport to an existing NFSv4.1 session.
 *
 * The 4.1 client currently uses the same TCP connection for the
 * fore and backchannel.
 */
static
int nfs4_proc_bind_one_conn_to_session(struct rpc_clnt *clnt,
		struct rpc_xprt *xprt,
		struct nfs_client *clp,
		const struct cred *cred)
{
	int status;
	struct nfs41_bind_conn_to_session_args args = {
		.client = clp,
		.dir = NFS4_CDFC4_FORE_OR_BOTH,
		.retries = 0,
	};
	struct nfs41_bind_conn_to_session_res res;
	struct rpc_message msg = {
		.rpc_proc =
			&nfs4_procedures[NFSPROC4_CLNT_BIND_CONN_TO_SESSION],
		.rpc_argp = &args,
		.rpc_resp = &res,
		.rpc_cred = cred,
	};
	struct rpc_task_setup task_setup_data = {
		.rpc_client = clnt,
		.rpc_xprt = xprt,
		.callback_ops = &nfs4_bind_one_conn_to_session_ops,
		.rpc_message = &msg,
		.flags = RPC_TASK_TIMEOUT,
	};
	struct rpc_task *task;

	nfs4_copy_sessionid(&args.sessionid, &clp->cl_session->sess_id);
	/* Only ask for the backchannel if the session negotiated one */
	if (!(clp->cl_session->flags & SESSION4_BACK_CHAN))
		args.dir = NFS4_CDFC4_FORE;

	/* Do not set the backchannel flag unless this is clnt->cl_xprt */
	if (xprt != rcu_access_pointer(clnt->cl_xprt))
		args.dir = NFS4_CDFC4_FORE;

	task = rpc_run_task(&task_setup_data);
	if (!IS_ERR(task)) {
		status = task->tk_status;
		rpc_put_task(task);
	} else
		status = PTR_ERR(task);
	trace_nfs4_bind_conn_to_session(clp, status);
	if (status == 0) {
		/* Validate the reply against what we requested */
		if (memcmp(res.sessionid.data,
		    clp->cl_session->sess_id.data, NFS4_MAX_SESSIONID_LEN)) {
			dprintk("NFS: %s: Session ID mismatch\n", __func__);
			return -EIO;
		}
		/* Server must grant a subset of the requested directions,
		 * and at least one direction */
		if ((res.dir & args.dir) != res.dir || res.dir == 0) {
			dprintk("NFS: %s: Unexpected direction from server\n",
				__func__);
			return -EIO;
		}
		if (res.use_conn_in_rdma_mode != args.use_conn_in_rdma_mode) {
			dprintk("NFS: %s: Server returned RDMA mode = true\n",
				__func__);
			return -EIO;
		}
	}

	return status;
}
|
|
|
|
|
2016-01-31 03:58:24 +00:00
|
|
|
/* Arguments threaded through rpc_clnt_iterate_for_each_xprt() to
 * nfs4_proc_bind_conn_to_session_callback() */
struct rpc_bind_conn_calldata {
	struct nfs_client *clp;
	const struct cred *cred;
};
|
|
|
|
|
|
|
|
static int
|
|
|
|
nfs4_proc_bind_conn_to_session_callback(struct rpc_clnt *clnt,
|
|
|
|
struct rpc_xprt *xprt,
|
|
|
|
void *calldata)
|
|
|
|
{
|
|
|
|
struct rpc_bind_conn_calldata *p = calldata;
|
|
|
|
|
|
|
|
return nfs4_proc_bind_one_conn_to_session(clnt, xprt, p->clp, p->cred);
|
|
|
|
}
|
|
|
|
|
2018-12-03 00:30:31 +00:00
|
|
|
int nfs4_proc_bind_conn_to_session(struct nfs_client *clp, const struct cred *cred)
|
2016-01-31 03:58:24 +00:00
|
|
|
{
|
|
|
|
struct rpc_bind_conn_calldata data = {
|
|
|
|
.clp = clp,
|
|
|
|
.cred = cred,
|
|
|
|
};
|
|
|
|
return rpc_clnt_iterate_for_each_xprt(clp->cl_rpcclient,
|
|
|
|
nfs4_proc_bind_conn_to_session_callback, &data);
|
|
|
|
}
|
|
|
|
|
2009-04-01 13:22:29 +00:00
|
|
|
/*
 * Minimum set of SP4_MACH_CRED operations from RFC 5661 in the enforce map
 * and operations we'd like to see to enable certain features in the allow map
 */
static const struct nfs41_state_protection nfs4_sp4_mach_cred_request = {
	.how = SP4_MACH_CRED,
	.enforce.u.words = {
		/* word [1] holds bits for operation numbers 32..63 */
		[1] = 1 << (OP_BIND_CONN_TO_SESSION - 32) |
		      1 << (OP_EXCHANGE_ID - 32) |
		      1 << (OP_CREATE_SESSION - 32) |
		      1 << (OP_DESTROY_SESSION - 32) |
		      1 << (OP_DESTROY_CLIENTID - 32)
	},
	.allow.u.words = {
		/* word [0] holds bits for operation numbers 0..31 */
		[0] = 1 << (OP_CLOSE) |
		      1 << (OP_OPEN_DOWNGRADE) |
		      1 << (OP_LOCKU) |
		      1 << (OP_DELEGRETURN) |
		      1 << (OP_COMMIT),
		[1] = 1 << (OP_SECINFO - 32) |
		      1 << (OP_SECINFO_NO_NAME - 32) |
		      1 << (OP_LAYOUTRETURN - 32) |
		      1 << (OP_TEST_STATEID - 32) |
		      1 << (OP_FREE_STATEID - 32) |
		      1 << (OP_WRITE - 32)
	}
};
|
|
|
|
|
|
|
|
/*
 * Select the state protection mode for client `clp' given the server results
 * from exchange_id in `sp'.
 *
 * Returns 0 on success, negative errno otherwise.
 */
static int nfs4_sp4_select_mode(struct nfs_client *clp,
				 struct nfs41_state_protection *sp)
{
	/* The complete set of enforce-list operations this client supports */
	static const u32 supported_enforce[NFS4_OP_MAP_NUM_WORDS] = {
		[1] = 1 << (OP_BIND_CONN_TO_SESSION - 32) |
		      1 << (OP_EXCHANGE_ID - 32) |
		      1 << (OP_CREATE_SESSION - 32) |
		      1 << (OP_DESTROY_SESSION - 32) |
		      1 << (OP_DESTROY_CLIENTID - 32)
	};
	unsigned long flags = 0;
	unsigned int i;
	int ret = 0;

	if (sp->how == SP4_MACH_CRED) {
		/* Print state protect result */
		dfprintk(MOUNT, "Server SP4_MACH_CRED support:\n");
		for (i = 0; i <= LAST_NFS4_OP; i++) {
			if (test_bit(i, sp->enforce.u.longs))
				dfprintk(MOUNT, " enforce op %d\n", i);
			if (test_bit(i, sp->allow.u.longs))
				dfprintk(MOUNT, " allow op %d\n", i);
		}

		/* make sure nothing is on enforce list that isn't supported */
		for (i = 0; i < NFS4_OP_MAP_NUM_WORDS; i++) {
			if (sp->enforce.u.words[i] & ~supported_enforce[i]) {
				dfprintk(MOUNT, "sp4_mach_cred: disabled\n");
				ret = -EINVAL;
				goto out;
			}
		}

		/*
		 * Minimal mode - state operations are allowed to use machine
		 * credential. Note this already happens by default, so the
		 * client doesn't have to do anything more than the negotiation.
		 *
		 * NOTE: we don't care if EXCHANGE_ID is in the list -
		 * we're already using the machine cred for exchange_id
		 * and will never use a different cred.
		 */
		if (test_bit(OP_BIND_CONN_TO_SESSION, sp->enforce.u.longs) &&
		    test_bit(OP_CREATE_SESSION, sp->enforce.u.longs) &&
		    test_bit(OP_DESTROY_SESSION, sp->enforce.u.longs) &&
		    test_bit(OP_DESTROY_CLIENTID, sp->enforce.u.longs)) {
			dfprintk(MOUNT, "sp4_mach_cred:\n");
			dfprintk(MOUNT, " minimal mode enabled\n");
			__set_bit(NFS_SP4_MACH_CRED_MINIMAL, &flags);
		} else {
			/* Server enforces ops we can't honor: refuse the mode */
			dfprintk(MOUNT, "sp4_mach_cred: disabled\n");
			ret = -EINVAL;
			goto out;
		}

		/* Each optional feature below is enabled only when the server
		 * allows the machine cred for all the ops it depends on */
		if (test_bit(OP_CLOSE, sp->allow.u.longs) &&
		    test_bit(OP_OPEN_DOWNGRADE, sp->allow.u.longs) &&
		    test_bit(OP_DELEGRETURN, sp->allow.u.longs) &&
		    test_bit(OP_LOCKU, sp->allow.u.longs)) {
			dfprintk(MOUNT, " cleanup mode enabled\n");
			__set_bit(NFS_SP4_MACH_CRED_CLEANUP, &flags);
		}

		if (test_bit(OP_LAYOUTRETURN, sp->allow.u.longs)) {
			dfprintk(MOUNT, " pnfs cleanup mode enabled\n");
			__set_bit(NFS_SP4_MACH_CRED_PNFS_CLEANUP, &flags);
		}

		if (test_bit(OP_SECINFO, sp->allow.u.longs) &&
		    test_bit(OP_SECINFO_NO_NAME, sp->allow.u.longs)) {
			dfprintk(MOUNT, " secinfo mode enabled\n");
			__set_bit(NFS_SP4_MACH_CRED_SECINFO, &flags);
		}

		if (test_bit(OP_TEST_STATEID, sp->allow.u.longs) &&
		    test_bit(OP_FREE_STATEID, sp->allow.u.longs)) {
			dfprintk(MOUNT, " stateid mode enabled\n");
			__set_bit(NFS_SP4_MACH_CRED_STATEID, &flags);
		}

		if (test_bit(OP_WRITE, sp->allow.u.longs)) {
			dfprintk(MOUNT, " write mode enabled\n");
			__set_bit(NFS_SP4_MACH_CRED_WRITE, &flags);
		}

		if (test_bit(OP_COMMIT, sp->allow.u.longs)) {
			dfprintk(MOUNT, " commit mode enabled\n");
			__set_bit(NFS_SP4_MACH_CRED_COMMIT, &flags);
		}
	}
out:
	clp->cl_sp4_flags = flags;
	return ret;
}
|
|
|
|
|
2016-09-09 13:22:18 +00:00
|
|
|
/* Call data for an async EXCHANGE_ID request; freed, together with any
 * result buffers not handed off to the nfs_client, by
 * nfs4_exchange_id_release() */
struct nfs41_exchange_id_data {
	struct nfs41_exchange_id_res res;
	struct nfs41_exchange_id_args args;
};
|
|
|
|
|
|
|
|
static void nfs4_exchange_id_release(void *data)
|
|
|
|
{
|
|
|
|
struct nfs41_exchange_id_data *cdata =
|
|
|
|
(struct nfs41_exchange_id_data *)data;
|
|
|
|
|
2017-03-13 14:36:19 +00:00
|
|
|
nfs_put_client(cdata->args.client);
|
2016-09-09 13:22:18 +00:00
|
|
|
kfree(cdata->res.impl_id);
|
|
|
|
kfree(cdata->res.server_scope);
|
|
|
|
kfree(cdata->res.server_owner);
|
|
|
|
kfree(cdata);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Only a release handler is needed; completion is consumed by the
 * caller via task->tk_status after rpc_run_task() */
static const struct rpc_call_ops nfs4_exchange_id_call_ops = {
	.rpc_release = nfs4_exchange_id_release,
};
|
|
|
|
|
2013-08-13 20:37:32 +00:00
|
|
|
/*
 * nfs4_run_exchange_id()
 *
 * Build and submit an asynchronous EXCHANGE_ID request. On success the
 * returned rpc_task owns the calldata (freed via the ->rpc_release
 * callback when the task is put); on failure all allocations are
 * unwound here and an ERR_PTR is returned.
 */
static struct rpc_task *
nfs4_run_exchange_id(struct nfs_client *clp, const struct cred *cred,
			u32 sp4_how, struct rpc_xprt *xprt)
{
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_EXCHANGE_ID],
		.rpc_cred = cred,
	};
	struct rpc_task_setup task_setup_data = {
		.rpc_client = clp->cl_rpcclient,
		.callback_ops = &nfs4_exchange_id_call_ops,
		.rpc_message = &msg,
		.flags = RPC_TASK_TIMEOUT | RPC_TASK_NO_ROUND_ROBIN,
	};
	struct nfs41_exchange_id_data *calldata;
	int status;

	/* Reference dropped in nfs4_exchange_id_release() (or in the
	 * error path below) */
	if (!refcount_inc_not_zero(&clp->cl_count))
		return ERR_PTR(-EIO);

	status = -ENOMEM;
	calldata = kzalloc(sizeof(*calldata), GFP_NOFS);
	if (!calldata)
		goto out;

	nfs4_init_boot_verifier(clp, &calldata->args.verifier);

	status = nfs4_init_uniform_client_string(clp);
	if (status)
		goto out_calldata;

	/* Result buffers the XDR decoder will fill in */
	calldata->res.server_owner = kzalloc(sizeof(struct nfs41_server_owner),
						GFP_NOFS);
	status = -ENOMEM;
	if (unlikely(calldata->res.server_owner == NULL))
		goto out_calldata;

	calldata->res.server_scope = kzalloc(sizeof(struct nfs41_server_scope),
					GFP_NOFS);
	if (unlikely(calldata->res.server_scope == NULL))
		goto out_server_owner;

	calldata->res.impl_id = kzalloc(sizeof(struct nfs41_impl_id), GFP_NOFS);
	if (unlikely(calldata->res.impl_id == NULL))
		goto out_server_scope;

	switch (sp4_how) {
	case SP4_NONE:
		calldata->args.state_protect.how = SP4_NONE;
		break;

	case SP4_MACH_CRED:
		calldata->args.state_protect = nfs4_sp4_mach_cred_request;
		break;

	default:
		/* unsupported! */
		WARN_ON_ONCE(1);
		status = -EINVAL;
		goto out_impl_id;
	}
	if (xprt) {
		/* Session trunking test: target the given transport and
		 * reuse the saved verifier so the server can correlate */
		task_setup_data.rpc_xprt = xprt;
		task_setup_data.flags |= RPC_TASK_SOFTCONN;
		memcpy(calldata->args.verifier.data, clp->cl_confirm.data,
				sizeof(calldata->args.verifier.data));
	}
	calldata->args.client = clp;
	calldata->args.flags = EXCHGID4_FLAG_SUPP_MOVED_REFER |
	EXCHGID4_FLAG_BIND_PRINC_STATEID;
#ifdef CONFIG_NFS_V4_1_MIGRATION
	calldata->args.flags |= EXCHGID4_FLAG_SUPP_MOVED_MIGR;
#endif
	if (test_bit(NFS_CS_DS, &clp->cl_flags))
		calldata->args.flags |= EXCHGID4_FLAG_USE_PNFS_DS;
	msg.rpc_argp = &calldata->args;
	msg.rpc_resp = &calldata->res;
	task_setup_data.callback_data = calldata;

	return rpc_run_task(&task_setup_data);

out_impl_id:
	kfree(calldata->res.impl_id);
out_server_scope:
	kfree(calldata->res.server_scope);
out_server_owner:
	kfree(calldata->res.server_owner);
out_calldata:
	kfree(calldata);
out:
	nfs_put_client(clp);
	return ERR_PTR(status);
}
|
|
|
|
|
|
|
|
/*
 * _nfs4_proc_exchange_id()
 *
 * Wrapper for EXCHANGE_ID operation: runs the async request, validates
 * the reply, and installs the results into the nfs_client.
 */
static int _nfs4_proc_exchange_id(struct nfs_client *clp, const struct cred *cred,
			u32 sp4_how)
{
	struct rpc_task *task;
	struct nfs41_exchange_id_args *argp;
	struct nfs41_exchange_id_res *resp;
	/* Sampled before the RPC so the lease renewal is conservative */
	unsigned long now = jiffies;
	int status;

	task = nfs4_run_exchange_id(clp, cred, sp4_how, NULL);
	if (IS_ERR(task))
		return PTR_ERR(task);

	argp = task->tk_msg.rpc_argp;
	resp = task->tk_msg.rpc_resp;
	status = task->tk_status;
	if (status != 0)
		goto out;

	status = nfs4_check_cl_exchange_flags(resp->flags,
			clp->cl_mvops->minor_version);
	if (status != 0)
		goto out;

	status = nfs4_sp4_select_mode(clp, &resp->state_protect);
	if (status != 0)
		goto out;

	do_renew_lease(clp, now);

	clp->cl_clientid = resp->clientid;
	clp->cl_exchange_flags = resp->flags;
	clp->cl_seqid = resp->seqid;
	/* Client ID is not confirmed */
	if (!(resp->flags & EXCHGID4_FLAG_CONFIRMED_R))
		clear_bit(NFS4_SESSION_ESTABLISHED,
			  &clp->cl_session->session_state);

	if (clp->cl_serverscope != NULL &&
	    !nfs41_same_server_scope(clp->cl_serverscope,
				resp->server_scope)) {
		dprintk("%s: server_scope mismatch detected\n",
			__func__);
		set_bit(NFS4CLNT_SERVER_SCOPE_MISMATCH, &clp->cl_state);
	}

	/* swap() transfers ownership to clp; the old buffers land in the
	 * response and are freed by nfs4_exchange_id_release() */
	swap(clp->cl_serverowner, resp->server_owner);
	swap(clp->cl_serverscope, resp->server_scope);
	swap(clp->cl_implid, resp->impl_id);

	/* Save the EXCHANGE_ID verifier session trunk tests */
	memcpy(clp->cl_confirm.data, argp->verifier.data,
	       sizeof(clp->cl_confirm.data));
out:
	trace_nfs4_exchange_id(clp, status);
	/* Dropping the task also runs the release callback */
	rpc_put_task(task);
	return status;
}
|
|
|
|
|
2013-08-13 20:37:32 +00:00
|
|
|
/*
|
|
|
|
* nfs4_proc_exchange_id()
|
|
|
|
*
|
|
|
|
* Returns zero, a negative errno, or a negative NFS4ERR status code.
|
|
|
|
*
|
|
|
|
* Since the clientid has expired, all compounds using sessions
|
|
|
|
* associated with the stale clientid will be returning
|
|
|
|
* NFS4ERR_BADSESSION in the sequence operation, and will therefore
|
|
|
|
* be in some phase of session reset.
|
|
|
|
*
|
|
|
|
* Will attempt to negotiate SP4_MACH_CRED if krb5i / krb5p auth is used.
|
|
|
|
*/
|
2018-12-03 00:30:31 +00:00
|
|
|
int nfs4_proc_exchange_id(struct nfs_client *clp, const struct cred *cred)
|
2013-08-13 20:37:32 +00:00
|
|
|
{
|
|
|
|
rpc_authflavor_t authflavor = clp->cl_rpcclient->cl_auth->au_flavor;
|
|
|
|
int status;
|
|
|
|
|
|
|
|
/* try SP4_MACH_CRED if krb5i/p */
|
|
|
|
if (authflavor == RPC_AUTH_GSS_KRB5I ||
|
|
|
|
authflavor == RPC_AUTH_GSS_KRB5P) {
|
2017-07-31 22:38:50 +00:00
|
|
|
status = _nfs4_proc_exchange_id(clp, cred, SP4_MACH_CRED);
|
2013-08-13 20:37:32 +00:00
|
|
|
if (!status)
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* try SP4_NONE */
|
2017-07-31 22:38:50 +00:00
|
|
|
return _nfs4_proc_exchange_id(clp, cred, SP4_NONE);
|
2013-08-13 20:37:32 +00:00
|
|
|
}
|
|
|
|
|
2016-09-09 13:22:29 +00:00
|
|
|
/**
 * nfs4_test_session_trunk
 *
 * This is an add_xprt_test() test function called from
 * rpc_clnt_setup_test_and_add_xprt.
 *
 * The rpc_xprt_switch is referrenced by rpc_clnt_setup_test_and_add_xprt
 * and is dereferrenced in nfs4_exchange_id_release
 *
 * Upon success, add the new transport to the rpc_clnt
 *
 * @clnt: struct rpc_clnt to get new transport
 * @xprt: the rpc_xprt to test
 * @data: call data for _nfs4_proc_exchange_id.
 */
void nfs4_test_session_trunk(struct rpc_clnt *clnt, struct rpc_xprt *xprt,
			    void *data)
{
	struct nfs4_add_xprt_data *adata = data;
	struct rpc_task *task;
	int status;

	u32 sp4_how;

	dprintk("--> %s try %s\n", __func__,
		xprt->address_strings[RPC_DISPLAY_ADDR]);

	/* Repeat whatever state-protection mode was negotiated originally */
	sp4_how = (adata->clp->cl_sp4_flags == 0 ? SP4_NONE : SP4_MACH_CRED);

try_again:
	/* Test connection for session trunking. Async exchange_id call */
	task = nfs4_run_exchange_id(adata->clp, adata->cred, sp4_how, xprt);
	if (IS_ERR(task))
		return;

	status = task->tk_status;
	if (status == 0) {
		/* Compare the reply against the original EXCHANGE_ID to
		 * decide whether this transport reaches the same server */
		status = nfs4_detect_session_trunking(adata->clp,
				task->tk_msg.rpc_resp, xprt);
		trace_nfs4_trunked_exchange_id(adata->clp,
			xprt->address_strings[RPC_DISPLAY_ADDR], status);
	}
	if (status == 0)
		rpc_clnt_xprt_switch_add_xprt(clnt, xprt);
	else if (status != -NFS4ERR_DELAY && rpc_clnt_xprt_switch_has_addr(clnt,
				(struct sockaddr *)&xprt->addr))
		rpc_clnt_xprt_switch_remove_xprt(clnt, xprt);

	rpc_put_task(task);
	if (status == -NFS4ERR_DELAY) {
		/* Server asked us to back off: wait a second and retry */
		ssleep(1);
		goto try_again;
	}
}
EXPORT_SYMBOL_GPL(nfs4_test_session_trunk);
|
2013-08-13 20:37:32 +00:00
|
|
|
|
2012-05-25 21:18:09 +00:00
|
|
|
/*
 * Send a single synchronous DESTROY_CLIENTID operation for @clp.
 * Returns the raw RPC status; transient errors are retried by the
 * nfs4_proc_destroy_clientid() wrapper, not here.
 */
static int _nfs4_proc_destroy_clientid(struct nfs_client *clp,
		const struct cred *cred)
{
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_CLIENTID],
		.rpc_argp = clp,
		.rpc_cred = cred,
	};
	int status;

	/* no round-robin: the op must go to the server that owns the clientid */
	status = rpc_call_sync(clp->cl_rpcclient, &msg,
			       RPC_TASK_TIMEOUT | RPC_TASK_NO_ROUND_ROBIN);
	trace_nfs4_destroy_clientid(clp, status);
	if (status)
		dprintk("NFS: Got error %d from the server %s on "
			"DESTROY_CLIENTID.", status, clp->cl_hostname);
	return status;
}
|
|
|
|
|
|
|
|
static int nfs4_proc_destroy_clientid(struct nfs_client *clp,
|
2018-12-03 00:30:31 +00:00
|
|
|
const struct cred *cred)
|
2012-05-25 21:18:09 +00:00
|
|
|
{
|
|
|
|
unsigned int loop;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
for (loop = NFS4_MAX_LOOP_ON_RECOVER; loop != 0; loop--) {
|
|
|
|
ret = _nfs4_proc_destroy_clientid(clp, cred);
|
|
|
|
switch (ret) {
|
|
|
|
case -NFS4ERR_DELAY:
|
|
|
|
case -NFS4ERR_CLIENTID_BUSY:
|
|
|
|
ssleep(1);
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
int nfs4_destroy_clientid(struct nfs_client *clp)
|
|
|
|
{
|
2018-12-03 00:30:31 +00:00
|
|
|
const struct cred *cred;
|
2012-05-25 21:18:09 +00:00
|
|
|
int ret = 0;
|
|
|
|
|
|
|
|
if (clp->cl_mvops->minor_version < 1)
|
|
|
|
goto out;
|
|
|
|
if (clp->cl_exchange_flags == 0)
|
|
|
|
goto out;
|
2012-09-14 21:24:32 +00:00
|
|
|
if (clp->cl_preserve_clid)
|
|
|
|
goto out;
|
2013-07-24 16:28:37 +00:00
|
|
|
cred = nfs4_get_clid_cred(clp);
|
2012-05-25 21:18:09 +00:00
|
|
|
ret = nfs4_proc_destroy_clientid(clp, cred);
|
2018-12-03 00:30:31 +00:00
|
|
|
put_cred(cred);
|
2012-05-25 21:18:09 +00:00
|
|
|
switch (ret) {
|
|
|
|
case 0:
|
|
|
|
case -NFS4ERR_STALE_CLIENTID:
|
|
|
|
clp->cl_exchange_flags = 0;
|
|
|
|
}
|
|
|
|
out:
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2019-07-07 19:26:08 +00:00
|
|
|
#endif /* CONFIG_NFS_V4_1 */
|
|
|
|
|
2009-04-01 13:22:30 +00:00
|
|
|
/* Call data bundling the GET_LEASE_TIME arguments/results for the RPC. */
struct nfs4_get_lease_time_data {
	struct nfs4_get_lease_time_args *args;	/* request arguments */
	struct nfs4_get_lease_time_res *res;	/* reply buffer */
	struct nfs_client *clp;			/* client the op is issued for */
};
|
|
|
|
|
|
|
|
/* rpc_call_prepare callback: attach the SEQUENCE op for GET_LEASE_TIME. */
static void nfs4_get_lease_time_prepare(struct rpc_task *task,
					void *calldata)
{
	struct nfs4_get_lease_time_data *data =
			(struct nfs4_get_lease_time_data *)calldata;

	/* just setup sequence, do not trigger session recovery
	   since we're invoked within one */
	nfs4_setup_sequence(data->clp,
			&data->args->la_seq_args,
			&data->res->lr_seq_res,
			task);
}
|
|
|
|
|
|
|
|
/*
 * Called from nfs4_state_manager thread for session setup, so don't recover
 * from sequence operation or clientid errors.
 */
static void nfs4_get_lease_time_done(struct rpc_task *task, void *calldata)
{
	struct nfs4_get_lease_time_data *data =
			(struct nfs4_get_lease_time_data *)calldata;

	if (!nfs4_sequence_done(task, &data->res->lr_seq_res))
		return;
	switch (task->tk_status) {
	case -NFS4ERR_DELAY:
	case -NFS4ERR_GRACE:
		/* server busy / in grace: pause, clear the error and retry */
		rpc_delay(task, NFS4_POLL_RETRY_MIN);
		task->tk_status = 0;
		fallthrough;
	case -NFS4ERR_RETRY_UNCACHED_REP:
		rpc_restart_call_prepare(task);
		return;
	}
}
|
|
|
|
|
2012-03-11 17:11:00 +00:00
|
|
|
/* RPC callbacks for the GET_LEASE_TIME task (no release: data is on stack). */
static const struct rpc_call_ops nfs4_get_lease_time_ops = {
	.rpc_call_prepare = nfs4_get_lease_time_prepare,
	.rpc_call_done = nfs4_get_lease_time_done,
};
|
|
|
|
|
|
|
|
/*
 * Issue a synchronous GET_LEASE_TIME compound and store the result in
 * @fsinfo.  All call data lives on this stack frame, which is safe
 * because nfs4_call_sync_custom() does not return until the task is done.
 * Returns zero or a negative errno / NFS4ERR status code.
 */
int nfs4_proc_get_lease_time(struct nfs_client *clp, struct nfs_fsinfo *fsinfo)
{
	struct nfs4_get_lease_time_args args;
	struct nfs4_get_lease_time_res res = {
		.lr_fsinfo = fsinfo,
	};
	struct nfs4_get_lease_time_data data = {
		.args = &args,
		.res = &res,
		.clp = clp,
	};
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GET_LEASE_TIME],
		.rpc_argp = &args,
		.rpc_resp = &res,
	};
	struct rpc_task_setup task_setup = {
		.rpc_client = clp->cl_rpcclient,
		.rpc_message = &msg,
		.callback_ops = &nfs4_get_lease_time_ops,
		.callback_data = &data,
		.flags = RPC_TASK_TIMEOUT,
	};

	/* privileged sequence (last arg 1): usable during session setup */
	nfs4_init_sequence(&args.la_seq_args, &res.lr_seq_res, 0, 1);
	return nfs4_call_sync_custom(&task_setup);
}
|
|
|
|
|
2019-07-07 19:26:08 +00:00
|
|
|
#ifdef CONFIG_NFS_V4_1
|
|
|
|
|
2009-04-01 13:22:31 +00:00
|
|
|
/*
 * Initialize the values to be used by the client in CREATE_SESSION
 * If nfs4_init_session set the fore channel request and response sizes,
 * use them.
 *
 * Set the back channel max_resp_sz_cached to zero to force the client to
 * always set csa_cachethis to FALSE because the current implementation
 * of the back channel DRC only supports caching the CB_SEQUENCE operation.
 */
static void nfs4_init_channel_attrs(struct nfs41_create_session_args *args,
				    struct rpc_clnt *clnt)
{
	unsigned int max_rqst_sz, max_resp_sz;
	unsigned int max_bc_payload = rpc_max_bc_payload(clnt);
	unsigned int max_bc_slots = rpc_num_bc_slots(clnt);

	/* largest I/O payload plus per-op XDR overhead */
	max_rqst_sz = NFS_MAX_FILE_IO_SIZE + nfs41_maxwrite_overhead;
	max_resp_sz = NFS_MAX_FILE_IO_SIZE + nfs41_maxread_overhead;

	/* Fore channel attributes */
	args->fc_attrs.max_rqst_sz = max_rqst_sz;
	args->fc_attrs.max_resp_sz = max_resp_sz;
	args->fc_attrs.max_ops = NFS4_MAX_OPS;
	args->fc_attrs.max_reqs = max_session_slots;

	dprintk("%s: Fore Channel : max_rqst_sz=%u max_resp_sz=%u "
		"max_ops=%u max_reqs=%u\n",
		__func__,
		args->fc_attrs.max_rqst_sz, args->fc_attrs.max_resp_sz,
		args->fc_attrs.max_ops, args->fc_attrs.max_reqs);

	/* Back channel attributes */
	args->bc_attrs.max_rqst_sz = max_bc_payload;
	args->bc_attrs.max_resp_sz = max_bc_payload;
	args->bc_attrs.max_resp_sz_cached = 0;
	args->bc_attrs.max_ops = NFS4_MAX_BACK_CHANNEL_OPS;
	/* at least one backchannel slot, but never more than the transport has */
	args->bc_attrs.max_reqs = max_t(unsigned short, max_session_cb_slots, 1);
	if (args->bc_attrs.max_reqs > max_bc_slots)
		args->bc_attrs.max_reqs = max_bc_slots;

	dprintk("%s: Back Channel : max_rqst_sz=%u max_resp_sz=%u "
		"max_resp_sz_cached=%u max_ops=%u max_reqs=%u\n",
		__func__,
		args->bc_attrs.max_rqst_sz, args->bc_attrs.max_resp_sz,
		args->bc_attrs.max_resp_sz_cached, args->bc_attrs.max_ops,
		args->bc_attrs.max_reqs);
}
|
|
|
|
|
2015-02-18 19:30:18 +00:00
|
|
|
/*
 * Sanity-check the fore channel attributes the server returned in its
 * CREATE_SESSION reply against what we requested.  Returns 0 if
 * acceptable (possibly clamping max_reqs), -EINVAL otherwise.
 */
static int nfs4_verify_fore_channel_attrs(struct nfs41_create_session_args *args,
					  struct nfs41_create_session_res *res)
{
	struct nfs4_channel_attrs *sent = &args->fc_attrs;
	struct nfs4_channel_attrs *rcvd = &res->fc_attrs;

	/* server may not promise replies larger than we can receive */
	if (rcvd->max_resp_sz > sent->max_resp_sz)
		return -EINVAL;
	/*
	 * Our requested max_ops is the minimum we need; we're not
	 * prepared to break up compounds into smaller pieces than that.
	 * So, no point even trying to continue if the server won't
	 * cooperate:
	 */
	if (rcvd->max_ops < sent->max_ops)
		return -EINVAL;
	if (rcvd->max_reqs == 0)
		return -EINVAL;
	/* clamp to the client's slot table capacity */
	if (rcvd->max_reqs > NFS4_MAX_SLOT_TABLE)
		rcvd->max_reqs = NFS4_MAX_SLOT_TABLE;
	return 0;
}
|
|
|
|
|
2015-02-18 19:30:18 +00:00
|
|
|
/*
 * Sanity-check the back channel attributes the server returned in its
 * CREATE_SESSION reply.  Skipped entirely when the server did not grant
 * a back channel.  Returns 0 if acceptable, -EINVAL otherwise.
 */
static int nfs4_verify_back_channel_attrs(struct nfs41_create_session_args *args,
					  struct nfs41_create_session_res *res)
{
	struct nfs4_channel_attrs *sent = &args->bc_attrs;
	struct nfs4_channel_attrs *rcvd = &res->bc_attrs;

	/* no back channel granted: nothing to verify */
	if (!(res->flags & SESSION4_BACK_CHAN))
		goto out;
	/* server callbacks must fit within what we offered to accept */
	if (rcvd->max_rqst_sz > sent->max_rqst_sz)
		return -EINVAL;
	if (rcvd->max_resp_sz < sent->max_resp_sz)
		return -EINVAL;
	if (rcvd->max_resp_sz_cached > sent->max_resp_sz_cached)
		return -EINVAL;
	if (rcvd->max_ops > sent->max_ops)
		return -EINVAL;
	if (rcvd->max_reqs > sent->max_reqs)
		return -EINVAL;
out:
	return 0;
}
|
2009-04-01 13:22:32 +00:00
|
|
|
|
|
|
|
/*
 * Validate both negotiated channels of a CREATE_SESSION reply.
 * Returns 0 when both the fore and back channel attributes are
 * acceptable, otherwise the first verifier's error.
 */
static int nfs4_verify_channel_attrs(struct nfs41_create_session_args *args,
				     struct nfs41_create_session_res *res)
{
	int status = nfs4_verify_fore_channel_attrs(args, res);

	if (status == 0)
		status = nfs4_verify_back_channel_attrs(args, res);
	return status;
}
|
|
|
|
|
|
|
|
/*
 * Commit the attributes of a successful CREATE_SESSION reply into the
 * client's session structure.
 */
static void nfs4_update_session(struct nfs4_session *session,
		struct nfs41_create_session_res *res)
{
	nfs4_copy_sessionid(&session->sess_id, &res->sessionid);
	/* Mark client id and session as being confirmed */
	session->clp->cl_exchange_flags |= EXCHGID4_FLAG_CONFIRMED_R;
	set_bit(NFS4_SESSION_ESTABLISHED, &session->session_state);
	session->flags = res->flags;
	memcpy(&session->fc_attrs, &res->fc_attrs, sizeof(session->fc_attrs));
	/* backchannel attrs are only valid when one was granted */
	if (res->flags & SESSION4_BACK_CHAN)
		memcpy(&session->bc_attrs, &res->bc_attrs,
				sizeof(session->bc_attrs));
}
|
|
|
|
|
2012-05-25 21:51:23 +00:00
|
|
|
/*
 * Send one synchronous CREATE_SESSION operation and, on success, verify
 * and adopt the negotiated channel attributes.
 *
 * Note the ordering: cl_seqid is only bumped for replies that consumed
 * the server's create-session slot; the errors in the switch below did
 * not, so the same seqid must be reused on retry.
 */
static int _nfs4_proc_create_session(struct nfs_client *clp,
		const struct cred *cred)
{
	struct nfs4_session *session = clp->cl_session;
	struct nfs41_create_session_args args = {
		.client = clp,
		.clientid = clp->cl_clientid,
		.seqid = clp->cl_seqid,
		.cb_program = NFS4_CALLBACK,
	};
	struct nfs41_create_session_res res;

	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE_SESSION],
		.rpc_argp = &args,
		.rpc_resp = &res,
		.rpc_cred = cred,
	};
	int status;

	nfs4_init_channel_attrs(&args, clp->cl_rpcclient);
	args.flags = (SESSION4_PERSIST | SESSION4_BACK_CHAN);

	status = rpc_call_sync(session->clp->cl_rpcclient, &msg,
			       RPC_TASK_TIMEOUT | RPC_TASK_NO_ROUND_ROBIN);
	trace_nfs4_create_session(clp, status);

	/* these failures must not advance the clientid sequence id */
	switch (status) {
	case -NFS4ERR_STALE_CLIENTID:
	case -NFS4ERR_DELAY:
	case -ETIMEDOUT:
	case -EACCES:
	case -EAGAIN:
		goto out;
	}

	clp->cl_seqid++;
	if (!status) {
		/* Verify the session's negotiated channel_attrs values */
		status = nfs4_verify_channel_attrs(&args, &res);
		/* Increment the clientid slot sequence id */
		if (status)
			goto out;
		nfs4_update_session(session, &res);
	}
out:
	return status;
}
|
|
|
|
|
|
|
|
/*
 * Issues a CREATE_SESSION operation to the server.
 * It is the responsibility of the caller to verify the session is
 * expired before calling this routine.
 */
int nfs4_proc_create_session(struct nfs_client *clp, const struct cred *cred)
{
	int status;
	unsigned *ptr;
	struct nfs4_session *session = clp->cl_session;
	struct nfs4_add_xprt_data xprtdata = {
		.clp = clp,
	};
	struct rpc_add_xprt_test rpcdata = {
		.add_xprt_test = clp->cl_mvops->session_trunk,
		.data = &xprtdata,
	};

	dprintk("--> %s clp=%p session=%p\n", __func__, clp, session);

	status = _nfs4_proc_create_session(clp, cred);
	if (status)
		goto out;

	/* Init or reset the session slot tables */
	status = nfs4_setup_session_slot_tables(session);
	dprintk("slot table setup returned %d\n", status);
	if (status)
		goto out;

	ptr = (unsigned *)&session->sess_id.data[0];
	dprintk("%s client>seqid %d sessionid %u:%u:%u:%u\n", __func__,
		clp->cl_seqid, ptr[0], ptr[1], ptr[2], ptr[3]);
	/* probe additional addresses for session trunking candidates */
	rpc_clnt_probe_trunked_xprts(clp->cl_rpcclient, &rpcdata);
out:
	return status;
}
|
|
|
|
|
2009-04-01 13:22:34 +00:00
|
|
|
/*
 * Issue the over-the-wire RPC DESTROY_SESSION.
 * The caller must serialize access to this routine.
 */
int nfs4_proc_destroy_session(struct nfs4_session *session,
		const struct cred *cred)
{
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_SESSION],
		.rpc_argp = session,
		.rpc_cred = cred,
	};
	int status = 0;

	/* session is still being setup */
	if (!test_and_clear_bit(NFS4_SESSION_ESTABLISHED, &session->session_state))
		return 0;

	status = rpc_call_sync(session->clp->cl_rpcclient, &msg,
			       RPC_TASK_TIMEOUT | RPC_TASK_NO_ROUND_ROBIN);
	trace_nfs4_destroy_session(session->clp, status);

	/* local teardown proceeds even if the server reported an error */
	if (status)
		dprintk("NFS: Got error %d from the server on DESTROY_SESSION. "
			"Session has been destroyed regardless...\n", status);
	rpc_clnt_manage_trunked_xprts(session->clp->cl_rpcclient);
	return status;
}
|
|
|
|
|
2009-04-01 13:22:36 +00:00
|
|
|
/*
 * Renew the cl_session lease.
 */
/* Call data for the async lease-renewal SEQUENCE operation. */
struct nfs4_sequence_data {
	struct nfs_client *clp;		/* client whose lease is renewed */
	struct nfs4_sequence_args args;	/* SEQUENCE request arguments */
	struct nfs4_sequence_res res;	/* SEQUENCE reply buffer */
};
|
|
|
|
|
2010-02-05 11:45:04 +00:00
|
|
|
/*
 * rpc_release callback for the renewal SEQUENCE task: schedule the next
 * renewal (unless ours is the last reference to the client), then drop
 * our client reference and free the call data.
 */
static void nfs41_sequence_release(void *data)
{
	struct nfs4_sequence_data *calldata = data;
	struct nfs_client *clp = calldata->clp;

	/* only reschedule while someone else still holds the client */
	if (refcount_read(&clp->cl_count) > 1)
		nfs4_schedule_state_renewal(clp);
	nfs_put_client(clp);
	kfree(calldata);
}
|
|
|
|
|
2010-06-16 13:52:25 +00:00
|
|
|
static int nfs41_sequence_handle_errors(struct rpc_task *task, struct nfs_client *clp)
|
|
|
|
{
|
|
|
|
switch(task->tk_status) {
|
|
|
|
case -NFS4ERR_DELAY:
|
|
|
|
rpc_delay(task, NFS4_POLL_RETRY_MAX);
|
|
|
|
return -EAGAIN;
|
|
|
|
default:
|
2011-03-09 21:00:53 +00:00
|
|
|
nfs4_schedule_lease_recovery(clp);
|
2010-06-16 13:52:25 +00:00
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2010-02-05 11:45:04 +00:00
|
|
|
/* rpc_call_done callback for the lease-renewal SEQUENCE task. */
static void nfs41_sequence_call_done(struct rpc_task *task, void *data)
{
	struct nfs4_sequence_data *calldata = data;
	struct nfs_client *clp = calldata->clp;

	if (!nfs41_sequence_done(task, task->tk_msg.rpc_resp))
		return;

	trace_nfs4_sequence(clp, task->tk_status);
	/* don't bother with error handling when the client is shutting down */
	if (task->tk_status < 0 && !task->tk_client->cl_shutdown) {
		dprintk("%s ERROR %d\n", __func__, task->tk_status);
		/* we hold the last reference: no point recovering */
		if (refcount_read(&clp->cl_count) == 1)
			return;

		if (nfs41_sequence_handle_errors(task, clp) == -EAGAIN) {
			rpc_restart_call_prepare(task);
			return;
		}
	}
	dprintk("%s rpc_cred %p\n", __func__, task->tk_msg.rpc_cred);
}
|
|
|
|
|
|
|
|
/* rpc_call_prepare callback: set up the SEQUENCE slot for the renewal task. */
static void nfs41_sequence_prepare(struct rpc_task *task, void *data)
{
	struct nfs4_sequence_data *calldata = data;
	struct nfs_client *clp = calldata->clp;
	struct nfs4_sequence_args *args;
	struct nfs4_sequence_res *res;

	args = task->tk_msg.rpc_argp;
	res = task->tk_msg.rpc_resp;

	nfs4_setup_sequence(clp, args, res, task);
}
|
|
|
|
|
|
|
|
/* RPC callbacks for the async lease-renewal SEQUENCE task. */
static const struct rpc_call_ops nfs41_sequence_ops = {
	.rpc_call_done = nfs41_sequence_call_done,
	.rpc_call_prepare = nfs41_sequence_prepare,
	.rpc_release = nfs41_sequence_release,
};
|
|
|
|
|
2012-10-29 23:02:20 +00:00
|
|
|
/*
 * Launch an async SEQUENCE task for @clp.
 *
 * @slot may be NULL (a slot is then acquired in rpc_call_prepare) or a
 * pre-acquired slot, which is released again on every error path.
 * @is_privileged selects the privileged sequence flavour used during
 * state recovery.  Returns the running task or an ERR_PTR; on success
 * the task owns a reference to @clp, dropped in nfs41_sequence_release.
 */
static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp,
		const struct cred *cred,
		struct nfs4_slot *slot,
		bool is_privileged)
{
	struct nfs4_sequence_data *calldata;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SEQUENCE],
		.rpc_cred = cred,
	};
	struct rpc_task_setup task_setup_data = {
		.rpc_client = clp->cl_rpcclient,
		.rpc_message = &msg,
		.callback_ops = &nfs41_sequence_ops,
		.flags = RPC_TASK_ASYNC | RPC_TASK_TIMEOUT | RPC_TASK_MOVEABLE,
	};
	struct rpc_task *ret;

	ret = ERR_PTR(-EIO);
	/* take a client reference; fail if it is already being torn down */
	if (!refcount_inc_not_zero(&clp->cl_count))
		goto out_err;

	ret = ERR_PTR(-ENOMEM);
	calldata = kzalloc(sizeof(*calldata), GFP_KERNEL);
	if (calldata == NULL)
		goto out_put_clp;
	nfs4_init_sequence(&calldata->args, &calldata->res, 0, is_privileged);
	nfs4_sequence_attach_slot(&calldata->args, &calldata->res, slot);
	msg.rpc_argp = &calldata->args;
	msg.rpc_resp = &calldata->res;
	calldata->clp = clp;
	task_setup_data.callback_data = calldata;

	ret = rpc_run_task(&task_setup_data);
	if (IS_ERR(ret))
		goto out_err;
	return ret;
out_put_clp:
	nfs_put_client(clp);
out_err:
	/* a caller-supplied slot must not leak on failure */
	nfs41_release_slot(slot);
	return ret;
}
|
|
|
|
|
2018-12-03 00:30:31 +00:00
|
|
|
/*
 * Fire-and-forget lease renewal: start an async SEQUENCE and drop the
 * task reference without waiting.  Returns -EAGAIN when no renewal is
 * due (NFS4_RENEW_TIMEOUT not set in @renew_flags).
 */
static int nfs41_proc_async_sequence(struct nfs_client *clp, const struct cred *cred, unsigned renew_flags)
{
	struct rpc_task *task;
	int ret = 0;

	if ((renew_flags & NFS4_RENEW_TIMEOUT) == 0)
		return -EAGAIN;
	task = _nfs41_proc_sequence(clp, cred, NULL, false);
	if (IS_ERR(task))
		ret = PTR_ERR(task);
	else
		/* async put: safe to call from rpciod context */
		rpc_put_task_async(task);
	dprintk("<-- %s status=%d\n", __func__, ret);
	return ret;
}
|
|
|
|
|
2018-12-03 00:30:31 +00:00
|
|
|
static int nfs4_proc_sequence(struct nfs_client *clp, const struct cred *cred)
|
2010-06-16 13:52:26 +00:00
|
|
|
{
|
|
|
|
struct rpc_task *task;
|
|
|
|
int ret;
|
|
|
|
|
2017-10-19 19:46:45 +00:00
|
|
|
task = _nfs41_proc_sequence(clp, cred, NULL, true);
|
2010-06-16 13:52:26 +00:00
|
|
|
if (IS_ERR(task)) {
|
|
|
|
ret = PTR_ERR(task);
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
ret = rpc_wait_for_completion_task(task);
|
2015-07-05 18:50:46 +00:00
|
|
|
if (!ret)
|
2010-06-16 13:52:26 +00:00
|
|
|
ret = task->tk_status;
|
|
|
|
rpc_put_task(task);
|
|
|
|
out:
|
|
|
|
dprintk("<-- %s status=%d\n", __func__, ret);
|
|
|
|
return ret;
|
2009-04-01 13:22:36 +00:00
|
|
|
}
|
|
|
|
|
2009-12-05 21:08:41 +00:00
|
|
|
/* Call data for the async RECLAIM_COMPLETE operation. */
struct nfs4_reclaim_complete_data {
	struct nfs_client *clp;			/* client issuing the op */
	struct nfs41_reclaim_complete_args arg;	/* request arguments */
	struct nfs41_reclaim_complete_res res;	/* reply buffer */
};
|
|
|
|
|
|
|
|
/* rpc_call_prepare callback: attach the SEQUENCE op for RECLAIM_COMPLETE. */
static void nfs4_reclaim_complete_prepare(struct rpc_task *task, void *data)
{
	struct nfs4_reclaim_complete_data *calldata = data;

	nfs4_setup_sequence(calldata->clp,
			&calldata->arg.seq_args,
			&calldata->res.seq_res,
			task);
}
|
|
|
|
|
2010-06-16 13:52:25 +00:00
|
|
|
/*
 * Classify the outcome of a RECLAIM_COMPLETE: returns -EAGAIN when the
 * caller should retry the call, 0 when the status has been fully handled
 * (which may include scheduling lease recovery).
 */
static int nfs41_reclaim_complete_handle_errors(struct rpc_task *task, struct nfs_client *clp)
{
	switch(task->tk_status) {
	case 0:
		/* reclaim finished: release waiters blocked on lock reclaim */
		wake_up_all(&clp->cl_lock_waitq);
		fallthrough;
	case -NFS4ERR_COMPLETE_ALREADY:
	case -NFS4ERR_WRONG_CRED: /* What to do here? */
		break;
	case -NFS4ERR_DELAY:
		rpc_delay(task, NFS4_POLL_RETRY_MAX);
		fallthrough;
	case -NFS4ERR_RETRY_UNCACHED_REP:
	case -EACCES:
		dprintk("%s: failed to reclaim complete error %d for server %s, retrying\n",
			__func__, task->tk_status, clp->cl_hostname);
		return -EAGAIN;
	case -NFS4ERR_BADSESSION:
	case -NFS4ERR_DEADSESSION:
	case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
		/* session-level errors: handled by session recovery, not here */
		break;
	default:
		nfs4_schedule_lease_recovery(clp);
	}
	return 0;
}
|
|
|
|
|
2009-12-05 21:08:41 +00:00
|
|
|
/* rpc_call_done callback: restart the task on retryable RECLAIM_COMPLETE errors. */
static void nfs4_reclaim_complete_done(struct rpc_task *task, void *data)
{
	struct nfs4_reclaim_complete_data *calldata = data;
	struct nfs_client *clp = calldata->clp;
	struct nfs4_sequence_res *res = &calldata->res.seq_res;

	if (!nfs41_sequence_done(task, res))
		return;

	trace_nfs4_reclaim_complete(clp, task->tk_status);
	if (nfs41_reclaim_complete_handle_errors(task, clp) == -EAGAIN) {
		rpc_restart_call_prepare(task);
		return;
	}
}
|
|
|
|
|
|
|
|
/* rpc_release callback: free the RECLAIM_COMPLETE call data. */
static void nfs4_free_reclaim_complete_data(void *data)
{
	kfree(data);
}
|
|
|
|
|
|
|
|
/* RPC callbacks for the RECLAIM_COMPLETE task. */
static const struct rpc_call_ops nfs4_reclaim_complete_call_ops = {
	.rpc_call_prepare = nfs4_reclaim_complete_prepare,
	.rpc_call_done = nfs4_reclaim_complete_done,
	.rpc_release = nfs4_free_reclaim_complete_data,
};
|
|
|
|
|
|
|
|
/*
 * Issue a global reclaim complete.
 */
static int nfs41_proc_reclaim_complete(struct nfs_client *clp,
		const struct cred *cred)
{
	struct nfs4_reclaim_complete_data *calldata;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RECLAIM_COMPLETE],
		.rpc_cred = cred,
	};
	struct rpc_task_setup task_setup_data = {
		.rpc_client = clp->cl_rpcclient,
		.rpc_message = &msg,
		.callback_ops = &nfs4_reclaim_complete_call_ops,
		.flags = RPC_TASK_NO_ROUND_ROBIN,
	};
	int status = -ENOMEM;

	calldata = kzalloc(sizeof(*calldata), GFP_NOFS);
	if (calldata == NULL)
		goto out;
	calldata->clp = clp;
	/* one_fs == 0: reclaim is complete for all filesystems */
	calldata->arg.one_fs = 0;

	/* privileged sequence (last arg 1): runs during state recovery */
	nfs4_init_sequence(&calldata->arg.seq_args, &calldata->res.seq_res, 0, 1);
	msg.rpc_argp = &calldata->arg;
	msg.rpc_resp = &calldata->res;
	task_setup_data.callback_data = calldata;
	/* calldata is freed by the task's rpc_release callback */
	status = nfs4_call_sync_custom(&task_setup_data);
out:
	dprintk("<-- %s status=%d\n", __func__, status);
	return status;
}
|
2010-10-20 04:18:03 +00:00
|
|
|
|
|
|
|
static void
|
|
|
|
nfs4_layoutget_prepare(struct rpc_task *task, void *calldata)
|
|
|
|
{
|
|
|
|
struct nfs4_layoutget *lgp = calldata;
|
2011-01-06 11:36:24 +00:00
|
|
|
struct nfs_server *server = NFS_SERVER(lgp->args.inode);
|
2010-10-20 04:18:03 +00:00
|
|
|
|
2017-01-10 16:39:53 +00:00
|
|
|
nfs4_setup_sequence(server->nfs_client, &lgp->args.seq_args,
|
pnfs: rework LAYOUTGET retry handling
There are several problems in the way a stateid is selected for a
LAYOUTGET operation:
We pick a stateid to use in the RPC prepare op, but that makes
it difficult to serialize LAYOUTGETs that use the open stateid. That
serialization is done in pnfs_update_layout, which occurs well before
the rpc_prepare operation.
Between those two events, the i_lock is dropped and reacquired.
pnfs_update_layout can find that the list has lsegs in it and not do any
serialization, but then later pnfs_choose_layoutget_stateid ends up
choosing the open stateid.
This patch changes the client to select the stateid to use in the
LAYOUTGET earlier, when we're searching for a usable layout segment.
This way we can do it all while holding the i_lock the first time, and
ensure that we serialize any LAYOUTGET call that uses a non-layout
stateid.
This also means a rework of how LAYOUTGET replies are handled, as we
must now get the latest stateid if we want to retransmit in response
to a retryable error.
Most of those errors boil down to the fact that the layout state has
changed in some fashion. Thus, what we really want to do is to re-search
for a layout when it fails with a retryable error, so that we can avoid
reissuing the RPC at all if possible.
While the LAYOUTGET RPC is async, the initiating thread always waits for
it to complete, so it's effectively synchronous anyway. Currently, when
we need to retry a LAYOUTGET because of an error, we drive that retry
via the rpc state machine.
This means that once the call has been submitted, it runs until it
completes. So, we must move the error handling for this RPC out of the
rpc_call_done operation and into the caller.
In order to handle errors like NFS4ERR_DELAY properly, we must also
pass a pointer to the sliding timeout, which is now moved to the stack
in pnfs_update_layout.
The complicating errors are -NFS4ERR_RECALLCONFLICT and
-NFS4ERR_LAYOUTTRYLATER, as those involve a timeout after which we give
up and return NULL back to the caller. So, there is some special
handling for those errors to ensure that the layers driving the retries
can handle that appropriately.
Signed-off-by: Jeff Layton <jeff.layton@primarydata.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
2016-05-17 16:28:47 +00:00
|
|
|
&lgp->res.seq_res, task);
|
2010-10-20 04:18:03 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static void nfs4_layoutget_done(struct rpc_task *task, void *calldata)
|
|
|
|
{
|
|
|
|
struct nfs4_layoutget *lgp = calldata;
|
pnfs: rework LAYOUTGET retry handling
There are several problems in the way a stateid is selected for a
LAYOUTGET operation:
We pick a stateid to use in the RPC prepare op, but that makes
it difficult to serialize LAYOUTGETs that use the open stateid. That
serialization is done in pnfs_update_layout, which occurs well before
the rpc_prepare operation.
Between those two events, the i_lock is dropped and reacquired.
pnfs_update_layout can find that the list has lsegs in it and not do any
serialization, but then later pnfs_choose_layoutget_stateid ends up
choosing the open stateid.
This patch changes the client to select the stateid to use in the
LAYOUTGET earlier, when we're searching for a usable layout segment.
This way we can do it all while holding the i_lock the first time, and
ensure that we serialize any LAYOUTGET call that uses a non-layout
stateid.
This also means a rework of how LAYOUTGET replies are handled, as we
must now get the latest stateid if we want to retransmit in response
to a retryable error.
Most of those errors boil down to the fact that the layout state has
changed in some fashion. Thus, what we really want to do is to re-search
for a layout when it fails with a retryable error, so that we can avoid
reissuing the RPC at all if possible.
While the LAYOUTGET RPC is async, the initiating thread always waits for
it to complete, so it's effectively synchronous anyway. Currently, when
we need to retry a LAYOUTGET because of an error, we drive that retry
via the rpc state machine.
This means that once the call has been submitted, it runs until it
completes. So, we must move the error handling for this RPC out of the
rpc_call_done operation and into the caller.
In order to handle errors like NFS4ERR_DELAY properly, we must also
pass a pointer to the sliding timeout, which is now moved to the stack
in pnfs_update_layout.
The complicating errors are -NFS4ERR_RECALLCONFLICT and
-NFS4ERR_LAYOUTTRYLATER, as those involve a timeout after which we give
up and return NULL back to the caller. So, there is some special
handling for those errors to ensure that the layers driving the retries
can handle that appropriately.
Signed-off-by: Jeff Layton <jeff.layton@primarydata.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
2016-05-17 16:28:47 +00:00
|
|
|
|
2016-08-28 15:50:26 +00:00
|
|
|
nfs41_sequence_process(task, &lgp->res.seq_res);
|
pnfs: rework LAYOUTGET retry handling
There are several problems in the way a stateid is selected for a
LAYOUTGET operation:
We pick a stateid to use in the RPC prepare op, but that makes
it difficult to serialize LAYOUTGETs that use the open stateid. That
serialization is done in pnfs_update_layout, which occurs well before
the rpc_prepare operation.
Between those two events, the i_lock is dropped and reacquired.
pnfs_update_layout can find that the list has lsegs in it and not do any
serialization, but then later pnfs_choose_layoutget_stateid ends up
choosing the open stateid.
This patch changes the client to select the stateid to use in the
LAYOUTGET earlier, when we're searching for a usable layout segment.
This way we can do it all while holding the i_lock the first time, and
ensure that we serialize any LAYOUTGET call that uses a non-layout
stateid.
This also means a rework of how LAYOUTGET replies are handled, as we
must now get the latest stateid if we want to retransmit in response
to a retryable error.
Most of those errors boil down to the fact that the layout state has
changed in some fashion. Thus, what we really want to do is to re-search
for a layout when it fails with a retryable error, so that we can avoid
reissuing the RPC at all if possible.
While the LAYOUTGET RPC is async, the initiating thread always waits for
it to complete, so it's effectively synchronous anyway. Currently, when
we need to retry a LAYOUTGET because of an error, we drive that retry
via the rpc state machine.
This means that once the call has been submitted, it runs until it
completes. So, we must move the error handling for this RPC out of the
rpc_call_done operation and into the caller.
In order to handle errors like NFS4ERR_DELAY properly, we must also
pass a pointer to the sliding timeout, which is now moved to the stack
in pnfs_update_layout.
The complicating errors are -NFS4ERR_RECALLCONFLICT and
-NFS4ERR_LAYOUTTRYLATER, as those involve a timeout after which we give
up and return NULL back to the caller. So, there is some special
handling for those errors to ensure that the layers driving the retries
can handle that appropriately.
Signed-off-by: Jeff Layton <jeff.layton@primarydata.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
2016-05-17 16:28:47 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Map a completed LAYOUTGET task's status onto an action for the
 * caller.  Returns:
 *   0               - success (or final error from nfs4_handle_exception)
 *   -EAGAIN         - caller should retry (stateid recovered/invalidated)
 *   -ENODATA        - server says do I/O through the MDS (no pnfs)
 *   -EOVERFLOW      - layout shorter than minlength cannot be granted
 *   -EBUSY          - LAYOUTTRYLATER: retry for a while, then give up
 *   -ERECALLCONFLICT - recall/return conflict: timed retry, then give up
 *
 * Also fills in *exception (state/stateid/timeout/retry) so the caller
 * can drive standard NFSv4 recovery.
 */
static int
nfs4_layoutget_handle_exception(struct rpc_task *task,
		struct nfs4_layoutget *lgp, struct nfs4_exception *exception)
{
	struct inode *inode = lgp->args.inode;
	struct nfs_server *server = NFS_SERVER(inode);
	struct pnfs_layout_hdr *lo = lgp->lo;
	int nfs4err = task->tk_status;
	int err, status = 0;
	LIST_HEAD(head);

	dprintk("--> %s tk_status => %d\n", __func__, -task->tk_status);

	/* done processing the sequence reply; release the session slot */
	nfs4_sequence_free_slot(&lgp->res.seq_res);

	exception->state = NULL;
	exception->stateid = NULL;

	switch (nfs4err) {
	case 0:
		goto out;

	/*
	 * NFS4ERR_LAYOUTUNAVAILABLE means we are not supposed to use pnfs
	 * on the file. set tk_status to -ENODATA to tell upper layer to
	 * retry go inband.
	 */
	case -NFS4ERR_LAYOUTUNAVAILABLE:
		status = -ENODATA;
		goto out;
	/*
	 * NFS4ERR_BADLAYOUT means the MDS cannot return a layout of
	 * length lgp->args.minlength != 0 (see RFC5661 section 18.43.3).
	 */
	case -NFS4ERR_BADLAYOUT:
		status = -EOVERFLOW;
		goto out;
	/*
	 * NFS4ERR_LAYOUTTRYLATER is a conflict with another client
	 * (or clients) writing to the same RAID stripe except when
	 * the minlength argument is 0 (see RFC5661 section 18.43.3).
	 *
	 * Treat it like we would RECALLCONFLICT -- we retry for a little
	 * while, and then eventually give up.
	 */
	case -NFS4ERR_LAYOUTTRYLATER:
		if (lgp->args.minlength == 0) {
			status = -EOVERFLOW;
			goto out;
		}
		status = -EBUSY;
		break;
	case -NFS4ERR_RECALLCONFLICT:
	case -NFS4ERR_RETURNCONFLICT:
		status = -ERECALLCONFLICT;
		break;
	case -NFS4ERR_DELEG_REVOKED:
	case -NFS4ERR_ADMIN_REVOKED:
	case -NFS4ERR_EXPIRED:
	case -NFS4ERR_BAD_STATEID:
		exception->timeout = 0;
		spin_lock(&inode->i_lock);
		/* If the open stateid was bad, then recover it. */
		if (!lo || test_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags) ||
		    !nfs4_stateid_match_other(&lgp->args.stateid, &lo->plh_stateid)) {
			spin_unlock(&inode->i_lock);
			exception->state = lgp->args.ctx->state;
			exception->stateid = &lgp->args.stateid;
			break;
		}

		/*
		 * Mark the bad layout state as invalid, then retry
		 */
		pnfs_mark_layout_stateid_invalid(lo, &head);
		spin_unlock(&inode->i_lock);
		nfs_commit_inode(inode, 0);
		pnfs_free_lseg_list(&head);
		status = -EAGAIN;
		goto out;
	}

	/* run generic NFSv4 error handling for the remaining cases */
	err = nfs4_handle_exception(server, nfs4err, exception);
	if (!status) {
		if (exception->retry)
			status = -EAGAIN;
		else
			status = err;
	}
out:
	return status;
}
|
|
|
|
|
2016-09-19 21:47:09 +00:00
|
|
|
size_t max_response_pages(struct nfs_server *server)
|
2012-08-02 08:47:10 +00:00
|
|
|
{
|
|
|
|
u32 max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz;
|
|
|
|
return nfs_page_array_len(0, max_resp_sz);
|
|
|
|
}
|
|
|
|
|
2010-10-20 04:18:03 +00:00
|
|
|
static void nfs4_layoutget_release(void *calldata)
|
|
|
|
{
|
|
|
|
struct nfs4_layoutget *lgp = calldata;
|
|
|
|
|
2017-06-27 21:33:38 +00:00
|
|
|
nfs4_sequence_free_slot(&lgp->res.seq_res);
|
2018-05-30 21:16:20 +00:00
|
|
|
pnfs_layoutget_free(lgp);
|
2010-10-20 04:18:03 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static const struct rpc_call_ops nfs4_layoutget_call_ops = {
|
|
|
|
.rpc_call_prepare = nfs4_layoutget_prepare,
|
|
|
|
.rpc_call_done = nfs4_layoutget_done,
|
|
|
|
.rpc_release = nfs4_layoutget_release,
|
|
|
|
};
|
|
|
|
|
2012-09-17 21:12:15 +00:00
|
|
|
struct pnfs_layout_segment *
|
2023-09-09 23:16:53 +00:00
|
|
|
nfs4_proc_layoutget(struct nfs4_layoutget *lgp,
|
|
|
|
struct nfs4_exception *exception)
|
2010-10-20 04:18:03 +00:00
|
|
|
{
|
2013-02-26 02:27:33 +00:00
|
|
|
struct inode *inode = lgp->args.inode;
|
|
|
|
struct nfs_server *server = NFS_SERVER(inode);
|
2010-10-20 04:18:03 +00:00
|
|
|
struct rpc_task *task;
|
|
|
|
struct rpc_message msg = {
|
|
|
|
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTGET],
|
|
|
|
.rpc_argp = &lgp->args,
|
|
|
|
.rpc_resp = &lgp->res,
|
2013-05-20 14:49:34 +00:00
|
|
|
.rpc_cred = lgp->cred,
|
2010-10-20 04:18:03 +00:00
|
|
|
};
|
|
|
|
struct rpc_task_setup task_setup_data = {
|
|
|
|
.rpc_client = server->client,
|
|
|
|
.rpc_message = &msg,
|
|
|
|
.callback_ops = &nfs4_layoutget_call_ops,
|
|
|
|
.callback_data = lgp,
|
2021-06-24 03:28:51 +00:00
|
|
|
.flags = RPC_TASK_ASYNC | RPC_TASK_CRED_NOREF |
|
|
|
|
RPC_TASK_MOVEABLE,
|
2010-10-20 04:18:03 +00:00
|
|
|
};
|
2012-09-17 21:12:15 +00:00
|
|
|
struct pnfs_layout_segment *lseg = NULL;
|
2010-10-20 04:18:03 +00:00
|
|
|
int status = 0;
|
|
|
|
|
2018-05-04 20:22:50 +00:00
|
|
|
nfs4_init_sequence(&lgp->args.seq_args, &lgp->res.seq_res, 0, 0);
|
2023-09-09 23:16:53 +00:00
|
|
|
exception->retry = 0;
|
2013-02-26 02:27:33 +00:00
|
|
|
|
2010-10-20 04:18:03 +00:00
|
|
|
task = rpc_run_task(&task_setup_data);
|
2022-04-07 02:33:19 +00:00
|
|
|
if (IS_ERR(task))
|
|
|
|
return ERR_CAST(task);
|
2020-04-25 13:04:40 +00:00
|
|
|
|
2017-01-11 20:01:43 +00:00
|
|
|
status = rpc_wait_for_completion_task(task);
|
2018-06-15 19:58:45 +00:00
|
|
|
if (status != 0)
|
|
|
|
goto out;
|
|
|
|
|
2019-02-13 12:55:31 +00:00
|
|
|
if (task->tk_status < 0) {
|
2023-09-09 23:16:53 +00:00
|
|
|
exception->retry = 1;
|
|
|
|
status = nfs4_layoutget_handle_exception(task, lgp, exception);
|
2019-02-13 12:55:31 +00:00
|
|
|
} else if (lgp->res.layoutp->len == 0) {
|
2023-09-09 23:16:53 +00:00
|
|
|
exception->retry = 1;
|
2019-02-13 12:55:31 +00:00
|
|
|
status = -EAGAIN;
|
2023-09-09 23:16:53 +00:00
|
|
|
nfs4_update_delay(&exception->timeout);
|
2018-06-15 19:58:45 +00:00
|
|
|
} else
|
|
|
|
lseg = pnfs_layout_process(lgp);
|
|
|
|
out:
|
2013-08-14 20:36:51 +00:00
|
|
|
trace_nfs4_layoutget(lgp->args.ctx,
|
|
|
|
&lgp->args.range,
|
|
|
|
&lgp->res.range,
|
Adding stateid information to tracepoints
Operations to which stateid information is added:
close, delegreturn, open, read, setattr, layoutget, layoutcommit, test_stateid,
write, lock, locku, lockt
Format is "stateid=<seqid>:<crc32 hash stateid.other>", also "openstateid=",
"layoutstateid=", and "lockstateid=" for open_file, layoutget, set_lock
tracepoints.
New function is added to internal.h, nfs_stateid_hash(), to compute the hash
trace_nfs4_setattr() is moved from nfs4_do_setattr() to _nfs4_do_setattr()
to get access to stateid.
trace_nfs4_setattr and trace_nfs4_delegreturn are changed from INODE_EVENT
to new event type, INODE_STATEID_EVENT which is same as INODE_EVENT but adds
stateid information
for locking tracepoints, moved trace_nfs4_set_lock() into _nfs4_do_setlk()
to get access to stateid information, and removed trace_nfs4_lock_reclaim(),
trace_nfs4_lock_expired() as they call into _nfs4_do_setlk() and both were
previously same LOCK_EVENT type.
Signed-off-by: Olga Kornievskaia <kolga@netapp.com>
Signed-off-by: Trond Myklebust <trond.myklebust@primarydata.com>
2015-11-24 18:29:41 +00:00
|
|
|
&lgp->res.stateid,
|
2013-08-14 20:36:51 +00:00
|
|
|
status);
|
pnfs: rework LAYOUTGET retry handling
There are several problems in the way a stateid is selected for a
LAYOUTGET operation:
We pick a stateid to use in the RPC prepare op, but that makes
it difficult to serialize LAYOUTGETs that use the open stateid. That
serialization is done in pnfs_update_layout, which occurs well before
the rpc_prepare operation.
Between those two events, the i_lock is dropped and reacquired.
pnfs_update_layout can find that the list has lsegs in it and not do any
serialization, but then later pnfs_choose_layoutget_stateid ends up
choosing the open stateid.
This patch changes the client to select the stateid to use in the
LAYOUTGET earlier, when we're searching for a usable layout segment.
This way we can do it all while holding the i_lock the first time, and
ensure that we serialize any LAYOUTGET call that uses a non-layout
stateid.
This also means a rework of how LAYOUTGET replies are handled, as we
must now get the latest stateid if we want to retransmit in response
to a retryable error.
Most of those errors boil down to the fact that the layout state has
changed in some fashion. Thus, what we really want to do is to re-search
for a layout when it fails with a retryable error, so that we can avoid
reissuing the RPC at all if possible.
While the LAYOUTGET RPC is async, the initiating thread always waits for
it to complete, so it's effectively synchronous anyway. Currently, when
we need to retry a LAYOUTGET because of an error, we drive that retry
via the rpc state machine.
This means that once the call has been submitted, it runs until it
completes. So, we must move the error handling for this RPC out of the
rpc_call_done operation and into the caller.
In order to handle errors like NFS4ERR_DELAY properly, we must also
pass a pointer to the sliding timeout, which is now moved to the stack
in pnfs_update_layout.
The complicating errors are -NFS4ERR_RECALLCONFLICT and
-NFS4ERR_LAYOUTTRYLATER, as those involve a timeout after which we give
up and return NULL back to the caller. So, there is some special
handling for those errors to ensure that the layers driving the retries
can handle that appropriately.
Signed-off-by: Jeff Layton <jeff.layton@primarydata.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
2016-05-17 16:28:47 +00:00
|
|
|
|
2010-10-20 04:18:03 +00:00
|
|
|
rpc_put_task(task);
|
|
|
|
dprintk("<-- %s status=%d\n", __func__, status);
|
2012-09-17 21:12:15 +00:00
|
|
|
if (status)
|
|
|
|
return ERR_PTR(status);
|
|
|
|
return lseg;
|
2010-10-20 04:18:03 +00:00
|
|
|
}
|
|
|
|
|
2011-05-22 16:52:37 +00:00
|
|
|
static void
|
|
|
|
nfs4_layoutreturn_prepare(struct rpc_task *task, void *calldata)
|
|
|
|
{
|
|
|
|
struct nfs4_layoutreturn *lrp = calldata;
|
|
|
|
|
2017-01-10 16:39:53 +00:00
|
|
|
nfs4_setup_sequence(lrp->clp,
|
2012-10-23 00:28:44 +00:00
|
|
|
&lrp->args.seq_args,
|
|
|
|
&lrp->res.seq_res,
|
|
|
|
task);
|
2018-06-15 20:31:02 +00:00
|
|
|
if (!pnfs_layout_is_valid(lrp->args.layout))
|
|
|
|
rpc_exit(task, 0);
|
2011-05-22 16:52:37 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static void nfs4_layoutreturn_done(struct rpc_task *task, void *calldata)
|
|
|
|
{
|
|
|
|
struct nfs4_layoutreturn *lrp = calldata;
|
|
|
|
struct nfs_server *server;
|
|
|
|
|
2016-08-28 15:50:26 +00:00
|
|
|
if (!nfs41_sequence_process(task, &lrp->res.seq_res))
|
2011-05-22 16:52:37 +00:00
|
|
|
return;
|
|
|
|
|
2019-09-20 11:23:43 +00:00
|
|
|
/*
|
|
|
|
* Was there an RPC level error? Assume the call succeeded,
|
|
|
|
* and that we need to release the layout
|
|
|
|
*/
|
|
|
|
if (task->tk_rpc_status != 0 && RPC_WAS_SENT(task)) {
|
|
|
|
lrp->res.lrs_present = 0;
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2011-05-22 16:52:37 +00:00
|
|
|
server = NFS_SERVER(lrp->args.inode);
|
2013-12-04 17:09:45 +00:00
|
|
|
switch (task->tk_status) {
|
2017-11-06 20:28:08 +00:00
|
|
|
case -NFS4ERR_OLD_STATEID:
|
2019-09-20 11:23:45 +00:00
|
|
|
if (nfs4_layout_refresh_old_stateid(&lrp->args.stateid,
|
2018-08-16 01:35:46 +00:00
|
|
|
&lrp->args.range,
|
2017-11-06 20:28:08 +00:00
|
|
|
lrp->args.inode))
|
|
|
|
goto out_restart;
|
2020-08-23 22:36:59 +00:00
|
|
|
fallthrough;
|
2013-12-04 17:09:45 +00:00
|
|
|
default:
|
|
|
|
task->tk_status = 0;
|
2020-08-23 22:36:59 +00:00
|
|
|
fallthrough;
|
2013-12-04 17:09:45 +00:00
|
|
|
case 0:
|
|
|
|
break;
|
|
|
|
case -NFS4ERR_DELAY:
|
2014-09-18 06:09:27 +00:00
|
|
|
if (nfs4_async_handle_error(task, server, NULL, NULL) != -EAGAIN)
|
2013-12-04 17:09:45 +00:00
|
|
|
break;
|
2017-11-06 20:28:08 +00:00
|
|
|
goto out_restart;
|
2011-05-22 16:52:37 +00:00
|
|
|
}
|
2017-11-06 20:28:08 +00:00
|
|
|
return;
|
|
|
|
out_restart:
|
|
|
|
task->tk_status = 0;
|
|
|
|
nfs4_sequence_free_slot(&lrp->res.seq_res);
|
|
|
|
rpc_restart_call_prepare(task);
|
2011-05-22 16:52:37 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static void nfs4_layoutreturn_release(void *calldata)
|
|
|
|
{
|
|
|
|
struct nfs4_layoutreturn *lrp = calldata;
|
2012-09-24 18:18:39 +00:00
|
|
|
struct pnfs_layout_hdr *lo = lrp->args.layout;
|
2011-05-22 16:52:37 +00:00
|
|
|
|
2016-11-20 18:13:54 +00:00
|
|
|
pnfs_layoutreturn_free_lsegs(lo, &lrp->args.stateid, &lrp->args.range,
|
2016-10-12 23:50:54 +00:00
|
|
|
lrp->res.lrs_present ? &lrp->res.stateid : NULL);
|
2016-08-28 15:50:26 +00:00
|
|
|
nfs4_sequence_free_slot(&lrp->res.seq_res);
|
2016-09-23 15:38:08 +00:00
|
|
|
if (lrp->ld_private.ops && lrp->ld_private.ops->free)
|
|
|
|
lrp->ld_private.ops->free(&lrp->ld_private);
|
2016-12-07 17:29:26 +00:00
|
|
|
pnfs_put_layout_hdr(lrp->args.layout);
|
|
|
|
nfs_iput_and_deactive(lrp->inode);
|
2020-04-02 19:37:02 +00:00
|
|
|
put_cred(lrp->cred);
|
2011-05-22 16:52:37 +00:00
|
|
|
kfree(calldata);
|
|
|
|
}
|
|
|
|
|
|
|
|
static const struct rpc_call_ops nfs4_layoutreturn_call_ops = {
|
|
|
|
.rpc_call_prepare = nfs4_layoutreturn_prepare,
|
|
|
|
.rpc_call_done = nfs4_layoutreturn_done,
|
|
|
|
.rpc_release = nfs4_layoutreturn_release,
|
|
|
|
};
|
|
|
|
|
2014-11-17 01:30:40 +00:00
|
|
|
/*
 * nfs4_proc_layoutreturn - send a LAYOUTRETURN to the server.
 * @lrp: call data (consumed; released via nfs4_layoutreturn_release)
 * @sync: if true, wait for the task and report its status
 *
 * Returns 0 (async) or the task status (sync); -EAGAIN if the inode
 * reference needed for an async call could not be taken.
 */
int nfs4_proc_layoutreturn(struct nfs4_layoutreturn *lrp, bool sync)
{
	struct rpc_task *task;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTRETURN],
		.rpc_argp = &lrp->args,
		.rpc_resp = &lrp->res,
		.rpc_cred = lrp->cred,
	};
	struct rpc_task_setup task_setup_data = {
		.rpc_client = NFS_SERVER(lrp->args.inode)->client,
		.rpc_message = &msg,
		.callback_ops = &nfs4_layoutreturn_call_ops,
		.callback_data = lrp,
		.flags = RPC_TASK_MOVEABLE,
	};
	int status = 0;

	/* May switch to the machine-cred client per SP4_MACH_CRED. */
	nfs4_state_protect(NFS_SERVER(lrp->args.inode)->nfs_client,
			NFS_SP4_MACH_CRED_PNFS_CLEANUP,
			&task_setup_data.rpc_client, &msg);

	lrp->inode = nfs_igrab_and_active(lrp->args.inode);
	if (!sync) {
		if (!lrp->inode) {
			nfs4_layoutreturn_release(lrp);
			return -EAGAIN;
		}
		task_setup_data.flags |= RPC_TASK_ASYNC;
	}
	/*
	 * Without an active inode reference the reply cannot be handled
	 * in rpc_release, so ask for a privileged (no-gc) slot.
	 */
	if (!lrp->inode)
		nfs4_init_sequence(&lrp->args.seq_args, &lrp->res.seq_res, 1,
				   1);
	else
		nfs4_init_sequence(&lrp->args.seq_args, &lrp->res.seq_res, 1,
				   0);
	task = rpc_run_task(&task_setup_data);
	if (IS_ERR(task))
		return PTR_ERR(task);
	if (sync)
		status = task->tk_status;
	trace_nfs4_layoutreturn(lrp->args.inode, &lrp->args.stateid, status);
	dprintk("<-- %s status=%d\n", __func__, status);
	rpc_put_task(task);
	return status;
}
|
|
|
|
|
2010-10-20 04:18:03 +00:00
|
|
|
static int
|
2013-05-20 15:42:54 +00:00
|
|
|
_nfs4_proc_getdeviceinfo(struct nfs_server *server,
|
|
|
|
struct pnfs_device *pdev,
|
2018-12-03 00:30:31 +00:00
|
|
|
const struct cred *cred)
|
2010-10-20 04:18:03 +00:00
|
|
|
{
|
|
|
|
struct nfs4_getdeviceinfo_args args = {
|
|
|
|
.pdev = pdev,
|
2015-03-09 18:01:25 +00:00
|
|
|
.notify_types = NOTIFY_DEVICEID4_CHANGE |
|
|
|
|
NOTIFY_DEVICEID4_DELETE,
|
2010-10-20 04:18:03 +00:00
|
|
|
};
|
|
|
|
struct nfs4_getdeviceinfo_res res = {
|
|
|
|
.pdev = pdev,
|
|
|
|
};
|
|
|
|
struct rpc_message msg = {
|
|
|
|
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETDEVICEINFO],
|
|
|
|
.rpc_argp = &args,
|
|
|
|
.rpc_resp = &res,
|
2013-05-20 15:42:54 +00:00
|
|
|
.rpc_cred = cred,
|
2010-10-20 04:18:03 +00:00
|
|
|
};
|
|
|
|
int status;
|
|
|
|
|
2011-03-24 17:12:24 +00:00
|
|
|
status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
|
2015-03-09 18:01:25 +00:00
|
|
|
if (res.notification & ~args.notify_types)
|
|
|
|
dprintk("%s: unsupported notification\n", __func__);
|
2015-03-09 18:48:32 +00:00
|
|
|
if (res.notification != args.notify_types)
|
|
|
|
pdev->nocache = 1;
|
2015-03-09 18:01:25 +00:00
|
|
|
|
2020-12-16 21:31:26 +00:00
|
|
|
trace_nfs4_getdeviceinfo(server, &pdev->dev_id, status);
|
|
|
|
|
2010-10-20 04:18:03 +00:00
|
|
|
dprintk("<-- %s status=%d\n", __func__, status);
|
|
|
|
|
|
|
|
return status;
|
|
|
|
}
|
|
|
|
|
2013-05-20 15:42:54 +00:00
|
|
|
int nfs4_proc_getdeviceinfo(struct nfs_server *server,
|
|
|
|
struct pnfs_device *pdev,
|
2018-12-03 00:30:31 +00:00
|
|
|
const struct cred *cred)
|
2010-10-20 04:18:03 +00:00
|
|
|
{
|
|
|
|
struct nfs4_exception exception = { };
|
|
|
|
int err;
|
|
|
|
|
|
|
|
do {
|
|
|
|
err = nfs4_handle_exception(server,
|
2013-05-20 15:42:54 +00:00
|
|
|
_nfs4_proc_getdeviceinfo(server, pdev, cred),
|
2010-10-20 04:18:03 +00:00
|
|
|
&exception);
|
|
|
|
} while (exception.retry);
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(nfs4_proc_getdeviceinfo);
|
|
|
|
|
2011-03-23 13:27:54 +00:00
|
|
|
static void nfs4_layoutcommit_prepare(struct rpc_task *task, void *calldata)
|
|
|
|
{
|
|
|
|
struct nfs4_layoutcommit_data *data = calldata;
|
|
|
|
struct nfs_server *server = NFS_SERVER(data->args.inode);
|
|
|
|
|
2017-01-10 16:39:53 +00:00
|
|
|
nfs4_setup_sequence(server->nfs_client,
|
2012-10-23 00:28:44 +00:00
|
|
|
&data->args.seq_args,
|
|
|
|
&data->res.seq_res,
|
|
|
|
task);
|
2011-03-23 13:27:54 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
nfs4_layoutcommit_done(struct rpc_task *task, void *calldata)
|
|
|
|
{
|
|
|
|
struct nfs4_layoutcommit_data *data = calldata;
|
|
|
|
struct nfs_server *server = NFS_SERVER(data->args.inode);
|
|
|
|
|
2012-10-23 00:07:20 +00:00
|
|
|
if (!nfs41_sequence_done(task, &data->res.seq_res))
|
2011-03-23 13:27:54 +00:00
|
|
|
return;
|
|
|
|
|
|
|
|
switch (task->tk_status) { /* Just ignore these failures */
|
2012-03-27 22:22:19 +00:00
|
|
|
case -NFS4ERR_DELEG_REVOKED: /* layout was recalled */
|
|
|
|
case -NFS4ERR_BADIOMODE: /* no IOMODE_RW layout for range */
|
|
|
|
case -NFS4ERR_BADLAYOUT: /* no layout */
|
|
|
|
case -NFS4ERR_GRACE: /* loca_recalim always false */
|
2011-03-23 13:27:54 +00:00
|
|
|
task->tk_status = 0;
|
2020-11-20 18:26:46 +00:00
|
|
|
break;
|
2012-03-27 22:22:19 +00:00
|
|
|
case 0:
|
|
|
|
break;
|
|
|
|
default:
|
2014-09-18 06:09:27 +00:00
|
|
|
if (nfs4_async_handle_error(task, server, NULL, NULL) == -EAGAIN) {
|
2012-03-27 22:22:19 +00:00
|
|
|
rpc_restart_call_prepare(task);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
}
|
2011-03-23 13:27:54 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static void nfs4_layoutcommit_release(void *calldata)
|
|
|
|
{
|
|
|
|
struct nfs4_layoutcommit_data *data = calldata;
|
|
|
|
|
2011-07-31 00:52:38 +00:00
|
|
|
pnfs_cleanup_layoutcommit(data);
|
2014-01-13 17:08:11 +00:00
|
|
|
nfs_post_op_update_inode_force_wcc(data->args.inode,
|
|
|
|
data->res.fattr);
|
2018-12-03 00:30:31 +00:00
|
|
|
put_cred(data->cred);
|
2015-02-05 21:50:30 +00:00
|
|
|
nfs_iput_and_deactive(data->inode);
|
2011-03-23 13:27:54 +00:00
|
|
|
kfree(data);
|
|
|
|
}
|
|
|
|
|
|
|
|
static const struct rpc_call_ops nfs4_layoutcommit_ops = {
|
|
|
|
.rpc_call_prepare = nfs4_layoutcommit_prepare,
|
|
|
|
.rpc_call_done = nfs4_layoutcommit_done,
|
|
|
|
.rpc_release = nfs4_layoutcommit_release,
|
|
|
|
};
|
|
|
|
|
|
|
|
int
|
2011-03-12 07:58:10 +00:00
|
|
|
nfs4_proc_layoutcommit(struct nfs4_layoutcommit_data *data, bool sync)
|
2011-03-23 13:27:54 +00:00
|
|
|
{
|
|
|
|
struct rpc_message msg = {
|
|
|
|
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTCOMMIT],
|
|
|
|
.rpc_argp = &data->args,
|
|
|
|
.rpc_resp = &data->res,
|
|
|
|
.rpc_cred = data->cred,
|
|
|
|
};
|
|
|
|
struct rpc_task_setup task_setup_data = {
|
|
|
|
.task = &data->task,
|
|
|
|
.rpc_client = NFS_CLIENT(data->args.inode),
|
|
|
|
.rpc_message = &msg,
|
|
|
|
.callback_ops = &nfs4_layoutcommit_ops,
|
|
|
|
.callback_data = data,
|
2021-06-24 03:28:51 +00:00
|
|
|
.flags = RPC_TASK_MOVEABLE,
|
2011-03-23 13:27:54 +00:00
|
|
|
};
|
|
|
|
struct rpc_task *task;
|
|
|
|
int status = 0;
|
|
|
|
|
2015-07-01 04:00:13 +00:00
|
|
|
dprintk("NFS: initiating layoutcommit call. sync %d "
|
|
|
|
"lbw: %llu inode %lu\n", sync,
|
2011-03-23 13:27:54 +00:00
|
|
|
data->args.lastbytewritten,
|
|
|
|
data->args.inode->i_ino);
|
|
|
|
|
2015-02-05 21:50:30 +00:00
|
|
|
if (!sync) {
|
|
|
|
data->inode = nfs_igrab_and_active(data->args.inode);
|
|
|
|
if (data->inode == NULL) {
|
|
|
|
nfs4_layoutcommit_release(data);
|
|
|
|
return -EAGAIN;
|
|
|
|
}
|
|
|
|
task_setup_data.flags = RPC_TASK_ASYNC;
|
|
|
|
}
|
2018-05-04 20:22:50 +00:00
|
|
|
nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1, 0);
|
2011-03-23 13:27:54 +00:00
|
|
|
task = rpc_run_task(&task_setup_data);
|
|
|
|
if (IS_ERR(task))
|
|
|
|
return PTR_ERR(task);
|
2015-02-05 21:50:30 +00:00
|
|
|
if (sync)
|
|
|
|
status = task->tk_status;
|
Adding stateid information to tracepoints
Operations to which stateid information is added:
close, delegreturn, open, read, setattr, layoutget, layoutcommit, test_stateid,
write, lock, locku, lockt
Format is "stateid=<seqid>:<crc32 hash stateid.other>", also "openstateid=",
"layoutstateid=", and "lockstateid=" for open_file, layoutget, set_lock
tracepoints.
New function is added to internal.h, nfs_stateid_hash(), to compute the hash
trace_nfs4_setattr() is moved from nfs4_do_setattr() to _nfs4_do_setattr()
to get access to stateid.
trace_nfs4_setattr and trace_nfs4_delegreturn are changed from INODE_EVENT
to new event type, INODE_STATEID_EVENT which is same as INODE_EVENT but adds
stateid information
for locking tracepoints, moved trace_nfs4_set_lock() into _nfs4_do_setlk()
to get access to stateid information, and removed trace_nfs4_lock_reclaim(),
trace_nfs4_lock_expired() as they call into _nfs4_do_setlk() and both were
previously same LOCK_EVENT type.
Signed-off-by: Olga Kornievskaia <kolga@netapp.com>
Signed-off-by: Trond Myklebust <trond.myklebust@primarydata.com>
2015-11-24 18:29:41 +00:00
|
|
|
trace_nfs4_layoutcommit(data->args.inode, &data->args.stateid, status);
|
2011-03-23 13:27:54 +00:00
|
|
|
dprintk("%s: status %d\n", __func__, status);
|
|
|
|
rpc_put_task(task);
|
|
|
|
return status;
|
|
|
|
}
|
2011-06-02 18:59:07 +00:00
|
|
|
|
2019-02-18 18:32:38 +00:00
|
|
|
/*
 * Use the state management nfs_client cl_rpcclient, which uses krb5i (if
 * possible) as per RFC3530bis and RFC5661 Security Considerations sections
 */
|
2011-06-02 18:59:07 +00:00
|
|
|
static int
|
|
|
|
_nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle,
|
2013-09-04 16:13:19 +00:00
|
|
|
struct nfs_fsinfo *info,
|
|
|
|
struct nfs4_secinfo_flavors *flavors, bool use_integrity)
|
2011-06-02 18:59:07 +00:00
|
|
|
{
|
|
|
|
struct nfs41_secinfo_no_name_args args = {
|
|
|
|
.style = SECINFO_STYLE_CURRENT_FH,
|
|
|
|
};
|
|
|
|
struct nfs4_secinfo_res res = {
|
|
|
|
.flavors = flavors,
|
|
|
|
};
|
|
|
|
struct rpc_message msg = {
|
|
|
|
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SECINFO_NO_NAME],
|
|
|
|
.rpc_argp = &args,
|
|
|
|
.rpc_resp = &res,
|
|
|
|
};
|
2019-08-14 20:22:31 +00:00
|
|
|
struct nfs4_call_sync_data data = {
|
|
|
|
.seq_server = server,
|
|
|
|
.seq_args = &args.seq_args,
|
|
|
|
.seq_res = &res.seq_res,
|
|
|
|
};
|
|
|
|
struct rpc_task_setup task_setup = {
|
|
|
|
.rpc_client = server->client,
|
|
|
|
.rpc_message = &msg,
|
|
|
|
.callback_ops = server->nfs_client->cl_mvops->call_sync_ops,
|
|
|
|
.callback_data = &data,
|
|
|
|
.flags = RPC_TASK_NO_ROUND_ROBIN,
|
|
|
|
};
|
2018-12-03 00:30:31 +00:00
|
|
|
const struct cred *cred = NULL;
|
2013-09-04 16:13:19 +00:00
|
|
|
int status;
|
|
|
|
|
|
|
|
if (use_integrity) {
|
2020-06-09 23:22:57 +00:00
|
|
|
task_setup.rpc_client = server->nfs_client->cl_rpcclient;
|
2019-08-14 20:22:31 +00:00
|
|
|
|
2013-09-10 22:44:31 +00:00
|
|
|
cred = nfs4_get_clid_cred(server->nfs_client);
|
|
|
|
msg.rpc_cred = cred;
|
2013-09-04 16:13:19 +00:00
|
|
|
}
|
|
|
|
|
2019-08-14 20:22:31 +00:00
|
|
|
nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 0);
|
|
|
|
status = nfs4_call_sync_custom(&task_setup);
|
2013-09-04 16:13:19 +00:00
|
|
|
dprintk("<-- %s status=%d\n", __func__, status);
|
|
|
|
|
2018-12-03 00:30:31 +00:00
|
|
|
put_cred(cred);
|
2013-09-04 16:13:19 +00:00
|
|
|
|
|
|
|
return status;
|
2011-06-02 18:59:07 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle,
|
|
|
|
struct nfs_fsinfo *info, struct nfs4_secinfo_flavors *flavors)
|
|
|
|
{
|
2019-04-07 17:59:09 +00:00
|
|
|
struct nfs4_exception exception = {
|
|
|
|
.interruptible = true,
|
|
|
|
};
|
2011-06-02 18:59:07 +00:00
|
|
|
int err;
|
|
|
|
do {
|
2013-09-04 16:13:19 +00:00
|
|
|
/* first try using integrity protection */
|
|
|
|
err = -NFS4ERR_WRONGSEC;
|
|
|
|
|
|
|
|
/* try to use integrity protection with machine cred */
|
|
|
|
if (_nfs4_is_integrity_protected(server->nfs_client))
|
|
|
|
err = _nfs41_proc_secinfo_no_name(server, fhandle, info,
|
|
|
|
flavors, true);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* if unable to use integrity protection, or SECINFO with
|
|
|
|
* integrity protection returns NFS4ERR_WRONGSEC (which is
|
|
|
|
* disallowed by spec, but exists in deployed servers) use
|
|
|
|
* the current filesystem's rpc_client and the user cred.
|
|
|
|
*/
|
|
|
|
if (err == -NFS4ERR_WRONGSEC)
|
|
|
|
err = _nfs41_proc_secinfo_no_name(server, fhandle, info,
|
|
|
|
flavors, false);
|
|
|
|
|
2011-06-02 18:59:07 +00:00
|
|
|
switch (err) {
|
|
|
|
case 0:
|
|
|
|
case -NFS4ERR_WRONGSEC:
|
2014-01-13 21:54:45 +00:00
|
|
|
case -ENOTSUPP:
|
2012-03-27 22:13:02 +00:00
|
|
|
goto out;
|
2011-06-02 18:59:07 +00:00
|
|
|
default:
|
|
|
|
err = nfs4_handle_exception(server, err, &exception);
|
|
|
|
}
|
|
|
|
} while (exception.retry);
|
2012-03-27 22:13:02 +00:00
|
|
|
out:
|
2011-06-02 18:59:07 +00:00
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
nfs41_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
|
|
|
|
struct nfs_fsinfo *info)
|
|
|
|
{
|
|
|
|
int err;
|
|
|
|
struct page *page;
|
2013-09-25 21:02:48 +00:00
|
|
|
rpc_authflavor_t flavor = RPC_AUTH_MAXFLAVOR;
|
2011-06-02 18:59:07 +00:00
|
|
|
struct nfs4_secinfo_flavors *flavors;
|
2013-09-24 17:58:02 +00:00
|
|
|
struct nfs4_secinfo4 *secinfo;
|
|
|
|
int i;
|
2011-06-02 18:59:07 +00:00
|
|
|
|
|
|
|
page = alloc_page(GFP_KERNEL);
|
|
|
|
if (!page) {
|
|
|
|
err = -ENOMEM;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
|
|
|
flavors = page_address(page);
|
|
|
|
err = nfs41_proc_secinfo_no_name(server, fhandle, info, flavors);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Fall back on "guess and check" method if
|
|
|
|
* the server doesn't support SECINFO_NO_NAME
|
|
|
|
*/
|
2014-01-13 21:54:45 +00:00
|
|
|
if (err == -NFS4ERR_WRONGSEC || err == -ENOTSUPP) {
|
2011-06-02 18:59:07 +00:00
|
|
|
err = nfs4_find_root_sec(server, fhandle, info);
|
|
|
|
goto out_freepage;
|
|
|
|
}
|
|
|
|
if (err)
|
|
|
|
goto out_freepage;
|
|
|
|
|
2013-09-24 17:58:02 +00:00
|
|
|
for (i = 0; i < flavors->num_flavors; i++) {
|
|
|
|
secinfo = &flavors->flavors[i];
|
|
|
|
|
|
|
|
switch (secinfo->flavor) {
|
|
|
|
case RPC_AUTH_NULL:
|
|
|
|
case RPC_AUTH_UNIX:
|
|
|
|
case RPC_AUTH_GSS:
|
|
|
|
flavor = rpcauth_get_pseudoflavor(secinfo->flavor,
|
|
|
|
&secinfo->flavor_info);
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
flavor = RPC_AUTH_MAXFLAVOR;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2013-10-18 19:15:19 +00:00
|
|
|
if (!nfs_auth_info_match(&server->auth_info, flavor))
|
|
|
|
flavor = RPC_AUTH_MAXFLAVOR;
|
|
|
|
|
2013-09-24 17:58:02 +00:00
|
|
|
if (flavor != RPC_AUTH_MAXFLAVOR) {
|
|
|
|
err = nfs4_lookup_root_sec(server, fhandle,
|
|
|
|
info, flavor);
|
|
|
|
if (!err)
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (flavor == RPC_AUTH_MAXFLAVOR)
|
|
|
|
err = -EPERM;
|
2011-06-02 18:59:07 +00:00
|
|
|
|
|
|
|
out_freepage:
|
|
|
|
put_page(page);
|
|
|
|
if (err == -EACCES)
|
|
|
|
return -EPERM;
|
|
|
|
out:
|
|
|
|
return err;
|
|
|
|
}
|
2012-01-31 15:39:29 +00:00
|
|
|
|
2013-05-20 15:20:27 +00:00
|
|
|
static int _nfs41_test_stateid(struct nfs_server *server,
|
|
|
|
nfs4_stateid *stateid,
|
2018-12-03 00:30:31 +00:00
|
|
|
const struct cred *cred)
|
2011-06-02 18:59:08 +00:00
|
|
|
{
|
|
|
|
int status;
|
|
|
|
struct nfs41_test_stateid_args args = {
|
2012-01-31 15:39:29 +00:00
|
|
|
.stateid = stateid,
|
2011-06-02 18:59:08 +00:00
|
|
|
};
|
|
|
|
struct nfs41_test_stateid_res res;
|
|
|
|
struct rpc_message msg = {
|
|
|
|
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_TEST_STATEID],
|
|
|
|
.rpc_argp = &args,
|
|
|
|
.rpc_resp = &res,
|
2013-05-20 15:20:27 +00:00
|
|
|
.rpc_cred = cred,
|
2011-06-02 18:59:08 +00:00
|
|
|
};
|
2013-08-13 20:37:36 +00:00
|
|
|
struct rpc_clnt *rpc_client = server->client;
|
|
|
|
|
|
|
|
nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_STATEID,
|
|
|
|
&rpc_client, &msg);
|
2012-01-31 15:39:29 +00:00
|
|
|
|
2012-07-11 20:30:23 +00:00
|
|
|
dprintk("NFS call test_stateid %p\n", stateid);
|
2018-05-04 20:22:50 +00:00
|
|
|
nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 1);
|
2013-08-13 20:37:36 +00:00
|
|
|
status = nfs4_call_sync_sequence(rpc_client, server, &msg,
|
2012-10-29 23:02:20 +00:00
|
|
|
&args.seq_args, &res.seq_res);
|
2012-07-11 20:30:23 +00:00
|
|
|
if (status != NFS_OK) {
|
|
|
|
dprintk("NFS reply test_stateid: failed, %d\n", status);
|
NFS: Fix up TEST_STATEID and FREE_STATEID return code handling
The TEST_STATEID and FREE_STATEID operations can return
-NFS4ERR_BAD_STATEID, -NFS4ERR_OLD_STATEID, or -NFS4ERR_DEADSESSION.
nfs41_{test,free}_stateid() should not pass these errors to
nfs4_handle_exception() during state recovery, since that will
recursively kick off state recovery again, resulting in a deadlock.
In particular, when the TEST_STATEID operation returns NFS4_OK,
res.status can contain one of these errors. _nfs41_test_stateid()
replaces NFS4_OK with the value in res.status, which is then returned
to callers.
But res.status is not passed through nfs4_stat_to_errno(), and thus is
a positive NFS4ERR value. Currently callers are only interested in
!NFS4_OK, and nfs4_handle_exception() ignores positive values.
Thus the res.status values are currently ignored by
nfs4_handle_exception() and won't cause the deadlock above. Thanks to
this missing negative, it is only when these operations fail (which
is very rare) that a deadlock can occur.
Bryan agrees the original intent was to return res.status as a
negative NFS4ERR value to callers of nfs41_test_stateid().
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
2012-07-11 20:29:45 +00:00
|
|
|
return status;
|
2012-07-11 20:30:23 +00:00
|
|
|
}
|
|
|
|
dprintk("NFS reply test_stateid: succeeded, %d\n", -res.status);
|
NFS: Fix up TEST_STATEID and FREE_STATEID return code handling
The TEST_STATEID and FREE_STATEID operations can return
-NFS4ERR_BAD_STATEID, -NFS4ERR_OLD_STATEID, or -NFS4ERR_DEADSESSION.
nfs41_{test,free}_stateid() should not pass these errors to
nfs4_handle_exception() during state recovery, since that will
recursively kick off state recovery again, resulting in a deadlock.
In particular, when the TEST_STATEID operation returns NFS4_OK,
res.status can contain one of these errors. _nfs41_test_stateid()
replaces NFS4_OK with the value in res.status, which is then returned
to callers.
But res.status is not passed through nfs4_stat_to_errno(), and thus is
a positive NFS4ERR value. Currently callers are only interested in
!NFS4_OK, and nfs4_handle_exception() ignores positive values.
Thus the res.status values are currently ignored by
nfs4_handle_exception() and won't cause the deadlock above. Thanks to
this missing negative, it is only when these operations fail (which
is very rare) that a deadlock can occur.
Bryan agrees the original intent was to return res.status as a
negative NFS4ERR value to callers of nfs41_test_stateid().
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
2012-07-11 20:29:45 +00:00
|
|
|
return -res.status;
|
2011-06-02 18:59:08 +00:00
|
|
|
}
|
|
|
|
|
2016-09-22 17:38:56 +00:00
|
|
|
static void nfs4_handle_delay_or_session_error(struct nfs_server *server,
|
|
|
|
int err, struct nfs4_exception *exception)
|
|
|
|
{
|
|
|
|
exception->retry = 0;
|
|
|
|
switch(err) {
|
|
|
|
case -NFS4ERR_DELAY:
|
2016-09-22 17:39:19 +00:00
|
|
|
case -NFS4ERR_RETRY_UNCACHED_REP:
|
2016-09-22 17:38:56 +00:00
|
|
|
nfs4_handle_exception(server, err, exception);
|
|
|
|
break;
|
|
|
|
case -NFS4ERR_BADSESSION:
|
|
|
|
case -NFS4ERR_BADSLOT:
|
|
|
|
case -NFS4ERR_BAD_HIGH_SLOT:
|
|
|
|
case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
|
|
|
|
case -NFS4ERR_DEADSESSION:
|
|
|
|
nfs4_do_handle_exception(server, err, exception);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2012-07-11 20:30:23 +00:00
|
|
|
/**
 * nfs41_test_stateid - perform a TEST_STATEID operation
 *
 * @server: server / transport on which to perform the operation
 * @stateid: state ID to test
 * @cred: credential
 *
 * Returns NFS_OK if the server recognizes that "stateid" is valid.
 * Otherwise a negative NFS4ERR value is returned if the operation
 * failed or the state ID is not currently valid.
 */
static int nfs41_test_stateid(struct nfs_server *server,
		nfs4_stateid *stateid,
		const struct cred *cred)
{
	struct nfs4_exception exception = {
		.interruptible = true,
	};
	int err;

	do {
		err = _nfs41_test_stateid(server, stateid, cred);
		/* Only delay/session-class errors set exception.retry;
		 * everything else falls out of the loop and is returned. */
		nfs4_handle_delay_or_session_error(server, err, &exception);
	} while (exception.retry);
	return err;
}
|
2011-06-02 18:59:09 +00:00
|
|
|
|
2013-05-03 18:40:01 +00:00
|
|
|
/*
 * Per-call context for the asynchronous FREE_STATEID RPC.  Allocated in
 * nfs41_free_stateid() and freed by the task's rpc_release callback.
 */
struct nfs_free_stateid_data {
	struct nfs_server *server;		/* used to reach the nfs_client */
	struct nfs41_free_stateid_args args;	/* holds the stateid to free */
	struct nfs41_free_stateid_res res;	/* sequence result */
};
|
|
|
|
|
|
|
|
static void nfs41_free_stateid_prepare(struct rpc_task *task, void *calldata)
|
|
|
|
{
|
|
|
|
struct nfs_free_stateid_data *data = calldata;
|
2017-01-10 16:39:53 +00:00
|
|
|
nfs4_setup_sequence(data->server->nfs_client,
|
2013-05-03 18:40:01 +00:00
|
|
|
&data->args.seq_args,
|
|
|
|
&data->res.seq_res,
|
|
|
|
task);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * rpc_call_done: complete the session sequence, then retry the call on
 * NFS4ERR_DELAY if the generic async error handler asks for it.  Every
 * other status is deliberately ignored: the stateid release is
 * best-effort and no caller waits on the result.
 */
static void nfs41_free_stateid_done(struct rpc_task *task, void *calldata)
{
	struct nfs_free_stateid_data *data = calldata;

	nfs41_sequence_done(task, &data->res.seq_res);

	switch (task->tk_status) {
	case -NFS4ERR_DELAY:
		if (nfs4_async_handle_error(task, data->server, NULL, NULL) == -EAGAIN)
			rpc_restart_call_prepare(task);
	}
}
|
|
|
|
|
|
|
|
static void nfs41_free_stateid_release(void *calldata)
|
|
|
|
{
|
2021-11-03 10:24:40 +00:00
|
|
|
struct nfs_free_stateid_data *data = calldata;
|
|
|
|
struct nfs_client *clp = data->server->nfs_client;
|
|
|
|
|
|
|
|
nfs_put_client(clp);
|
2013-05-03 18:40:01 +00:00
|
|
|
kfree(calldata);
|
|
|
|
}
|
|
|
|
|
2013-08-21 19:48:42 +00:00
|
|
|
/* Callback table for the asynchronous FREE_STATEID task */
static const struct rpc_call_ops nfs41_free_stateid_ops = {
	.rpc_call_prepare = nfs41_free_stateid_prepare,
	.rpc_call_done = nfs41_free_stateid_done,
	.rpc_release = nfs41_free_stateid_release,
};
|
|
|
|
|
2018-05-15 17:03:39 +00:00
|
|
|
/**
|
|
|
|
* nfs41_free_stateid - perform a FREE_STATEID operation
|
|
|
|
*
|
|
|
|
* @server: server / transport on which to perform the operation
|
|
|
|
* @stateid: state ID to release
|
|
|
|
* @cred: credential
|
2019-02-18 18:32:38 +00:00
|
|
|
* @privileged: set to true if this call needs to be privileged
|
2018-05-15 17:03:39 +00:00
|
|
|
*
|
|
|
|
* Note: this function is always asynchronous.
|
|
|
|
*/
|
|
|
|
static int nfs41_free_stateid(struct nfs_server *server,
|
2016-09-22 17:39:04 +00:00
|
|
|
const nfs4_stateid *stateid,
|
2018-12-03 00:30:31 +00:00
|
|
|
const struct cred *cred,
|
2013-05-03 18:40:01 +00:00
|
|
|
bool privileged)
|
|
|
|
{
|
2011-06-02 18:59:09 +00:00
|
|
|
struct rpc_message msg = {
|
|
|
|
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FREE_STATEID],
|
2013-05-20 15:20:27 +00:00
|
|
|
.rpc_cred = cred,
|
2011-06-02 18:59:09 +00:00
|
|
|
};
|
2013-05-03 18:40:01 +00:00
|
|
|
struct rpc_task_setup task_setup = {
|
|
|
|
.rpc_client = server->client,
|
|
|
|
.rpc_message = &msg,
|
|
|
|
.callback_ops = &nfs41_free_stateid_ops,
|
2021-06-24 03:28:51 +00:00
|
|
|
.flags = RPC_TASK_ASYNC | RPC_TASK_MOVEABLE,
|
2013-05-03 18:40:01 +00:00
|
|
|
};
|
|
|
|
struct nfs_free_stateid_data *data;
|
2018-05-15 17:03:39 +00:00
|
|
|
struct rpc_task *task;
|
2021-11-03 10:24:40 +00:00
|
|
|
struct nfs_client *clp = server->nfs_client;
|
|
|
|
|
|
|
|
if (!refcount_inc_not_zero(&clp->cl_count))
|
|
|
|
return -EIO;
|
2011-06-02 18:59:09 +00:00
|
|
|
|
2013-08-13 20:37:36 +00:00
|
|
|
nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_STATEID,
|
|
|
|
&task_setup.rpc_client, &msg);
|
|
|
|
|
2012-07-11 20:30:23 +00:00
|
|
|
dprintk("NFS call free_stateid %p\n", stateid);
|
2022-01-29 18:57:38 +00:00
|
|
|
data = kmalloc(sizeof(*data), GFP_KERNEL);
|
2013-05-03 18:40:01 +00:00
|
|
|
if (!data)
|
2018-05-15 17:03:39 +00:00
|
|
|
return -ENOMEM;
|
2013-05-03 18:40:01 +00:00
|
|
|
data->server = server;
|
|
|
|
nfs4_stateid_copy(&data->args.stateid, stateid);
|
|
|
|
|
|
|
|
task_setup.callback_data = data;
|
|
|
|
|
|
|
|
msg.rpc_argp = &data->args;
|
|
|
|
msg.rpc_resp = &data->res;
|
2018-05-04 20:22:50 +00:00
|
|
|
nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1, privileged);
|
2018-05-15 17:03:39 +00:00
|
|
|
task = rpc_run_task(&task_setup);
|
2013-05-03 18:40:01 +00:00
|
|
|
if (IS_ERR(task))
|
|
|
|
return PTR_ERR(task);
|
|
|
|
rpc_put_task(task);
|
2016-09-22 17:39:04 +00:00
|
|
|
return 0;
|
2011-06-02 18:59:09 +00:00
|
|
|
}
|
2012-03-04 23:13:56 +00:00
|
|
|
|
2014-05-01 10:28:47 +00:00
|
|
|
/*
 * Tell the server to release a lock stateid via an asynchronous
 * FREE_STATEID call (best-effort; the return value is intentionally
 * ignored), then free the client-side lock state.
 */
static void
nfs41_free_lock_state(struct nfs_server *server, struct nfs4_lock_state *lsp)
{
	const struct cred *cred = lsp->ls_state->owner->so_cred;

	nfs41_free_stateid(server, &lsp->ls_stateid, cred, false);
	nfs4_free_lock_state(server, lsp);
}
|
|
|
|
|
2012-03-04 23:13:56 +00:00
|
|
|
static bool nfs41_match_stateid(const nfs4_stateid *s1,
|
|
|
|
const nfs4_stateid *s2)
|
|
|
|
{
|
2016-05-16 21:42:43 +00:00
|
|
|
if (s1->type != s2->type)
|
|
|
|
return false;
|
|
|
|
|
2012-03-04 23:13:57 +00:00
|
|
|
if (memcmp(s1->other, s2->other, sizeof(s1->other)) != 0)
|
2012-03-04 23:13:56 +00:00
|
|
|
return false;
|
|
|
|
|
2012-03-04 23:13:57 +00:00
|
|
|
if (s1->seqid == s2->seqid)
|
2012-03-04 23:13:56 +00:00
|
|
|
return true;
|
|
|
|
|
2017-01-11 21:59:48 +00:00
|
|
|
return s1->seqid == 0 || s2->seqid == 0;
|
2012-03-04 23:13:56 +00:00
|
|
|
}
|
|
|
|
|
2009-04-01 13:21:53 +00:00
|
|
|
#endif /* CONFIG_NFS_V4_1 */
|
|
|
|
|
2012-03-04 23:13:56 +00:00
|
|
|
/*
 * NFSv4.0 stateid comparison: a plain wrapper around
 * nfs4_stateid_match() (no zero-seqid wildcard handling, unlike the
 * v4.1 variant above).
 */
static bool nfs4_match_stateid(const nfs4_stateid *s1,
		const nfs4_stateid *s2)
{
	return nfs4_stateid_match(s1, s2);
}
|
|
|
|
|
|
|
|
|
2012-03-11 17:11:00 +00:00
|
|
|
/* NFSv4.0 state recovery after a server reboot */
static const struct nfs4_state_recovery_ops nfs40_reboot_recovery_ops = {
	.owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT,
	.state_flag_bit	= NFS_STATE_RECLAIM_REBOOT,
	.recover_open	= nfs4_open_reclaim,
	.recover_lock	= nfs4_lock_reclaim,
	.establish_clid = nfs4_init_clientid,
	.detect_trunking = nfs40_discover_server_trunking,
};
|
|
|
|
|
2009-04-01 13:22:47 +00:00
|
|
|
#if defined(CONFIG_NFS_V4_1)
|
2012-03-11 17:11:00 +00:00
|
|
|
static const struct nfs4_state_recovery_ops nfs41_reboot_recovery_ops = {
|
2009-04-01 13:22:47 +00:00
|
|
|
.owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT,
|
|
|
|
.state_flag_bit = NFS_STATE_RECLAIM_REBOOT,
|
|
|
|
.recover_open = nfs4_open_reclaim,
|
|
|
|
.recover_lock = nfs4_lock_reclaim,
|
2009-12-04 20:52:24 +00:00
|
|
|
.establish_clid = nfs41_init_clientid,
|
2009-12-05 21:08:41 +00:00
|
|
|
.reclaim_complete = nfs41_proc_reclaim_complete,
|
2012-09-14 21:24:32 +00:00
|
|
|
.detect_trunking = nfs41_discover_server_trunking,
|
2009-04-01 13:22:47 +00:00
|
|
|
};
|
|
|
|
#endif /* CONFIG_NFS_V4_1 */
|
|
|
|
|
2012-03-11 17:11:00 +00:00
|
|
|
/* NFSv4.0 state recovery when no grace period applies (expired state) */
static const struct nfs4_state_recovery_ops nfs40_nograce_recovery_ops = {
	.owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE,
	.state_flag_bit	= NFS_STATE_RECLAIM_NOGRACE,
	.recover_open	= nfs40_open_expired,
	.recover_lock	= nfs4_lock_expired,
	.establish_clid = nfs4_init_clientid,
};
|
|
|
|
|
|
|
|
#if defined(CONFIG_NFS_V4_1)
|
2012-03-11 17:11:00 +00:00
|
|
|
static const struct nfs4_state_recovery_ops nfs41_nograce_recovery_ops = {
|
2008-12-23 20:21:43 +00:00
|
|
|
.owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE,
|
2008-12-23 20:21:41 +00:00
|
|
|
.state_flag_bit = NFS_STATE_RECLAIM_NOGRACE,
|
2011-06-02 18:59:10 +00:00
|
|
|
.recover_open = nfs41_open_expired,
|
|
|
|
.recover_lock = nfs41_lock_expired,
|
2009-12-04 20:52:24 +00:00
|
|
|
.establish_clid = nfs41_init_clientid,
|
2005-04-16 22:20:36 +00:00
|
|
|
};
|
2009-04-01 13:22:47 +00:00
|
|
|
#endif /* CONFIG_NFS_V4_1 */
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2012-03-11 17:11:00 +00:00
|
|
|
/* NFSv4.0 lease maintenance: renew via the RENEW operation */
static const struct nfs4_state_maintenance_ops nfs40_state_renewal_ops = {
	.sched_state_renewal = nfs4_proc_async_renew,
	.get_state_renewal_cred = nfs4_get_renew_cred,
	.renew_lease = nfs4_proc_renew,
};
|
|
|
|
|
|
|
|
#if defined(CONFIG_NFS_V4_1)
|
2012-03-11 17:11:00 +00:00
|
|
|
static const struct nfs4_state_maintenance_ops nfs41_state_renewal_ops = {
|
2009-04-01 13:22:44 +00:00
|
|
|
.sched_state_renewal = nfs41_proc_async_sequence,
|
2018-12-03 00:30:30 +00:00
|
|
|
.get_state_renewal_cred = nfs4_get_machine_cred,
|
2009-04-01 13:22:45 +00:00
|
|
|
.renew_lease = nfs4_proc_sequence,
|
2009-04-01 13:22:44 +00:00
|
|
|
};
|
|
|
|
#endif
|
|
|
|
|
2013-10-17 18:12:39 +00:00
|
|
|
/* NFSv4.0 migration recovery operations */
static const struct nfs4_mig_recovery_ops nfs40_mig_recovery_ops = {
	.get_locations = _nfs40_proc_get_locations,
	.fsid_present = _nfs40_proc_fsid_present,
};
|
|
|
|
|
|
|
|
#if defined(CONFIG_NFS_V4_1)
|
|
|
|
static const struct nfs4_mig_recovery_ops nfs41_mig_recovery_ops = {
|
2013-10-17 18:12:50 +00:00
|
|
|
.get_locations = _nfs41_proc_get_locations,
|
2013-10-17 18:13:30 +00:00
|
|
|
.fsid_present = _nfs41_proc_fsid_present,
|
2013-10-17 18:12:39 +00:00
|
|
|
};
|
|
|
|
#endif /* CONFIG_NFS_V4_1 */
|
|
|
|
|
2010-06-16 13:52:26 +00:00
|
|
|
/* Minor-version dispatch table for NFSv4.0 */
static const struct nfs4_minor_version_ops nfs_v4_0_minor_ops = {
	.minor_version = 0,
	.init_caps = NFS_CAP_READDIRPLUS
		| NFS_CAP_ATOMIC_OPEN
		| NFS_CAP_POSIX_LOCK,
	.init_client = nfs40_init_client,
	.shutdown_client = nfs40_shutdown_client,
	.match_stateid = nfs4_match_stateid,
	.find_root_sec = nfs4_find_root_sec,
	/* v4.0 has no FREE_STATEID; locks are released via RELEASE_LOCKOWNER */
	.free_lock_state = nfs4_release_lockowner,
	.test_and_free_expired = nfs40_test_and_free_expired_stateid,
	.alloc_seqid = nfs_alloc_seqid,
	.call_sync_ops = &nfs40_call_sync_ops,
	.reboot_recovery_ops = &nfs40_reboot_recovery_ops,
	.nograce_recovery_ops = &nfs40_nograce_recovery_ops,
	.state_renewal_ops = &nfs40_state_renewal_ops,
	.mig_recovery_ops = &nfs40_mig_recovery_ops,
};
|
|
|
|
|
|
|
|
#if defined(CONFIG_NFS_V4_1)
/*
 * NFSv4.1+ do not use open/lock seqid counters: the alloc_seqid hook
 * always yields NULL (both arguments are ignored).
 */
static struct nfs_seqid *
nfs_alloc_no_seqid(struct nfs_seqid_counter *arg1, gfp_t arg2)
{
	return NULL;
}
|
|
|
|
|
2010-06-16 13:52:26 +00:00
|
|
|
/* Minor-version dispatch table for NFSv4.1 */
static const struct nfs4_minor_version_ops nfs_v4_1_minor_ops = {
	.minor_version = 1,
	.init_caps = NFS_CAP_READDIRPLUS
		| NFS_CAP_ATOMIC_OPEN
		| NFS_CAP_POSIX_LOCK
		| NFS_CAP_STATEID_NFSV41
		| NFS_CAP_ATOMIC_OPEN_V1
		| NFS_CAP_LGOPEN
		| NFS_CAP_MOVEABLE,
	.init_client = nfs41_init_client,
	.shutdown_client = nfs41_shutdown_client,
	.match_stateid = nfs41_match_stateid,
	.find_root_sec = nfs41_find_root_sec,
	.free_lock_state = nfs41_free_lock_state,
	.test_and_free_expired = nfs41_test_and_free_expired_stateid,
	.alloc_seqid = nfs_alloc_no_seqid,	/* sessions: seqids unused */
	.session_trunk = nfs4_test_session_trunk,
	.call_sync_ops = &nfs41_call_sync_ops,
	.reboot_recovery_ops = &nfs41_reboot_recovery_ops,
	.nograce_recovery_ops = &nfs41_nograce_recovery_ops,
	.state_renewal_ops = &nfs41_state_renewal_ops,
	.mig_recovery_ops = &nfs41_mig_recovery_ops,
};
#endif
|
|
|
|
|
2013-05-22 16:50:38 +00:00
|
|
|
#if defined(CONFIG_NFS_V4_2)
|
|
|
|
static const struct nfs4_minor_version_ops nfs_v4_2_minor_ops = {
|
|
|
|
.minor_version = 2,
|
2013-06-19 17:41:43 +00:00
|
|
|
.init_caps = NFS_CAP_READDIRPLUS
|
|
|
|
| NFS_CAP_ATOMIC_OPEN
|
|
|
|
| NFS_CAP_POSIX_LOCK
|
|
|
|
| NFS_CAP_STATEID_NFSV41
|
2014-10-22 19:53:10 +00:00
|
|
|
| NFS_CAP_ATOMIC_OPEN_V1
|
2016-10-04 19:26:41 +00:00
|
|
|
| NFS_CAP_LGOPEN
|
2014-11-25 18:18:15 +00:00
|
|
|
| NFS_CAP_ALLOCATE
|
2013-05-21 20:53:03 +00:00
|
|
|
| NFS_CAP_COPY
|
2018-07-09 19:13:29 +00:00
|
|
|
| NFS_CAP_OFFLOAD_CANCEL
|
2019-06-04 20:14:30 +00:00
|
|
|
| NFS_CAP_COPY_NOTIFY
|
2014-11-25 18:18:16 +00:00
|
|
|
| NFS_CAP_DEALLOCATE
|
2015-06-27 15:45:46 +00:00
|
|
|
| NFS_CAP_SEEK
|
2015-09-25 18:24:35 +00:00
|
|
|
| NFS_CAP_LAYOUTSTATS
|
2019-02-08 15:31:05 +00:00
|
|
|
| NFS_CAP_CLONE
|
2014-05-28 17:41:22 +00:00
|
|
|
| NFS_CAP_LAYOUTERROR
|
2022-05-25 16:12:59 +00:00
|
|
|
| NFS_CAP_READ_PLUS
|
|
|
|
| NFS_CAP_MOVEABLE,
|
2013-08-09 16:49:11 +00:00
|
|
|
.init_client = nfs41_init_client,
|
|
|
|
.shutdown_client = nfs41_shutdown_client,
|
2013-05-22 16:50:38 +00:00
|
|
|
.match_stateid = nfs41_match_stateid,
|
|
|
|
.find_root_sec = nfs41_find_root_sec,
|
2013-06-19 17:41:43 +00:00
|
|
|
.free_lock_state = nfs41_free_lock_state,
|
2013-08-09 16:48:27 +00:00
|
|
|
.call_sync_ops = &nfs41_call_sync_ops,
|
2016-09-22 17:38:59 +00:00
|
|
|
.test_and_free_expired = nfs41_test_and_free_expired_stateid,
|
2015-01-24 00:19:25 +00:00
|
|
|
.alloc_seqid = nfs_alloc_no_seqid,
|
2016-09-09 13:22:29 +00:00
|
|
|
.session_trunk = nfs4_test_session_trunk,
|
2013-05-22 16:50:38 +00:00
|
|
|
.reboot_recovery_ops = &nfs41_reboot_recovery_ops,
|
|
|
|
.nograce_recovery_ops = &nfs41_nograce_recovery_ops,
|
|
|
|
.state_renewal_ops = &nfs41_state_renewal_ops,
|
NFS: Fix a NULL pointer dereference of migration recovery ops for v4.2 client
---Steps to Reproduce--
<nfs-server>
# cat /etc/exports
/nfs/referal *(rw,insecure,no_subtree_check,no_root_squash,crossmnt)
/nfs/old *(ro,insecure,subtree_check,root_squash,crossmnt)
<nfs-client>
# mount -t nfs nfs-server:/nfs/ /mnt/
# ll /mnt/*/
<nfs-server>
# cat /etc/exports
/nfs/referal *(rw,insecure,no_subtree_check,no_root_squash,crossmnt,refer=/nfs/old/@nfs-server)
/nfs/old *(ro,insecure,subtree_check,root_squash,crossmnt)
# service nfs restart
<nfs-client>
# ll /mnt/*/ --->>>>> oops here
[ 5123.102925] BUG: unable to handle kernel NULL pointer dereference at (null)
[ 5123.103363] IP: [<ffffffffa03ed38b>] nfs4_proc_get_locations+0x9b/0x120 [nfsv4]
[ 5123.103752] PGD 587b9067 PUD 3cbf5067 PMD 0
[ 5123.104131] Oops: 0000 [#1]
[ 5123.104529] Modules linked in: nfsv4(OE) nfs(OE) fscache(E) nfsd(OE) xfs libcrc32c iscsi_tcp libiscsi_tcp libiscsi scsi_transport_iscsi coretemp crct10dif_pclmul crc32_pclmul crc32c_intel ghash_clmulni_intel ppdev vmw_balloon parport_pc parport i2c_piix4 shpchp auth_rpcgss nfs_acl vmw_vmci lockd grace sunrpc vmwgfx drm_kms_helper ttm drm mptspi serio_raw scsi_transport_spi e1000 mptscsih mptbase ata_generic pata_acpi [last unloaded: nfsd]
[ 5123.105887] CPU: 0 PID: 15853 Comm: ::1-manager Tainted: G OE 4.2.0-rc6+ #214
[ 5123.106358] Hardware name: VMware, Inc. VMware Virtual Platform/440BX Desktop Reference Platform, BIOS 6.00 05/20/2014
[ 5123.106860] task: ffff88007620f300 ti: ffff88005877c000 task.ti: ffff88005877c000
[ 5123.107363] RIP: 0010:[<ffffffffa03ed38b>] [<ffffffffa03ed38b>] nfs4_proc_get_locations+0x9b/0x120 [nfsv4]
[ 5123.107909] RSP: 0018:ffff88005877fdb8 EFLAGS: 00010246
[ 5123.108435] RAX: ffff880053f3bc00 RBX: ffff88006ce6c908 RCX: ffff880053a0d240
[ 5123.108968] RDX: ffffea0000e6d940 RSI: ffff8800399a0000 RDI: ffff88006ce6c908
[ 5123.109503] RBP: ffff88005877fe28 R08: ffffffff81c708a0 R09: 0000000000000000
[ 5123.110045] R10: 00000000000001a2 R11: ffff88003ba7f5c8 R12: ffff880054c55800
[ 5123.110618] R13: 0000000000000000 R14: ffff880053a0d240 R15: ffff880053a0d240
[ 5123.111169] FS: 0000000000000000(0000) GS:ffffffff81c27000(0000) knlGS:0000000000000000
[ 5123.111726] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
[ 5123.112286] CR2: 0000000000000000 CR3: 0000000054cac000 CR4: 00000000001406f0
[ 5123.112888] Stack:
[ 5123.113458] ffffea0000e6d940 ffff8800399a0000 00000000000167d0 0000000000000000
[ 5123.114049] 0000000000000000 0000000000000000 0000000000000000 00000000a7ec82c6
[ 5123.114662] ffff88005877fe18 ffffea0000e6d940 ffff8800399a0000 ffff880054c55800
[ 5123.115264] Call Trace:
[ 5123.115868] [<ffffffffa03fb44b>] nfs4_try_migration+0xbb/0x220 [nfsv4]
[ 5123.116487] [<ffffffffa03fcb3b>] nfs4_run_state_manager+0x4ab/0x7b0 [nfsv4]
[ 5123.117104] [<ffffffffa03fc690>] ? nfs4_do_reclaim+0x510/0x510 [nfsv4]
[ 5123.117813] [<ffffffff810a4527>] kthread+0xd7/0xf0
[ 5123.118456] [<ffffffff810a4450>] ? kthread_worker_fn+0x160/0x160
[ 5123.119108] [<ffffffff816d9cdf>] ret_from_fork+0x3f/0x70
[ 5123.119723] [<ffffffff810a4450>] ? kthread_worker_fn+0x160/0x160
[ 5123.120329] Code: 4c 8b 6a 58 74 17 eb 52 48 8d 55 a8 89 c6 4c 89 e7 e8 4a b5 ff ff 8b 45 b0 85 c0 74 1c 4c 89 f9 48 8b 55 90 48 8b 75 98 48 89 df <41> ff 55 00 3d e8 d8 ff ff 41 89 c6 74 cf 48 8b 4d c8 65 48 33
[ 5123.121643] RIP [<ffffffffa03ed38b>] nfs4_proc_get_locations+0x9b/0x120 [nfsv4]
[ 5123.122308] RSP <ffff88005877fdb8>
[ 5123.122942] CR2: 0000000000000000
Fixes: ec011fe847 ("NFS: Introduce a vector of migration recovery ops")
Cc: stable@vger.kernel.org # v3.13+
Signed-off-by: Kinglong Mee <kinglongmee@gmail.com>
Signed-off-by: Trond Myklebust <trond.myklebust@primarydata.com>
2015-08-15 13:52:10 +00:00
|
|
|
.mig_recovery_ops = &nfs41_mig_recovery_ops,
|
2013-05-22 16:50:38 +00:00
|
|
|
};
|
|
|
|
#endif
|
|
|
|
|
2010-06-16 13:52:26 +00:00
|
|
|
/* Dispatch tables indexed by NFSv4 minor version number */
const struct nfs4_minor_version_ops *nfs_v4_minor_ops[] = {
	[0] = &nfs_v4_0_minor_ops,
#if defined(CONFIG_NFS_V4_1)
	[1] = &nfs_v4_1_minor_ops,
#endif
#if defined(CONFIG_NFS_V4_2)
	[2] = &nfs_v4_2_minor_ops,
#endif
};
|
|
|
|
|
2016-07-24 21:10:52 +00:00
|
|
|
/*
 * Concatenate the generic, NFSv4 label, and NFSv4 "user." xattr name
 * lists into @list.  Each helper is bounded by the space remaining
 * (@left), so the buffer is never overrun even when the total exceeds
 * @size.  The full required length is still accumulated: with a
 * nonzero @size that is too small the result is -ERANGE, while a zero
 * @size (NULL @list) returns the length needed, per the listxattr
 * contract.
 */
static ssize_t nfs4_listxattr(struct dentry *dentry, char *list, size_t size)
{
	ssize_t error, error2, error3;
	size_t left = size;

	error = generic_listxattr(dentry, list, left);
	if (error < 0)
		return error;
	if (list) {
		list += error;
		left -= error;
	}

	error2 = nfs4_listxattr_nfs4_label(d_inode(dentry), list, left);
	if (error2 < 0)
		return error2;

	if (list) {
		list += error2;
		left -= error2;
	}

	error3 = nfs4_listxattr_nfs4_user(d_inode(dentry), list, left);
	if (error3 < 0)
		return error3;

	error += error2 + error3;
	if (size && error > size)
		return -ERANGE;
	return error;
}
|
|
|
|
|
2022-03-06 23:41:44 +00:00
|
|
|
/* Hook invoked when a file on this mount is activated as swap space. */
static void nfs4_enable_swap(struct inode *inode)
{
	/* The state manager thread must always be running.
	 * It will notice the client is a swapper, and stay put.
	 */
	struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;

	nfs4_schedule_state_manager(clp);
}
|
|
|
|
|
|
|
|
/* Hook invoked when swap-over-NFS is turned off for this mount. */
static void nfs4_disable_swap(struct inode *inode)
{
	/* The state manager thread will now exit once it is
	 * woken.
	 */
	struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;

	/* Set RUN_MANAGER so the thread wakes, clear MANAGER_AVAILABLE so
	 * it no longer advertises itself, then wake it to let it exit. */
	set_bit(NFS4CLNT_RUN_MANAGER, &clp->cl_state);
	clear_bit(NFS4CLNT_MANAGER_AVAILABLE, &clp->cl_state);
	wake_up_var(&clp->cl_state);
}
|
|
|
|
|
2013-08-21 19:48:42 +00:00
|
|
|
/* Directory inode operations for NFSv4 */
static const struct inode_operations nfs4_dir_inode_operations = {
	.create		= nfs_create,
	.lookup		= nfs_lookup,
	.atomic_open	= nfs_atomic_open,
	.link		= nfs_link,
	.unlink		= nfs_unlink,
	.symlink	= nfs_symlink,
	.mkdir		= nfs_mkdir,
	.rmdir		= nfs_rmdir,
	.mknod		= nfs_mknod,
	.rename		= nfs_rename,
	.permission	= nfs_permission,
	.getattr	= nfs_getattr,
	.setattr	= nfs_setattr,
	.listxattr	= nfs4_listxattr,
};
|
|
|
|
|
2007-02-12 08:55:39 +00:00
|
|
|
/* Regular-file inode operations for NFSv4 */
static const struct inode_operations nfs4_file_inode_operations = {
	.permission	= nfs_permission,
	.getattr	= nfs_getattr,
	.setattr	= nfs_setattr,
	.listxattr	= nfs4_listxattr,
};
|
|
|
|
|
2006-08-23 00:06:11 +00:00
|
|
|
/* Top-level NFSv4 client RPC operations vector (exported to nfs core) */
const struct nfs_rpc_ops nfs_v4_clientops = {
	.version	= 4,			/* protocol version */
	.dentry_ops	= &nfs4_dentry_operations,
	.dir_inode_ops	= &nfs4_dir_inode_operations,
	.file_inode_ops	= &nfs4_file_inode_operations,
	.file_ops	= &nfs4_file_operations,
	.getroot	= nfs4_proc_get_root,
	.submount	= nfs4_submount,
	.try_get_tree	= nfs4_try_get_tree,
	.getattr	= nfs4_proc_getattr,
	.setattr	= nfs4_proc_setattr,
	.lookup		= nfs4_proc_lookup,
	.lookupp	= nfs4_proc_lookupp,
	.access		= nfs4_proc_access,
	.readlink	= nfs4_proc_readlink,
	.create		= nfs4_proc_create,
	.remove		= nfs4_proc_remove,
	.unlink_setup	= nfs4_proc_unlink_setup,
	.unlink_rpc_prepare = nfs4_proc_unlink_rpc_prepare,
	.unlink_done	= nfs4_proc_unlink_done,
	.rename_setup	= nfs4_proc_rename_setup,
	.rename_rpc_prepare = nfs4_proc_rename_rpc_prepare,
	.rename_done	= nfs4_proc_rename_done,
	.link		= nfs4_proc_link,
	.symlink	= nfs4_proc_symlink,
	.mkdir		= nfs4_proc_mkdir,
	.rmdir		= nfs4_proc_rmdir,
	.readdir	= nfs4_proc_readdir,
	.mknod		= nfs4_proc_mknod,
	.statfs		= nfs4_proc_statfs,
	.fsinfo		= nfs4_proc_fsinfo,
	.pathconf	= nfs4_proc_pathconf,
	.set_capabilities = nfs4_server_capabilities,
	.decode_dirent	= nfs4_decode_dirent,
	.pgio_rpc_prepare = nfs4_proc_pgio_rpc_prepare,
	.read_setup	= nfs4_proc_read_setup,
	.read_done	= nfs4_read_done,
	.write_setup	= nfs4_proc_write_setup,
	.write_done	= nfs4_write_done,
	.commit_setup	= nfs4_proc_commit_setup,
	.commit_rpc_prepare = nfs4_proc_commit_rpc_prepare,
	.commit_done	= nfs4_commit_done,
	.lock		= nfs4_proc_lock,
	.clear_acl_cache = nfs4_zap_acl_attr,
	.close_context	= nfs4_close_context,
	.open_context	= nfs4_atomic_open,
	.have_delegation = nfs4_have_delegation,
	.alloc_client	= nfs4_alloc_client,
	.init_client	= nfs4_init_client,
	.free_client	= nfs4_free_client,
	.create_server	= nfs4_create_server,
	.clone_server	= nfs_clone_server,
	.discover_trunking = nfs4_discover_trunking,
	.enable_swap	= nfs4_enable_swap,
	.disable_swap	= nfs4_disable_swap,
};
|
|
|
|
|
2010-12-09 11:35:25 +00:00
|
|
|
/* xattr handler for the "system.nfs4_acl" attribute */
static const struct xattr_handler nfs4_xattr_nfs4_acl_handler = {
	.name	= XATTR_NAME_NFSV4_ACL,
	.list	= nfs4_xattr_list_nfs4_acl,
	.get	= nfs4_xattr_get_nfs4_acl,
	.set	= nfs4_xattr_set_nfs4_acl,
};
|
|
|
|
|
2022-05-14 14:37:00 +00:00
|
|
|
#if defined(CONFIG_NFS_V4_1)
|
|
|
|
static const struct xattr_handler nfs4_xattr_nfs4_dacl_handler = {
|
|
|
|
.name = XATTR_NAME_NFSV4_DACL,
|
|
|
|
.list = nfs4_xattr_list_nfs4_dacl,
|
|
|
|
.get = nfs4_xattr_get_nfs4_dacl,
|
|
|
|
.set = nfs4_xattr_set_nfs4_dacl,
|
|
|
|
};
|
|
|
|
|
|
|
|
static const struct xattr_handler nfs4_xattr_nfs4_sacl_handler = {
|
|
|
|
.name = XATTR_NAME_NFSV4_SACL,
|
|
|
|
.list = nfs4_xattr_list_nfs4_sacl,
|
|
|
|
.get = nfs4_xattr_get_nfs4_sacl,
|
|
|
|
.set = nfs4_xattr_set_nfs4_sacl,
|
|
|
|
};
|
|
|
|
#endif
|
|
|
|
|
2020-06-23 22:39:03 +00:00
|
|
|
#ifdef CONFIG_NFS_V4_2
|
|
|
|
static const struct xattr_handler nfs4_xattr_nfs4_user_handler = {
|
|
|
|
.prefix = XATTR_USER_PREFIX,
|
|
|
|
.get = nfs4_xattr_get_nfs4_user,
|
|
|
|
.set = nfs4_xattr_set_nfs4_user,
|
|
|
|
};
|
|
|
|
#endif
|
|
|
|
|
2023-09-30 05:00:22 +00:00
|
|
|
/* NULL-terminated table of all NFSv4 xattr handlers */
const struct xattr_handler * const nfs4_xattr_handlers[] = {
	&nfs4_xattr_nfs4_acl_handler,
#if defined(CONFIG_NFS_V4_1)
	&nfs4_xattr_nfs4_dacl_handler,
	&nfs4_xattr_nfs4_sacl_handler,
#endif
#ifdef CONFIG_NFS_V4_SECURITY_LABEL
	&nfs4_xattr_nfs4_label_handler,
#endif
#ifdef CONFIG_NFS_V4_2
	&nfs4_xattr_nfs4_user_handler,
#endif
	NULL
};
|