Commit 18bce371 authored by Linus Torvalds

Merge branch 'for-2.6.38' of git://linux-nfs.org/~bfields/linux

* 'for-2.6.38' of git://linux-nfs.org/~bfields/linux: (62 commits)
  nfsd4: fix callback restarting
  nfsd: break lease on unlink, link, and rename
  nfsd4: break lease on nfsd setattr
  nfsd: don't support msnfs export option
  nfsd4: initialize cb_per_client
  nfsd4: allow restarting callbacks
  nfsd4: simplify nfsd4_cb_prepare
  nfsd4: give out delegations more quickly in 4.1 case
  nfsd4: add helper function to run callbacks
  nfsd4: make sure sequence flags are set after destroy_session
  nfsd4: re-probe callback on connection loss
  nfsd4: set sequence flag when backchannel is down
  nfsd4: keep finer-grained callback status
  rpc: allow xprt_class->setup to return a preexisting xprt
  rpc: keep backchannel xprt as long as server connection
  rpc: move sk_bc_xprt to svc_xprt
  nfsd4: allow backchannel recovery
  nfsd4: support BIND_CONN_TO_SESSION
  nfsd4: modify session list under cl_lock
  Documentation: fl_mylease no longer exists
  ...

Fix up conflicts in fs/nfsd/vfs.c with the vfs-scale work.  The
vfs-scale work touched some msnfs cases, and this merge removes support
for that entirely, so the conflict was trivial to resolve.
parents ec08bdb1 a8f2800b
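Two of the merged commits ("nfsd: break lease on unlink, link, and rename" and "nfsd4: break lease on nfsd setattr") make nfsd recall outstanding leases and NFSv4 delegations before it changes a file. A minimal sketch of that pattern follows; the helper name is hypothetical and the O_WRONLY break mode is an assumption, only break_lease() itself is the real VFS interface.

#include <linux/fs.h>

/* Hypothetical helper, not the actual nfsd change: recall any lease or
 * delegation held on @inode before a metadata-changing operation such as
 * unlink, rename or setattr.  O_WRONLY is assumed here so that both read
 * and write leases conflict with the pending change; break_lease()
 * returns 0 once any conflicting lease has been broken, or a negative
 * errno on failure. */
static int example_break_lease_before_change(struct inode *inode)
{
	return break_lease(inode, O_WRONLY);
}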
@@ -343,7 +343,6 @@ prototypes:
int (*fl_grant)(struct file_lock *, struct file_lock *, int);
void (*fl_release_private)(struct file_lock *);
void (*fl_break)(struct file_lock *); /* break_lease callback */
int (*fl_mylease)(struct file_lock *, struct file_lock *);
int (*fl_change)(struct file_lock **, int);
locking rules:
@@ -353,7 +352,6 @@ fl_notify: yes no
fl_grant: no no
fl_release_private: maybe no
fl_break: yes no
fl_mylease: yes no
fl_change yes no
--------------------------- buffer_head -----------------------------------
......
@@ -444,15 +444,9 @@ static void lease_release_private_callback(struct file_lock *fl)
fl->fl_file->f_owner.signum = 0;
}
static int lease_mylease_callback(struct file_lock *fl, struct file_lock *try)
{
return fl->fl_file == try->fl_file;
}
static const struct lock_manager_operations lease_manager_ops = {
.fl_break = lease_break_callback,
.fl_release_private = lease_release_private_callback,
.fl_mylease = lease_mylease_callback,
.fl_change = lease_modify,
};
@@ -1405,7 +1399,7 @@ int generic_setlease(struct file *filp, long arg, struct file_lock **flp)
for (before = &inode->i_flock;
((fl = *before) != NULL) && IS_LEASE(fl);
before = &fl->fl_next) {
if (lease->fl_lmops->fl_mylease(fl, lease))
if (fl->fl_file == filp)
my_before = before;
else if (fl->fl_type == (F_INPROGRESS | F_UNLCK))
/*
......
/*
* include/linux/nfs4_acl.c
*
* Common NFSv4 ACL handling definitions.
*
* Copyright (c) 2002 The Regents of the University of Michigan.
......
#define MSNFS /* HACK HACK */
/*
* NFS exporting and validation.
*
@@ -1444,9 +1443,6 @@ static struct flags {
{ NFSEXP_NOSUBTREECHECK, {"no_subtree_check", ""}},
{ NFSEXP_NOAUTHNLM, {"insecure_locks", ""}},
{ NFSEXP_V4ROOT, {"v4root", ""}},
#ifdef MSNFS
{ NFSEXP_MSNFS, {"msnfs", ""}},
#endif
{ 0, {"", ""}}
};
......
/*
* include/linux/nfsd_idmap.h
*
* Mapping of UID to name and vice versa.
*
* Copyright (c) 2002, 2003 The Regents of the University of
@@ -56,8 +54,8 @@ static inline void nfsd_idmap_shutdown(void)
}
#endif
int nfsd_map_name_to_uid(struct svc_rqst *, const char *, size_t, __u32 *);
int nfsd_map_name_to_gid(struct svc_rqst *, const char *, size_t, __u32 *);
__be32 nfsd_map_name_to_uid(struct svc_rqst *, const char *, size_t, __u32 *);
__be32 nfsd_map_name_to_gid(struct svc_rqst *, const char *, size_t, __u32 *);
int nfsd_map_uid_to_name(struct svc_rqst *, __u32, char *);
int nfsd_map_gid_to_name(struct svc_rqst *, __u32, char *);
......
@@ -151,10 +151,10 @@ nfsd3_proc_read(struct svc_rqst *rqstp, struct nfsd3_readargs *argp,
__be32 nfserr;
u32 max_blocksize = svc_max_payload(rqstp);
dprintk("nfsd: READ(3) %s %lu bytes at %lu\n",
dprintk("nfsd: READ(3) %s %lu bytes at %Lu\n",
SVCFH_fmt(&argp->fh),
(unsigned long) argp->count,
(unsigned long) argp->offset);
(unsigned long long) argp->offset);
/* Obtain buffer pointer for payload.
* 1 (status) + 22 (post_op_attr) + 1 (count) + 1 (eof)
@@ -191,10 +191,10 @@ nfsd3_proc_write(struct svc_rqst *rqstp, struct nfsd3_writeargs *argp,
__be32 nfserr;
unsigned long cnt = argp->len;
dprintk("nfsd: WRITE(3) %s %d bytes at %ld%s\n",
dprintk("nfsd: WRITE(3) %s %d bytes at %Lu%s\n",
SVCFH_fmt(&argp->fh),
argp->len,
(unsigned long) argp->offset,
(unsigned long long) argp->offset,
argp->stable? " stable" : "");
fh_copy(&resp->fh, &argp->fh);
......
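The two dprintk changes above swap %lu plus an (unsigned long) cast for %Lu plus an (unsigned long long) cast: argp->offset is a 64-bit quantity, and the old form silently truncates it on 32-bit machines. A tiny stand-alone illustration of the difference (plain user-space C, nothing nfsd-specific):

#include <stdio.h>

int main(void)
{
	unsigned long long offset = 0x100000000ULL;	/* 4 GiB, does not fit in a 32-bit long */

	/* Old style: on a 32-bit system this truncates and prints 0. */
	printf("as unsigned long:      %lu\n", (unsigned long) offset);
	/* New style: prints the full value, 4294967296, everywhere. */
	printf("as unsigned long long: %llu\n", (unsigned long long) offset);
	return 0;
}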
@@ -36,7 +36,7 @@
#include <linux/slab.h>
#include <linux/nfs_fs.h>
#include <linux/nfs4_acl.h>
#include "acl.h"
/* mode bit translations: */
......
@@ -628,10 +628,8 @@ static int max_cb_time(void)
return max(nfsd4_lease/10, (time_t)1) * HZ;
}
/* Reference counting, callback cleanup, etc., all look racy as heck.
* And why is cl_cb_set an atomic? */
int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *conn)
static int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *conn, struct nfsd4_session *ses)
{
struct rpc_timeout timeparms = {
.to_initval = max_cb_time(),
@@ -641,6 +639,7 @@ int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *conn)
.net = &init_net,
.address = (struct sockaddr *) &conn->cb_addr,
.addrsize = conn->cb_addrlen,
.saddress = (struct sockaddr *) &conn->cb_saddr,
.timeout = &timeparms,
.program = &cb_program,
.version = 0,
@@ -657,6 +656,10 @@ int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *conn)
args.protocol = XPRT_TRANSPORT_TCP;
clp->cl_cb_ident = conn->cb_ident;
} else {
if (!conn->cb_xprt)
return -EINVAL;
clp->cl_cb_conn.cb_xprt = conn->cb_xprt;
clp->cl_cb_session = ses;
args.bc_xprt = conn->cb_xprt;
args.prognumber = clp->cl_cb_session->se_cb_prog;
args.protocol = XPRT_TRANSPORT_BC_TCP;
@@ -679,14 +682,20 @@ static void warn_no_callback_path(struct nfs4_client *clp, int reason)
(int)clp->cl_name.len, clp->cl_name.data, reason);
}
static void nfsd4_mark_cb_down(struct nfs4_client *clp, int reason)
{
clp->cl_cb_state = NFSD4_CB_DOWN;
warn_no_callback_path(clp, reason);
}
static void nfsd4_cb_probe_done(struct rpc_task *task, void *calldata)
{
struct nfs4_client *clp = container_of(calldata, struct nfs4_client, cl_cb_null);
if (task->tk_status)
warn_no_callback_path(clp, task->tk_status);
nfsd4_mark_cb_down(clp, task->tk_status);
else
atomic_set(&clp->cl_cb_set, 1);
clp->cl_cb_state = NFSD4_CB_UP;
}
static const struct rpc_call_ops nfsd4_cb_probe_ops = {
@@ -709,6 +718,11 @@ int set_callback_cred(void)
static struct workqueue_struct *callback_wq;
static void run_nfsd4_cb(struct nfsd4_callback *cb)
{
queue_work(callback_wq, &cb->cb_work);
}
static void do_probe_callback(struct nfs4_client *clp)
{
struct nfsd4_callback *cb = &clp->cl_cb_null;
@@ -723,7 +737,7 @@ static void do_probe_callback(struct nfs4_client *clp)
cb->cb_ops = &nfsd4_cb_probe_ops;
queue_work(callback_wq, &cb->cb_work);
run_nfsd4_cb(cb);
}
/*
@@ -732,14 +746,21 @@ static void do_probe_callback(struct nfs4_client *clp)
*/
void nfsd4_probe_callback(struct nfs4_client *clp)
{
/* XXX: atomicity? Also, should we be using cl_cb_flags? */
clp->cl_cb_state = NFSD4_CB_UNKNOWN;
set_bit(NFSD4_CLIENT_CB_UPDATE, &clp->cl_cb_flags);
do_probe_callback(clp);
}
void nfsd4_change_callback(struct nfs4_client *clp, struct nfs4_cb_conn *conn)
void nfsd4_probe_callback_sync(struct nfs4_client *clp)
{
BUG_ON(atomic_read(&clp->cl_cb_set));
nfsd4_probe_callback(clp);
flush_workqueue(callback_wq);
}
void nfsd4_change_callback(struct nfs4_client *clp, struct nfs4_cb_conn *conn)
{
clp->cl_cb_state = NFSD4_CB_UNKNOWN;
spin_lock(&clp->cl_lock);
memcpy(&clp->cl_cb_conn, conn, sizeof(struct nfs4_cb_conn));
spin_unlock(&clp->cl_lock);
@@ -750,24 +771,14 @@ void nfsd4_change_callback(struct nfs4_client *clp, struct nfs4_cb_conn *conn)
* If the slot is available, then mark it busy. Otherwise, set the
* thread for sleeping on the callback RPC wait queue.
*/
static int nfsd41_cb_setup_sequence(struct nfs4_client *clp,
struct rpc_task *task)
static bool nfsd41_cb_get_slot(struct nfs4_client *clp, struct rpc_task *task)
{
u32 *ptr = (u32 *)clp->cl_cb_session->se_sessionid.data;
int status = 0;
dprintk("%s: %u:%u:%u:%u\n", __func__,
ptr[0], ptr[1], ptr[2], ptr[3]);
if (test_and_set_bit(0, &clp->cl_cb_slot_busy) != 0) {
rpc_sleep_on(&clp->cl_cb_waitq, task, NULL);
dprintk("%s slot is busy\n", __func__);
status = -EAGAIN;
goto out;
return false;
}
out:
dprintk("%s status=%d\n", __func__, status);
return status;
return true;
}
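Pieced together from the fragments above, the slot helper that replaces nfsd41_cb_setup_sequence ends up shaped roughly like this (a sketch reconstructed from the hunk, not quoted from the tree): it claims the single NFSv4.1 backchannel slot with test_and_set_bit(), and if the slot is busy it parks the RPC task on the callback wait queue and reports failure so the caller can simply return.

static bool nfsd41_cb_get_slot(struct nfs4_client *clp, struct rpc_task *task)
{
	/* Slot already busy: sleep on the callback wait queue; the task is
	 * woken and retried once the current holder releases the slot. */
	if (test_and_set_bit(0, &clp->cl_cb_slot_busy) != 0) {
		rpc_sleep_on(&clp->cl_cb_waitq, task, NULL);
		dprintk("%s slot is busy\n", __func__);
		return false;
	}
	return true;
}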
/*
@@ -780,20 +791,19 @@ static void nfsd4_cb_prepare(struct rpc_task *task, void *calldata)
struct nfs4_delegation *dp = container_of(cb, struct nfs4_delegation, dl_recall);
struct nfs4_client *clp = dp->dl_client;
u32 minorversion = clp->cl_minorversion;
int status = 0;
cb->cb_minorversion = minorversion;
if (minorversion) {
status = nfsd41_cb_setup_sequence(clp, task);
if (status) {
if (status != -EAGAIN) {
/* terminate rpc task */
task->tk_status = status;
task->tk_action = NULL;
}
if (!nfsd41_cb_get_slot(clp, task))
return;
}
}
spin_lock(&clp->cl_lock);
if (list_empty(&cb->cb_per_client)) {
/* This is the first call, not a restart */
cb->cb_done = false;
list_add(&cb->cb_per_client, &clp->cl_callbacks);
}
spin_unlock(&clp->cl_lock);
rpc_call_start(task);
}
@@ -829,15 +839,18 @@ static void nfsd4_cb_recall_done(struct rpc_task *task, void *calldata)
nfsd4_cb_done(task, calldata);
if (current_rpc_client == NULL) {
/* We're shutting down; give up. */
/* XXX: err, or is it ok just to fall through
* and rpc_restart_call? */
if (current_rpc_client != task->tk_client) {
/* We're shutting down or changing cl_cb_client; leave
* it to nfsd4_process_cb_update to restart the call if
* necessary. */
return;
}
if (cb->cb_done)
return;
switch (task->tk_status) {
case 0:
cb->cb_done = true;
return;
case -EBADHANDLE:
case -NFS4ERR_BAD_STATEID:
@@ -846,32 +859,30 @@ static void nfsd4_cb_recall_done(struct rpc_task *task, void *calldata)
break;
default:
/* Network partition? */
atomic_set(&clp->cl_cb_set, 0);
warn_no_callback_path(clp, task->tk_status);
if (current_rpc_client != task->tk_client) {
/* queue a callback on the new connection: */
atomic_inc(&dp->dl_count);
nfsd4_cb_recall(dp);
return;
}
nfsd4_mark_cb_down(clp, task->tk_status);
}
if (dp->dl_retries--) {
rpc_delay(task, 2*HZ);
task->tk_status = 0;
rpc_restart_call_prepare(task);
return;
} else {
atomic_set(&clp->cl_cb_set, 0);
warn_no_callback_path(clp, task->tk_status);
}
nfsd4_mark_cb_down(clp, task->tk_status);
cb->cb_done = true;
}
static void nfsd4_cb_recall_release(void *calldata)
{
struct nfsd4_callback *cb = calldata;
struct nfs4_client *clp = cb->cb_clp;
struct nfs4_delegation *dp = container_of(cb, struct nfs4_delegation, dl_recall);
nfs4_put_delegation(dp);
if (cb->cb_done) {
spin_lock(&clp->cl_lock);
list_del(&cb->cb_per_client);
spin_unlock(&clp->cl_lock);
nfs4_put_delegation(dp);
}
}
static const struct rpc_call_ops nfsd4_cb_recall_ops = {
@@ -906,16 +917,33 @@ void nfsd4_shutdown_callback(struct nfs4_client *clp)
flush_workqueue(callback_wq);
}
void nfsd4_release_cb(struct nfsd4_callback *cb)
static void nfsd4_release_cb(struct nfsd4_callback *cb)
{
if (cb->cb_ops->rpc_release)
cb->cb_ops->rpc_release(cb);
}
void nfsd4_process_cb_update(struct nfsd4_callback *cb)
/* requires cl_lock: */
static struct nfsd4_conn * __nfsd4_find_backchannel(struct nfs4_client *clp)
{
struct nfsd4_session *s;
struct nfsd4_conn *c;
list_for_each_entry(s, &clp->cl_sessions, se_perclnt) {
list_for_each_entry(c, &s->se_conns, cn_persession) {
if (c->cn_flags & NFS4_CDFC4_BACK)
return c;
}
}
return NULL;
}
static void nfsd4_process_cb_update(struct nfsd4_callback *cb)
{
struct nfs4_cb_conn conn;
struct nfs4_client *clp = cb->cb_clp;
struct nfsd4_session *ses = NULL;
struct nfsd4_conn *c;
int err;
/*
@@ -926,6 +954,10 @@ void nfsd4_process_cb_update(struct nfsd4_callback *cb)
rpc_shutdown_client(clp->cl_cb_client);
clp->cl_cb_client = NULL;
}
if (clp->cl_cb_conn.cb_xprt) {
svc_xprt_put(clp->cl_cb_conn.cb_xprt);
clp->cl_cb_conn.cb_xprt = NULL;
}
if (test_bit(NFSD4_CLIENT_KILL, &clp->cl_cb_flags))
return;
spin_lock(&clp->cl_lock);
@@ -936,11 +968,22 @@ void nfsd4_process_cb_update(struct nfsd4_callback *cb)
BUG_ON(!clp->cl_cb_flags);
clear_bit(NFSD4_CLIENT_CB_UPDATE, &clp->cl_cb_flags);
memcpy(&conn, &cb->cb_clp->cl_cb_conn, sizeof(struct nfs4_cb_conn));
c = __nfsd4_find_backchannel(clp);
if (c) {
svc_xprt_get(c->cn_xprt);
conn.cb_xprt = c->cn_xprt;
ses = c->cn_session;
}
spin_unlock(&clp->cl_lock);
err = setup_callback_client(clp, &conn);
if (err)
err = setup_callback_client(clp, &conn, ses);
if (err) {
warn_no_callback_path(clp, err);
return;
}
/* Yay, the callback channel's back! Restart any callbacks: */
list_for_each_entry(cb, &clp->cl_callbacks, cb_per_client)
run_nfsd4_cb(cb);
}
void nfsd4_do_callback_rpc(struct work_struct *w)
@@ -965,10 +1008,11 @@ void nfsd4_do_callback_rpc(struct work_struct *w)
void nfsd4_cb_recall(struct nfs4_delegation *dp)
{
struct nfsd4_callback *cb = &dp->dl_recall;
struct nfs4_client *clp = dp->dl_client;
dp->dl_retries = 1;
cb->cb_op = dp;
cb->cb_clp = dp->dl_client;
cb->cb_clp = clp;
cb->cb_msg.rpc_proc = &nfs4_cb_procedures[NFSPROC4_CLNT_CB_RECALL];
cb->cb_msg.rpc_argp = cb;
cb->cb_msg.rpc_resp = cb;
@@ -977,5 +1021,8 @@ void nfsd4_cb_recall(struct nfs4_delegation *dp)
cb->cb_ops = &nfsd4_cb_recall_ops;
dp->dl_retries = 1;
queue_work(callback_wq, &dp->dl_recall.cb_work);
INIT_LIST_HEAD(&cb->cb_per_client);
cb->cb_done = true;
run_nfsd4_cb(&dp->dl_recall);
}
@@ -33,10 +33,11 @@
*/
#include <linux/module.h>
#include <linux/nfsd_idmap.h>
#include <linux/seq_file.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include "idmap.h"
#include "nfsd.h"
/*
* Cache entry
@@ -514,7 +515,7 @@ rqst_authname(struct svc_rqst *rqstp)
return clp->name;
}
static int
static __be32
idmap_name_to_id(struct svc_rqst *rqstp, int type, const char *name, u32 namelen,
uid_t *id)
{
@@ -524,15 +525,15 @@ idmap_name_to_id(struct svc_rqst *rqstp, int type, const char *name, u32 namelen
int ret;
if (namelen + 1 > sizeof(key.name))
return -EINVAL;
return nfserr_badowner;
memcpy(key.name, name, namelen);
key.name[namelen] = '\0';
strlcpy(key.authname, rqst_authname(rqstp), sizeof(key.authname));
ret = idmap_lookup(rqstp, nametoid_lookup, &key, &nametoid_cache, &item);
if (ret == -ENOENT)
ret = -ESRCH; /* nfserr_badname */
return nfserr_badowner;
if (ret)
return ret;
return nfserrno(ret);
*id = item->id;
cache_put(&item->h, &nametoid_cache);
return 0;
@@ -560,14 +561,14 @@ idmap_id_to_name(struct svc_rqst *rqstp, int type, uid_t id, char *name)
return ret;
}
int
__be32
nfsd_map_name_to_uid(struct svc_rqst *rqstp, const char *name, size_t namelen,
__u32 *id)
{
return idmap_name_to_id(rqstp, IDMAP_TYPE_USER, name, namelen, id);
}
int
__be32
nfsd_map_name_to_gid(struct svc_rqst *rqstp, const char *name, size_t namelen,
__u32 *id)
{
......
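The idmap changes above turn the name-mapping helpers into functions that return __be32 NFS status codes rather than host errnos: -EINVAL becomes nfserr_badowner, a failed lookup also maps to nfserr_badowner, and any other error is translated with nfserrno(). The sketch below (kernel context assumed) shows the idea; the helper itself is hypothetical, while nfserr_badowner, nfserrno() and nfs_ok are the real nfsd symbols used in the hunk.

/* Hypothetical illustration of the convention: kernel-internal results are
 * negative errnos (plain int), but anything that goes on the wire must be
 * a big-endian NFS status (__be32), so the boundary translates explicitly. */
static __be32 example_map_lookup_result(int ret, u32 *id, u32 value)
{
	if (ret == -ENOENT)
		return nfserr_badowner;		/* the name simply did not map */
	if (ret)
		return nfserrno(ret);		/* translate any other errno */
	*id = value;
	return nfs_ok;
}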
@@ -604,9 +604,7 @@ nfsd4_link(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
return status;
}
static __be32
nfsd4_lookupp(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
void *arg)
static __be32 nfsd4_do_lookupp(struct svc_rqst *rqstp, struct svc_fh *fh)
{
struct svc_fh tmp_fh;
__be32 ret;
@@ -615,13 +613,19 @@ nfsd4_lookupp(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
ret = exp_pseudoroot(rqstp, &tmp_fh);
if (ret)
return ret;
if (tmp_fh.fh_dentry == cstate->current_fh.fh_dentry) {
if (tmp_fh.fh_dentry == fh->fh_dentry) {
fh_put(&tmp_fh);
return nfserr_noent;
}
fh_put(&tmp_fh);
return nfsd_lookup(rqstp, &cstate->current_fh,
"..", 2, &cstate->current_fh);
return nfsd_lookup(rqstp, fh, "..", 2, fh);
}
static __be32
nfsd4_lookupp(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
void *arg)
{
return nfsd4_do_lookupp(rqstp, &cstate->current_fh);
}
static __be32
@@ -769,9 +773,35 @@ nfsd4_secinfo(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
} else
secinfo->si_exp = exp;
dput(dentry);
if (cstate->minorversion)
/* See rfc 5661 section 2.6.3.1.1.8 */
fh_put(&cstate->current_fh);
return err;
}
static __be32
nfsd4_secinfo_no_name(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
struct nfsd4_secinfo_no_name *sin)
{
__be32 err;
switch (sin->sin_style) {
case NFS4_SECINFO_STYLE4_CURRENT_FH:
break;
case NFS4_SECINFO_STYLE4_PARENT:
err = nfsd4_do_lookupp(rqstp, &cstate->current_fh);
if (err)
return err;
break;
default:
return nfserr_inval;
}
exp_get(cstate->current_fh.fh_export);
sin->sin_exp = cstate->current_fh.fh_export;
fh_put(&cstate->current_fh);
return nfs_ok;
}
static __be32
nfsd4_setattr(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
struct nfsd4_setattr *setattr)
@@ -974,8 +1004,8 @@ static const char *nfsd4_op_name(unsigned opnum);
* Also note, enforced elsewhere:
* - SEQUENCE other than as first op results in
* NFS4ERR_SEQUENCE_POS. (Enforced in nfsd4_sequence().)
* - BIND_CONN_TO_SESSION must be the only op in its compound
* (Will be enforced in nfsd4_bind_conn_to_session().)
* - BIND_CONN_TO_SESSION must be the only op in its compound.
* (Enforced in nfsd4_bind_conn_to_session().)
* - DESTROY_SESSION must be the final operation in a compound, if
* sessionid's in SEQUENCE and DESTROY_SESSION are the same.
* (Enforced in nfsd4_destroy_session().)
@@ -1126,10 +1156,6 @@ encode_op:
nfsd4_increment_op_stats(op->opnum);
}
if (!rqstp->rq_usedeferral && status == nfserr_dropit) {
dprintk("%s Dropit - send NFS4ERR_DELAY\n", __func__);
status = nfserr_jukebox;
}
resp->cstate.status = status;
fh_put(&resp->cstate.current_fh);
@@ -1300,6 +1326,11 @@ static struct nfsd4_operation nfsd4_ops[] = {
.op_flags = ALLOWED_WITHOUT_FH | ALLOWED_AS_FIRST_OP,
.op_name = "OP_EXCHANGE_ID",
},
[OP_BIND_CONN_TO_SESSION] = {
.op_func = (nfsd4op_func)nfsd4_bind_conn_to_session,
.op_flags = ALLOWED_WITHOUT_FH | ALLOWED_AS_FIRST_OP,
.op_name = "OP_BIND_CONN_TO_SESSION",
},
[OP_CREATE_SESSION] = {
.op_func = (nfsd4op_func)nfsd4_create_session,
.op_flags = ALLOWED_WITHOUT_FH | ALLOWED_AS_FIRST_OP,
@@ -1320,6 +1351,10 @@ static struct nfsd4_operation nfsd4_ops[] = {
.op_flags = ALLOWED_WITHOUT_FH,
.op_name = "OP_RECLAIM_COMPLETE",
},
[OP_SECINFO_NO_NAME] = {
.op_func = (nfsd4op_func)nfsd4_secinfo_no_name,
.op_name = "OP_SECINFO_NO_NAME",
},
};
static const char *nfsd4_op_name(unsigned opnum)
......
@@ -302,7 +302,6 @@ purge_old(struct dentry *parent, struct dentry *child)
{
int status;
/* note: we currently use this path only for minorversion 0 */
if (nfs4_has_reclaimed_state(child->d_name.name, false))
return 0;
......
@@ -230,7 +230,8 @@ alloc_init_deleg(struct nfs4_client *clp, struct nfs4_stateid *stp, struct svc_f
dp->dl_client = clp;
get_nfs4_file(fp);
dp->dl_file = fp;
nfs4_file_get_access(fp, O_RDONLY);
dp->dl_vfs_file = find_readable_file(fp);
get_file(dp->dl_vfs_file);
dp->dl_flock = NULL;
dp->dl_type = type;
dp->dl_stateid.si_boot = boot_time;
@@ -252,6 +253,7 @@
if (atomic_dec_and_test(&dp->dl_count)) {