@@ -777,13 +777,16 @@ hash_delegation_locked(struct nfs4_delegation *dp, struct nfs4_file *fp)
 	list_add(&dp->dl_perclnt, &dp->dl_stid.sc_client->cl_delegations);
 }
 
-static void
+static bool
 unhash_delegation_locked(struct nfs4_delegation *dp)
 {
 	struct nfs4_file *fp = dp->dl_stid.sc_file;
 
 	lockdep_assert_held(&state_lock);
 
+	if (list_empty(&dp->dl_perfile))
+		return false;
+
 	dp->dl_stid.sc_type = NFS4_CLOSED_DELEG_STID;
 	/* Ensure that deleg break won't try to requeue it */
 	++dp->dl_time;
@@ -792,16 +795,21 @@ unhash_delegation_locked(struct nfs4_delegation *dp)
 	list_del_init(&dp->dl_recall_lru);
 	list_del_init(&dp->dl_perfile);
 	spin_unlock(&fp->fi_lock);
+	return true;
 }
 
 static void destroy_delegation(struct nfs4_delegation *dp)
 {
+	bool unhashed;
+
 	spin_lock(&state_lock);
-	unhash_delegation_locked(dp);
+	unhashed = unhash_delegation_locked(dp);
 	spin_unlock(&state_lock);
-	put_clnt_odstate(dp->dl_clnt_odstate);
-	nfs4_put_deleg_lease(dp->dl_stid.sc_file);
-	nfs4_put_stid(&dp->dl_stid);
+	if (unhashed) {
+		put_clnt_odstate(dp->dl_clnt_odstate);
+		nfs4_put_deleg_lease(dp->dl_stid.sc_file);
+		nfs4_put_stid(&dp->dl_stid);
+	}
 }
 
 static void revoke_delegation(struct nfs4_delegation *dp)
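These two hunks, and most of the ones that follow, apply a single pattern: each unhash_*() helper now reports whether it actually removed the object from its hash or list, and callers drop the reference held by that hash only when the report is true, so two paths racing to tear down the same delegation or stateid can no longer put the same reference twice. A minimal stand-alone sketch of the pattern, with locking elided for brevity; the struct and function names (obj, unhash_obj, put_obj, destroy_obj) are invented for illustration and are not part of the patch:

#include <stdbool.h>
#include <stdlib.h>

struct obj {
	int refcount;
	bool hashed;	/* stands in for !list_empty(&o->hash_link) */
};

/* Returns true only for the one caller that actually unhashes the object;
 * in nfsd the equivalent test runs under state_lock or cl_lock. */
static bool unhash_obj(struct obj *o)
{
	if (!o->hashed)
		return false;
	o->hashed = false;
	return true;
}

static void put_obj(struct obj *o)
{
	if (--o->refcount == 0)
		free(o);
}

static void destroy_obj(struct obj *o)
{
	if (unhash_obj(o))	/* drop the hash's reference exactly once */
		put_obj(o);
}

The WARN_ON(!unhash_...()) call sites further down walk per-client or per-owner lists under the lock; an entry found on such a list should still be hashed, so those callers warn on a false return instead of silently skipping the put.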
@@ -990,6 +998,12 @@ release_all_access(struct nfs4_ol_stateid *stp)
 	}
 }
 
+static inline void nfs4_free_stateowner(struct nfs4_stateowner *sop)
+{
+	kfree(sop->so_owner.data);
+	sop->so_ops->so_free(sop);
+}
+
 static void nfs4_put_stateowner(struct nfs4_stateowner *sop)
 {
 	struct nfs4_client *clp = sop->so_client;
@@ -1000,20 +1014,23 @@ static void nfs4_put_stateowner(struct nfs4_stateowner *sop)
 		return;
 	sop->so_ops->so_unhash(sop);
 	spin_unlock(&clp->cl_lock);
-	kfree(sop->so_owner.data);
-	sop->so_ops->so_free(sop);
+	nfs4_free_stateowner(sop);
 }
 
-static void unhash_ol_stateid(struct nfs4_ol_stateid *stp)
+static bool unhash_ol_stateid(struct nfs4_ol_stateid *stp)
 {
 	struct nfs4_file *fp = stp->st_stid.sc_file;
 
 	lockdep_assert_held(&stp->st_stateowner->so_client->cl_lock);
 
+	if (list_empty(&stp->st_perfile))
+		return false;
+
 	spin_lock(&fp->fi_lock);
-	list_del(&stp->st_perfile);
+	list_del_init(&stp->st_perfile);
 	spin_unlock(&fp->fi_lock);
 	list_del(&stp->st_perstateowner);
+	return true;
 }
 
 static void nfs4_free_ol_stateid(struct nfs4_stid *stid)
@@ -1063,25 +1080,27 @@ static void put_ol_stateid_locked(struct nfs4_ol_stateid *stp,
 	list_add(&stp->st_locks, reaplist);
 }
 
-static void unhash_lock_stateid(struct nfs4_ol_stateid *stp)
+static bool unhash_lock_stateid(struct nfs4_ol_stateid *stp)
 {
 	struct nfs4_openowner *oo = openowner(stp->st_openstp->st_stateowner);
 
 	lockdep_assert_held(&oo->oo_owner.so_client->cl_lock);
 
 	list_del_init(&stp->st_locks);
-	unhash_ol_stateid(stp);
 	nfs4_unhash_stid(&stp->st_stid);
+	return unhash_ol_stateid(stp);
 }
 
 static void release_lock_stateid(struct nfs4_ol_stateid *stp)
 {
 	struct nfs4_openowner *oo = openowner(stp->st_openstp->st_stateowner);
+	bool unhashed;
 
 	spin_lock(&oo->oo_owner.so_client->cl_lock);
-	unhash_lock_stateid(stp);
+	unhashed = unhash_lock_stateid(stp);
 	spin_unlock(&oo->oo_owner.so_client->cl_lock);
-	nfs4_put_stid(&stp->st_stid);
+	if (unhashed)
+		nfs4_put_stid(&stp->st_stid);
 }
 
 static void unhash_lockowner_locked(struct nfs4_lockowner *lo)
@@ -1129,7 +1148,7 @@ static void release_lockowner(struct nfs4_lockowner *lo)
 	while (!list_empty(&lo->lo_owner.so_stateids)) {
 		stp = list_first_entry(&lo->lo_owner.so_stateids,
 				struct nfs4_ol_stateid, st_perstateowner);
-		unhash_lock_stateid(stp);
+		WARN_ON(!unhash_lock_stateid(stp));
 		put_ol_stateid_locked(stp, &reaplist);
 	}
 	spin_unlock(&clp->cl_lock);
@@ -1142,21 +1161,26 @@ static void release_open_stateid_locks(struct nfs4_ol_stateid *open_stp,
 {
 	struct nfs4_ol_stateid *stp;
 
+	lockdep_assert_held(&open_stp->st_stid.sc_client->cl_lock);
+
 	while (!list_empty(&open_stp->st_locks)) {
 		stp = list_entry(open_stp->st_locks.next,
 				struct nfs4_ol_stateid, st_locks);
-		unhash_lock_stateid(stp);
+		WARN_ON(!unhash_lock_stateid(stp));
 		put_ol_stateid_locked(stp, reaplist);
 	}
 }
 
-static void unhash_open_stateid(struct nfs4_ol_stateid *stp,
+static bool unhash_open_stateid(struct nfs4_ol_stateid *stp,
 				struct list_head *reaplist)
 {
+	bool unhashed;
+
 	lockdep_assert_held(&stp->st_stid.sc_client->cl_lock);
 
-	unhash_ol_stateid(stp);
+	unhashed = unhash_ol_stateid(stp);
 	release_open_stateid_locks(stp, reaplist);
+	return unhashed;
 }
 
 static void release_open_stateid(struct nfs4_ol_stateid *stp)
@@ -1164,8 +1188,8 @@ static void release_open_stateid(struct nfs4_ol_stateid *stp)
 	LIST_HEAD(reaplist);
 
 	spin_lock(&stp->st_stid.sc_client->cl_lock);
-	unhash_open_stateid(stp, &reaplist);
-	put_ol_stateid_locked(stp, &reaplist);
+	if (unhash_open_stateid(stp, &reaplist))
+		put_ol_stateid_locked(stp, &reaplist);
 	spin_unlock(&stp->st_stid.sc_client->cl_lock);
 	free_ol_stateid_reaplist(&reaplist);
 }
@@ -1210,8 +1234,8 @@ static void release_openowner(struct nfs4_openowner *oo)
 	while (!list_empty(&oo->oo_owner.so_stateids)) {
 		stp = list_first_entry(&oo->oo_owner.so_stateids,
 				struct nfs4_ol_stateid, st_perstateowner);
-		unhash_open_stateid(stp, &reaplist);
-		put_ol_stateid_locked(stp, &reaplist);
+		if (unhash_open_stateid(stp, &reaplist))
+			put_ol_stateid_locked(stp, &reaplist);
 	}
 	spin_unlock(&clp->cl_lock);
 	free_ol_stateid_reaplist(&reaplist);
@@ -1714,7 +1738,7 @@ __destroy_client(struct nfs4_client *clp)
 	spin_lock(&state_lock);
 	while (!list_empty(&clp->cl_delegations)) {
 		dp = list_entry(clp->cl_delegations.next, struct nfs4_delegation, dl_perclnt);
-		unhash_delegation_locked(dp);
+		WARN_ON(!unhash_delegation_locked(dp));
 		list_add(&dp->dl_recall_lru, &reaplist);
 	}
 	spin_unlock(&state_lock);
@@ -1894,7 +1918,7 @@ static void gen_confirm(struct nfs4_client *clp, struct nfsd_net *nn)
 	 * __force to keep sparse happy
 	 */
 	verf[0] = (__force __be32)get_seconds();
-	verf[1] = (__force __be32)nn->clientid_counter;
+	verf[1] = (__force __be32)nn->clverifier_counter++;
 	memcpy(clp->cl_confirm.data, verf, sizeof(clp->cl_confirm.data));
 }
 
@@ -2241,6 +2265,9 @@ static bool client_has_state(struct nfs4_client *clp)
 	 * Also note we should probably be using this in 4.0 case too.
 	 */
 	return !list_empty(&clp->cl_openowners)
+#ifdef CONFIG_NFSD_PNFS
+		|| !list_empty(&clp->cl_lo_states)
+#endif
 		|| !list_empty(&clp->cl_delegations)
 		|| !list_empty(&clp->cl_sessions);
 }
@@ -2547,11 +2574,9 @@ nfsd4_create_session(struct svc_rqst *rqstp,
 			goto out_free_conn;
 		cs_slot = &conf->cl_cs_slot;
 		status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
-		if (status == nfserr_replay_cache) {
-			status = nfsd4_replay_create_session(cr_ses, cs_slot);
-			goto out_free_conn;
-		} else if (cr_ses->seqid != cs_slot->sl_seqid + 1) {
-			status = nfserr_seq_misordered;
+		if (status) {
+			if (status == nfserr_replay_cache)
+				status = nfsd4_replay_create_session(cr_ses, cs_slot);
 			goto out_free_conn;
 		}
 	} else if (unconf) {
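The create_session change above leans on the fact that check_slot_seqid() already classifies the sequence id, so the removed open-coded comparison against sl_seqid + 1 added nothing: any non-zero status now simply aborts session creation, with the replay case answered from the slot's cache first. A rough summary of the helper's results for a slot that is not in use (my reading of the existing helper, written as a comment; not new code):

	status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
	/*
	 * seqid == sl_seqid + 1  ->  nfs_ok                (create the session)
	 * seqid == sl_seqid      ->  nfserr_replay_cache   (replay cached reply)
	 * anything else          ->  nfserr_seq_misordered (fail the request)
	 */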
@@ -3041,10 +3066,11 @@ nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
 	unconf = find_unconfirmed_client_by_name(&clname, nn);
 	if (unconf)
 		unhash_client_locked(unconf);
-	if (conf && same_verf(&conf->cl_verifier, &clverifier))
+	if (conf && same_verf(&conf->cl_verifier, &clverifier)) {
 		/* case 1: probable callback update */
 		copy_clid(new, conf);
-	else /* case 4 (new client) or cases 2, 3 (client reboot): */
+		gen_confirm(new, nn);
+	} else /* case 4 (new client) or cases 2, 3 (client reboot): */
 		gen_clid(new, nn);
 	new->cl_minorversion = 0;
 	gen_callback(new, setclid, rqstp);
@@ -3085,10 +3111,11 @@ nfsd4_setclientid_confirm(struct svc_rqst *rqstp,
 	/*
 	 * We try hard to give out unique clientid's, so if we get an
 	 * attempt to confirm the same clientid with a different cred,
-	 * there's a bug somewhere.  Let's charitably assume it's our
-	 * bug.
+	 * the client may be buggy; this should never happen.
+	 *
+	 * Nevertheless, RFC 7530 recommends INUSE for this case:
 	 */
-	status = nfserr_serverfault;
+	status = nfserr_clid_inuse;
 	if (unconf && !same_creds(&unconf->cl_cred, &rqstp->rq_cred))
 		goto out;
 	if (conf && !same_creds(&conf->cl_cred, &rqstp->rq_cred))
@@ -3315,7 +3342,8 @@ alloc_init_open_stateowner(unsigned int strhashval, struct nfsd4_open *open,
 		hash_openowner(oo, clp, strhashval);
 		ret = oo;
 	} else
-		nfs4_free_openowner(&oo->oo_owner);
+		nfs4_free_stateowner(&oo->oo_owner);
+
 	spin_unlock(&clp->cl_lock);
 	return ret;
 }
@@ -3482,6 +3510,9 @@ static int nfsd4_cb_recall_done(struct nfsd4_callback *cb,
 {
 	struct nfs4_delegation *dp = cb_to_delegation(cb);
 
+	if (dp->dl_stid.sc_type == NFS4_CLOSED_DELEG_STID)
+		return 1;
+
 	switch (task->tk_status) {
 	case 0:
 		return 1;
@@ -3885,12 +3916,6 @@ nfs4_upgrade_open(struct svc_rqst *rqstp, struct nfs4_file *fp, struct svc_fh *c
 	return status;
 }
 
-static void
-nfs4_set_claim_prev(struct nfsd4_open *open, bool has_session)
-{
-	open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
-}
-
 /* Should we give out recallable state?: */
 static bool nfsd4_cb_channel_good(struct nfs4_client *clp)
 {
@@ -3923,7 +3948,7 @@ static struct file_lock *nfs4_alloc_init_lease(struct nfs4_file *fp, int flag)
 static int nfs4_setlease(struct nfs4_delegation *dp)
 {
 	struct nfs4_file *fp = dp->dl_stid.sc_file;
-	struct file_lock *fl, *ret;
+	struct file_lock *fl;
 	struct file *filp;
 	int status = 0;
 
@@ -3934,10 +3959,10 @@ static int nfs4_setlease(struct nfs4_delegation *dp)
 	if (!filp) {
 		/* We should always have a readable file here */
 		WARN_ON_ONCE(1);
+		locks_free_lock(fl);
 		return -EBADF;
 	}
 	fl->fl_file = filp;
-	ret = fl;
 	status = vfs_setlease(filp, fl->fl_type, &fl, NULL);
 	if (fl)
 		locks_free_lock(fl);
@@ -4063,7 +4088,8 @@ nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open,
 		case NFS4_OPEN_CLAIM_FH:
 			/*
 			 * Let's not give out any delegations till everyone's
-			 * had the chance to reclaim theirs....
+			 * had the chance to reclaim theirs, *and* until
+			 * NLM locks have all been reclaimed:
 			 */
 			if (locks_in_grace(clp->net))
 				goto out_no_deleg;
@@ -4209,7 +4235,7 @@ out:
 	if (fp)
 		put_nfs4_file(fp);
 	if (status == 0 && open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS)
-		nfs4_set_claim_prev(open, nfsd4_has_session(&resp->cstate));
+		open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
 	/*
 	 * To finish the open response, we just need to set the rflags.
 	 */
@@ -4338,14 +4364,12 @@ nfs4_laundromat(struct nfsd_net *nn)
 	spin_lock(&state_lock);
 	list_for_each_safe(pos, next, &nn->del_recall_lru) {
 		dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru);
-		if (net_generic(dp->dl_stid.sc_client->net, nfsd_net_id) != nn)
-			continue;
 		if (time_after((unsigned long)dp->dl_time, (unsigned long)cutoff)) {
 			t = dp->dl_time - cutoff;
 			new_timeo = min(new_timeo, t);
 			break;
 		}
-		unhash_delegation_locked(dp);
+		WARN_ON(!unhash_delegation_locked(dp));
 		list_add(&dp->dl_recall_lru, &reaplist);
 	}
 	spin_unlock(&state_lock);
@@ -4440,7 +4464,7 @@ check_special_stateids(struct net *net, svc_fh *current_fh, stateid_t *stateid,
 {
 	if (ONE_STATEID(stateid) && (flags & RD_STATE))
 		return nfs_ok;
-	else if (locks_in_grace(net)) {
+	else if (opens_in_grace(net)) {
 		/* Answer in remaining cases depends on existence of
 		 * conflicting state; so we must wait out the grace period. */
 		return nfserr_grace;
@@ -4459,7 +4483,7 @@ check_special_stateids(struct net *net, svc_fh *current_fh, stateid_t *stateid,
 static inline int
 grace_disallows_io(struct net *net, struct inode *inode)
 {
-	return locks_in_grace(net) && mandatory_lock(inode);
+	return opens_in_grace(net) && mandatory_lock(inode);
 }
 
 /* Returns true iff a is later than b: */
@@ -4751,7 +4775,7 @@ nfsd4_free_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
 		if (check_for_locks(stp->st_stid.sc_file,
 				    lockowner(stp->st_stateowner)))
 			break;
-		unhash_lock_stateid(stp);
+		WARN_ON(!unhash_lock_stateid(stp));
 		spin_unlock(&cl->cl_lock);
 		nfs4_put_stid(s);
 		ret = nfs_ok;
@@ -4967,20 +4991,23 @@ out:
 static void nfsd4_close_open_stateid(struct nfs4_ol_stateid *s)
 {
 	struct nfs4_client *clp = s->st_stid.sc_client;
+	bool unhashed;
 	LIST_HEAD(reaplist);
 
 	s->st_stid.sc_type = NFS4_CLOSED_STID;
 	spin_lock(&clp->cl_lock);
-	unhash_open_stateid(s, &reaplist);
+	unhashed = unhash_open_stateid(s, &reaplist);
 
 	if (clp->cl_minorversion) {
-		put_ol_stateid_locked(s, &reaplist);
+		if (unhashed)
+			put_ol_stateid_locked(s, &reaplist);
 		spin_unlock(&clp->cl_lock);
 		free_ol_stateid_reaplist(&reaplist);
 	} else {
 		spin_unlock(&clp->cl_lock);
 		free_ol_stateid_reaplist(&reaplist);
-		move_to_close_lru(s, clp->net);
+		if (unhashed)
+			move_to_close_lru(s, clp->net);
 	}
 }
 
@@ -5045,9 +5072,6 @@ out:
 	return status;
 }
 
-
-#define LOFF_OVERFLOW(start, len)      ((u64)(len) > ~(u64)(start))
-
 static inline u64
 end_offset(u64 start, u64 len)
 {
@@ -5139,8 +5163,7 @@ nevermind:
 }
 
 static struct nfs4_lockowner *
-find_lockowner_str_locked(clientid_t *clid, struct xdr_netobj *owner,
-		struct nfs4_client *clp)
+find_lockowner_str_locked(struct nfs4_client *clp, struct xdr_netobj *owner)
 {
 	unsigned int strhashval = ownerstr_hashval(owner);
 	struct nfs4_stateowner *so;
@@ -5158,13 +5181,12 @@ find_lockowner_str_locked(clientid_t *clid, struct xdr_netobj *owner,
 }
 
 static struct nfs4_lockowner *
-find_lockowner_str(clientid_t *clid, struct xdr_netobj *owner,
-		struct nfs4_client *clp)
+find_lockowner_str(struct nfs4_client *clp, struct xdr_netobj *owner)
 {
 	struct nfs4_lockowner *lo;
 
 	spin_lock(&clp->cl_lock);
-	lo = find_lockowner_str_locked(clid, owner, clp);
+	lo = find_lockowner_str_locked(clp, owner);
 	spin_unlock(&clp->cl_lock);
 	return lo;
 }
@@ -5208,14 +5230,14 @@ alloc_init_lock_stateowner(unsigned int strhashval, struct nfs4_client *clp,
 	lo->lo_owner.so_seqid = lock->lk_new_lock_seqid;
 	lo->lo_owner.so_ops = &lockowner_ops;
 	spin_lock(&clp->cl_lock);
-	ret = find_lockowner_str_locked(&clp->cl_clientid,
-			&lock->lk_new_owner, clp);
+	ret = find_lockowner_str_locked(clp, &lock->lk_new_owner);
 	if (ret == NULL) {
 		list_add(&lo->lo_owner.so_strhash,
 			 &clp->cl_ownerstr_hashtbl[strhashval]);
 		ret = lo;
 	} else
-		nfs4_free_lockowner(&lo->lo_owner);
+		nfs4_free_stateowner(&lo->lo_owner);
+
 	spin_unlock(&clp->cl_lock);
 	return ret;
 }
@@ -5298,8 +5320,8 @@ find_or_create_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fi,
 static int
 check_lock_length(u64 offset, u64 length)
 {
-	return ((length == 0) || ((length != NFS4_MAX_UINT64) &&
-	     LOFF_OVERFLOW(offset, length)));
+	return ((length == 0) || ((length != NFS4_MAX_UINT64) &&
+		(length > ~offset)));
}
 
 static void get_lock_access(struct nfs4_ol_stateid *lock_stp, u32 access)
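The check_lock_length() hunk above folds the old LOFF_OVERFLOW() macro (deleted in the -5045 hunk) into an inline test: a non-special length overflows the 64-bit offset space exactly when length > ~offset, i.e. when offset + length would wrap. A stand-alone restatement with a few worked cases; lock_length_is_bad() is an invented name and the sketch assumes NFS4_MAX_UINT64 equals UINT64_MAX:

#include <assert.h>
#include <stdint.h>

/* Nonzero means the lock range is invalid, mirroring check_lock_length():
 * zero-length locks are rejected, and so is any length (other than the
 * "lock to EOF" value UINT64_MAX) that would run past the offset space. */
static int lock_length_is_bad(uint64_t offset, uint64_t length)
{
	return length == 0 ||
	       (length != UINT64_MAX && length > ~offset);
}

int main(void)
{
	assert(!lock_length_is_bad(0, 100));				/* ordinary lock */
	assert(!lock_length_is_bad(0xffffffffffffff00ULL, 0xff));	/* just fits */
	assert(lock_length_is_bad(0xffffffffffffff00ULL, 0x100));	/* would wrap */
	assert(!lock_length_is_bad(5, UINT64_MAX));			/* "to EOF" */
	assert(lock_length_is_bad(5, 0));				/* zero length */
	return 0;
}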
@@ -5328,9 +5350,9 @@ lookup_or_create_lock_state(struct nfsd4_compound_state *cstate,
 	struct nfs4_lockowner *lo;
 	unsigned int strhashval;
 
-	lo = find_lockowner_str(&cl->cl_clientid, &lock->v.new.owner, cl);
+	lo = find_lockowner_str(cl, &lock->lk_new_owner);
 	if (!lo) {
-		strhashval = ownerstr_hashval(&lock->v.new.owner);
+		strhashval = ownerstr_hashval(&lock->lk_new_owner);
 		lo = alloc_init_lock_stateowner(strhashval, cl, ost, lock);
 		if (lo == NULL)
 			return nfserr_jukebox;
@@ -5391,7 +5413,7 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
 	if (lock->lk_is_new) {
 		if (nfsd4_has_session(cstate))
 			/* See rfc 5661 18.10.3: given clientid is ignored: */
-			memcpy(&lock->v.new.clientid,
+			memcpy(&lock->lk_new_clientid,
 				&cstate->session->se_client->cl_clientid,
 				sizeof(clientid_t));
 
@@ -5409,7 +5431,7 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
 		open_sop = openowner(open_stp->st_stateowner);
 		status = nfserr_bad_stateid;
 		if (!same_clid(&open_sop->oo_owner.so_client->cl_clientid,
-						&lock->v.new.clientid))
+						&lock->lk_new_clientid))
 			goto out;
 		status = lookup_or_create_lock_state(cstate, open_stp, lock,
 							&lock_stp, &new);
@@ -5603,8 +5625,7 @@ nfsd4_lockt(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
 		goto out;
 	}
 
-	lo = find_lockowner_str(&lockt->lt_clientid, &lockt->lt_owner,
-				cstate->clp);
+	lo = find_lockowner_str(cstate->clp, &lockt->lt_owner);
 	if (lo)
 		file_lock->fl_owner = (fl_owner_t)lo;
 	file_lock->fl_pid = current->tgid;
@@ -6019,7 +6040,7 @@ nfsd_inject_add_lock_to_list(struct nfs4_ol_stateid *lst,
 
 static u64 nfsd_foreach_client_lock(struct nfs4_client *clp, u64 max,
 				    struct list_head *collect,
-				    void (*func)(struct nfs4_ol_stateid *))
+				    bool (*func)(struct nfs4_ol_stateid *))
 {
 	struct nfs4_openowner *oop;
 	struct nfs4_ol_stateid *stp, *st_next;
@@ -6033,9 +6054,9 @@ static u64 nfsd_foreach_client_lock(struct nfs4_client *clp, u64 max,
 		list_for_each_entry_safe(lst, lst_next,
 				&stp->st_locks, st_locks) {
 			if (func) {
-				func(lst);
-				nfsd_inject_add_lock_to_list(lst,
-							collect);
+				if (func(lst))
+					nfsd_inject_add_lock_to_list(lst,
+								collect);
 			}
 			++count;
 			/*
@@ -6305,7 +6326,7 @@ static u64 nfsd_find_all_delegations(struct nfs4_client *clp, u64 max,
 				continue;
 
 			atomic_inc(&clp->cl_refcount);
-			unhash_delegation_locked(dp);
+			WARN_ON(!unhash_delegation_locked(dp));
 			list_add(&dp->dl_recall_lru, victims);
 		}
 		++count;
@@ -6584,6 +6605,7 @@ nfs4_state_start_net(struct net *net)
 		return ret;
 	nn->boot_time = get_seconds();
 	nn->grace_ended = false;
+	nn->nfsd4_manager.block_opens = true;
 	locks_start_grace(net, &nn->nfsd4_manager);
 	nfsd4_client_tracking_init(net);
 	printk(KERN_INFO "NFSD: starting %ld-second grace period (net %p)\n",
@@ -6602,7 +6624,7 @@ nfs4_state_start(void)
 	ret = set_callback_cred();
 	if (ret)
 		return -ENOMEM;
-	laundry_wq = create_singlethread_workqueue("nfsd4");
+	laundry_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, "nfsd4");
 	if (laundry_wq == NULL) {
 		ret = -ENOMEM;
 		goto out_recovery;
@@ -6635,7 +6657,7 @@ nfs4_state_shutdown_net(struct net *net)
 	spin_lock(&state_lock);
 	list_for_each_safe(pos, next, &nn->del_recall_lru) {
 		dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru);
-		unhash_delegation_locked(dp);
+		WARN_ON(!unhash_delegation_locked(dp));
 		list_add(&dp->dl_recall_lru, &reaplist);
 	}
 	spin_unlock(&state_lock);