@@ -359,7 +359,7 @@ put_nfs4_file(struct nfs4_file *fi)
 {
        might_lock(&state_lock);

-       if (atomic_dec_and_lock(&fi->fi_ref, &state_lock)) {
+       if (refcount_dec_and_lock(&fi->fi_ref, &state_lock)) {
                hlist_del_rcu(&fi->fi_hash);
                spin_unlock(&state_lock);
                WARN_ON_ONCE(!list_empty(&fi->fi_clnt_odstate));
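
The hunk above is the canonical refcount_dec_and_lock() teardown: the final put takes the lock that protects the hash, unhashes, then frees. Unlike atomic_t, refcount_t saturates instead of overflowing and WARNs on an increment from zero. A minimal sketch of the same pattern on a hypothetical object (struct foo, foo_lock, and the list are illustrative, not from this patch):

        #include <linux/refcount.h>
        #include <linux/slab.h>
        #include <linux/spinlock.h>
        #include <linux/list.h>

        struct foo {
                refcount_t ref;         /* object lifetime */
                struct list_head node;  /* protected by foo_lock */
        };

        static DEFINE_SPINLOCK(foo_lock);

        static void foo_get(struct foo *f)
        {
                refcount_inc(&f->ref);  /* WARNs if ref was already 0 */
        }

        static void foo_put(struct foo *f)
        {
                /* take foo_lock only when the count actually hits 0 */
                if (refcount_dec_and_lock(&f->ref, &foo_lock)) {
                        list_del(&f->node);
                        spin_unlock(&foo_lock);
                        kfree(f);
                }
        }
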
@@ -568,7 +568,7 @@ alloc_clnt_odstate(struct nfs4_client *clp)
        co = kmem_cache_zalloc(odstate_slab, GFP_KERNEL);
        if (co) {
                co->co_client = clp;
-               atomic_set(&co->co_odcount, 1);
+               refcount_set(&co->co_odcount, 1);
        }
        return co;
 }
@@ -586,7 +586,7 @@ static inline void
 get_clnt_odstate(struct nfs4_clnt_odstate *co)
 {
        if (co)
-               atomic_inc(&co->co_odcount);
+               refcount_inc(&co->co_odcount);
 }

 static void
@@ -598,7 +598,7 @@ put_clnt_odstate(struct nfs4_clnt_odstate *co)
                return;

        fp = co->co_file;
-       if (atomic_dec_and_lock(&co->co_odcount, &fp->fi_lock)) {
+       if (refcount_dec_and_lock(&co->co_odcount, &fp->fi_lock)) {
                list_del(&co->co_perfile);
                spin_unlock(&fp->fi_lock);

@@ -656,7 +656,7 @@ struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, struct kmem_cache *sla
        stid->sc_stateid.si_opaque.so_id = new_id;
        stid->sc_stateid.si_opaque.so_clid = cl->cl_clientid;
        /* Will be incremented before return to client: */
-       atomic_set(&stid->sc_count, 1);
+       refcount_set(&stid->sc_count, 1);
        spin_lock_init(&stid->sc_lock);

        /*
@@ -813,7 +813,7 @@ nfs4_put_stid(struct nfs4_stid *s)

        might_lock(&clp->cl_lock);

-       if (!atomic_dec_and_lock(&s->sc_count, &clp->cl_lock)) {
+       if (!refcount_dec_and_lock(&s->sc_count, &clp->cl_lock)) {
                wake_up_all(&close_wq);
                return;
        }
@@ -913,7 +913,7 @@ hash_delegation_locked(struct nfs4_delegation *dp, struct nfs4_file *fp)
        if (status)
                return status;
        ++fp->fi_delegees;
-       atomic_inc(&dp->dl_stid.sc_count);
+       refcount_inc(&dp->dl_stid.sc_count);
        dp->dl_stid.sc_type = NFS4_DELEG_STID;
        list_add(&dp->dl_perfile, &fp->fi_delegations);
        list_add(&dp->dl_perclnt, &clp->cl_delegations);
@@ -1214,7 +1214,7 @@ static void put_ol_stateid_locked(struct nfs4_ol_stateid *stp,

        WARN_ON_ONCE(!list_empty(&stp->st_locks));

-       if (!atomic_dec_and_test(&s->sc_count)) {
+       if (!refcount_dec_and_test(&s->sc_count)) {
                wake_up_all(&close_wq);
                return;
        }
@@ -1439,8 +1439,10 @@ free_session_slots(struct nfsd4_session *ses)
 {
        int i;

-       for (i = 0; i < ses->se_fchannel.maxreqs; i++)
+       for (i = 0; i < ses->se_fchannel.maxreqs; i++) {
+               free_svc_cred(&ses->se_slots[i]->sl_cred);
                kfree(ses->se_slots[i]);
+       }
 }

 /*
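
Each slot now owns a copy of the credential of the request whose reply it cached (see the copy_cred() call added to nfsd4_store_cache_entry() below), so freeing the slots must also release those copies or the principal strings leak. A minimal sketch of that own/replace/release pattern (struct reply_slot and the helper pairing are illustrative; copy_cred() and free_svc_cred() are the existing sunrpc helpers):

        #include <linux/sunrpc/svcauth.h>
        #include <linux/slab.h>

        struct reply_slot {
                struct svc_cred cred;   /* owned copy; may be zeroed/empty */
        };

        static void slot_store_cred(struct reply_slot *s,
                                    struct svc_cred *req_cred)
        {
                free_svc_cred(&s->cred);        /* drop any older copy */
                copy_cred(&s->cred, req_cred);  /* take our own copy */
        }

        static void slot_free(struct reply_slot *s)
        {
                free_svc_cred(&s->cred);        /* pairs with copy_cred() */
                kfree(s);
        }
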
@@ -1472,6 +1474,11 @@ static u32 nfsd4_get_drc_mem(struct nfsd4_channel_attrs *ca)
        spin_lock(&nfsd_drc_lock);
        avail = min((unsigned long)NFSD_MAX_MEM_PER_SESSION,
                    nfsd_drc_max_mem - nfsd_drc_mem_used);
+       /*
+        * Never use more than a third of the remaining memory,
+        * unless it's the only way to give this client a slot:
+        */
+       avail = clamp_t(int, avail, slotsize, avail/3);
        num = min_t(int, num, avail / slotsize);
        nfsd_drc_mem_used += num * slotsize;
        spin_unlock(&nfsd_drc_lock);
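
The clamp keeps any one session from draining the shared DRC pool while still reserving it at least one slot's worth of memory. A worked example of the arithmetic (the 2 KiB slot size is an assumed value for illustration, not taken from this patch):

        avail = min(NFSD_MAX_MEM_PER_SESSION, pool remaining)  /* say 300 KiB */
        avail = clamp(avail, slotsize, avail / 3)              /* -> 100 KiB  */
        num   = min(num, avail / slotsize)                     /* <= 50 slots */

So a client requesting a huge slot table is limited to a third of what is currently available; the slotsize lower bound is the "unless it's the only way to give this client a slot" case from the comment.
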
@@ -2072,7 +2079,7 @@ find_stateid_by_type(struct nfs4_client *cl, stateid_t *t, char typemask)
        s = find_stateid_locked(cl, t);
        if (s != NULL) {
                if (typemask & s->sc_type)
-                       atomic_inc(&s->sc_count);
+                       refcount_inc(&s->sc_count);
                else
                        s = NULL;
        }
@@ -2287,14 +2294,18 @@ nfsd4_store_cache_entry(struct nfsd4_compoundres *resp)

        dprintk("--> %s slot %p\n", __func__, slot);

+       slot->sl_flags |= NFSD4_SLOT_INITIALIZED;
        slot->sl_opcnt = resp->opcnt;
        slot->sl_status = resp->cstate.status;
+       free_svc_cred(&slot->sl_cred);
+       copy_cred(&slot->sl_cred, &resp->rqstp->rq_cred);

-       slot->sl_flags |= NFSD4_SLOT_INITIALIZED;
-       if (nfsd4_not_cached(resp)) {
-               slot->sl_datalen = 0;
+       if (!nfsd4_cache_this(resp)) {
+               slot->sl_flags &= ~NFSD4_SLOT_CACHED;
                return;
        }
+       slot->sl_flags |= NFSD4_SLOT_CACHED;
+
        base = resp->cstate.data_offset;
        slot->sl_datalen = buf->len - base;
        if (read_bytes_from_xdr_buf(buf, base, slot->sl_data, slot->sl_datalen))
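
NFSD4_SLOT_INITIALIZED ("this slot has seen a reply") is now set unconditionally, and a separate NFSD4_SLOT_CACHED bit records whether the reply body was actually stored; previously "nothing cached" was encoded as sl_datalen == 0, which could not be told apart from a legitimately empty cached reply. A summary of the resulting slot states (illustrative, condensed from this hunk and the sequence-processing hunks below):

        !INITIALIZED            slot never used; a retry gets
                                nfserr_seq_misordered
        INITIALIZED, !CACHED    reply seen, body discarded; only a
                                minimal replay is possible
        INITIALIZED, CACHED     the full encoded reply is resent
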
@@ -2321,8 +2332,16 @@ nfsd4_enc_sequence_replay(struct nfsd4_compoundargs *args,
        op = &args->ops[resp->opcnt - 1];
        nfsd4_encode_operation(resp, op);

-       /* Return nfserr_retry_uncached_rep in next operation. */
-       if (args->opcnt > 1 && !(slot->sl_flags & NFSD4_SLOT_CACHETHIS)) {
+       if (slot->sl_flags & NFSD4_SLOT_CACHED)
+               return op->status;
+       if (args->opcnt == 1) {
+               /*
+                * The original operation wasn't a solo sequence--we
+                * always cache those--so this retry must not match the
+                * original:
+                */
+               op->status = nfserr_seq_false_retry;
+       } else {
                op = &args->ops[resp->opcnt++];
                op->status = nfserr_retry_uncached_rep;
                nfsd4_encode_operation(resp, op);
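
For a retry on an initialized slot, the rewritten branch distinguishes three cases (a summary, not code from the patch):

        reply body cached        -> resend it; return op->status
        solo SEQUENCE, uncached  -> solo SEQUENCEs are always cached, so
                                    this retry cannot be genuine:
                                    nfserr_seq_false_retry
        multi-op, uncached       -> encode nfserr_retry_uncached_rep in
                                    the op after SEQUENCE, as before
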
@@ -2986,6 +3005,34 @@ static bool nfsd4_request_too_big(struct svc_rqst *rqstp,
        return xb->len > session->se_fchannel.maxreq_sz;
 }

+static bool replay_matches_cache(struct svc_rqst *rqstp,
+                struct nfsd4_sequence *seq, struct nfsd4_slot *slot)
+{
+       struct nfsd4_compoundargs *argp = rqstp->rq_argp;
+
+       if ((bool)(slot->sl_flags & NFSD4_SLOT_CACHETHIS) !=
+           (bool)seq->cachethis)
+               return false;
+       /*
+        * If there's an error then the reply can have fewer ops than
+        * the call.  But if we cached a reply with *more* ops than the
+        * call you're sending us now, then this new call is clearly not
+        * really a replay of the old one:
+        */
+       if (slot->sl_opcnt < argp->opcnt)
+               return false;
+       /* This is the only check explicitly called for by the spec: */
+       if (!same_creds(&rqstp->rq_cred, &slot->sl_cred))
+               return false;
+       /*
+        * There may be more comparisons we could actually do, but the
+        * spec doesn't require us to catch every case where the calls
+        * don't match (that would require caching the call as well as
+        * the reply), so we don't bother.
+        */
+       return true;
+}
+
 __be32
 nfsd4_sequence(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
               union nfsd4_op_u *u)
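
replay_matches_cache() keeps the comparison deliberately cheap: the cachethis flag, the op count, and the credential (the check the spec explicitly calls for; NFS4ERR_SEQ_FALSE_RETRY is defined by RFC 5661) rather than caching and comparing full call arguments. A sketch of the misuse this catches (principals and op mix are illustrative):

        /*
         * Principal A: SEQUENCE(slot 3, seqid 7) + WRITE  -> reply cached
         * Principal B: SEQUENCE(slot 3, seqid 7) + READ   -> same slot/seqid
         *
         * B's compound is not a replay of A's: same_creds() fails, so the
         * server returns NFS4ERR_SEQ_FALSE_RETRY instead of handing B the
         * cached reply to A's WRITE.
         */
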
@@ -3045,6 +3092,9 @@ nfsd4_sequence(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
        status = nfserr_seq_misordered;
        if (!(slot->sl_flags & NFSD4_SLOT_INITIALIZED))
                goto out_put_session;
+       status = nfserr_seq_false_retry;
+       if (!replay_matches_cache(rqstp, seq, slot))
+               goto out_put_session;
        cstate->slot = slot;
        cstate->session = session;
        cstate->clp = clp;
@@ -3351,7 +3401,7 @@ static void nfsd4_init_file(struct knfsd_fh *fh, unsigned int hashval,
 {
        lockdep_assert_held(&state_lock);

-       atomic_set(&fp->fi_ref, 1);
+       refcount_set(&fp->fi_ref, 1);
        spin_lock_init(&fp->fi_lock);
        INIT_LIST_HEAD(&fp->fi_stateids);
        INIT_LIST_HEAD(&fp->fi_delegations);
@@ -3514,7 +3564,7 @@ nfsd4_find_existing_open(struct nfs4_file *fp, struct nfsd4_open *open)
                        continue;
                if (local->st_stateowner == &oo->oo_owner) {
                        ret = local;
-                       atomic_inc(&ret->st_stid.sc_count);
+                       refcount_inc(&ret->st_stid.sc_count);
                        break;
                }
        }
@@ -3573,7 +3623,7 @@ init_open_stateid(struct nfs4_file *fp, struct nfsd4_open *open)
                goto out_unlock;

        open->op_stp = NULL;
-       atomic_inc(&stp->st_stid.sc_count);
+       refcount_inc(&stp->st_stid.sc_count);
        stp->st_stid.sc_type = NFS4_OPEN_STID;
        INIT_LIST_HEAD(&stp->st_locks);
        stp->st_stateowner = nfs4_get_stateowner(&oo->oo_owner);
@@ -3621,7 +3671,7 @@ move_to_close_lru(struct nfs4_ol_stateid *s, struct net *net)
         * there should be no danger of the refcount going back up again at
         * this point.
         */
-       wait_event(close_wq, atomic_read(&s->st_stid.sc_count) == 2);
+       wait_event(close_wq, refcount_read(&s->st_stid.sc_count) == 2);

        release_all_access(s);
        if (s->st_stid.sc_file) {
@@ -3647,7 +3697,7 @@ find_file_locked(struct knfsd_fh *fh, unsigned int hashval)

        hlist_for_each_entry_rcu(fp, &file_hashtbl[hashval], fi_hash) {
                if (fh_match(&fp->fi_fhandle, fh)) {
-                       if (atomic_inc_not_zero(&fp->fi_ref))
+                       if (refcount_inc_not_zero(&fp->fi_ref))
                                return fp;
                }
        }
@@ -3783,7 +3833,7 @@ static void nfsd_break_one_deleg(struct nfs4_delegation *dp)
         * lock) we know the server hasn't removed the lease yet, we know
         * it's safe to take a reference.
         */
-       atomic_inc(&dp->dl_stid.sc_count);
+       refcount_inc(&dp->dl_stid.sc_count);
        nfsd4_run_cb(&dp->dl_recall);
 }

@@ -3966,7 +4016,8 @@ static struct nfs4_delegation *find_deleg_stateid(struct nfs4_client *cl, statei
 {
        struct nfs4_stid *ret;

-       ret = find_stateid_by_type(cl, s, NFS4_DELEG_STID);
+       ret = find_stateid_by_type(cl, s,
+                       NFS4_DELEG_STID|NFS4_REVOKED_DELEG_STID);
        if (!ret)
                return NULL;
        return delegstateid(ret);
@@ -3989,6 +4040,12 @@ nfs4_check_deleg(struct nfs4_client *cl, struct nfsd4_open *open,
        deleg = find_deleg_stateid(cl, &open->op_delegate_stateid);
        if (deleg == NULL)
                goto out;
+       if (deleg->dl_stid.sc_type == NFS4_REVOKED_DELEG_STID) {
+               nfs4_put_stid(&deleg->dl_stid);
+               if (cl->cl_minorversion)
+                       status = nfserr_deleg_revoked;
+               goto out;
+       }
        flags = share_access_to_flags(open->op_share_access);
        status = nfs4_check_delegmode(deleg, flags);
        if (status) {
@@ -4858,6 +4915,16 @@ nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate,
                struct nfs4_stid **s, struct nfsd_net *nn)
 {
        __be32 status;
+       bool return_revoked = false;
+
+       /*
+        * only return revoked delegations if explicitly asked.
+        * otherwise we report revoked or bad_stateid status.
+        */
+       if (typemask & NFS4_REVOKED_DELEG_STID)
+               return_revoked = true;
+       else if (typemask & NFS4_DELEG_STID)
+               typemask |= NFS4_REVOKED_DELEG_STID;

        if (ZERO_STATEID(stateid) || ONE_STATEID(stateid))
                return nfserr_bad_stateid;
@@ -4872,6 +4939,12 @@ nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate,
        *s = find_stateid_by_type(cstate->clp, stateid, typemask);
        if (!*s)
                return nfserr_bad_stateid;
+       if (((*s)->sc_type == NFS4_REVOKED_DELEG_STID) && !return_revoked) {
+               nfs4_put_stid(*s);
+               if (cstate->minorversion)
+                       return nfserr_deleg_revoked;
+               return nfserr_bad_stateid;
+       }
        return nfs_ok;
 }

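
Both revoked-delegation hunks make the same minorversion-dependent status choice: NFS4ERR_DELEG_REVOKED exists only in NFSv4.1+, where the client is expected to clean up with FREE_STATEID, while a v4.0 client gets a generic bad-stateid failure instead (explicitly in nfsd4_lookup_stateid() above; in nfs4_check_deleg() the v4.0 case falls through to the function's existing error status). A hypothetical helper capturing the mapping (illustrative only; the patch open-codes it in both call sites):

        static __be32 revoked_deleg_status(bool minorversion)
        {
                /* NFSv4.1+ understands NFS4ERR_DELEG_REVOKED; v4.0 does not */
                return minorversion ? nfserr_deleg_revoked
                                    : nfserr_bad_stateid;
        }
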
@@ -5071,7 +5144,7 @@ nfsd4_free_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
                ret = nfserr_locks_held;
                break;
        case NFS4_LOCK_STID:
-               atomic_inc(&s->sc_count);
+               refcount_inc(&s->sc_count);
                spin_unlock(&cl->cl_lock);
                ret = nfsd4_free_lock_stateid(stateid, s);
                goto out;
@@ -5578,7 +5651,7 @@ init_lock_stateid(struct nfs4_ol_stateid *stp, struct nfs4_lockowner *lo,

        lockdep_assert_held(&clp->cl_lock);

-       atomic_inc(&stp->st_stid.sc_count);
+       refcount_inc(&stp->st_stid.sc_count);
        stp->st_stid.sc_type = NFS4_LOCK_STID;
        stp->st_stateowner = nfs4_get_stateowner(&lo->lo_owner);
        get_nfs4_file(fp);
@@ -5604,7 +5677,7 @@ find_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fp)

        list_for_each_entry(lst, &lo->lo_owner.so_stateids, st_perstateowner) {
                if (lst->st_stid.sc_file == fp) {
-                       atomic_inc(&lst->st_stid.sc_count);
+                       refcount_inc(&lst->st_stid.sc_count);
                        return lst;
                }
        }
@@ -7006,8 +7079,8 @@ nfs4_state_start_net(struct net *net)
        nn->nfsd4_manager.block_opens = true;
        locks_start_grace(net, &nn->nfsd4_manager);
        nfsd4_client_tracking_init(net);
-       printk(KERN_INFO "NFSD: starting %ld-second grace period (net %p)\n",
-              nn->nfsd4_grace, net);
+       printk(KERN_INFO "NFSD: starting %ld-second grace period (net %x)\n",
+              nn->nfsd4_grace, net->ns.inum);
        queue_delayed_work(laundry_wq, &nn->laundromat_work, nn->nfsd4_grace * HZ);
        return 0;
 }
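
Printing the struct net pointer with %p exposes a raw (or, with pointer hashing, an opaque) kernel address that means nothing to an administrator; net->ns.inum is the namespace's proc inode number, which can be matched against /proc directly. A worked example of reading the new log line (values are illustrative):

        # readlink /proc/self/ns/net
        net:[4026531992]
        # dmesg | grep grace
        NFSD: starting 90-second grace period (net f0000098)

Here 4026531992 == 0xf0000098, so the hex token in the log identifies the same network namespace shown by /proc.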