@@ -224,13 +224,6 @@ hash_refile(struct svc_cacherep *rp)
 	hlist_add_head(&rp->c_hash, cache_hash + hash_32(rp->c_xid, maskbits));
 }
 
-static inline bool
-nfsd_cache_entry_expired(struct svc_cacherep *rp)
-{
-	return rp->c_state != RC_INPROG &&
-	       time_after(jiffies, rp->c_timestamp + RC_EXPIRE);
-}
-
 /*
  * Walk the LRU list and prune off entries that are older than RC_EXPIRE.
  * Also prune the oldest ones when the total exceeds the max number of entries.
@@ -242,8 +235,14 @@ prune_cache_entries(void)
 	long freed = 0;
 
 	list_for_each_entry_safe(rp, tmp, &lru_head, c_lru) {
-		if (!nfsd_cache_entry_expired(rp) &&
-		    num_drc_entries <= max_drc_entries)
+		/*
+		 * Don't free entries attached to calls that are still
+		 * in-progress, but do keep scanning the list.
+		 */
+		if (rp->c_state == RC_INPROG)
+			continue;
+		if (num_drc_entries <= max_drc_entries &&
+		    time_before(jiffies, rp->c_timestamp + RC_EXPIRE))
 			break;
 		nfsd_reply_cache_free_locked(rp);
 		freed++;
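
For illustration, below is a minimal userspace sketch of the loop shape this
hunk produces. It is not the kernel code: the singly linked list, the
prune()/mkentry() helpers, and the globals are hypothetical stand-ins for the
DRC's list_for_each_entry_safe() walk, and the bare `<` comparison ignores the
jiffies wraparound that time_before() handles. Only the skip-vs-stop logic
mirrors the patch: in-progress entries are skipped rather than halting the
scan, and the walk stops at the first completed entry that is both under the
cap and not yet expired.

/*
 * Standalone sketch, not kernel code. Entry states, list handling,
 * and the clock are simplified stand-ins; see the note above.
 */
#include <stdio.h>
#include <stdlib.h>

enum state { RC_INPROG, RC_DONE };

struct entry {
	enum state state;
	unsigned long timestamp;	/* fake "jiffies" of last use */
	struct entry *next;		/* LRU order: oldest entry first */
};

#define RC_EXPIRE 120UL

static unsigned long jiffies = 1000;	/* simulated current time */
static unsigned long num_entries = 3;	/* stand-in for num_drc_entries */
static unsigned long max_entries = 10;	/* stand-in for max_drc_entries */

/* Same shape as the patched loop in prune_cache_entries(). */
static long prune(struct entry **head)
{
	struct entry **link = head;
	long freed = 0;

	while (*link) {
		struct entry *rp = *link;

		/* Don't free in-progress calls, but keep scanning. */
		if (rp->state == RC_INPROG) {
			link = &rp->next;
			continue;
		}
		/* Under the cap and not expired: the rest is newer, stop. */
		if (num_entries <= max_entries &&
		    jiffies < rp->timestamp + RC_EXPIRE)
			break;
		*link = rp->next;	/* unlink, then free */
		free(rp);
		num_entries--;
		freed++;
	}
	return freed;
}

static struct entry *mkentry(enum state s, unsigned long ts, struct entry *next)
{
	struct entry *e = malloc(sizeof(*e));

	e->state = s;
	e->timestamp = ts;
	e->next = next;
	return e;
}

int main(void)
{
	/* Oldest first: an in-progress call, an expired entry, a fresh one. */
	struct entry *head =
		mkentry(RC_INPROG, 500,			/* skipped */
		mkentry(RC_DONE, 800,			/* 800 + 120 <= 1000: freed */
		mkentry(RC_DONE, 950, NULL)));		/* still fresh: scan stops */

	printf("freed %ld expired entries\n", prune(&head));
	return 0;
}

With the pre-patch test, the RC_INPROG entry at the head of the LRU reads as
"not expired" and stops the whole scan, so the expired entry behind it is never
freed; with the new logic the sketch prints "freed 1 expired entries".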