|
@@ -2079,7 +2079,7 @@ MODULE_PARM_DESC(nfs_access_max_cachesize, "NFS access maximum total cache lengt
|
|
|
static void nfs_access_free_entry(struct nfs_access_entry *entry)
|
|
|
{
|
|
|
put_rpccred(entry->cred);
|
|
|
- kfree(entry);
|
|
|
+ kfree_rcu(entry, rcu_head);
|
|
|
smp_mb__before_atomic();
|
|
|
atomic_long_dec(&nfs_access_nr_entries);
|
|
|
smp_mb__after_atomic();
|
|
@@ -2257,6 +2257,38 @@ out_zap:
|
|
|
return -ENOENT;
|
|
|
}
|
|
|
|
|
|
/* Lockless fast-path lookup of the NFS access cache.
 *
 * Only check the most recently returned cache entry (the tail of the
 * per-inode LRU), but do it without taking the inode's cache lock.
 *
 * @inode: inode whose access cache is consulted
 * @cred:  credential the cached rights must have been granted to
 * @res:   on success, receives a copy of the cached entry's
 *         jiffies/cred/mask fields
 *
 * Returns 0 and fills @res on a usable cache hit; returns -ECHILD when
 * the fast path cannot decide, in which case the caller is expected to
 * fall back to the locked nfs_access_get_cached() lookup.
 */
static int nfs_access_get_cached_rcu(struct inode *inode, struct rpc_cred *cred, struct nfs_access_entry *res)
{
	/* Only check the most recently returned cache entry,
	 * but do it without locking.
	 */
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_access_entry *cache;
	int err = -ECHILD;
	struct list_head *lh;

	rcu_read_lock();
	/* If the server-side access rights may have changed, don't trust
	 * anything currently cached; let the slow path sort it out.
	 */
	if (nfsi->cache_validity & NFS_INO_INVALID_ACCESS)
		goto out;
	/* Tail of the LRU == most recently used entry.  rcu_dereference()
	 * pairs with the smp_wmb() performed when an entry is published,
	 * so the entry's fields are visible before we can see its pointer.
	 * NOTE(review): entries are presumed freed via kfree_rcu(), which
	 * is what makes this unlocked traversal safe — confirm against
	 * nfs_access_free_entry().
	 */
	lh = rcu_dereference(nfsi->access_cache_entry_lru.prev);
	cache = list_entry(lh, struct nfs_access_entry, lru);
	/* The '||' short-circuit matters: when the list is empty, lh is the
	 * list head itself and cache->cred must not be dereferenced.
	 */
	if (lh == &nfsi->access_cache_entry_lru ||
	    cred != cache->cred)
		cache = NULL;
	if (cache == NULL)
		goto out;
	/* Without a delegation the cached rights are only good for the
	 * attribute-cache timeout window starting at cache->jiffies.
	 */
	if (!nfs_have_delegated_attributes(inode) &&
	    !time_in_range_open(jiffies, cache->jiffies, cache->jiffies + nfsi->attrtimeo))
		goto out;
	/* Copy the fields out while still inside the RCU read section;
	 * the entry itself may be reclaimed once we drop it.
	 */
	res->jiffies = cache->jiffies;
	res->cred = cache->cred;
	res->mask = cache->mask;
	err = 0;
out:
	rcu_read_unlock();
	return err;
}
|
|
|
+
|
|
|
static void nfs_access_add_rbtree(struct inode *inode, struct nfs_access_entry *set)
|
|
|
{
|
|
|
struct nfs_inode *nfsi = NFS_I(inode);
|
|
@@ -2300,6 +2332,11 @@ void nfs_access_add_cache(struct inode *inode, struct nfs_access_entry *set)
|
|
|
cache->cred = get_rpccred(set->cred);
|
|
|
cache->mask = set->mask;
|
|
|
|
|
|
+ /* The above field assignments must be visible
|
|
|
+ * before this item appears on the lru. We cannot easily
|
|
|
+ * use rcu_assign_pointer, so just force the memory barrier.
|
|
|
+ */
|
|
|
+ smp_wmb();
|
|
|
nfs_access_add_rbtree(inode, cache);
|
|
|
|
|
|
/* Update accounting */
|
|
@@ -2339,7 +2376,9 @@ static int nfs_do_access(struct inode *inode, struct rpc_cred *cred, int mask)
|
|
|
|
|
|
trace_nfs_access_enter(inode);
|
|
|
|
|
|
- status = nfs_access_get_cached(inode, cred, &cache);
|
|
|
+ status = nfs_access_get_cached_rcu(inode, cred, &cache);
|
|
|
+ if (status != 0)
|
|
|
+ status = nfs_access_get_cached(inode, cred, &cache);
|
|
|
if (status == 0)
|
|
|
goto out_cached;
|
|
|
|