@@ -2028,6 +2028,10 @@ static DEFINE_SPINLOCK(nfs_access_lru_lock);
 static LIST_HEAD(nfs_access_lru_list);
 static atomic_long_t nfs_access_nr_entries;
 
+static unsigned long nfs_access_max_cachesize = ULONG_MAX;
+module_param(nfs_access_max_cachesize, ulong, 0644);
+MODULE_PARM_DESC(nfs_access_max_cachesize, "NFS access maximum total cache length");
+
 static void nfs_access_free_entry(struct nfs_access_entry *entry)
 {
 	put_rpccred(entry->cred);
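
The first hunk introduces the tunable itself: a module parameter that defaults to ULONG_MAX, so the cache stays effectively unbounded unless an administrator lowers it. Because the permissions are 0644, module_param() also exposes the value through sysfs, so it can be changed at runtime as well as at load time. A minimal userspace sketch for inspecting or adjusting it, assuming the usual sysfs layout (this code lives in fs/nfs/dir.c, so the parameter should appear under /sys/module/nfs/parameters/; the program name and path are illustrative):

	/* Hypothetical helper, not part of the patch: read or set the
	 * cap via sysfs.  Path assumes the parameter is owned by nfs.ko. */
	#include <stdio.h>

	#define PARAM "/sys/module/nfs/parameters/nfs_access_max_cachesize"

	int main(int argc, char **argv)
	{
		FILE *f = fopen(PARAM, argc > 1 ? "w" : "r");
		char buf[32];

		if (!f) {
			perror(PARAM);
			return 1;
		}
		if (argc > 1)				/* e.g. ./accesscap 4096 */
			fprintf(f, "%s\n", argv[1]);
		else if (fgets(buf, sizeof(buf), f))	/* no argument: report it */
			printf("current cap: %s", buf);
		fclose(f);
		return 0;
	}
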
@@ -2048,19 +2052,14 @@ static void nfs_access_free_list(struct list_head *head)
 	}
 }
 
-unsigned long
-nfs_access_cache_scan(struct shrinker *shrink, struct shrink_control *sc)
+static unsigned long
+nfs_do_access_cache_scan(unsigned int nr_to_scan)
 {
 	LIST_HEAD(head);
 	struct nfs_inode *nfsi, *next;
 	struct nfs_access_entry *cache;
-	int nr_to_scan = sc->nr_to_scan;
-	gfp_t gfp_mask = sc->gfp_mask;
 	long freed = 0;
 
-	if ((gfp_mask & GFP_KERNEL) != GFP_KERNEL)
-		return SHRINK_STOP;
-
 	spin_lock(&nfs_access_lru_lock);
 	list_for_each_entry_safe(nfsi, next, &nfs_access_lru_list, access_cache_inode_lru) {
 		struct inode *inode;
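
This hunk is a pure refactor: the LRU walk moves into a new static helper, nfs_do_access_cache_scan(), which takes a plain nr_to_scan count. The shrink_control unpacking and the GFP_KERNEL/SHRINK_STOP early return are deliberately left out of the helper, since they only make sense when the caller is the memory-reclaim shrinker; the new cap-enforcement path added below wants to walk the LRU unconditionally. For context, the shrinker callbacks are registered elsewhere in the NFS client (from memory, in fs/nfs/super.c in this era, so treat the exact names as approximate):

	/* Context, not part of this patch: the shrinker hookup,
	 * reproduced from memory. */
	static struct shrinker acl_shrinker = {
		.count_objects	= nfs_access_cache_count,
		.scan_objects	= nfs_access_cache_scan,
		.seeks		= DEFAULT_SEEKS,
	};

	/* ... registered from init_nfs_fs(): */
	register_shrinker(&acl_shrinker);
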
@@ -2093,12 +2092,40 @@ remove_lru_entry:
 	return freed;
 }
 
+unsigned long
+nfs_access_cache_scan(struct shrinker *shrink, struct shrink_control *sc)
+{
+	int nr_to_scan = sc->nr_to_scan;
+	gfp_t gfp_mask = sc->gfp_mask;
+
+	if ((gfp_mask & GFP_KERNEL) != GFP_KERNEL)
+		return SHRINK_STOP;
+	return nfs_do_access_cache_scan(nr_to_scan);
+}
+
+
 unsigned long
 nfs_access_cache_count(struct shrinker *shrink, struct shrink_control *sc)
 {
 	return vfs_pressure_ratio(atomic_long_read(&nfs_access_nr_entries));
 }
 
+static void
+nfs_access_cache_enforce_limit(void)
+{
+	long nr_entries = atomic_long_read(&nfs_access_nr_entries);
+	unsigned long diff;
+	unsigned int nr_to_scan;
+
+	if (nr_entries < 0 || nr_entries <= nfs_access_max_cachesize)
+		return;
+	nr_to_scan = 100;
+	diff = nr_entries - nfs_access_max_cachesize;
+	if (diff < nr_to_scan)
+		nr_to_scan = diff;
+	nfs_do_access_cache_scan(nr_to_scan);
+}
+
 static void __nfs_access_zap_cache(struct nfs_inode *nfsi, struct list_head *head)
 {
 	struct rb_root *root_node = &nfsi->access_cache;
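
Here the shrinker entry point is reintroduced as a thin wrapper that keeps its old reclaim-only semantics, while nfs_access_cache_enforce_limit() adds the new behavior: whenever the global entry count exceeds the cap, evict the excess, but never more than 100 entries in one call, so that no single cache insertion pays an unbounded eviction cost. The nr_entries < 0 test guards against reading a transiently negative counter while concurrent frees are in flight. A standalone sketch of just the clamping arithmetic (the helper name scan_batch is invented for illustration):

	/* Standalone sketch of the batch-size clamp above: evict at most
	 * 100 entries per call, or only the excess when that is smaller. */
	#include <assert.h>
	#include <stdio.h>

	static unsigned int scan_batch(long nr_entries, unsigned long max_cachesize)
	{
		unsigned long diff;
		unsigned int nr_to_scan = 100;

		if (nr_entries < 0 || nr_entries <= max_cachesize)
			return 0;	/* under the cap: nothing to do */
		diff = nr_entries - max_cachesize;
		if (diff < nr_to_scan)
			nr_to_scan = diff;
		return nr_to_scan;
	}

	int main(void)
	{
		assert(scan_batch(50, 100) == 0);	/* below the cap */
		assert(scan_batch(103, 100) == 3);	/* small excess: evict just 3 */
		assert(scan_batch(5000, 100) == 100);	/* large excess: capped at 100 */
		assert(scan_batch(-1, 100) == 0);	/* transiently negative counter */
		printf("all clamp cases pass\n");
		return 0;
	}
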
@@ -2244,6 +2271,7 @@ void nfs_access_add_cache(struct inode *inode, struct nfs_access_entry *set)
 				&nfs_access_lru_list);
 		spin_unlock(&nfs_access_lru_lock);
 	}
+	nfs_access_cache_enforce_limit();
 }
 EXPORT_SYMBOL_GPL(nfs_access_add_cache);
 
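Finally, the limit is enforced from nfs_access_add_cache() itself, after the new entry has been inserted and the per-inode LRU bookkeeping is done. The cache can therefore overshoot the cap briefly between insertions, but each subsequent insertion whittles it back down in batches of at most 100. With the ULONG_MAX default nothing changes for existing setups; administrators opt in by setting the parameter at module load time (or, for a built-in NFS client, presumably via nfs.nfs_access_max_cachesize= on the kernel command line) or by writing to the sysfs file shown earlier.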