@@ -291,6 +291,7 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
 	int nid = shrinkctl->nid;
 	long batch_size = shrinker->batch ? shrinker->batch
 					  : SHRINK_BATCH;
+	long scanned = 0, next_deferred;
 
 	freeable = shrinker->count_objects(shrinker, shrinkctl);
 	if (freeable == 0)
@@ -312,7 +313,9 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
 		pr_err("shrink_slab: %pF negative objects to delete nr=%ld\n",
 		       shrinker->scan_objects, total_scan);
 		total_scan = freeable;
-	}
+		next_deferred = nr;
+	} else
+		next_deferred = total_scan;
 
 	/*
 	 * We need to avoid excessive windup on filesystem shrinkers
@@ -369,17 +372,22 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
 
 		count_vm_events(SLABS_SCANNED, nr_to_scan);
 		total_scan -= nr_to_scan;
+		scanned += nr_to_scan;
 
 		cond_resched();
 	}
 
+	if (next_deferred >= scanned)
+		next_deferred -= scanned;
+	else
+		next_deferred = 0;
 	/*
 	 * move the unused scan count back into the shrinker in a
 	 * manner that handles concurrent updates. If we exhausted the
 	 * scan, there is no need to do an update.
 	 */
-	if (total_scan > 0)
-		new_nr = atomic_long_add_return(total_scan,
+	if (next_deferred > 0)
+		new_nr = atomic_long_add_return(next_deferred,
 						&shrinker->nr_deferred[nid]);
 	else
 		new_nr = atomic_long_read(&shrinker->nr_deferred[nid]);