@@ -232,10 +232,10 @@ EXPORT_SYMBOL(unregister_shrinker);
 
 #define SHRINK_BATCH 128
 
-static unsigned long shrink_slabs(struct shrink_control *shrinkctl,
-				  struct shrinker *shrinker,
-				  unsigned long nr_scanned,
-				  unsigned long nr_eligible)
+static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
+				    struct shrinker *shrinker,
+				    unsigned long nr_scanned,
+				    unsigned long nr_eligible)
 {
 	unsigned long freed = 0;
 	unsigned long long delta;
@@ -344,9 +344,10 @@ static unsigned long shrink_slabs(struct shrink_control *shrinkctl,
 }
 
 /**
- * shrink_node_slabs - shrink slab caches of a given node
+ * shrink_slab - shrink slab caches
  * @gfp_mask: allocation context
  * @nid: node whose slab caches to target
+ * @memcg: memory cgroup whose slab caches to target
  * @nr_scanned: pressure numerator
  * @nr_eligible: pressure denominator
  *
@@ -355,6 +356,12 @@ static unsigned long shrink_slabs(struct shrink_control *shrinkctl,
  * @nid is passed along to shrinkers with SHRINKER_NUMA_AWARE set,
  * unaware shrinkers will receive a node id of 0 instead.
  *
+ * @memcg specifies the memory cgroup to target. If it is not NULL,
+ * only shrinkers with SHRINKER_MEMCG_AWARE set will be called to scan
+ * objects from the memory cgroup specified. Otherwise all shrinkers
+ * are called, and memcg aware shrinkers are supposed to scan the
+ * global list then.
+ *
  * @nr_scanned and @nr_eligible form a ratio that indicate how much of
  * the available objects should be scanned. Page reclaim for example
  * passes the number of pages scanned and the number of pages on the
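For reference, this ratio is consumed by do_shrink_slab(), whose body is
elided from these hunks. A simplified sketch of the existing proportional
scan computation (names as in the current vmscan.c; not part of this
patch):

	/* ask for a slice of freeable objects matching LRU pressure */
	freeable = shrinker->count_objects(shrinker, shrinkctl);
	delta = 4 * nr_scanned / shrinker->seeks;
	delta *= freeable;
	do_div(delta, nr_eligible + 1);	/* objects to scan this call */

So a shrinker with DEFAULT_SEEKS (2) reporting 5000 freeable objects,
under pressure of 100 pages scanned out of 10000 eligible, is asked to
scan about 5000 * 2 * 100 / 10000 = 100 objects.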
@@ -365,13 +372,17 @@ static unsigned long shrink_slabs(struct shrink_control *shrinkctl,
  *
  * Returns the number of reclaimed slab objects.
  */
-unsigned long shrink_node_slabs(gfp_t gfp_mask, int nid,
-				unsigned long nr_scanned,
-				unsigned long nr_eligible)
+static unsigned long shrink_slab(gfp_t gfp_mask, int nid,
+				 struct mem_cgroup *memcg,
+				 unsigned long nr_scanned,
+				 unsigned long nr_eligible)
 {
 	struct shrinker *shrinker;
 	unsigned long freed = 0;
 
+	if (memcg && !memcg_kmem_is_active(memcg))
+		return 0;
+
 	if (nr_scanned == 0)
 		nr_scanned = SWAP_CLUSTER_MAX;
 
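(The early return above reflects that a cgroup which never had kernel
memory accounting activated cannot have per-memcg slab objects yet, so a
shrink targeted at it would be a no-op.)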
@@ -390,12 +401,16 @@ unsigned long shrink_node_slabs(gfp_t gfp_mask, int nid,
 		struct shrink_control sc = {
 			.gfp_mask = gfp_mask,
 			.nid = nid,
+			.memcg = memcg,
 		};
 
+		if (memcg && !(shrinker->flags & SHRINKER_MEMCG_AWARE))
+			continue;
+
 		if (!(shrinker->flags & SHRINKER_NUMA_AWARE))
 			sc.nid = 0;
 
-		freed += shrink_slabs(&sc, shrinker, nr_scanned, nr_eligible);
+		freed += do_shrink_slab(&sc, shrinker, nr_scanned, nr_eligible);
 	}
 
 	up_read(&shrinker_rwsem);
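To illustrate the SHRINKER_MEMCG_AWARE check above: a cache opting in to
per-memcg scanning would register a shrinker along these lines (a
hypothetical sketch, not from this patch; example_count()/example_scan()
are made-up callbacks that would consult sc->memcg to pick the right
object list):

	static unsigned long example_count(struct shrinker *s,
					   struct shrink_control *sc);
	static unsigned long example_scan(struct shrinker *s,
					  struct shrink_control *sc);

	static struct shrinker example_shrinker = {
		.count_objects	= example_count,
		.scan_objects	= example_scan,
		.seeks		= DEFAULT_SEEKS,
		.flags		= SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE,
	};

	register_shrinker(&example_shrinker);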
@@ -404,6 +419,29 @@ out:
 	return freed;
 }
 
+void drop_slab_node(int nid)
+{
+	unsigned long freed;
+
+	do {
+		struct mem_cgroup *memcg = NULL;
+
+		freed = 0;
+		do {
+			freed += shrink_slab(GFP_KERNEL, nid, memcg,
+					     1000, 1000);
+		} while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)) != NULL);
+	} while (freed > 10);
+}
+
+void drop_slab(void)
+{
+	int nid;
+
+	for_each_online_node(nid)
+		drop_slab_node(nid);
+}
+
 static inline int is_page_cache_freeable(struct page *page)
 {
 	/*
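For reference: drop_slab_node() makes one global pass (memcg == NULL, so
every shrinker runs) followed by one pass per memory cgroup via
mem_cgroup_iter(), each with maximal pressure (nr_scanned == nr_eligible
== 1000), and repeats the whole sweep until it frees ten objects or
fewer. drop_slab() is what `echo 2 > /proc/sys/vm/drop_caches` ends up
invoking.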
@@ -2276,6 +2314,7 @@ static inline bool should_continue_reclaim(struct zone *zone,
 static bool shrink_zone(struct zone *zone, struct scan_control *sc,
 			bool is_classzone)
 {
+	struct reclaim_state *reclaim_state = current->reclaim_state;
 	unsigned long nr_reclaimed, nr_scanned;
 	bool reclaimable = false;
 
@@ -2294,6 +2333,7 @@ static bool shrink_zone(struct zone *zone, struct scan_control *sc,
 	memcg = mem_cgroup_iter(root, NULL, &reclaim);
 	do {
 		unsigned long lru_pages;
+		unsigned long scanned;
 		struct lruvec *lruvec;
 		int swappiness;
 
@@ -2305,10 +2345,16 @@ static bool shrink_zone(struct zone *zone, struct scan_control *sc,
 
 		lruvec = mem_cgroup_zone_lruvec(zone, memcg);
 		swappiness = mem_cgroup_swappiness(memcg);
+		scanned = sc->nr_scanned;
 
 		shrink_lruvec(lruvec, swappiness, sc, &lru_pages);
 		zone_lru_pages += lru_pages;
 
+		if (memcg && is_classzone)
+			shrink_slab(sc->gfp_mask, zone_to_nid(zone),
+				    memcg, sc->nr_scanned - scanned,
+				    lru_pages);
+
 		/*
 		 * Direct reclaim and kswapd have to scan all memory
 		 * cgroups to fulfill the overall scan target for the
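For reference: each cgroup's slab pressure is derived from that cgroup's
own LRU scan in the same shrink_zone() pass. If, say, shrink_lruvec()
scanned 32 pages of a cgroup whose lruvec held 3200 eligible pages, its
memcg-aware shrinkers are invoked with nr_scanned = 32 and nr_eligible =
3200, i.e. roughly 1% of their freeable objects (scaled by seeks).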
@@ -2330,19 +2376,14 @@ static bool shrink_zone(struct zone *zone, struct scan_control *sc,
 		 * Shrink the slab caches in the same proportion that
 		 * the eligible LRU pages were scanned.
 		 */
-		if (global_reclaim(sc) && is_classzone) {
-			struct reclaim_state *reclaim_state;
-
-			shrink_node_slabs(sc->gfp_mask, zone_to_nid(zone),
-					  sc->nr_scanned - nr_scanned,
-					  zone_lru_pages);
-
-			reclaim_state = current->reclaim_state;
-			if (reclaim_state) {
-				sc->nr_reclaimed +=
-					reclaim_state->reclaimed_slab;
-				reclaim_state->reclaimed_slab = 0;
-			}
-		}
+		if (global_reclaim(sc) && is_classzone)
+			shrink_slab(sc->gfp_mask, zone_to_nid(zone), NULL,
+				    sc->nr_scanned - nr_scanned,
+				    zone_lru_pages);
+
+		if (reclaim_state) {
+			sc->nr_reclaimed += reclaim_state->reclaimed_slab;
+			reclaim_state->reclaimed_slab = 0;
+		}
 
 		vmpressure(sc->gfp_mask, sc->target_mem_cgroup,
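For reference: reclaim_state->reclaimed_slab accumulates pages freed by
slab shrinking. Since shrink_slab() is now also called per memory cgroup
inside the loop above, the accounting is hoisted out of the
global_reclaim() branch so that memcg-targeted reclaim is credited for
the slab pages it frees as well.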