@@ -634,8 +634,6 @@ static void disarm_static_keys(struct mem_cgroup *memcg)
 	disarm_kmem_keys(memcg);
 }
 
-static void drain_all_stock_async(struct mem_cgroup *memcg);
-
 static struct mem_cgroup_per_zone *
 mem_cgroup_zone_zoneinfo(struct mem_cgroup *memcg, struct zone *zone)
 {
@@ -2302,13 +2300,15 @@ static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
 
 /*
  * Drains all per-CPU charge caches for given root_memcg resp. subtree
- * of the hierarchy under it. sync flag says whether we should block
- * until the work is done.
+ * of the hierarchy under it.
  */
-static void drain_all_stock(struct mem_cgroup *root_memcg, bool sync)
+static void drain_all_stock(struct mem_cgroup *root_memcg)
 {
 	int cpu, curcpu;
 
+	/* If someone's already draining, avoid adding more workers. */
+	if (!mutex_trylock(&percpu_charge_mutex))
+		return;
 	/* Notify other cpus that system-wide "drain" is running */
 	get_online_cpus();
 	curcpu = get_cpu();
@@ -2329,41 +2329,7 @@ static void drain_all_stock(struct mem_cgroup *root_memcg, bool sync)
 		}
 	}
 	put_cpu();
-
-	if (!sync)
-		goto out;
-
-	for_each_online_cpu(cpu) {
-		struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
-		if (test_bit(FLUSHING_CACHED_CHARGE, &stock->flags))
-			flush_work(&stock->work);
-	}
-out:
 	put_online_cpus();
-}
-
-/*
- * Tries to drain stocked charges in other cpus. This function is asynchronous
- * and just put a work per cpu for draining localy on each cpu. Caller can
- * expects some charges will be back later but cannot wait for it.
- */
-static void drain_all_stock_async(struct mem_cgroup *root_memcg)
-{
-	/*
-	 * If someone calls draining, avoid adding more kworker runs.
-	 */
-	if (!mutex_trylock(&percpu_charge_mutex))
-		return;
-	drain_all_stock(root_memcg, false);
-	mutex_unlock(&percpu_charge_mutex);
-}
-
-/* This is a synchronous drain interface. */
-static void drain_all_stock_sync(struct mem_cgroup *root_memcg)
-{
-	/* called when force_empty is called */
-	mutex_lock(&percpu_charge_mutex);
-	drain_all_stock(root_memcg, true);
 	mutex_unlock(&percpu_charge_mutex);
 }
 
@@ -2472,7 +2438,7 @@ retry:
 		goto retry;
 
 	if (!drained) {
-		drain_all_stock_async(mem_over_limit);
+		drain_all_stock(mem_over_limit);
 		drained = true;
 		goto retry;
 	}
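
For orientation, below is a minimal sketch of how the consolidated entry point reads once the hunks above are applied. The per-CPU scheduling loop is elided and replaced with a placeholder comment, so treat this as an illustration of the resulting shape rather than the verbatim function body.

/*
 * Sketch: after this patch the trylock gate that used to live in
 * drain_all_stock_async() guards drain_all_stock() directly, and the
 * synchronous flush_work() path is gone.
 */
static void drain_all_stock(struct mem_cgroup *root_memcg)
{
	int cpu, curcpu;

	/* If someone's already draining, avoid adding more workers. */
	if (!mutex_trylock(&percpu_charge_mutex))
		return;
	/* Notify other cpus that system-wide "drain" is running */
	get_online_cpus();
	curcpu = get_cpu();
	for_each_online_cpu(cpu) {
		/* ... schedule or run the per-cpu drain work for caches
		 * belonging to root_memcg's subtree (unchanged by this patch) ... */
	}
	put_cpu();
	put_online_cpus();
	mutex_unlock(&percpu_charge_mutex);
}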