@@ -1777,6 +1777,10 @@ static void drain_local_stock(struct work_struct *dummy)
 	struct memcg_stock_pcp *stock;
 	unsigned long flags;
 
+	/*
+	 * The only protection from memory hotplug vs. drain_stock races is
+	 * that we always operate on local CPU stock here with IRQ disabled
+	 */
 	local_irq_save(flags);
 
 	stock = this_cpu_ptr(&memcg_stock);
@@ -1821,27 +1825,33 @@ static void drain_all_stock(struct mem_cgroup *root_memcg)
 	/* If someone's already draining, avoid adding running more workers. */
 	if (!mutex_trylock(&percpu_charge_mutex))
 		return;
-	/* Notify other cpus that system-wide "drain" is running */
-	get_online_cpus();
+	/*
+	 * Notify other cpus that system-wide "drain" is running
+	 * We do not care about races with the cpu hotplug because cpu down
+	 * as well as workers from this path always operate on the local
+	 * per-cpu data. CPU up doesn't touch memcg_stock at all.
+	 */
 	curcpu = get_cpu();
 	for_each_online_cpu(cpu) {
 		struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
 		struct mem_cgroup *memcg;
 
 		memcg = stock->cached;
-		if (!memcg || !stock->nr_pages)
+		if (!memcg || !stock->nr_pages || !css_tryget(&memcg->css))
 			continue;
-		if (!mem_cgroup_is_descendant(memcg, root_memcg))
+		if (!mem_cgroup_is_descendant(memcg, root_memcg)) {
+			css_put(&memcg->css);
 			continue;
+		}
 		if (!test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) {
 			if (cpu == curcpu)
 				drain_local_stock(&stock->work);
 			else
 				schedule_work_on(cpu, &stock->work);
 		}
+		css_put(&memcg->css);
 	}
 	put_cpu();
-	put_online_cpus();
 	mutex_unlock(&percpu_charge_mutex);
 }
 
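The css_tryget()/css_put() pair added above pins each cached memcg for the
duration of the descendancy check and the (possibly asynchronous) drain
scheduling, so the group cannot be freed while the walker still dereferences
stock->cached. As a rough userspace illustration of that tryget/put pattern,
not of the kernel css API itself, the following sketch models a reference
count whose "tryget" fails once teardown has dropped the last reference
(obj, obj_tryget, and obj_put are hypothetical names):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdlib.h>

struct obj {
	atomic_int refcnt;	/* stand-in for the css reference count */
};

/* Take a reference only if the object is still live (refcnt > 0). */
static bool obj_tryget(struct obj *o)
{
	int old = atomic_load(&o->refcnt);

	while (old > 0) {
		if (atomic_compare_exchange_weak(&o->refcnt, &old, old + 1))
			return true;	/* pinned: safe to use o */
	}
	return false;	/* teardown already began; caller must skip o */
}

/* Drop a reference; free the object when the count hits zero. */
static void obj_put(struct obj *o)
{
	if (atomic_fetch_sub(&o->refcnt, 1) == 1)
		free(o);
}

int main(void)
{
	struct obj *o = malloc(sizeof(*o));

	if (!o)
		return 1;
	atomic_init(&o->refcnt, 1);	/* initial reference */

	if (obj_tryget(o)) {	/* mirrors css_tryget() in the cpu loop */
		/* ... use o, e.g. queue work that dereferences it ... */
		obj_put(o);	/* mirrors the trailing css_put() */
	}

	obj_put(o);	/* drop the initial reference; frees o */
	return 0;
}

In drain_all_stock() the same shape appears twice: the css_put() in the
non-descendant branch undoes the successful tryget before the continue, and
the css_put() at the bottom of the loop body drops the pin once the drain
has been scheduled.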