@@ -324,11 +324,6 @@ struct mem_cgroup {
	 * percpu counter.
	 */
	struct mem_cgroup_stat_cpu __percpu *stat;
-	/*
-	 * used when a cpu is offlined or other synchronizations
-	 * See mem_cgroup_read_stat().
-	 */
-	struct mem_cgroup_stat_cpu nocpu_base;
	spinlock_t pcp_counter_lock;

 #if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_INET)
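With nocpu_base gone, the __percpu stat pointer is the only stat storage left in struct mem_cgroup. The property that makes the rest of this patch possible is that an area returned by alloc_percpu() exists for every possible CPU and survives CPU offline/online. A minimal sketch of that allocation pattern, with hypothetical names (demo_stat_cpu, demo_stat, demo_init) that are not part of this patch:

	#include <linux/percpu.h>
	#include <linux/errno.h>

	/* Stand-in for struct mem_cgroup_stat_cpu. */
	struct demo_stat_cpu {
		long count[4];
	};

	static struct demo_stat_cpu __percpu *demo_stat;

	static int demo_init(void)
	{
		/*
		 * Allocates one zero-filled demo_stat_cpu per possible
		 * CPU; the slots persist across hotplug, so an offlined
		 * CPU's counts are never lost.
		 */
		demo_stat = alloc_percpu(struct demo_stat_cpu);
		return demo_stat ? 0 : -ENOMEM;
	}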
@@ -834,15 +829,8 @@ static long mem_cgroup_read_stat(struct mem_cgroup *memcg,
	long val = 0;
	int cpu;

-	get_online_cpus();
-	for_each_online_cpu(cpu)
+	for_each_possible_cpu(cpu)
		val += per_cpu(memcg->stat->count[idx], cpu);
-#ifdef CONFIG_HOTPLUG_CPU
-	spin_lock(&memcg->pcp_counter_lock);
-	val += memcg->nocpu_base.count[idx];
-	spin_unlock(&memcg->pcp_counter_lock);
-#endif
-	put_online_cpus();
	return val;
 }

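The reason for_each_possible_cpu() needs no get_online_cpus() protection: the per-cpu slots of offline CPUs still exist and simply hold their last values, so summing over all possible CPUs already accounts for them. A self-contained sketch of the same read pattern, using a hypothetical DEFINE_PER_CPU counter (demo_nr_events and demo_read_events are illustrative, not from this patch):

	#include <linux/percpu.h>
	#include <linux/cpumask.h>

	static DEFINE_PER_CPU(unsigned long, demo_nr_events);

	static unsigned long demo_read_events(void)
	{
		unsigned long sum = 0;
		int cpu;

		/*
		 * No hotplug locking: possible CPUs' per-cpu areas
		 * always exist, and an offline CPU's counter keeps
		 * its value, so it is still included in the sum.
		 */
		for_each_possible_cpu(cpu)
			sum += per_cpu(demo_nr_events, cpu);
		return sum;
	}

The trade-off is walking the possible mask, which can be larger than the online mask on some systems; in exchange, the read side sheds all locking and the hotplug folding machinery removed below.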
@@ -852,15 +840,8 @@ static unsigned long mem_cgroup_read_events(struct mem_cgroup *memcg,
	unsigned long val = 0;
	int cpu;

-	get_online_cpus();
-	for_each_online_cpu(cpu)
+	for_each_possible_cpu(cpu)
		val += per_cpu(memcg->stat->events[idx], cpu);
-#ifdef CONFIG_HOTPLUG_CPU
-	spin_lock(&memcg->pcp_counter_lock);
-	val += memcg->nocpu_base.events[idx];
-	spin_unlock(&memcg->pcp_counter_lock);
-#endif
-	put_online_cpus();
	return val;
 }

@@ -2210,37 +2191,12 @@ static void drain_all_stock(struct mem_cgroup *root_memcg)
	mutex_unlock(&percpu_charge_mutex);
 }

-/*
- * This function drains percpu counter value from DEAD cpu and
- * move it to local cpu. Note that this function can be preempted.
- */
-static void mem_cgroup_drain_pcp_counter(struct mem_cgroup *memcg, int cpu)
-{
-	int i;
-
-	spin_lock(&memcg->pcp_counter_lock);
-	for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
-		long x = per_cpu(memcg->stat->count[i], cpu);
-
-		per_cpu(memcg->stat->count[i], cpu) = 0;
-		memcg->nocpu_base.count[i] += x;
-	}
-	for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++) {
-		unsigned long x = per_cpu(memcg->stat->events[i], cpu);
-
-		per_cpu(memcg->stat->events[i], cpu) = 0;
-		memcg->nocpu_base.events[i] += x;
-	}
-	spin_unlock(&memcg->pcp_counter_lock);
-}
-
 static int memcg_cpu_hotplug_callback(struct notifier_block *nb,
				      unsigned long action,
				      void *hcpu)
 {
	int cpu = (unsigned long)hcpu;
	struct memcg_stock_pcp *stock;
-	struct mem_cgroup *iter;

	if (action == CPU_ONLINE)
		return NOTIFY_OK;
@@ -2248,9 +2204,6 @@ static int memcg_cpu_hotplug_callback(struct notifier_block *nb,
	if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
		return NOTIFY_OK;

-	for_each_mem_cgroup(iter)
-		mem_cgroup_drain_pcp_counter(iter, cpu);
-
	stock = &per_cpu(memcg_stock, cpu);
	drain_stock(stock);
	return NOTIFY_OK;
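After this change the CPU_DEAD path only drains the per-cpu charge stock; the stat and event counters of the dead CPU stay where they are and are picked up by the possible-cpu readers above. For completeness, a sketch of the matching update side, reusing the hypothetical demo_nr_events counter from the earlier sketch: writes stay purely CPU-local, so neither end of the counter needs hotplug awareness.

	/*
	 * Update path: a local this_cpu op, preemption-safe and
	 * oblivious to CPU hotplug.
	 */
	static void demo_count_event(void)
	{
		this_cpu_inc(demo_nr_events);
	}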