@@ -2138,17 +2138,6 @@ static void drain_local_stock(struct work_struct *dummy)
 	clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
 }
 
-static void __init memcg_stock_init(void)
-{
-	int cpu;
-
-	for_each_possible_cpu(cpu) {
-		struct memcg_stock_pcp *stock =
-					&per_cpu(memcg_stock, cpu);
-		INIT_WORK(&stock->work, drain_local_stock);
-	}
-}
-
 /*
  * Cache charges(val) to local per_cpu area.
  * This will be consumed by consume_stock() function, later.
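
The helper deleted above did nothing but wire each CPU's stock to the drain worker, and that wiring reappears inside mem_cgroup_init() in the final hunk below. As background, the per-cpu-cache-plus-drain-work pattern it implements looks roughly like the following self-contained sketch; pcp_cache, drain_cache and their fields are hypothetical stand-ins, not the memcg code itself.

#include <linux/percpu.h>
#include <linux/workqueue.h>

struct pcp_cache {
	unsigned long nr_cached;	/* units charged locally, not yet returned */
	struct work_struct work;	/* drains nr_cached back to the shared pool */
};
static DEFINE_PER_CPU(struct pcp_cache, pcp_cache);

static void drain_cache(struct work_struct *work)
{
	struct pcp_cache *cache = container_of(work, struct pcp_cache, work);

	/* Safe without locking if the work is only ever queued via
	 * schedule_work_on(cpu, ...) for the owning CPU. */
	cache->nr_cached = 0;
}

static int __init pcp_cache_init(void)
{
	int cpu;

	/* Walk possible, not online, CPUs: a CPU hotplugged in later
	 * must already carry an initialized work item. */
	for_each_possible_cpu(cpu)
		INIT_WORK(&per_cpu(pcp_cache, cpu).work, drain_cache);
	return 0;
}
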
@@ -4507,28 +4496,6 @@ struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
 }
 EXPORT_SYMBOL(parent_mem_cgroup);
 
-static void __init mem_cgroup_soft_limit_tree_init(void)
-{
-	int node;
-
-	for_each_node(node) {
-		struct mem_cgroup_tree_per_node *rtpn;
-		int zone;
-
-		rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL,
-				    node_online(node) ? node : NUMA_NO_NODE);
-
-		for (zone = 0; zone < MAX_NR_ZONES; zone++) {
-			struct mem_cgroup_tree_per_zone *rtpz;
-
-			rtpz = &rtpn->rb_tree_per_zone[zone];
-			rtpz->rb_root = RB_ROOT;
-			spin_lock_init(&rtpz->lock);
-		}
-		soft_limit_tree.rb_tree_per_node[node] = rtpn;
-	}
-}
-
 static struct cgroup_subsys_state * __ref
 mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
 {
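
For context when reading the loop above (and its reincarnation below), the structures it populates are defined near the top of mm/memcontrol.c roughly as follows; this is abridged, so consult the file itself for the exact definitions in this kernel version.

/* One rb-tree of soft-limit-exceeding memcgs per (node, zone) pair. */
struct mem_cgroup_tree_per_zone {
	struct rb_root rb_root;
	spinlock_t lock;
};

struct mem_cgroup_tree_per_node {
	struct mem_cgroup_tree_per_zone rb_tree_per_zone[MAX_NR_ZONES];
};

struct mem_cgroup_tree {
	struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
};

static struct mem_cgroup_tree soft_limit_tree __read_mostly;

The kzalloc_node() call tries to place each node's tree on that node's own memory and falls back to NUMA_NO_NODE (any node) when the node is not online.
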
@@ -5905,10 +5872,33 @@ void mem_cgroup_migrate(struct page *oldpage, struct page *newpage,
  */
 static int __init mem_cgroup_init(void)
 {
+	int cpu, node;
+
 	hotcpu_notifier(memcg_cpu_hotplug_callback, 0);
+
+	for_each_possible_cpu(cpu)
+		INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work,
+			  drain_local_stock);
+
+	for_each_node(node) {
+		struct mem_cgroup_tree_per_node *rtpn;
+		int zone;
+
+		rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL,
+				    node_online(node) ? node : NUMA_NO_NODE);
+
+		for (zone = 0; zone < MAX_NR_ZONES; zone++) {
+			struct mem_cgroup_tree_per_zone *rtpz;
+
+			rtpz = &rtpn->rb_tree_per_zone[zone];
+			rtpz->rb_root = RB_ROOT;
+			spin_lock_init(&rtpz->lock);
+		}
+		soft_limit_tree.rb_tree_per_node[node] = rtpn;
+	}
+
 	enable_swap_cgroup();
-	mem_cgroup_soft_limit_tree_init();
-	memcg_stock_init();
+
 	return 0;
 }
 subsys_initcall(mem_cgroup_init);
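
Two details about the consolidated initializer are worth noting. First, everything still runs once at subsys_initcall() time, before any memory cgroup can be created, so folding the two helpers into mem_cgroup_init() cannot change any ordering that matters. Second, the new loop reaches the per-cpu stock through per_cpu_ptr(&memcg_stock, cpu)->work where the deleted helper used &per_cpu(memcg_stock, cpu): for a statically defined per-cpu variable the two spellings yield the same address, as the following hypothetical sanity check (demo_stock is a made-up variable) illustrates.

#include <linux/bug.h>
#include <linux/percpu.h>

struct demo_stock { int val; };
static DEFINE_PER_CPU(struct demo_stock, demo_stock);

static void __init check_percpu_spellings(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		/* lvalue macro vs. pointer macro: same per-CPU address */
		WARN_ON(&per_cpu(demo_stock, cpu) !=
			per_cpu_ptr(&demo_stock, cpu));
}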