mm: memcontrol: consolidate memory controller initialization
author     Johannes Weiner <hannes@cmpxchg.org>
           Wed, 11 Feb 2015 23:26:33 +0000 (15:26 -0800)
committer  Linus Torvalds <torvalds@linux-foundation.org>
           Thu, 12 Feb 2015 01:06:03 +0000 (17:06 -0800)
The initialization code for the per-cpu charge stock and the soft
limit tree is compact enough to inline it into mem_cgroup_init().

Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Michal Hocko <mhocko@suse.cz>
Reviewed-by: Vladimir Davydov <vdavydov@parallels.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 2efec685793b6e5606228bd27c0d72428cceb79d..ebf1139f323e44271fcffa49e5a63494f0a07b86 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2138,17 +2138,6 @@ static void drain_local_stock(struct work_struct *dummy)
        clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
 }
 
-static void __init memcg_stock_init(void)
-{
-       int cpu;
-
-       for_each_possible_cpu(cpu) {
-               struct memcg_stock_pcp *stock =
-                                       &per_cpu(memcg_stock, cpu);
-               INIT_WORK(&stock->work, drain_local_stock);
-       }
-}
-
 /*
  * Cache charges(val) to local per_cpu area.
  * This will be consumed by consume_stock() function, later.
@@ -4507,28 +4496,6 @@ struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
 }
 EXPORT_SYMBOL(parent_mem_cgroup);
 
-static void __init mem_cgroup_soft_limit_tree_init(void)
-{
-       int node;
-
-       for_each_node(node) {
-               struct mem_cgroup_tree_per_node *rtpn;
-               int zone;
-
-               rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL,
-                                   node_online(node) ? node : NUMA_NO_NODE);
-
-               for (zone = 0; zone < MAX_NR_ZONES; zone++) {
-                       struct mem_cgroup_tree_per_zone *rtpz;
-
-                       rtpz = &rtpn->rb_tree_per_zone[zone];
-                       rtpz->rb_root = RB_ROOT;
-                       spin_lock_init(&rtpz->lock);
-               }
-               soft_limit_tree.rb_tree_per_node[node] = rtpn;
-       }
-}
-
 static struct cgroup_subsys_state * __ref
 mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
 {
@@ -5905,10 +5872,33 @@ void mem_cgroup_migrate(struct page *oldpage, struct page *newpage,
  */
 static int __init mem_cgroup_init(void)
 {
+       int cpu, node;
+
        hotcpu_notifier(memcg_cpu_hotplug_callback, 0);
+
+       for_each_possible_cpu(cpu)
+               INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work,
+                         drain_local_stock);
+
+       for_each_node(node) {
+               struct mem_cgroup_tree_per_node *rtpn;
+               int zone;
+
+               rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL,
+                                   node_online(node) ? node : NUMA_NO_NODE);
+
+               for (zone = 0; zone < MAX_NR_ZONES; zone++) {
+                       struct mem_cgroup_tree_per_zone *rtpz;
+
+                       rtpz = &rtpn->rb_tree_per_zone[zone];
+                       rtpz->rb_root = RB_ROOT;
+                       spin_lock_init(&rtpz->lock);
+               }
+               soft_limit_tree.rb_tree_per_node[node] = rtpn;
+       }
+
        enable_swap_cgroup();
-       mem_cgroup_soft_limit_tree_init();
-       memcg_stock_init();
+
        return 0;
 }
 subsys_initcall(mem_cgroup_init);
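
For reference, this is the consolidated mem_cgroup_init() as it reads once
the patch is applied, reconstructed from the context and added lines of the
hunks above (the only additions are the two comments marking which helper
each loop used to live in):

static int __init mem_cgroup_init(void)
{
	int cpu, node;

	hotcpu_notifier(memcg_cpu_hotplug_callback, 0);

	/* formerly memcg_stock_init() */
	for_each_possible_cpu(cpu)
		INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work,
			  drain_local_stock);

	/* formerly mem_cgroup_soft_limit_tree_init() */
	for_each_node(node) {
		struct mem_cgroup_tree_per_node *rtpn;
		int zone;

		rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL,
				    node_online(node) ? node : NUMA_NO_NODE);

		for (zone = 0; zone < MAX_NR_ZONES; zone++) {
			struct mem_cgroup_tree_per_zone *rtpz;

			rtpz = &rtpn->rb_tree_per_zone[zone];
			rtpz->rb_root = RB_ROOT;
			spin_lock_init(&rtpz->lock);
		}
		soft_limit_tree.rb_tree_per_node[node] = rtpn;
	}

	enable_swap_cgroup();

	return 0;
}
subsys_initcall(mem_cgroup_init);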