Commit 8787a1df authored by Michal Hocko, committed by Linus Torvalds

memcg: move mem_cgroup_soft_limit_tree_init to mem_cgroup_init

Per-node-zone soft limit tree is currently initialized when the root
cgroup is created which is OK but it pointlessly pollutes memcg
allocation code with something that can be called when the memcg
subsystem is initialized by mem_cgroup_init along with other controller
specific parts.

While we are at it, let's make mem_cgroup_soft_limit_tree_init return void:
it doesn't make much sense to report an allocation failure that early
during boot, because if we fail to allocate memory at that point we are
screwed anyway (and dropping the error path saves some code).
Signed-off-by: Michal Hocko <mhocko@suse.cz>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Tejun Heo <htejun@gmail.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 0e50ce3b
...@@ -6052,7 +6052,7 @@ struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg) ...@@ -6052,7 +6052,7 @@ struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
} }
EXPORT_SYMBOL(parent_mem_cgroup); EXPORT_SYMBOL(parent_mem_cgroup);
static int mem_cgroup_soft_limit_tree_init(void) static void __init mem_cgroup_soft_limit_tree_init(void)
{ {
struct mem_cgroup_tree_per_node *rtpn; struct mem_cgroup_tree_per_node *rtpn;
struct mem_cgroup_tree_per_zone *rtpz; struct mem_cgroup_tree_per_zone *rtpz;
...@@ -6063,8 +6063,7 @@ static int mem_cgroup_soft_limit_tree_init(void) ...@@ -6063,8 +6063,7 @@ static int mem_cgroup_soft_limit_tree_init(void)
if (!node_state(node, N_NORMAL_MEMORY)) if (!node_state(node, N_NORMAL_MEMORY))
tmp = -1; tmp = -1;
rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL, tmp); rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL, tmp);
if (!rtpn) BUG_ON(!rtpn);
goto err_cleanup;
soft_limit_tree.rb_tree_per_node[node] = rtpn; soft_limit_tree.rb_tree_per_node[node] = rtpn;
...@@ -6074,17 +6073,6 @@ static int mem_cgroup_soft_limit_tree_init(void) ...@@ -6074,17 +6073,6 @@ static int mem_cgroup_soft_limit_tree_init(void)
spin_lock_init(&rtpz->lock); spin_lock_init(&rtpz->lock);
} }
} }
return 0;
err_cleanup:
for_each_node(node) {
if (!soft_limit_tree.rb_tree_per_node[node])
break;
kfree(soft_limit_tree.rb_tree_per_node[node]);
soft_limit_tree.rb_tree_per_node[node] = NULL;
}
return 1;
} }
static struct cgroup_subsys_state * __ref static struct cgroup_subsys_state * __ref
...@@ -6106,8 +6094,6 @@ mem_cgroup_css_alloc(struct cgroup *cont) ...@@ -6106,8 +6094,6 @@ mem_cgroup_css_alloc(struct cgroup *cont)
if (cont->parent == NULL) { if (cont->parent == NULL) {
int cpu; int cpu;
if (mem_cgroup_soft_limit_tree_init())
goto free_out;
root_mem_cgroup = memcg; root_mem_cgroup = memcg;
for_each_possible_cpu(cpu) { for_each_possible_cpu(cpu) {
struct memcg_stock_pcp *stock = struct memcg_stock_pcp *stock =
...@@ -6850,6 +6836,7 @@ static int __init mem_cgroup_init(void) ...@@ -6850,6 +6836,7 @@ static int __init mem_cgroup_init(void)
{ {
hotcpu_notifier(memcg_cpu_hotplug_callback, 0); hotcpu_notifier(memcg_cpu_hotplug_callback, 0);
enable_swap_cgroup(); enable_swap_cgroup();
mem_cgroup_soft_limit_tree_init();
return 0; return 0;
} }
subsys_initcall(mem_cgroup_init); subsys_initcall(mem_cgroup_init);
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.