memcg, slab: do not destroy children caches if parent has aliases
Author:     Vladimir Davydov <vdavydov@parallels.com>
AuthorDate: Mon, 7 Apr 2014 22:39:28 +0000 (15:39 -0700)
Commit:     Linus Torvalds <torvalds@linux-foundation.org>
CommitDate: Mon, 7 Apr 2014 23:36:13 +0000 (16:36 -0700)
Currently we destroy children caches at the very beginning of
kmem_cache_destroy().  This is wrong, because the root cache will not
necessarily be destroyed in the end: if it has aliases (refcount > 0),
kmem_cache_destroy() simply decrements its refcount and returns.  In
that case, at best we get a bunch of warnings in dmesg, like this
one:

  kmem_cache_destroy kmalloc-32:0: Slab cache still has objects
  CPU: 1 PID: 7139 Comm: modprobe Tainted: G    B   W    3.13.0+ #117
  Call Trace:
    dump_stack+0x49/0x5b
    kmem_cache_destroy+0xdf/0xf0
    kmem_cache_destroy_memcg_children+0x97/0xc0
    kmem_cache_destroy+0xf/0xf0
    xfs_mru_cache_uninit+0x21/0x30 [xfs]
    exit_xfs_fs+0x2e/0xc44 [xfs]
    SyS_delete_module+0x198/0x1f0
    system_call_fastpath+0x16/0x1b

At worst, if kmem_cache_destroy() races with an allocation from a
memcg cache, the kernel will panic.
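
To make the ordering problem concrete, here is a minimal user-space model
(an illustrative sketch only; toy_cache, toy_destroy_old and its fields are
hypothetical names, not kernel code).  The refcount stands in for the alias
count: tearing the children down before the alias check leaves an aliased
root alive but without its per-memcg children, which is exactly the state
that later produces the warnings or the panic described above.

  /* toy_old_order.c - build with: gcc -std=c99 -Wall toy_old_order.c */
  #include <stdio.h>
  #include <stdlib.h>

  struct toy_cache {
          int refcount;           /* > 1 means the cache has aliases */
          int nr_children;        /* per-memcg children still attached */
  };

  /* Old ordering: children are torn down before the alias check. */
  static struct toy_cache *toy_destroy_old(struct toy_cache *s)
  {
          s->nr_children = 0;     /* children destroyed unconditionally */

          if (--s->refcount)
                  return s;       /* aliased: root survives, now childless */
          free(s);
          return NULL;
  }

  int main(void)
  {
          struct toy_cache *s = calloc(1, sizeof(*s));

          s->refcount = 2;        /* creator + one alias */
          s->nr_children = 1;     /* one per-memcg child */

          s = toy_destroy_old(s);
          printf("root alive: %s, children left: %d\n",
                 s ? "yes" : "no", s ? s->nr_children : 0);
          /* prints: root alive: yes, children left: 0 */

          free(s);
          return 0;
  }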

This patch fixes the problem by moving the destruction of children caches
after the check for aliases.  In addition, it forbids destroying a root
cache while it still has children caches, because each child cache keeps a
reference to its parent.
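
A similarly hedged sketch of the new flow (again purely illustrative, not
kernel code; toy_destroy_children stands in for
__kmem_cache_destroy_memcg_children() returning the number of children that
could not be destroyed, as in the hunks below): aliases are checked first,
and a root with surviving children is left alone because those children pin
it.

  /* toy_new_order.c - build with: gcc -std=c99 -Wall toy_new_order.c */
  #include <stdio.h>
  #include <stdlib.h>

  struct toy_cache {
          int refcount;           /* > 1 means the cache has aliases */
          int nr_children;        /* per-memcg children */
          int busy_children;      /* children that still have objects */
  };

  /* Returns how many children could not be destroyed. */
  static int toy_destroy_children(struct toy_cache *s)
  {
          s->nr_children = s->busy_children;  /* only idle children go away */
          return s->busy_children;
  }

  /* New ordering: alias check first, then the children, then the root;
   * the root is kept if any child survived, since each child pins it. */
  static struct toy_cache *toy_destroy_new(struct toy_cache *s)
  {
          if (--s->refcount)
                  return s;       /* aliased: nothing is touched */

          if (toy_destroy_children(s) != 0)
                  return s;       /* live children forbid destruction */

          free(s);
          return NULL;
  }

  int main(void)
  {
          struct toy_cache *s = calloc(1, sizeof(*s));

          s->refcount = 1;
          s->nr_children = 2;
          s->busy_children = 1;   /* one child still has objects */

          s = toy_destroy_new(s);
          printf("root alive: %s, children left: %d\n",
                 s ? "yes" : "no", s ? s->nr_children : 0);
          /* prints: root alive: yes, children left: 1 */

          free(s);
          return 0;
  }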

Signed-off-by: Vladimir Davydov <vdavydov@parallels.com>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: Glauber Costa <glommer@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
include/linux/memcontrol.h
mm/memcontrol.c
mm/slab_common.c

diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 02d3072841e96fcc9dfdb185d67e5ee98589a772..b569b8be5c5ac49f918f5fdd492424530be8f185 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -507,7 +507,7 @@ struct kmem_cache *
 __memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp);
 
 void mem_cgroup_destroy_cache(struct kmem_cache *cachep);
-void kmem_cache_destroy_memcg_children(struct kmem_cache *s);
+int __kmem_cache_destroy_memcg_children(struct kmem_cache *s);
 
 /**
  * memcg_kmem_newpage_charge: verify if a new kmem allocation is allowed.
@@ -661,10 +661,6 @@ memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp)
 {
        return cachep;
 }
-
-static inline void kmem_cache_destroy_memcg_children(struct kmem_cache *s)
-{
-}
 #endif /* CONFIG_MEMCG_KMEM */
 #endif /* _LINUX_MEMCONTROL_H */
 
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index c22d8bf42d9a4923b175a9200f14aa9a62652e5d..29501f04056887297be694c315c7caf3adf666f5 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -3321,15 +3321,10 @@ void mem_cgroup_destroy_cache(struct kmem_cache *cachep)
        schedule_work(&cachep->memcg_params->destroy);
 }
 
-void kmem_cache_destroy_memcg_children(struct kmem_cache *s)
+int __kmem_cache_destroy_memcg_children(struct kmem_cache *s)
 {
        struct kmem_cache *c;
-       int i;
-
-       if (!s->memcg_params)
-               return;
-       if (!s->memcg_params->is_root_cache)
-               return;
+       int i, failed = 0;
 
        /*
         * If the cache is being destroyed, we trust that there is no one else
@@ -3363,8 +3358,12 @@ void kmem_cache_destroy_memcg_children(struct kmem_cache *s)
                c->memcg_params->dead = false;
                cancel_work_sync(&c->memcg_params->destroy);
                kmem_cache_destroy(c);
+
+               if (cache_from_memcg_idx(s, i))
+                       failed++;
        }
        mutex_unlock(&activate_kmem_mutex);
+       return failed;
 }
 
 static void mem_cgroup_destroy_all_caches(struct mem_cgroup *memcg)
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 0c2879ff414c46111a68b2bcba874e475e95fd98..f3cfccf76dda693250106d3c8d0387292e741c14 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -301,39 +301,64 @@ out_unlock:
        mutex_unlock(&slab_mutex);
        put_online_cpus();
 }
+
+static int kmem_cache_destroy_memcg_children(struct kmem_cache *s)
+{
+       int rc;
+
+       if (!s->memcg_params ||
+           !s->memcg_params->is_root_cache)
+               return 0;
+
+       mutex_unlock(&slab_mutex);
+       rc = __kmem_cache_destroy_memcg_children(s);
+       mutex_lock(&slab_mutex);
+
+       return rc;
+}
+#else
+static int kmem_cache_destroy_memcg_children(struct kmem_cache *s)
+{
+       return 0;
+}
 #endif /* CONFIG_MEMCG_KMEM */
 
 void kmem_cache_destroy(struct kmem_cache *s)
 {
-       /* Destroy all the children caches if we aren't a memcg cache */
-       kmem_cache_destroy_memcg_children(s);
-
        get_online_cpus();
        mutex_lock(&slab_mutex);
+
        s->refcount--;
-       if (!s->refcount) {
-               list_del(&s->list);
-               memcg_unregister_cache(s);
-
-               if (!__kmem_cache_shutdown(s)) {
-                       mutex_unlock(&slab_mutex);
-                       if (s->flags & SLAB_DESTROY_BY_RCU)
-                               rcu_barrier();
-
-                       memcg_free_cache_params(s);
-                       kfree(s->name);
-                       kmem_cache_free(kmem_cache, s);
-               } else {
-                       list_add(&s->list, &slab_caches);
-                       memcg_register_cache(s);
-                       mutex_unlock(&slab_mutex);
-                       printk(KERN_ERR "kmem_cache_destroy %s: Slab cache still has objects\n",
-                               s->name);
-                       dump_stack();
-               }
-       } else {
-               mutex_unlock(&slab_mutex);
+       if (s->refcount)
+               goto out_unlock;
+
+       if (kmem_cache_destroy_memcg_children(s) != 0)
+               goto out_unlock;
+
+       list_del(&s->list);
+       memcg_unregister_cache(s);
+
+       if (__kmem_cache_shutdown(s) != 0) {
+               list_add(&s->list, &slab_caches);
+               memcg_register_cache(s);
+               printk(KERN_ERR "kmem_cache_destroy %s: "
+                      "Slab cache still has objects\n", s->name);
+               dump_stack();
+               goto out_unlock;
        }
+
+       mutex_unlock(&slab_mutex);
+       if (s->flags & SLAB_DESTROY_BY_RCU)
+               rcu_barrier();
+
+       memcg_free_cache_params(s);
+       kfree(s->name);
+       kmem_cache_free(kmem_cache, s);
+       goto out_put_cpus;
+
+out_unlock:
+       mutex_unlock(&slab_mutex);
+out_put_cpus:
        put_online_cpus();
 }
 EXPORT_SYMBOL(kmem_cache_destroy);