Merge branch 'slab/for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/linux
author Linus Torvalds <torvalds@linux-foundation.org>
Wed, 28 Mar 2012 22:04:26 +0000 (15:04 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Wed, 28 Mar 2012 22:04:26 +0000 (15:04 -0700)
Pull SLAB changes from Pekka Enberg:
 "There's the new kmalloc_array() API, minor fixes and performance
  improvements, but quite honestly, nothing terribly exciting."

* 'slab/for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/linux:
  mm: SLAB Out-of-memory diagnostics
  slab: introduce kmalloc_array()
  slub: per cpu partial statistics change
  slub: include include for prefetch
  slub: Do not hold slub_lock when calling sysfs_slab_add()
  slub: prefetch next freelist pointer in slab_alloc()
  slab, cleanup: remove unneeded return

include/linux/slab.h
include/linux/slub_def.h
mm/slab.c
mm/slub.c

index 573c809c33d9900b49ff0d1a29f31b499d377dab..a595dce6b0c7596d1481e2c87a2b55028c66a449 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -190,7 +190,7 @@ size_t ksize(const void *);
 #endif
 
 /**
- * kcalloc - allocate memory for an array. The memory is set to zero.
+ * kmalloc_array - allocate memory for an array.
  * @n: number of elements.
  * @size: element size.
  * @flags: the type of memory to allocate.
@@ -240,11 +240,22 @@ size_t ksize(const void *);
  * for general use, and so are not documented here. For a full list of
  * potential flags, always refer to linux/gfp.h.
  */
-static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
+static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
 {
        if (size != 0 && n > ULONG_MAX / size)
                return NULL;
-       return __kmalloc(n * size, flags | __GFP_ZERO);
+       return __kmalloc(n * size, flags);
+}
+
+/**
+ * kcalloc - allocate memory for an array. The memory is set to zero.
+ * @n: number of elements.
+ * @size: element size.
+ * @flags: the type of memory to allocate (see kmalloc).
+ */
+static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
+{
+       return kmalloc_array(n, size, flags | __GFP_ZERO);
 }
 
 #if !defined(CONFIG_NUMA) && !defined(CONFIG_SLOB)
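
A minimal usage sketch of the new pair (the caller is hypothetical; "struct foo" and "count" are not from the commit): kmalloc_array() gives the overflow check without the implied zeroing, while kcalloc() now layers __GFP_ZERO on top of it:

	struct foo *array;

	/* Overflow-checked allocation; contents left uninitialized. */
	array = kmalloc_array(count, sizeof(*array), GFP_KERNEL);
	if (!array)
		return -ENOMEM;

	/* Same overflow check, memory zeroed (unchanged kcalloc semantics). */
	array = kcalloc(count, sizeof(*array), GFP_KERNEL);
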
index ca122b36aec1220b2cf7762704306cb99c6ae635..c2f8c8bc56edd08183dd0dd22b4f7d3b39f1c63d 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -22,7 +22,7 @@ enum stat_item {
        FREE_FROZEN,            /* Freeing to frozen slab */
        FREE_ADD_PARTIAL,       /* Freeing moves slab to partial list */
        FREE_REMOVE_PARTIAL,    /* Freeing removes last object */
-       ALLOC_FROM_PARTIAL,     /* Cpu slab acquired from partial list */
+       ALLOC_FROM_PARTIAL,     /* Cpu slab acquired from node partial list */
        ALLOC_SLAB,             /* Cpu slab acquired from page allocator */
        ALLOC_REFILL,           /* Refill cpu slab from slab freelist */
        ALLOC_NODE_MISMATCH,    /* Switching cpu slab */
@@ -38,7 +38,9 @@ enum stat_item {
        CMPXCHG_DOUBLE_CPU_FAIL,/* Failure of this_cpu_cmpxchg_double */
        CMPXCHG_DOUBLE_FAIL,    /* Number of times that cmpxchg double did not match */
        CPU_PARTIAL_ALLOC,      /* Used cpu partial on alloc */
-       CPU_PARTIAL_FREE,       /* USed cpu partial on free */
+       CPU_PARTIAL_FREE,       /* Refill cpu partial on free */
+       CPU_PARTIAL_NODE,       /* Refill cpu partial from node partial */
+       CPU_PARTIAL_DRAIN,      /* Drain cpu partial to node partial */
        NR_SLUB_STAT_ITEMS };
 
 struct kmem_cache_cpu {
index 29c8716eb7a9a69a4d67501081add66d0d9caa36..e901a36e2520c2b7aa5123ddcd0553415b764397 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1731,6 +1731,52 @@ static int __init cpucache_init(void)
 }
 __initcall(cpucache_init);
 
+static noinline void
+slab_out_of_memory(struct kmem_cache *cachep, gfp_t gfpflags, int nodeid)
+{
+       struct kmem_list3 *l3;
+       struct slab *slabp;
+       unsigned long flags;
+       int node;
+
+       printk(KERN_WARNING
+               "SLAB: Unable to allocate memory on node %d (gfp=0x%x)\n",
+               nodeid, gfpflags);
+       printk(KERN_WARNING "  cache: %s, object size: %d, order: %d\n",
+               cachep->name, cachep->buffer_size, cachep->gfporder);
+
+       for_each_online_node(node) {
+               unsigned long active_objs = 0, num_objs = 0, free_objects = 0;
+               unsigned long active_slabs = 0, num_slabs = 0;
+
+               l3 = cachep->nodelists[node];
+               if (!l3)
+                       continue;
+
+               spin_lock_irqsave(&l3->list_lock, flags);
+               list_for_each_entry(slabp, &l3->slabs_full, list) {
+                       active_objs += cachep->num;
+                       active_slabs++;
+               }
+               list_for_each_entry(slabp, &l3->slabs_partial, list) {
+                       active_objs += slabp->inuse;
+                       active_slabs++;
+               }
+               list_for_each_entry(slabp, &l3->slabs_free, list)
+                       num_slabs++;
+
+               free_objects += l3->free_objects;
+               spin_unlock_irqrestore(&l3->list_lock, flags);
+
+               num_slabs += active_slabs;
+               num_objs = num_slabs * cachep->num;
+               printk(KERN_WARNING
+                       "  node %d: slabs: %ld/%ld, objs: %ld/%ld, free: %ld\n",
+                       node, active_slabs, num_slabs, active_objs, num_objs,
+                       free_objects);
+       }
+}
+
 /*
  * Interface to system's page allocator. No need to hold the cache-lock.
  *
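
For illustration only (the numbers below are hypothetical; the layout follows the printk formats above), a failed allocation now reports something like:

	SLAB: Unable to allocate memory on node 0 (gfp=0xd0)
	  cache: kmalloc-4096, object size: 4096, order: 0
	  node 0: slabs: 95/95, objs: 95/95, free: 0
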
@@ -1757,8 +1803,11 @@ static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid)
                flags |= __GFP_RECLAIMABLE;
 
        page = alloc_pages_exact_node(nodeid, flags | __GFP_NOTRACK, cachep->gfporder);
-       if (!page)
+       if (!page) {
+               if (!(flags & __GFP_NOWARN) && printk_ratelimit())
+                       slab_out_of_memory(cachep, flags, nodeid);
                return NULL;
+       }
 
        nr_pages = (1 << cachep->gfporder);
        if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
@@ -3696,13 +3745,12 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp,
 
        if (likely(ac->avail < ac->limit)) {
                STATS_INC_FREEHIT(cachep);
-               ac->entry[ac->avail++] = objp;
-               return;
        } else {
                STATS_INC_FREEMISS(cachep);
                cache_flusharray(cachep, ac);
-               ac->entry[ac->avail++] = objp;
        }
+
+       ac->entry[ac->avail++] = objp;
 }
 
 /**
index f4a6229848fdb6bd654c90947f341b6060ab0bde..64d9966d16bc5961f44c37bd7f86441227447ec4 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -29,6 +29,7 @@
 #include <linux/math64.h>
 #include <linux/fault-inject.h>
 #include <linux/stacktrace.h>
+#include <linux/prefetch.h>
 
 #include <trace/events/kmem.h>
 
@@ -269,6 +270,11 @@ static inline void *get_freepointer(struct kmem_cache *s, void *object)
        return *(void **)(object + s->offset);
 }
 
+static void prefetch_freepointer(const struct kmem_cache *s, void *object)
+{
+       prefetch(object + s->offset);
+}
+
 static inline void *get_freepointer_safe(struct kmem_cache *s, void *object)
 {
        void *p;
@@ -1560,6 +1566,7 @@ static void *get_partial_node(struct kmem_cache *s,
                } else {
                        page->freelist = t;
                        available = put_cpu_partial(s, page, 0);
+                       stat(s, CPU_PARTIAL_NODE);
                }
                if (kmem_cache_debug(s) || available > s->cpu_partial / 2)
                        break;
@@ -1983,6 +1990,7 @@ int put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
                                local_irq_restore(flags);
                                pobjects = 0;
                                pages = 0;
+                               stat(s, CPU_PARTIAL_DRAIN);
                        }
                }
 
@@ -1994,7 +2002,6 @@ int put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
                page->next = oldpage;
 
        } while (this_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page) != oldpage);
-       stat(s, CPU_PARTIAL_FREE);
        return pobjects;
 }
 
@@ -2319,6 +2326,8 @@ redo:
                object = __slab_alloc(s, gfpflags, node, addr, c);
 
        else {
+               void *next_object = get_freepointer_safe(s, object);
+
                /*
                 * The cmpxchg will only match if there was no additional
                 * operation and if we are on the right processor.
@@ -2334,11 +2343,12 @@ redo:
                if (unlikely(!this_cpu_cmpxchg_double(
                                s->cpu_slab->freelist, s->cpu_slab->tid,
                                object, tid,
-                               get_freepointer_safe(s, object), next_tid(tid)))) {
+                               next_object, next_tid(tid)))) {
 
                        note_cmpxchg_failure("slab_alloc", s, tid);
                        goto redo;
                }
+               prefetch_freepointer(s, next_object);
                stat(s, ALLOC_FASTPATH);
        }
 
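
The idea behind the prefetch: once the cmpxchg_double has published next_object as the new freelist head, the very next allocation will dereference it, so its cache line can start loading now. A standalone userspace sketch of the same pattern (hypothetical list walker; GCC's __builtin_prefetch stands in for the kernel's prefetch()):

	struct node {
		struct node *next;
		long payload;
	};

	static long sum_list(struct node *n)
	{
		long sum = 0;

		while (n) {
			/* Begin fetching the next node while this one is summed. */
			if (n->next)
				__builtin_prefetch(n->next);
			sum += n->payload;
			n = n->next;
		}
		return sum;
	}
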
@@ -2475,9 +2485,10 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
                 * If we just froze the page then put it onto the
                 * per cpu partial list.
                 */
-               if (new.frozen && !was_frozen)
+               if (new.frozen && !was_frozen) {
                        put_cpu_partial(s, page, 1);
-
+                       stat(s, CPU_PARTIAL_FREE);
+               }
                /*
                 * The list lock was not taken therefore no list
                 * activity can be necessary.
@@ -3939,13 +3950,14 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
                if (kmem_cache_open(s, n,
                                size, align, flags, ctor)) {
                        list_add(&s->list, &slab_caches);
+                       up_write(&slub_lock);
                        if (sysfs_slab_add(s)) {
+                               down_write(&slub_lock);
                                list_del(&s->list);
                                kfree(n);
                                kfree(s);
                                goto err;
                        }
-                       up_write(&slub_lock);
                        return s;
                }
                kfree(n);
@@ -5069,6 +5081,8 @@ STAT_ATTR(CMPXCHG_DOUBLE_CPU_FAIL, cmpxchg_double_cpu_fail);
 STAT_ATTR(CMPXCHG_DOUBLE_FAIL, cmpxchg_double_fail);
 STAT_ATTR(CPU_PARTIAL_ALLOC, cpu_partial_alloc);
 STAT_ATTR(CPU_PARTIAL_FREE, cpu_partial_free);
+STAT_ATTR(CPU_PARTIAL_NODE, cpu_partial_node);
+STAT_ATTR(CPU_PARTIAL_DRAIN, cpu_partial_drain);
 #endif
 
 static struct attribute *slab_attrs[] = {
@@ -5134,6 +5148,8 @@ static struct attribute *slab_attrs[] = {
        &cmpxchg_double_cpu_fail_attr.attr,
        &cpu_partial_alloc_attr.attr,
        &cpu_partial_free_attr.attr,
+       &cpu_partial_node_attr.attr,
+       &cpu_partial_drain_attr.attr,
 #endif
 #ifdef CONFIG_FAILSLAB
        &failslab_attr.attr,
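
With CONFIG_SLUB_STATS enabled, the two new counters appear as per-cache sysfs files next to the existing ones (the cache name here is only an example):

	/sys/kernel/slab/kmalloc-64/cpu_partial_node
	/sys/kernel/slab/kmalloc-64/cpu_partial_drain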