Merge branch 'slab/next' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg...
index 1ee14acb3bdacb71adc0c6a4ff26a1a13eec1a94..545a170ebf9f66cf0e3716c9cd6f4cb7eef0eda6 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -373,7 +373,8 @@ static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct page *page
 #endif
        {
                slab_lock(page);
-               if (page->freelist == freelist_old && page->counters == counters_old) {
+               if (page->freelist == freelist_old &&
+                                       page->counters == counters_old) {
                        page->freelist = freelist_new;
                        page->counters = counters_new;
                        slab_unlock(page);
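
For context: this hunk sits in the fallback path taken when the architecture has no double-word cmpxchg, so both words are compared and swapped under the per-page slab lock. A minimal user-space sketch of that pattern, with hypothetical names and a pthread mutex standing in for the page bit-spinlock:

#include <stdbool.h>
#include <pthread.h>

struct slab_like {
	pthread_mutex_t lock;	/* plays the role of slab_lock()/slab_unlock() */
	void *freelist;
	unsigned long counters;
};

/* Replace (freelist, counters) only if both still hold the expected values. */
static bool cas_double(struct slab_like *sl,
		       void *freelist_old, unsigned long counters_old,
		       void *freelist_new, unsigned long counters_new)
{
	bool ok = false;

	pthread_mutex_lock(&sl->lock);
	if (sl->freelist == freelist_old && sl->counters == counters_old) {
		sl->freelist = freelist_new;
		sl->counters = counters_new;
		ok = true;
	}
	pthread_mutex_unlock(&sl->lock);
	return ok;
}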
@@ -411,7 +412,8 @@ static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
 
                local_irq_save(flags);
                slab_lock(page);
-               if (page->freelist == freelist_old && page->counters == counters_old) {
+               if (page->freelist == freelist_old &&
+                                       page->counters == counters_old) {
                        page->freelist = freelist_new;
                        page->counters = counters_new;
                        slab_unlock(page);
@@ -553,8 +555,9 @@ static void print_tracking(struct kmem_cache *s, void *object)
 
 static void print_page_info(struct page *page)
 {
-       printk(KERN_ERR "INFO: Slab 0x%p objects=%u used=%u fp=0x%p flags=0x%04lx\n",
-               page, page->objects, page->inuse, page->freelist, page->flags);
+       printk(KERN_ERR
+              "INFO: Slab 0x%p objects=%u used=%u fp=0x%p flags=0x%04lx\n",
+              page, page->objects, page->inuse, page->freelist, page->flags);
 
 }
 
@@ -629,7 +632,8 @@ static void object_err(struct kmem_cache *s, struct page *page,
        print_trailer(s, page, object);
 }
 
-static void slab_err(struct kmem_cache *s, struct page *page, const char *fmt, ...)
+static void slab_err(struct kmem_cache *s, struct page *page,
+                       const char *fmt, ...)
 {
        va_list args;
        char buf[100];
@@ -788,7 +792,8 @@ static int check_object(struct kmem_cache *s, struct page *page,
        } else {
                if ((s->flags & SLAB_POISON) && s->object_size < s->inuse) {
                        check_bytes_and_report(s, page, p, "Alignment padding",
-                               endobject, POISON_INUSE, s->inuse - s->object_size);
+                               endobject, POISON_INUSE,
+                               s->inuse - s->object_size);
                }
        }
 
@@ -873,7 +878,6 @@ static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
                                object_err(s, page, object,
                                        "Freechain corrupt");
                                set_freepointer(s, object, NULL);
-                               break;
                        } else {
                                slab_err(s, page, "Freepointer corrupt");
                                page->freelist = NULL;
@@ -918,7 +922,8 @@ static void trace(struct kmem_cache *s, struct page *page, void *object,
                        page->freelist);
 
                if (!alloc)
-                       print_section("Object ", (void *)object, s->object_size);
+                       print_section("Object ", (void *)object,
+                                       s->object_size);
 
                dump_stack();
        }
@@ -947,7 +952,8 @@ static inline int slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags)
        return should_failslab(s->object_size, flags, s->flags);
 }
 
-static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags, void *object)
+static inline void slab_post_alloc_hook(struct kmem_cache *s,
+                                       gfp_t flags, void *object)
 {
        flags &= gfp_allowed_mask;
        kmemcheck_slab_alloc(s, flags, object, slab_ksize(s));
@@ -959,7 +965,7 @@ static inline void slab_free_hook(struct kmem_cache *s, void *x)
        kmemleak_free_recursive(x, s->flags);
 
        /*
-        * Trouble is that we may no longer disable interupts in the fast path
+        * Trouble is that we may no longer disable interrupts in the fast path.
         * So in order to make the debug calls that expect irqs to be
         * disabled we need to disable interrupts temporarily.
         */
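
Besides the typo fix, this comment records a real constraint: the freeing fast path may run with interrupts enabled, while the debug hooks it calls assume they are off. A hedged kernel-style sketch of the wrapper idea (hypothetical helper name; debug_check_no_locks_freed() is the real lockdep hook):

static void debug_free_checks(struct kmem_cache *s, void *x)
{
	unsigned long flags;

	local_irq_save(flags);	/* give the hook the irqs-off context it expects */
	debug_check_no_locks_freed(x, s->object_size);
	local_irq_restore(flags);
}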
@@ -1049,7 +1055,8 @@ static void setup_object_debug(struct kmem_cache *s, struct page *page,
        init_tracking(s, object);
 }
 
-static noinline int alloc_debug_processing(struct kmem_cache *s, struct page *page,
+static noinline int alloc_debug_processing(struct kmem_cache *s,
+                                       struct page *page,
                                        void *object, unsigned long addr)
 {
        if (!check_slab(s, page))
@@ -1770,7 +1777,8 @@ static void init_kmem_cache_cpus(struct kmem_cache *s)
 /*
  * Remove the cpu slab
  */
-static void deactivate_slab(struct kmem_cache *s, struct page *page, void *freelist)
+static void deactivate_slab(struct kmem_cache *s, struct page *page,
+                               void *freelist)
 {
        enum slab_modes { M_NONE, M_PARTIAL, M_FULL, M_FREE };
        struct kmem_cache_node *n = get_node(s, page_to_nid(page));
@@ -2026,7 +2034,8 @@ static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
                page->pobjects = pobjects;
                page->next = oldpage;
 
-       } while (this_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page) != oldpage);
+       } while (this_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page)
+                                                               != oldpage);
 #endif
 }
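
The loop closing this hunk is the classic lock-free list push: snapshot the head, link the new page in front of it, and retry the compare-and-swap until nothing moved the head in between (this_cpu_cmpxchg() protects against other contexts on the same CPU, not against other CPUs). A user-space analogue with C11 atomics, hypothetical names:

#include <stdatomic.h>

struct pnode {
	struct pnode *next;
};

static void push_front(_Atomic(struct pnode *) *head, struct pnode *node)
{
	struct pnode *old = atomic_load(head);

	do {
		node->next = old;	/* link ahead of the current snapshot */
		/* on failure, 'old' is reloaded, so we re-link and retry */
	} while (!atomic_compare_exchange_weak(head, &old, node));
}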
 
@@ -2196,8 +2205,8 @@ static inline bool pfmemalloc_match(struct page *page, gfp_t gfpflags)
 }
 
 /*
- * Check the page->freelist of a page and either transfer the freelist to the per cpu freelist
- * or deactivate the page.
+ * Check the page->freelist of a page and either transfer the freelist to the
+ * per cpu freelist or deactivate the page.
  *
  * The page is still frozen if the return value is not NULL.
  *
@@ -2341,7 +2350,8 @@ new_slab:
                goto load_freelist;
 
        /* Only entered in the debug case */
-       if (kmem_cache_debug(s) && !alloc_debug_processing(s, page, freelist, addr))
+       if (kmem_cache_debug(s) &&
+                       !alloc_debug_processing(s, page, freelist, addr))
                goto new_slab;  /* Slab failed checks. Next slab needed */
 
        deactivate_slab(s, page, get_freepointer(s, freelist));
@@ -2399,7 +2409,7 @@ redo:
 
        object = c->freelist;
        page = c->page;
-       if (unlikely(!object || !page || !node_match(page, node)))
+       if (unlikely(!object || !node_match(page, node)))
                object = __slab_alloc(s, gfpflags, node, addr, c);
 
        else {
@@ -2409,13 +2419,15 @@ redo:
                 * The cmpxchg will only match if there was no additional
                 * operation and if we are on the right processor.
                 *
-                * The cmpxchg does the following atomically (without lock semantics!)
+                * The cmpxchg does the following atomically (without lock
+                * semantics!)
                 * 1. Relocate first pointer to the current per cpu area.
                 * 2. Verify that tid and freelist have not been changed
                 * 3. If they were not changed replace tid and freelist
                 *
-                * Since this is without lock semantics the protection is only against
-                * code executing on this cpu *not* from access by other cpus.
+                * Since this is without lock semantics the protection is only
+                * against code executing on this cpu *not* from access by
+                * other cpus.
                 */
                if (unlikely(!this_cpu_cmpxchg_double(
                                s->cpu_slab->freelist, s->cpu_slab->tid,
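
Making the numbered steps in that comment concrete: freelist and tid sit side by side and are replaced by one double-word CAS, so any intervening operation on this CPU (which bumps tid) or a migration to another CPU (different tid) fails the CAS and sends control back to the redo: label. A simplified user-space analogue, hypothetical types, ignoring the slow-path refill:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

struct fastpath {
	void *freelist;		/* first free object; it stores the next pointer */
	uintptr_t tid;		/* generation counter; every op increments it */
};

static bool try_pop(_Atomic struct fastpath *fp, void **object)
{
	struct fastpath old = atomic_load(fp);
	struct fastpath new;

	if (!old.freelist)
		return false;			/* empty: take the slow path */

	new.freelist = *(void **)old.freelist;	/* speculative read; a stale
						   value is never installed
						   because the tid check below
						   would fail the CAS */
	new.tid = old.tid + 1;

	if (!atomic_compare_exchange_strong(fp, &old, new))
		return false;			/* raced: caller retries */

	*object = old.freelist;
	return true;
}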
@@ -2447,7 +2459,8 @@ void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
 {
        void *ret = slab_alloc(s, gfpflags, _RET_IP_);
 
-       trace_kmem_cache_alloc(_RET_IP_, ret, s->object_size, s->size, gfpflags);
+       trace_kmem_cache_alloc(_RET_IP_, ret, s->object_size,
+                               s->size, gfpflags);
 
        return ret;
 }
@@ -2461,14 +2474,6 @@ void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
        return ret;
 }
 EXPORT_SYMBOL(kmem_cache_alloc_trace);
-
-void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
-{
-       void *ret = kmalloc_order(size, flags, order);
-       trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << order, flags);
-       return ret;
-}
-EXPORT_SYMBOL(kmalloc_order_trace);
 #endif
 
 #ifdef CONFIG_NUMA
@@ -2539,8 +2544,10 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
                        if (kmem_cache_has_cpu_partial(s) && !prior)
 
                                /*
-                                * Slab was on no list before and will be partially empty
-                                * We can defer the list move and instead freeze it.
+                                * Slab was on no list before and will be
+                                * partially empty.
+                                * We can defer the list move and instead
+                                * freeze it.
                                 */
                                new.frozen = 1;
 
@@ -3098,8 +3105,8 @@ static int kmem_cache_open(struct kmem_cache *s, unsigned long flags)
         * A) The number of objects from per cpu partial slabs dumped to the
         *    per node list when we reach the limit.
         * B) The number of objects in cpu partial slabs to extract from the
-        *    per node list when we run out of per cpu objects. We only fetch 50%
-        *    to keep some capacity around for frees.
+        *    per node list when we run out of per cpu objects. We only fetch
+        *    50% to keep some capacity around for frees.
         */
        if (!kmem_cache_has_cpu_partial(s))
                s->cpu_partial = 0;
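
The context line ending this hunk zeroes the limit when per-cpu partial support is compiled out; otherwise the same function scales the limit down as object size grows, since each cached slab of large objects pins more memory. Roughly, per this era's kmem_cache_open() (shown for orientation, not part of this patch):

	else if (s->size >= PAGE_SIZE)
		s->cpu_partial = 2;
	else if (s->size >= 1024)
		s->cpu_partial = 6;
	else if (s->size >= 256)
		s->cpu_partial = 13;
	else
		s->cpu_partial = 30;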
@@ -3126,8 +3133,8 @@ error:
        if (flags & SLAB_PANIC)
                panic("Cannot create slab %s size=%lu realsize=%u "
                        "order=%u offset=%u flags=%lx\n",
-                       s->name, (unsigned long)s->size, s->size, oo_order(s->oo),
-                       s->offset, flags);
+                       s->name, (unsigned long)s->size, s->size,
+                       oo_order(s->oo), s->offset, flags);
        return -EINVAL;
 }
 
@@ -3343,42 +3350,6 @@ size_t ksize(const void *object)
 }
 EXPORT_SYMBOL(ksize);
 
-#ifdef CONFIG_SLUB_DEBUG
-bool verify_mem_not_deleted(const void *x)
-{
-       struct page *page;
-       void *object = (void *)x;
-       unsigned long flags;
-       bool rv;
-
-       if (unlikely(ZERO_OR_NULL_PTR(x)))
-               return false;
-
-       local_irq_save(flags);
-
-       page = virt_to_head_page(x);
-       if (unlikely(!PageSlab(page))) {
-               /* maybe it was from stack? */
-               rv = true;
-               goto out_unlock;
-       }
-
-       slab_lock(page);
-       if (on_freelist(page->slab_cache, page, object)) {
-               object_err(page->slab_cache, page, object, "Object is on free-list");
-               rv = false;
-       } else {
-               rv = true;
-       }
-       slab_unlock(page);
-
-out_unlock:
-       local_irq_restore(flags);
-       return rv;
-}
-EXPORT_SYMBOL(verify_mem_not_deleted);
-#endif
-
 void kfree(const void *x)
 {
        struct page *page;
@@ -4189,15 +4160,17 @@ static int list_locations(struct kmem_cache *s, char *buf,
                                !cpumask_empty(to_cpumask(l->cpus)) &&
                                len < PAGE_SIZE - 60) {
                        len += sprintf(buf + len, " cpus=");
-                       len += cpulist_scnprintf(buf + len, PAGE_SIZE - len - 50,
+                       len += cpulist_scnprintf(buf + len,
+                                                PAGE_SIZE - len - 50,
                                                 to_cpumask(l->cpus));
                }
 
                if (nr_online_nodes > 1 && !nodes_empty(l->nodes) &&
                                len < PAGE_SIZE - 60) {
                        len += sprintf(buf + len, " nodes=");
-                       len += nodelist_scnprintf(buf + len, PAGE_SIZE - len - 50,
-                                       l->nodes);
+                       len += nodelist_scnprintf(buf + len,
+                                                 PAGE_SIZE - len - 50,
+                                                 l->nodes);
                }
 
                len += sprintf(buf + len, "\n");
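
The len < PAGE_SIZE - 60 guards above reserve headroom in the single-page sysfs buffer before each append. scnprintf() is the self-limiting form of this pattern, returning only what was actually written (hypothetical fragment):

	if (len < PAGE_SIZE - 60)
		len += scnprintf(buf + len, PAGE_SIZE - len - 50,
				 " nodes=%d", nid);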
@@ -4295,18 +4268,17 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
        int node;
        int x;
        unsigned long *nodes;
-       unsigned long *per_cpu;
 
-       nodes = kzalloc(2 * sizeof(unsigned long) * nr_node_ids, GFP_KERNEL);
+       nodes = kzalloc(sizeof(unsigned long) * nr_node_ids, GFP_KERNEL);
        if (!nodes)
                return -ENOMEM;
-       per_cpu = nodes + nr_node_ids;
 
        if (flags & SO_CPU) {
                int cpu;
 
                for_each_possible_cpu(cpu) {
-                       struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
+                       struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab,
+                                                              cpu);
                        int node;
                        struct page *page;
 
@@ -4331,8 +4303,6 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
                                total += x;
                                nodes[node] += x;
                        }
-
-                       per_cpu[node]++;
                }
        }
 
@@ -4342,12 +4312,11 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
                for_each_node_state(node, N_NORMAL_MEMORY) {
                        struct kmem_cache_node *n = get_node(s, node);
 
-               if (flags & SO_TOTAL)
-                       x = atomic_long_read(&n->total_objects);
-               else if (flags & SO_OBJECTS)
-                       x = atomic_long_read(&n->total_objects) -
-                               count_partial(n, count_free);
-
+                       if (flags & SO_TOTAL)
+                               x = atomic_long_read(&n->total_objects);
+                       else if (flags & SO_OBJECTS)
+                               x = atomic_long_read(&n->total_objects) -
+                                       count_partial(n, count_free);
                        else
                                x = atomic_long_read(&n->nr_slabs);
                        total += x;
@@ -4447,7 +4416,7 @@ static ssize_t order_store(struct kmem_cache *s,
        unsigned long order;
        int err;
 
-       err = strict_strtoul(buf, 10, &order);
+       err = kstrtoul(buf, 10, &order);
        if (err)
                return err;
 
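strict_strtoul() was deprecated in favor of kstrtoul(), which this patch adopts in each store handler below: parse the whole string in the given base, return 0 on success or a negative errno. Shape of a typical caller (hypothetical attribute, for illustration):

static ssize_t demo_store(struct kmem_cache *s, const char *buf, size_t length)
{
	unsigned long val;
	int err;

	err = kstrtoul(buf, 10, &val);	/* -EINVAL or -ERANGE on bad input */
	if (err)
		return err;

	s->min_partial = val;		/* hypothetical use of the parsed value */
	return length;
}
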
@@ -4475,7 +4444,7 @@ static ssize_t min_partial_store(struct kmem_cache *s, const char *buf,
        unsigned long min;
        int err;
 
-       err = strict_strtoul(buf, 10, &min);
+       err = kstrtoul(buf, 10, &min);
        if (err)
                return err;
 
@@ -4495,7 +4464,7 @@ static ssize_t cpu_partial_store(struct kmem_cache *s, const char *buf,
        unsigned long objects;
        int err;
 
-       err = strict_strtoul(buf, 10, &objects);
+       err = kstrtoul(buf, 10, &objects);
        if (err)
                return err;
        if (objects && !kmem_cache_has_cpu_partial(s))
@@ -4811,7 +4780,7 @@ static ssize_t remote_node_defrag_ratio_store(struct kmem_cache *s,
        unsigned long ratio;
        int err;
 
-       err = strict_strtoul(buf, 10, &ratio);
+       err = kstrtoul(buf, 10, &ratio);
        if (err)
                return err;
 
@@ -5041,7 +5010,7 @@ static ssize_t slab_attr_store(struct kobject *kobj,
                 * through the descendants with best-effort propagation.
                 */
                for_each_memcg_cache_index(i) {
-                       struct kmem_cache *c = cache_from_memcg(s, i);
+                       struct kmem_cache *c = cache_from_memcg_idx(s, i);
                        if (c)
                                attribute->store(c, buf, len);
                }
@@ -5163,7 +5132,8 @@ static char *create_unique_id(struct kmem_cache *s)
 
 #ifdef CONFIG_MEMCG_KMEM
        if (!is_root_cache(s))
-               p += sprintf(p, "-%08d", memcg_cache_id(s->memcg_params->memcg));
+               p += sprintf(p, "-%08d",
+                               memcg_cache_id(s->memcg_params->memcg));
 #endif
 
        BUG_ON(p > name + ID_STR_LENGTH - 1);