mm: slab/slub: use page->lru consistently instead of page->list
author Dave Hansen <dave.hansen@linux.intel.com>
Tue, 8 Apr 2014 20:44:27 +0000 (13:44 -0700)
committer Pekka Enberg <penberg@kernel.org>
Fri, 11 Apr 2014 07:06:06 +0000 (10:06 +0300)
'struct page' has two list_head fields: 'lru' and 'list'.  Conveniently,
they are unioned together.  This means that code can use them
interchangeably, which gets horribly confusing, as in this nugget from
slab.c:

> list_del(&page->lru);
> if (page->active == cachep->num)
>         list_add(&page->list, &n->slabs_full);
> else
>         list_add(&page->list, &n->slabs_partial);

This patch makes the slab and slob code use page->lru universally instead
of mixing ->list and ->lru.

So, the new rule is: page->lru is what you use if you want to keep
your page on a list.  Don't like the fact that it's not called ->list?
Too bad.
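
Purely as an illustration (a stand-in struct, not the real 'struct page'
layout): because the two list_head members sit in a union, &p->lru and
&p->list are the same address, so code that links a page through one name
and unlinks it through the other still "works" -- it is only the reader
who gets confused:

        #include <linux/list.h>

        /* illustrative only; 'struct demo_page' is not a kernel type */
        struct demo_page {
                union {
                        struct list_head lru;
                        struct list_head list;
                };
        };

        static void demo_move(struct demo_page *p, struct list_head *full)
        {
                list_del(&p->lru);              /* unlink via one alias...     */
                list_add(&p->list, full);       /* ...relink via the other one */
        }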

Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
Acked-by: Christoph Lameter <cl@linux.com>
Acked-by: David Rientjes <rientjes@google.com>
Cc: Pekka Enberg <penberg@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
include/linux/mm_types.h
mm/slab.c
mm/slob.c

index 290901a8c1de9f0193ae3e2c640c070623431081..84b74080beb79418c7bb59e87106b050cefd2df2 100644 (file)
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -124,6 +124,8 @@ struct page {
        union {
                struct list_head lru;   /* Pageout list, eg. active_list
                                         * protected by zone->lru_lock !
+                                        * Can be used as a generic list
+                                        * by the page owner.
                                         */
                struct {                /* slub per cpu partial pages */
                        struct page *next;      /* Next partial slab */
@@ -136,7 +138,6 @@ struct page {
 #endif
                };
 
-               struct list_head list;  /* slobs list of pages */
                struct slab *slab_page; /* slab fields */
                struct rcu_head rcu_head;       /* Used by SLAB
                                                 * when destroying via RCU
index 8dd8e0875e4c88b3cfd88a3db8255f2f4699cb9a..f6718197cdd0177c766481b33e0aac0844a7a52f 100644 (file)
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2922,9 +2922,9 @@ retry:
                /* move slabp to correct slabp list: */
                list_del(&page->lru);
                if (page->active == cachep->num)
-                       list_add(&page->list, &n->slabs_full);
+                       list_add(&page->lru, &n->slabs_full);
                else
-                       list_add(&page->list, &n->slabs_partial);
+                       list_add(&page->lru, &n->slabs_partial);
        }
 
 must_grow:
index 4bf8809dfcce78f900c9c52b1f0aa0d614ece1bb..730cad45d4be0154ad2c5814935ac8caa7a80015 100644 (file)
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -111,13 +111,13 @@ static inline int slob_page_free(struct page *sp)
 
 static void set_slob_page_free(struct page *sp, struct list_head *list)
 {
-       list_add(&sp->list, list);
+       list_add(&sp->lru, list);
        __SetPageSlobFree(sp);
 }
 
 static inline void clear_slob_page_free(struct page *sp)
 {
-       list_del(&sp->list);
+       list_del(&sp->lru);
        __ClearPageSlobFree(sp);
 }
 
@@ -282,7 +282,7 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
 
        spin_lock_irqsave(&slob_lock, flags);
        /* Iterate through each partially free page, try to find room */
-       list_for_each_entry(sp, slob_list, list) {
+       list_for_each_entry(sp, slob_list, lru) {
 #ifdef CONFIG_NUMA
                /*
                 * If there's a node specification, search for a partial
@@ -296,7 +296,7 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
                        continue;
 
                /* Attempt to alloc */
-               prev = sp->list.prev;
+               prev = sp->lru.prev;
                b = slob_page_alloc(sp, size, align);
                if (!b)
                        continue;
@@ -322,7 +322,7 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
                spin_lock_irqsave(&slob_lock, flags);
                sp->units = SLOB_UNITS(PAGE_SIZE);
                sp->freelist = b;
-               INIT_LIST_HEAD(&sp->list);
+               INIT_LIST_HEAD(&sp->lru);
                set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
                set_slob_page_free(sp, slob_list);
                b = slob_page_alloc(sp, size, align);
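
For context on the comment added to 'struct page' above ("Can be used as a
generic list by the page owner"), here is a minimal, hypothetical sketch of
that pattern; the list and helper names are illustrative and not part of
this patch:

        #include <linux/list.h>
        #include <linux/mm_types.h>

        static LIST_HEAD(my_pages);     /* hypothetical owner-private list */

        static void track_page(struct page *page)
        {
                /* ->lru doubles as a generic list hook for the page owner */
                list_add(&page->lru, &my_pages);
        }

        static void untrack_page(struct page *page)
        {
                list_del(&page->lru);
        }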