diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 0a7f81aa2249c2c9925cceadd4737ec8c88bf259..f135b1b6fcdcab49aaf0845e078c6fc299b4b28b 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -169,14 +169,12 @@ enum zs_stat_type {
        NR_ZS_STAT_TYPE,
 };
 
-#ifdef CONFIG_ZSMALLOC_STAT
-
-static struct dentry *zs_stat_root;
-
 struct zs_size_stat {
        unsigned long objs[NR_ZS_STAT_TYPE];
 };
 
+#ifdef CONFIG_ZSMALLOC_STAT
+static struct dentry *zs_stat_root;
 #endif
 
 /*
@@ -201,6 +199,8 @@ static int zs_size_classes;
 static const int fullness_threshold_frac = 4;
 
 struct size_class {
+       spinlock_t lock;
+       struct page *fullness_list[_ZS_NR_FULLNESS_GROUPS];
        /*
         * Size of objects stored in this class. Must be multiple
         * of ZS_ALIGN.
@@ -210,16 +210,10 @@ struct size_class {
 
        /* Number of PAGE_SIZE sized pages to combine to form a 'zspage' */
        int pages_per_zspage;
-       /* huge object: pages_per_zspage == 1 && maxobj_per_zspage == 1 */
-       bool huge;
-
-#ifdef CONFIG_ZSMALLOC_STAT
        struct zs_size_stat stats;
-#endif
-
-       spinlock_t lock;
 
-       struct page *fullness_list[_ZS_NR_FULLNESS_GROUPS];
+       /* huge object: pages_per_zspage == 1 && maxobj_per_zspage == 1 */
+       bool huge;
 };
 
 /*
@@ -251,6 +245,15 @@ struct zs_pool {
        gfp_t flags;    /* allocation flags used when growing pool */
        atomic_long_t pages_allocated;
 
+       struct zs_pool_stats stats;
+
+       /* Compact classes */
+       struct shrinker shrinker;
+       /*
+        * To signify that register_shrinker() was successful
+        * and unregister_shrinker() will not Oops.
+        */
+       bool shrinker_enabled;
 #ifdef CONFIG_ZSMALLOC_STAT
        struct dentry *stat_dentry;
 #endif
@@ -285,8 +288,7 @@ static int create_handle_cache(struct zs_pool *pool)
 
 static void destroy_handle_cache(struct zs_pool *pool)
 {
-       if (pool->handle_cachep)
-               kmem_cache_destroy(pool->handle_cachep);
+       kmem_cache_destroy(pool->handle_cachep);
 }
 
 static unsigned long alloc_handle(struct zs_pool *pool)
@@ -309,7 +311,8 @@ static void record_obj(unsigned long handle, unsigned long obj)
 
 #ifdef CONFIG_ZPOOL
 
-static void *zs_zpool_create(char *name, gfp_t gfp, struct zpool_ops *zpool_ops,
+static void *zs_zpool_create(char *name, gfp_t gfp,
+                            const struct zpool_ops *zpool_ops,
                             struct zpool *zpool)
 {
        return zs_create_pool(name, gfp);
@@ -441,8 +444,6 @@ static int get_size_class_index(int size)
        return min(zs_size_classes - 1, idx);
 }
 
-#ifdef CONFIG_ZSMALLOC_STAT
-
 static inline void zs_stat_inc(struct size_class *class,
                                enum zs_stat_type type, unsigned long cnt)
 {
@@ -461,6 +462,8 @@ static inline unsigned long zs_stat_get(struct size_class *class,
        return class->stats.objs[type];
 }
 
+#ifdef CONFIG_ZSMALLOC_STAT
+
 static int __init zs_stat_init(void)
 {
        if (!debugfs_initialized())
@@ -576,23 +579,6 @@ static void zs_pool_stat_destroy(struct zs_pool *pool)
 }
 
 #else /* CONFIG_ZSMALLOC_STAT */
-
-static inline void zs_stat_inc(struct size_class *class,
-                               enum zs_stat_type type, unsigned long cnt)
-{
-}
-
-static inline void zs_stat_dec(struct size_class *class,
-                               enum zs_stat_type type, unsigned long cnt)
-{
-}
-
-static inline unsigned long zs_stat_get(struct size_class *class,
-                               enum zs_stat_type type)
-{
-       return 0;
-}
-
 static int __init zs_stat_init(void)
 {
        return 0;
@@ -610,7 +596,6 @@ static inline int zs_pool_stat_create(char *name, struct zs_pool *pool)
 static inline void zs_pool_stat_destroy(struct zs_pool *pool)
 {
 }
-
 #endif
 
 
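The hunks above, together with the earlier struct changes, take the per-class counters out from under CONFIG_ZSMALLOC_STAT: struct zs_size_stat and the zs_stat_inc()/zs_stat_dec()/zs_stat_get() helpers are now always built, and only the debugfs reporting (zs_stat_init(), zs_pool_stat_create(), ...) stays config-dependent. That is what lets the compaction code further down read OBJ_ALLOCATED/OBJ_USED unconditionally. A minimal sketch of the idea; the alloc/free call sites are assumed for illustration and are not part of this diff:

	/* maintained on every alloc/free, with or without CONFIG_ZSMALLOC_STAT */
	zs_stat_inc(class, OBJ_USED, 1);	/* when an object is handed out */
	zs_stat_dec(class, OBJ_USED, 1);	/* when an object is freed */

	/* ...so zs_can_compact(), added below, can always measure the waste */
	unsigned long obj_wasted = zs_stat_get(class, OBJ_ALLOCATED) -
				   zs_stat_get(class, OBJ_USED);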
@@ -658,13 +643,22 @@ static void insert_zspage(struct page *page, struct size_class *class,
        if (fullness >= _ZS_NR_FULLNESS_GROUPS)
                return;
 
-       head = &class->fullness_list[fullness];
-       if (*head)
-               list_add_tail(&page->lru, &(*head)->lru);
-
-       *head = page;
        zs_stat_inc(class, fullness == ZS_ALMOST_EMPTY ?
                        CLASS_ALMOST_EMPTY : CLASS_ALMOST_FULL, 1);
+
+       head = &class->fullness_list[fullness];
+       if (!*head) {
+               *head = page;
+               return;
+       }
+
+       /*
+        * We want to see more ZS_FULL pages and fewer almost
+        * empty/full ones. Put pages with higher ->inuse first.
+        */
+       list_add_tail(&page->lru, &(*head)->lru);
+       if (page->inuse >= (*head)->inuse)
+               *head = page;
 }
 
 /*
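The reworked insert_zspage() above biases the head of each fullness list toward the zspage with the most objects in use, so isolate_target_page() (further down) hands compaction the fullest destination first and zspages reach ZS_FULL sooner. A standalone toy model of just the head-selection rule, with a simplified page struct and no list maintenance; this is an illustration, not kernel code:

	#include <stdio.h>

	struct toy_page { int inuse; };

	/* newcomer becomes head only if it has at least as many objects
	 * in use as the current head -- same rule as the hunk above */
	static void toy_insert(struct toy_page **head, struct toy_page *page)
	{
		if (!*head || page->inuse >= (*head)->inuse)
			*head = page;
	}

	int main(void)
	{
		struct toy_page a = { 3 }, b = { 7 }, c = { 5 };
		struct toy_page *head = NULL;

		toy_insert(&head, &a);
		toy_insert(&head, &b);
		toy_insert(&head, &c);
		printf("head->inuse = %d\n", head->inuse); /* 7: fullest page wins */
		return 0;
	}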
@@ -1495,7 +1489,7 @@ void zs_free(struct zs_pool *pool, unsigned long handle)
 }
 EXPORT_SYMBOL_GPL(zs_free);
 
-static void zs_object_copy(unsigned long src, unsigned long dst,
+static void zs_object_copy(unsigned long dst, unsigned long src,
                                struct size_class *class)
 {
        struct page *s_page, *d_page;
@@ -1602,8 +1596,6 @@ struct zs_compact_control {
         /* Starting object index within @s_page which used for live object
          * in the subpage. */
        int index;
-       /* how many of objects are migrated */
-       int nr_migrated;
 };
 
 static int migrate_zspage(struct zs_pool *pool, struct size_class *class,
@@ -1614,7 +1606,6 @@ static int migrate_zspage(struct zs_pool *pool, struct size_class *class,
        struct page *s_page = cc->s_page;
        struct page *d_page = cc->d_page;
        unsigned long index = cc->index;
-       int nr_migrated = 0;
        int ret = 0;
 
        while (1) {
@@ -1636,23 +1627,21 @@ static int migrate_zspage(struct zs_pool *pool, struct size_class *class,
 
                used_obj = handle_to_obj(handle);
                free_obj = obj_malloc(d_page, class, handle);
-               zs_object_copy(used_obj, free_obj, class);
+               zs_object_copy(free_obj, used_obj, class);
                index++;
                record_obj(handle, free_obj);
                unpin_tag(handle);
                obj_free(pool, class, used_obj);
-               nr_migrated++;
        }
 
        /* Remember last position in this iteration */
        cc->s_page = s_page;
        cc->index = index;
-       cc->nr_migrated = nr_migrated;
 
        return ret;
 }
 
-static struct page *alloc_target_page(struct size_class *class)
+static struct page *isolate_target_page(struct size_class *class)
 {
        int i;
        struct page *page;
@@ -1668,8 +1657,17 @@ static struct page *alloc_target_page(struct size_class *class)
        return page;
 }
 
-static void putback_zspage(struct zs_pool *pool, struct size_class *class,
-                               struct page *first_page)
+/*
+ * putback_zspage - add @first_page into the right class's fullness list
+ * @pool: target pool
+ * @class: destination class
+ * @first_page: target page
+ *
+ * Return @first_page's fullness_group
+ */
+static enum fullness_group putback_zspage(struct zs_pool *pool,
+                       struct size_class *class,
+                       struct page *first_page)
 {
        enum fullness_group fullness;
 
@@ -1687,50 +1685,72 @@ static void putback_zspage(struct zs_pool *pool, struct size_class *class,
 
                free_zspage(first_page);
        }
+
+       return fullness;
 }
 
 static struct page *isolate_source_page(struct size_class *class)
 {
-       struct page *page;
+       int i;
+       struct page *page = NULL;
 
-       page = class->fullness_list[ZS_ALMOST_EMPTY];
-       if (page)
-               remove_zspage(page, class, ZS_ALMOST_EMPTY);
+       for (i = ZS_ALMOST_EMPTY; i >= ZS_ALMOST_FULL; i--) {
+               page = class->fullness_list[i];
+               if (!page)
+                       continue;
+
+               remove_zspage(page, class, i);
+               break;
+       }
 
        return page;
 }
 
-static unsigned long __zs_compact(struct zs_pool *pool,
-                               struct size_class *class)
+/*
+ * Based on the number of allocated but unused objects
+ * (OBJ_ALLOCATED - OBJ_USED), calculate and return the
+ * number of pages that we can free.
+ */
+static unsigned long zs_can_compact(struct size_class *class)
+{
+       unsigned long obj_wasted;
+
+       obj_wasted = zs_stat_get(class, OBJ_ALLOCATED) -
+               zs_stat_get(class, OBJ_USED);
+
+       obj_wasted /= get_maxobj_per_zspage(class->size,
+                       class->pages_per_zspage);
+
+       return obj_wasted * class->pages_per_zspage;
+}
+
+static void __zs_compact(struct zs_pool *pool, struct size_class *class)
 {
-       int nr_to_migrate;
        struct zs_compact_control cc;
        struct page *src_page;
        struct page *dst_page = NULL;
-       unsigned long nr_total_migrated = 0;
 
        spin_lock(&class->lock);
        while ((src_page = isolate_source_page(class))) {
 
                BUG_ON(!is_first_page(src_page));
 
-               /* The goal is to migrate all live objects in source page */
-               nr_to_migrate = src_page->inuse;
+               if (!zs_can_compact(class))
+                       break;
+
                cc.index = 0;
                cc.s_page = src_page;
 
-               while ((dst_page = alloc_target_page(class))) {
+               while ((dst_page = isolate_target_page(class))) {
                        cc.d_page = dst_page;
                        /*
-                        * If there is no more space in dst_page, try to
-                        * allocate another zspage.
+                        * If there is no more space in dst_page, resched
+                        * and see if anyone has allocated another zspage.
                         */
                        if (!migrate_zspage(pool, class, &cc))
                                break;
 
                        putback_zspage(pool, class, dst_page);
-                       nr_total_migrated += cc.nr_migrated;
-                       nr_to_migrate -= cc.nr_migrated;
                }
 
                /* Stop if we couldn't find slot */
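zs_can_compact() above turns object-level waste into whole page frames: the allocated-but-unused objects, divided by how many objects one zspage holds, times the pages that make up a zspage. __zs_compact() rechecks it before each source page, so compaction stops as soon as nothing more can be freed. A worked example with made-up class geometry and counters:

	/* hypothetical class: 16 objects per zspage, 4 pages per zspage */
	unsigned long allocated = 160;	/* pretend zs_stat_get(class, OBJ_ALLOCATED) */
	unsigned long used = 130;	/* pretend zs_stat_get(class, OBJ_USED) */
	unsigned long obj_wasted = allocated - used;	/* 30 unused slots */

	obj_wasted /= 16;		/* one whole zspage's worth of waste */
	/* zs_can_compact() would report 1 * 4 = 4 freeable page frames */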
@@ -1738,9 +1758,9 @@ static unsigned long __zs_compact(struct zs_pool *pool,
                        break;
 
                putback_zspage(pool, class, dst_page);
-               putback_zspage(pool, class, src_page);
+               if (putback_zspage(pool, class, src_page) == ZS_EMPTY)
+                       pool->stats.pages_compacted += class->pages_per_zspage;
                spin_unlock(&class->lock);
-               nr_total_migrated += cc.nr_migrated;
                cond_resched();
                spin_lock(&class->lock);
        }
@@ -1749,14 +1769,11 @@ static unsigned long __zs_compact(struct zs_pool *pool,
                putback_zspage(pool, class, src_page);
 
        spin_unlock(&class->lock);
-
-       return nr_total_migrated;
 }
 
 unsigned long zs_compact(struct zs_pool *pool)
 {
        int i;
-       unsigned long nr_migrated = 0;
        struct size_class *class;
 
        for (i = zs_size_classes - 1; i >= 0; i--) {
@@ -1765,13 +1782,80 @@ unsigned long zs_compact(struct zs_pool *pool)
                        continue;
                if (class->index != i)
                        continue;
-               nr_migrated += __zs_compact(pool, class);
+               __zs_compact(pool, class);
        }
 
-       return nr_migrated;
+       return pool->stats.pages_compacted;
 }
 EXPORT_SYMBOL_GPL(zs_compact);
 
+void zs_pool_stats(struct zs_pool *pool, struct zs_pool_stats *stats)
+{
+       memcpy(stats, &pool->stats, sizeof(struct zs_pool_stats));
+}
+EXPORT_SYMBOL_GPL(zs_pool_stats);
+
+static unsigned long zs_shrinker_scan(struct shrinker *shrinker,
+               struct shrink_control *sc)
+{
+       unsigned long pages_freed;
+       struct zs_pool *pool = container_of(shrinker, struct zs_pool,
+                       shrinker);
+
+       pages_freed = pool->stats.pages_compacted;
+       /*
+        * Compact classes and calculate compaction delta.
+        * Can run concurrently with a manually triggered
+        * (by user) compaction.
+        */
+       pages_freed = zs_compact(pool) - pages_freed;
+
+       return pages_freed ? pages_freed : SHRINK_STOP;
+}
+
+static unsigned long zs_shrinker_count(struct shrinker *shrinker,
+               struct shrink_control *sc)
+{
+       int i;
+       struct size_class *class;
+       unsigned long pages_to_free = 0;
+       struct zs_pool *pool = container_of(shrinker, struct zs_pool,
+                       shrinker);
+
+       if (!pool->shrinker_enabled)
+               return 0;
+
+       for (i = zs_size_classes - 1; i >= 0; i--) {
+               class = pool->size_class[i];
+               if (!class)
+                       continue;
+               if (class->index != i)
+                       continue;
+
+               pages_to_free += zs_can_compact(class);
+       }
+
+       return pages_to_free;
+}
+
+static void zs_unregister_shrinker(struct zs_pool *pool)
+{
+       if (pool->shrinker_enabled) {
+               unregister_shrinker(&pool->shrinker);
+               pool->shrinker_enabled = false;
+       }
+}
+
+static int zs_register_shrinker(struct zs_pool *pool)
+{
+       pool->shrinker.scan_objects = zs_shrinker_scan;
+       pool->shrinker.count_objects = zs_shrinker_count;
+       pool->shrinker.batch = 0;
+       pool->shrinker.seeks = DEFAULT_SEEKS;
+
+       return register_shrinker(&pool->shrinker);
+}
+
 /**
  * zs_create_pool - Creates an allocation pool to work from.
  * @flags: allocation flags used to allocate pool metadata
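After these hunks zs_compact() returns the pool-lifetime pool->stats.pages_compacted counter rather than a per-call total, zs_pool_stats() copies that structure out to callers, and zs_shrinker_scan() recovers a per-call delta by sampling the counter before and after. A minimal sketch of a consumer doing the same; the pr_info() reporting is illustrative and not taken from an in-tree caller:

	struct zs_pool_stats stats = { };
	unsigned long before, total;

	zs_pool_stats(pool, &stats);
	before = stats.pages_compacted;

	/* same before/after trick zs_shrinker_scan() uses above */
	total = zs_compact(pool);
	pr_info("compacted %lu pages this call, %lu since pool creation\n",
		total - before, total);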
@@ -1857,6 +1941,12 @@ struct zs_pool *zs_create_pool(char *name, gfp_t flags)
        if (zs_pool_stat_create(name, pool))
                goto err;
 
+       /*
+        * Not critical: we can still use the pool,
+        * and the user can trigger compaction manually.
+        */
+       if (zs_register_shrinker(pool) == 0)
+               pool->shrinker_enabled = true;
        return pool;
 
 err:
@@ -1869,6 +1959,7 @@ void zs_destroy_pool(struct zs_pool *pool)
 {
        int i;
 
+       zs_unregister_shrinker(pool);
        zs_pool_stat_destroy(pool);
 
        for (i = 0; i < zs_size_classes; i++) {
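With zs_register_shrinker() called from zs_create_pool() and zs_unregister_shrinker() called first in zs_destroy_pool() (both above), automatic compaction is strictly best-effort and existing callers need no changes. A hedged usage sketch; the pool name and GFP flags are illustrative:

	struct zs_pool *pool;

	pool = zs_create_pool("example", GFP_NOIO | __GFP_HIGHMEM);
	if (!pool)
		return -ENOMEM;

	/*
	 * zs_malloc()/zs_free() work as before.  If the shrinker was
	 * registered, memory pressure may now trigger zs_compact() on
	 * this pool; if registration failed, only manual compaction runs.
	 */

	zs_destroy_pool(pool);	/* unregisters the shrinker before teardown */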