/*
 * Compressed RAM block device
 *
 * Copyright (C) 2008, 2009, 2010  Nitin Gupta
 *
 * This code is released using a dual license strategy: BSD/GPL
 * You can choose the license that better fits your requirements.
 *
 * Released under the terms of 3-clause BSD License
 * Released under the terms of GNU General Public License Version 2.0
 *
 * Project home: http://compcache.googlecode.com
 */

#define KMSG_COMPONENT "zram"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#ifdef CONFIG_ZRAM_DEBUG
#define DEBUG
#endif

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/device.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/lzo.h>
#include <linux/string.h>
#include <linux/vmalloc.h>

#include "zram_drv.h"

/* Globals */
static int zram_major;
struct zram *zram_devices;

/* Module params (documentation at end) */
static unsigned int num_devices;

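/*
 * Simple stat helpers. The u64 counters are guarded by stat64_lock since
 * 64-bit updates are not atomic on 32-bit machines; the u32 counters rely
 * on their callers (normally holding zram->lock) for serialization.
 */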
static void zram_stat_inc(u32 *v)
{
        *v = *v + 1;
}

static void zram_stat_dec(u32 *v)
{
        *v = *v - 1;
}

static void zram_stat64_add(struct zram *zram, u64 *v, u64 inc)
{
        spin_lock(&zram->stat64_lock);
        *v = *v + inc;
        spin_unlock(&zram->stat64_lock);
}

static void zram_stat64_sub(struct zram *zram, u64 *v, u64 dec)
{
        spin_lock(&zram->stat64_lock);
        *v = *v - dec;
        spin_unlock(&zram->stat64_lock);
}

static void zram_stat64_inc(struct zram *zram, u64 *v)
{
        zram_stat64_add(zram, v, 1);
}

static int zram_test_flag(struct zram *zram, u32 index,
                        enum zram_pageflags flag)
{
        return zram->table[index].flags & BIT(flag);
}

static void zram_set_flag(struct zram *zram, u32 index,
                        enum zram_pageflags flag)
{
        zram->table[index].flags |= BIT(flag);
}

static void zram_clear_flag(struct zram *zram, u32 index,
                        enum zram_pageflags flag)
{
        zram->table[index].flags &= ~BIT(flag);
}

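/* Return 1 if the page contains only zero bytes, scanning a word at a time */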
static int page_zero_filled(void *ptr)
{
        unsigned int pos;
        unsigned long *page;

        page = (unsigned long *)ptr;

        for (pos = 0; pos != PAGE_SIZE / sizeof(*page); pos++) {
                if (page[pos])
                        return 0;
        }

        return 1;
}

static void zram_set_disksize(struct zram *zram, size_t totalram_bytes)
{
        if (!zram->disksize) {
                pr_info(
                "disk size not provided. You can use disksize_kb module "
                "param to specify size.\nUsing default: (%u%% of RAM).\n",
                default_disksize_perc_ram
                );
                zram->disksize = default_disksize_perc_ram *
                                        (totalram_bytes / 100);
        }

        if (zram->disksize > 2 * (totalram_bytes)) {
                pr_info(
                "There is little point creating a zram of greater than "
                "twice the size of memory since we expect a 2:1 compression "
                "ratio. Note that zram uses about 0.1%% of the size of "
                "the disk when not in use so a huge zram is "
                "wasteful.\n"
                "\tMemory Size: %zu kB\n"
                "\tSize you selected: %llu kB\n"
                "Continuing anyway ...\n",
                totalram_bytes >> 10, zram->disksize
                );
        }

        zram->disksize &= PAGE_MASK;
}

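/*
 * Free the compressed object (if any) backing the page at @index and
 * update the stats. Zero-filled pages have no allocation; only their
 * flag is cleared.
 */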
static void zram_free_page(struct zram *zram, size_t index)
{
        unsigned long handle = zram->table[index].handle;
        u16 size = zram->table[index].size;

        if (unlikely(!handle)) {
                /*
                 * No memory is allocated for zero filled pages.
                 * Simply clear zero page flag.
                 */
                if (zram_test_flag(zram, index, ZRAM_ZERO)) {
                        zram_clear_flag(zram, index, ZRAM_ZERO);
                        zram_stat_dec(&zram->stats.pages_zero);
                }
                return;
        }

        if (unlikely(size > max_zpage_size))
                zram_stat_dec(&zram->stats.bad_compress);

        zs_free(zram->mem_pool, handle);

        if (size <= PAGE_SIZE / 2)
                zram_stat_dec(&zram->stats.good_compress);

        zram_stat64_sub(zram, &zram->stats.compr_size,
                        zram->table[index].size);
        zram_stat_dec(&zram->stats.pages_stored);

        zram->table[index].handle = 0;
        zram->table[index].size = 0;
}

static void handle_zero_page(struct bio_vec *bvec)
{
        struct page *page = bvec->bv_page;
        void *user_mem;

        user_mem = kmap_atomic(page);
        memset(user_mem + bvec->bv_offset, 0, bvec->bv_len);
        kunmap_atomic(user_mem);

        flush_dcache_page(page);
}

static inline int is_partial_io(struct bio_vec *bvec)
{
        return bvec->bv_len != PAGE_SIZE;
}

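/*
 * Decompress the page at @index into @mem (which must be PAGE_SIZE bytes).
 * Pages stored uncompressed (size == PAGE_SIZE) are copied verbatim;
 * zero pages and unallocated slots simply yield a zero-filled buffer.
 */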
static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
{
        int ret = LZO_E_OK;
        size_t clen = PAGE_SIZE;
        unsigned char *cmem;
        unsigned long handle = zram->table[index].handle;

        if (!handle || zram_test_flag(zram, index, ZRAM_ZERO)) {
                memset(mem, 0, PAGE_SIZE);
                return 0;
        }

        cmem = zs_map_object(zram->mem_pool, handle, ZS_MM_RO);
        if (zram->table[index].size == PAGE_SIZE)
                memcpy(mem, cmem, PAGE_SIZE);
        else
                ret = lzo1x_decompress_safe(cmem, zram->table[index].size,
                                                mem, &clen);
        zs_unmap_object(zram->mem_pool, handle);

        /* Should NEVER happen. Return bio error if it does. */
        if (unlikely(ret != LZO_E_OK)) {
                pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
                zram_stat64_inc(zram, &zram->stats.failed_reads);
                return ret;
        }

        return 0;
}

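/*
 * Read a bio vector from the device. For partial (sub-page) reads the
 * full page is decompressed into a bounce buffer and only the requested
 * range is copied out, since compression works on whole pages.
 */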
static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
                          u32 index, int offset, struct bio *bio)
{
        int ret;
        struct page *page;
        unsigned char *user_mem, *uncmem = NULL;

        page = bvec->bv_page;

        if (unlikely(!zram->table[index].handle) ||
                        zram_test_flag(zram, index, ZRAM_ZERO)) {
                handle_zero_page(bvec);
                return 0;
        }

        user_mem = kmap_atomic(page);
        if (is_partial_io(bvec))
                /* Use a temporary buffer to decompress the page */
                uncmem = kmalloc(PAGE_SIZE, GFP_KERNEL);
        else
                uncmem = user_mem;

        if (!uncmem) {
                pr_info("Unable to allocate temp memory\n");
                ret = -ENOMEM;
                goto out_cleanup;
        }

        ret = zram_decompress_page(zram, uncmem, index);
        /*
         * Should NEVER happen. zram_decompress_page() has already logged
         * the failure and bumped failed_reads, so just return a bio error.
         */
        if (unlikely(ret != LZO_E_OK))
                goto out_cleanup;

        if (is_partial_io(bvec))
                memcpy(user_mem + bvec->bv_offset, uncmem + offset,
                                bvec->bv_len);

        flush_dcache_page(page);
        ret = 0;
out_cleanup:
        kunmap_atomic(user_mem);
        if (is_partial_io(bvec))
                kfree(uncmem);
        return ret;
}

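/*
 * Write a bio vector to the device. Partial writes perform a
 * read-modify-write of the whole page. Zero-filled pages are recorded
 * with a flag only; pages that compress poorly (clen > max_zpage_size)
 * are stored uncompressed.
 */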
static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
                           int offset)
{
        int ret;
        size_t clen;
        unsigned long handle;
        struct page *page;
        unsigned char *user_mem, *cmem, *src, *uncmem = NULL;

        page = bvec->bv_page;
        src = zram->compress_buffer;

        if (is_partial_io(bvec)) {
                /*
                 * This is a partial IO. We need to read the full page
                 * before writing the changes.
                 */
                uncmem = kmalloc(PAGE_SIZE, GFP_KERNEL);
                if (!uncmem) {
                        pr_info("Error allocating temp memory!\n");
                        ret = -ENOMEM;
                        goto out;
                }
                ret = zram_decompress_page(zram, uncmem, index);
                if (ret)
                        goto out_free;
        }

        /*
         * System overwrites unused sectors. Free memory associated
         * with this sector now.
         */
        if (zram->table[index].handle ||
            zram_test_flag(zram, index, ZRAM_ZERO))
                zram_free_page(zram, index);

        user_mem = kmap_atomic(page);

        if (is_partial_io(bvec))
                memcpy(uncmem + offset, user_mem + bvec->bv_offset,
                       bvec->bv_len);
        else
                uncmem = user_mem;

        if (page_zero_filled(uncmem)) {
                kunmap_atomic(user_mem);
                zram_stat_inc(&zram->stats.pages_zero);
                zram_set_flag(zram, index, ZRAM_ZERO);
                ret = 0;
                goto out_free;
        }

        ret = lzo1x_1_compress(uncmem, PAGE_SIZE, src, &clen,
                               zram->compress_workmem);

        kunmap_atomic(user_mem);

        if (unlikely(ret != LZO_E_OK)) {
                pr_err("Compression failed! err=%d\n", ret);
                goto out_free;
        }

        if (unlikely(clen > max_zpage_size)) {
                zram_stat_inc(&zram->stats.bad_compress);
                clen = PAGE_SIZE;
                /*
                 * Store the page uncompressed. user_mem has already been
                 * unmapped, so source the copy from the bounce buffer for
                 * partial IO, or remap the page around the memcpy below.
                 */
                src = NULL;
                if (is_partial_io(bvec))
                        src = uncmem;
        }

        handle = zs_malloc(zram->mem_pool, clen);
        if (!handle) {
                pr_info("Error allocating memory for compressed "
                        "page: %u, size=%zu\n", index, clen);
                ret = -ENOMEM;
                goto out_free;
        }
        cmem = zs_map_object(zram->mem_pool, handle, ZS_MM_WO);

        if ((clen == PAGE_SIZE) && !is_partial_io(bvec))
                src = kmap_atomic(page);
        memcpy(cmem, src, clen);
        if ((clen == PAGE_SIZE) && !is_partial_io(bvec))
                kunmap_atomic(src);

        zs_unmap_object(zram->mem_pool, handle);

        zram->table[index].handle = handle;
        zram->table[index].size = clen;

        /* Update stats */
        zram_stat64_add(zram, &zram->stats.compr_size, clen);
        zram_stat_inc(&zram->stats.pages_stored);
        if (clen <= PAGE_SIZE / 2)
                zram_stat_inc(&zram->stats.good_compress);

        ret = 0;
out_free:
        if (is_partial_io(bvec))
                kfree(uncmem);
out:
        if (ret)
                zram_stat64_inc(zram, &zram->stats.failed_writes);
        return ret;
}

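/*
 * Serialize access per device: reads take zram->lock shared, writes
 * take it exclusive, so a writer never races with readers on the same
 * table entry.
 */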
static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
                        int offset, struct bio *bio, int rw)
{
        int ret;

        if (rw == READ) {
                down_read(&zram->lock);
                ret = zram_bvec_read(zram, bvec, index, offset, bio);
                up_read(&zram->lock);
        } else {
                down_write(&zram->lock);
                ret = zram_bvec_write(zram, bvec, index, offset);
                up_write(&zram->lock);
        }

        return ret;
}

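/* Advance (index, offset) past @bvec, moving to the next page if needed */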
static void update_position(u32 *index, int *offset, struct bio_vec *bvec)
{
        if (*offset + bvec->bv_len >= PAGE_SIZE)
                (*index)++;
        *offset = (*offset + bvec->bv_len) % PAGE_SIZE;
}

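/*
 * Walk the bio segment by segment. A bio vector may straddle a page
 * boundary (offset + bv_len > PAGE_SIZE), in which case it is split
 * into two single-page operations.
 */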
static void __zram_make_request(struct zram *zram, struct bio *bio, int rw)
{
        int i, offset;
        u32 index;
        struct bio_vec *bvec;

        switch (rw) {
        case READ:
                zram_stat64_inc(zram, &zram->stats.num_reads);
                break;
        case WRITE:
                zram_stat64_inc(zram, &zram->stats.num_writes);
                break;
        }

        index = bio->bi_sector >> SECTORS_PER_PAGE_SHIFT;
        offset = (bio->bi_sector & (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;

        bio_for_each_segment(bvec, bio, i) {
                int max_transfer_size = PAGE_SIZE - offset;

                if (bvec->bv_len > max_transfer_size) {
                        /*
                         * zram_bvec_rw() can only operate on a single
                         * zram page. Split the bio vector.
                         */
                        struct bio_vec bv;

                        bv.bv_page = bvec->bv_page;
                        bv.bv_len = max_transfer_size;
                        bv.bv_offset = bvec->bv_offset;

                        if (zram_bvec_rw(zram, &bv, index, offset, bio, rw) < 0)
                                goto out;

                        bv.bv_len = bvec->bv_len - max_transfer_size;
                        bv.bv_offset += max_transfer_size;
                        if (zram_bvec_rw(zram, &bv, index + 1, 0, bio, rw) < 0)
                                goto out;
                } else {
                        if (zram_bvec_rw(zram, bvec, index, offset, bio, rw)
                            < 0)
                                goto out;
                }

                update_position(&index, &offset, bvec);
        }

        set_bit(BIO_UPTODATE, &bio->bi_flags);
        bio_endio(bio, 0);
        return;

out:
        bio_io_error(bio);
}

/*
 * Check if request is within bounds and aligned on zram logical blocks.
 */
static inline int valid_io_request(struct zram *zram, struct bio *bio)
{
        if (unlikely(
                (bio->bi_sector >= (zram->disksize >> SECTOR_SHIFT)) ||
                (bio->bi_sector & (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)) ||
                (bio->bi_size & (ZRAM_LOGICAL_BLOCK_SIZE - 1)))) {

                return 0;
        }

        /* I/O request is valid */
        return 1;
}

/*
 * Handler function for all zram I/O requests.
 */
static void zram_make_request(struct request_queue *queue, struct bio *bio)
{
        struct zram *zram = queue->queuedata;

        if (unlikely(!zram->init_done) && zram_init_device(zram))
                goto error;

        down_read(&zram->init_lock);
        if (unlikely(!zram->init_done))
                goto error_unlock;

        if (!valid_io_request(zram, bio)) {
                zram_stat64_inc(zram, &zram->stats.invalid_io);
                goto error_unlock;
        }

        __zram_make_request(zram, bio, bio_data_dir(bio));
        up_read(&zram->init_lock);

        return;

error_unlock:
        up_read(&zram->init_lock);
error:
        bio_io_error(bio);
}

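/*
 * Tear down all per-device state. Caller must hold zram->init_lock for
 * writing; zram_reset_device() below wraps this with the lock held.
 */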
void __zram_reset_device(struct zram *zram)
{
        size_t index;

        zram->init_done = 0;

        /* Free various per-device buffers */
        kfree(zram->compress_workmem);
        free_pages((unsigned long)zram->compress_buffer, 1);

        zram->compress_workmem = NULL;
        zram->compress_buffer = NULL;

        /* Free all pages that are still in this zram device */
        for (index = 0; index < zram->disksize >> PAGE_SHIFT; index++) {
                unsigned long handle = zram->table[index].handle;
                if (!handle)
                        continue;

                zs_free(zram->mem_pool, handle);
        }

        vfree(zram->table);
        zram->table = NULL;

        zs_destroy_pool(zram->mem_pool);
        zram->mem_pool = NULL;

        /* Reset stats */
        memset(&zram->stats, 0, sizeof(zram->stats));

        zram->disksize = 0;
}

void zram_reset_device(struct zram *zram)
{
        down_write(&zram->init_lock);
        __zram_reset_device(zram);
        up_write(&zram->init_lock);
}

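/*
 * Allocate the compression work memory, the two-page compression buffer,
 * the page table and the zsmalloc pool for one device. Safe to call more
 * than once; an already-initialized device is left untouched.
 */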
int zram_init_device(struct zram *zram)
{
        int ret;
        size_t num_pages;

        down_write(&zram->init_lock);

        if (zram->init_done) {
                up_write(&zram->init_lock);
                return 0;
        }

        zram_set_disksize(zram, totalram_pages << PAGE_SHIFT);

        zram->compress_workmem = kzalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
        if (!zram->compress_workmem) {
                pr_err("Error allocating compressor working memory!\n");
                ret = -ENOMEM;
                goto fail_no_table;
        }

        zram->compress_buffer =
                (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
        if (!zram->compress_buffer) {
                pr_err("Error allocating compressor buffer space\n");
                ret = -ENOMEM;
                goto fail_no_table;
        }

        num_pages = zram->disksize >> PAGE_SHIFT;
        zram->table = vzalloc(num_pages * sizeof(*zram->table));
        if (!zram->table) {
                pr_err("Error allocating zram address table\n");
                ret = -ENOMEM;
                goto fail_no_table;
        }

        set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);

        /* zram devices sort of resemble non-rotational disks */
        queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue);

        zram->mem_pool = zs_create_pool("zram", GFP_NOIO | __GFP_HIGHMEM);
        if (!zram->mem_pool) {
                pr_err("Error creating memory pool\n");
                ret = -ENOMEM;
                goto fail;
        }

        zram->init_done = 1;
        up_write(&zram->init_lock);

        pr_debug("Initialization done!\n");
        return 0;

fail_no_table:
        /* To prevent accessing table entries during cleanup */
        zram->disksize = 0;
fail:
        __zram_reset_device(zram);
        up_write(&zram->init_lock);
        pr_err("Initialization failed: err=%d\n", ret);
        return ret;
}

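/*
 * Called by the swap subsystem when a swap slot backed by this device is
 * freed, letting us drop the compressed copy immediately instead of
 * waiting for the sector to be overwritten.
 */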
static void zram_slot_free_notify(struct block_device *bdev,
                                unsigned long index)
{
        struct zram *zram;

        zram = bdev->bd_disk->private_data;
        zram_free_page(zram, index);
        zram_stat64_inc(zram, &zram->stats.notify_free);
}

static const struct block_device_operations zram_devops = {
        .swap_slot_free_notify = zram_slot_free_notify,
        .owner = THIS_MODULE
};

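/*
 * Set up the request queue, gendisk and sysfs attributes for one device.
 * Actual memory allocation is deferred to zram_init_device(), called on
 * the first I/O.
 */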
static int create_device(struct zram *zram, int device_id)
{
        int ret = 0;

        init_rwsem(&zram->lock);
        init_rwsem(&zram->init_lock);
        spin_lock_init(&zram->stat64_lock);

        zram->queue = blk_alloc_queue(GFP_KERNEL);
        if (!zram->queue) {
                pr_err("Error allocating disk queue for device %d\n",
                        device_id);
                ret = -ENOMEM;
                goto out;
        }

        blk_queue_make_request(zram->queue, zram_make_request);
        zram->queue->queuedata = zram;

        /* gendisk structure */
        zram->disk = alloc_disk(1);
        if (!zram->disk) {
                blk_cleanup_queue(zram->queue);
                pr_warn("Error allocating disk structure for device %d\n",
                        device_id);
                ret = -ENOMEM;
                goto out;
        }

        zram->disk->major = zram_major;
        zram->disk->first_minor = device_id;
        zram->disk->fops = &zram_devops;
        zram->disk->queue = zram->queue;
        zram->disk->private_data = zram;
        snprintf(zram->disk->disk_name, 16, "zram%d", device_id);

        /* Actual capacity set using sysfs (/sys/block/zram<id>/disksize) */
        set_capacity(zram->disk, 0);

        /*
         * To ensure that we always get PAGE_SIZE aligned
         * and n*PAGE_SIZED sized I/O requests.
         */
        blk_queue_physical_block_size(zram->disk->queue, PAGE_SIZE);
        blk_queue_logical_block_size(zram->disk->queue,
                                        ZRAM_LOGICAL_BLOCK_SIZE);
        blk_queue_io_min(zram->disk->queue, PAGE_SIZE);
        blk_queue_io_opt(zram->disk->queue, PAGE_SIZE);

        add_disk(zram->disk);

        ret = sysfs_create_group(&disk_to_dev(zram->disk)->kobj,
                                &zram_disk_attr_group);
        if (ret < 0) {
                pr_warn("Error creating sysfs group\n");
                goto out;
        }

        zram->init_done = 0;

out:
        return ret;
}

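/* Undo create_device(): remove the sysfs group, gendisk and request queue */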
static void destroy_device(struct zram *zram)
{
        sysfs_remove_group(&disk_to_dev(zram->disk)->kobj,
                        &zram_disk_attr_group);

        if (zram->disk) {
                del_gendisk(zram->disk);
                put_disk(zram->disk);
        }

        if (zram->queue)
                blk_cleanup_queue(zram->queue);
}

unsigned int zram_get_num_devices(void)
{
        return num_devices;
}

static int __init zram_init(void)
{
        int ret, dev_id;

        if (num_devices > max_num_devices) {
                pr_warn("Invalid value for num_devices: %u\n",
                                num_devices);
                ret = -EINVAL;
                goto out;
        }

        zram_major = register_blkdev(0, "zram");
        if (zram_major <= 0) {
                pr_warn("Unable to get major number\n");
                ret = -EBUSY;
                goto out;
        }

        if (!num_devices) {
                pr_info("num_devices not specified. Using default: 1\n");
                num_devices = 1;
        }

        /* Allocate the device array and initialize each one */
        pr_info("Creating %u devices ...\n", num_devices);
        zram_devices = kzalloc(num_devices * sizeof(struct zram), GFP_KERNEL);
        if (!zram_devices) {
                ret = -ENOMEM;
                goto unregister;
        }

        for (dev_id = 0; dev_id < num_devices; dev_id++) {
                ret = create_device(&zram_devices[dev_id], dev_id);
                if (ret)
                        goto free_devices;
        }

        return 0;

free_devices:
        while (dev_id)
                destroy_device(&zram_devices[--dev_id]);
        kfree(zram_devices);
unregister:
        unregister_blkdev(zram_major, "zram");
out:
        return ret;
}

static void __exit zram_exit(void)
{
        int i;
        struct zram *zram;

        for (i = 0; i < num_devices; i++) {
                zram = &zram_devices[i];

                destroy_device(zram);
                if (zram->init_done)
                        zram_reset_device(zram);
        }

        unregister_blkdev(zram_major, "zram");

        kfree(zram_devices);
        pr_debug("Cleanup done!\n");
}

module_param(num_devices, uint, 0);
MODULE_PARM_DESC(num_devices, "Number of zram devices");

module_init(zram_init);
module_exit(zram_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");
MODULE_DESCRIPTION("Compressed RAM Block Device");