fs: convert core functions to zero_user_page
author Nate Diller <nate.diller@gmail.com>
	Wed, 9 May 2007 09:35:07 +0000 (02:35 -0700)
committer Linus Torvalds <torvalds@woody.linux-foundation.org>
	Wed, 9 May 2007 19:30:55 +0000 (12:30 -0700)
It's very common for file systems to need to zero part or all of a
page; the simplest way is just to use kmap_atomic() and memset().
There's actually a library function in include/linux/highmem.h that
does exactly that, but it's confusingly named
memclear_highpage_flush(), which is descriptive of *how* it does the
work rather than what the *purpose* is.  So this patchset renames the
function to zero_user_page() and calls it from the various places that
currently open-code it.
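
For illustration, the typical open-coded sequence

	void *kaddr = kmap_atomic(page, KM_USER0);
	memset(kaddr + offset, 0, size);
	flush_dcache_page(page);
	kunmap_atomic(kaddr, KM_USER0);

collapses into the single call

	zero_user_page(page, offset, size, KM_USER0);

where page, offset and size stand for the caller's page and byte range
(offset + size must not exceed PAGE_SIZE).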

This first patch introduces the new function call, and converts all the
core kernel callsites, both the open-coded ones and the old
memclear_highpage_flush() ones.  Following this patch is a series of
conversions for each file system individually, per AKPM, and finally a
patch deprecating the old call.  The diffstat below shows the entire
patchset.

[akpm@linux-foundation.org: fix a few things]
Signed-off-by: Nate Diller <nate.diller@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
drivers/block/loop.c
fs/buffer.c
fs/direct-io.c
fs/mpage.c
include/linux/highmem.h
mm/filemap_xip.c
mm/truncate.c

diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index af6d7274a7cc9a28c84d5bf7a4dfba1b6fdf0789..18cdd8c7762693d659bcc58c6e42b20e7b0e39dc 100644
@@ -243,17 +243,13 @@ static int do_lo_send_aops(struct loop_device *lo, struct bio_vec *bvec,
                transfer_result = lo_do_transfer(lo, WRITE, page, offset,
                                bvec->bv_page, bv_offs, size, IV);
                if (unlikely(transfer_result)) {
-                       char *kaddr;
-
                        /*
                         * The transfer failed, but we still write the data to
                         * keep prepare/commit calls balanced.
                         */
                        printk(KERN_ERR "loop: transfer error block %llu\n",
                               (unsigned long long)index);
-                       kaddr = kmap_atomic(page, KM_USER0);
-                       memset(kaddr + offset, 0, size);
-                       kunmap_atomic(kaddr, KM_USER0);
+                       zero_user_page(page, offset, size, KM_USER0);
                }
                flush_dcache_page(page);
                ret = aops->commit_write(file, page, offset,
diff --git a/fs/buffer.c b/fs/buffer.c
index eb820b82a636fc917a699ff86caf0e5e853f780b..fc2d763a8d7817f3b7ec92ed5598f2a4cd24cf19 100644
@@ -1846,13 +1846,8 @@ static int __block_prepare_write(struct inode *inode, struct page *page,
                if (block_start >= to)
                        break;
                if (buffer_new(bh)) {
-                       void *kaddr;
-
                        clear_buffer_new(bh);
-                       kaddr = kmap_atomic(page, KM_USER0);
-                       memset(kaddr+block_start, 0, bh->b_size);
-                       flush_dcache_page(page);
-                       kunmap_atomic(kaddr, KM_USER0);
+                       zero_user_page(page, block_start, bh->b_size, KM_USER0);
                        set_buffer_uptodate(bh);
                        mark_buffer_dirty(bh);
                }
@@ -1940,10 +1935,8 @@ int block_read_full_page(struct page *page, get_block_t *get_block)
                                        SetPageError(page);
                        }
                        if (!buffer_mapped(bh)) {
-                               void *kaddr = kmap_atomic(page, KM_USER0);
-                               memset(kaddr + i * blocksize, 0, blocksize);
-                               flush_dcache_page(page);
-                               kunmap_atomic(kaddr, KM_USER0);
+                               zero_user_page(page, i * blocksize, blocksize,
+                                               KM_USER0);
                                if (!err)
                                        set_buffer_uptodate(bh);
                                continue;
@@ -2086,7 +2079,6 @@ int cont_prepare_write(struct page *page, unsigned offset,
        long status;
        unsigned zerofrom;
        unsigned blocksize = 1 << inode->i_blkbits;
-       void *kaddr;
 
        while(page->index > (pgpos = *bytes>>PAGE_CACHE_SHIFT)) {
                status = -ENOMEM;
@@ -2108,10 +2100,8 @@ int cont_prepare_write(struct page *page, unsigned offset,
                                                PAGE_CACHE_SIZE, get_block);
                if (status)
                        goto out_unmap;
-               kaddr = kmap_atomic(new_page, KM_USER0);
-               memset(kaddr+zerofrom, 0, PAGE_CACHE_SIZE-zerofrom);
-               flush_dcache_page(new_page);
-               kunmap_atomic(kaddr, KM_USER0);
+               zero_user_page(new_page, zerofrom,
+                               PAGE_CACHE_SIZE - zerofrom, KM_USER0);
                generic_commit_write(NULL, new_page, zerofrom, PAGE_CACHE_SIZE);
                unlock_page(new_page);
                page_cache_release(new_page);
@@ -2138,10 +2128,7 @@ int cont_prepare_write(struct page *page, unsigned offset,
        if (status)
                goto out1;
        if (zerofrom < offset) {
-               kaddr = kmap_atomic(page, KM_USER0);
-               memset(kaddr+zerofrom, 0, offset-zerofrom);
-               flush_dcache_page(page);
-               kunmap_atomic(kaddr, KM_USER0);
+               zero_user_page(page, zerofrom, offset - zerofrom, KM_USER0);
                __block_commit_write(inode, page, zerofrom, offset);
        }
        return 0;
@@ -2340,10 +2327,7 @@ failed:
         * Error recovery is pretty slack.  Clear the page and mark it dirty
         * so we'll later zero out any blocks which _were_ allocated.
         */
-       kaddr = kmap_atomic(page, KM_USER0);
-       memset(kaddr, 0, PAGE_CACHE_SIZE);
-       flush_dcache_page(page);
-       kunmap_atomic(kaddr, KM_USER0);
+       zero_user_page(page, 0, PAGE_CACHE_SIZE, KM_USER0);
        SetPageUptodate(page);
        set_page_dirty(page);
        return ret;
@@ -2382,7 +2366,6 @@ int nobh_writepage(struct page *page, get_block_t *get_block,
        loff_t i_size = i_size_read(inode);
        const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
        unsigned offset;
-       void *kaddr;
        int ret;
 
        /* Is the page fully inside i_size? */
@@ -2413,10 +2396,7 @@ int nobh_writepage(struct page *page, get_block_t *get_block,
         * the  page size, the remaining memory is zeroed when mapped, and
         * writes to that region are not written out to the file."
         */
-       kaddr = kmap_atomic(page, KM_USER0);
-       memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
-       flush_dcache_page(page);
-       kunmap_atomic(kaddr, KM_USER0);
+       zero_user_page(page, offset, PAGE_CACHE_SIZE - offset, KM_USER0);
 out:
        ret = mpage_writepage(page, get_block, wbc);
        if (ret == -EAGAIN)
@@ -2437,7 +2417,6 @@ int nobh_truncate_page(struct address_space *mapping, loff_t from)
        unsigned to;
        struct page *page;
        const struct address_space_operations *a_ops = mapping->a_ops;
-       char *kaddr;
        int ret = 0;
 
        if ((offset & (blocksize - 1)) == 0)
@@ -2451,10 +2430,8 @@ int nobh_truncate_page(struct address_space *mapping, loff_t from)
        to = (offset + blocksize) & ~(blocksize - 1);
        ret = a_ops->prepare_write(NULL, page, offset, to);
        if (ret == 0) {
-               kaddr = kmap_atomic(page, KM_USER0);
-               memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
-               flush_dcache_page(page);
-               kunmap_atomic(kaddr, KM_USER0);
+               zero_user_page(page, offset, PAGE_CACHE_SIZE - offset,
+                               KM_USER0);
                /*
                 * It would be more correct to call aops->commit_write()
                 * here, but this is more efficient.
@@ -2480,7 +2457,6 @@ int block_truncate_page(struct address_space *mapping,
        struct inode *inode = mapping->host;
        struct page *page;
        struct buffer_head *bh;
-       void *kaddr;
        int err;
 
        blocksize = 1 << inode->i_blkbits;
@@ -2534,11 +2510,7 @@ int block_truncate_page(struct address_space *mapping,
                        goto unlock;
        }
 
-       kaddr = kmap_atomic(page, KM_USER0);
-       memset(kaddr + offset, 0, length);
-       flush_dcache_page(page);
-       kunmap_atomic(kaddr, KM_USER0);
-
+       zero_user_page(page, offset, length, KM_USER0);
        mark_buffer_dirty(bh);
        err = 0;
 
@@ -2559,7 +2531,6 @@ int block_write_full_page(struct page *page, get_block_t *get_block,
        loff_t i_size = i_size_read(inode);
        const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
        unsigned offset;
-       void *kaddr;
 
        /* Is the page fully inside i_size? */
        if (page->index < end_index)
@@ -2585,10 +2556,7 @@ int block_write_full_page(struct page *page, get_block_t *get_block,
         * the  page size, the remaining memory is zeroed when mapped, and
         * writes to that region are not written out to the file."
         */
-       kaddr = kmap_atomic(page, KM_USER0);
-       memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
-       flush_dcache_page(page);
-       kunmap_atomic(kaddr, KM_USER0);
+       zero_user_page(page, offset, PAGE_CACHE_SIZE - offset, KM_USER0);
        return __block_write_full_page(inode, page, get_block, wbc);
 }
 
diff --git a/fs/direct-io.c b/fs/direct-io.c
index d9d0833444f59da15dc8be42420c13366429d402..8aa2d8b04ef155ea91a718939aa15abe020a845e 100644
@@ -867,7 +867,6 @@ static int do_direct_IO(struct dio *dio)
 do_holes:
                        /* Handle holes */
                        if (!buffer_mapped(map_bh)) {
-                               char *kaddr;
                                loff_t i_size_aligned;
 
                                /* AKPM: eargh, -ENOTBLK is a hack */
@@ -888,11 +887,8 @@ do_holes:
                                        page_cache_release(page);
                                        goto out;
                                }
-                               kaddr = kmap_atomic(page, KM_USER0);
-                               memset(kaddr + (block_in_page << blkbits),
-                                               0, 1 << blkbits);
-                               flush_dcache_page(page);
-                               kunmap_atomic(kaddr, KM_USER0);
+                               zero_user_page(page, block_in_page << blkbits,
+                                               1 << blkbits, KM_USER0);
                                dio->block_in_file++;
                                block_in_page++;
                                goto next_block;
diff --git a/fs/mpage.c b/fs/mpage.c
index fa2441f57b4194fe498e9e35ba42fbd936029cad..0fb914fc2ee0d2f912137b3922a6b4eebd65f633 100644
@@ -284,11 +284,9 @@ do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages,
        }
 
        if (first_hole != blocks_per_page) {
-               char *kaddr = kmap_atomic(page, KM_USER0);
-               memset(kaddr + (first_hole << blkbits), 0,
-                               PAGE_CACHE_SIZE - (first_hole << blkbits));
-               flush_dcache_page(page);
-               kunmap_atomic(kaddr, KM_USER0);
+               zero_user_page(page, first_hole << blkbits,
+                               PAGE_CACHE_SIZE - (first_hole << blkbits),
+                               KM_USER0);
                if (first_hole == 0) {
                        SetPageUptodate(page);
                        unlock_page(page);
@@ -576,14 +574,11 @@ page_is_mapped:
                 * written out to the file."
                 */
                unsigned offset = i_size & (PAGE_CACHE_SIZE - 1);
-               char *kaddr;
 
                if (page->index > end_index || !offset)
                        goto confused;
-               kaddr = kmap_atomic(page, KM_USER0);
-               memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
-               flush_dcache_page(page);
-               kunmap_atomic(kaddr, KM_USER0);
+               zero_user_page(page, offset, PAGE_CACHE_SIZE - offset,
+                               KM_USER0);
        }
 
        /*
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index a515eb0afdfb660a56eedd0c8928b8281fe0b5b7..b5f2ab42d9842ce4b6303ace53f1d5efe8eca319 100644
@@ -94,17 +94,27 @@ static inline void clear_highpage(struct page *page)
 
 /*
  * Same but also flushes aliased cache contents to RAM.
+ *
+ * This must be a macro because KM_USER0 and friends aren't defined if
+ * !CONFIG_HIGHMEM
  */
-static inline void memclear_highpage_flush(struct page *page, unsigned int offset, unsigned int size)
+#define zero_user_page(page, offset, size, km_type)            \
+       do {                                                    \
+               void *kaddr;                                    \
+                                                               \
+               BUG_ON((offset) + (size) > PAGE_SIZE);          \
+                                                               \
+               kaddr = kmap_atomic(page, km_type);             \
+               memset((char *)kaddr + (offset), 0, (size));    \
+               flush_dcache_page(page);                        \
+               kunmap_atomic(kaddr, (km_type));                \
+       } while (0)
+
+
+static inline void memclear_highpage_flush(struct page *page,
+                       unsigned int offset, unsigned int size)
 {
-       void *kaddr;
-
-       BUG_ON(offset + size > PAGE_SIZE);
-
-       kaddr = kmap_atomic(page, KM_USER0);
-       memset((char *)kaddr + offset, 0, size);
-       flush_dcache_page(page);
-       kunmap_atomic(kaddr, KM_USER0);
+       zero_user_page(page, offset, size, KM_USER0);
 }
 
 #ifndef __HAVE_ARCH_COPY_USER_HIGHPAGE
diff --git a/mm/filemap_xip.c b/mm/filemap_xip.c
index cbb335813ec01509ccf0b5525ccb1a3e0f51e1cc..1b49dab9b25d5d04aa3b98fdfc9b69de5f1d4acb 100644
@@ -434,7 +434,6 @@ xip_truncate_page(struct address_space *mapping, loff_t from)
        unsigned blocksize;
        unsigned length;
        struct page *page;
-       void *kaddr;
 
        BUG_ON(!mapping->a_ops->get_xip_page);
 
@@ -458,11 +457,7 @@ xip_truncate_page(struct address_space *mapping, loff_t from)
                else
                        return PTR_ERR(page);
        }
-       kaddr = kmap_atomic(page, KM_USER0);
-       memset(kaddr + offset, 0, length);
-       kunmap_atomic(kaddr, KM_USER0);
-
-       flush_dcache_page(page);
+       zero_user_page(page, offset, length, KM_USER0);
        return 0;
 }
 EXPORT_SYMBOL_GPL(xip_truncate_page);
diff --git a/mm/truncate.c b/mm/truncate.c
index 0f4b6d18ab0ed663360e0fba11f46f23e8c6b5b3..4fbe1a2da5fb973acc352b1b17d87ca390aab376 100644
@@ -12,6 +12,7 @@
 #include <linux/swap.h>
 #include <linux/module.h>
 #include <linux/pagemap.h>
+#include <linux/highmem.h>
 #include <linux/pagevec.h>
 #include <linux/task_io_accounting_ops.h>
 #include <linux/buffer_head.h> /* grr. try_to_release_page,
@@ -46,7 +47,7 @@ void do_invalidatepage(struct page *page, unsigned long offset)
 
 static inline void truncate_partial_page(struct page *page, unsigned partial)
 {
-       memclear_highpage_flush(page, partial, PAGE_CACHE_SIZE-partial);
+       zero_user_page(page, partial, PAGE_CACHE_SIZE - partial, KM_USER0);
        if (PagePrivate(page))
                do_invalidatepage(page, partial);
 }