Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/dtor/input
[linux-drm-fsl-dcu.git] fs/ext3/inode.c
index 0f0b1eadb98dd6dd5f74faa98d8b04012fd0a9f6..8a824f4ce5c608d231a6862b5a1c86de167d0882 100644
  *  Copyright (C) 1991, 1992  Linus Torvalds
  *
  *  Goal-directed block allocation by Stephen Tweedie
- *     (sct@redhat.com), 1993, 1998
+ *     (sct@redhat.com), 1993, 1998
  *  Big-endian to little-endian byte-swapping/bitmaps by
  *        David S. Miller (davem@caip.rutgers.edu), 1995
  *  64-bit file support on 64-bit platforms by Jakub Jelinek
- *     (jj@sunsite.ms.mff.cuni.cz)
+ *     (jj@sunsite.ms.mff.cuni.cz)
  *
  *  Assorted race fixes, rewrite of ext3_get_block() by Al Viro, 2000
  */
@@ -36,6 +36,7 @@
 #include <linux/writeback.h>
 #include <linux/mpage.h>
 #include <linux/uio.h>
+#include <linux/bio.h>
 #include "xattr.h"
 #include "acl.h"
 
@@ -55,7 +56,7 @@ static int ext3_inode_is_fast_symlink(struct inode *inode)
 /*
  * The ext3 forget function must perform a revoke if we are freeing data
  * which has been journaled.  Metadata (e.g. indirect blocks) must be
- * revoked in all cases. 
+ * revoked in all cases.
  *
  * "bh" may be NULL: a metadata block may have been freed from memory
  * but there may still be a record of it in the journal, and that record
@@ -105,7 +106,7 @@ int ext3_forget(handle_t *handle, int is_metadata, struct inode *inode,
  * Work out how many blocks we need to proceed with the next chunk of a
  * truncate transaction.
  */
-static unsigned long blocks_for_truncate(struct inode *inode) 
+static unsigned long blocks_for_truncate(struct inode *inode)
 {
        unsigned long needed;
 
@@ -122,13 +123,13 @@ static unsigned long blocks_for_truncate(struct inode *inode)
 
        /* But we need to bound the transaction so we don't overflow the
         * journal. */
-       if (needed > EXT3_MAX_TRANS_DATA) 
+       if (needed > EXT3_MAX_TRANS_DATA)
                needed = EXT3_MAX_TRANS_DATA;
 
        return EXT3_DATA_TRANS_BLOCKS(inode->i_sb) + needed;
 }
 
-/* 
+/*
  * Truncate transactions can be complex and absolutely huge.  So we need to
  * be able to restart the transaction at a convenient checkpoint to make
  * sure we don't overflow the journal.
@@ -136,9 +137,9 @@ static unsigned long blocks_for_truncate(struct inode *inode)
  * start_transaction gets us a new handle for a truncate transaction,
  * and extend_transaction tries to extend the existing one a bit.  If
  * extend fails, we need to propagate the failure up and restart the
- * transaction in the top-level truncate loop. --sct 
+ * transaction in the top-level truncate loop. --sct
  */
-static handle_t *start_transaction(struct inode *inode) 
+static handle_t *start_transaction(struct inode *inode)
 {
        handle_t *result;
 
@@ -215,12 +216,12 @@ void ext3_delete_inode (struct inode * inode)
        ext3_orphan_del(handle, inode);
        EXT3_I(inode)->i_dtime  = get_seconds();
 
-       /* 
+       /*
         * One subtle ordering requirement: if anything has gone wrong
         * (transaction abort, IO errors, whatever), then we can still
         * do these next steps (the fs will already have been marked as
         * having errors), but we can't free the inode if the mark_dirty
-        * fails.  
+        * fails.
         */
        if (ext3_mark_inode_dirty(handle, inode))
                /* If that failed, just do the required in-core inode clear. */
@@ -398,7 +399,7 @@ no_block:
  *       + if there is a block to the left of our position - allocate near it.
  *       + if pointer will live in indirect block - allocate near that block.
  *       + if pointer will live in inode - allocate in the same
- *         cylinder group. 
+ *         cylinder group.
  *
  * In the latter case we colour the starting block by the caller's PID to
  * prevent it from clashing with concurrent allocations for a different inode
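
For illustration only (editor's sketch, not taken from this patch): one plausible way a
caller's PID can "colour" the allocation goal within a block group, so that concurrent
writers to different inodes start allocating in different regions.  The 16-way split,
the 32768-block group size, the group start of 0 and the example PID are all assumptions.

#include <stdio.h>

int main(void)
{
	/* All values below are assumptions for illustration. */
	unsigned long blocks_per_group = 32768;	/* hypothetical group size     */
	unsigned long group_first_block = 0;	/* hypothetical start of group */
	unsigned long pid = 1234;		/* example caller PID          */

	/* Colour: pick one of 16 slices of the group based on the PID. */
	unsigned long colour = (pid % 16) * (blocks_per_group / 16);
	unsigned long goal = group_first_block + colour;

	printf("goal block: %lu\n", goal);	/* prints 4096 for this example */
	return 0;
}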
@@ -470,7 +471,7 @@ static ext3_fsblk_t ext3_find_goal(struct inode *inode, long block,
  *     ext3_blks_to_allocate: Look up the block map and count the number
 *     of direct blocks that need to be allocated for the given branch.
  *
- *     @branch: chain of indirect blocks
+ *     @branch: chain of indirect blocks
 *     @k: number of blocks needed for indirect blocks
  *     @blks: number of data blocks to be mapped.
  *     @blocks_to_boundary:  the offset in the indirect block
@@ -744,7 +745,7 @@ static int ext3_splice_branch(handle_t *handle, struct inode *inode,
                jbd_debug(5, "splicing indirect only\n");
                BUFFER_TRACE(where->bh, "call ext3_journal_dirty_metadata");
                err = ext3_journal_dirty_metadata(handle, where->bh);
-               if (err) 
+               if (err)
                        goto err_out;
        } else {
                /*
@@ -925,7 +926,7 @@ int ext3_get_blocks_handle(handle_t *handle, struct inode *inode,
        set_buffer_new(bh_result);
 got_it:
        map_bh(bh_result, inode->i_sb, le32_to_cpu(chain[depth-1].key));
-       if (blocks_to_boundary == 0)
+       if (count > blocks_to_boundary)
                set_buffer_boundary(bh_result);
        err = count;
        /* Clean up and exit */
@@ -946,7 +947,7 @@ out:
 static int ext3_get_block(struct inode *inode, sector_t iblock,
                        struct buffer_head *bh_result, int create)
 {
-       handle_t *handle = journal_current_handle();
+       handle_t *handle = ext3_journal_current_handle();
        int ret = 0;
        unsigned max_blocks = bh_result->b_size >> inode->i_blkbits;
 
@@ -1073,7 +1074,7 @@ struct buffer_head *ext3_bread(handle_t *handle, struct inode *inode,
                return bh;
        if (buffer_uptodate(bh))
                return bh;
-       ll_rw_block(READ, 1, &bh);
+       ll_rw_block(READ_META, 1, &bh);
        wait_on_buffer(bh);
        if (buffer_uptodate(bh))
                return bh;
@@ -1098,7 +1099,7 @@ static int walk_page_buffers(     handle_t *handle,
 
        for (   bh = head, block_start = 0;
                ret == 0 && (bh != head || !block_start);
-               block_start = block_end, bh = next)
+               block_start = block_end, bh = next)
        {
                next = bh->b_this_page;
                block_end = block_start + blocksize;
@@ -1137,7 +1138,7 @@ static int walk_page_buffers(     handle_t *handle,
  * So what we do is to rely on the fact that journal_stop/journal_start
  * will _not_ run commit under these circumstances because handle->h_ref
  * is elevated.  We'll still have enough credits for the tiny quotafile
- * write.  
+ * write.
  */
 static int do_journal_get_write_access(handle_t *handle,
                                        struct buffer_head *bh)
@@ -1147,37 +1148,102 @@ static int do_journal_get_write_access(handle_t *handle,
        return ext3_journal_get_write_access(handle, bh);
 }
 
+/*
+ * The idea of this helper function is as follows:
+ * if prepare_write has allocated some blocks, but not all of them, the
+ * transaction must include the content of the newly allocated blocks.
+ * This content is expected to be set to zeroes by block_prepare_write().
+ * 2006/10/14  SAW
+ */
+static int ext3_prepare_failure(struct file *file, struct page *page,
+                               unsigned from, unsigned to)
+{
+       struct address_space *mapping;
+       struct buffer_head *bh, *head, *next;
+       unsigned block_start, block_end;
+       unsigned blocksize;
+       int ret;
+       handle_t *handle = ext3_journal_current_handle();
+
+       mapping = page->mapping;
+       if (ext3_should_writeback_data(mapping->host)) {
+               /* optimization: no constraints about data */
+skip:
+               return ext3_journal_stop(handle);
+       }
+
+       head = page_buffers(page);
+       blocksize = head->b_size;
+       for (   bh = head, block_start = 0;
+               bh != head || !block_start;
+               block_start = block_end, bh = next)
+       {
+               next = bh->b_this_page;
+               block_end = block_start + blocksize;
+               if (block_end <= from)
+                       continue;
+               if (block_start >= to) {
+                       block_start = to;
+                       break;
+               }
+               /* prepare_write failed on this bh */
+               if (!buffer_mapped(bh))
+                       break;
+               if (ext3_should_journal_data(mapping->host)) {
+                       ret = do_journal_get_write_access(handle, bh);
+                       if (ret) {
+                               ext3_journal_stop(handle);
+                               return ret;
+                       }
+               }
+       }
+       /*
+        * block_start here becomes the first block that this call to
+        * prepare_write failed to map.
+        */
+       if (block_start <= from)
+               goto skip;
+
+       /* commit allocated and zeroed buffers */
+       return mapping->a_ops->commit_write(file, page, from, block_start);
+}
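
A minimal standalone sketch (editor's illustration, not part of the patch) of how the
byte range [from, to) handed to prepare_write maps onto a page's fixed-size buffers,
which is the walk the helper above performs via bh->b_this_page.  The 4096-byte page,
the 1024-byte block size and the example offsets are assumptions.

#include <stdio.h>

int main(void)
{
	unsigned blocksize = 1024;		/* assumed fs block size  */
	unsigned from = 1500, to = 3000;	/* example prepare range  */
	unsigned block_start, block_end;

	for (block_start = 0; block_start < 4096; block_start = block_end) {
		block_end = block_start + blocksize;
		if (block_end <= from)
			continue;	/* buffer ends before the range  */
		if (block_start >= to)
			break;		/* buffer starts after the range */
		/* overlapping buffers: prints 1024 and 2048 for this example */
		printf("buffer at %u is touched by the write\n", block_start);
	}
	return 0;
}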
+
 static int ext3_prepare_write(struct file *file, struct page *page,
                              unsigned from, unsigned to)
 {
        struct inode *inode = page->mapping->host;
-       int ret, needed_blocks = ext3_writepage_trans_blocks(inode);
+       int ret, ret2;
+       int needed_blocks = ext3_writepage_trans_blocks(inode);
        handle_t *handle;
        int retries = 0;
 
 retry:
        handle = ext3_journal_start(inode, needed_blocks);
-       if (IS_ERR(handle)) {
-               ret = PTR_ERR(handle);
-               goto out;
-       }
+       if (IS_ERR(handle))
+               return PTR_ERR(handle);
        if (test_opt(inode->i_sb, NOBH) && ext3_should_writeback_data(inode))
                ret = nobh_prepare_write(page, from, to, ext3_get_block);
        else
                ret = block_prepare_write(page, from, to, ext3_get_block);
        if (ret)
-               goto prepare_write_failed;
+               goto failure;
 
        if (ext3_should_journal_data(inode)) {
                ret = walk_page_buffers(handle, page_buffers(page),
                                from, to, NULL, do_journal_get_write_access);
+               if (ret)
+                       /* fatal error, just put the handle and return */
+                       journal_stop(handle);
        }
-prepare_write_failed:
-       if (ret)
-               ext3_journal_stop(handle);
+       return ret;
+
+failure:
+       ret2 = ext3_prepare_failure(file, page, from, to);
+       if (ret2 < 0)
+               return ret2;
        if (ret == -ENOSPC && ext3_should_retry_alloc(inode->i_sb, &retries))
                goto retry;
-out:
+       /* retry limit exceeded, or some other error such as -EDQUOT */
        return ret;
 }
 
@@ -1282,7 +1348,7 @@ static int ext3_journalled_commit_write(struct file *file,
        if (inode->i_size > EXT3_I(inode)->i_disksize) {
                EXT3_I(inode)->i_disksize = inode->i_size;
                ret2 = ext3_mark_inode_dirty(handle, inode);
-               if (!ret) 
+               if (!ret)
                        ret = ret2;
        }
        ret2 = ext3_journal_stop(handle);
@@ -1291,7 +1357,7 @@ static int ext3_journalled_commit_write(struct file *file,
        return ret;
 }
 
-/* 
+/*
  * bmap() is special.  It gets used by applications such as lilo and by
  * the swapper to find the on-disk block of a specific piece of data.
  *
@@ -1300,10 +1366,10 @@ static int ext3_journalled_commit_write(struct file *file,
  * filesystem and enables swap, then they may get a nasty shock when the
  * data getting swapped to that swapfile suddenly gets overwritten by
  * the original zeros written out previously to the journal and
- * awaiting writeback in the kernel's buffer cache. 
+ * awaiting writeback in the kernel's buffer cache.
  *
  * So, if we see any bmap calls here on a modified, data-journaled file,
- * take extra steps to flush any blocks which might be in the cache. 
+ * take extra steps to flush any blocks which might be in the cache.
  */
 static sector_t ext3_bmap(struct address_space *mapping, sector_t block)
 {
@@ -1312,16 +1378,16 @@ static sector_t ext3_bmap(struct address_space *mapping, sector_t block)
        int err;
 
        if (EXT3_I(inode)->i_state & EXT3_STATE_JDATA) {
-               /* 
+               /*
                 * This is a REALLY heavyweight approach, but the use of
                 * bmap on dirty files is expected to be extremely rare:
                 * only if we run lilo or swapon on a freshly made file
-                * do we expect this to happen. 
+                * do we expect this to happen.
                 *
                 * (bmap requires CAP_SYS_RAWIO so this does not
                 * represent an unprivileged user DOS attack --- we'd be
                 * in trouble if mortal users could trigger this path at
-                * will.) 
+                * will.)
                 *
                 * NB. EXT3_STATE_JDATA is not set on files other than
                 * regular files.  If somebody wants to bmap a directory
@@ -1457,7 +1523,7 @@ static int ext3_ordered_writepage(struct page *page,
         */
 
        /*
-        * And attach them to the current transaction.  But only if 
+        * And attach them to the current transaction.  But only if
         * block_write_full_page() succeeded.  Otherwise they are unmapped,
         * and generally junk.
         */
@@ -1644,14 +1710,14 @@ static ssize_t ext3_direct_IO(int rw, struct kiocb *iocb,
                }
        }
 
-       ret = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov, 
+       ret = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
                                 offset, nr_segs,
                                 ext3_get_block, NULL);
 
        /*
         * Reacquire the handle: ext3_get_block() can restart the transaction
         */
-       handle = journal_current_handle();
+       handle = ext3_journal_current_handle();
 
 out_stop:
        if (handle) {
@@ -2025,7 +2091,7 @@ static void ext3_free_data(handle_t *handle, struct inode *inode,
                           __le32 *first, __le32 *last)
 {
        ext3_fsblk_t block_to_free = 0;    /* Starting block # of a run */
-       unsigned long count = 0;            /* Number of blocks in the run */ 
+       unsigned long count = 0;            /* Number of blocks in the run */
        __le32 *block_to_free_p = NULL;     /* Pointer into inode/ind
                                               corresponding to
                                               block_to_free */
@@ -2054,7 +2120,7 @@ static void ext3_free_data(handle_t *handle, struct inode *inode,
                        } else if (nr == block_to_free + count) {
                                count++;
                        } else {
-                               ext3_clear_blocks(handle, inode, this_bh, 
+                               ext3_clear_blocks(handle, inode, this_bh,
                                                  block_to_free,
                                                  count, block_to_free_p, p);
                                block_to_free = nr;
@@ -2115,7 +2181,7 @@ static void ext3_free_branches(handle_t *handle, struct inode *inode,
                         */
                        if (!bh) {
                                ext3_error(inode->i_sb, "ext3_free_branches",
-                                          "Read failure, inode=%ld, block="E3FSBLK,
+                                          "Read failure, inode=%lu, block="E3FSBLK,
                                           inode->i_ino, nr);
                                continue;
                        }
@@ -2184,7 +2250,7 @@ static void ext3_free_branches(handle_t *handle, struct inode *inode,
                                        *p = 0;
                                        BUFFER_TRACE(parent_bh,
                                        "call ext3_journal_dirty_metadata");
-                                       ext3_journal_dirty_metadata(handle, 
+                                       ext3_journal_dirty_metadata(handle,
                                                                    parent_bh);
                                }
                        }
@@ -2540,7 +2606,7 @@ make_io:
                 */
                get_bh(bh);
                bh->b_end_io = end_buffer_read_sync;
-               submit_bh(READ, bh);
+               submit_bh(READ_META, bh);
                wait_on_buffer(bh);
                if (!buffer_uptodate(bh)) {
                        ext3_error(inode->i_sb, "ext3_get_inode_loc",
@@ -2632,9 +2698,6 @@ void ext3_read_inode(struct inode * inode)
                 * recovery code: that's fine, we're about to complete
                 * the process of deleting those. */
        }
-       inode->i_blksize = PAGE_SIZE;   /* This is the optimal IO size
-                                        * (for stat), not the fs block
-                                        * size */  
        inode->i_blocks = le32_to_cpu(raw_inode->i_blocks);
        ei->i_flags = le32_to_cpu(raw_inode->i_flags);
 #ifdef EXT3_FRAGMENTS
@@ -2704,7 +2767,7 @@ void ext3_read_inode(struct inode * inode)
                if (raw_inode->i_block[0])
                        init_special_inode(inode, inode->i_mode,
                           old_decode_dev(le32_to_cpu(raw_inode->i_block[0])));
-               else 
+               else
                        init_special_inode(inode, inode->i_mode,
                           new_decode_dev(le32_to_cpu(raw_inode->i_block[1])));
        }
@@ -2724,8 +2787,8 @@ bad_inode:
  *
  * The caller must have write access to iloc->bh.
  */
-static int ext3_do_update_inode(handle_t *handle, 
-                               struct inode *inode, 
+static int ext3_do_update_inode(handle_t *handle,
+                               struct inode *inode,
                                struct ext3_iloc *iloc)
 {
        struct ext3_inode *raw_inode = ext3_raw_inode(iloc);
@@ -2900,7 +2963,7 @@ int ext3_write_inode(struct inode *inode, int wait)
  * commit will leave the blocks being flushed in an unused state on
  * disk.  (On recovery, the inode will get truncated and the blocks will
  * be freed, so we have a strong guarantee that no future commit will
- * leave these blocks visible to the user.)  
+ * leave these blocks visible to the user.)
  *
  * Called with inode->sem down.
  */
@@ -3043,13 +3106,13 @@ int ext3_mark_iloc_dirty(handle_t *handle,
        return err;
 }
 
-/* 
+/*
  * On success, we end up with an outstanding reference count against
- * iloc->bh.  This _must_ be cleaned up later. 
+ * iloc->bh.  This _must_ be cleaned up later.
  */
 
 int
-ext3_reserve_inode_write(handle_t *handle, struct inode *inode, 
+ext3_reserve_inode_write(handle_t *handle, struct inode *inode,
                         struct ext3_iloc *iloc)
 {
        int err = 0;
@@ -3139,7 +3202,7 @@ out:
 }
 
 #if 0
-/* 
+/*
  * Bind an inode's backing buffer_head into this transaction, to prevent
  * it from being flushed to disk early.  Unlike
  * ext3_reserve_inode_write, this leaves behind no bh reference and
@@ -3157,7 +3220,7 @@ static int ext3_pin_inode(handle_t *handle, struct inode *inode)
                        BUFFER_TRACE(iloc.bh, "get_write_access");
                        err = journal_get_write_access(handle, iloc.bh);
                        if (!err)
-                               err = ext3_journal_dirty_metadata(handle, 
+                               err = ext3_journal_dirty_metadata(handle,
                                                                  iloc.bh);
                        brelse(iloc.bh);
                }