Btrfs: optimize reada_for_balance
author Josef Bacik <jbacik@fusionio.com>
Mon, 17 Jun 2013 18:23:02 +0000 (14:23 -0400)
committer Josef Bacik <jbacik@fusionio.com>
Mon, 1 Jul 2013 12:52:32 +0000 (08:52 -0400)
This patch does two things.  First, we no longer explicitly read in the
blocks we're trying to readahead.  For things like balance_level we may
never actually use the blocks, so this just adds unneeded latency, and
balance_level and split_node will both read in the blocks they care about
explicitly, so if the blocks need to be waited on it will be done there.
Second, we no longer drop the path if we do readahead; we just set the
path blocking before we call reada_for_balance() and then we're good to
go.  Hopefully this will cut down on the number of re-searches.  Thanks,

Signed-off-by: Josef Bacik <jbacik@fusionio.com>
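
As a rough userspace analogy (this is not btrfs code; the file name,
offset and sizes below are made up for illustration), the new scheme
amounts to issuing an asynchronous readahead hint for blocks we might
touch, and only paying for a blocking read on the block we actually end
up using:

	#define _GNU_SOURCE
	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		int fd = open("/var/tmp/example.dat", O_RDONLY);
		if (fd < 0) {
			perror("open");
			return 1;
		}

		/*
		 * Like the new reada_for_balance(): hint that the nearby
		 * blocks may be needed soon, but do not wait for them.
		 */
		readahead(fd, 0, 1 << 20);

		/*
		 * Like split_node()/balance_level(): only the block we
		 * actually touch is read synchronously.  If the readahead
		 * already finished this is cheap; otherwise we wait here,
		 * and only here.
		 */
		char buf[4096];
		if (pread(fd, buf, sizeof(buf), 0) < 0)
			perror("pread");

		close(fd);
		return 0;
	}

If the readahead has completed by the time the synchronous read happens,
that read is essentially free; if not, we block exactly once, at the
point where the data is really needed, which is what split_node() and
balance_level() now do for the tree blocks they operate on.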
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index c85cde7612485c208f4e878da0e883f5c414aad8..c32d03dff4fcf7ea3368c47c86c21f71c03baa04 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -2178,12 +2178,8 @@ static void reada_for_search(struct btrfs_root *root,
        }
 }
 
-/*
- * returns -EAGAIN if it had to drop the path, or zero if everything was in
- * cache
- */
-static noinline int reada_for_balance(struct btrfs_root *root,
-                                     struct btrfs_path *path, int level)
+static noinline void reada_for_balance(struct btrfs_root *root,
+                                      struct btrfs_path *path, int level)
 {
        int slot;
        int nritems;
@@ -2192,12 +2188,11 @@ static noinline int reada_for_balance(struct btrfs_root *root,
        u64 gen;
        u64 block1 = 0;
        u64 block2 = 0;
-       int ret = 0;
        int blocksize;
 
        parent = path->nodes[level + 1];
        if (!parent)
-               return 0;
+               return;
 
        nritems = btrfs_header_nritems(parent);
        slot = path->slots[level + 1];
@@ -2224,28 +2219,11 @@ static noinline int reada_for_balance(struct btrfs_root *root,
                        block2 = 0;
                free_extent_buffer(eb);
        }
-       if (block1 || block2) {
-               ret = -EAGAIN;
-
-               /* release the whole path */
-               btrfs_release_path(path);
 
-               /* read the blocks */
-               if (block1)
-                       readahead_tree_block(root, block1, blocksize, 0);
-               if (block2)
-                       readahead_tree_block(root, block2, blocksize, 0);
-
-               if (block1) {
-                       eb = read_tree_block(root, block1, blocksize, 0);
-                       free_extent_buffer(eb);
-               }
-               if (block2) {
-                       eb = read_tree_block(root, block2, blocksize, 0);
-                       free_extent_buffer(eb);
-               }
-       }
-       return ret;
+       if (block1)
+               readahead_tree_block(root, block1, blocksize, 0);
+       if (block2)
+               readahead_tree_block(root, block2, blocksize, 0);
 }
 
 
@@ -2441,11 +2419,8 @@ setup_nodes_for_search(struct btrfs_trans_handle *trans,
                        goto again;
                }
 
-               sret = reada_for_balance(root, p, level);
-               if (sret)
-                       goto again;
-
                btrfs_set_path_blocking(p);
+               reada_for_balance(root, p, level);
                sret = split_node(trans, root, p, level);
                btrfs_clear_path_blocking(p, NULL, 0);
 
@@ -2465,11 +2440,8 @@ setup_nodes_for_search(struct btrfs_trans_handle *trans,
                        goto again;
                }
 
-               sret = reada_for_balance(root, p, level);
-               if (sret)
-                       goto again;
-
                btrfs_set_path_blocking(p);
+               reada_for_balance(root, p, level);
                sret = balance_level(trans, root, p, level);
                btrfs_clear_path_blocking(p, NULL, 0);