Btrfs: fix locking in btrfs_destroy_delayed_refs
Author: Josef Bacik <josef@redhat.com>
Thu, 31 May 2012 15:06:33 +0000 (11:06 -0400)
Committer: Chris Mason <chris.mason@oracle.com>
Fri, 15 Jun 2012 01:29:11 +0000 (21:29 -0400)
The transaction abort stuff was throwing warnings from the list debugging
code because we do a list_del_init outside of the delayed_refs spin lock.
The delayed refs locking makes baby Jesus cry so it's not hard to get wrong,
but we need to take the ref head mutex to make sure it's not being processed
currently, and so if it is we need to drop the spin lock and then take and
drop the mutex and do the search again.  If we can take the mutex then we
can safely remove the head from the list and carry on.  Now when the
transaction aborts I don't get the list debugging warnings.  Thanks,

Signed-off-by: Josef Bacik <josef@redhat.com>
fs/btrfs/disk-io.c

index b99d5127ba18997a6aab35f8946711ae0b3ff5b4..c79ddc75608126451e084690ba69750adc03ecc4 100644 (file)
@@ -3400,7 +3400,6 @@ int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
 
        delayed_refs = &trans->delayed_refs;
 
-again:
        spin_lock(&delayed_refs->lock);
        if (delayed_refs->num_entries == 0) {
                spin_unlock(&delayed_refs->lock);
@@ -3408,31 +3407,36 @@ again:
                return ret;
        }
 
-       node = rb_first(&delayed_refs->root);
-       while (node) {
+       while ((node = rb_first(&delayed_refs->root)) != NULL) {
                ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
-               node = rb_next(node);
-
-               ref->in_tree = 0;
-               rb_erase(&ref->rb_node, &delayed_refs->root);
-               delayed_refs->num_entries--;
 
                atomic_set(&ref->refs, 1);
                if (btrfs_delayed_ref_is_head(ref)) {
                        struct btrfs_delayed_ref_head *head;
 
                        head = btrfs_delayed_node_to_head(ref);
-                       spin_unlock(&delayed_refs->lock);
-                       mutex_lock(&head->mutex);
+                       if (!mutex_trylock(&head->mutex)) {
+                               atomic_inc(&ref->refs);
+                               spin_unlock(&delayed_refs->lock);
+
+                               /* Need to wait for the delayed ref to run */
+                               mutex_lock(&head->mutex);
+                               mutex_unlock(&head->mutex);
+                               btrfs_put_delayed_ref(ref);
+
+                               continue;
+                       }
+
                        kfree(head->extent_op);
                        delayed_refs->num_heads--;
                        if (list_empty(&head->cluster))
                                delayed_refs->num_heads_ready--;
                        list_del_init(&head->cluster);
-                       mutex_unlock(&head->mutex);
-                       btrfs_put_delayed_ref(ref);
-                       goto again;
                }
+               ref->in_tree = 0;
+               rb_erase(&ref->rb_node, &delayed_refs->root);
+               delayed_refs->num_entries--;
+
                spin_unlock(&delayed_refs->lock);
                btrfs_put_delayed_ref(ref);