[PATCH] Fix JBD race in t_forget list handling
author		Jan Kara <jack@suse.cz>
Tue, 6 Sep 2005 22:19:09 +0000 (15:19 -0700)
committer	Linus Torvalds <torvalds@g5.osdl.org>
Wed, 7 Sep 2005 23:57:54 +0000 (16:57 -0700)
Fix a race between journal_commit_transaction() and other places, such as
journal_unmap_buffer(), that add buffers to a transaction's t_forget list.
We have to protect against such callers by holding j_list_lock even while
traversing the t_forget list.  The fact that other places can only add
buffers to the list makes the locking easier.  On the other hand, the lock
ranking complicates things...

Signed-off-by: Jan Kara <jack@suse.cz>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
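
The locking pattern the patch introduces can be summarised outside of the JBD
code.  Below is a minimal, hedged C sketch of the same idea; the names
demo_journal, drain_forget_list() and process_one() are invented for this
example and are not JBD APIs, and the two spinlocks merely stand in for
j_list_lock and j_state_lock.  The point is that the list head is only
examined with the list lock held, the lock is dropped for the per-entry work
(which needs locks that rank outside the list lock), and it is re-taken
before the list is looked at again.

/*
 * Illustrative sketch only, not JBD code: demo_journal, drain_forget_list()
 * and process_one() are made up for this example.  list_lock plays the role
 * of j_list_lock, state_lock the role of j_state_lock (used in the second
 * sketch after the diff).
 */
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/spinlock.h>

struct demo_journal {
	spinlock_t		state_lock;	/* ranks outside list_lock */
	spinlock_t		list_lock;
	struct list_head	forget_list;	/* concurrently appended to */
};

static void process_one(struct list_head *entry)
{
	/* per-entry work; may take locks that rank outside list_lock */
}

static void drain_forget_list(struct demo_journal *j)
{
	spin_lock(&j->list_lock);
	while (!list_empty(&j->forget_list)) {
		struct list_head *entry = j->forget_list.next;

		list_del_init(entry);
		/* drop the list lock before the heavier per-entry work */
		spin_unlock(&j->list_lock);
		process_one(entry);
		spin_lock(&j->list_lock);
		/* let others in if we have held the lock for a while */
		cond_resched_lock(&j->list_lock);
	}
	spin_unlock(&j->list_lock);
}

Compared with the actual patch, the sketch detaches each entry itself before
dropping the lock; in the real code the journal helpers detach the
journal_head, which is why the patched loop re-reads
commit_transaction->t_forget under j_list_lock on every iteration.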
fs/jbd/commit.c

index dac720c837abf0475ee41fa56708c5748daf480d..9d0494dcc571ef8480ab6e087eb300b11aa5d25c 100644
@@ -720,11 +720,17 @@ wait_for_iobuf:
        J_ASSERT(commit_transaction->t_log_list == NULL);
 
 restart_loop:
+       /*
+        * As there are other places (journal_unmap_buffer()) adding buffers
+        * to this list we have to be careful and hold the j_list_lock.
+        */
+       spin_lock(&journal->j_list_lock);
        while (commit_transaction->t_forget) {
                transaction_t *cp_transaction;
                struct buffer_head *bh;
 
                jh = commit_transaction->t_forget;
+               spin_unlock(&journal->j_list_lock);
                bh = jh2bh(jh);
                jbd_lock_bh_state(bh);
                J_ASSERT_JH(jh, jh->b_transaction == commit_transaction ||
@@ -792,9 +798,25 @@ restart_loop:
                        journal_remove_journal_head(bh);  /* needs a brelse */
                        release_buffer_page(bh);
                }
+               cond_resched_lock(&journal->j_list_lock);
+       }
+       spin_unlock(&journal->j_list_lock);
+       /*
+        * This is a bit sleazy.  We borrow j_list_lock to protect
+        * journal->j_committing_transaction in __journal_remove_checkpoint.
+        * Really, __journal_remove_checkpoint should be using j_state_lock but
+        * it's a bit hassle to hold that across __journal_remove_checkpoint
+        */
+       spin_lock(&journal->j_state_lock);
+       spin_lock(&journal->j_list_lock);
+       /*
+        * Now recheck if some buffers did not get attached to the transaction
+        * while the lock was dropped...
+        */
+       if (commit_transaction->t_forget) {
                spin_unlock(&journal->j_list_lock);
-               if (cond_resched())
-                       goto restart_loop;
+               spin_unlock(&journal->j_state_lock);
+               goto restart_loop;
        }
 
        /* Done with this transaction! */
@@ -803,14 +825,6 @@ restart_loop:
 
        J_ASSERT(commit_transaction->t_state == T_COMMIT);
 
-       /*
-        * This is a bit sleazy.  We borrow j_list_lock to protect
-        * journal->j_committing_transaction in __journal_remove_checkpoint.
-        * Really, __jornal_remove_checkpoint should be using j_state_lock but
-        * it's a bit hassle to hold that across __journal_remove_checkpoint
-        */
-       spin_lock(&journal->j_state_lock);
-       spin_lock(&journal->j_list_lock);
        commit_transaction->t_state = T_FINISHED;
        J_ASSERT(commit_transaction == journal->j_committing_transaction);
        journal->j_commit_sequence = commit_transaction->t_tid;
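
The second half of the added hunk (re-taking both locks and rechecking
t_forget) is the standard close of that pattern.  Continuing the hypothetical
sketch above, and using the same ordering as the patch (the state lock taken
before the list lock), it looks roughly like this:

/* Continuation of the sketch above -- still illustrative, not JBD code. */
static void finish_commit(struct demo_journal *j)
{
restart:
	drain_forget_list(j);

	spin_lock(&j->state_lock);	/* taken first, as in the patch */
	spin_lock(&j->list_lock);
	if (!list_empty(&j->forget_list)) {
		/*
		 * A journal_unmap_buffer()-style producer added an entry
		 * while the locks were dropped: back to the top.
		 */
		spin_unlock(&j->list_lock);
		spin_unlock(&j->state_lock);
		goto restart;
	}
	/*
	 * The list is empty and cannot grow while list_lock is held,
	 * so the final bookkeeping is safe here.
	 */
	spin_unlock(&j->list_lock);
	spin_unlock(&j->state_lock);
}

The deleted hunk at the bottom of the diff shows where the two locks used to
be taken; the patch moves that acquisition up so that the emptiness recheck
and the T_FINISHED transition sit in the same critical section.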