author		Jan Kara <jack@suse.cz>	2005-09-06 15:19:09 -0700
committer	Linus Torvalds <torvalds@g5.osdl.org>	2005-09-07 16:57:54 -0700
commit		e6c9f5c1888097c936334bf9740024520ca47b8e (patch)
tree		6fcf2cccb7e4d155dd663f10001efdb2a9d7daae
parent		cbf0d27a131639f4f3e4faa94373c5c6f89f8f07 (diff)
[PATCH] Fix JBD race in t_forget list handling
Fix a race between journal_commit_transaction() and other places, such as journal_unmap_buffer(), that add buffers to a transaction's t_forget list. We have to protect against such additions by holding j_list_lock even while traversing the t_forget list. The fact that other places can only add buffers to the list makes the locking easier. On the other hand, the lock ranking complicates things...

Signed-off-by: Jan Kara <jack@suse.cz>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
-rw-r--r--	fs/jbd/commit.c | 34
1 file changed, 24 insertions(+), 10 deletions(-)
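The pattern the patch establishes is: drain t_forget under j_list_lock, dropping the lock around each buffer (whose processing needs locks that rank above it), and once the list looks empty, take j_state_lock and j_list_lock in that rank order and recheck the list, restarting the whole loop if a concurrent journal_unmap_buffer() slipped a buffer in. Below is a minimal userspace sketch of that shape, not the kernel code: pthread mutexes stand in for the spinlocks, all helper names are invented for illustration, and unlike the real loop it pops each node while holding the lock (safe here because other contexts only ever add).

/* Build with: cc -std=c99 -pthread sketch.c */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct node { struct node *next; int val; };

/* Stand-ins for the journal locks; j_state_lock ranks above j_list_lock. */
static pthread_mutex_t j_state_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t j_list_lock  = PTHREAD_MUTEX_INITIALIZER;
static struct node *t_forget;	/* other contexts may only ADD nodes here */

/* What journal_unmap_buffer() does in this model: add under j_list_lock. */
static void add_to_forget(int val)
{
	struct node *n = malloc(sizeof(*n));
	n->val = val;
	pthread_mutex_lock(&j_list_lock);
	n->next = t_forget;
	t_forget = n;
	pthread_mutex_unlock(&j_list_lock);
}

/* The commit-side drain, mirroring the patched loop structure. */
static void drain_forget(void)
{
restart_loop:
	pthread_mutex_lock(&j_list_lock);
	while (t_forget) {
		struct node *n = t_forget;
		t_forget = n->next;
		/* Processing needs locks that rank above j_list_lock: drop it. */
		pthread_mutex_unlock(&j_list_lock);
		printf("forgot %d\n", n->val);
		free(n);
		pthread_mutex_lock(&j_list_lock);	/* retake, as cond_resched_lock() does */
	}
	pthread_mutex_unlock(&j_list_lock);

	/* Take both locks in rank order, then recheck for late additions. */
	pthread_mutex_lock(&j_state_lock);
	pthread_mutex_lock(&j_list_lock);
	if (t_forget) {
		pthread_mutex_unlock(&j_list_lock);
		pthread_mutex_unlock(&j_state_lock);
		goto restart_loop;
	}
	/* ... mark the transaction T_FINISHED while both locks are held ... */
	pthread_mutex_unlock(&j_list_lock);
	pthread_mutex_unlock(&j_state_lock);
}

int main(void)
{
	add_to_forget(1);
	add_to_forget(2);
	drain_forget();
	return 0;
}

The final recheck is what closes the race: a buffer added after the while loop saw an empty list, but before both locks were taken, is caught there and the drain restarts.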
diff --git a/fs/jbd/commit.c b/fs/jbd/commit.c
index dac720c837a..9d0494dcc57 100644
--- a/fs/jbd/commit.c
+++ b/fs/jbd/commit.c
@@ -720,11 +720,17 @@ wait_for_iobuf:
 	J_ASSERT(commit_transaction->t_log_list == NULL);
 
 restart_loop:
+	/*
+	 * As there are other places (journal_unmap_buffer()) adding buffers
+	 * to this list we have to be careful and hold the j_list_lock.
+	 */
+	spin_lock(&journal->j_list_lock);
 	while (commit_transaction->t_forget) {
 		transaction_t *cp_transaction;
 		struct buffer_head *bh;
 
 		jh = commit_transaction->t_forget;
+		spin_unlock(&journal->j_list_lock);
 		bh = jh2bh(jh);
 		jbd_lock_bh_state(bh);
 		J_ASSERT_JH(jh, jh->b_transaction == commit_transaction ||
@@ -792,9 +798,25 @@ restart_loop:
 			journal_remove_journal_head(bh);  /* needs a brelse */
 			release_buffer_page(bh);
 		}
+		cond_resched_lock(&journal->j_list_lock);
+	}
+	spin_unlock(&journal->j_list_lock);
+	/*
+	 * This is a bit sleazy. We borrow j_list_lock to protect
+	 * journal->j_committing_transaction in __journal_remove_checkpoint.
+	 * Really, __journal_remove_checkpoint should be using j_state_lock but
+	 * it's a bit hassle to hold that across __journal_remove_checkpoint
+	 */
+	spin_lock(&journal->j_state_lock);
+	spin_lock(&journal->j_list_lock);
+	/*
+	 * Now recheck if some buffers did not get attached to the transaction
+	 * while the lock was dropped...
+	 */
+	if (commit_transaction->t_forget) {
 		spin_unlock(&journal->j_list_lock);
-		if (cond_resched())
-			goto restart_loop;
+		spin_unlock(&journal->j_state_lock);
+		goto restart_loop;
 	}
 
 	/* Done with this transaction! */
@@ -803,14 +825,6 @@ restart_loop:
 
 	J_ASSERT(commit_transaction->t_state == T_COMMIT);
 
-	/*
-	 * This is a bit sleazy. We borrow j_list_lock to protect
-	 * journal->j_committing_transaction in __journal_remove_checkpoint.
-	 * Really, __jornal_remove_checkpoint should be using j_state_lock but
-	 * it's a bit hassle to hold that across __journal_remove_checkpoint
-	 */
-	spin_lock(&journal->j_state_lock);
-	spin_lock(&journal->j_list_lock);
 	commit_transaction->t_state = T_FINISHED;
 	J_ASSERT(commit_transaction == journal->j_committing_transaction);
 	journal->j_commit_sequence = commit_transaction->t_tid;
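One more detail worth noting: the old loop rescheduled with "if (cond_resched()) goto restart_loop;", i.e. by abandoning the pass entirely, while the new loop calls cond_resched_lock(&journal->j_list_lock), which yields the CPU without giving up the loop's position: the lock is dropped around the reschedule point and retaken before the next iteration. A hedged userspace analogue of that helper follows; sched_yield() stands in for the kernel's reschedule, and unlike the real helper, which only drops the lock when a reschedule is pending (or the spinlock is contended) and reports whether it did, this sketch does so unconditionally.

#include <pthread.h>
#include <sched.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Userspace sketch of cond_resched_lock(): release, yield, retake. */
static int cond_resched_lock_sketch(pthread_mutex_t *l)
{
	pthread_mutex_unlock(l);	/* let any waiter take the lock */
	sched_yield();			/* voluntarily give up the CPU */
	pthread_mutex_lock(l);		/* retake before the loop continues */
	return 1;			/* the lock was dropped and retaken */
}

int main(void)
{
	pthread_mutex_lock(&lock);
	for (int i = 0; i < 3; i++) {
		printf("iteration %d under lock\n", i);
		cond_resched_lock_sketch(&lock);	/* as at the bottom of the t_forget loop */
	}
	pthread_mutex_unlock(&lock);
	return 0;
}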