author     npiggin@suse.de <npiggin@suse.de>               2010-06-24 13:02:14 +1000
committer  Linus Torvalds <torvalds@linux-foundation.org>  2010-06-29 10:38:22 -0700
commit     57439f878afafefad8836ebf5c49da2a0a746105 (patch)
tree       ec11dd35704aa37d77a5fd8404fda57f297a3834
parent     5904b3b81d25166e5e39b9727645bb47937618e3 (diff)
fs: fix superblock iteration race
list_for_each_entry_safe is not suitable to protect against concurrent
modification of the list. 6754af6 introduced a race in sb walking.

list_for_each_entry can use the trick of pinning the current entry in
the list before we drop and retake the lock because it subsequently
follows cur->next. However list_for_each_entry_safe saves n=cur->next
for following before entering the loop body, so when the lock is
dropped, n may be deleted.

Signed-off-by: Nick Piggin <npiggin@suse.de>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: John Stultz <johnstul@us.ibm.com>
Cc: Frank Mayhar <fmayhar@google.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--  fs/dcache.c            2
-rw-r--r--  fs/super.c             6
-rw-r--r--  include/linux/list.h  15
3 files changed, 23 insertions(+), 0 deletions(-)
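For reference, the two iterators the commit message contrasts look roughly as follows. This is a simplified sketch of the include/linux/list.h definitions of this era (prefetch hints and other variants omitted), not verbatim kernel source: list_for_each_entry only follows pos->member.next at the top of each step, so pinning pos across the unlock is enough, while list_for_each_entry_safe has already cached the next entry in n before the loop body runs, and that cached pointer is what can be freed while the lock is dropped.

#define list_for_each_entry(pos, head, member)				\
	for (pos = list_entry((head)->next, typeof(*pos), member);	\
	     &pos->member != (head);					\
	     pos = list_entry(pos->member.next, typeof(*pos), member))

#define list_for_each_entry_safe(pos, n, head, member)			\
	for (pos = list_entry((head)->next, typeof(*pos), member),	\
		n = list_entry(pos->member.next, typeof(*pos), member);	\
	     &pos->member != (head);					\
	     pos = n, n = list_entry(n->member.next, typeof(*n), member))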
diff --git a/fs/dcache.c b/fs/dcache.c
index d96047b4a63..c8c78ba0782 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -590,6 +590,8 @@ static void prune_dcache(int count)
 			up_read(&sb->s_umount);
 		}
 		spin_lock(&sb_lock);
+		/* lock was dropped, must reset next */
+		list_safe_reset_next(sb, n, s_list);
 		count -= pruned;
 		__put_super(sb);
 		/* more work left to do? */
diff --git a/fs/super.c b/fs/super.c
index 5c35bc7a499..938119ab8dc 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -374,6 +374,8 @@ void sync_supers(void)
 			up_read(&sb->s_umount);
 
 			spin_lock(&sb_lock);
+			/* lock was dropped, must reset next */
+			list_safe_reset_next(sb, n, s_list);
 			__put_super(sb);
 		}
 	}
@@ -405,6 +407,8 @@ void iterate_supers(void (*f)(struct super_block *, void *), void *arg)
 		up_read(&sb->s_umount);
 
 		spin_lock(&sb_lock);
+		/* lock was dropped, must reset next */
+		list_safe_reset_next(sb, n, s_list);
 		__put_super(sb);
 	}
 	spin_unlock(&sb_lock);
@@ -585,6 +589,8 @@ static void do_emergency_remount(struct work_struct *work)
 		}
 		up_write(&sb->s_umount);
 		spin_lock(&sb_lock);
+		/* lock was dropped, must reset next */
+		list_safe_reset_next(sb, n, s_list);
 		__put_super(sb);
 	}
 	spin_unlock(&sb_lock);
diff --git a/include/linux/list.h b/include/linux/list.h
index 8392884a297..5d57a3a1fa1 100644
--- a/include/linux/list.h
+++ b/include/linux/list.h
@@ -544,6 +544,21 @@ static inline void list_splice_tail_init(struct list_head *list,
 		&pos->member != (head);					\
 	     pos = n, n = list_entry(n->member.prev, typeof(*n), member))
 
+/**
+ * list_safe_reset_next - reset a stale list_for_each_entry_safe loop
+ * @pos:	the loop cursor used in the list_for_each_entry_safe loop
+ * @n:		temporary storage used in list_for_each_entry_safe
+ * @member:	the name of the list_struct within the struct.
+ *
+ * list_safe_reset_next is not safe to use in general if the list may be
+ * modified concurrently (eg. the lock is dropped in the loop body). An
+ * exception to this is if the cursor element (pos) is pinned in the list,
+ * and list_safe_reset_next is called after re-taking the lock and before
+ * completing the current iteration of the loop body.
+ */
+#define list_safe_reset_next(pos, n, member)				\
+	n = list_entry(pos->member.next, typeof(*pos), member)
+
 /*
  * Double linked lists with a single pointer list head.
  * Mostly useful for hash tables where the two pointer list head is
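
To see the failure mode and the new helper outside the kernel, here is a small self-contained userspace sketch. It is hypothetical demo code, not part of this patch: it reimplements just enough of the list machinery to run a list_for_each_entry_safe walk, deletes the entry the iterator has already cached in n from inside the loop body (standing in for what another CPU could do while sb_lock is dropped), and then uses list_safe_reset_next to re-read the next pointer from the still-pinned cursor. The struct item type, the a/b/c nodes and main() are invented purely for illustration.

/* demo.c - hypothetical userspace illustration of list_safe_reset_next */
#include <stddef.h>
#include <stdio.h>

struct list_head {
	struct list_head *next, *prev;
};

static void INIT_LIST_HEAD(struct list_head *h)
{
	h->next = h;
	h->prev = h;
}

static void list_add_tail(struct list_head *new, struct list_head *head)
{
	new->prev = head->prev;
	new->next = head;
	head->prev->next = new;
	head->prev = new;
}

/* the kernel's list_del() poisons the pointers; NULL is close enough here */
static void list_del(struct list_head *entry)
{
	entry->prev->next = entry->next;
	entry->next->prev = entry->prev;
	entry->next = entry->prev = NULL;
}

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))
#define list_entry(ptr, type, member) container_of(ptr, type, member)

#define list_for_each_entry_safe(pos, n, head, member)			\
	for (pos = list_entry((head)->next, typeof(*pos), member),	\
		n = list_entry(pos->member.next, typeof(*pos), member);	\
	     &pos->member != (head);					\
	     pos = n, n = list_entry(n->member.next, typeof(*n), member))

/* the helper added by this patch */
#define list_safe_reset_next(pos, n, member)				\
	n = list_entry(pos->member.next, typeof(*pos), member)

struct item {
	const char *name;
	struct list_head list;
};

int main(void)
{
	struct list_head head;
	struct item a = { "a" }, b = { "b" }, c = { "c" };
	struct item *pos, *n;

	INIT_LIST_HEAD(&head);
	list_add_tail(&a.list, &head);
	list_add_tail(&b.list, &head);
	list_add_tail(&c.list, &head);

	list_for_each_entry_safe(pos, n, &head, list) {
		printf("visiting %s\n", pos->name);
		/*
		 * Stand-in for another CPU modifying the list while the
		 * lock is dropped: while visiting "a", remove "b", which
		 * is exactly the entry the iterator has cached in n.
		 */
		if (pos == &a)
			list_del(&b.list);
		/*
		 * The cursor (pos) is still pinned in the list, so it is
		 * safe to re-read pos->list.next; without this, the next
		 * step would follow the stale pointers of the removed b.
		 */
		list_safe_reset_next(pos, n, list);
	}
	return 0;	/* prints "visiting a" then "visiting c" */
}

Dropping the list_safe_reset_next() call makes the next iteration step through the just-removed entry's dead pointers, which is the use-after-free the fs/super.c and fs/dcache.c hunks above are guarding against.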