author	Jan Kara <jack@suse.cz>	2010-08-19 14:13:33 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2010-08-20 09:34:55 -0700
commit	d5ed3a4af77b851b6271ad3d9abc4c57fa3ce0f5 (patch)
tree	f06894404e4af25051e8918bfd3fdac95974fc97
parent	f2e41e910320197d55b52e28d99a07130f2ae738 (diff)
lib/radix-tree.c: fix overflow in radix_tree_range_tag_if_tagged()
When radix_tree_maxindex() is ~0UL, it can happen that scanning overflows
index and tree traversal code goes astray reading memory until it hits
unreadable memory.  Check for overflow and exit in that case.

Signed-off-by: Jan Kara <jack@suse.cz>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
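To see the wrap the patch guards against, here is a small stand-alone C sketch (not kernel code; RADIX_TREE_MAP_SHIFT is assumed to be 6 as in mainline) that evaluates the same "next index" step used by radix_tree_range_tag_if_tagged():

#include <stdio.h>
#include <limits.h>

/*
 * Illustrative sketch only: shows how the "go to next item at this level"
 * computation wraps to 0 when the scan sits at the top of the index space
 * and last_index is ~0UL.
 */
int main(void)
{
	unsigned long last_index = ULONG_MAX;	/* ~0UL */
	unsigned long index = ULONG_MAX;	/* scan is at the last possible slot */
	unsigned int shift = 6;			/* assumed RADIX_TREE_MAP_SHIFT */

	/* Step to the first index of the next node at this level... */
	unsigned long next = ((index >> shift) + 1) << shift;

	/* ...which overflows to 0, so "next > last_index" never fires. */
	printf("next = %lu, next > last_index: %d, !next: %d\n",
	       next, next > last_index, !next);
	return 0;
}

With index at the top of the range the step wraps to 0, the old "index > last_index" test cannot trigger when last_index is ~0UL, and the traversal would keep going instead of stopping; the added "|| !index" check breaks out at exactly that point.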
-rw-r--r--  lib/radix-tree.c     5
-rw-r--r--  mm/page-writeback.c  3
2 files changed, 6 insertions, 2 deletions
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index e907858498a..5b7d4623f0b 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -625,6 +625,8 @@ EXPORT_SYMBOL(radix_tree_tag_get);
*
* The function returns number of leaves where the tag was set and sets
* *first_indexp to the first unscanned index.
+ * WARNING! *first_indexp can wrap if last_index is ULONG_MAX. Caller must
+ * be prepared to handle that.
*/
unsigned long radix_tree_range_tag_if_tagged(struct radix_tree_root *root,
unsigned long *first_indexp, unsigned long last_index,
@@ -675,7 +677,8 @@ unsigned long radix_tree_range_tag_if_tagged(struct radix_tree_root *root,
next:
/* Go to next item at level determined by 'shift' */
index = ((index >> shift) + 1) << shift;
- if (index > last_index)
+ /* Overflow can happen when last_index is ~0UL... */
+ if (index > last_index || !index)
break;
if (tagged >= nr_to_tag)
break;
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 7262aacea8a..c09ef5219cb 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -836,7 +836,8 @@ void tag_pages_for_writeback(struct address_space *mapping,
spin_unlock_irq(&mapping->tree_lock);
WARN_ON_ONCE(tagged > WRITEBACK_TAG_BATCH);
cond_resched();
- } while (tagged >= WRITEBACK_TAG_BATCH);
+ /* We check 'start' to handle wrapping when end == ~0UL */
+ } while (tagged >= WRITEBACK_TAG_BATCH && start);
}
EXPORT_SYMBOL(tag_pages_for_writeback);
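For completeness, a hedged sketch of the caller-side pattern that tag_pages_for_writeback() now follows: tag the range in batches and treat a start index that wrapped to 0 as "whole range covered". The tag_batch() helper and the BATCH value below are purely hypothetical stand-ins; they only mimic how the real radix_tree_range_tag_if_tagged() advances *first_indexp, including the wrap at ~0UL that the new kerneldoc warns about.

#include <stdio.h>
#include <limits.h>

#define BATCH 4096UL	/* hypothetical stand-in for WRITEBACK_TAG_BATCH */

/*
 * Hypothetical stand-in for radix_tree_range_tag_if_tagged(): pretends a
 * full batch was tagged and advances *start, wrapping to 0 once the scan
 * has passed end == ~0UL.
 */
static unsigned long tag_batch(unsigned long *start, unsigned long end)
{
	if (end - *start >= BATCH)
		*start += BATCH;
	else
		*start = 0;	/* scanned up to and including ~0UL: wrap */
	return BATCH;
}

int main(void)
{
	unsigned long start = ULONG_MAX - 2 * BATCH + 1;
	unsigned long end = ULONG_MAX;
	unsigned long tagged;

	/* Same loop shape as tag_pages_for_writeback() after the fix: stop
	 * on a short batch or when 'start' wrapped to 0. */
	do {
		tagged = tag_batch(&start, end);
	} while (tagged >= BATCH && start);

	printf("loop terminated, start = %lu\n", start);
	return 0;
}

Without the "&& start" condition the loop would keep re-scanning from index 0 after the wrap; checking the returned start index is the caller-side half of the overflow handling that the updated comment in lib/radix-tree.c asks for.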