author	Paul Mundt <lethal@linux-sh.org>	2010-07-05 15:46:08 +0900
committer	Paul Mundt <lethal@linux-sh.org>	2010-07-05 15:46:08 +0900
commit	285eba57db7bd7d7c3c5929fb8621fdcaaea1b00 (patch)
tree	a9e7f0563cef296b24c53b20dbb388ec5c210172 /mm/page-writeback.c
parent	1c14e6cecb1811543b1016f27e5d308fbea8c08a (diff)
parent	815c4163b6c8ebf8152f42b0a5fd015cfdcedc78 (diff)
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6
Conflicts:
	include/linux/serial_sci.h

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
Diffstat (limited to 'mm/page-writeback.c')
-rw-r--r--	mm/page-writeback.c	35
1 file changed, 22 insertions(+), 13 deletions(-)
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index b289310e2c8..54f28bd493d 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -597,7 +597,7 @@ static void balance_dirty_pages(struct address_space *mapping,
(!laptop_mode && ((global_page_state(NR_FILE_DIRTY)
+ global_page_state(NR_UNSTABLE_NFS))
> background_thresh)))
- bdi_start_writeback(bdi, NULL, 0, 0);
+ bdi_start_background_writeback(bdi);
}
void set_page_dirty_balance(struct page *page, int page_mkwrite)
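The first hunk swaps the open-coded bdi_start_writeback(bdi, NULL, 0, 0) call in balance_dirty_pages() for the purpose-named bdi_start_background_writeback(bdi) helper. The arm of the condition visible in the hunk is unchanged: outside laptop mode, background writeback is kicked once the global dirty plus unstable-NFS page count exceeds the background threshold. A minimal, standalone sketch of that trigger check follows; the names are invented for illustration and this is not the kernel implementation.

#include <stdbool.h>
#include <stdio.h>

/*
 * Illustrative stand-in for the condition shown in the hunk above:
 * kick background writeback only once dirty pages pile up past the
 * background threshold. (This sketch ignores the laptop-mode arm of
 * the original condition.)
 */
static bool should_kick_background_writeback(unsigned long nr_dirty,
					     unsigned long nr_unstable_nfs,
					     unsigned long background_thresh,
					     bool laptop_mode)
{
	if (laptop_mode)
		return false;
	return nr_dirty + nr_unstable_nfs > background_thresh;
}

int main(void)
{
	if (should_kick_background_writeback(12000, 500, 10000, false))
		printf("kick background writeback\n"); /* the patched code calls bdi_start_background_writeback(bdi) here */
	return 0;
}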
@@ -705,9 +705,8 @@ void laptop_mode_timer_fn(unsigned long data)
* We want to write everything out, not just down to the dirty
* threshold
*/
-
if (bdi_has_dirty_io(&q->backing_dev_info))
- bdi_start_writeback(&q->backing_dev_info, NULL, nr_pages, 0);
+ bdi_start_writeback(&q->backing_dev_info, nr_pages);
}
/*
@@ -835,7 +834,6 @@ int write_cache_pages(struct address_space *mapping,
pgoff_t done_index;
int cycled;
int range_whole = 0;
- long nr_to_write = wbc->nr_to_write;
pagevec_init(&pvec, 0);
if (wbc->range_cyclic) {
@@ -852,7 +850,22 @@ int write_cache_pages(struct address_space *mapping,
if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
range_whole = 1;
cycled = 1; /* ignore range_cyclic tests */
+
+ /*
+ * If this is a data integrity sync, cap the writeback to the
+ * current end of file. Any extension to the file that occurs
+ * after this is a new write and we don't need to write those
+ * pages out to fulfil our data integrity requirements. If we
+ * try to write them out, we can get stuck in this scan until
+ * the concurrent writer stops adding dirty pages and extending
+ * EOF.
+ */
+ if (wbc->sync_mode == WB_SYNC_ALL &&
+ wbc->range_end == LLONG_MAX) {
+ end = i_size_read(mapping->host) >> PAGE_CACHE_SHIFT;
+ }
}
+
retry:
done_index = index;
while (!done && (index <= end)) {
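The comment introduced in this hunk carries the reasoning: a WB_SYNC_ALL sweep over the whole range is capped at the file size sampled when the sweep starts, because pages dirtied beyond that EOF belong to new writes and need not be waited on, and without the cap a concurrent appender could keep the scan alive indefinitely. The cap itself is simple index arithmetic; below is a standalone sketch of it, assuming 4 KiB pages (a page shift of 12) in place of the kernel's PAGE_CACHE_SHIFT, purely as an illustration.

#include <stdio.h>

#define PAGE_SHIFT 12	/* assumed 4 KiB page size for the example */

/* Last page index an integrity sync needs to visit: the page holding EOF. */
static unsigned long integrity_sync_end_index(long long i_size)
{
	return (unsigned long)(i_size >> PAGE_SHIFT);
}

int main(void)
{
	/* A 10000-byte file spans page indices 0..2, so the sweep stops at 2. */
	printf("end index = %lu\n", integrity_sync_end_index(10000));
	return 0;
}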
@@ -935,11 +948,10 @@ continue_unlock:
done = 1;
break;
}
- }
+ }
- if (nr_to_write > 0) {
- nr_to_write--;
- if (nr_to_write == 0 &&
+ if (wbc->nr_to_write > 0) {
+ if (--wbc->nr_to_write == 0 &&
wbc->sync_mode == WB_SYNC_NONE) {
/*
* We stop writing back only if we are
@@ -970,11 +982,8 @@ continue_unlock:
end = writeback_index - 1;
goto retry;
}
- if (!wbc->no_nrwrite_index_update) {
- if (wbc->range_cyclic || (range_whole && nr_to_write > 0))
- mapping->writeback_index = done_index;
- wbc->nr_to_write = nr_to_write;
- }
+ if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
+ mapping->writeback_index = done_index;
return ret;
}
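The last two hunks drop the function-local nr_to_write copy and the no_nrwrite_index_update escape hatch: write_cache_pages() now decrements wbc->nr_to_write in place and updates mapping->writeback_index unconditionally for cyclic or whole-range writeback, so a caller can read the remaining budget straight out of its writeback_control after the call. A rough standalone sketch of that contract follows; the struct and helper are made up for illustration and are not the kernel API.

#include <stdio.h>

/* Made-up stand-in for the budget field of struct writeback_control. */
struct wb_budget {
	long nr_to_write;	/* remaining page budget, decremented in place */
};

/* Pretend to write back up to 'dirty_pages' pages, consuming the budget. */
static void write_some_pages(struct wb_budget *wbc, long dirty_pages)
{
	while (dirty_pages-- > 0 && wbc->nr_to_write > 0)
		wbc->nr_to_write--;
}

int main(void)
{
	struct wb_budget wbc = { .nr_to_write = 1024 };
	long budget = wbc.nr_to_write;

	write_some_pages(&wbc, 300);
	printf("pages written: %ld, budget left: %ld\n",
	       budget - wbc.nr_to_write, wbc.nr_to_write);
	return 0;
}

Keeping the count in the shared control structure removes the copy-back step at the end of the function (and the flag that used to suppress it) that the old code needed.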