author    | Russell King <rmk+kernel@arm.linux.org.uk> | 2011-12-05 23:20:17 +0000
committer | Russell King <rmk+kernel@arm.linux.org.uk> | 2011-12-05 23:20:17 +0000
commit    | 742eaa6a6e356a16788ce6530271de89bc4f8fb5 (patch)
tree      | 12fc040daab06ac796c61c1d92bfad9bb054d1c1 /mm/page-writeback.c
parent    | ba8bb18a03f8c7508565c385576a5431a4ad804a (diff)
parent    | ae72fd588a2b302222769b44775912b83f0785eb (diff)
Merge branch 'for-rmk' of git://git.kernel.org/pub/scm/linux/kernel/git/will/linux into devel-stable
Conflicts:
arch/arm/common/gic.c
arch/arm/plat-omap/include/plat/common.h
Diffstat (limited to 'mm/page-writeback.c')
-rw-r--r-- | mm/page-writeback.c | 23 |
1 file changed, 7 insertions, 16 deletions
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index a3278f00523..71252486bc6 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -128,7 +128,6 @@ unsigned long global_dirty_limit;
  *
  */
 static struct prop_descriptor vm_completions;
-static struct prop_descriptor vm_dirties;
 
 /*
  * couple the period to the dirty_ratio:
@@ -154,7 +153,6 @@ static void update_completion_period(void)
 {
 	int shift = calc_period_shift();
 	prop_change_shift(&vm_completions, shift);
-	prop_change_shift(&vm_dirties, shift);
 
 	writeback_set_ratelimit();
 }
@@ -235,11 +233,6 @@ void bdi_writeout_inc(struct backing_dev_info *bdi)
 }
 EXPORT_SYMBOL_GPL(bdi_writeout_inc);
 
-void task_dirty_inc(struct task_struct *tsk)
-{
-	prop_inc_single(&vm_dirties, &tsk->dirties);
-}
-
 /*
  * Obtain an accurate fraction of the BDI's portion.
  */
@@ -1133,17 +1126,17 @@ pause:
 					  pages_dirtied,
 					  pause,
 					  start_time);
-		__set_current_state(TASK_UNINTERRUPTIBLE);
+		__set_current_state(TASK_KILLABLE);
 		io_schedule_timeout(pause);
 
-		dirty_thresh = hard_dirty_limit(dirty_thresh);
 		/*
-		 * max-pause area. If dirty exceeded but still within this
-		 * area, no need to sleep for more than 200ms: (a) 8 pages per
-		 * 200ms is typically more than enough to curb heavy dirtiers;
-		 * (b) the pause time limit makes the dirtiers more responsive.
+		 * This is typically equal to (nr_dirty < dirty_thresh) and can
+		 * also keep "1000+ dd on a slow USB stick" under control.
 		 */
-		if (nr_dirty < dirty_thresh)
+		if (task_ratelimit)
+			break;
+
+		if (fatal_signal_pending(current))
 			break;
 	}
 
@@ -1395,7 +1388,6 @@ void __init page_writeback_init(void)
 
 	shift = calc_period_shift();
 	prop_descriptor_init(&vm_completions, shift);
-	prop_descriptor_init(&vm_dirties, shift);
 }
 
 /**
@@ -1724,7 +1716,6 @@ void account_page_dirtied(struct page *page, struct address_space *mapping)
 		__inc_zone_page_state(page, NR_DIRTIED);
 		__inc_bdi_stat(mapping->backing_dev_info, BDI_RECLAIMABLE);
 		__inc_bdi_stat(mapping->backing_dev_info, BDI_DIRTIED);
-		task_dirty_inc(current);
 		task_io_account_write(PAGE_CACHE_SIZE);
 	}
 }
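Note on the balance_dirty_pages() hunk above, which is the behavioural change in this diff: the throttled task now sleeps in TASK_KILLABLE rather than TASK_UNINTERRUPTIBLE, and the pause loop exits either when task_ratelimit indicates the task is back under its dirty threshold or when a fatal signal is pending. A minimal sketch of that pattern follows; it is illustrative only and not code from this commit: the function name throttle_pause_sketch and the recompute_task_ratelimit() stub are hypothetical, and in the real function the pause and ratelimit values are recalculated on every pass through the loop.

/* Illustrative sketch only -- not code from this commit. */
#include <linux/sched.h>	/* __set_current_state, io_schedule_timeout, fatal_signal_pending */

/* Hypothetical stand-in for the ratelimit arithmetic balance_dirty_pages() redoes each pass. */
static unsigned long recompute_task_ratelimit(void)
{
	return 1;	/* pretend the task has dropped back under its threshold */
}

static void throttle_pause_sketch(unsigned long pause_jiffies)
{
	for (;;) {
		/* Killable (not uninterruptible) sleep: SIGKILL can still end the task. */
		__set_current_state(TASK_KILLABLE);
		io_schedule_timeout(pause_jiffies);

		/* Non-zero ratelimit: dirtying is back under control, stop throttling. */
		if (recompute_task_ratelimit())
			break;

		/* Do not keep a task with a pending fatal signal stuck in the loop. */
		if (fatal_signal_pending(current))
			break;
	}
}

The visible difference from the old TASK_UNINTERRUPTIBLE sleep is that a heavy dirtier throttled against a slow or congested device can now be terminated with a fatal signal instead of remaining stuck until the pause expires.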