author	Wu Fengguang <fengguang.wu@intel.com>	2011-08-04 22:16:46 -0600
committer	Wu Fengguang <fengguang.wu@intel.com>	2011-10-03 21:08:58 +0800
commit	8927f66c4ede9a18b4b58f7e6f9debca67065f6b (patch)
tree	f7c8490ab23a20cb86874ca8112f3dd1fc6002ae /mm/page-writeback.c
parent	57fc978cfb61ed40a7bbfe5a569359159ba31abd (diff)
writeback: dirty position control - bdi reserve area
Keep a minimal pool of dirty pages for each bdi, so that the disk IO queues won't underrun. Also gently increase a small bdi_thresh to avoid it getting stuck at 0 for lightly dirtied bdis.

This is particularly useful for JBOD setups and small-memory systems.

It may result in (pos_ratio > 1) at the setpoint and push the dirty pages high. This is more or less intended, because the bdi is in danger of IO queue underflow.

Signed-off-by: Wu Fengguang <fengguang.wu@intel.com>
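A minimal user-space sketch of the arithmetic this patch adds, assuming a standalone helper (the name reserve_area_boost() is hypothetical, and plain division stands in for the do_div()-based math in bdi_position_ratio()):

/* Boost pos_ratio once a bdi's dirty pages fall below half of its threshold. */
static unsigned long reserve_area_boost(unsigned long pos_ratio,
					unsigned long bdi_thresh,
					unsigned long bdi_dirty)
{
	unsigned long x_intercept = bdi_thresh / 2;	/* reserve area boundary */

	if (bdi_dirty >= x_intercept)
		return pos_ratio;			/* outside the reserve area */
	if (bdi_dirty > x_intercept / 8)
		return pos_ratio * x_intercept / bdi_dirty;	/* scale up smoothly */
	return pos_ratio * 8;				/* clamp the boost at 8x */
}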
Diffstat (limited to 'mm/page-writeback.c')
-rw-r--r--	mm/page-writeback.c	15
1 file changed, 15 insertions, 0 deletions
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 6a8bb693b42..325f753c80e 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -599,6 +599,7 @@ static unsigned long bdi_position_ratio(struct backing_dev_info *bdi,
 	 */
 	if (unlikely(bdi_thresh > thresh))
 		bdi_thresh = thresh;
+	bdi_thresh = max(bdi_thresh, (limit - dirty) / 8);
 	/*
 	 * scale global setpoint to bdi's:
 	 *	bdi_setpoint = setpoint * bdi_thresh / thresh
@@ -622,6 +623,20 @@ static unsigned long bdi_position_ratio(struct backing_dev_info *bdi,
 	} else
 		pos_ratio /= 4;
 
+	/*
+	 * bdi reserve area, safeguard against dirty pool underrun and disk idle
+	 * It may push the desired control point of global dirty pages higher
+	 * than setpoint.
+	 */
+	x_intercept = bdi_thresh / 2;
+	if (bdi_dirty < x_intercept) {
+		if (bdi_dirty > x_intercept / 8) {
+			pos_ratio *= x_intercept;
+			do_div(pos_ratio, bdi_dirty);
+		} else
+			pos_ratio *= 8;
+	}
+
 	return pos_ratio;
 }
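To illustrate the second hunk with made-up numbers: if bdi_thresh were 800 pages, x_intercept would be 400; at bdi_dirty = 100 pos_ratio is multiplied by 400/100 = 4, and below 50 dirty pages the multiplier is clamped at 8. Tasks dirtying pages on that bdi are therefore throttled less and less as its dirty pool drains, which keeps the disk IO queue from running empty.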