author     Linus Torvalds <torvalds@linux-foundation.org>  2014-10-18 11:53:51 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2014-10-18 11:53:51 -0700
commit     d3dc366bbaf07c125561e90d6da4bb147741101a (patch)
tree       6eb7e79a8ec9df1fa705393c6d15ccea3d104661 /mm
parent     511c41d9e6665a07aca94eb00983cf6d77dd87ff (diff)
parent     e19a8a0ad2d255316830ead05b59c5a704434cbb (diff)
Merge branch 'for-3.18/core' of git://git.kernel.dk/linux-block
Pull core block layer changes from Jens Axboe:
 "This is the core block IO pull request for 3.18. Apart from the new
  and improved flush machinery for blk-mq, this is all mostly bug fixes
  and cleanups.

   - blk-mq timeout updates and fixes from Christoph.

   - Removal of REQ_END, also from Christoph. We pass it through the
     ->queue_rq() hook for blk-mq instead, freeing up one of the request
     bits. The space was overly tight on 32-bit, so Martin also killed
     REQ_KERNEL since it's no longer used.

   - blk integrity updates and fixes from Martin and Gu Zheng.

   - Update to the flush machinery for blk-mq from Ming Lei. Now we have
     a per hardware context flush request, which both cleans up the code
     and should scale better for flush intensive workloads on blk-mq.

   - Improve the error printing, from Rob Elliott.

   - Backing device improvements and cleanups from Tejun.

   - Fixup of a misplaced rq_complete() tracepoint from Hannes.

   - Make blk_get_request() return error pointers, fixing up issues
     where we NULL deref when a device goes bad or missing. From Joe
     Lawrence.

   - Prep work for drastically reducing the memory consumption of dm
     devices from Junichi Nomura. This allows creating clone bio sets
     without preallocating a lot of memory.

   - Fix a blk-mq hang on certain combinations of queue depths and
     hardware queues from me.

   - Limit memory consumption for blk-mq devices for crash dump
     scenarios and drivers that use crazy high depths (certain SCSI
     shared tag setups). We now just use a single queue and limited
     depth for that"

* 'for-3.18/core' of git://git.kernel.dk/linux-block: (58 commits)
  block: Remove REQ_KERNEL
  blk-mq: allocate cpumask on the home node
  bio-integrity: remove the needless fail handle of bip_slab creating
  block: include func name in __get_request prints
  block: make blk_update_request print prefix match ratelimited prefix
  blk-merge: don't compute bi_phys_segments from bi_vcnt for cloned bio
  block: fix alignment_offset math that assumes io_min is a power-of-2
  blk-mq: Make bt_clear_tag() easier to read
  blk-mq: fix potential hang if rolling wakeup depth is too high
  block: add bioset_create_nobvec()
  block: use bio_clone_fast() in blk_rq_prep_clone()
  block: misplaced rq_complete tracepoint
  sd: Honor block layer integrity handling flags
  block: Replace strnicmp with strncasecmp
  block: Add T10 Protection Information functions
  block: Don't merge requests if integrity flags differ
  block: Integrity checksum flag
  block: Relocate bio integrity flags
  block: Add a disk flag to block integrity profile
  block: Add prefix to block integrity profile flags
  ...
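As a rough caller-side sketch of the blk_get_request() change described above: with the function returning ERR_PTR() values instead of NULL on failure, callers test the result with IS_ERR() and can propagate the specific error. The function my_alloc_rq() and its surrounding use below are hypothetical, assuming the 3.18-era blk_get_request(q, rw, gfp_mask) prototype; they are not code from this series.

/*
 * Hypothetical caller, for illustration only: shows the IS_ERR()/PTR_ERR()
 * handling expected once blk_get_request() returns error pointers.
 */
#include <linux/blkdev.h>
#include <linux/err.h>

static int my_alloc_rq(struct request_queue *q)
{
	struct request *rq;

	rq = blk_get_request(q, READ, GFP_KERNEL);
	if (IS_ERR(rq))			/* previously: if (!rq) */
		return PTR_ERR(rq);	/* e.g. -ENOMEM, or an error from a dead queue */

	/* ... set up and issue the request here ... */

	blk_put_request(rq);
	return 0;
}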
Diffstat (limited to 'mm')
-rw-r--r--  mm/backing-dev.c  40
1 file changed, 16 insertions, 24 deletions
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 12a992b6257..0ae0df55000 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -40,7 +40,7 @@ LIST_HEAD(bdi_list);
/* bdi_wq serves all asynchronous writeback tasks */
struct workqueue_struct *bdi_wq;
-void bdi_lock_two(struct bdi_writeback *wb1, struct bdi_writeback *wb2)
+static void bdi_lock_two(struct bdi_writeback *wb1, struct bdi_writeback *wb2)
{
if (wb1 < wb2) {
spin_lock(&wb1->list_lock);
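The bdi_lock_two() helper in this hunk uses the kernel's usual idiom for taking two locks of the same class: acquire them in a fixed, address-based order so every path nests them the same way and an ABBA deadlock cannot occur. A minimal stand-alone sketch of that idiom follows; the thing structure and thing_lock_two() are invented for illustration and are not part of this patch.

#include <linux/spinlock.h>

struct thing {
	spinlock_t lock;	/* hypothetical structure, for illustration only */
};

/* Always take both locks in pointer order so the nesting is consistent. */
static void thing_lock_two(struct thing *a, struct thing *b)
{
	if (a < b) {
		spin_lock(&a->lock);
		spin_lock_nested(&b->lock, 1);	/* subclass 1 keeps lockdep happy */
	} else {
		spin_lock(&b->lock);
		spin_lock_nested(&a->lock, 1);
	}
}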
@@ -376,13 +376,7 @@ static void bdi_wb_shutdown(struct backing_dev_info *bdi)
mod_delayed_work(bdi_wq, &bdi->wb.dwork, 0);
flush_delayed_work(&bdi->wb.dwork);
WARN_ON(!list_empty(&bdi->work_list));
-
- /*
- * This shouldn't be necessary unless @bdi for some reason has
- * unflushed dirty IO after work_list is drained. Do it anyway
- * just in case.
- */
- cancel_delayed_work_sync(&bdi->wb.dwork);
+ WARN_ON(delayed_work_pending(&bdi->wb.dwork));
}
/*
@@ -402,21 +396,15 @@ static void bdi_prune_sb(struct backing_dev_info *bdi)
void bdi_unregister(struct backing_dev_info *bdi)
{
- struct device *dev = bdi->dev;
-
- if (dev) {
+ if (bdi->dev) {
bdi_set_min_ratio(bdi, 0);
trace_writeback_bdi_unregister(bdi);
bdi_prune_sb(bdi);
bdi_wb_shutdown(bdi);
bdi_debug_unregister(bdi);
-
- spin_lock_bh(&bdi->wb_lock);
+ device_unregister(bdi->dev);
bdi->dev = NULL;
- spin_unlock_bh(&bdi->wb_lock);
-
- device_unregister(dev);
}
}
EXPORT_SYMBOL(bdi_unregister);
@@ -487,8 +475,17 @@ void bdi_destroy(struct backing_dev_info *bdi)
int i;
/*
- * Splice our entries to the default_backing_dev_info, if this
- * bdi disappears
+ * Splice our entries to the default_backing_dev_info. This
+ * condition shouldn't happen. @wb must be empty at this point and
+ * dirty inodes on it might cause other issues. This workaround is
+ * added by ce5f8e779519 ("writeback: splice dirty inode entries to
+ * default bdi on bdi_destroy()") without root-causing the issue.
+ *
+ * http://lkml.kernel.org/g/1253038617-30204-11-git-send-email-jens.axboe@oracle.com
+ * http://thread.gmane.org/gmane.linux.file-systems/35341/focus=35350
+ *
+ * We should probably add WARN_ON() to find out whether it still
+ * happens and track it down if so.
*/
if (bdi_has_dirty_io(bdi)) {
struct bdi_writeback *dst = &default_backing_dev_info.wb;
@@ -503,12 +500,7 @@ void bdi_destroy(struct backing_dev_info *bdi)
bdi_unregister(bdi);
- /*
- * If bdi_unregister() had already been called earlier, the dwork
- * could still be pending because bdi_prune_sb() can race with the
- * bdi_wakeup_thread_delayed() calls from __mark_inode_dirty().
- */
- cancel_delayed_work_sync(&bdi->wb.dwork);
+ WARN_ON(delayed_work_pending(&bdi->wb.dwork));
for (i = 0; i < NR_BDI_STAT_ITEMS; i++)
percpu_counter_destroy(&bdi->bdi_stat[i]);
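Both WARN_ON(delayed_work_pending()) hunks above replace a defensive cancel_delayed_work_sync() with an assertion that the writeback work has already been flushed by the time shutdown or teardown runs. A stand-alone sketch of that general shape is below; struct widget and widget_shutdown() are invented names under that assumption, not the bdi code itself.

#include <linux/workqueue.h>
#include <linux/bug.h>

struct widget {
	struct delayed_work dwork;	/* hypothetical structure, for illustration only */
};

static void widget_shutdown(struct widget *w)
{
	/* Kick any pending work to run immediately and wait for it to finish. */
	mod_delayed_work(system_wq, &w->dwork, 0);
	flush_delayed_work(&w->dwork);

	/*
	 * Nothing should have re-armed the work after the flush; warn instead
	 * of silently cancelling, so a violated assumption becomes visible
	 * rather than being papered over.
	 */
	WARN_ON(delayed_work_pending(&w->dwork));
}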