From 25f7fd470bc97bb93d3a674e8c56c4a29063ec97 Mon Sep 17 00:00:00 2001
From: NeilBrown
Date: Thu, 19 Jul 2012 15:59:18 +1000
Subject: md: fix bug in handling of new_data_offset

commit c6563a8c38fde3c1c7fc925a10bde3ca20799301
    md: add possibility to change data-offset for devices.

introduced a 'new_data_offset' attribute which should normally be the
same as 'data_offset', but can be explicitly set to a different value
to allow a reshape operation to move the data.

Unfortunately when the 'data_offset' is explicitly set through sysfs,
the new_data_offset is not also set, so the two would incorrectly
become out-of-sync.

One result of this is that trying to set the 'size' after the
'data_offset' would fail, because it is not permitted to set the size
when the 'data_offset' and 'new_data_offset' are different - as that
can be confusing.  Consequently when mdadm tried to do this while
assembling an IMSM array it would fail.

This bug was introduced in 3.5-rc1.

Reported-by: Brian Downing
Bisected-by: Brian Downing
Tested-by: Brian Downing
Signed-off-by: NeilBrown
---
 drivers/md/md.c | 1 +
 1 file changed, 1 insertion(+)

(limited to 'drivers/md/md.c')

diff --git a/drivers/md/md.c b/drivers/md/md.c
index a4c219e3c85..0c1fe4cbce6 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -2931,6 +2931,7 @@ offset_store(struct md_rdev *rdev, const char *buf, size_t len)
                  * can be sane */
                 return -EBUSY;
         rdev->data_offset = offset;
+        rdev->new_data_offset = offset;
         return len;
 }
 
--
cgit v1.2.3-70-g09d2

From a05b7ea03d72f36edb0cec05e8893803335c61a0 Mon Sep 17 00:00:00 2001
From: NeilBrown
Date: Thu, 19 Jul 2012 15:59:18 +1000
Subject: md: avoid crash when stopping md array races with closing other open fds.

md will refuse to stop an array if any other fd (or mounted fs) is
using it.
When any fs is unmounted or when the last open fd is closed, all
pending IO will be flushed (e.g. the sync_blockdev call in
__blkdev_put) so there will be no pending IO to worry about when the
array is stopped.

However in order to send the STOP_ARRAY ioctl to stop the array, one
must first get an open fd on the block device.
If some fd is being used to write to the block device and it is closed
after mdadm opened the block device, but before mdadm issues the
STOP_ARRAY ioctl, then there will be no last-close on the md device so
__blkdev_put will not call sync_blockdev.

If this happens, then IO can still be in-flight while md tears down
the array and bad things can happen (use-after-free and subsequent
havoc).

So in the case where do_md_stop is being called from an open file
descriptor, call sync_blockdev after taking the mutex to ensure there
will be no new openers.

This is needed when setting a read-write device to read-only too.

Cc: stable@vger.kernel.org
Reported-by: majianpeng
Signed-off-by: NeilBrown
---
 drivers/md/md.c | 36 +++++++++++++++++++++++-------------
 1 file changed, 23 insertions(+), 13 deletions(-)

(limited to 'drivers/md/md.c')

diff --git a/drivers/md/md.c b/drivers/md/md.c
index 0c1fe4cbce6..d5ab4493c8b 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -3927,8 +3927,8 @@ array_state_show(struct mddev *mddev, char *page)
         return sprintf(page, "%s\n", array_states[st]);
 }
 
-static int do_md_stop(struct mddev * mddev, int ro, int is_open);
-static int md_set_readonly(struct mddev * mddev, int is_open);
+static int do_md_stop(struct mddev * mddev, int ro, struct block_device *bdev);
+static int md_set_readonly(struct mddev * mddev, struct block_device *bdev);
 static int do_md_run(struct mddev * mddev);
 static int restart_array(struct mddev *mddev);
 
@@ -3944,14 +3944,14 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
                 /* stopping an active array */
                 if (atomic_read(&mddev->openers) > 0)
                         return -EBUSY;
-                err = do_md_stop(mddev, 0, 0);
+                err = do_md_stop(mddev, 0, NULL);
                 break;
         case inactive:
                 /* stopping an active array */
                 if (mddev->pers) {
                         if (atomic_read(&mddev->openers) > 0)
                                 return -EBUSY;
-                        err = do_md_stop(mddev, 2, 0);
+                        err = do_md_stop(mddev, 2, NULL);
                 } else
                         err = 0; /* already inactive */
                 break;
@@ -3959,7 +3959,7 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
                 break; /* not supported yet */
         case readonly:
                 if (mddev->pers)
-                        err = md_set_readonly(mddev, 0);
+                        err = md_set_readonly(mddev, NULL);
                 else {
                         mddev->ro = 1;
                         set_disk_ro(mddev->gendisk, 1);
@@ -3969,7 +3969,7 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
         case read_auto:
                 if (mddev->pers) {
                         if (mddev->ro == 0)
-                                err = md_set_readonly(mddev, 0);
+                                err = md_set_readonly(mddev, NULL);
                         else if (mddev->ro == 1)
                                 err = restart_array(mddev);
                         if (err == 0) {
@@ -5352,15 +5352,17 @@ void md_stop(struct mddev *mddev)
 }
 EXPORT_SYMBOL_GPL(md_stop);
 
-static int md_set_readonly(struct mddev *mddev, int is_open)
+static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
 {
         int err = 0;
         mutex_lock(&mddev->open_mutex);
-        if (atomic_read(&mddev->openers) > is_open) {
+        if (atomic_read(&mddev->openers) > !!bdev) {
                 printk("md: %s still in use.\n",mdname(mddev));
                 err = -EBUSY;
                 goto out;
         }
+        if (bdev)
+                sync_blockdev(bdev);
         if (mddev->pers) {
                 __md_stop_writes(mddev);
 
@@ -5382,18 +5384,26 @@ out:
  *   0 - completely stop and dis-assemble array
  *   2 - stop but do not disassemble array
  */
-static int do_md_stop(struct mddev * mddev, int mode, int is_open)
+static int do_md_stop(struct mddev * mddev, int mode,
+                      struct block_device *bdev)
 {
         struct gendisk *disk = mddev->gendisk;
         struct md_rdev *rdev;
 
         mutex_lock(&mddev->open_mutex);
-        if (atomic_read(&mddev->openers) > is_open ||
+        if (atomic_read(&mddev->openers) > !!bdev ||
             mddev->sysfs_active) {
                 printk("md: %s still in use.\n",mdname(mddev));
                 mutex_unlock(&mddev->open_mutex);
                 return -EBUSY;
         }
+        if (bdev)
+                /* It is possible IO was issued on some other
+                 * open file which was closed before we took ->open_mutex.
+                 * As that was not the last close __blkdev_put will not
+                 * have called sync_blockdev, so we must.
+                 */
+                sync_blockdev(bdev);
 
         if (mddev->pers) {
                 if (mddev->ro)
@@ -5467,7 +5477,7 @@ static void autorun_array(struct mddev *mddev)
         err = do_md_run(mddev);
         if (err) {
                 printk(KERN_WARNING "md: do_md_run() returned %d\n", err);
-                do_md_stop(mddev, 0, 0);
+                do_md_stop(mddev, 0, NULL);
         }
 }
 
@@ -6482,11 +6492,11 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
                 goto done_unlock;
 
         case STOP_ARRAY:
-                err = do_md_stop(mddev, 0, 1);
+                err = do_md_stop(mddev, 0, bdev);
                 goto done_unlock;
 
         case STOP_ARRAY_RO:
-                err = md_set_readonly(mddev, 1);
+                err = md_set_readonly(mddev, bdev);
                 goto done_unlock;
 
         case BLKROSET:
--
cgit v1.2.3-70-g09d2
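
For illustration only (this sketch is not part of the patch; the device path, open flags and
error handling are assumptions): an mdadm-like tool has to open the md device itself in order
to issue STOP_ARRAY, which is the single opener do_md_stop() tolerates, and the sync_blockdev()
added above covers writers whose fds were closed between that open() and the ioctl.

/* hypothetical mdadm-like caller */
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/raid/md_u.h>            /* STOP_ARRAY */

static int stop_array(const char *dev)  /* e.g. "/dev/md0" */
{
        int fd = open(dev, O_RDONLY | O_EXCL);
        int ret;

        if (fd < 0)
                return -1;
        /* A writer may have closed its fd between our open() and this
         * ioctl; the kernel-side sync_blockdev() above flushes that IO
         * before the array is torn down. */
        ret = ioctl(fd, STOP_ARRAY, NULL);
        close(fd);
        return ret;
}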

From 90cf195d9bcb4bf70e8b6df5073b05164b279ba0 Mon Sep 17 00:00:00 2001
From: NeilBrown
Date: Tue, 31 Jul 2012 10:04:55 +1000
Subject: md: remove duplicated test on ->openers when calling do_md_stop()

do_md_stop tests mddev->openers while holding ->open_mutex, and fails
if this count is too high.
So callers do not need to check mddev->openers, and doing so isn't
very meaningful as they don't hold ->open_mutex so the number could
change.

So remove the unnecessary tests on mddev->openers.
These are not called often enough for there to be any gain in an
early test on ->open_mutex to avoid the need for a slightly more
costly mutex_lock call.

Signed-off-by: NeilBrown
---
 drivers/md/md.c | 8 ++------
 1 file changed, 2 insertions(+), 6 deletions(-)

(limited to 'drivers/md/md.c')

diff --git a/drivers/md/md.c b/drivers/md/md.c
index d5ab4493c8b..f6c46109b07 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -3942,17 +3942,13 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
                 break;
         case clear:
                 /* stopping an active array */
-                if (atomic_read(&mddev->openers) > 0)
-                        return -EBUSY;
                 err = do_md_stop(mddev, 0, NULL);
                 break;
         case inactive:
                 /* stopping an active array */
-                if (mddev->pers) {
-                        if (atomic_read(&mddev->openers) > 0)
-                                return -EBUSY;
+                if (mddev->pers)
                         err = do_md_stop(mddev, 2, NULL);
-                } else
+                else
                         err = 0; /* already inactive */
                 break;
         case suspended:
--
cgit v1.2.3-70-g09d2

From 0021b7bc045e4b0b85d8c53614342aaf84ca96a5 Mon Sep 17 00:00:00 2001
From: NeilBrown
Date: Tue, 31 Jul 2012 09:08:14 +0200
Subject: md: remove plug_cnt feature of plugging.

This seemed like a good idea at the time, but after further thought I
cannot see it making a difference other than very occasionally, and
testing to try to exercise the case it is most likely to help did not
show any performance difference from removing it.

So remove the counting of active plugs and allow 'pending writes' to
be activated at any time, not just when no plugs are active.

This is only relevant when there is a write-intent bitmap, and the
updating of the bitmap will likely introduce enough delay that the
single-threading of bitmap updates will be enough to collect large
numbers of updates together.

Removing this will make it easier to centralise the unplug code, and
will clear the way for other unplug enhancements which have a
measurable effect.

Signed-off-by: NeilBrown
Signed-off-by: Jens Axboe
---
 drivers/md/md.c     | 5 +----
 drivers/md/md.h     | 3 ---
 drivers/md/raid1.c  | 3 +--
 drivers/md/raid10.c | 3 +--
 drivers/md/raid5.c  | 5 ++---
 5 files changed, 5 insertions(+), 14 deletions(-)

(limited to 'drivers/md/md.c')

diff --git a/drivers/md/md.c b/drivers/md/md.c
index d5ab4493c8b..34381172a94 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -514,8 +514,7 @@ struct md_plug_cb {
 static void plugger_unplug(struct blk_plug_cb *cb)
 {
         struct md_plug_cb *mdcb = container_of(cb, struct md_plug_cb, cb);
-        if (atomic_dec_and_test(&mdcb->mddev->plug_cnt))
-                md_wakeup_thread(mdcb->mddev->thread);
+        md_wakeup_thread(mdcb->mddev->thread);
         kfree(mdcb);
 }
 
@@ -548,7 +547,6 @@ int mddev_check_plugged(struct mddev *mddev)
 
         mdcb->mddev = mddev;
         mdcb->cb.callback = plugger_unplug;
-        atomic_inc(&mddev->plug_cnt);
         list_add(&mdcb->cb.list, &plug->cb_list);
         return 1;
 }
@@ -602,7 +600,6 @@ void mddev_init(struct mddev *mddev)
         atomic_set(&mddev->active, 1);
         atomic_set(&mddev->openers, 0);
         atomic_set(&mddev->active_io, 0);
-        atomic_set(&mddev->plug_cnt, 0);
         spin_lock_init(&mddev->write_lock);
         atomic_set(&mddev->flush_pending, 0);
         init_waitqueue_head(&mddev->sb_wait);
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 7b4a3c318ca..91786c46b85 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -266,9 +266,6 @@ struct mddev {
         int                             new_chunk_sectors;
         int                             reshape_backwards;
 
-        atomic_t                        plug_cnt;       /* If device is expecting
-                                                         * more bios soon.
-                                                         */
         struct md_thread                *thread;        /* management thread */
         struct md_thread                *sync_thread;   /* doing resync or reconstruct */
         sector_t                        curr_resync;    /* last block scheduled */
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index cacd008d686..36a8fc059ac 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -2173,8 +2173,7 @@ static void raid1d(struct mddev *mddev)
         blk_start_plug(&plug);
         for (;;) {
 
-                if (atomic_read(&mddev->plug_cnt) == 0)
-                        flush_pending_writes(conf);
+                flush_pending_writes(conf);
 
                 spin_lock_irqsave(&conf->device_lock, flags);
                 if (list_empty(head)) {
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 8da6282254c..5d33603a497 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -2660,8 +2660,7 @@ static void raid10d(struct mddev *mddev)
         blk_start_plug(&plug);
         for (;;) {
 
-                if (atomic_read(&mddev->plug_cnt) == 0)
-                        flush_pending_writes(conf);
+                flush_pending_writes(conf);
 
                 spin_lock_irqsave(&conf->device_lock, flags);
                 if (list_empty(head)) {
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 04348d76bb3..bde9da2baa3 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -4543,7 +4543,7 @@ static void raid5d(struct mddev *mddev)
         while (1) {
                 struct bio *bio;
 
-                if (atomic_read(&mddev->plug_cnt) == 0 &&
+                if (
                     !list_empty(&conf->bitmap_list)) {
                         /* Now is a good time to flush some bitmap updates */
                         conf->seq_flush++;
@@ -4553,8 +4553,7 @@
                         conf->seq_write = conf->seq_flush;
                         activate_bit_delay(conf);
                 }
-                if (atomic_read(&mddev->plug_cnt) == 0)
-                        raid5_activate_delayed(conf);
+                raid5_activate_delayed(conf);
 
                 while ((bio = remove_bio_from_retry(conf))) {
                         int ok;
--
cgit v1.2.3-70-g09d2

From 9cbb17508808f8a6bdd83354b61e126ac4fa6fed Mon Sep 17 00:00:00 2001
From: NeilBrown
Date: Tue, 31 Jul 2012 09:08:14 +0200
Subject: blk: centralize non-request unplug handling.

Both md and umem have similar code for getting notified on a
blk_finish_plug event.
Centralize this code in block/ and allow each driver to provide its
distinctive difference.

Signed-off-by: NeilBrown
Signed-off-by: Jens Axboe
---
 block/blk-core.c       | 25 ++++++++++++++++++++++
 drivers/block/umem.c   | 35 ++++++-----------------------------
 drivers/md/md.c        | 56 +++++---------------------------------------------------------
 drivers/md/md.h        |  8 +++++++-
 include/linux/blkdev.h |  8 ++++++--
 5 files changed, 49 insertions(+), 83 deletions(-)

(limited to 'drivers/md/md.c')

diff --git a/block/blk-core.c b/block/blk-core.c
index dd134d834d5..177ddcf356e 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -2927,6 +2927,31 @@ static void flush_plug_callbacks(struct blk_plug *plug)
         }
 }
 
+struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug, void *data,
+                                      int size)
+{
+        struct blk_plug *plug = current->plug;
+        struct blk_plug_cb *cb;
+
+        if (!plug)
+                return NULL;
+
+        list_for_each_entry(cb, &plug->cb_list, list)
+                if (cb->callback == unplug && cb->data == data)
+                        return cb;
+
+        /* Not currently on the callback list */
+        BUG_ON(size < sizeof(*cb));
+        cb = kzalloc(size, GFP_ATOMIC);
+        if (cb) {
+                cb->data = data;
+                cb->callback = unplug;
+                list_add(&cb->list, &plug->cb_list);
+        }
+        return cb;
+}
+EXPORT_SYMBOL(blk_check_plugged);
+
 void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 {
         struct request_queue *q;
diff --git a/drivers/block/umem.c b/drivers/block/umem.c
index 9a72277a31d..6ef3489568e 100644
--- a/drivers/block/umem.c
+++ b/drivers/block/umem.c
@@ -513,42 +513,19 @@ static void process_page(unsigned long data)
         }
 }
 
-struct mm_plug_cb {
-        struct blk_plug_cb cb;
-        struct cardinfo *card;
-};
-
 static void mm_unplug(struct blk_plug_cb *cb)
 {
-        struct mm_plug_cb *mmcb = container_of(cb, struct mm_plug_cb, cb);
+        struct cardinfo *card = cb->data;
 
-        spin_lock_irq(&mmcb->card->lock);
-        activate(mmcb->card);
-        spin_unlock_irq(&mmcb->card->lock);
-        kfree(mmcb);
+        spin_lock_irq(&card->lock);
+        activate(card);
+        spin_unlock_irq(&card->lock);
+        kfree(cb);
 }
 
 static int mm_check_plugged(struct cardinfo *card)
 {
-        struct blk_plug *plug = current->plug;
-        struct mm_plug_cb *mmcb;
-
-        if (!plug)
-                return 0;
-
-        list_for_each_entry(mmcb, &plug->cb_list, cb.list) {
-                if (mmcb->cb.callback == mm_unplug && mmcb->card == card)
-                        return 1;
-        }
-        /* Not currently on the callback list */
-        mmcb = kmalloc(sizeof(*mmcb), GFP_ATOMIC);
-        if (!mmcb)
-                return 0;
-
-        mmcb->card = card;
-        mmcb->cb.callback = mm_unplug;
-        list_add(&mmcb->cb.list, &plug->cb_list);
-        return 1;
+        return !!blk_check_plugged(mm_unplug, card, sizeof(struct blk_plug_cb));
 }
 
 static void mm_make_request(struct request_queue *q, struct bio *bio)
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 34381172a94..b493fa41738 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -498,59 +498,13 @@ void md_flush_request(struct mddev *mddev, struct bio *bio)
 }
 EXPORT_SYMBOL(md_flush_request);
 
-/* Support for plugging.
- * This mirrors the plugging support in request_queue, but does not
- * require having a whole queue or request structures.
- * We allocate an md_plug_cb for each md device and each thread it gets
- * plugged on.  This links tot the private plug_handle structure in the
- * personality data where we keep a count of the number of outstanding
- * plugs so other code can see if a plug is active.
- */
-struct md_plug_cb {
-        struct blk_plug_cb cb;
-        struct mddev *mddev;
-};
-
-static void plugger_unplug(struct blk_plug_cb *cb)
+void md_unplug(struct blk_plug_cb *cb)
 {
-        struct md_plug_cb *mdcb = container_of(cb, struct md_plug_cb, cb);
-        md_wakeup_thread(mdcb->mddev->thread);
-        kfree(mdcb);
-}
-
-/* Check that an unplug wakeup will come shortly.
- * If not, wakeup the md thread immediately
- */
-int mddev_check_plugged(struct mddev *mddev)
-{
-        struct blk_plug *plug = current->plug;
-        struct md_plug_cb *mdcb;
-
-        if (!plug)
-                return 0;
-
-        list_for_each_entry(mdcb, &plug->cb_list, cb.list) {
-                if (mdcb->cb.callback == plugger_unplug &&
-                    mdcb->mddev == mddev) {
-                        /* Already on the list, move to top */
-                        if (mdcb != list_first_entry(&plug->cb_list,
-                                                     struct md_plug_cb,
-                                                     cb.list))
-                                list_move(&mdcb->cb.list, &plug->cb_list);
-                        return 1;
-                }
-        }
-        /* Not currently on the callback list */
-        mdcb = kmalloc(sizeof(*mdcb), GFP_ATOMIC);
-        if (!mdcb)
-                return 0;
-
-        mdcb->mddev = mddev;
-        mdcb->cb.callback = plugger_unplug;
-        list_add(&mdcb->cb.list, &plug->cb_list);
-        return 1;
+        struct mddev *mddev = cb->data;
+        md_wakeup_thread(mddev->thread);
+        kfree(cb);
 }
-EXPORT_SYMBOL_GPL(mddev_check_plugged);
+EXPORT_SYMBOL(md_unplug);
 
 static inline struct mddev *mddev_get(struct mddev *mddev)
 {
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 91786c46b85..8f998e08fb8 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -627,6 +627,12 @@ extern struct bio *bio_clone_mddev(struct bio *bio, gfp_t gfp_mask,
                                    struct mddev *mddev);
 extern struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
                                    struct mddev *mddev);
-extern int mddev_check_plugged(struct mddev *mddev);
 extern void md_trim_bio(struct bio *bio, int offset, int size);
+
+extern void md_unplug(struct blk_plug_cb *cb);
+static inline int mddev_check_plugged(struct mddev *mddev)
+{
+        return !!blk_check_plugged(md_unplug, mddev,
+                                   sizeof(struct blk_plug_cb));
+}
 #endif /* _MD_MD_H */
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 3816ce8a08f..607ca228f47 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -922,11 +922,15 @@ struct blk_plug {
 };
 #define BLK_MAX_REQUEST_COUNT 16
 
+struct blk_plug_cb;
+typedef void (*blk_plug_cb_fn)(struct blk_plug_cb *);
 struct blk_plug_cb {
         struct list_head list;
-        void (*callback)(struct blk_plug_cb *);
+        blk_plug_cb_fn callback;
+        void *data;
 };
-
+extern struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug,
+                                             void *data, int size);
 extern void blk_start_plug(struct blk_plug *);
 extern void blk_finish_plug(struct blk_plug *);
 extern void blk_flush_plug_list(struct blk_plug *, bool);
--
cgit v1.2.3-70-g09d2
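
A minimal sketch of how another block driver could adopt the new helper, mirroring what md and
umem do above ('struct mydrv_device' and mydrv_kick_queue() are hypothetical, not part of the
patch): the driver passes its private pointer as 'data', the callback owns and must kfree() the
blk_plug_cb that blk_check_plugged() allocated, and a larger 'size' may be passed to carry
driver-private state behind the embedded struct blk_plug_cb.

static void mydrv_unplug(struct blk_plug_cb *cb)
{
        struct mydrv_device *dev = cb->data;    /* set by blk_check_plugged() */

        mydrv_kick_queue(dev);  /* dispatch whatever was queued while plugged */
        kfree(cb);              /* the callback owns the allocation */
}

static int mydrv_check_plugged(struct mydrv_device *dev)
{
        return !!blk_check_plugged(mydrv_unplug, dev,
                                   sizeof(struct blk_plug_cb));
}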

From 74018dc3063a2c729fc73041c0a9f03aac995920 Mon Sep 17 00:00:00 2001
From: NeilBrown
Date: Tue, 31 Jul 2012 09:08:15 +0200
Subject: blk: pass from_schedule to non-request unplug functions.

This will allow md/raid to know why the unplug was called, and will be
able to act accordingly - if !from_schedule it is safe to perform
tasks which could themselves schedule.

Signed-off-by: NeilBrown
Signed-off-by: Jens Axboe
---
 block/blk-core.c       | 6 +++---
 drivers/block/umem.c   | 2 +-
 drivers/md/md.c        | 2 +-
 drivers/md/md.h        | 2 +-
 include/linux/blkdev.h | 2 +-
 5 files changed, 7 insertions(+), 7 deletions(-)

(limited to 'drivers/md/md.c')

diff --git a/block/blk-core.c b/block/blk-core.c
index 35bf4fe8234..4b4dbdfbca8 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -2909,7 +2909,7 @@ static void queue_unplugged(struct request_queue *q, unsigned int depth,
 }
 
-static void flush_plug_callbacks(struct blk_plug *plug)
+static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule)
 {
         LIST_HEAD(callbacks);
 
@@ -2921,7 +2921,7 @@ static void flush_plug_callbacks(struct blk_plug *plug)
                                           struct blk_plug_cb, list);
                         list_del(&cb->list);
-                        cb->callback(cb);
+                        cb->callback(cb, from_schedule);
                 }
         }
 }
@@ -2961,7 +2961,7 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 
         BUG_ON(plug->magic != PLUG_MAGIC);
 
-        flush_plug_callbacks(plug);
+        flush_plug_callbacks(plug, from_schedule);
         if (list_empty(&plug->list))
                 return;
 
diff --git a/drivers/block/umem.c b/drivers/block/umem.c
index 6ef3489568e..eb0d8216f55 100644
--- a/drivers/block/umem.c
+++ b/drivers/block/umem.c
@@ -513,7 +513,7 @@ static void process_page(unsigned long data)
         }
 }
 
-static void mm_unplug(struct blk_plug_cb *cb)
+static void mm_unplug(struct blk_plug_cb *cb, bool from_schedule)
 {
         struct cardinfo *card = cb->data;
 
diff --git a/drivers/md/md.c b/drivers/md/md.c
index b493fa41738..db02d2efb76 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -498,7 +498,7 @@ void md_flush_request(struct mddev *mddev, struct bio *bio)
 }
 EXPORT_SYMBOL(md_flush_request);
 
-void md_unplug(struct blk_plug_cb *cb)
+void md_unplug(struct blk_plug_cb *cb, bool from_schedule)
 {
         struct mddev *mddev = cb->data;
         md_wakeup_thread(mddev->thread);
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 8f998e08fb8..f385b038589 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -629,7 +629,7 @@ extern struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
                                    struct mddev *mddev);
 extern void md_trim_bio(struct bio *bio, int offset, int size);
 
-extern void md_unplug(struct blk_plug_cb *cb);
+extern void md_unplug(struct blk_plug_cb *cb, bool from_schedule);
 static inline int mddev_check_plugged(struct mddev *mddev)
 {
         return !!blk_check_plugged(md_unplug, mddev,
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 607ca228f47..4e72a9d4823 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -923,7 +923,7 @@ struct blk_plug {
 #define BLK_MAX_REQUEST_COUNT 16
 
 struct blk_plug_cb;
-typedef void (*blk_plug_cb_fn)(struct blk_plug_cb *);
+typedef void (*blk_plug_cb_fn)(struct blk_plug_cb *, bool);
 struct blk_plug_cb {
         struct list_head list;
         blk_plug_cb_fn callback;
--
cgit v1.2.3-70-g09d2
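
A rough sketch of why the flag is useful (hedged, not taken from any real driver;
mydrv_flush_pending() and dev->helper_task are assumptions): an unplug callback can now do
potentially sleeping work inline when called from a plain blk_finish_plug(), but must only hand
the work to a helper thread when the unplug happens from inside the scheduler.

static void mydrv_unplug(struct blk_plug_cb *cb, bool from_schedule)
{
        struct mydrv_device *dev = cb->data;

        if (from_schedule)
                /* unplugged from schedule(): must not sleep, defer the work */
                wake_up_process(dev->helper_task);
        else
                /* plain blk_finish_plug(): safe to do the work directly */
                mydrv_flush_pending(dev);
        kfree(cb);
}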