From 88e740f1654bf28565edd528a060695c1f2b5ad7 Mon Sep 17 00:00:00 2001 From: Fernando Luis Vázquez Cao Date: Mon, 27 Oct 2008 18:44:46 +0900 Subject: block: add queue flag for paravirt frontend drivers As is the case with SSD devices, we do not want to idle in AS/CFQ when the block device is a paravirt front-end driver. This patch adds a flag (QUEUE_FLAG_VIRT) which should be used by front-end drivers such as virtio_blk and xen-blkfront to indicate a paravirtualized device. Signed-off-by: Fernando Luis Vazquez Cao Signed-off-by: Jens Axboe --- include/linux/blkdev.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include/linux/blkdev.h') diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 031a315c050..482e9600f7a 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -449,6 +449,7 @@ struct request_queue #define QUEUE_FLAG_FAIL_IO 12 /* fake timeout */ #define QUEUE_FLAG_STACKABLE 13 /* supports request stacking */ #define QUEUE_FLAG_NONROT 14 /* non-rotational device (SSD) */ +#define QUEUE_FLAG_VIRT QUEUE_FLAG_NONROT /* paravirt device */ static inline int queue_is_locked(struct request_queue *q) { -- cgit v1.2.3-70-g09d2 From 64d01dc9e1927e6535627d73f2336c75d1dd3fe2 Mon Sep 17 00:00:00 2001 From: Cheng Renquan Date: Wed, 3 Dec 2008 12:41:39 +0100 Subject: block: use cancel_work_sync() instead of kblockd_flush_work() After many improvements to kblockd_flush_work, it is now identical to cancel_work_sync, so a direct call to cancel_work_sync is suggested. The only difference is that cancel_work_sync is a GPL-only symbol, so non-GPL modules can no longer use it. Signed-off-by: Cheng Renquan Cc: Jens Axboe Signed-off-by: Jens Axboe --- block/as-iosched.c | 2 +- block/blk-core.c | 8 +------- block/cfq-iosched.c | 2 +- include/linux/blkdev.h | 1 - 4 files changed, 3 insertions(+), 10 deletions(-) (limited to 'include/linux/blkdev.h') diff --git a/block/as-iosched.c b/block/as-iosched.c index 71f0abb219e..802b5d0d853 100644 --- a/block/as-iosched.c +++ b/block/as-iosched.c @@ -1344,7 +1344,7 @@ static void as_exit_queue(elevator_t *e) struct as_data *ad = e->elevator_data; del_timer_sync(&ad->antic_timer); - kblockd_flush_work(&ad->antic_work); + cancel_work_sync(&ad->antic_work); BUG_ON(!list_empty(&ad->fifo_list[REQ_SYNC])); BUG_ON(!list_empty(&ad->fifo_list[REQ_ASYNC])); diff --git a/block/blk-core.c b/block/blk-core.c index 20e1724ccb4..2fdcd0cff57 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -408,7 +408,7 @@ void blk_sync_queue(struct request_queue *q) { del_timer_sync(&q->unplug_timer); del_timer_sync(&q->timeout); - kblockd_flush_work(&q->unplug_work); + cancel_work_sync(&q->unplug_work); } EXPORT_SYMBOL(blk_sync_queue); @@ -2147,12 +2147,6 @@ int kblockd_schedule_work(struct request_queue *q, struct work_struct *work) } EXPORT_SYMBOL(kblockd_schedule_work); -void kblockd_flush_work(struct work_struct *work) -{ - cancel_work_sync(work); -} -EXPORT_SYMBOL(kblockd_flush_work); - int __init blk_dev_init(void) { kblockd_workqueue = create_workqueue("kblockd"); diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c index 6a062eebbd1..a2bfec7d6b3 100644 --- a/block/cfq-iosched.c +++ b/block/cfq-iosched.c @@ -2160,7 +2160,7 @@ out_cont: static void cfq_shutdown_timer_wq(struct cfq_data *cfqd) { del_timer_sync(&cfqd->idle_slice_timer); - kblockd_flush_work(&cfqd->unplug_work); + cancel_work_sync(&cfqd->unplug_work); } static void cfq_put_async_queues(struct cfq_data *cfqd) diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 482e9600f7a..e9bb73ff1d6 100644
--- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -978,7 +978,6 @@ static inline void put_dev_sector(Sector p) struct work_struct; int kblockd_schedule_work(struct request_queue *q, struct work_struct *work); -void kblockd_flush_work(struct work_struct *work); #define MODULE_ALIAS_BLOCKDEV(major,minor) \ MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor)) -- cgit v1.2.3-70-g09d2 From 313e42999dbc0f234ca5909a236f78f082cb43b1 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Fri, 28 Nov 2008 13:32:02 +0900 Subject: block: reorganize QUEUE_ORDERED_* constants Separate out ordering types (drain, tag) and action masks (preflush, postflush, fua) from visible ordering mode selectors (QUEUE_ORDERED_*). Ordering types are now named QUEUE_ORDERED_BY_* while action masks are named QUEUE_ORDERED_DO_*. This change is necessary to add QUEUE_ORDERED_DO_BAR and make it optional to improve the empty barrier implementation. Signed-off-by: Tejun Heo Signed-off-by: Jens Axboe --- block/blk-barrier.c | 20 ++++++++++---------- include/linux/blkdev.h | 39 +++++++++++++++++++++++---------------- 2 files changed, 33 insertions(+), 26 deletions(-) (limited to 'include/linux/blkdev.h') diff --git a/block/blk-barrier.c b/block/blk-barrier.c index 6e72d661ae4..1d7adc72c95 100644 --- a/block/blk-barrier.c +++ b/block/blk-barrier.c @@ -24,8 +24,8 @@ int blk_queue_ordered(struct request_queue *q, unsigned ordered, prepare_flush_fn *prepare_flush_fn) { - if (ordered & (QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_POSTFLUSH) && - prepare_flush_fn == NULL) { + if (!prepare_flush_fn && (ordered & (QUEUE_ORDERED_DO_PREFLUSH | + QUEUE_ORDERED_DO_POSTFLUSH))) { printk(KERN_ERR "%s: prepare_flush_fn required\n", __func__); return -EINVAL; } @@ -134,7 +134,7 @@ static void queue_flush(struct request_queue *q, unsigned which) struct request *rq; rq_end_io_fn *end_io; - if (which == QUEUE_ORDERED_PREFLUSH) { + if (which == QUEUE_ORDERED_DO_PREFLUSH) { rq = &q->pre_flush_rq; end_io = pre_flush_end_io; } else { @@ -167,7 +167,7 @@ static inline struct request *start_ordered(struct request_queue *q, blk_rq_init(q, rq); if (bio_data_dir(q->orig_bar_rq->bio) == WRITE) rq->cmd_flags |= REQ_RW; - if (q->ordered & QUEUE_ORDERED_FUA) + if (q->ordered & QUEUE_ORDERED_DO_FUA) rq->cmd_flags |= REQ_FUA; init_request_from_bio(rq, q->orig_bar_rq->bio); rq->end_io = bar_end_io; @@ -181,20 +181,20 @@ static inline struct request *start_ordered(struct request_queue *q, * there will be no data written between the pre and post flush. * Hence a single flush will suffice.
*/ - if ((q->ordered & QUEUE_ORDERED_POSTFLUSH) && !blk_empty_barrier(rq)) - queue_flush(q, QUEUE_ORDERED_POSTFLUSH); + if ((q->ordered & QUEUE_ORDERED_DO_POSTFLUSH) && !blk_empty_barrier(rq)) + queue_flush(q, QUEUE_ORDERED_DO_POSTFLUSH); else q->ordseq |= QUEUE_ORDSEQ_POSTFLUSH; elv_insert(q, rq, ELEVATOR_INSERT_FRONT); - if (q->ordered & QUEUE_ORDERED_PREFLUSH) { - queue_flush(q, QUEUE_ORDERED_PREFLUSH); + if (q->ordered & QUEUE_ORDERED_DO_PREFLUSH) { + queue_flush(q, QUEUE_ORDERED_DO_PREFLUSH); rq = &q->pre_flush_rq; } else q->ordseq |= QUEUE_ORDSEQ_PREFLUSH; - if ((q->ordered & QUEUE_ORDERED_TAG) || q->in_flight == 0) + if ((q->ordered & QUEUE_ORDERED_BY_TAG) || q->in_flight == 0) q->ordseq |= QUEUE_ORDSEQ_DRAIN; else rq = NULL; @@ -237,7 +237,7 @@ int blk_do_ordered(struct request_queue *q, struct request **rqp) rq != &q->pre_flush_rq && rq != &q->post_flush_rq) return 1; - if (q->ordered & QUEUE_ORDERED_TAG) { + if (q->ordered & QUEUE_ORDERED_BY_TAG) { /* Ordered by tag. Blocking the next barrier is enough. */ if (is_barrier && rq != &q->bar_rq) *rqp = NULL; diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index e9bb73ff1d6..5c92b443239 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -523,22 +523,29 @@ enum { * TAG_FLUSH : ordering by tag w/ pre and post flushes * TAG_FUA : ordering by tag w/ pre flush and FUA write */ - QUEUE_ORDERED_NONE = 0x00, - QUEUE_ORDERED_DRAIN = 0x01, - QUEUE_ORDERED_TAG = 0x02, - - QUEUE_ORDERED_PREFLUSH = 0x10, - QUEUE_ORDERED_POSTFLUSH = 0x20, - QUEUE_ORDERED_FUA = 0x40, - - QUEUE_ORDERED_DRAIN_FLUSH = QUEUE_ORDERED_DRAIN | - QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_POSTFLUSH, - QUEUE_ORDERED_DRAIN_FUA = QUEUE_ORDERED_DRAIN | - QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_FUA, - QUEUE_ORDERED_TAG_FLUSH = QUEUE_ORDERED_TAG | - QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_POSTFLUSH, - QUEUE_ORDERED_TAG_FUA = QUEUE_ORDERED_TAG | - QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_FUA, + QUEUE_ORDERED_BY_DRAIN = 0x01, + QUEUE_ORDERED_BY_TAG = 0x02, + QUEUE_ORDERED_DO_PREFLUSH = 0x10, + QUEUE_ORDERED_DO_POSTFLUSH = 0x40, + QUEUE_ORDERED_DO_FUA = 0x80, + + QUEUE_ORDERED_NONE = 0x00, + + QUEUE_ORDERED_DRAIN = QUEUE_ORDERED_BY_DRAIN, + QUEUE_ORDERED_DRAIN_FLUSH = QUEUE_ORDERED_DRAIN | + QUEUE_ORDERED_DO_PREFLUSH | + QUEUE_ORDERED_DO_POSTFLUSH, + QUEUE_ORDERED_DRAIN_FUA = QUEUE_ORDERED_DRAIN | + QUEUE_ORDERED_DO_PREFLUSH | + QUEUE_ORDERED_DO_FUA, + + QUEUE_ORDERED_TAG = QUEUE_ORDERED_BY_TAG, + QUEUE_ORDERED_TAG_FLUSH = QUEUE_ORDERED_TAG | + QUEUE_ORDERED_DO_PREFLUSH | + QUEUE_ORDERED_DO_POSTFLUSH, + QUEUE_ORDERED_TAG_FUA = QUEUE_ORDERED_TAG | + QUEUE_ORDERED_DO_PREFLUSH | + QUEUE_ORDERED_DO_FUA, /* * Ordered operation sequence -- cgit v1.2.3-70-g09d2 From f671620e7d895af221bdfeda751d54fa55ed9546 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Fri, 28 Nov 2008 13:32:04 +0900 Subject: block: make every barrier action optional In all barrier sequences, the barrier write itself was always assumed to be issued and thus didn't have a corresponding control flag. This patch adds QUEUE_ORDERED_DO_BAR and unifies action mask handling in start_ordered() such that any barrier action can be skipped. This patch doesn't introduce any visible behavior changes.
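To illustrate (this sketch is not part of the patch): a driver keeps advertising a composite mode, which after this change is simply the OR of one BY_* ordering type and the DO_* actions to perform, and start_ordered() executes exactly the DO_* bits that are set. The my_* names below are hypothetical, and the prepare_flush_fn signature is assumed from this era's blkdev.h.

	#include <linux/blkdev.h>

	/* hypothetical driver callback filling in a cache-flush command */
	static void my_prepare_flush(struct request_queue *q, struct request *rq)
	{
		/* set up a device-specific flush for rq */
	}

	static int my_driver_init_queue(struct request_queue *q)
	{
		/*
		 * QUEUE_ORDERED_DRAIN_FLUSH now expands to
		 * QUEUE_ORDERED_BY_DRAIN | QUEUE_ORDERED_DO_BAR |
		 * QUEUE_ORDERED_DO_PREFLUSH | QUEUE_ORDERED_DO_POSTFLUSH,
		 * so every step of the barrier sequence is individually
		 * selectable.
		 */
		return blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH,
					 my_prepare_flush);
	}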
Signed-off-by: Tejun Heo Signed-off-by: Jens Axboe --- block/blk-barrier.c | 41 ++++++++++++++++++++++++----------------- include/linux/blkdev.h | 7 +++++-- 2 files changed, 29 insertions(+), 19 deletions(-) (limited to 'include/linux/blkdev.h') diff --git a/block/blk-barrier.c b/block/blk-barrier.c index 43d479a1e66..1efabf829c5 100644 --- a/block/blk-barrier.c +++ b/block/blk-barrier.c @@ -158,19 +158,10 @@ static inline struct request *start_ordered(struct request_queue *q, q->ordered = q->next_ordered; q->ordseq |= QUEUE_ORDSEQ_STARTED; - /* - * Prep proxy barrier request. - */ + /* stash away the original request */ elv_dequeue_request(q, rq); q->orig_bar_rq = rq; - rq = &q->bar_rq; - blk_rq_init(q, rq); - if (bio_data_dir(q->orig_bar_rq->bio) == WRITE) - rq->cmd_flags |= REQ_RW; - if (q->ordered & QUEUE_ORDERED_DO_FUA) - rq->cmd_flags |= REQ_FUA; - init_request_from_bio(rq, q->orig_bar_rq->bio); - rq->end_io = bar_end_io; + rq = NULL; /* * Queue ordered sequence. As we stack them at the head, we @@ -181,12 +172,28 @@ static inline struct request *start_ordered(struct request_queue *q, * there will be no data written between the pre and post flush. * Hence a single flush will suffice. */ - if ((q->ordered & QUEUE_ORDERED_DO_POSTFLUSH) && !blk_empty_barrier(rq)) + if ((q->ordered & QUEUE_ORDERED_DO_POSTFLUSH) && + !blk_empty_barrier(q->orig_bar_rq)) { queue_flush(q, QUEUE_ORDERED_DO_POSTFLUSH); - else + rq = &q->post_flush_rq; + } else q->ordseq |= QUEUE_ORDSEQ_POSTFLUSH; - elv_insert(q, rq, ELEVATOR_INSERT_FRONT); + if (q->ordered & QUEUE_ORDERED_DO_BAR) { + rq = &q->bar_rq; + + /* initialize proxy request and queue it */ + blk_rq_init(q, rq); + if (bio_data_dir(q->orig_bar_rq->bio) == WRITE) + rq->cmd_flags |= REQ_RW; + if (q->ordered & QUEUE_ORDERED_DO_FUA) + rq->cmd_flags |= REQ_FUA; + init_request_from_bio(rq, q->orig_bar_rq->bio); + rq->end_io = bar_end_io; + + elv_insert(q, rq, ELEVATOR_INSERT_FRONT); + } else + q->ordseq |= QUEUE_ORDSEQ_BAR; if (q->ordered & QUEUE_ORDERED_DO_PREFLUSH) { queue_flush(q, QUEUE_ORDERED_DO_PREFLUSH); @@ -194,10 +201,10 @@ static inline struct request *start_ordered(struct request_queue *q, } else q->ordseq |= QUEUE_ORDSEQ_PREFLUSH; - if ((q->ordered & QUEUE_ORDERED_BY_TAG) || q->in_flight == 0) - q->ordseq |= QUEUE_ORDSEQ_DRAIN; - else + if ((q->ordered & QUEUE_ORDERED_BY_DRAIN) && q->in_flight) rq = NULL; + else + q->ordseq |= QUEUE_ORDSEQ_DRAIN; return rq; } diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 5c92b443239..b044267009e 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -526,12 +526,14 @@ enum { QUEUE_ORDERED_BY_DRAIN = 0x01, QUEUE_ORDERED_BY_TAG = 0x02, QUEUE_ORDERED_DO_PREFLUSH = 0x10, + QUEUE_ORDERED_DO_BAR = 0x20, QUEUE_ORDERED_DO_POSTFLUSH = 0x40, QUEUE_ORDERED_DO_FUA = 0x80, QUEUE_ORDERED_NONE = 0x00, - QUEUE_ORDERED_DRAIN = QUEUE_ORDERED_BY_DRAIN, + QUEUE_ORDERED_DRAIN = QUEUE_ORDERED_BY_DRAIN | + QUEUE_ORDERED_DO_BAR, QUEUE_ORDERED_DRAIN_FLUSH = QUEUE_ORDERED_DRAIN | QUEUE_ORDERED_DO_PREFLUSH | QUEUE_ORDERED_DO_POSTFLUSH, @@ -539,7 +541,8 @@ enum { QUEUE_ORDERED_DO_PREFLUSH | QUEUE_ORDERED_DO_FUA, - QUEUE_ORDERED_TAG = QUEUE_ORDERED_BY_TAG, + QUEUE_ORDERED_TAG = QUEUE_ORDERED_BY_TAG | + QUEUE_ORDERED_DO_BAR, QUEUE_ORDERED_TAG_FLUSH = QUEUE_ORDERED_TAG | QUEUE_ORDERED_DO_PREFLUSH | QUEUE_ORDERED_DO_POSTFLUSH, -- cgit v1.2.3-70-g09d2 From 8f11b3e99a1136fcbb67316c3260f085299c0bff Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Fri, 28 Nov 2008 13:32:05 +0900 Subject: block: make barrier completion 
more robust Barrier completion had the following assumptions. * start_ordered() couldn't finish the whole sequence properly. If all actions are to be skipped, q->ordseq is set correctly but the actual completion was never triggered thus hanging the barrier request. * Drain completion in elv_completed_request() assumed that there's always at least one request in the queue when drain completes. Both assumptions are true, but they need to be removed to improve the empty barrier implementation. This patch makes the following changes. * Make start_ordered() use blk_ordered_complete_seq() to mark skipped steps complete and notify __elv_next_request() that it should fetch the next request if the whole barrier has completed inside start_ordered(). * Make the drain completion path in elv_completed_request() check whether the queue is empty. Empty queue also indicates drain completion. * While at it, convert 0/1 return from blk_do_ordered() to false/true. Signed-off-by: Tejun Heo Signed-off-by: Jens Axboe --- block/blk-barrier.c | 45 +++++++++++++++++++++++------------------ block/elevator.c | 10 +++++++--- include/linux/blkdev.h | 4 ++-- 3 files changed, 36 insertions(+), 23 deletions(-) (limited to 'include/linux/blkdev.h') diff --git a/block/blk-barrier.c b/block/blk-barrier.c index 1efabf829c5..b03d88013e1 100644 --- a/block/blk-barrier.c +++ b/block/blk-barrier.c @@ -88,7 +88,7 @@ unsigned blk_ordered_req_seq(struct request *rq) return QUEUE_ORDSEQ_DONE; } -void blk_ordered_complete_seq(struct request_queue *q, unsigned seq, int error) +bool blk_ordered_complete_seq(struct request_queue *q, unsigned seq, int error) { struct request *rq; @@ -99,7 +99,7 @@ void blk_ordered_complete_seq(struct request_queue *q, unsigned seq, int error) q->ordseq |= seq; if (blk_ordered_cur_seq(q) != QUEUE_ORDSEQ_DONE) - return; + return false; /* * Okay, sequence complete. @@ -109,6 +109,8 @@ void blk_ordered_complete_seq(struct request_queue *q, unsigned seq, int error) if (__blk_end_request(rq, q->orderr, blk_rq_bytes(rq))) BUG(); + + return true; } static void pre_flush_end_io(struct request *rq, int error) @@ -151,9 +153,11 @@ static void queue_flush(struct request_queue *q, unsigned which) elv_insert(q, rq, ELEVATOR_INSERT_FRONT); } -static inline struct request *start_ordered(struct request_queue *q, - struct request *rq) +static inline bool start_ordered(struct request_queue *q, struct request **rqp) { + struct request *rq = *rqp; + unsigned skip = 0; + q->orderr = 0; q->ordered = q->next_ordered; q->ordseq |= QUEUE_ORDSEQ_STARTED; @@ -177,7 +181,7 @@ static inline struct request *start_ordered(struct request_queue *q, queue_flush(q, QUEUE_ORDERED_DO_POSTFLUSH); rq = &q->post_flush_rq; } else - q->ordseq |= QUEUE_ORDSEQ_POSTFLUSH; + skip |= QUEUE_ORDSEQ_POSTFLUSH; if (q->ordered & QUEUE_ORDERED_DO_BAR) { rq = &q->bar_rq; @@ -193,35 +197,40 @@ static inline struct request *start_ordered(struct request_queue *q, elv_insert(q, rq, ELEVATOR_INSERT_FRONT); } else - q->ordseq |= QUEUE_ORDSEQ_BAR; + skip |= QUEUE_ORDSEQ_BAR; if (q->ordered & QUEUE_ORDERED_DO_PREFLUSH) { queue_flush(q, QUEUE_ORDERED_DO_PREFLUSH); rq = &q->pre_flush_rq; } else - q->ordseq |= QUEUE_ORDSEQ_PREFLUSH; + skip |= QUEUE_ORDSEQ_PREFLUSH; if ((q->ordered & QUEUE_ORDERED_BY_DRAIN) && q->in_flight) rq = NULL; else - q->ordseq |= QUEUE_ORDSEQ_DRAIN; + skip |= QUEUE_ORDSEQ_DRAIN; + + *rqp = rq; - return rq; + /* + * Complete skipped sequences. If whole sequence is complete, + * return false to tell elevator that this request is gone.
+ */ + return !blk_ordered_complete_seq(q, skip, 0); } -int blk_do_ordered(struct request_queue *q, struct request **rqp) +bool blk_do_ordered(struct request_queue *q, struct request **rqp) { struct request *rq = *rqp; const int is_barrier = blk_fs_request(rq) && blk_barrier_rq(rq); if (!q->ordseq) { if (!is_barrier) - return 1; + return true; - if (q->next_ordered != QUEUE_ORDERED_NONE) { - *rqp = start_ordered(q, rq); - return 1; - } else { + if (q->next_ordered != QUEUE_ORDERED_NONE) + return start_ordered(q, rqp); + else { /* * Queue ordering not supported. Terminate * with prejudice. @@ -231,7 +240,7 @@ int blk_do_ordered(struct request_queue *q, struct request **rqp) blk_rq_bytes(rq))) BUG(); *rqp = NULL; - return 0; + return false; } } @@ -242,7 +251,7 @@ int blk_do_ordered(struct request_queue *q, struct request **rqp) /* Special requests are not subject to ordering rules. */ if (!blk_fs_request(rq) && rq != &q->pre_flush_rq && rq != &q->post_flush_rq) - return 1; + return true; if (q->ordered & QUEUE_ORDERED_BY_TAG) { /* Ordered by tag. Blocking the next barrier is enough. */ @@ -255,7 +264,7 @@ int blk_do_ordered(struct request_queue *q, struct request **rqp) *rqp = NULL; } - return 1; + return true; } static void bio_end_empty_barrier(struct bio *bio, int err) diff --git a/block/elevator.c b/block/elevator.c index 86836dd179c..261ffaaf47b 100644 --- a/block/elevator.c +++ b/block/elevator.c @@ -944,10 +944,14 @@ void elv_completed_request(struct request_queue *q, struct request *rq) * drained for flush sequence. */ if (unlikely(q->ordseq)) { - struct request *first_rq = list_entry_rq(q->queue_head.next); - if (q->in_flight == 0 && + struct request *next = NULL; + + if (!list_empty(&q->queue_head)) + next = list_entry_rq(q->queue_head.next); + + if (!q->in_flight && blk_ordered_cur_seq(q) == QUEUE_ORDSEQ_DRAIN && - blk_ordered_req_seq(first_rq) > QUEUE_ORDSEQ_DRAIN) { + (!next || blk_ordered_req_seq(next) > QUEUE_ORDSEQ_DRAIN)) { blk_ordered_complete_seq(q, QUEUE_ORDSEQ_DRAIN, 0); blk_start_queueing(q); } diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index b044267009e..3c7078e0129 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -866,10 +866,10 @@ extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *); extern void blk_queue_rq_timeout(struct request_queue *, unsigned int); extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev); extern int blk_queue_ordered(struct request_queue *, unsigned, prepare_flush_fn *); -extern int blk_do_ordered(struct request_queue *, struct request **); +extern bool blk_do_ordered(struct request_queue *, struct request **); extern unsigned blk_ordered_cur_seq(struct request_queue *); extern unsigned blk_ordered_req_seq(struct request *); -extern void blk_ordered_complete_seq(struct request_queue *, unsigned, int); +extern bool blk_ordered_complete_seq(struct request_queue *, unsigned, int); extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *); extern void blk_dump_rq_flags(struct request *, char *); -- cgit v1.2.3-70-g09d2 From 58eea927d2de43dc6f03d1ba2c46e55854b31540 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Fri, 28 Nov 2008 13:32:06 +0900 Subject: block: simplify empty barrier implementation Empty barrier required special handling in __elv_next_request() to complete it without letting the low level driver see it. 
With previous changes, barrier code is now flexible enough to skip the BAR step using the same barrier sequence selection mechanism. Drop the special handling and mask off q->ordered from start_ordered(). Remove blk_empty_barrier() test which now has no user. Signed-off-by: Tejun Heo Signed-off-by: Jens Axboe --- block/blk-barrier.c | 16 ++++++++++------ block/elevator.c | 8 -------- include/linux/blkdev.h | 1 - 3 files changed, 10 insertions(+), 15 deletions(-) (limited to 'include/linux/blkdev.h') diff --git a/block/blk-barrier.c b/block/blk-barrier.c index b03d88013e1..c63044e9c4c 100644 --- a/block/blk-barrier.c +++ b/block/blk-barrier.c @@ -162,6 +162,14 @@ static inline bool start_ordered(struct request_queue *q, struct request **rqp) q->ordered = q->next_ordered; q->ordseq |= QUEUE_ORDSEQ_STARTED; + /* + * For an empty barrier, there's no actual BAR request, which + * in turn makes POSTFLUSH unnecessary. Mask them off. + */ + if (!rq->hard_nr_sectors) + q->ordered &= ~(QUEUE_ORDERED_DO_BAR | + QUEUE_ORDERED_DO_POSTFLUSH); + /* stash away the original request */ elv_dequeue_request(q, rq); q->orig_bar_rq = rq; @@ -171,13 +179,9 @@ static inline bool start_ordered(struct request_queue *q, struct request **rqp) * Queue ordered sequence. As we stack them at the head, we * need to queue in reverse order. Note that we rely on that * no fs request uses ELEVATOR_INSERT_FRONT and thus no fs - * request gets inbetween ordered sequence. If this request is - * an empty barrier, we don't need to do a postflush ever since - * there will be no data written between the pre and post flush. - * Hence a single flush will suffice. + * request gets inbetween ordered sequence. */ - if ((q->ordered & QUEUE_ORDERED_DO_POSTFLUSH) && - !blk_empty_barrier(q->orig_bar_rq)) { + if (q->ordered & QUEUE_ORDERED_DO_POSTFLUSH) { queue_flush(q, QUEUE_ORDERED_DO_POSTFLUSH); rq = &q->post_flush_rq; } else diff --git a/block/elevator.c b/block/elevator.c index 261ffaaf47b..ff60177a3ba 100644 --- a/block/elevator.c +++ b/block/elevator.c @@ -755,14 +755,6 @@ struct request *elv_next_request(struct request_queue *q) int ret; while ((rq = __elv_next_request(q)) != NULL) { - /* - * Kill the empty barrier place holder, the driver must - * not ever see it. - */ - if (blk_empty_barrier(rq)) { - __blk_end_request(rq, 0, blk_rq_bytes(rq)); - continue; - } if (!(rq->cmd_flags & REQ_STARTED)) { /* * This is the first time the device driver diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 3c7078e0129..41bbadfd17f 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -596,7 +596,6 @@ enum { #define blk_fua_rq(rq) ((rq)->cmd_flags & REQ_FUA) #define blk_discard_rq(rq) ((rq)->cmd_flags & REQ_DISCARD) #define blk_bidi_rq(rq) ((rq)->next_rq != NULL) -#define blk_empty_barrier(rq) (blk_barrier_rq(rq) && blk_fs_request(rq) && !(rq)->hard_nr_sectors) /* rq->queuelist of dequeued request must be list_empty() */ #define blk_queued_rq(rq) (!list_empty(&(rq)->queuelist)) -- cgit v1.2.3-70-g09d2 From b374d18a4bfce705e4a99ae9f501b53e86ecb283 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Fri, 31 Oct 2008 10:05:07 +0100 Subject: block: get rid of elevator_t typedef Just use struct elevator_queue everywhere instead. 
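For example (illustrative only; the foo_* scheduler is a placeholder for the per-scheduler code touched below), a hook that was written as

	static void foo_exit_queue(elevator_t *e)
	{
		kfree(e->elevator_data);
	}

now simply spells the type out:

	static void foo_exit_queue(struct elevator_queue *e)
	{
		kfree(e->elevator_data);
	}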
Signed-off-by: Jens Axboe --- block/as-iosched.c | 8 +++---- block/cfq-iosched.c | 6 +++--- block/deadline-iosched.c | 6 +++--- block/elevator.c | 55 +++++++++++++++++++++++++----------------------- block/noop-iosched.c | 2 +- drivers/block/nbd.c | 2 +- include/linux/blkdev.h | 3 +-- include/linux/elevator.h | 8 +++---- 8 files changed, 46 insertions(+), 44 deletions(-) (limited to 'include/linux/blkdev.h') diff --git a/block/as-iosched.c b/block/as-iosched.c index 802b5d0d853..631f6f44460 100644 --- a/block/as-iosched.c +++ b/block/as-iosched.c @@ -1339,7 +1339,7 @@ static int as_may_queue(struct request_queue *q, int rw) return ret; } -static void as_exit_queue(elevator_t *e) +static void as_exit_queue(struct elevator_queue *e) { struct as_data *ad = e->elevator_data; @@ -1409,7 +1409,7 @@ as_var_store(unsigned long *var, const char *page, size_t count) return count; } -static ssize_t est_time_show(elevator_t *e, char *page) +static ssize_t est_time_show(struct elevator_queue *e, char *page) { struct as_data *ad = e->elevator_data; int pos = 0; @@ -1427,7 +1427,7 @@ static ssize_t est_time_show(elevator_t *e, char *page) } #define SHOW_FUNCTION(__FUNC, __VAR) \ -static ssize_t __FUNC(elevator_t *e, char *page) \ +static ssize_t __FUNC(struct elevator_queue *e, char *page) \ { \ struct as_data *ad = e->elevator_data; \ return as_var_show(jiffies_to_msecs((__VAR)), (page)); \ @@ -1440,7 +1440,7 @@ SHOW_FUNCTION(as_write_batch_expire_show, ad->batch_expire[REQ_ASYNC]); #undef SHOW_FUNCTION #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX) \ -static ssize_t __FUNC(elevator_t *e, const char *page, size_t count) \ +static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \ { \ struct as_data *ad = e->elevator_data; \ int ret = as_var_store(__PTR, (page), count); \ diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c index a2bfec7d6b3..adaf93a9d19 100644 --- a/block/cfq-iosched.c +++ b/block/cfq-iosched.c @@ -2178,7 +2178,7 @@ static void cfq_put_async_queues(struct cfq_data *cfqd) cfq_put_queue(cfqd->async_idle_cfqq); } -static void cfq_exit_queue(elevator_t *e) +static void cfq_exit_queue(struct elevator_queue *e) { struct cfq_data *cfqd = e->elevator_data; struct request_queue *q = cfqd->queue; @@ -2288,7 +2288,7 @@ cfq_var_store(unsigned int *var, const char *page, size_t count) } #define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \ -static ssize_t __FUNC(elevator_t *e, char *page) \ +static ssize_t __FUNC(struct elevator_queue *e, char *page) \ { \ struct cfq_data *cfqd = e->elevator_data; \ unsigned int __data = __VAR; \ @@ -2308,7 +2308,7 @@ SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0); #undef SHOW_FUNCTION #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \ -static ssize_t __FUNC(elevator_t *e, const char *page, size_t count) \ +static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \ { \ struct cfq_data *cfqd = e->elevator_data; \ unsigned int __data; \ diff --git a/block/deadline-iosched.c b/block/deadline-iosched.c index fd311179f44..c4d991d4ade 100644 --- a/block/deadline-iosched.c +++ b/block/deadline-iosched.c @@ -334,7 +334,7 @@ static int deadline_queue_empty(struct request_queue *q) && list_empty(&dd->fifo_list[READ]); } -static void deadline_exit_queue(elevator_t *e) +static void deadline_exit_queue(struct elevator_queue *e) { struct deadline_data *dd = e->elevator_data; @@ -387,7 +387,7 @@ deadline_var_store(int *var, const char *page, size_t count) } #define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \ -static 
ssize_t __FUNC(elevator_t *e, char *page) \ +static ssize_t __FUNC(struct elevator_queue *e, char *page) \ { \ struct deadline_data *dd = e->elevator_data; \ int __data = __VAR; \ @@ -403,7 +403,7 @@ SHOW_FUNCTION(deadline_fifo_batch_show, dd->fifo_batch, 0); #undef SHOW_FUNCTION #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \ -static ssize_t __FUNC(elevator_t *e, const char *page, size_t count) \ +static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \ { \ struct deadline_data *dd = e->elevator_data; \ int __data; \ diff --git a/block/elevator.c b/block/elevator.c index ff60177a3ba..98259eda0ef 100644 --- a/block/elevator.c +++ b/block/elevator.c @@ -65,7 +65,7 @@ DEFINE_TRACE(block_rq_issue); static int elv_iosched_allow_merge(struct request *rq, struct bio *bio) { struct request_queue *q = rq->q; - elevator_t *e = q->elevator; + struct elevator_queue *e = q->elevator; if (e->ops->elevator_allow_merge_fn) return e->ops->elevator_allow_merge_fn(q, rq, bio); @@ -208,13 +208,13 @@ __setup("elevator=", elevator_setup); static struct kobj_type elv_ktype; -static elevator_t *elevator_alloc(struct request_queue *q, +static struct elevator_queue *elevator_alloc(struct request_queue *q, struct elevator_type *e) { - elevator_t *eq; + struct elevator_queue *eq; int i; - eq = kmalloc_node(sizeof(elevator_t), GFP_KERNEL | __GFP_ZERO, q->node); + eq = kmalloc_node(sizeof(*eq), GFP_KERNEL | __GFP_ZERO, q->node); if (unlikely(!eq)) goto err; @@ -240,8 +240,9 @@ err: static void elevator_release(struct kobject *kobj) { - elevator_t *e = container_of(kobj, elevator_t, kobj); + struct elevator_queue *e; + e = container_of(kobj, struct elevator_queue, kobj); elevator_put(e->elevator_type); kfree(e->hash); kfree(e); @@ -297,7 +298,7 @@ int elevator_init(struct request_queue *q, char *name) } EXPORT_SYMBOL(elevator_init); -void elevator_exit(elevator_t *e) +void elevator_exit(struct elevator_queue *e) { mutex_lock(&e->sysfs_lock); if (e->ops->elevator_exit_fn) @@ -311,7 +312,7 @@ EXPORT_SYMBOL(elevator_exit); static void elv_activate_rq(struct request_queue *q, struct request *rq) { - elevator_t *e = q->elevator; + struct elevator_queue *e = q->elevator; if (e->ops->elevator_activate_req_fn) e->ops->elevator_activate_req_fn(q, rq); @@ -319,7 +320,7 @@ static void elv_activate_rq(struct request_queue *q, struct request *rq) static void elv_deactivate_rq(struct request_queue *q, struct request *rq) { - elevator_t *e = q->elevator; + struct elevator_queue *e = q->elevator; if (e->ops->elevator_deactivate_req_fn) e->ops->elevator_deactivate_req_fn(q, rq); @@ -338,7 +339,7 @@ static void elv_rqhash_del(struct request_queue *q, struct request *rq) static void elv_rqhash_add(struct request_queue *q, struct request *rq) { - elevator_t *e = q->elevator; + struct elevator_queue *e = q->elevator; BUG_ON(ELV_ON_HASH(rq)); hlist_add_head(&rq->hash, &e->hash[ELV_HASH_FN(rq_hash_key(rq))]); @@ -352,7 +353,7 @@ static void elv_rqhash_reposition(struct request_queue *q, struct request *rq) static struct request *elv_rqhash_find(struct request_queue *q, sector_t offset) { - elevator_t *e = q->elevator; + struct elevator_queue *e = q->elevator; struct hlist_head *hash_list = &e->hash[ELV_HASH_FN(offset)]; struct hlist_node *entry, *next; struct request *rq; @@ -494,7 +495,7 @@ EXPORT_SYMBOL(elv_dispatch_add_tail); int elv_merge(struct request_queue *q, struct request **req, struct bio *bio) { - elevator_t *e = q->elevator; + struct elevator_queue *e = q->elevator; struct request *__rq; int 
ret; @@ -529,7 +530,7 @@ int elv_merge(struct request_queue *q, struct request **req, struct bio *bio) void elv_merged_request(struct request_queue *q, struct request *rq, int type) { - elevator_t *e = q->elevator; + struct elevator_queue *e = q->elevator; if (e->ops->elevator_merged_fn) e->ops->elevator_merged_fn(q, rq, type); @@ -543,7 +544,7 @@ void elv_merged_request(struct request_queue *q, struct request *rq, int type) void elv_merge_requests(struct request_queue *q, struct request *rq, struct request *next) { - elevator_t *e = q->elevator; + struct elevator_queue *e = q->elevator; if (e->ops->elevator_merge_req_fn) e->ops->elevator_merge_req_fn(q, rq, next); @@ -846,7 +847,7 @@ void elv_dequeue_request(struct request_queue *q, struct request *rq) int elv_queue_empty(struct request_queue *q) { - elevator_t *e = q->elevator; + struct elevator_queue *e = q->elevator; if (!list_empty(&q->queue_head)) return 0; @@ -860,7 +861,7 @@ EXPORT_SYMBOL(elv_queue_empty); struct request *elv_latter_request(struct request_queue *q, struct request *rq) { - elevator_t *e = q->elevator; + struct elevator_queue *e = q->elevator; if (e->ops->elevator_latter_req_fn) return e->ops->elevator_latter_req_fn(q, rq); @@ -869,7 +870,7 @@ struct request *elv_latter_request(struct request_queue *q, struct request *rq) struct request *elv_former_request(struct request_queue *q, struct request *rq) { - elevator_t *e = q->elevator; + struct elevator_queue *e = q->elevator; if (e->ops->elevator_former_req_fn) return e->ops->elevator_former_req_fn(q, rq); @@ -878,7 +879,7 @@ struct request *elv_former_request(struct request_queue *q, struct request *rq) int elv_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask) { - elevator_t *e = q->elevator; + struct elevator_queue *e = q->elevator; if (e->ops->elevator_set_req_fn) return e->ops->elevator_set_req_fn(q, rq, gfp_mask); @@ -889,7 +890,7 @@ int elv_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask) void elv_put_request(struct request_queue *q, struct request *rq) { - elevator_t *e = q->elevator; + struct elevator_queue *e = q->elevator; if (e->ops->elevator_put_req_fn) e->ops->elevator_put_req_fn(rq); @@ -897,7 +898,7 @@ void elv_put_request(struct request_queue *q, struct request *rq) int elv_may_queue(struct request_queue *q, int rw) { - elevator_t *e = q->elevator; + struct elevator_queue *e = q->elevator; if (e->ops->elevator_may_queue_fn) return e->ops->elevator_may_queue_fn(q, rw); @@ -920,7 +921,7 @@ EXPORT_SYMBOL(elv_abort_queue); void elv_completed_request(struct request_queue *q, struct request *rq) { - elevator_t *e = q->elevator; + struct elevator_queue *e = q->elevator; /* * request is released from the driver, io must be done @@ -955,13 +956,14 @@ void elv_completed_request(struct request_queue *q, struct request *rq) static ssize_t elv_attr_show(struct kobject *kobj, struct attribute *attr, char *page) { - elevator_t *e = container_of(kobj, elevator_t, kobj); struct elv_fs_entry *entry = to_elv(attr); + struct elevator_queue *e; ssize_t error; if (!entry->show) return -EIO; + e = container_of(kobj, struct elevator_queue, kobj); mutex_lock(&e->sysfs_lock); error = e->ops ? 
entry->show(e, page) : -ENOENT; mutex_unlock(&e->sysfs_lock); @@ -972,13 +974,14 @@ static ssize_t elv_attr_store(struct kobject *kobj, struct attribute *attr, const char *page, size_t length) { - elevator_t *e = container_of(kobj, elevator_t, kobj); struct elv_fs_entry *entry = to_elv(attr); + struct elevator_queue *e; ssize_t error; if (!entry->store) return -EIO; + e = container_of(kobj, struct elevator_queue, kobj); mutex_lock(&e->sysfs_lock); error = e->ops ? entry->store(e, page, length) : -ENOENT; mutex_unlock(&e->sysfs_lock); @@ -997,7 +1000,7 @@ static struct kobj_type elv_ktype = { int elv_register_queue(struct request_queue *q) { - elevator_t *e = q->elevator; + struct elevator_queue *e = q->elevator; int error; error = kobject_add(&e->kobj, &q->kobj, "%s", "iosched"); @@ -1015,7 +1018,7 @@ int elv_register_queue(struct request_queue *q) return error; } -static void __elv_unregister_queue(elevator_t *e) +static void __elv_unregister_queue(struct elevator_queue *e) { kobject_uevent(&e->kobj, KOBJ_REMOVE); kobject_del(&e->kobj); @@ -1078,7 +1081,7 @@ EXPORT_SYMBOL_GPL(elv_unregister); */ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e) { - elevator_t *old_elevator, *e; + struct elevator_queue *old_elevator, *e; void *data; /* @@ -1184,7 +1187,7 @@ ssize_t elv_iosched_store(struct request_queue *q, const char *name, ssize_t elv_iosched_show(struct request_queue *q, char *name) { - elevator_t *e = q->elevator; + struct elevator_queue *e = q->elevator; struct elevator_type *elv = e->elevator_type; struct elevator_type *__e; int len = 0; diff --git a/block/noop-iosched.c b/block/noop-iosched.c index c23e0296965..3a0d369d08c 100644 --- a/block/noop-iosched.c +++ b/block/noop-iosched.c @@ -76,7 +76,7 @@ static void *noop_init_queue(struct request_queue *q) return nd; } -static void noop_exit_queue(elevator_t *e) +static void noop_exit_queue(struct elevator_queue *e) { struct noop_data *nd = e->elevator_data; diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index d3a91cacee8..0766ce6187a 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c @@ -722,7 +722,7 @@ static int __init nbd_init(void) for (i = 0; i < nbds_max; i++) { struct gendisk *disk = alloc_disk(1 << part_shift); - elevator_t *old_e; + struct elevator_queue *old_e; if (!disk) goto out; nbd_dev[i].disk = disk; diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 41bbadfd17f..7035cec583b 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -26,7 +26,6 @@ struct scsi_ioctl_command; struct request_queue; struct elevator_queue; -typedef struct elevator_queue elevator_t; struct request_pm_state; struct blk_trace; struct request; @@ -313,7 +312,7 @@ struct request_queue */ struct list_head queue_head; struct request *last_merge; - elevator_t *elevator; + struct elevator_queue *elevator; /* * the queue request freelist, one for reads and one for writes diff --git a/include/linux/elevator.h b/include/linux/elevator.h index 92f6f634e3e..7a204256b15 100644 --- a/include/linux/elevator.h +++ b/include/linux/elevator.h @@ -28,7 +28,7 @@ typedef void (elevator_activate_req_fn) (struct request_queue *, struct request typedef void (elevator_deactivate_req_fn) (struct request_queue *, struct request *); typedef void *(elevator_init_fn) (struct request_queue *); -typedef void (elevator_exit_fn) (elevator_t *); +typedef void (elevator_exit_fn) (struct elevator_queue *); struct elevator_ops { @@ -62,8 +62,8 @@ struct elevator_ops struct elv_fs_entry { struct attribute attr; 
- ssize_t (*show)(elevator_t *, char *); - ssize_t (*store)(elevator_t *, const char *, size_t); + ssize_t (*show)(struct elevator_queue *, char *); + ssize_t (*store)(struct elevator_queue *, const char *, size_t); }; /* @@ -130,7 +130,7 @@ extern ssize_t elv_iosched_show(struct request_queue *, char *); extern ssize_t elv_iosched_store(struct request_queue *, const char *, size_t); extern int elevator_init(struct request_queue *, char *); -extern void elevator_exit(elevator_t *); +extern void elevator_exit(struct elevator_queue *); extern int elv_rq_merge_ok(struct request *, struct bio *); /* -- cgit v1.2.3-70-g09d2
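As a closing usage note for the first patch in this series: a paravirt front end such as virtio_blk or xen-blkfront would be expected to set the new flag while initializing its request queue. This is a minimal sketch, not code from any commit above; my_frontend_init_queue is hypothetical, and queue_flag_set_unlocked() is assumed to be this kernel era's unlocked helper for setting queue flags.

	#include <linux/blkdev.h>

	static void my_frontend_init_queue(struct request_queue *q)
	{
		/*
		 * QUEUE_FLAG_VIRT aliases QUEUE_FLAG_NONROT, so this also
		 * marks the queue non-rotational and keeps AS/CFQ from
		 * idling on it.
		 */
		queue_flag_set_unlocked(QUEUE_FLAG_VIRT, q);
	}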