| author | Christoph Hellwig <hch@infradead.org> | 2011-04-18 11:41:33 +0200 |
|---|---|---|
| committer | Jens Axboe <jaxboe@fusionio.com> | 2011-04-18 11:41:33 +0200 |
| commit | 24ecfbe27f65563909b14492afda2f1c21f7c044 (patch) | |
| tree | a7e51d903c400d0925f87be5f3069a5a44e0af24 /block/elevator.c | |
| parent | 4521cc4ed5173f92714f6999a69910c3385fed68 (diff) | |
block: add blk_run_queue_async
Instead of overloading __blk_run_queue to force an offload to kblockd,
add a new blk_run_queue_async helper to do it explicitly. I've kept
the blk_queue_stopped check for now, but I suspect it's not needed,
as the check we do when the workqueue item runs should be enough.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
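For context, the helper this commit adds is essentially a thin wrapper that punts queue processing to the kblockd workqueue instead of running it inline. The blk-core.c side of the change is not included in the diff below (which is limited to block/elevator.c), so the following is only a sketch of what such a helper looks like based on the description above, including the blk_queue_stopped check the commit message mentions:

```c
/*
 * Illustrative sketch only -- the real definition lives in block/blk-core.c,
 * which is outside the elevator.c-only diff shown below.  The idea: rather
 * than calling __blk_run_queue() with a "force kblockd" flag, callers that
 * want the queue run from process context punt to kblockd explicitly.
 */
void blk_run_queue_async(struct request_queue *q)
{
	/* blk_queue_stopped check kept for now, as the commit message notes */
	if (likely(!blk_queue_stopped(q)))
		queue_delayed_work(kblockd_workqueue, &q->delay_work, 0);
}
```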
Diffstat (limited to 'block/elevator.c')
-rw-r--r-- | block/elevator.c | 4 |
1 file changed, 2 insertions, 2 deletions
```diff
diff --git a/block/elevator.c b/block/elevator.c
index 0cdb4e7ebab..6f6abc08bb5 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -642,7 +642,7 @@ void elv_quiesce_start(struct request_queue *q)
 	 */
 	elv_drain_elevator(q);
 	while (q->rq.elvpriv) {
-		__blk_run_queue(q, false);
+		__blk_run_queue(q);
 		spin_unlock_irq(q->queue_lock);
 		msleep(10);
 		spin_lock_irq(q->queue_lock);
@@ -695,7 +695,7 @@ void __elv_add_request(struct request_queue *q, struct request *rq, int where)
 		 * with anything. There's no point in delaying queue
 		 * processing.
 		 */
-		__blk_run_queue(q, false);
+		__blk_run_queue(q);
 		break;

	case ELEVATOR_INSERT_SORT_MERGE:
```
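Both call sites in this file passed false for the old boolean "force offload" argument, so they simply drop it. A caller that previously passed true would, under this split, switch to the new helper instead; roughly as follows (illustrative, not taken from this elevator.c-only diff):

```c
/* Before this commit: the bool argument forced an offload to kblockd. */
__blk_run_queue(q, true);

/* After: the offload is requested explicitly by name. */
blk_run_queue_async(q);
```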