author	Jens Axboe <jens.axboe@oracle.com>	2009-07-28 09:07:29 +0200
committer	Jens Axboe <jens.axboe@oracle.com>	2009-07-28 09:07:29 +0200
commit	a4e7d46407d73f35d217013b363b79a8f8eafcaa (patch)
tree	109996edac0c6382e7a7cfb99736e75ea5398b49 /block
parent	a85a00a699740f6f9863f88aef22060fe1534681 (diff)
block: always assign default lock to queues
Move the assignment of a default lock below blk_init_queue() to blk_queue_make_request(), so we also get to set the default lock for ->make_request_fn() based drivers. This is important since the queue flag locking requires a lock to be in place.

Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
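For illustration, consider a bio-based driver that registers a ->make_request_fn() and never calls blk_init_queue(), so it has no spinlock of its own to hand over. Below is a minimal sketch of such a driver's queue setup against the 2.6.31-era block API; the names example_make_request and example_setup_queue are hypothetical and not part of this patch.

#include <linux/blkdev.h>
#include <linux/bio.h>

/* Hypothetical bio-based driver: it services bios directly in its
 * ->make_request_fn() and never supplies a queue lock of its own. */
static int example_make_request(struct request_queue *q, struct bio *bio)
{
	/* ... handle the bio ... */
	bio_endio(bio, 0);
	return 0;
}

static struct request_queue *example_setup_queue(void)
{
	struct request_queue *q;

	q = blk_alloc_queue(GFP_KERNEL);
	if (!q)
		return NULL;

	/*
	 * With this patch, blk_queue_make_request() falls back to the
	 * queue's embedded lock, so q->queue_lock is non-NULL from here
	 * on even though no lock was ever passed in, and helpers that
	 * expect the queue lock (such as the queue flag accessors) work.
	 */
	blk_queue_make_request(q, example_make_request);

	return q;
}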
Diffstat (limited to 'block')
-rw-r--r--	block/blk-core.c	| 7 -------
-rw-r--r--	block/blk-settings.c	| 7 +++++++
2 files changed, 7 insertions, 7 deletions
diff --git a/block/blk-core.c b/block/blk-core.c
index 4b45435c6ea..a0c340d239b 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -575,13 +575,6 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
 		return NULL;
 	}
 
-	/*
-	 * if caller didn't supply a lock, they get per-queue locking with
-	 * our embedded lock
-	 */
-	if (!lock)
-		lock = &q->__queue_lock;
-
 	q->request_fn		= rfn;
 	q->prep_rq_fn		= NULL;
 	q->unplug_fn		= generic_unplug_device;
diff --git a/block/blk-settings.c b/block/blk-settings.c
index bd582a7f531..8a3ea3bba10 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -165,6 +165,13 @@ void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
 	blk_set_default_limits(&q->limits);
 
 	/*
+	 * If the caller didn't supply a lock, fall back to our embedded
+	 * per-queue locks
+	 */
+	if (!q->queue_lock)
+		q->queue_lock = &q->__queue_lock;
+
+	/*
 	 * by default assume old behaviour and bounce for any highmem page
 	 */
 	blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
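The "queue flag locking" the commit message refers to is the convention that queue flag setters and clearers run with q->queue_lock held; with q->queue_lock still NULL on a ->make_request_fn() based queue, that convention cannot be followed at all. A sketch of the usual calling pattern, assuming a queue q set up as in the hypothetical driver above:

	/* Queue flag updates are done under the queue lock; the default
	 * lock assigned by blk_queue_make_request() makes this valid for
	 * bio-based queues as well. */
	spin_lock_irq(q->queue_lock);
	queue_flag_set(QUEUE_FLAG_NONROT, q);	/* e.g. mark the device non-rotational */
	spin_unlock_irq(q->queue_lock);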