Diffstat (limited to 'block/ll_rw_blk.c')
-rw-r--r--  block/ll_rw_blk.c  23
1 file changed, 18 insertions(+), 5 deletions(-)
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index e112d1a5dab..eac48bec147 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -1554,7 +1554,7 @@ void blk_plug_device(request_queue_t *q)
 	 * don't plug a stopped queue, it must be paired with blk_start_queue()
 	 * which will restart the queueing
 	 */
-	if (test_bit(QUEUE_FLAG_STOPPED, &q->queue_flags))
+	if (blk_queue_stopped(q))
 		return;
 
 	if (!test_and_set_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags)) {
@@ -1587,7 +1587,7 @@ EXPORT_SYMBOL(blk_remove_plug);
  */
 void __generic_unplug_device(request_queue_t *q)
 {
-	if (unlikely(test_bit(QUEUE_FLAG_STOPPED, &q->queue_flags)))
+	if (unlikely(blk_queue_stopped(q)))
 		return;
 
 	if (!blk_remove_plug(q))
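Both hunks above make the same substitution: the open-coded test_bit(QUEUE_FLAG_STOPPED, &q->queue_flags) check becomes the blk_queue_stopped() helper, with no change in behaviour. As a rough sketch of what the helper presumably expands to (recalled from include/linux/blkdev.h of this era; treat it as an assumption, not part of this patch):

	/* sketch only -- presumed helper, equivalent to the open-coded test it replaces */
	#define blk_queue_stopped(q)	test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)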
@@ -1732,8 +1732,21 @@ void blk_run_queue(struct request_queue *q)
 
 	spin_lock_irqsave(q->queue_lock, flags);
 	blk_remove_plug(q);
-	if (!elv_queue_empty(q))
-		q->request_fn(q);
+
+	/*
+	 * Only recurse once to avoid overrunning the stack, let the unplug
+	 * handling reinvoke the handler shortly if we already got there.
+	 */
+	if (!elv_queue_empty(q)) {
+		if (!test_and_set_bit(QUEUE_FLAG_REENTER, &q->queue_flags)) {
+			q->request_fn(q);
+			clear_bit(QUEUE_FLAG_REENTER, &q->queue_flags);
+		} else {
+			blk_plug_device(q);
+			kblockd_schedule_work(&q->unplug_work);
+		}
+	}
+
 	spin_unlock_irqrestore(q->queue_lock, flags);
 }
 EXPORT_SYMBOL(blk_run_queue);
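The hunk above is the substantive change: blk_run_queue() can be re-entered from the request_fn it invokes, so the QUEUE_FLAG_REENTER bit caps the recursion at one level and defers any nested run to kblockd via the unplug machinery. A minimal userspace sketch of the same guard pattern (run_queue, defer_work and the atomic_flag below are illustrative stand-ins, not kernel API):

	#include <stdatomic.h>
	#include <stdio.h>

	static atomic_flag reenter = ATOMIC_FLAG_INIT;

	/* stands in for blk_plug_device() + kblockd_schedule_work(): punt the
	 * rerun to a worker instead of recursing deeper */
	static void defer_work(void)
	{
		puts("nested call: deferring instead of recursing");
	}

	static void run_queue(int depth)
	{
		if (atomic_flag_test_and_set(&reenter)) {
			defer_work();
			return;
		}
		printf("running handler at depth %d\n", depth);
		if (depth == 0)
			run_queue(depth + 1);	/* simulates request_fn re-running the queue */
		atomic_flag_clear(&reenter);
	}

	int main(void)
	{
		run_queue(0);
		return 0;
	}

Running it prints one "running handler" line and one "deferring" line, mirroring how the nested kernel call plugs the queue and lets kblockd pick the work up shortly afterwards.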
@@ -3385,7 +3398,7 @@ static int blk_cpu_notify(struct notifier_block *self, unsigned long action,
 }
 
-static struct notifier_block __devinitdata blk_cpu_notifier = {
+static struct notifier_block blk_cpu_notifier = {
 	.notifier_call = blk_cpu_notify,
 };
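The final hunk drops the __devinitdata annotation from blk_cpu_notifier. The structure is handed to the CPU notifier chain, which keeps a pointer to it for as long as hotplug events can arrive, so it must live in ordinary data rather than in an init section that may be discarded. A hedged sketch of that registration pattern (the notify stub and init function names below are made up for illustration, register_cpu_notifier() is assumed to be the era's chain-registration call, and the real call site is not shown in this diff):

	#include <linux/cpu.h>
	#include <linux/init.h>
	#include <linux/notifier.h>

	/* illustrative stub standing in for blk_cpu_notify() */
	static int example_cpu_notify(struct notifier_block *self,
				      unsigned long action, void *hcpu)
	{
		return NOTIFY_OK;
	}

	/* must stay in ordinary .data: the notifier chain references it after init */
	static struct notifier_block example_cpu_notifier = {
		.notifier_call = example_cpu_notify,
	};

	static int __init example_init(void)	/* hypothetical init hook */
	{
		register_cpu_notifier(&example_cpu_notifier);
		return 0;
	}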