author		Dmitry Torokhov <dmitry.torokhov@gmail.com>	2009-09-13 21:16:56 -0700
committer	Dmitry Torokhov <dmitry.torokhov@gmail.com>	2009-09-13 21:16:56 -0700
commit		fc8e1ead9314cf0e0f1922e661428b93d3a50d88 (patch)
tree		f3cb97c4769b74f6627a59769f1ed5c92a13c58a /block/elevator.c
parent		2bcaa6a4238094c5695d5b1943078388d82d3004 (diff)
parent		9de48cc300fb10f7d9faa978670becf5e352462a (diff)
Merge branch 'next' into for-linus
Diffstat (limited to 'block/elevator.c')
-rw-r--r--	block/elevator.c	206
1 file changed, 51 insertions(+), 155 deletions(-)
diff --git a/block/elevator.c b/block/elevator.c
index 7073a907257..2d511f9105e 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -33,17 +33,16 @@
#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/blktrace_api.h>
-#include <trace/block.h>
#include <linux/hash.h>
#include <linux/uaccess.h>
+#include <trace/events/block.h>
+
#include "blk.h"
static DEFINE_SPINLOCK(elv_list_lock);
static LIST_HEAD(elv_list);
-DEFINE_TRACE(block_rq_abort);
-
/*
* Merge hash stuff.
*/
@@ -52,11 +51,7 @@ static const int elv_hash_shift = 6;
#define ELV_HASH_FN(sec) \
(hash_long(ELV_HASH_BLOCK((sec)), elv_hash_shift))
#define ELV_HASH_ENTRIES (1 << elv_hash_shift)
-#define rq_hash_key(rq) ((rq)->sector + (rq)->nr_sectors)
-#define ELV_ON_HASH(rq) (!hlist_unhashed(&(rq)->hash))
-
-DEFINE_TRACE(block_rq_insert);
-DEFINE_TRACE(block_rq_issue);
+#define rq_hash_key(rq) (blk_rq_pos(rq) + blk_rq_sectors(rq))
/*
* Query io scheduler to see if the current process issuing bio may be
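The hunk above also swaps direct field access for the new accessors: rq_hash_key() now derives the end-of-request key via blk_rq_pos() and blk_rq_sectors() instead of reading rq->sector and rq->nr_sectors. A minimal sketch of what such accessors look like in this kernel series (the field names follow the 2.6.31-era layout and are illustrative, not guaranteed verbatim):

        static inline sector_t blk_rq_pos(const struct request *rq)
        {
                return rq->__sector;            /* start sector of the request */
        }

        static inline unsigned int blk_rq_bytes(const struct request *rq)
        {
                return rq->__data_len;          /* data length in bytes */
        }

        static inline unsigned int blk_rq_sectors(const struct request *rq)
        {
                return blk_rq_bytes(rq) >> 9;   /* length in 512-byte sectors */
        }

Centralizing these reads lets the block core keep position and length in private fields while elevators and drivers see a stable interface.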
@@ -105,6 +100,19 @@ int elv_rq_merge_ok(struct request *rq, struct bio *bio)
if (bio_integrity(bio) != blk_integrity_rq(rq))
return 0;
+ /*
+ * Don't merge if failfast settings don't match.
+ *
+ * FIXME: The negation in front of each condition is necessary
+ * because bio and request flags use different bit positions
+ * and the accessors return those bits directly. This
+ * ugliness will soon go away.
+ */
+ if (!bio_failfast_dev(bio) != !blk_failfast_dev(rq) ||
+ !bio_failfast_transport(bio) != !blk_failfast_transport(rq) ||
+ !bio_failfast_driver(bio) != !blk_failfast_driver(rq))
+ return 0;
+
if (!elv_iosched_allow_merge(rq, bio))
return 0;
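The FIXME above leans on the `!x != !y` idiom: each `!` collapses a flag word to 0 or 1, so the inequality acts as a logical XOR even though the bio and the request keep the "same" failfast flag at different bit positions. A standalone illustration (the flag values are hypothetical):

        #include <stdio.h>

        #define BIO_FF_DEV      (1 << 3)        /* hypothetical bio flag bit */
        #define REQ_FF_DEV      (1 << 7)        /* hypothetical request flag bit */

        int main(void)
        {
                unsigned int bio_flags = BIO_FF_DEV;
                unsigned int rq_flags  = REQ_FF_DEV;

                /* Raw masks differ (8 vs 128), so comparing them directly
                 * reports a mismatch even though both flags are set. */
                if ((bio_flags & BIO_FF_DEV) != (rq_flags & REQ_FF_DEV))
                        printf("raw compare: bogus mismatch\n");

                /* '!' normalizes each side to 0/1; '!=' is then logical XOR. */
                if (!(bio_flags & BIO_FF_DEV) != !(rq_flags & REQ_FF_DEV))
                        printf("failfast mismatch: don't merge\n");
                else
                        printf("failfast settings agree: merge ok\n");
                return 0;
        }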
@@ -120,9 +128,9 @@ static inline int elv_try_merge(struct request *__rq, struct bio *bio)
* we can merge and sequence is ok, check if it's possible
*/
if (elv_rq_merge_ok(__rq, bio)) {
- if (__rq->sector + __rq->nr_sectors == bio->bi_sector)
+ if (blk_rq_pos(__rq) + blk_rq_sectors(__rq) == bio->bi_sector)
ret = ELEVATOR_BACK_MERGE;
- else if (__rq->sector - bio_sectors(bio) == bio->bi_sector)
+ else if (blk_rq_pos(__rq) - bio_sectors(bio) == bio->bi_sector)
ret = ELEVATOR_FRONT_MERGE;
}
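Both tests are plain sector arithmetic on the (now accessor-based) request geometry: a back merge appends the bio exactly where the request ends, a front merge prepends it so the bio ends exactly where the request starts. Restated as a self-contained sketch, not the kernel's actual code path:

        typedef unsigned long long sector_t;   /* stand-in for the kernel type */

        enum { NO_MERGE, BACK_MERGE, FRONT_MERGE };

        static int classify_merge(sector_t rq_pos, unsigned int rq_sectors,
                                  sector_t bi_sector, unsigned int bio_sectors)
        {
                if (rq_pos + rq_sectors == bi_sector)
                        return BACK_MERGE;      /* bio starts where rq ends */
                if (rq_pos - bio_sectors == bi_sector)
                        return FRONT_MERGE;     /* bio ends where rq starts */
                return NO_MERGE;
        }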
@@ -310,22 +318,6 @@ void elevator_exit(struct elevator_queue *e)
}
EXPORT_SYMBOL(elevator_exit);
-static void elv_activate_rq(struct request_queue *q, struct request *rq)
-{
- struct elevator_queue *e = q->elevator;
-
- if (e->ops->elevator_activate_req_fn)
- e->ops->elevator_activate_req_fn(q, rq);
-}
-
-static void elv_deactivate_rq(struct request_queue *q, struct request *rq)
-{
- struct elevator_queue *e = q->elevator;
-
- if (e->ops->elevator_deactivate_req_fn)
- e->ops->elevator_deactivate_req_fn(q, rq);
-}
-
static inline void __elv_rqhash_del(struct request *rq)
{
hlist_del_init(&rq->hash);
@@ -387,9 +379,9 @@ struct request *elv_rb_add(struct rb_root *root, struct request *rq)
parent = *p;
__rq = rb_entry(parent, struct request, rb_node);
- if (rq->sector < __rq->sector)
+ if (blk_rq_pos(rq) < blk_rq_pos(__rq))
p = &(*p)->rb_left;
- else if (rq->sector > __rq->sector)
+ else if (blk_rq_pos(rq) > blk_rq_pos(__rq))
p = &(*p)->rb_right;
else
return __rq;
@@ -417,9 +409,9 @@ struct request *elv_rb_find(struct rb_root *root, sector_t sector)
while (n) {
rq = rb_entry(n, struct request, rb_node);
- if (sector < rq->sector)
+ if (sector < blk_rq_pos(rq))
n = n->rb_left;
- else if (sector > rq->sector)
+ else if (sector > blk_rq_pos(rq))
n = n->rb_right;
else
return rq;
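elv_rb_add() and elv_rb_find() give I/O schedulers a sector-sorted rbtree keyed on blk_rq_pos(). A usage sketch in the style of the deadline scheduler (the sort_list naming is illustrative):

        struct rb_root sort_list = RB_ROOT;
        struct request *alias, *found;

        /* Insert keyed on start sector; if a request already starts at
         * the same sector, that existing request is returned instead. */
        alias = elv_rb_add(&sort_list, rq);

        /* Look up the request starting at a given sector, if any ... */
        found = elv_rb_find(&sort_list, blk_rq_pos(rq));

        /* ... and drop it from the tree once it is dispatched. */
        if (found)
                elv_rb_del(&sort_list, found);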
@@ -458,14 +450,14 @@ void elv_dispatch_sort(struct request_queue *q, struct request *rq)
break;
if (pos->cmd_flags & stop_flags)
break;
- if (rq->sector >= boundary) {
- if (pos->sector < boundary)
+ if (blk_rq_pos(rq) >= boundary) {
+ if (blk_rq_pos(pos) < boundary)
continue;
} else {
- if (pos->sector >= boundary)
+ if (blk_rq_pos(pos) >= boundary)
break;
}
- if (rq->sector >= pos->sector)
+ if (blk_rq_pos(rq) >= blk_rq_pos(pos))
break;
}
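The loop walks the dispatch list backwards and stops at the first position that keeps the list in one-way-elevator order around the boundary (q->end_sector): requests at or past the boundary sort first, in ascending order, and wrapped requests below the boundary sort after them. The comparison, pulled out as a standalone sketch:

        typedef unsigned long long sector_t;   /* stand-in for the kernel type */

        /* Should the new request be placed after the current one? */
        static int dispatch_after(sector_t boundary, sector_t new_pos,
                                  sector_t cur_pos)
        {
                int new_wrapped = new_pos < boundary;
                int cur_wrapped = cur_pos < boundary;

                if (new_wrapped != cur_wrapped)
                        return new_wrapped;     /* wrapped requests go last */
                return new_pos >= cur_pos;      /* same side: ascending */
        }

Keeping the head sweeping forward before it wraps minimizes direction changes on rotational media.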
@@ -563,7 +555,7 @@ void elv_requeue_request(struct request_queue *q, struct request *rq)
* in_flight count again
*/
if (blk_account_rq(rq)) {
- q->in_flight--;
+ q->in_flight[rq_is_sync(rq)]--;
if (blk_sorted_rq(rq))
elv_deactivate_rq(q, rq);
}
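q->in_flight is no longer a single counter: this series splits it by synchronicity, indexed by rq_is_sync(rq), with a helper summing both halves for callers that want the total. A sketch of the shape, assuming the 2.6.31-era definitions:

        /* in struct request_queue (illustrative):
         *      unsigned int in_flight[2];      // indexed by rq_is_sync(rq)
         */
        static inline unsigned int queue_in_flight(struct request_queue *q)
        {
                return q->in_flight[0] + q->in_flight[1];
        }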
@@ -592,6 +584,9 @@ void elv_drain_elevator(struct request_queue *q)
*/
void elv_quiesce_start(struct request_queue *q)
{
+ if (!q->elevator)
+ return;
+
queue_flag_set(QUEUE_FLAG_ELVSWITCH, q);
/*
@@ -599,7 +594,7 @@ void elv_quiesce_start(struct request_queue *q)
*/
elv_drain_elevator(q);
while (q->rq.elvpriv) {
- blk_start_queueing(q);
+ __blk_run_queue(q);
spin_unlock_irq(q->queue_lock);
msleep(10);
spin_lock_irq(q->queue_lock);
@@ -643,8 +638,7 @@ void elv_insert(struct request_queue *q, struct request *rq, int where)
* with anything. There's no point in delaying queue
* processing.
*/
- blk_remove_plug(q);
- blk_start_queueing(q);
+ __blk_run_queue(q);
break;
case ELEVATOR_INSERT_SORT:
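The blk_remove_plug() + blk_start_queueing() pair collapses into a single __blk_run_queue() call. A rough sketch of what that helper does in this series (simplified; the real code also guards against request_fn recursion):

        static void __blk_run_queue_sketch(struct request_queue *q)
        {
                blk_remove_plug(q);
                if (!blk_queue_stopped(q) && !elv_queue_empty(q))
                        q->request_fn(q);       /* kick the driver directly */
        }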
@@ -703,7 +697,7 @@ void elv_insert(struct request_queue *q, struct request *rq, int where)
if (unplug_it && blk_queue_plugged(q)) {
int nrq = q->rq.count[BLK_RW_SYNC] + q->rq.count[BLK_RW_ASYNC]
- - q->in_flight;
+ - queue_in_flight(q);
if (nrq >= q->unplug_thresh)
__generic_unplug_device(q);
@@ -759,117 +753,6 @@ void elv_add_request(struct request_queue *q, struct request *rq, int where,
}
EXPORT_SYMBOL(elv_add_request);
-static inline struct request *__elv_next_request(struct request_queue *q)
-{
- struct request *rq;
-
- while (1) {
- while (!list_empty(&q->queue_head)) {
- rq = list_entry_rq(q->queue_head.next);
- if (blk_do_ordered(q, &rq))
- return rq;
- }
-
- if (!q->elevator->ops->elevator_dispatch_fn(q, 0))
- return NULL;
- }
-}
-
-struct request *elv_next_request(struct request_queue *q)
-{
- struct request *rq;
- int ret;
-
- while ((rq = __elv_next_request(q)) != NULL) {
- if (!(rq->cmd_flags & REQ_STARTED)) {
- /*
- * This is the first time the device driver
- * sees this request (possibly after
- * requeueing). Notify IO scheduler.
- */
- if (blk_sorted_rq(rq))
- elv_activate_rq(q, rq);
-
- /*
- * just mark as started even if we don't start
- * it, a request that has been delayed should
- * not be passed by new incoming requests
- */
- rq->cmd_flags |= REQ_STARTED;
- trace_block_rq_issue(q, rq);
- }
-
- if (!q->boundary_rq || q->boundary_rq == rq) {
- q->end_sector = rq_end_sector(rq);
- q->boundary_rq = NULL;
- }
-
- if (rq->cmd_flags & REQ_DONTPREP)
- break;
-
- if (q->dma_drain_size && rq->data_len) {
- /*
- * make sure space for the drain appears we
- * know we can do this because max_hw_segments
- * has been adjusted to be one fewer than the
- * device can handle
- */
- rq->nr_phys_segments++;
- }
-
- if (!q->prep_rq_fn)
- break;
-
- ret = q->prep_rq_fn(q, rq);
- if (ret == BLKPREP_OK) {
- break;
- } else if (ret == BLKPREP_DEFER) {
- /*
- * the request may have been (partially) prepped.
- * we need to keep this request in the front to
- * avoid resource deadlock. REQ_STARTED will
- * prevent other fs requests from passing this one.
- */
- if (q->dma_drain_size && rq->data_len &&
- !(rq->cmd_flags & REQ_DONTPREP)) {
- /*
- * remove the space for the drain we added
- * so that we don't add it again
- */
- --rq->nr_phys_segments;
- }
-
- rq = NULL;
- break;
- } else if (ret == BLKPREP_KILL) {
- rq->cmd_flags |= REQ_QUIET;
- __blk_end_request(rq, -EIO, blk_rq_bytes(rq));
- } else {
- printk(KERN_ERR "%s: bad return=%d\n", __func__, ret);
- break;
- }
- }
-
- return rq;
-}
-EXPORT_SYMBOL(elv_next_request);
-
-void elv_dequeue_request(struct request_queue *q, struct request *rq)
-{
- BUG_ON(list_empty(&rq->queuelist));
- BUG_ON(ELV_ON_HASH(rq));
-
- list_del_init(&rq->queuelist);
-
- /*
- * the time frame between a request being removed from the lists
- * and to it is freed is accounted as io that is in progress at
- * the driver side.
- */
- if (blk_account_rq(rq))
- q->in_flight++;
-}
-
int elv_queue_empty(struct request_queue *q)
{
struct elevator_queue *e = q->elevator;
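The large removal above does not delete the dispatch logic: in this series elv_next_request() and elv_dequeue_request() move into blk-core.c behind blk_peek_request(), blk_start_request() and blk_fetch_request(), which also absorb the "mark started" and in_flight accounting shown here. Driver-side usage after the move looks roughly like this (my_handle_rq() is a hypothetical driver routine):

        static void my_request_fn(struct request_queue *q)
        {
                struct request *rq;

                /* blk_fetch_request() == blk_peek_request() followed by
                 * blk_start_request() on the returned request. */
                while ((rq = blk_fetch_request(q)) != NULL) {
                        int error = my_handle_rq(rq);   /* hypothetical */

                        __blk_end_request_all(rq, error);
                }
        }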
@@ -939,7 +822,12 @@ void elv_abort_queue(struct request_queue *q)
rq = list_entry_rq(q->queue_head.next);
rq->cmd_flags |= REQ_QUIET;
trace_block_rq_abort(q, rq);
- __blk_end_request(rq, -EIO, blk_rq_bytes(rq));
+ /*
+ * Mark this request as started so we don't trigger
+ * any debug logic in the end I/O path.
+ */
+ blk_start_request(rq);
+ __blk_end_request_all(rq, -EIO);
}
}
EXPORT_SYMBOL(elv_abort_queue);
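__blk_end_request_all() is shorthand for completing the request in full, so the abort path no longer has to spell out the byte count; a sketch of the equivalence, grounded in the line it replaces:

        /* new form */
        __blk_end_request_all(rq, -EIO);

        /* old open-coded form */
        __blk_end_request(rq, -EIO, blk_rq_bytes(rq));

Calling blk_start_request() first moves the request into the started, dequeued state, so the completion-side accounting and debug checks see a request in the state they expect.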
@@ -952,7 +840,7 @@ void elv_completed_request(struct request_queue *q, struct request *rq)
* request is released from the driver, io must be done
*/
if (blk_account_rq(rq)) {
- q->in_flight--;
+ q->in_flight[rq_is_sync(rq)]--;
if (blk_sorted_rq(rq) && e->ops->elevator_completed_req_fn)
e->ops->elevator_completed_req_fn(q, rq);
}
@@ -967,11 +855,11 @@ void elv_completed_request(struct request_queue *q, struct request *rq)
if (!list_empty(&q->queue_head))
next = list_entry_rq(q->queue_head.next);
- if (!q->in_flight &&
+ if (!queue_in_flight(q) &&
blk_ordered_cur_seq(q) == QUEUE_ORDSEQ_DRAIN &&
(!next || blk_ordered_req_seq(next) > QUEUE_ORDSEQ_DRAIN)) {
blk_ordered_complete_seq(q, QUEUE_ORDSEQ_DRAIN, 0);
- blk_start_queueing(q);
+ __blk_run_queue(q);
}
}
}
@@ -1179,6 +1067,9 @@ ssize_t elv_iosched_store(struct request_queue *q, const char *name,
char elevator_name[ELV_NAME_MAX];
struct elevator_type *e;
+ if (!q->elevator)
+ return count;
+
strlcpy(elevator_name, name, sizeof(elevator_name));
strstrip(elevator_name);
@@ -1202,10 +1093,15 @@ ssize_t elv_iosched_store(struct request_queue *q, const char *name,
ssize_t elv_iosched_show(struct request_queue *q, char *name)
{
struct elevator_queue *e = q->elevator;
- struct elevator_type *elv = e->elevator_type;
+ struct elevator_type *elv;
struct elevator_type *__e;
int len = 0;
+ if (!q->elevator)
+ return sprintf(name, "none\n");
+
+ elv = e->elevator_type;
+
spin_lock(&elv_list_lock);
list_for_each_entry(__e, &elv_list, list) {
if (!strcmp(elv->elevator_name, __e->elevator_name))