author     Shaohua Li <shaohua.li@intel.com>    2011-12-16 14:00:31 +0100
committer  Jens Axboe <axboe@kernel.dk>         2011-12-16 14:00:31 +0100
commit     274193224cdabd687d804a26e0150bb20f2dd52c
tree       f07a788183f2ac91b9b16295f8f146bd5b88fb96 /block/elevator.c
parent     4a0b75c7d02c2bd46ed227d4ba5941ba8a0aba5d
block: recursive merge requests
In my workload, thread 1 accesses sectors a, a+2, ..., while thread 2 accesses a+1, a+3, .... When the requests are flushed to the queue, a and a+1 are merged into (a, a+1), and a+2 and a+3 into (a+2, a+3), but (a, a+1) and (a+2, a+3) are not merged with each other. With the recursive merge below, workload throughput improves by 20% and context switches drop by 60%.

Signed-off-by: Shaohua Li <shaohua.li@intel.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
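To see why a single backmerge pass is not enough, the stand-alone C sketch below simulates the scenario from the commit message. It is illustrative user-space code only, not kernel code: struct req, pending[], find_backmerge(), merge_into() and insert_and_merge() are invented stand-ins for struct request, the elevator hash, blk_attempt_req_merge() and elv_attempt_insert_merge(). With the flush order a, a+2, a+1, a+3, the final insertion first merges a+3 into (a+2), and only the retry loop then merges the resulting (a+2, a+3) back into (a, a+1).

/* Illustrative simulation only; names and layout are not from the kernel. */
#include <stdbool.h>
#include <stdio.h>

struct req {            /* hypothetical stand-in for struct request */
	long pos;       /* start sector */
	long len;       /* number of sectors */
	bool live;      /* still queued, i.e. not merged away */
};

static struct req pending[16];
static int npending;

/* Find a queued request that ends exactly where rq starts (a backmerge
 * candidate), playing the role of elv_rqhash_find() in the patch. */
static struct req *find_backmerge(struct req *rq)
{
	for (int i = 0; i < npending; i++) {
		struct req *p = &pending[i];
		if (p->live && p != rq && p->pos + p->len == rq->pos)
			return p;
	}
	return NULL;
}

/* Append rq to __rq and retire rq, like a successful blk_attempt_req_merge(). */
static void merge_into(struct req *__rq, struct req *rq)
{
	__rq->len += rq->len;
	rq->live = false;
}

/* Queue a request, then keep merging while a backmerge exists.
 * Dropping the while loop gives the old single-pass behaviour. */
static void insert_and_merge(struct req *new_rq)
{
	struct req *rq = &pending[npending++];

	*rq = *new_rq;
	rq->live = true;

	while (1) {
		struct req *__rq = find_backmerge(rq);
		if (!__rq)
			break;
		merge_into(__rq, rq);
		/* The merged request could be merged with others, try again. */
		rq = __rq;
	}
}

int main(void)
{
	/* Flush order from the two threads: a, a+2, a+1, a+3 (a = sector 100). */
	struct req reqs[] = {
		{ .pos = 100, .len = 1 },  /* a   */
		{ .pos = 102, .len = 1 },  /* a+2 */
		{ .pos = 101, .len = 1 },  /* a+1 */
		{ .pos = 103, .len = 1 },  /* a+3 */
	};

	for (int i = 0; i < 4; i++)
		insert_and_merge(&reqs[i]);

	/* With the retry loop: one request (100, 4 sectors).
	 * Single pass only:     two requests (100, 2) and (102, 2). */
	for (int i = 0; i < npending; i++)
		if (pending[i].live)
			printf("request at sector %ld, %ld sectors\n",
			       pending[i].pos, pending[i].len);
	return 0;
}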
Diffstat (limited to 'block/elevator.c')
-rw-r--r--  block/elevator.c  16
1 file changed, 12 insertions(+), 4 deletions(-)
diff --git a/block/elevator.c b/block/elevator.c
index 91e18f8af9b..99838f460b4 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -515,6 +515,7 @@ static bool elv_attempt_insert_merge(struct request_queue *q,
 				     struct request *rq)
 {
 	struct request *__rq;
+	bool ret;
 
 	if (blk_queue_nomerges(q))
 		return false;
@@ -528,14 +529,21 @@ static bool elv_attempt_insert_merge(struct request_queue *q,
 	if (blk_queue_noxmerges(q))
 		return false;
 
+	ret = false;
 	/*
 	 * See if our hash lookup can find a potential backmerge.
 	 */
-	__rq = elv_rqhash_find(q, blk_rq_pos(rq));
-	if (__rq && blk_attempt_req_merge(q, __rq, rq))
-		return true;
+	while (1) {
+		__rq = elv_rqhash_find(q, blk_rq_pos(rq));
+		if (!__rq || !blk_attempt_req_merge(q, __rq, rq))
+			break;
 
-	return false;
+		/* The merged request could be merged with others, try again */
+		ret = true;
+		rq = __rq;
+	}
+
+	return ret;
 }
 
 void elv_merged_request(struct request_queue *q, struct request *rq, int type)
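For readability, this is how the merge path of elv_attempt_insert_merge() reads with the hunks above applied; it is reassembled purely from the context and '+' lines of the diff, with the unchanged early-exit checks (blk_queue_nomerges() / blk_queue_noxmerges(), shown in the hunks above) elided:

static bool elv_attempt_insert_merge(struct request_queue *q,
				     struct request *rq)
{
	struct request *__rq;
	bool ret;

	/* ... unchanged early-exit checks, see the hunks above ... */

	ret = false;
	/*
	 * See if our hash lookup can find a potential backmerge.
	 */
	while (1) {
		__rq = elv_rqhash_find(q, blk_rq_pos(rq));
		if (!__rq || !blk_attempt_req_merge(q, __rq, rq))
			break;

		/* The merged request could be merged with others, try again */
		ret = true;
		rq = __rq;
	}

	return ret;
}

The function now returns true if at least one merge happened, with rq walking backwards through each newly merged request until no further backmerge candidate is found.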