Diffstat (limited to 'block')
-rw-r--r--  block/blk-core.c      81
-rw-r--r--  block/blk-merge.c     36
-rw-r--r--  block/blk.h            1
-rw-r--r--  block/cfq-iosched.c   10
4 files changed, 39 insertions, 89 deletions
diff --git a/block/blk-core.c b/block/blk-core.c
index 82dc20621c0..3596ca71909 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -127,7 +127,7 @@ void blk_rq_init(struct request_queue *q, struct request *rq)
INIT_LIST_HEAD(&rq->timeout_list);
rq->cpu = -1;
rq->q = q;
- rq->sector = rq->hard_sector = (sector_t) -1;
+ rq->sector = (sector_t) -1;
INIT_HLIST_NODE(&rq->hash);
RB_CLEAR_NODE(&rq->rb_node);
rq->cmd = rq->__cmd;
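For orientation: these are the duplicated bookkeeping fields that struct request carried before this series, quoted from memory of the pre-change include/linux/blkdev.h (the wrapper struct is hypothetical, only the fields are real):

#include <linux/types.h>

/* duplicated state this series removes from struct request;
 * the wrapper name is made up for illustration */
struct request_legacy_fields {
	sector_t	hard_sector;		/* next sector to complete (driver view) */
	unsigned long	nr_sectors;		/* sectors left to submit */
	unsigned long	hard_nr_sectors;	/* sectors left to complete */
	unsigned int	current_nr_sectors;	/* sectors left in first segment */
	unsigned int	hard_cur_sectors;	/* same, driver view */
};

With the hard_* copies gone, ->sector is the only position field left to poison here.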
@@ -189,8 +189,7 @@ void blk_dump_rq_flags(struct request *rq, char *msg)
(unsigned long long)blk_rq_pos(rq),
blk_rq_sectors(rq), blk_rq_cur_sectors(rq));
printk(KERN_INFO " bio %p, biotail %p, buffer %p, len %u\n",
- rq->bio, rq->biotail,
- rq->buffer, rq->data_len);
+ rq->bio, rq->biotail, rq->buffer, blk_rq_bytes(rq));
if (blk_pc_request(rq)) {
printk(KERN_INFO " cdb: ");
@@ -1096,7 +1095,7 @@ void init_request_from_bio(struct request *req, struct bio *bio)
req->cmd_flags |= REQ_NOIDLE;
req->errors = 0;
- req->hard_sector = req->sector = bio->bi_sector;
+ req->sector = bio->bi_sector;
req->ioprio = bio_prio(bio);
blk_rq_bio_prep(req->q, req, bio);
}
@@ -1113,14 +1112,13 @@ static inline bool queue_should_plug(struct request_queue *q)
static int __make_request(struct request_queue *q, struct bio *bio)
{
struct request *req;
- int el_ret, nr_sectors;
+ int el_ret;
+ unsigned int bytes = bio->bi_size;
const unsigned short prio = bio_prio(bio);
const int sync = bio_sync(bio);
const int unplug = bio_unplug(bio);
int rw_flags;
- nr_sectors = bio_sectors(bio);
-
/*
* low level driver can indicate that it wants pages above a
* certain limit bounced to low memory (ie for highmem, or even
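Caching bio->bi_size in bytes replaces the dropped bio_sectors() bookkeeping; in kernels of this vintage bio_sectors() was only a shift (quoted from include/linux/bio.h, from memory), so carrying bytes avoids converting to sectors and back:

#define bio_sectors(bio)	((bio)->bi_size >> 9)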
@@ -1145,7 +1143,7 @@ static int __make_request(struct request_queue *q, struct bio *bio)
req->biotail->bi_next = bio;
req->biotail = bio;
- req->nr_sectors = req->hard_nr_sectors += nr_sectors;
+ req->data_len += bytes;
req->ioprio = ioprio_best(req->ioprio, prio);
if (!blk_rq_cpu_valid(req))
req->cpu = bio->bi_comp_cpu;
@@ -1171,10 +1169,8 @@ static int __make_request(struct request_queue *q, struct bio *bio)
* not touch req->buffer either...
*/
req->buffer = bio_data(bio);
- req->current_nr_sectors = bio_cur_sectors(bio);
- req->hard_cur_sectors = req->current_nr_sectors;
- req->sector = req->hard_sector = bio->bi_sector;
- req->nr_sectors = req->hard_nr_sectors += nr_sectors;
+ req->sector = bio->bi_sector;
+ req->data_len += bytes;
req->ioprio = ioprio_best(req->ioprio, prio);
if (!blk_rq_cpu_valid(req))
req->cpu = bio->bi_comp_cpu;
@@ -1557,7 +1553,7 @@ EXPORT_SYMBOL(submit_bio);
int blk_rq_check_limits(struct request_queue *q, struct request *rq)
{
if (blk_rq_sectors(rq) > q->max_sectors ||
- rq->data_len > q->max_hw_sectors << 9) {
+ blk_rq_bytes(rq) > q->max_hw_sectors << 9) {
printk(KERN_ERR "%s: over max size limit.\n", __func__);
return -EIO;
}
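Both queue limits are stored in 512-byte sectors, so the second comparison shifts the limit up to bytes rather than shifting the byte count down. A quick worked example (numbers invented):

/* max_hw_sectors = 512 (256 KiB) and a 300 KiB request:
 *
 *	blk_rq_bytes(rq) = 307200 > (512 << 9) = 262144   ->  -EIO
 */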
@@ -1675,35 +1671,6 @@ static void blk_account_io_done(struct request *req)
}
}
-/**
- * blk_rq_bytes - Returns bytes left to complete in the entire request
- * @rq: the request being processed
- **/
-unsigned int blk_rq_bytes(struct request *rq)
-{
- if (blk_fs_request(rq))
- return blk_rq_sectors(rq) << 9;
-
- return rq->data_len;
-}
-EXPORT_SYMBOL_GPL(blk_rq_bytes);
-
-/**
- * blk_rq_cur_bytes - Returns bytes left to complete in the current segment
- * @rq: the request being processed
- **/
-unsigned int blk_rq_cur_bytes(struct request *rq)
-{
- if (blk_fs_request(rq))
- return rq->current_nr_sectors << 9;
-
- if (rq->bio)
- return rq->bio->bi_size;
-
- return rq->data_len;
-}
-EXPORT_SYMBOL_GPL(blk_rq_cur_bytes);
-
struct request *elv_next_request(struct request_queue *q)
{
struct request *rq;
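With ->data_len now valid for fs and pc requests alike, the two helpers deleted above collapse to plain field reads and can live in include/linux/blkdev.h as inlines, presumably along these lines (a sketch):

static inline unsigned int blk_rq_bytes(const struct request *rq)
{
	return rq->data_len;		/* bytes left, any request type */
}

static inline unsigned int blk_rq_cur_bytes(const struct request *rq)
{
	return rq->bio ? rq->bio->bi_size : 0;	/* bytes in the first bio */
}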
@@ -1736,7 +1703,7 @@ struct request *elv_next_request(struct request_queue *q)
if (rq->cmd_flags & REQ_DONTPREP)
break;
- if (q->dma_drain_size && rq->data_len) {
+ if (q->dma_drain_size && blk_rq_bytes(rq)) {
/*
* make sure space for the drain appears we
* know we can do this because max_hw_segments
@@ -1759,7 +1726,7 @@ struct request *elv_next_request(struct request_queue *q)
* avoid resource deadlock. REQ_STARTED will
* prevent other fs requests from passing this one.
*/
- if (q->dma_drain_size && rq->data_len &&
+ if (q->dma_drain_size && blk_rq_bytes(rq) &&
!(rq->cmd_flags & REQ_DONTPREP)) {
/*
* remove the space for the drain we added
@@ -1911,8 +1878,7 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
* can find how many bytes remain in the request
* later.
*/
- req->nr_sectors = req->hard_nr_sectors = 0;
- req->current_nr_sectors = req->hard_cur_sectors = 0;
+ req->data_len = 0;
return false;
}
@@ -1926,8 +1892,25 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
bio_iovec(bio)->bv_len -= nr_bytes;
}
- blk_recalc_rq_sectors(req, total_bytes >> 9);
+ req->data_len -= total_bytes;
+ req->buffer = bio_data(req->bio);
+
+ /* update sector only for requests with clear definition of sector */
+ if (blk_fs_request(req) || blk_discard_rq(req))
+ req->sector += total_bytes >> 9;
+
+ /*
+ * If total number of sectors is less than the first segment
+ * size, something has gone terribly wrong.
+ */
+ if (blk_rq_bytes(req) < blk_rq_cur_bytes(req)) {
+ printk(KERN_ERR "blk: request botched\n");
+ req->data_len = blk_rq_cur_bytes(req);
+ }
+
+ /* recalculate the number of segments */
blk_recalc_rq_segments(req);
+
return true;
}
EXPORT_SYMBOL_GPL(blk_update_request);
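A worked example of the new completion bookkeeping (values invented): a three-sector (1536-byte) fs request that completes 1024 bytes is left with one sector outstanding:

/* before: req->sector = S, req->data_len = 1536 */
req->data_len -= 1024;		/* 512 bytes left */
req->sector   += 1024 >> 9;	/* position advanced by 2 sectors */
/* afterwards blk_rq_bytes(req) == 512, blk_rq_sectors(req) == 1 */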
@@ -2049,11 +2032,7 @@ void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
rq->nr_phys_segments = bio_phys_segments(q, bio);
rq->buffer = bio_data(bio);
}
- rq->current_nr_sectors = bio_cur_sectors(bio);
- rq->hard_cur_sectors = rq->current_nr_sectors;
- rq->hard_nr_sectors = rq->nr_sectors = bio_sectors(bio);
rq->data_len = bio->bi_size;
-
rq->bio = rq->biotail = bio;
if (bio->bi_bdev)
diff --git a/block/blk-merge.c b/block/blk-merge.c
index bf62a87a9da..b8df66aef0f 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -9,35 +9,6 @@
#include "blk.h"
-void blk_recalc_rq_sectors(struct request *rq, int nsect)
-{
- if (blk_fs_request(rq) || blk_discard_rq(rq)) {
- rq->hard_sector += nsect;
- rq->hard_nr_sectors -= nsect;
-
- /*
- * Move the I/O submission pointers ahead if required.
- */
- if ((rq->nr_sectors >= rq->hard_nr_sectors) &&
- (rq->sector <= rq->hard_sector)) {
- rq->sector = rq->hard_sector;
- rq->nr_sectors = rq->hard_nr_sectors;
- rq->hard_cur_sectors = bio_cur_sectors(rq->bio);
- rq->current_nr_sectors = rq->hard_cur_sectors;
- rq->buffer = bio_data(rq->bio);
- }
-
- /*
- * if total number of sectors is less than the first segment
- * size, something has gone terribly wrong
- */
- if (rq->nr_sectors < rq->current_nr_sectors) {
- printk(KERN_ERR "blk: request botched\n");
- rq->nr_sectors = rq->current_nr_sectors;
- }
- }
-}
-
static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
struct bio *bio)
{
@@ -199,8 +170,9 @@ new_segment:
if (unlikely(rq->cmd_flags & REQ_COPY_USER) &&
- (rq->data_len & q->dma_pad_mask)) {
- unsigned int pad_len = (q->dma_pad_mask & ~rq->data_len) + 1;
+ (blk_rq_bytes(rq) & q->dma_pad_mask)) {
+ unsigned int pad_len =
+ (q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1;
sg->length += pad_len;
rq->extra_len += pad_len;
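The pad expression rounds the byte count up to the next (dma_pad_mask + 1) boundary. A worked example (numbers invented):

/* dma_pad_mask = 3 (4-byte alignment), blk_rq_bytes(rq) = 1030:
 *
 *	pad_len = (3 & ~1030) + 1 = 1 + 1 = 2;
 *
 * 1030 + 2 = 1032, a multiple of 4; the branch is skipped entirely
 * when the length is already aligned (bytes & mask == 0).
 */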
@@ -398,7 +370,7 @@ static int attempt_merge(struct request_queue *q, struct request *req,
req->biotail->bi_next = next->bio;
req->biotail = next->biotail;
- req->nr_sectors = req->hard_nr_sectors += next->hard_nr_sectors;
+ req->data_len += blk_rq_bytes(next);
elv_merge_requests(q, req, next);
diff --git a/block/blk.h b/block/blk.h
index 51115599df9..ab54529103c 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -101,7 +101,6 @@ int ll_front_merge_fn(struct request_queue *q, struct request *req,
int attempt_back_merge(struct request_queue *q, struct request *rq);
int attempt_front_merge(struct request_queue *q, struct request *rq);
void blk_recalc_rq_segments(struct request *rq);
-void blk_recalc_rq_sectors(struct request *rq, int nsect);
void blk_queue_congestion_threshold(struct request_queue *q);
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index db4d990a66b..99ac4304d71 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -579,9 +579,9 @@ cfq_prio_tree_lookup(struct cfq_data *cfqd, struct rb_root *root,
* Sort strictly based on sector. Smallest to the left,
* largest to the right.
*/
- if (sector > cfqq->next_rq->sector)
+ if (sector > blk_rq_pos(cfqq->next_rq))
n = &(*p)->rb_right;
- else if (sector < cfqq->next_rq->sector)
+ else if (sector < blk_rq_pos(cfqq->next_rq))
n = &(*p)->rb_left;
else
break;
@@ -611,8 +611,8 @@ static void cfq_prio_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq)
return;
cfqq->p_root = &cfqd->prio_trees[cfqq->org_ioprio];
- __cfqq = cfq_prio_tree_lookup(cfqd, cfqq->p_root, cfqq->next_rq->sector,
- &parent, &p);
+ __cfqq = cfq_prio_tree_lookup(cfqd, cfqq->p_root,
+ blk_rq_pos(cfqq->next_rq), &parent, &p);
if (!__cfqq) {
rb_link_node(&cfqq->p_node, parent, p);
rb_insert_color(&cfqq->p_node, cfqq->p_root);
@@ -996,7 +996,7 @@ static struct cfq_queue *cfqq_close(struct cfq_data *cfqd,
if (cfq_rq_close(cfqd, __cfqq->next_rq))
return __cfqq;
- if (__cfqq->next_rq->sector < sector)
+ if (blk_rq_pos(__cfqq->next_rq) < sector)
node = rb_next(&__cfqq->p_node);
else
node = rb_prev(&__cfqq->p_node);