| author    | Kent Overstreet <koverstreet@google.com> | 2013-02-05 15:19:29 -0800 |
|-----------|-------------------------------------------|---------------------------|
| committer | Kent Overstreet <koverstreet@google.com> | 2013-03-23 14:15:30 -0700 |
| commit    | aa8b57aa3d1c06ca53312294ee6dfc767ee3ddb3 (patch) | |
| tree      | a5921d44756d32e0555a8ae284db3c16a8386860 /drivers/md/raid10.c | |
| parent    | f73a1c7d117d07a96d89475066188a2b79e53c48 (diff) | |
block: Use bio_sectors() more consistently
A bunch of places in the code weren't using it where they could be; this will reduce the size of the patch that puts bi_sector/bi_size/bi_idx into a struct bvec_iter.
Signed-off-by: Kent Overstreet <koverstreet@google.com>
CC: Jens Axboe <axboe@kernel.dk>
CC: "Ed L. Cashin" <ecashin@coraid.com>
CC: Nick Piggin <npiggin@kernel.dk>
CC: Jiri Kosina <jkosina@suse.cz>
CC: Jim Paris <jim@jtan.com>
CC: Geoff Levand <geoff@infradead.org>
CC: Alasdair Kergon <agk@redhat.com>
CC: dm-devel@redhat.com
CC: Neil Brown <neilb@suse.de>
CC: Steven Rostedt <rostedt@goodmis.org>
Acked-by: Ed Cashin <ecashin@coraid.com>
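For context, bio_sectors() in kernels of this era is a one-line macro over bi_size, converting the bio's byte count into 512-byte sectors. The sketch below is a self-contained userspace illustration, not the kernel source: the reduced struct bio is a stand-in for the real definition, kept only so the example compiles on its own. It also checks the byte/sector/KiB arithmetic that the printk fix in this patch relies on.

```c
/*
 * Sketch of the helper this patch substitutes in throughout.
 * The struct below is a reduced stand-in for the kernel's struct bio.
 */
#include <assert.h>

struct bio {
	unsigned long bi_sector;	/* device address in 512-byte sectors */
	unsigned int  bi_size;		/* residual I/O count in bytes */
};

#define bio_sectors(bio)	((bio)->bi_size >> 9)

int main(void)
{
	struct bio b = { .bi_sector = 2048, .bi_size = 8192 };

	/* 8192 bytes == 16 sectors of 512 bytes each */
	assert(bio_sectors(&b) == 16);

	/*
	 * The printk fix swaps bi_size >> 10 (bytes -> KiB) for
	 * bio_sectors(bio) / 2 (sectors -> KiB): both yield 8 here,
	 * since one sector is half a KiB.
	 */
	assert((b.bi_size >> 10) == bio_sectors(&b) / 2);
	return 0;
}
```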
Diffstat (limited to 'drivers/md/raid10.c')
-rw-r--r--  drivers/md/raid10.c | 24 +++++++++++-------------
1 file changed, 11 insertions(+), 13 deletions(-)
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 77b562d18a9..5ee14ab16a0 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1169,7 +1169,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
 	/* If this request crosses a chunk boundary, we need to
 	 * split it. This will only happen for 1 PAGE (or less) requests.
 	 */
-	if (unlikely((bio->bi_sector & chunk_mask) + (bio->bi_size >> 9)
+	if (unlikely((bio->bi_sector & chunk_mask) + bio_sectors(bio)
 		     > chunk_sects
 		     && (conf->geo.near_copies < conf->geo.raid_disks
 			 || conf->prev.near_copies < conf->prev.raid_disks))) {
@@ -1209,7 +1209,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
 	bad_map:
 	printk("md/raid10:%s: make_request bug: can't convert block across chunks"
 	       " or bigger than %dk %llu %d\n", mdname(mddev), chunk_sects/2,
-	       (unsigned long long)bio->bi_sector, bio->bi_size >> 10);
+	       (unsigned long long)bio->bi_sector, bio_sectors(bio) / 2);
 
 	bio_io_error(bio);
 	return;
@@ -1224,7 +1224,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
 	 */
 	wait_barrier(conf);
 
-	sectors = bio->bi_size >> 9;
+	sectors = bio_sectors(bio);
 	while (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
 	    bio->bi_sector < conf->reshape_progress &&
 	    bio->bi_sector + sectors > conf->reshape_progress) {
@@ -1326,8 +1326,7 @@ read_again:
 		r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);
 
 		r10_bio->master_bio = bio;
-		r10_bio->sectors = ((bio->bi_size >> 9)
-				    - sectors_handled);
+		r10_bio->sectors = bio_sectors(bio) - sectors_handled;
 		r10_bio->state = 0;
 		r10_bio->mddev = mddev;
 		r10_bio->sector = bio->bi_sector + sectors_handled;
@@ -1569,7 +1568,7 @@ retry_write:
 	 * after checking if we need to go around again.
 	 */
 
-	if (sectors_handled < (bio->bi_size >> 9)) {
+	if (sectors_handled < bio_sectors(bio)) {
 		one_write_done(r10_bio);
 		/* We need another r10_bio.  It has already been counted
 		 * in bio->bi_phys_segments.
@@ -1577,7 +1576,7 @@ retry_write:
 		r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);
 
 		r10_bio->master_bio = bio;
-		r10_bio->sectors = (bio->bi_size >> 9) - sectors_handled;
+		r10_bio->sectors = bio_sectors(bio) - sectors_handled;
 		r10_bio->mddev = mddev;
 		r10_bio->sector = bio->bi_sector + sectors_handled;
@@ -2103,7 +2102,7 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
 		d = r10_bio->devs[i].devnum;
 		atomic_inc(&conf->mirrors[d].rdev->nr_pending);
 		atomic_inc(&r10_bio->remaining);
-		md_sync_acct(conf->mirrors[d].rdev->bdev, tbio->bi_size >> 9);
+		md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(tbio));
 
 		tbio->bi_sector += conf->mirrors[d].rdev->data_offset;
 		tbio->bi_bdev = conf->mirrors[d].rdev->bdev;
@@ -2128,7 +2127,7 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
 		d = r10_bio->devs[i].devnum;
 		atomic_inc(&r10_bio->remaining);
 		md_sync_acct(conf->mirrors[d].replacement->bdev,
-			     tbio->bi_size >> 9);
+			     bio_sectors(tbio));
 		generic_make_request(tbio);
 	}
@@ -2254,13 +2253,13 @@ static void recovery_request_write(struct mddev *mddev, struct r10bio *r10_bio)
 	wbio2 = r10_bio->devs[1].repl_bio;
 	if (wbio->bi_end_io) {
 		atomic_inc(&conf->mirrors[d].rdev->nr_pending);
-		md_sync_acct(conf->mirrors[d].rdev->bdev, wbio->bi_size >> 9);
+		md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(wbio));
 		generic_make_request(wbio);
 	}
 	if (wbio2 && wbio2->bi_end_io) {
 		atomic_inc(&conf->mirrors[d].replacement->nr_pending);
 		md_sync_acct(conf->mirrors[d].replacement->bdev,
-			     wbio2->bi_size >> 9);
+			     bio_sectors(wbio2));
 		generic_make_request(wbio2);
 	}
 }
@@ -2690,8 +2689,7 @@ read_more:
 	r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);
 
 	r10_bio->master_bio = mbio;
-	r10_bio->sectors = (mbio->bi_size >> 9)
-		- sectors_handled;
+	r10_bio->sectors = bio_sectors(mbio) - sectors_handled;
 	r10_bio->state = 0;
 	set_bit(R10BIO_ReadError, &r10_bio->state);
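To make the first hunk concrete, here is a worked example of the chunk-boundary test as it reads after the conversion. All numbers (chunk size, starting sector, bio length) are hypothetical, chosen only to show when a request spills into the next chunk and must be split; this is a sketch, not kernel code.

```c
/*
 * Worked example of the raid10 chunk-boundary test:
 *
 *     (bio->bi_sector & chunk_mask) + bio_sectors(bio) > chunk_sects
 *
 * chunk_mask = chunk_sects - 1 works because chunk sizes are powers of two,
 * so the AND yields the bio's offset within its chunk.
 */
#include <stdio.h>

int main(void)
{
	unsigned int  chunk_sects = 128;            /* 64 KiB chunks, in sectors */
	unsigned int  chunk_mask  = chunk_sects - 1;

	unsigned long bi_sector = 120;              /* starts 8 sectors before the boundary */
	unsigned int  sectors   = 16;               /* what bio_sectors(bio) would return */

	/* (120 & 127) + 16 = 136 > 128: the bio crosses into the next chunk */
	if ((bi_sector & chunk_mask) + sectors > chunk_sects)
		printf("bio crosses a chunk boundary and must be split\n");
	else
		printf("bio fits within a single chunk\n");
	return 0;
}
```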