path: root/drivers/md/raid5.c
author     Raz Ben-Jehuda(caro) <raziebe@gmail.com>   2006-12-10 02:20:46 -0800
committer  Linus Torvalds <torvalds@woody.osdl.org>   2006-12-10 09:57:20 -0800
commit     f679623f50545bc0577caf2d0f8675b61162f059 (patch)
tree       7253c48db142ec63e6f22df12dfa2babb3e6129c /drivers/md/raid5.c
parent     23032a0eb97c8eaae8ac9d17373b53b19d0f5413 (diff)
[PATCH] md: handle bypassing the read cache (assuming nothing fails)
Signed-off-by: Neil Brown <neilb@suse.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'drivers/md/raid5.c')
-rw-r--r--  drivers/md/raid5.c  |  78
1 file changed, 78 insertions(+), 0 deletions(-)
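The key idea of the patch: a read may bypass the stripe cache only when it lies entirely within one chunk on a single data disk, which is what the new in_chunk_boundary() helper below tests. The following is a minimal standalone sketch of that arithmetic, not part of the patch; it assumes the chunk size in sectors is a power of two (which is what lets the modulo be done with a mask), and the function name within_one_chunk and the sample values are illustrative only.

/*
 * Sketch of the chunk-boundary test, assuming chunk_sectors is a
 * power of two so "sector % chunk_sectors" can be done with a mask.
 */
#include <stdio.h>

typedef unsigned long long sector_t;

static int within_one_chunk(sector_t sector, unsigned int bio_sectors,
			    unsigned int chunk_sectors)
{
	/* offset of the first sector inside its chunk */
	unsigned int offset = sector & (chunk_sectors - 1);

	/* the read fits iff it ends on or before the chunk boundary */
	return offset + bio_sectors <= chunk_sectors;
}

int main(void)
{
	/* a 64KiB chunk is 128 sectors of 512 bytes */
	printf("%d\n", within_one_chunk(120, 8, 128));	/* 1: ends exactly on the boundary */
	printf("%d\n", within_one_chunk(120, 16, 128));	/* 0: crosses into the next chunk */
	return 0;
}

In the driver the chunk size is kept in bytes (mddev->chunk_size), hence the ">> 9" shifts in in_chunk_boundary() that convert both it and bio->bi_size to 512-byte sectors before applying the same test.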
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index b86ceba04f6..269b7771a30 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -2633,6 +2633,84 @@ static int raid5_mergeable_bvec(request_queue_t *q, struct bio *bio, struct bio_
return max;
}
+
+static int in_chunk_boundary(mddev_t *mddev, struct bio *bio)
+{
+	sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev);
+	unsigned int chunk_sectors = mddev->chunk_size >> 9;
+	unsigned int bio_sectors = bio->bi_size >> 9;
+
+	return chunk_sectors >=
+		((sector & (chunk_sectors - 1)) + bio_sectors);
+}
+
+/*
+ * The "raid5_align_endio" should check if the read succeeded and if it
+ * did, call bio_endio on the original bio (having bio_put the new bio
+ * first).
+ * If the read failed..
+ */
+int raid5_align_endio(struct bio *bi, unsigned int bytes, int error)
+{
+	struct bio *raid_bi = bi->bi_private;
+	if (bi->bi_size)
+		return 1;
+	bio_put(bi);
+	bio_endio(raid_bi, bytes, error);
+	return 0;
+}
+
+static int chunk_aligned_read(request_queue_t *q, struct bio *raid_bio)
+{
+	mddev_t *mddev = q->queuedata;
+	raid5_conf_t *conf = mddev_to_conf(mddev);
+	const unsigned int raid_disks = conf->raid_disks;
+	const unsigned int data_disks = raid_disks - 1;
+	unsigned int dd_idx, pd_idx;
+	struct bio *align_bi;
+	mdk_rdev_t *rdev;
+
+	if (!in_chunk_boundary(mddev, raid_bio)) {
+		printk("chunk_aligned_read : non aligned\n");
+		return 0;
+	}
+	/*
+	 * use bio_clone to make a copy of the bio
+	 */
+	align_bi = bio_clone(raid_bio, GFP_NOIO);
+	if (!align_bi)
+		return 0;
+	/*
+	 * set bi_end_io to a new function, and set bi_private to the
+	 * original bio.
+	 */
+	align_bi->bi_end_io = raid5_align_endio;
+	align_bi->bi_private = raid_bio;
+	/*
+	 * compute position
+	 */
+	align_bi->bi_sector = raid5_compute_sector(raid_bio->bi_sector,
+						   raid_disks,
+						   data_disks,
+						   &dd_idx,
+						   &pd_idx,
+						   conf);
+
+	rcu_read_lock();
+	rdev = rcu_dereference(conf->disks[dd_idx].rdev);
+	if (rdev && test_bit(In_sync, &rdev->flags)) {
+		align_bi->bi_bdev = rdev->bdev;
+		atomic_inc(&rdev->nr_pending);
+		rcu_read_unlock();
+		generic_make_request(align_bi);
+		return 1;
+	} else {
+		rcu_read_unlock();
+		return 0;
+	}
+}
+
+
static int make_request(request_queue_t *q, struct bio * bi)
{
mddev_t *mddev = q->queuedata;