path: root/drivers/md
author      NeilBrown <neilb@suse.de>    2012-02-14 11:10:10 +1100
committer   NeilBrown <neilb@suse.de>    2012-02-14 11:10:10 +1100
commit      fae8cc5ed0714953b1ad7cf86f030d2177278424 (patch)
tree        f61f54aaff4122671153e63cad86914566bce389 /drivers/md
parent      f53e29fc87b6fb76db0043202ec2c3450caa5ff3 (diff)
md/raid10: fix handling of error on last working device in array.
If we get a read error on the last working device in a RAID10 which contains the target block, then we don't fail the device (which is good) but we don't abort retries, which is wrong.  We end up in an infinite loop retrying the read on the one device.

This patch fixes the problem in two places:

1/ in raid10_end_read_request we don't even ask for a retry if this was the last usable device.  This is efficient but a little racy and will sometimes retry when it should not.

2/ in handle_read_error we are careful to exclude any device from retry which we tried to mark as faulty (that might have failed if it was the last device).  This is race-free but less efficient.

Signed-off-by: NeilBrown <neilb@suse.de>
Diffstat (limited to 'drivers/md')
-rw-r--r--    drivers/md/raid10.c    37
1 files changed, 27 insertions, 10 deletions
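
To make the two-part fix described in the commit message concrete outside of the kernel context, here is a small stand-alone C sketch.  It is a toy model, not the raid10.c implementation: mirror, should_retry, pick_read_mirror and handle_read_error below are hypothetical stand-ins for conf->mirrors[], the enough() check, read_balance() and the real handle_read_error() in the patch that follows.

/*
 * Toy model of the two-part fix, not the kernel code.  All names are
 * illustrative stand-ins chosen for this sketch only.
 */
#include <stdbool.h>
#include <stdio.h>

struct mirror {
	bool in_sync;	/* device still holds valid data            */
	bool blocked;	/* excluded from further retries of this bio */
};

/* Part 1: roughly what the enough()-style check decides in the completion
 * path.  Ignoring the device that just reported the error, is any other
 * usable copy of the block left?  If not, the error is returned upwards
 * instead of retrying forever on the same device. */
static bool should_retry(struct mirror *m, int n, int failed_slot)
{
	for (int i = 0; i < n; i++)
		if (i != failed_slot && m[i].in_sync && !m[i].blocked)
			return true;
	return false;
}

/* Helper standing in for read_balance(): pick any usable, unblocked mirror. */
static int pick_read_mirror(struct mirror *m, int n)
{
	for (int i = 0; i < n; i++)
		if (m[i].in_sync && !m[i].blocked)
			return i;
	return -1;		/* no device left: fail the request */
}

/* Part 2: the error handler marks the device it tried to fail as blocked,
 * so the next attempt to pick a read mirror can never choose it again
 * (the role IO_BLOCKED plays for r10_bio->devs[slot].bio in the patch). */
static void handle_read_error(struct mirror *m, int n, int failed_slot)
{
	m[failed_slot].blocked = true;

	int next = pick_read_mirror(m, n);
	if (next < 0)
		printf("no mirrors left, failing the read\n");
	else
		printf("retrying read on mirror %d\n", next);
}

int main(void)
{
	/* Mirror 0 already failed earlier; mirror 1 is the last one left. */
	struct mirror m[2] = { { .in_sync = false }, { .in_sync = true } };

	/* A read error on mirror 1 must not be retried...                */
	printf("retry? %s\n", should_retry(m, 2, 1) ? "yes" : "no");

	/* ...and the error handler must not loop back onto mirror 1.     */
	handle_read_error(m, 2, 1);
	return 0;
}

The key property in both paths is the same: a device that cannot satisfy the read is never offered for retry again, so the loop terminates either with a successful read from another copy or with the error propagated to the caller.
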
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 6e8aa213f0d..bd06ea21756 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -67,6 +67,7 @@ static int max_queued_requests = 1024;
static void allow_barrier(struct r10conf *conf);
static void lower_barrier(struct r10conf *conf);
+static int enough(struct r10conf *conf, int ignore);
static void * r10bio_pool_alloc(gfp_t gfp_flags, void *data)
{
@@ -347,6 +348,19 @@ static void raid10_end_read_request(struct bio *bio, int error)
* wait for the 'master' bio.
*/
set_bit(R10BIO_Uptodate, &r10_bio->state);
+ } else {
+ /* If all other devices that store this block have
+ * failed, we want to return the error upwards rather
+ * than fail the last device. Here we redefine
+ * "uptodate" to mean "Don't want to retry"
+ */
+ unsigned long flags;
+ spin_lock_irqsave(&conf->device_lock, flags);
+ if (!enough(conf, rdev->raid_disk))
+ uptodate = 1;
+ spin_unlock_irqrestore(&conf->device_lock, flags);
+ }
+ if (uptodate) {
raid_end_bio_io(r10_bio);
rdev_dec_pending(rdev, conf->mddev);
} else {
@@ -2052,6 +2066,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
"md/raid10:%s: %s: Failing raid device\n",
mdname(mddev), b);
md_error(mddev, conf->mirrors[d].rdev);
+ r10_bio->devs[r10_bio->read_slot].bio = IO_BLOCKED;
return;
}
@@ -2105,8 +2120,11 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
rdev,
r10_bio->devs[r10_bio->read_slot].addr
+ sect,
- s, 0))
+ s, 0)) {
md_error(mddev, rdev);
+ r10_bio->devs[r10_bio->read_slot].bio
+ = IO_BLOCKED;
+ }
break;
}
@@ -2299,17 +2317,20 @@ static void handle_read_error(struct mddev *mddev, struct r10bio *r10_bio)
* This is all done synchronously while the array is
* frozen.
*/
+ bio = r10_bio->devs[slot].bio;
+ bdevname(bio->bi_bdev, b);
+ bio_put(bio);
+ r10_bio->devs[slot].bio = NULL;
+
if (mddev->ro == 0) {
freeze_array(conf);
fix_read_error(conf, mddev, r10_bio);
unfreeze_array(conf);
- }
+ } else
+ r10_bio->devs[slot].bio = IO_BLOCKED;
+
rdev_dec_pending(rdev, mddev);
- bio = r10_bio->devs[slot].bio;
- bdevname(bio->bi_bdev, b);
- r10_bio->devs[slot].bio =
- mddev->ro ? IO_BLOCKED : NULL;
read_more:
rdev = read_balance(conf, r10_bio, &max_sectors);
if (rdev == NULL) {
@@ -2318,13 +2339,10 @@ read_more:
mdname(mddev), b,
(unsigned long long)r10_bio->sector);
raid_end_bio_io(r10_bio);
- bio_put(bio);
return;
}
do_sync = (r10_bio->master_bio->bi_rw & REQ_SYNC);
- if (bio)
- bio_put(bio);
slot = r10_bio->read_slot;
printk_ratelimited(
KERN_ERR
@@ -2360,7 +2378,6 @@ read_more:
mbio->bi_phys_segments++;
spin_unlock_irq(&conf->device_lock);
generic_make_request(bio);
- bio = NULL;
r10_bio = mempool_alloc(conf->r10bio_pool,
GFP_NOIO);