Diffstat (limited to 'drivers/md/raid5.c')
-rw-r--r-- | drivers/md/raid5.c | 25 | ++++++++++++++-----------
1 file changed, 14 insertions, 11 deletions
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index f6fe053a5be..297e2609217 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -47,6 +47,7 @@
 #include <linux/kthread.h>
 #include <linux/raid/pq.h>
 #include <linux/async_tx.h>
+#include <linux/module.h>
 #include <linux/async.h>
 #include <linux/seq_file.h>
 #include <linux/cpu.h>
@@ -3109,7 +3110,7 @@ static void handle_stripe(struct stripe_head *sh)
 	struct r5dev *pdev, *qdev;
 
 	clear_bit(STRIPE_HANDLE, &sh->state);
-	if (test_and_set_bit(STRIPE_ACTIVE, &sh->state)) {
+	if (test_and_set_bit_lock(STRIPE_ACTIVE, &sh->state)) {
 		/* already being handled, ensure it gets handled
 		 * again when current action finishes */
 		set_bit(STRIPE_HANDLE, &sh->state);
@@ -3158,10 +3159,14 @@ static void handle_stripe(struct stripe_head *sh)
 	/* check if the array has lost more than max_degraded devices and,
 	 * if so, some requests might need to be failed.
 	 */
-	if (s.failed > conf->max_degraded && s.to_read+s.to_write+s.written)
-		handle_failed_stripe(conf, sh, &s, disks, &s.return_bi);
-	if (s.failed > conf->max_degraded && s.syncing)
-		handle_failed_sync(conf, sh, &s);
+	if (s.failed > conf->max_degraded) {
+		sh->check_state = 0;
+		sh->reconstruct_state = 0;
+		if (s.to_read+s.to_write+s.written)
+			handle_failed_stripe(conf, sh, &s, disks, &s.return_bi);
+		if (s.syncing)
+			handle_failed_sync(conf, sh, &s);
+	}
 
 	/*
 	 * might be able to return some write requests if the parity blocks
@@ -3370,7 +3375,7 @@ finish:
 
 	return_io(s.return_bi);
 
-	clear_bit(STRIPE_ACTIVE, &sh->state);
+	clear_bit_unlock(STRIPE_ACTIVE, &sh->state);
 }
 
 static void raid5_activate_delayed(struct r5conf *conf)
@@ -3688,7 +3693,7 @@ static struct stripe_head *__get_priority_stripe(struct r5conf *conf)
 	return sh;
 }
 
-static int make_request(struct mddev *mddev, struct bio * bi)
+static void make_request(struct mddev *mddev, struct bio * bi)
 {
 	struct r5conf *conf = mddev->private;
 	int dd_idx;
@@ -3701,7 +3706,7 @@ static int make_request(struct mddev *mddev, struct bio * bi)
 
 	if (unlikely(bi->bi_rw & REQ_FLUSH)) {
 		md_flush_request(mddev, bi);
-		return 0;
+		return;
 	}
 
 	md_write_start(mddev, bi);
@@ -3709,7 +3714,7 @@ static int make_request(struct mddev *mddev, struct bio * bi)
 	if (rw == READ &&
 	     mddev->reshape_position == MaxSector &&
 	     chunk_aligned_read(mddev,bi))
-		return 0;
+		return;
 
 	logical_sector = bi->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
 	last_sector = bi->bi_sector + (bi->bi_size>>9);
@@ -3844,8 +3849,6 @@ static int make_request(struct mddev *mddev, struct bio * bi)
 
 		bio_endio(bi, 0);
 	}
-
-	return 0;
 }
 
 static sector_t raid5_size(struct mddev *mddev, sector_t sectors, int raid_disks);
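
Note on the STRIPE_ACTIVE change: test_and_set_bit_lock() and clear_bit_unlock() treat the flag as a bit lock with acquire/release ordering, so stores made while handle_stripe() owns the stripe become visible before another CPU can observe the bit clear and claim it. The snippet below is a minimal userspace sketch of that discipline using C11 atomics, not kernel code; the names stripe_try_lock, stripe_unlock and STRIPE_ACTIVE_BIT are hypothetical and appear nowhere in the patch.

/* Userspace illustration of the acquire/release bit-lock pattern
 * that test_and_set_bit_lock()/clear_bit_unlock() provide in the
 * kernel.  Build with: cc -std=c11 sketch.c
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define STRIPE_ACTIVE_BIT 0x1UL

static atomic_ulong stripe_state;	/* zero-initialised: bit clear */

/* Try to take ownership of the stripe.  Acquire ordering keeps the
 * critical-section accesses from being reordered before the
 * successful bit set. */
static bool stripe_try_lock(void)
{
	unsigned long old = atomic_fetch_or_explicit(&stripe_state,
						     STRIPE_ACTIVE_BIT,
						     memory_order_acquire);
	return !(old & STRIPE_ACTIVE_BIT);	/* true if we set the bit */
}

/* Drop ownership.  Release ordering publishes everything written while
 * the bit was held before the bit is seen clear by other threads. */
static void stripe_unlock(void)
{
	atomic_fetch_and_explicit(&stripe_state, ~STRIPE_ACTIVE_BIT,
				  memory_order_release);
}

int main(void)
{
	if (stripe_try_lock()) {
		/* handle_stripe()-style work would run here */
		stripe_unlock();
	} else {
		puts("stripe already being handled");
	}
	return 0;
}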