path: root/drivers/md/raid5.c
author		Ingo Molnar <mingo@elte.hu>	2008-07-15 23:12:58 +0200
committer	Ingo Molnar <mingo@elte.hu>	2008-07-15 23:12:58 +0200
commit		1e09481365ce248dbb4eb06dad70129bb5807037 (patch)
tree		c0cff5bef95c8b5e7486f144718ade9a06c284dc /drivers/md/raid5.c
parent		3e2f69fdd1b00166e7d589bce56b2d36a9e74374 (diff)
parent		b9d2252c1e44fa83a4e65fdc9eb93db6297c55af (diff)
Merge branch 'linus' into core/softlockup
Conflicts:
	kernel/softlockup.c

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'drivers/md/raid5.c')
-rw-r--r--	drivers/md/raid5.c	24
1 file changed, 13 insertions(+), 11 deletions(-)
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index c37e256b117..9ce7154845c 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -2017,12 +2017,7 @@ static int __handle_issuing_new_read_requests5(struct stripe_head *sh,
 		 */
 		s->uptodate++;
 		return 0; /* uptodate + compute == disks */
-	} else if ((s->uptodate < disks - 1) &&
-		test_bit(R5_Insync, &dev->flags)) {
-		/* Note: we hold off compute operations while checks are
-		 * in flight, but we still prefer 'compute' over 'read'
-		 * hence we only read if (uptodate < * disks-1)
-		 */
+	} else if (test_bit(R5_Insync, &dev->flags)) {
 		set_bit(R5_LOCKED, &dev->flags);
 		set_bit(R5_Wantread, &dev->flags);
 		if (!test_and_set_bit(STRIPE_OP_IO, &sh->ops.pending))
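The hunk above drops the "(s->uptodate < disks - 1)" guard, so an in-sync device is now read whenever the block cannot be computed. The deleted comment recorded why 'compute' was preferred over 'read': with RAID-5 parity, any single missing block is the XOR of the remaining blocks in the stripe, so a read can be avoided whenever only one block is missing. A minimal stand-alone sketch of that property (illustrative names and sizes, not kernel code):

/* With RAID-5 parity, any one missing block equals the XOR of the
 * remaining blocks, which is why 'compute' can stand in for 'read'
 * when uptodate == disks - 1.
 */
#include <stdio.h>

#define NDISKS 4
#define BLKSZ  8

int main(void)
{
	unsigned char blk[NDISKS][BLKSZ] = {
		"data-01", "data-02", "data-03", /* parity filled below */
	};
	unsigned char rebuilt[BLKSZ] = { 0 };
	int d, i;

	/* parity = XOR of the data blocks */
	for (d = 0; d < NDISKS - 1; d++)
		for (i = 0; i < BLKSZ; i++)
			blk[NDISKS - 1][i] ^= blk[d][i];

	/* "lose" disk 1, then recompute it from the survivors */
	for (d = 0; d < NDISKS; d++)
		if (d != 1)
			for (i = 0; i < BLKSZ; i++)
				rebuilt[i] ^= blk[d][i];

	printf("rebuilt disk 1: %s\n", (char *)rebuilt); /* "data-02" */
	return 0;
}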
@@ -2898,6 +2893,8 @@ static void handle_stripe5(struct stripe_head *sh)
 
 		for (i = conf->raid_disks; i--; ) {
 			set_bit(R5_Wantwrite, &sh->dev[i].flags);
+			set_bit(R5_LOCKED, &dev->flags);
+			s.locked++;
 			if (!test_and_set_bit(STRIPE_OP_IO, &sh->ops.pending))
 				sh->ops.count++;
 		}
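The two lines added above account for the blocks being written back after an expansion: each device is marked R5_LOCKED and s.locked is incremented, which is exactly what the new 's.locked == 0' test in the following hunk waits on before tearing down the expansion state. Note also the '!test_and_set_bit(STRIPE_OP_IO, ...)' idiom: because test_and_set_bit() returns the previous bit value, ops.count is bumped only once no matter how many devices request I/O. A user-space model of that idiom (simplified and non-atomic, for illustration only):

/* Illustrative model of the test_and_set_bit() pattern above: the
 * bit is set exactly once, so the work counter is bumped exactly
 * once even when every loop iteration asks for I/O.
 */
#include <stdio.h>

static unsigned long pending;

/* returns the previous value of the bit, like the kernel helper */
static int test_and_set_bit(int nr, unsigned long *addr)
{
	int old = (*addr >> nr) & 1;
	*addr |= 1UL << nr;
	return old;
}

int main(void)
{
	int count = 0, i;

	for (i = 0; i < 5; i++)		/* five devices want I/O */
		if (!test_and_set_bit(0, &pending))
			count++;	/* only the first caller counts */

	printf("count = %d\n", count);	/* prints "count = 1" */
	return 0;
}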
@@ -2911,6 +2908,7 @@ static void handle_stripe5(struct stripe_head *sh)
 				conf->raid_disks);
 		s.locked += handle_write_operations5(sh, 1, 1);
 	} else if (s.expanded &&
+		s.locked == 0 &&
 		!test_bit(STRIPE_OP_POSTXOR, &sh->ops.pending)) {
 		clear_bit(STRIPE_EXPAND_READY, &sh->state);
 		atomic_dec(&conf->reshape_stripes);
@@ -3316,15 +3314,17 @@ static int raid5_congested(void *data, int bits)
 /* We want read requests to align with chunks where possible,
  * but write requests don't need to.
  */
-static int raid5_mergeable_bvec(struct request_queue *q, struct bio *bio, struct bio_vec *biovec)
+static int raid5_mergeable_bvec(struct request_queue *q,
+				struct bvec_merge_data *bvm,
+				struct bio_vec *biovec)
 {
 	mddev_t *mddev = q->queuedata;
-	sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev);
+	sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
 	int max;
 	unsigned int chunk_sectors = mddev->chunk_size >> 9;
-	unsigned int bio_sectors = bio->bi_size >> 9;
+	unsigned int bio_sectors = bvm->bi_size >> 9;
 
-	if (bio_data_dir(bio) == WRITE)
+	if ((bvm->bi_rw & 1) == WRITE)
 		return biovec->bv_len; /* always allow writes to be mergeable */
 
 	max = (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9;
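raid5_mergeable_bvec() now takes a struct bvec_merge_data, which carries the same bi_sector, bi_size, bi_rw and bi_bdev fields previously read from the bio; the low bit of bi_rw is the data-direction bit, so '(bvm->bi_rw & 1) == WRITE' keeps the old bio_data_dir() behaviour. Since chunk_sectors is a power of two, 'sector & (chunk_sectors - 1)' is the offset into the current chunk, and the return value is the room left before the chunk boundary, in bytes. A stand-alone check of that arithmetic with made-up sample values:

/* Stand-alone check of the chunk-boundary math above; the sample
 * values (64KiB chunks = 128 sectors) are illustrative only.
 */
#include <stdio.h>

int main(void)
{
	unsigned int chunk_sectors = 128;  /* 64KiB chunks, 512B sectors */
	unsigned long long sector = 1000;  /* start of the request */
	unsigned int bio_sectors = 8;      /* 4KiB already in the bio */
	int max;

	/* offset within the chunk: 1000 & 127 == 104 */
	max = (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9;

	/* 128 - (104 + 8) = 16 sectors = 8192 bytes of room left */
	printf("max = %d bytes\n", max);
	return 0;
}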
@@ -4305,7 +4305,9 @@ static int run(mddev_t *mddev)
 				" disk %d\n", bdevname(rdev->bdev,b),
 				raid_disk);
 			working_disks++;
-		}
+		} else
+			/* Cannot rely on bitmap to complete recovery */
+			conf->fullsync = 1;
 	}
 
 	/*