author     Daniel Vetter <daniel.vetter@ffwll.ch>   2014-01-30 12:56:51 +0100
committer  Daniel Vetter <daniel.vetter@ffwll.ch>   2014-01-30 12:56:51 +0100
commit     e515b47e5646154448e46231d811919d93923da5 (patch)
tree       1b10297d37170a1e00b4f2211f4296c2b0020f8d /drivers/md/raid10.c
parent     c2c1d4912cd7028384d7f25d2faefefb8958f64d (diff)
parent     ef64cf9d06049e4e9df661f3be60b217e476bee1 (diff)
Merge remote-tracking branch 'airlied/drm-next' into drm-intel-next-queued
Backmerge drm-next - I need to backmerge drm-intel-fixes patches touching
the error capture code to be able to merge Ben's cleanup patches.

Conflicts:
	drivers/gpu/drm/i915/i915_gpu_error.c

Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Diffstat (limited to 'drivers/md/raid10.c')
-rw-r--r--  drivers/md/raid10.c  12
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index c504e8389e6..06eeb99ea6f 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1319,7 +1319,7 @@ read_again:
/* Could not read all from this device, so we will
* need another r10_bio.
*/
- sectors_handled = (r10_bio->sectors + max_sectors
+ sectors_handled = (r10_bio->sector + max_sectors
- bio->bi_sector);
r10_bio->sectors = max_sectors;
spin_lock_irq(&conf->device_lock);
@@ -1327,7 +1327,7 @@ read_again:
bio->bi_phys_segments = 2;
else
bio->bi_phys_segments++;
- spin_unlock(&conf->device_lock);
+ spin_unlock_irq(&conf->device_lock);
/* Cannot call generic_make_request directly
* as that will be queued in __generic_make_request
* and subsequent mempool_alloc might block
@@ -3218,10 +3218,6 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
if (j == conf->copies) {
/* Cannot recover, so abort the recovery or
* record a bad block */
- put_buf(r10_bio);
- if (rb2)
- atomic_dec(&rb2->remaining);
- r10_bio = rb2;
if (any_working) {
/* problem is that there are bad blocks
* on other device(s)
@@ -3253,6 +3249,10 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
mirror->recovery_disabled
= mddev->recovery_disabled;
}
+ put_buf(r10_bio);
+ if (rb2)
+ atomic_dec(&rb2->remaining);
+ r10_bio = rb2;
break;
}
}
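
The raid10.c hunks above carry three small fixes: the first uses the bio's starting sector (r10_bio->sector) rather than its length (r10_bio->sectors) when computing sectors_handled; the second makes the unlock match the spin_lock_irq() that opened the critical section; the last two move put_buf()/atomic_dec()/r10_bio = rb2 below the bad-block handling that still reads r10_bio. The sketch below is not raid10 code: it uses hypothetical toy_spin_lock_irq()/toy_spin_unlock_irq() helpers built on a pthread mutex as stand-ins for the kernel spinlock API, and toy_get_buf()/toy_put_buf() built on calloc()/free() as stand-ins for the mempool-backed r10_bio buffers. It only illustrates the two patterns the locking and ordering hunks enforce, under those assumptions.

/* Toy userspace sketch, NOT kernel code: illustrates (1) releasing a lock
 * with the variant that matches the acquire, and (2) returning a pooled
 * buffer only after its last use. All toy_* names are hypothetical. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t device_lock = PTHREAD_MUTEX_INITIALIZER;
static int irqs_disabled;

static void toy_spin_lock_irq(void)
{
	irqs_disabled = 1;              /* the real spin_lock_irq() disables local interrupts */
	pthread_mutex_lock(&device_lock);
}

static void toy_spin_unlock_irq(void)
{
	pthread_mutex_unlock(&device_lock);
	irqs_disabled = 0;              /* spin_unlock_irq() re-enables them; a plain unlock would not */
}

struct toy_r10_bio {
	int sectors;                    /* field the error path still needs to read */
};

static struct toy_r10_bio *toy_get_buf(void)
{
	return calloc(1, sizeof(struct toy_r10_bio));
}

static void toy_put_buf(struct toy_r10_bio *b)
{
	free(b);                        /* stands in for returning the buffer to its pool */
}

int main(void)
{
	struct toy_r10_bio *r10_bio = toy_get_buf();
	int phys_segments = 1;

	r10_bio->sectors = 8;

	/* Pattern 1: the release must mirror the acquire. Pairing an _irq
	 * lock with a plain unlock is the bug the second hunk removes. */
	toy_spin_lock_irq();
	phys_segments++;                /* the real code bumps bio->bi_phys_segments here */
	toy_spin_unlock_irq();
	printf("segments=%d, irqs_disabled=%d\n", phys_segments, irqs_disabled);

	/* Pattern 2: finish the error handling that still reads r10_bio
	 * before handing the buffer back, as the last two hunks do by
	 * moving put_buf() below the bad-block bookkeeping. */
	printf("recording bad block over %d sectors\n", r10_bio->sectors);
	toy_put_buf(r10_bio);
	r10_bio = NULL;

	return 0;
}

In the real diff the second point is the reason put_buf(r10_bio), the atomic_dec() on rb2->remaining and the r10_bio = rb2 reassignment move from the top of the "cannot recover" branch to just before the break: the bad-block and recovery_disabled handling in between still dereferences r10_bio, so the buffer must stay live until that code has run.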