Diffstat (limited to 'drivers/md/raid5.c')
-rw-r--r--  drivers/md/raid5.c  108
1 file changed, 45 insertions(+), 63 deletions(-)
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 0b66afef2d8..caaca9e178b 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -108,12 +108,11 @@ static void return_io(struct bio *return_bi)
{
struct bio *bi = return_bi;
while (bi) {
- int bytes = bi->bi_size;
return_bi = bi->bi_next;
bi->bi_next = NULL;
bi->bi_size = 0;
- bi->bi_end_io(bi, bytes,
+ bi->bi_end_io(bi,
test_bit(BIO_UPTODATE, &bi->bi_flags)
? 0 : -EIO);
bi = return_bi;
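This hunk follows the block-layer change in which bi_end_io returns void and no longer receives a bytes_done count: a bio now completes exactly once, so the old protocol of returning 1 to be called again for the remainder disappears along with the local "bytes" bookkeeping. A minimal sketch of a completion callback in the new style (example_end_io and struct my_ctx are hypothetical names, not part of this patch):

    /* new-style completion: called once per bio, no partial byte counts */
    struct my_ctx {
            struct completion done;
    };

    static void example_end_io(struct bio *bio, int error)
    {
            struct my_ctx *ctx = bio->bi_private;  /* hypothetical context */

            if (error)
                    pr_debug("%s: io error %d\n", __FUNCTION__, error);
            complete(&ctx->done);                  /* wake the submitter */
    }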
@@ -289,7 +288,7 @@ static struct stripe_head *__find_stripe(raid5_conf_t *conf, sector_t sector, in
}
static void unplug_slaves(mddev_t *mddev);
-static void raid5_unplug_device(request_queue_t *q);
+static void raid5_unplug_device(struct request_queue *q);
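The request_queue_t typedef was removed from the block layer in the same kernel generation, so every former request_queue_t * in this file becomes the explicit struct type; the conversion is purely mechanical. A sketch of the resulting shape (example_unplug is a hypothetical name):

    static void example_unplug(struct request_queue *q)
    {
            mddev_t *mddev = q->queuedata;  /* per this file's convention */
            /* ... */
    }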
static struct stripe_head *get_active_stripe(raid5_conf_t *conf, sector_t sector, int disks,
int pd_idx, int noblock)
@@ -382,10 +381,10 @@ static unsigned long get_stripe_work(struct stripe_head *sh)
return pending;
}
-static int
-raid5_end_read_request(struct bio *bi, unsigned int bytes_done, int error);
-static int
-raid5_end_write_request (struct bio *bi, unsigned int bytes_done, int error);
+static void
+raid5_end_read_request(struct bio *bi, int error);
+static void
+raid5_end_write_request(struct bio *bi, int error);
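For reference, these prototypes now match the completion type the bio layer declares in this kernel generation (hedged; quoted from memory of <linux/bio.h> of that era):

    typedef void (bio_end_io_t) (struct bio *, int);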
static void ops_run_io(struct stripe_head *sh)
{
@@ -493,12 +492,12 @@ async_copy_data(int frombio, struct bio *bio, struct page *page,
if (frombio)
tx = async_memcpy(page, bio_page, page_offset,
b_offset, clen,
- ASYNC_TX_DEP_ACK | ASYNC_TX_KMAP_SRC,
+ ASYNC_TX_DEP_ACK,
tx, NULL, NULL);
else
tx = async_memcpy(bio_page, page, b_offset,
page_offset, clen,
- ASYNC_TX_DEP_ACK | ASYNC_TX_KMAP_DST,
+ ASYNC_TX_DEP_ACK,
tx, NULL, NULL);
}
if (clen < len) /* hit end of page */
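The ASYNC_TX_KMAP_SRC and ASYNC_TX_KMAP_DST flags are dropped because, as of this series, the async_tx core performs any highmem kmap itself in its synchronous fallback; callers only express dependency chaining. A hedged sketch of the resulting call shape, chaining one copy onto the previous descriptor:

    /* dest, src, dest_offset, src_offset, len, flags, depend_tx, cb, cb_param */
    tx = async_memcpy(dest_page, src_page, dest_off, src_off, len,
                      ASYNC_TX_DEP_ACK,   /* depend on and ack "tx" */
                      tx, NULL, NULL);    /* no completion callback */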
@@ -514,7 +513,7 @@ static void ops_complete_biofill(void *stripe_head_ref)
struct stripe_head *sh = stripe_head_ref;
struct bio *return_bi = NULL;
raid5_conf_t *conf = sh->raid_conf;
- int i, more_to_read = 0;
+ int i;
pr_debug("%s: stripe %llu\n", __FUNCTION__,
(unsigned long long)sh->sector);
@@ -522,16 +521,14 @@ static void ops_complete_biofill(void *stripe_head_ref)
/* clear completed biofills */
for (i = sh->disks; i--; ) {
struct r5dev *dev = &sh->dev[i];
- /* check if this stripe has new incoming reads */
- if (dev->toread)
- more_to_read++;
/* acknowledge completion of a biofill operation */
- /* and check if we need to reply to a read request
- */
- if (test_bit(R5_Wantfill, &dev->flags) && !dev->toread) {
+ /* and check if we need to reply to a read request,
+ * new R5_Wantfill requests are held off until
+ * !test_bit(STRIPE_OP_BIOFILL, &sh->ops.pending)
+ */
+ if (test_and_clear_bit(R5_Wantfill, &dev->flags)) {
struct bio *rbi, *rbi2;
- clear_bit(R5_Wantfill, &dev->flags);
/* The access to dev->read is outside of the
* spin_lock_irq(&conf->device_lock), but is protected
@@ -558,8 +555,7 @@ static void ops_complete_biofill(void *stripe_head_ref)
return_io(return_bi);
- if (more_to_read)
- set_bit(STRIPE_HANDLE, &sh->state);
+ set_bit(STRIPE_HANDLE, &sh->state);
release_stripe(sh);
}
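The removed more_to_read count sampled dev->toread outside conf->device_lock and could miss reads queued while the biofill completed. The fix sets STRIPE_HANDLE unconditionally, letting handle_stripe re-evaluate every device under its own locking, and relies on test_and_clear_bit so that exactly one context acknowledges each R5_Wantfill. Sketch of that hand-off idiom (as used in the hunk above):

    if (test_and_clear_bit(R5_Wantfill, &dev->flags)) {
            /* only the caller that actually cleared the bit gets here,
             * so this path exclusively owns completing dev's biofill;
             * new R5_Wantfill requests are held off while
             * STRIPE_OP_BIOFILL is still set in sh->ops.pending */
    }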
@@ -951,7 +947,7 @@ static int grow_stripes(raid5_conf_t *conf, int num)
conf->active_name = 0;
sc = kmem_cache_create(conf->cache_name[conf->active_name],
sizeof(struct stripe_head)+(devs-1)*sizeof(struct r5dev),
- 0, 0, NULL, NULL);
+ 0, 0, NULL);
if (!sc)
return 1;
conf->slab_cache = sc;
@@ -1003,7 +999,7 @@ static int resize_stripes(raid5_conf_t *conf, int newsize)
/* Step 1 */
sc = kmem_cache_create(conf->cache_name[1-conf->active_name],
sizeof(struct stripe_head)+(newsize-1)*sizeof(struct r5dev),
- 0, 0, NULL, NULL);
+ 0, 0, NULL);
if (!sc)
return -ENOMEM;
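kmem_cache_create() lost its trailing destructor argument when slab destructors were removed tree-wide, leaving name, object size, alignment, flags, and an optional constructor. A hedged sketch of the post-change call ("example-cache" is a placeholder name; devs is the per-stripe device count as in grow_stripes above):

    struct kmem_cache *sc;

    sc = kmem_cache_create("example-cache",
                           sizeof(struct stripe_head) +
                                   (devs - 1) * sizeof(struct r5dev),
                           0, 0, NULL);  /* no ctor; the dtor slot is gone */
    if (!sc)
            return -ENOMEM;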
@@ -1113,8 +1109,7 @@ static void shrink_stripes(raid5_conf_t *conf)
conf->slab_cache = NULL;
}
-static int raid5_end_read_request(struct bio * bi, unsigned int bytes_done,
- int error)
+static void raid5_end_read_request(struct bio * bi, int error)
{
struct stripe_head *sh = bi->bi_private;
raid5_conf_t *conf = sh->raid_conf;
@@ -1123,8 +1118,6 @@ static int raid5_end_read_request(struct bio * bi, unsigned int bytes_done,
char b[BDEVNAME_SIZE];
mdk_rdev_t *rdev;
- if (bi->bi_size)
- return 1;
for (i=0 ; i<disks; i++)
if (bi == &sh->dev[i].req)
@@ -1135,7 +1128,7 @@ static int raid5_end_read_request(struct bio * bi, unsigned int bytes_done,
uptodate);
if (i == disks) {
BUG();
- return 0;
+ return;
}
if (uptodate) {
@@ -1188,20 +1181,15 @@ static int raid5_end_read_request(struct bio * bi, unsigned int bytes_done,
clear_bit(R5_LOCKED, &sh->dev[i].flags);
set_bit(STRIPE_HANDLE, &sh->state);
release_stripe(sh);
- return 0;
}
-static int raid5_end_write_request (struct bio *bi, unsigned int bytes_done,
- int error)
+static void raid5_end_write_request (struct bio *bi, int error)
{
struct stripe_head *sh = bi->bi_private;
raid5_conf_t *conf = sh->raid_conf;
int disks = sh->disks, i;
int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
- if (bi->bi_size)
- return 1;
-
for (i=0 ; i<disks; i++)
if (bi == &sh->dev[i].req)
break;
@@ -1211,7 +1199,7 @@ static int raid5_end_write_request (struct bio *bi, unsigned int bytes_done,
uptodate);
if (i == disks) {
BUG();
- return 0;
+ return;
}
if (!uptodate)
@@ -1222,7 +1210,6 @@ static int raid5_end_write_request (struct bio *bi, unsigned int bytes_done,
clear_bit(R5_LOCKED, &sh->dev[i].flags);
set_bit(STRIPE_HANDLE, &sh->state);
release_stripe(sh);
- return 0;
}
@@ -2541,7 +2528,7 @@ static void handle_stripe_expansion(raid5_conf_t *conf, struct stripe_head *sh,
struct dma_async_tx_descriptor *tx = NULL;
clear_bit(STRIPE_EXPAND_SOURCE, &sh->state);
for (i = 0; i < sh->disks; i++)
- if (i != sh->pd_idx && (r6s && i != r6s->qd_idx)) {
+ if (i != sh->pd_idx && (!r6s || i != r6s->qd_idx)) {
int dd_idx, pd_idx, j;
struct stripe_head *sh2;
@@ -2574,7 +2561,8 @@ static void handle_stripe_expansion(raid5_conf_t *conf, struct stripe_head *sh,
set_bit(R5_UPTODATE, &sh2->dev[dd_idx].flags);
for (j = 0; j < conf->raid_disks; j++)
if (j != sh2->pd_idx &&
- (r6s && j != r6s->qd_idx) &&
+ (!r6s || j != raid6_next_disk(sh2->pd_idx,
+ sh2->disks)) &&
!test_bit(R5_Expanded, &sh2->dev[j].flags))
break;
if (j == conf->raid_disks) {
@@ -2583,12 +2571,12 @@ static void handle_stripe_expansion(raid5_conf_t *conf, struct stripe_head *sh,
}
release_stripe(sh2);
- /* done submitting copies, wait for them to complete */
- if (i + 1 >= sh->disks) {
- async_tx_ack(tx);
- dma_wait_for_async_tx(tx);
- }
}
+ /* done submitting copies, wait for them to complete */
+ if (tx) {
+ async_tx_ack(tx);
+ dma_wait_for_async_tx(tx);
+ }
}
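Two fixes land in this function. First, the per-disk skip test was wrong for raid5: with r6s == NULL the old "r6s && i != r6s->qd_idx" clause is always false, so the enclosing && never held and no blocks were copied at all; the corrected "!r6s || i != r6s->qd_idx" skips only the parity disk on raid5 and additionally the Q disk on raid6. Second, waiting for the async copies moves out of the per-disk loop and is guarded by tx, quiescing the channel once after all copies are submitted. A sketch of the corrected predicate (should_copy is a hypothetical helper; r6s is NULL for raid5):

    static inline int should_copy(int i, int pd_idx, struct r6_state *r6s)
    {
            /* copy every block except P, and except Q when running raid6 */
            return i != pd_idx && (!r6s || i != r6s->qd_idx);
    }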
/*
@@ -2855,7 +2843,7 @@ static void handle_stripe5(struct stripe_head *sh)
sh->disks = conf->raid_disks;
sh->pd_idx = stripe_to_pdidx(sh->sector, conf,
conf->raid_disks);
- s.locked += handle_write_operations5(sh, 0, 1);
+ s.locked += handle_write_operations5(sh, 1, 1);
} else if (s.expanded &&
!test_bit(STRIPE_OP_POSTXOR, &sh->ops.pending)) {
clear_bit(STRIPE_EXPAND_READY, &sh->state);
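The expansion branch previously asked for a read-modify-write (second argument 0); since a freshly expanded stripe already has every data block in-core, parity has to be computed by reconstruct-write instead. Hedged reading of the fixed call, assuming this file's handle_write_operations5(sh, rcw, expand) argument order:

    s.locked += handle_write_operations5(sh, 1 /* rcw */, 1 /* expand */);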
@@ -3182,7 +3170,7 @@ static void unplug_slaves(mddev_t *mddev)
for (i=0; i<mddev->raid_disks; i++) {
mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev);
if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) {
- request_queue_t *r_queue = bdev_get_queue(rdev->bdev);
+ struct request_queue *r_queue = bdev_get_queue(rdev->bdev);
atomic_inc(&rdev->nr_pending);
rcu_read_unlock();
@@ -3197,7 +3185,7 @@ static void unplug_slaves(mddev_t *mddev)
rcu_read_unlock();
}
-static void raid5_unplug_device(request_queue_t *q)
+static void raid5_unplug_device(struct request_queue *q)
{
mddev_t *mddev = q->queuedata;
raid5_conf_t *conf = mddev_to_conf(mddev);
@@ -3216,7 +3204,7 @@ static void raid5_unplug_device(request_queue_t *q)
unplug_slaves(mddev);
}
-static int raid5_issue_flush(request_queue_t *q, struct gendisk *disk,
+static int raid5_issue_flush(struct request_queue *q, struct gendisk *disk,
sector_t *error_sector)
{
mddev_t *mddev = q->queuedata;
@@ -3228,7 +3216,7 @@ static int raid5_issue_flush(request_queue_t *q, struct gendisk *disk,
mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev);
if (rdev && !test_bit(Faulty, &rdev->flags)) {
struct block_device *bdev = rdev->bdev;
- request_queue_t *r_queue = bdev_get_queue(bdev);
+ struct request_queue *r_queue = bdev_get_queue(bdev);
if (!r_queue->issue_flush_fn)
ret = -EOPNOTSUPP;
@@ -3267,7 +3255,7 @@ static int raid5_congested(void *data, int bits)
/* We want read requests to align with chunks where possible,
* but write requests don't need to.
*/
-static int raid5_mergeable_bvec(request_queue_t *q, struct bio *bio, struct bio_vec *biovec)
+static int raid5_mergeable_bvec(struct request_queue *q, struct bio *bio, struct bio_vec *biovec)
{
mddev_t *mddev = q->queuedata;
sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev);
@@ -3342,7 +3330,7 @@ static struct bio *remove_bio_from_retry(raid5_conf_t *conf)
* first).
* If the read failed..
*/
-static int raid5_align_endio(struct bio *bi, unsigned int bytes, int error)
+static void raid5_align_endio(struct bio *bi, int error)
{
struct bio* raid_bi = bi->bi_private;
mddev_t *mddev;
@@ -3350,8 +3338,6 @@ static int raid5_align_endio(struct bio *bi, unsigned int bytes, int error)
int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
mdk_rdev_t *rdev;
- if (bi->bi_size)
- return 1;
bio_put(bi);
mddev = raid_bi->bi_bdev->bd_disk->queue->queuedata;
@@ -3362,22 +3348,21 @@ static int raid5_align_endio(struct bio *bi, unsigned int bytes, int error)
rdev_dec_pending(rdev, conf->mddev);
if (!error && uptodate) {
- bio_endio(raid_bi, bytes, 0);
+ bio_endio(raid_bi, 0);
if (atomic_dec_and_test(&conf->active_aligned_reads))
wake_up(&conf->wait_for_stripe);
- return 0;
+ return;
}
pr_debug("raid5_align_endio : io error...handing IO for a retry\n");
add_bio_to_retry(raid_bi, conf);
- return 0;
}
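bio_endio() itself loses its bytes_done argument in the same series, so completing a whole bio collapses to a single two-argument call (sketch):

    bio_endio(raid_bi, 0);     /* success */
    bio_endio(raid_bi, -EIO);  /* failure */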
static int bio_fits_rdev(struct bio *bi)
{
- request_queue_t *q = bdev_get_queue(bi->bi_bdev);
+ struct request_queue *q = bdev_get_queue(bi->bi_bdev);
if ((bi->bi_size>>9) > q->max_sectors)
return 0;
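A unit note on the check above: bi_size counts bytes while q->max_sectors is in 512-byte sectors, hence the shift before comparing (restated with a comment):

    /* convert bytes to 512-byte sectors before testing the queue limit */
    if ((bi->bi_size >> 9) > q->max_sectors)
            return 0;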
@@ -3396,7 +3381,7 @@ static int bio_fits_rdev(struct bio *bi)
}
-static int chunk_aligned_read(request_queue_t *q, struct bio * raid_bio)
+static int chunk_aligned_read(struct request_queue *q, struct bio * raid_bio)
{
mddev_t *mddev = q->queuedata;
raid5_conf_t *conf = mddev_to_conf(mddev);
@@ -3466,7 +3451,7 @@ static int chunk_aligned_read(request_queue_t *q, struct bio * raid_bio)
}
-static int make_request(request_queue_t *q, struct bio * bi)
+static int make_request(struct request_queue *q, struct bio * bi)
{
mddev_t *mddev = q->queuedata;
raid5_conf_t *conf = mddev_to_conf(mddev);
@@ -3478,7 +3463,7 @@ static int make_request(request_queue_t *q, struct bio * bi)
int remaining;
if (unlikely(bio_barrier(bi))) {
- bio_endio(bi, bi->bi_size, -EOPNOTSUPP);
+ bio_endio(bi, -EOPNOTSUPP);
return 0;
}
@@ -3594,12 +3579,11 @@ static int make_request(request_queue_t *q, struct bio * bi)
remaining = --bi->bi_phys_segments;
spin_unlock_irq(&conf->device_lock);
if (remaining == 0) {
- int bytes = bi->bi_size;
if ( rw == WRITE )
md_write_end(mddev);
- bi->bi_size = 0;
- bi->bi_end_io(bi, bytes,
+
+ bi->bi_end_io(bi,
test_bit(BIO_UPTODATE, &bi->bi_flags)
? 0 : -EIO);
}
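Here bi_phys_segments is repurposed as a count of stripe_heads still referencing the incoming bio; the last reference dropped under conf->device_lock completes the bio through the new two-argument bi_end_io. Sketch of the pattern (uptodate stands for the BIO_UPTODATE test in the hunk above):

    spin_lock_irq(&conf->device_lock);
    remaining = --bi->bi_phys_segments;  /* stripe refs outstanding */
    spin_unlock_irq(&conf->device_lock);
    if (remaining == 0)
            bi->bi_end_io(bi, uptodate ? 0 : -EIO);  /* last ref completes */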
@@ -3877,10 +3861,8 @@ static int retry_aligned_read(raid5_conf_t *conf, struct bio *raid_bio)
remaining = --raid_bio->bi_phys_segments;
spin_unlock_irq(&conf->device_lock);
if (remaining == 0) {
- int bytes = raid_bio->bi_size;
- raid_bio->bi_size = 0;
- raid_bio->bi_end_io(raid_bio, bytes,
+ raid_bio->bi_end_io(raid_bio,
test_bit(BIO_UPTODATE, &raid_bio->bi_flags)
? 0 : -EIO);
}