Diffstat (limited to 'drivers/md')
 -rw-r--r--  drivers/md/bitmap.c         |  4
 -rw-r--r--  drivers/md/dm-hw-handler.c  |  3
 -rw-r--r--  drivers/md/dm-stripe.c      |  8
 -rw-r--r--  drivers/md/dm.c             | 68
 -rw-r--r--  drivers/md/md.c             |  5
 -rw-r--r--  drivers/md/raid1.c          | 13
 6 files changed, 67 insertions(+), 34 deletions(-)
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index eae4473eadd..e1c18aa1d71 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -556,7 +556,7 @@ static void bitmap_mask_state(struct bitmap *bitmap, enum bitmap_state bits,
unsigned long flags;
spin_lock_irqsave(&bitmap->lock, flags);
- if (!bitmap || !bitmap->sb_page) { /* can't set the state */
+ if (!bitmap->sb_page) { /* can't set the state */
spin_unlock_irqrestore(&bitmap->lock, flags);
return;
}
@@ -1309,7 +1309,7 @@ int bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long sect
case 1:
*bmc = 2;
}
- if ((*bmc & COUNTER_MAX) == COUNTER_MAX) BUG();
+ BUG_ON((*bmc & COUNTER_MAX) == COUNTER_MAX);
(*bmc)++;
spin_unlock_irq(&bitmap->lock);
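Note (not part of the patch): the first hunk removes a check that could never fire -- bitmap->lock is taken on the line above, so if bitmap were NULL the spin_lock_irqsave() would have crashed before the test; only the sb_page check does real work. The second hunk (and the dm-hw-handler.c hunk below) folds an open-coded "if (cond) BUG();" into BUG_ON(cond), which expands to the same thing. A minimal illustration, with a made-up helper:

    /* Illustrative only -- check_use() is not from the patch.  BUG_ON(cond)
     * expands to "if (unlikely(cond)) BUG();", so the conversions above do
     * not change behaviour; they just read as a single assertion.
     */
    #include <linux/bug.h>

    static inline void check_use(int use)
    {
            BUG_ON(use < 0);        /* panic if the refcount went negative */
    }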
diff --git a/drivers/md/dm-hw-handler.c b/drivers/md/dm-hw-handler.c
index 4cc0010e015..baafaaba4d4 100644
--- a/drivers/md/dm-hw-handler.c
+++ b/drivers/md/dm-hw-handler.c
@@ -83,8 +83,7 @@ void dm_put_hw_handler(struct hw_handler_type *hwht)
if (--hwhi->use == 0)
module_put(hwhi->hwht.module);
- if (hwhi->use < 0)
- BUG();
+ BUG_ON(hwhi->use < 0);
out:
up_read(&_hwh_lock);
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index ab89278a56b..697aacafb02 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -103,9 +103,15 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
return -EINVAL;
}
+ if (((uint32_t)ti->len) & (chunk_size - 1)) {
+ ti->error = "dm-stripe: Target length not divisible by "
+ "chunk size";
+ return -EINVAL;
+ }
+
width = ti->len;
if (sector_div(width, stripes)) {
- ti->error = "dm-stripe: Target length not divisable by "
+ ti->error = "dm-stripe: Target length not divisible by "
"number of stripes";
return -EINVAL;
}
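Note (not part of the patch): dm-stripe requires the chunk size to be a power of two (rejected earlier in stripe_ctr() otherwise), which is what makes "len & (chunk_size - 1)" a valid "is len a multiple of chunk_size" test; the new hunk applies it to the whole target length, and the existing per-stripe-count check keeps its corrected "divisible" spelling. A self-contained sketch of the mask test, with made-up values:

    /* Illustrative only: the power-of-two alignment test used above.
     * aligned_to_chunk() and the numbers are invented; "sectors" stands
     * in for ti->len.
     */
    #include <stdint.h>
    #include <stdio.h>

    static int aligned_to_chunk(uint64_t sectors, uint32_t chunk_size)
    {
            /* only valid because chunk_size is a power of two */
            return (sectors & (chunk_size - 1)) == 0;
    }

    int main(void)
    {
            printf("%d\n", aligned_to_chunk(1024, 64));  /* 1: exact multiple  */
            printf("%d\n", aligned_to_chunk(1000, 64));  /* 0: 40 sectors over */
            return 0;
    }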
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index e9adeb9d172..8c82373f7ff 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -17,6 +17,7 @@
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/idr.h>
+#include <linux/blktrace_api.h>
static const char *_name = DM_NAME;
@@ -334,6 +335,8 @@ static void dec_pending(struct dm_io *io, int error)
/* nudge anyone waiting on suspend queue */
wake_up(&io->md->wait);
+ blk_add_trace_bio(io->md->queue, io->bio, BLK_TA_COMPLETE);
+
bio_endio(io->bio, io->bio->bi_size, io->error);
free_io(io->md, io);
}
@@ -392,6 +395,7 @@ static void __map_bio(struct dm_target *ti, struct bio *clone,
struct target_io *tio)
{
int r;
+ sector_t sector;
/*
* Sanity checks.
@@ -407,10 +411,17 @@ static void __map_bio(struct dm_target *ti, struct bio *clone,
* this io.
*/
atomic_inc(&tio->io->io_count);
+ sector = clone->bi_sector;
r = ti->type->map(ti, clone, &tio->info);
- if (r > 0)
+ if (r > 0) {
/* the bio has been remapped so dispatch it */
+
+ blk_add_trace_remap(bdev_get_queue(clone->bi_bdev), clone,
+ tio->io->bio->bi_bdev->bd_dev, sector,
+ clone->bi_sector);
+
generic_make_request(clone);
+ }
else if (r < 0) {
/* error the io and bail out */
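Note (not part of the patch): the dec_pending() and __map_bio() hunks hook device-mapper into blktrace. The clone's sector is saved before ti->type->map() runs because a successful map (r > 0) rewrites the clone to point at the underlying device; blk_add_trace_remap() then records the old-sector-to-new-sector translation against the new queue, and blk_add_trace_bio(..., BLK_TA_COMPLETE) logs completion of the original bio. The save-then-trace ordering, sketched with hypothetical names:

    /* Illustrative only -- remap_and_trace() and old_dev are made up; the
     * point is saving bi_sector before the target is allowed to remap.
     */
    #include <linux/bio.h>
    #include <linux/blkdev.h>
    #include <linux/blktrace_api.h>
    #include <linux/device-mapper.h>

    static void remap_and_trace(struct dm_target *ti, struct bio *clone,
                                dev_t old_dev, union map_info *info)
    {
            sector_t old_sector = clone->bi_sector;     /* before the remap */

            if (ti->type->map(ti, clone, info) > 0) {
                    blk_add_trace_remap(bdev_get_queue(clone->bi_bdev), clone,
                                        old_dev, old_sector, clone->bi_sector);
                    generic_make_request(clone);
            }
    }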
@@ -533,30 +544,35 @@ static void __clone_and_map(struct clone_info *ci)
} else {
/*
- * Create two copy bios to deal with io that has
- * been split across a target.
+ * Handle a bvec that must be split between two or more targets.
*/
struct bio_vec *bv = bio->bi_io_vec + ci->idx;
+ sector_t remaining = to_sector(bv->bv_len);
+ unsigned int offset = 0;
- clone = split_bvec(bio, ci->sector, ci->idx,
- bv->bv_offset, max);
- __map_bio(ti, clone, tio);
+ do {
+ if (offset) {
+ ti = dm_table_find_target(ci->map, ci->sector);
+ max = max_io_len(ci->md, ci->sector, ti);
- ci->sector += max;
- ci->sector_count -= max;
- ti = dm_table_find_target(ci->map, ci->sector);
-
- len = to_sector(bv->bv_len) - max;
- clone = split_bvec(bio, ci->sector, ci->idx,
- bv->bv_offset + to_bytes(max), len);
- tio = alloc_tio(ci->md);
- tio->io = ci->io;
- tio->ti = ti;
- memset(&tio->info, 0, sizeof(tio->info));
- __map_bio(ti, clone, tio);
+ tio = alloc_tio(ci->md);
+ tio->io = ci->io;
+ tio->ti = ti;
+ memset(&tio->info, 0, sizeof(tio->info));
+ }
+
+ len = min(remaining, max);
+
+ clone = split_bvec(bio, ci->sector, ci->idx,
+ bv->bv_offset + offset, len);
+
+ __map_bio(ti, clone, tio);
+
+ ci->sector += len;
+ ci->sector_count -= len;
+ offset += to_bytes(len);
+ } while (remaining -= len);
- ci->sector += len;
- ci->sector_count -= len;
ci->idx++;
}
}
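Note (not part of the patch): this is the main behavioural change in dm.c. The old code assumed a bvec straddling a target boundary covered at most two targets and emitted exactly two clones; the new do/while loop keeps carving pieces off the same bvec -- each pass clips the clone to what the current target can take (max), advances ci->sector and the byte offset into the bvec, and for every piece after the first looks up the next target and allocates a fresh tio. A runnable userspace sketch of just the splitting arithmetic, with invented boundary sizes:

    /* Illustrative only: simulates splitting one bvec's worth of sectors
     * across consecutive targets.  target_limit() stands in for
     * max_io_len(); all numbers are made up.
     */
    #include <stdio.h>

    typedef unsigned long long sector_t;

    /* pretend every target ends on a 128-sector boundary */
    static sector_t target_limit(sector_t sector)
    {
            return 128 - (sector & 127);
    }

    int main(void)
    {
            sector_t sector = 100;          /* start partway into a target */
            sector_t remaining = 300;       /* bvec length in sectors      */
            unsigned int offset = 0;        /* byte offset into the bvec   */
            sector_t len;

            do {
                    sector_t max = target_limit(sector);

                    len = remaining < max ? remaining : max;
                    printf("clone: sector %llu, len %llu, bvec offset %u\n",
                           sector, len, offset);

                    sector += len;
                    offset += (unsigned int)(len << 9);   /* to_bytes(len) */
            } while (remaining -= len);    /* same loop test as the patch */

            return 0;
    }

The pieces printed are 28, 128, 128 and 16 sectors, which add back up to the original 300.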
@@ -840,7 +856,7 @@ static struct mapped_device *alloc_dev(unsigned int minor, int persistent)
bad3:
mempool_destroy(md->io_pool);
bad2:
- blk_put_queue(md->queue);
+ blk_cleanup_queue(md->queue);
free_minor(minor);
bad1:
kfree(md);
@@ -849,12 +865,18 @@ static struct mapped_device *alloc_dev(unsigned int minor, int persistent)
static void free_dev(struct mapped_device *md)
{
- free_minor(md->disk->first_minor);
+ unsigned int minor = md->disk->first_minor;
+
+ if (md->suspended_bdev) {
+ thaw_bdev(md->suspended_bdev, NULL);
+ bdput(md->suspended_bdev);
+ }
mempool_destroy(md->tio_pool);
mempool_destroy(md->io_pool);
del_gendisk(md->disk);
+ free_minor(minor);
put_disk(md->disk);
- blk_put_queue(md->queue);
+ blk_cleanup_queue(md->queue);
kfree(md);
}
diff --git a/drivers/md/md.c b/drivers/md/md.c
index d05e3125d29..5ed2228745c 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -213,8 +213,11 @@ static void mddev_put(mddev_t *mddev)
return;
if (!mddev->raid_disks && list_empty(&mddev->disks)) {
list_del(&mddev->all_mddevs);
- blk_put_queue(mddev->queue);
+ /* that blocks */
+ blk_cleanup_queue(mddev->queue);
+ /* that also blocks */
kobject_unregister(&mddev->kobj);
+ /* result blows... */
}
spin_unlock(&all_mddevs_lock);
}
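Note (not part of the patch): dm.c and md.c both switch queue teardown from blk_put_queue() (which only drops a reference) to blk_cleanup_queue(), the proper destructor for a queue the driver allocated itself. The comments added in mddev_put() flag that blk_cleanup_queue() and kobject_unregister() can block, which is uncomfortable here because all_mddevs_lock is still held until the spin_unlock() below them. The free_dev() hunk also thaws and releases a still-frozen suspended_bdev and, more subtly, copies first_minor out of md->disk up front so free_minor() runs on a saved value rather than on a disk that del_gendisk()/put_disk() may already have torn down. That capture-before-teardown ordering, sketched against dm.c's private helpers:

    /* Illustrative only -- a shape sketch in the style of free_dev(); it
     * assumes dm.c's private struct mapped_device and free_minor().
     */
    static void teardown_sketch(struct mapped_device *md)
    {
            unsigned int minor = md->disk->first_minor;  /* capture early */

            del_gendisk(md->disk);          /* disk leaves the system      */
            free_minor(minor);              /* uses the saved copy, not    */
                                            /* md->disk->first_minor       */
            put_disk(md->disk);             /* may free the gendisk        */
            blk_cleanup_queue(md->queue);   /* full teardown, can block    */
    }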
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index d39f584cd8b..5d88329e3c7 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -306,6 +306,7 @@ static int raid1_end_write_request(struct bio *bio, unsigned int bytes_done, int
r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private);
int mirror, behind = test_bit(R1BIO_BehindIO, &r1_bio->state);
conf_t *conf = mddev_to_conf(r1_bio->mddev);
+ struct bio *to_put = NULL;
if (bio->bi_size)
return 1;
@@ -323,6 +324,7 @@ static int raid1_end_write_request(struct bio *bio, unsigned int bytes_done, int
* this branch is our 'one mirror IO has finished' event handler:
*/
r1_bio->bios[mirror] = NULL;
+ to_put = bio;
if (!uptodate) {
md_error(r1_bio->mddev, conf->mirrors[mirror].rdev);
/* an I/O failed, we can't clear the bitmap */
@@ -375,7 +377,7 @@ static int raid1_end_write_request(struct bio *bio, unsigned int bytes_done, int
/* Don't dec_pending yet, we want to hold
* the reference over the retry
*/
- return 0;
+ goto out;
}
if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
/* free extra copy of the data pages */
@@ -392,10 +394,11 @@ static int raid1_end_write_request(struct bio *bio, unsigned int bytes_done, int
raid_end_bio_io(r1_bio);
}
- if (r1_bio->bios[mirror]==NULL)
- bio_put(bio);
-
rdev_dec_pending(conf->mirrors[mirror].rdev, conf->mddev);
+ out:
+ if (to_put)
+ bio_put(to_put);
+
return 0;
}
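Note (not part of the patch): the raid1_end_write_request() change closes a window where the function touched r1_bio after it could already be gone. raid_end_bio_io() can hand the r1_bio back to its mempool, so the old trailing "if (r1_bio->bios[mirror]==NULL) bio_put(bio);" could dereference an r1_bio that had just been freed. The new code captures the bio in to_put at the point the slot is cleared and drops it once, on the shared exit path; that also lets the behind-write retry branch "goto out" so the bio is still released while only rdev_dec_pending() is skipped (the retry keeps that reference, as the comment says). The idiom in isolation, with made-up names:

    /* Illustrative only -- a generic "record now, release once at the end"
     * shape; handle_completion() and its helpers are invented.
     */
    struct resource;
    void finish(struct resource *r);     /* may free r's owner            */
    void release(struct resource *r);    /* drop our own reference        */
    void drop_device_ref(void);          /* the step the retry path skips */

    static int handle_completion(struct resource *r, int need_retry)
    {
            struct resource *to_put = r;    /* capture while r is valid    */

            finish(r);                      /* owner may be freed here     */

            if (need_retry)
                    goto out;               /* keep the device reference   */

            drop_device_ref();              /* normal completion only      */
    out:
            release(to_put);                /* always dropped exactly once */
            return 0;
    }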
@@ -857,7 +860,7 @@ static int make_request(request_queue_t *q, struct bio * bio)
atomic_set(&r1_bio->remaining, 0);
atomic_set(&r1_bio->behind_remaining, 0);
- do_barriers = bio->bi_rw & BIO_RW_BARRIER;
+ do_barriers = bio_barrier(bio);
if (do_barriers)
set_bit(R1BIO_Barrier, &r1_bio->state);
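Note (not part of the patch): in bio.h of this vintage BIO_RW_BARRIER is a bit number (2), not a mask, so the removed open-coded "bio->bi_rw & BIO_RW_BARRIER" actually tested bit 1 (BIO_RW_AHEAD) and missed genuine barrier requests; bio_barrier() applies the "1 <<" shift and tests the right bit. A runnable demonstration with the flag values written out:

    /* Illustrative only; the constants mirror the 2.6-era <linux/bio.h>
     * values, where BIO_RW_BARRIER is a bit *number*, not a mask.
     */
    #include <stdio.h>

    #define BIO_RW_AHEAD    1
    #define BIO_RW_BARRIER  2

    int main(void)
    {
            unsigned long bi_rw = 1UL << BIO_RW_BARRIER;    /* a barrier bio */

            /* old open-coded test: really checks bit 1 (BIO_RW_AHEAD) */
            printf("broken test: %lu\n", bi_rw & BIO_RW_BARRIER);           /* 0 */

            /* what bio_barrier(bio) does: test bit number BIO_RW_BARRIER */
            printf("bio_barrier: %lu\n", bi_rw & (1UL << BIO_RW_BARRIER));  /* 4 */

            return 0;
    }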