From e1defc4ff0cf57aca6c5e3ff99fa503f5943c1f1 Mon Sep 17 00:00:00 2001 From: "Martin K. Petersen" Date: Fri, 22 May 2009 17:17:49 -0400 Subject: block: Do away with the notion of hardsect_size Until now we have had a 1:1 mapping between storage device physical block size and the logical block size used when addressing the device. With SATA 4KB drives coming out that will no longer be the case. The sector size will be 4KB but the logical block size will remain 512 bytes. Hence we need to distinguish between the physical block size and the logical block size. This patch renames hardsect_size to logical_block_size. Signed-off-by: Martin K. Petersen Signed-off-by: Jens Axboe --- include/linux/device-mapper.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux/device-mapper.h') diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h index ded2d7c4266..49c2362977f 100644 --- a/include/linux/device-mapper.h +++ b/include/linux/device-mapper.h @@ -149,7 +149,7 @@ struct io_restrictions { unsigned max_hw_sectors; unsigned max_sectors; unsigned max_segment_size; - unsigned short hardsect_size; + unsigned short logical_block_size; unsigned short max_hw_segments; unsigned short max_phys_segments; unsigned char no_cluster; /* inverted so that 0 is default */ -- cgit v1.2.3-70-g09d2 From f9ab94cee313746573b2d693bc2afb807ebb0998 Mon Sep 17 00:00:00 2001 From: Mikulas Patocka Date: Mon, 22 Jun 2009 10:12:20 +0100 Subject: dm: introduce num_flush_requests Introduce num_flush_requests, which a target sets to indicate how many flush instructions (empty barriers) it wants to receive. These are sent by __clone_and_map_empty_barrier with map_info->flush_request going from 0 to (num_flush_requests - 1). Old targets without flush support won't receive any flush requests.
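For illustration — this sketch is not part of the patch — here is how a hypothetical target that writes to two underlying devices might honour the new contract; struct example_c and the function names are invented:

#include <linux/device-mapper.h>

/* hypothetical per-target context: two underlying devices */
struct example_c {
	struct dm_dev *dev[2];
};

static int example_ctr(struct dm_target *ti, unsigned argc, char **argv)
{
	/* ... dm_get_device() each device, store context in ti->private ... */
	ti->num_flush_requests = 2;	/* one empty barrier per device */
	return 0;
}

static int example_map(struct dm_target *ti, struct bio *bio,
		       union map_info *map_context)
{
	struct example_c *ec = ti->private;

	if (unlikely(bio_empty_barrier(bio))) {
		/* flush_request runs from 0 to num_flush_requests - 1 */
		bio->bi_bdev = ec->dev[map_context->flush_request]->bdev;
		return DM_MAPIO_REMAPPED;
	}
	/* ... normal data path ... */
	return DM_MAPIO_REMAPPED;
}

Each empty barrier is thereby remapped to exactly one of the underlying devices, which is what the header comment added below means by making the remapping the target driver's responsibility.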
Signed-off-by: Mikulas Patocka Signed-off-by: Alasdair G Kergon --- drivers/md/dm.c | 39 +++++++++++++++++++++++++++++++++++++++ include/linux/device-mapper.h | 11 +++++++++++ 2 files changed, 50 insertions(+) (limited to 'include/linux/device-mapper.h') diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 7d9ca709433..badb7519ccc 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -750,6 +750,40 @@ static struct bio *clone_bio(struct bio *bio, sector_t sector, return clone; } +static void __flush_target(struct clone_info *ci, struct dm_target *ti, + unsigned flush_nr) +{ + struct dm_target_io *tio = alloc_tio(ci->md); + struct bio *clone; + + tio->io = ci->io; + tio->ti = ti; + + memset(&tio->info, 0, sizeof(tio->info)); + tio->info.flush_request = flush_nr; + + clone = bio_alloc_bioset(GFP_NOIO, 0, ci->md->bs); + __bio_clone(clone, ci->bio); + clone->bi_destructor = dm_bio_destructor; + + __map_bio(ti, clone, tio); +} + +static int __clone_and_map_empty_barrier(struct clone_info *ci) +{ + unsigned target_nr = 0, flush_nr; + struct dm_target *ti; + + while ((ti = dm_table_get_target(ci->map, target_nr++))) + for (flush_nr = 0; flush_nr < ti->num_flush_requests; + flush_nr++) + __flush_target(ci, ti, flush_nr); + + ci->sector_count = 0; + + return 0; +} + static int __clone_and_map(struct clone_info *ci) { struct bio *clone, *bio = ci->bio; @@ -757,6 +791,9 @@ static int __clone_and_map(struct clone_info *ci) sector_t len = 0, max; struct dm_target_io *tio; + if (unlikely(bio_empty_barrier(bio))) + return __clone_and_map_empty_barrier(ci); + ti = dm_table_find_target(ci->map, ci->sector); if (!dm_target_is_valid(ti)) return -EIO; @@ -877,6 +914,8 @@ static void __split_and_process_bio(struct mapped_device *md, struct bio *bio) ci.io->md = md; ci.sector = bio->bi_sector; ci.sector_count = bio_sectors(bio); + if (unlikely(bio_empty_barrier(bio))) + ci.sector_count = 1; ci.idx = bio->bi_idx; start_io_acct(ci.io); diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h index 49c2362977f..fc36a4d0772 100644 --- a/include/linux/device-mapper.h +++ b/include/linux/device-mapper.h @@ -21,6 +21,7 @@ typedef enum { STATUSTYPE_INFO, STATUSTYPE_TABLE } status_type_t; union map_info { void *ptr; unsigned long long ll; + unsigned flush_request; }; /* @@ -167,6 +168,16 @@ struct dm_target { /* Always a power of 2 */ sector_t split_io; + /* + * A number of zero-length barrier requests that will be submitted + * to the target for the purpose of flushing cache. + * + * The request number will be placed in union map_info->flush_request. + * It is a responsibility of the target driver to remap these requests + * to the real underlying devices. + */ + unsigned num_flush_requests; + /* * These are automatically filled in by * dm_table_get_device. -- cgit v1.2.3-70-g09d2 From 5ab97588fb266187b88d1ad893251c94388f18ba Mon Sep 17 00:00:00 2001 From: Mike Snitzer Date: Mon, 22 Jun 2009 10:12:32 +0100 Subject: dm table: replace struct io_restrictions with struct queue_limits Use blk_stack_limits() to stack block limits (including topology) rather than duplicate the equivalent within Device Mapper. 
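As a hedged aside (illustrative names, not code from this patch): blk_stack_limits() lowers restrictive fields such as max_sectors while raising granularity fields such as logical_block_size, and returns a negative value when the bottom device is misaligned relative to the top. A consumer stacking two queues might look like this, assuming blk_set_default_limits() (which a later patch in this series uses) is available:

#include <linux/blkdev.h>

/* stack the limits of two bottom queues into one top-level set */
static int example_stack_limits(struct queue_limits *top,
				struct request_queue *q1,
				struct request_queue *q2)
{
	int r = 0;

	blk_set_default_limits(top);	/* start from permissive defaults */

	/* each call tightens 'top'; a negative return flags misalignment */
	if (blk_stack_limits(top, &q1->limits, 0) < 0 ||
	    blk_stack_limits(top, &q2->limits, 0) < 0)
		r = -1;

	return r;
}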
Signed-off-by: Mike Snitzer Signed-off-by: Alasdair G Kergon --- drivers/md/dm-table.c | 138 +++++++++++++----------------------------- include/linux/device-mapper.h | 16 +---- 2 files changed, 45 insertions(+), 109 deletions(-) (limited to 'include/linux/device-mapper.h') diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index e3bcfb8b15a..41ec2bf9fbe 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -66,7 +66,7 @@ struct dm_table { * These are optimistic limits taken from all the * targets, some targets will need smaller limits. */ - struct io_restrictions limits; + struct queue_limits limits; /* events get handed up using this callback */ void (*event_fn)(void *); @@ -88,43 +88,6 @@ static unsigned int int_log(unsigned int n, unsigned int base) return result; } -/* - * Returns the minimum that is _not_ zero, unless both are zero. - */ -#define min_not_zero(l, r) (l == 0) ? r : ((r == 0) ? l : min(l, r)) - -/* - * Combine two io_restrictions, always taking the lower value. - */ -static void combine_restrictions_low(struct io_restrictions *lhs, - struct io_restrictions *rhs) -{ - lhs->max_sectors = - min_not_zero(lhs->max_sectors, rhs->max_sectors); - - lhs->max_phys_segments = - min_not_zero(lhs->max_phys_segments, rhs->max_phys_segments); - - lhs->max_hw_segments = - min_not_zero(lhs->max_hw_segments, rhs->max_hw_segments); - - lhs->logical_block_size = max(lhs->logical_block_size, - rhs->logical_block_size); - - lhs->max_segment_size = - min_not_zero(lhs->max_segment_size, rhs->max_segment_size); - - lhs->max_hw_sectors = - min_not_zero(lhs->max_hw_sectors, rhs->max_hw_sectors); - - lhs->seg_boundary_mask = - min_not_zero(lhs->seg_boundary_mask, rhs->seg_boundary_mask); - - lhs->bounce_pfn = min_not_zero(lhs->bounce_pfn, rhs->bounce_pfn); - - lhs->no_cluster |= rhs->no_cluster; -} - /* * Calculate the index of the child node of the n'th node k'th key. */ @@ -511,10 +474,14 @@ static int __table_get_device(struct dm_table *t, struct dm_target *ti, return 0; } +/* + * Returns the minimum that is _not_ zero, unless both are zero. + */ +#define min_not_zero(l, r) (l == 0) ? r : ((r == 0) ? l : min(l, r)) + void dm_set_device_limits(struct dm_target *ti, struct block_device *bdev) { struct request_queue *q = bdev_get_queue(bdev); - struct io_restrictions *rs = &ti->limits; char b[BDEVNAME_SIZE]; if (unlikely(!q)) { @@ -523,15 +490,9 @@ void dm_set_device_limits(struct dm_target *ti, struct block_device *bdev) return; } - /* - * Combine the device limits low. - * - * FIXME: if we move an io_restriction struct - * into q this would just be a call to - * combine_restrictions_low() - */ - rs->max_sectors = - min_not_zero(rs->max_sectors, queue_max_sectors(q)); + if (blk_stack_limits(&ti->limits, &q->limits, 0) < 0) + DMWARN("%s: target device %s is misaligned", + dm_device_name(ti->table->md), bdevname(bdev, b)); /* * Check if merge fn is supported. 
@@ -540,33 +501,9 @@ void dm_set_device_limits(struct dm_target *ti, struct block_device *bdev) */ if (q->merge_bvec_fn && !ti->type->merge) - rs->max_sectors = - min_not_zero(rs->max_sectors, + ti->limits.max_sectors = + min_not_zero(ti->limits.max_sectors, (unsigned int) (PAGE_SIZE >> 9)); - - rs->max_phys_segments = - min_not_zero(rs->max_phys_segments, - queue_max_phys_segments(q)); - - rs->max_hw_segments = - min_not_zero(rs->max_hw_segments, queue_max_hw_segments(q)); - - rs->logical_block_size = max(rs->logical_block_size, - queue_logical_block_size(q)); - - rs->max_segment_size = - min_not_zero(rs->max_segment_size, queue_max_segment_size(q)); - - rs->max_hw_sectors = - min_not_zero(rs->max_hw_sectors, queue_max_hw_sectors(q)); - - rs->seg_boundary_mask = - min_not_zero(rs->seg_boundary_mask, - queue_segment_boundary(q)); - - rs->bounce_pfn = min_not_zero(rs->bounce_pfn, queue_bounce_pfn(q)); - - rs->no_cluster |= !test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags); } EXPORT_SYMBOL_GPL(dm_set_device_limits); @@ -704,24 +641,32 @@ int dm_split_args(int *argc, char ***argvp, char *input) return 0; } -static void check_for_valid_limits(struct io_restrictions *rs) +static void init_valid_queue_limits(struct queue_limits *limits) { - if (!rs->max_sectors) - rs->max_sectors = SAFE_MAX_SECTORS; - if (!rs->max_hw_sectors) - rs->max_hw_sectors = SAFE_MAX_SECTORS; - if (!rs->max_phys_segments) - rs->max_phys_segments = MAX_PHYS_SEGMENTS; - if (!rs->max_hw_segments) - rs->max_hw_segments = MAX_HW_SEGMENTS; - if (!rs->logical_block_size) - rs->logical_block_size = 1 << SECTOR_SHIFT; - if (!rs->max_segment_size) - rs->max_segment_size = MAX_SEGMENT_SIZE; - if (!rs->seg_boundary_mask) - rs->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK; - if (!rs->bounce_pfn) - rs->bounce_pfn = -1; + if (!limits->max_sectors) + limits->max_sectors = SAFE_MAX_SECTORS; + if (!limits->max_hw_sectors) + limits->max_hw_sectors = SAFE_MAX_SECTORS; + if (!limits->max_phys_segments) + limits->max_phys_segments = MAX_PHYS_SEGMENTS; + if (!limits->max_hw_segments) + limits->max_hw_segments = MAX_HW_SEGMENTS; + if (!limits->logical_block_size) + limits->logical_block_size = 1 << SECTOR_SHIFT; + if (!limits->physical_block_size) + limits->physical_block_size = 1 << SECTOR_SHIFT; + if (!limits->io_min) + limits->io_min = 1 << SECTOR_SHIFT; + if (!limits->max_segment_size) + limits->max_segment_size = MAX_SEGMENT_SIZE; + if (!limits->seg_boundary_mask) + limits->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK; + if (!limits->bounce_pfn) + limits->bounce_pfn = -1; + /* + * The other fields (alignment_offset, io_opt, misaligned) + * hold 0 from the kzalloc(). + */ } /* @@ -841,9 +786,12 @@ int dm_table_add_target(struct dm_table *t, const char *type, t->highs[t->num_targets++] = tgt->begin + tgt->len - 1; - /* FIXME: the plan is to combine high here and then have - * the merge fn apply the target level restrictions. 
*/ - combine_restrictions_low(&t->limits, &tgt->limits); + if (blk_stack_limits(&t->limits, &tgt->limits, 0) < 0) + DMWARN("%s: target device (start sect %llu len %llu) " + "is misaligned", + dm_device_name(t->md), + (unsigned long long) tgt->begin, + (unsigned long long) tgt->len); return 0; bad: @@ -886,7 +834,7 @@ int dm_table_complete(struct dm_table *t) int r = 0; unsigned int leaf_nodes; - check_for_valid_limits(&t->limits); + init_valid_queue_limits(&t->limits); r = validate_hardware_logical_block_alignment(t); if (r) diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h index fc36a4d0772..236880c1dc3 100644 --- a/include/linux/device-mapper.h +++ b/include/linux/device-mapper.h @@ -144,18 +144,6 @@ struct target_type { struct list_head list; }; -struct io_restrictions { - unsigned long bounce_pfn; - unsigned long seg_boundary_mask; - unsigned max_hw_sectors; - unsigned max_sectors; - unsigned max_segment_size; - unsigned short logical_block_size; - unsigned short max_hw_segments; - unsigned short max_phys_segments; - unsigned char no_cluster; /* inverted so that 0 is default */ -}; - struct dm_target { struct dm_table *table; struct target_type *type; @@ -164,7 +152,7 @@ struct dm_target { sector_t begin; sector_t len; - /* FIXME: turn this into a mask, and merge with io_restrictions */ + /* FIXME: turn this into a mask, and merge with queue_limits */ /* Always a power of 2 */ sector_t split_io; @@ -182,7 +170,7 @@ struct dm_target { * These are automatically filled in by * dm_table_get_device. */ - struct io_restrictions limits; + struct queue_limits limits; /* target specific data */ void *private; -- cgit v1.2.3-70-g09d2 From af4874e03ed82f050d5872d8c39ce64bf16b5c38 Mon Sep 17 00:00:00 2001 From: Mike Snitzer Date: Mon, 22 Jun 2009 10:12:33 +0100 Subject: dm targets: introduce iterate devices fn Add .iterate_devices to 'struct target_type' to allow a function to be called for all devices in a DM target. Implemented it for all targets except those in dm-snap.c (origin and snapshot). (The raid1 version number jumps to 1.12 because we originally reserved 1.1 to 1.11 for 'block_on_error' but ended up using 'handle_errors' instead.)
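As a usage sketch (hypothetical, not from this patch): a caller supplies a callout matching iterate_devices_callout_fn and the target invokes it once per underlying device, stopping early on a non-zero return — here simply counting a target's devices:

/* callout matching iterate_devices_callout_fn: invoked once per device */
static int count_device(struct dm_target *ti, struct dm_dev *dev,
			sector_t physical_start, void *data)
{
	unsigned *count = data;

	(*count)++;

	return 0;	/* a non-zero return would stop the iteration */
}

static unsigned example_count_devices(struct dm_target *ti)
{
	unsigned nr_devices = 0;

	if (ti->type->iterate_devices)
		ti->type->iterate_devices(ti, count_device, &nr_devices);

	return nr_devices;
}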
Signed-off-by: Mike Snitzer Signed-off-by: Alasdair G Kergon Cc: martin.petersen@oracle.com --- drivers/md/dm-crypt.c | 11 ++++++++++- drivers/md/dm-delay.c | 20 +++++++++++++++++++- drivers/md/dm-linear.c | 11 ++++++++++- drivers/md/dm-mpath.c | 23 ++++++++++++++++++++++- drivers/md/dm-raid1.c | 17 ++++++++++++++++- drivers/md/dm-stripe.c | 18 +++++++++++++++++- include/linux/device-mapper.h | 11 +++++++++++ 7 files changed, 105 insertions(+), 6 deletions(-) (limited to 'include/linux/device-mapper.h') diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index 04db6c4004a..9933eb861c7 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c @@ -1313,9 +1313,17 @@ static int crypt_merge(struct dm_target *ti, struct bvec_merge_data *bvm, return min(max_size, q->merge_bvec_fn(q, bvm, biovec)); } +static int crypt_iterate_devices(struct dm_target *ti, + iterate_devices_callout_fn fn, void *data) +{ + struct crypt_config *cc = ti->private; + + return fn(ti, cc->dev, cc->start, data); +} + static struct target_type crypt_target = { .name = "crypt", - .version= {1, 6, 0}, + .version = {1, 7, 0}, .module = THIS_MODULE, .ctr = crypt_ctr, .dtr = crypt_dtr, @@ -1326,6 +1334,7 @@ static struct target_type crypt_target = { .resume = crypt_resume, .message = crypt_message, .merge = crypt_merge, + .iterate_devices = crypt_iterate_devices, }; static int __init dm_crypt_init(void) diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c index 8ad8a9044bb..4e5b843cd4d 100644 --- a/drivers/md/dm-delay.c +++ b/drivers/md/dm-delay.c @@ -318,9 +318,26 @@ static int delay_status(struct dm_target *ti, status_type_t type, return 0; } +static int delay_iterate_devices(struct dm_target *ti, + iterate_devices_callout_fn fn, void *data) +{ + struct delay_c *dc = ti->private; + int ret = 0; + + ret = fn(ti, dc->dev_read, dc->start_read, data); + if (ret) + goto out; + + if (dc->dev_write) + ret = fn(ti, dc->dev_write, dc->start_write, data); + +out: + return ret; +} + static struct target_type delay_target = { .name = "delay", - .version = {1, 0, 2}, + .version = {1, 1, 0}, .module = THIS_MODULE, .ctr = delay_ctr, .dtr = delay_dtr, @@ -328,6 +345,7 @@ static struct target_type delay_target = { .presuspend = delay_presuspend, .resume = delay_resume, .status = delay_status, + .iterate_devices = delay_iterate_devices, }; static int __init dm_delay_init(void) diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c index ecbb17421da..9184b6deb86 100644 --- a/drivers/md/dm-linear.c +++ b/drivers/md/dm-linear.c @@ -134,9 +134,17 @@ static int linear_merge(struct dm_target *ti, struct bvec_merge_data *bvm, return min(max_size, q->merge_bvec_fn(q, bvm, biovec)); } +static int linear_iterate_devices(struct dm_target *ti, + iterate_devices_callout_fn fn, void *data) +{ + struct linear_c *lc = ti->private; + + return fn(ti, lc->dev, lc->start, data); +} + static struct target_type linear_target = { .name = "linear", - .version= {1, 0, 3}, + .version = {1, 1, 0}, .module = THIS_MODULE, .ctr = linear_ctr, .dtr = linear_dtr, @@ -144,6 +152,7 @@ static struct target_type linear_target = { .status = linear_status, .ioctl = linear_ioctl, .merge = linear_merge, + .iterate_devices = linear_iterate_devices, }; int __init dm_linear_init(void) diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c index 890c0e8ed13..f8aeaaa54af 100644 --- a/drivers/md/dm-mpath.c +++ b/drivers/md/dm-mpath.c @@ -1450,12 +1450,32 @@ static int multipath_ioctl(struct dm_target *ti, unsigned int cmd, return r ? 
: __blkdev_driver_ioctl(bdev, mode, cmd, arg); } +static int multipath_iterate_devices(struct dm_target *ti, + iterate_devices_callout_fn fn, void *data) +{ + struct multipath *m = ti->private; + struct priority_group *pg; + struct pgpath *p; + int ret = 0; + + list_for_each_entry(pg, &m->priority_groups, list) { + list_for_each_entry(p, &pg->pgpaths, list) { + ret = fn(ti, p->path.dev, ti->begin, data); + if (ret) + goto out; + } + } + +out: + return ret; +} + /*----------------------------------------------------------------- * Module setup *---------------------------------------------------------------*/ static struct target_type multipath_target = { .name = "multipath", - .version = {1, 0, 5}, + .version = {1, 1, 0}, .module = THIS_MODULE, .ctr = multipath_ctr, .dtr = multipath_dtr, @@ -1466,6 +1486,7 @@ static struct target_type multipath_target = { .status = multipath_status, .message = multipath_message, .ioctl = multipath_ioctl, + .iterate_devices = multipath_iterate_devices, }; static int __init dm_multipath_init(void) diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c index 076fbb4e967..ce8868c768c 100644 --- a/drivers/md/dm-raid1.c +++ b/drivers/md/dm-raid1.c @@ -1283,9 +1283,23 @@ static int mirror_status(struct dm_target *ti, status_type_t type, return 0; } +static int mirror_iterate_devices(struct dm_target *ti, + iterate_devices_callout_fn fn, void *data) +{ + struct mirror_set *ms = ti->private; + int ret = 0; + unsigned i; + + for (i = 0; !ret && i < ms->nr_mirrors; i++) + ret = fn(ti, ms->mirror[i].dev, + ms->mirror[i].offset, data); + + return ret; +} + static struct target_type mirror_target = { .name = "mirror", - .version = {1, 0, 20}, + .version = {1, 12, 0}, .module = THIS_MODULE, .ctr = mirror_ctr, .dtr = mirror_dtr, @@ -1295,6 +1309,7 @@ static struct target_type mirror_target = { .postsuspend = mirror_postsuspend, .resume = mirror_resume, .status = mirror_status, + .iterate_devices = mirror_iterate_devices, }; static int __init dm_mirror_init(void) diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c index c64fe827a5f..b240e85ae39 100644 --- a/drivers/md/dm-stripe.c +++ b/drivers/md/dm-stripe.c @@ -313,15 +313,31 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio, return error; } +static int stripe_iterate_devices(struct dm_target *ti, + iterate_devices_callout_fn fn, void *data) +{ + struct stripe_c *sc = ti->private; + int ret = 0; + unsigned i = 0; + + do + ret = fn(ti, sc->stripe[i].dev, + sc->stripe[i].physical_start, data); + while (!ret && ++i < sc->stripes); + + return ret; +} + static struct target_type stripe_target = { .name = "striped", - .version = {1, 1, 0}, + .version = {1, 2, 0}, .module = THIS_MODULE, .ctr = stripe_ctr, .dtr = stripe_dtr, .map = stripe_map, .end_io = stripe_end_io, .status = stripe_status, + .iterate_devices = stripe_iterate_devices, }; int __init dm_stripe_init(void) diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h index 236880c1dc3..deac3b4e5e1 100644 --- a/include/linux/device-mapper.h +++ b/include/linux/device-mapper.h @@ -11,6 +11,7 @@ #include #include +struct dm_dev; struct dm_target; struct dm_table; struct mapped_device; @@ -81,6 +82,15 @@ typedef int (*dm_ioctl_fn) (struct dm_target *ti, unsigned int cmd, typedef int (*dm_merge_fn) (struct dm_target *ti, struct bvec_merge_data *bvm, struct bio_vec *biovec, int max_size); +typedef int (*iterate_devices_callout_fn) (struct dm_target *ti, + struct dm_dev *dev, + sector_t physical_start, + void *data); + +typedef 
int (*dm_iterate_devices_fn) (struct dm_target *ti, + iterate_devices_callout_fn fn, + void *data); + /* * Returns: * 0: The target can handle the next I/O immediately. @@ -139,6 +149,7 @@ struct target_type { dm_ioctl_fn ioctl; dm_merge_fn merge; dm_busy_fn busy; + dm_iterate_devices_fn iterate_devices; /* For internal device-mapper use. */ struct list_head list; -- cgit v1.2.3-70-g09d2 From 754c5fc7ebb417b23601a6222a6005cc2e7f2913 Mon Sep 17 00:00:00 2001 From: Mike Snitzer Date: Mon, 22 Jun 2009 10:12:34 +0100 Subject: dm: calculate queue limits during resume not load Currently, device-mapper maintains a separate instance of 'struct queue_limits' for each table of each device. When the configuration of a device is to be changed, first its table is loaded and this structure is populated, then the device is 'resumed' and the calculated queue_limits are applied. This places restrictions on how userspace may process related devices, where it is often advantageous to 'load' tables for several devices at once before 'resuming' them together. As the new queue_limits only take effect after the 'resume', if they are changing and one device uses another, the latter must be 'resumed' before the former may be 'loaded'. This patch moves the calculation of these queue_limits out of the 'load' operation into 'resume'. Since we are no longer pre-calculating this struct, we no longer need to maintain copies within our dm structs. dm_set_device_limits() now passes the 'start' of the device's data area (aka pe_start) as the 'offset' to blk_stack_limits(). init_valid_queue_limits() is replaced by blk_set_default_limits(). Signed-off-by: Mike Snitzer Cc: martin.petersen@oracle.com Signed-off-by: Alasdair G Kergon --- drivers/md/dm-table.c | 185 +++++++++++++++++++++++------------------- drivers/md/dm.c | 12 ++- drivers/md/dm.h | 5 +- include/linux/device-mapper.h | 10 +-- 4 files changed, 117 insertions(+), 95 deletions(-) (limited to 'include/linux/device-mapper.h') diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index 267817edc84..09a57113955 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -62,12 +62,6 @@ struct dm_table { /* a list of devices used by this table */ struct list_head devices; - /* - * These are optimistic limits taken from all the - * targets, some targets will need smaller limits. - */ - struct queue_limits limits; - /* events get handed up using this callback */ void (*event_fn)(void *); void *event_context; @@ -346,18 +340,21 @@ static void close_dev(struct dm_dev_internal *d, struct mapped_device *md) /* * If possible, this checks an area of a destination device is valid. 
*/ -static int device_area_is_valid(struct dm_target *ti, struct block_device *bdev, - sector_t start, sector_t len) +static int device_area_is_valid(struct dm_target *ti, struct dm_dev *dev, + sector_t start, void *data) { - sector_t dev_size = i_size_read(bdev->bd_inode) >> SECTOR_SHIFT; + struct queue_limits *limits = data; + struct block_device *bdev = dev->bdev; + sector_t dev_size = + i_size_read(bdev->bd_inode) >> SECTOR_SHIFT; unsigned short logical_block_size_sectors = - ti->limits.logical_block_size >> SECTOR_SHIFT; + limits->logical_block_size >> SECTOR_SHIFT; char b[BDEVNAME_SIZE]; if (!dev_size) return 1; - if ((start >= dev_size) || (start + len > dev_size)) { + if ((start >= dev_size) || (start + ti->len > dev_size)) { DMWARN("%s: %s too small for target", dm_device_name(ti->table->md), bdevname(bdev, b)); return 0; @@ -371,16 +368,16 @@ static int device_area_is_valid(struct dm_target *ti, struct block_device *bdev, "logical block size %hu of %s", dm_device_name(ti->table->md), (unsigned long long)start, - ti->limits.logical_block_size, bdevname(bdev, b)); + limits->logical_block_size, bdevname(bdev, b)); return 0; } - if (len & (logical_block_size_sectors - 1)) { + if (ti->len & (logical_block_size_sectors - 1)) { DMWARN("%s: len=%llu not aligned to h/w " "logical block size %hu of %s", dm_device_name(ti->table->md), - (unsigned long long)len, - ti->limits.logical_block_size, bdevname(bdev, b)); + (unsigned long long)ti->len, + limits->logical_block_size, bdevname(bdev, b)); return 0; } @@ -479,18 +476,21 @@ static int __table_get_device(struct dm_table *t, struct dm_target *ti, */ #define min_not_zero(l, r) (l == 0) ? r : ((r == 0) ? l : min(l, r)) -void dm_set_device_limits(struct dm_target *ti, struct block_device *bdev) +int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev, + sector_t start, void *data) { + struct queue_limits *limits = data; + struct block_device *bdev = dev->bdev; struct request_queue *q = bdev_get_queue(bdev); char b[BDEVNAME_SIZE]; if (unlikely(!q)) { DMWARN("%s: Cannot set limits for nonexistent device %s", dm_device_name(ti->table->md), bdevname(bdev, b)); - return; + return 0; } - if (blk_stack_limits(&ti->limits, &q->limits, 0) < 0) + if (blk_stack_limits(limits, &q->limits, start) < 0) DMWARN("%s: target device %s is misaligned", dm_device_name(ti->table->md), bdevname(bdev, b)); @@ -501,32 +501,21 @@ void dm_set_device_limits(struct dm_target *ti, struct block_device *bdev) */ if (q->merge_bvec_fn && !ti->type->merge) - ti->limits.max_sectors = - min_not_zero(ti->limits.max_sectors, + limits->max_sectors = + min_not_zero(limits->max_sectors, (unsigned int) (PAGE_SIZE >> 9)); + return 0; } EXPORT_SYMBOL_GPL(dm_set_device_limits); int dm_get_device(struct dm_target *ti, const char *path, sector_t start, sector_t len, fmode_t mode, struct dm_dev **result) { - int r = __table_get_device(ti->table, ti, path, - start, len, mode, result); - - if (r) - return r; - - dm_set_device_limits(ti, (*result)->bdev); - - if (!device_area_is_valid(ti, (*result)->bdev, start, len)) { - dm_put_device(ti, *result); - *result = NULL; - return -EINVAL; - } - - return r; + return __table_get_device(ti->table, ti, path, + start, len, mode, result); } + /* * Decrement a devices use count and remove it if necessary. 
*/ @@ -641,34 +630,6 @@ int dm_split_args(int *argc, char ***argvp, char *input) return 0; } -static void init_valid_queue_limits(struct queue_limits *limits) -{ - if (!limits->max_sectors) - limits->max_sectors = SAFE_MAX_SECTORS; - if (!limits->max_hw_sectors) - limits->max_hw_sectors = SAFE_MAX_SECTORS; - if (!limits->max_phys_segments) - limits->max_phys_segments = MAX_PHYS_SEGMENTS; - if (!limits->max_hw_segments) - limits->max_hw_segments = MAX_HW_SEGMENTS; - if (!limits->logical_block_size) - limits->logical_block_size = 1 << SECTOR_SHIFT; - if (!limits->physical_block_size) - limits->physical_block_size = 1 << SECTOR_SHIFT; - if (!limits->io_min) - limits->io_min = 1 << SECTOR_SHIFT; - if (!limits->max_segment_size) - limits->max_segment_size = MAX_SEGMENT_SIZE; - if (!limits->seg_boundary_mask) - limits->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK; - if (!limits->bounce_pfn) - limits->bounce_pfn = -1; - /* - * The other fields (alignment_offset, io_opt, misaligned) - * hold 0 from the kzalloc(). - */ -} - /* * Impose necessary and sufficient conditions on a devices's table such * that any incoming bio which respects its logical_block_size can be @@ -676,14 +637,15 @@ static void init_valid_queue_limits(struct queue_limits *limits) * two or more targets, the size of each piece it gets split into must * be compatible with the logical_block_size of the target processing it. */ -static int validate_hardware_logical_block_alignment(struct dm_table *table) +static int validate_hardware_logical_block_alignment(struct dm_table *table, + struct queue_limits *limits) { /* * This function uses arithmetic modulo the logical_block_size * (in units of 512-byte sectors). */ unsigned short device_logical_block_size_sects = - table->limits.logical_block_size >> SECTOR_SHIFT; + limits->logical_block_size >> SECTOR_SHIFT; /* * Offset of the start of the next table entry, mod logical_block_size. @@ -697,6 +659,7 @@ static int validate_hardware_logical_block_alignment(struct dm_table *table) unsigned short remaining = 0; struct dm_target *uninitialized_var(ti); + struct queue_limits ti_limits; unsigned i = 0; /* @@ -705,12 +668,19 @@ static int validate_hardware_logical_block_alignment(struct dm_table *table) while (i < dm_table_get_num_targets(table)) { ti = dm_table_get_target(table, i++); + blk_set_default_limits(&ti_limits); + + /* combine all target devices' limits */ + if (ti->type->iterate_devices) + ti->type->iterate_devices(ti, dm_set_device_limits, + &ti_limits); + /* * If the remaining sectors fall entirely within this * table entry are they compatible with its logical_block_size? 
*/ if (remaining < ti->len && - remaining & ((ti->limits.logical_block_size >> + remaining & ((ti_limits.logical_block_size >> SECTOR_SHIFT) - 1)) break; /* Error */ @@ -723,11 +693,11 @@ static int validate_hardware_logical_block_alignment(struct dm_table *table) if (remaining) { DMWARN("%s: table line %u (start sect %llu len %llu) " - "not aligned to hardware logical block size %hu", + "not aligned to h/w logical block size %hu", dm_device_name(table->md), i, (unsigned long long) ti->begin, (unsigned long long) ti->len, - table->limits.logical_block_size); + limits->logical_block_size); return -EINVAL; } @@ -786,12 +756,6 @@ int dm_table_add_target(struct dm_table *t, const char *type, t->highs[t->num_targets++] = tgt->begin + tgt->len - 1; - if (blk_stack_limits(&t->limits, &tgt->limits, 0) < 0) - DMWARN("%s: target device (start sect %llu len %llu) " - "is misaligned", - dm_device_name(t->md), - (unsigned long long) tgt->begin, - (unsigned long long) tgt->len); return 0; bad: @@ -834,12 +798,6 @@ int dm_table_complete(struct dm_table *t) int r = 0; unsigned int leaf_nodes; - init_valid_queue_limits(&t->limits); - - r = validate_hardware_logical_block_alignment(t); - if (r) - return r; - /* how many indexes will the btree have ? */ leaf_nodes = dm_div_up(t->num_targets, KEYS_PER_NODE); t->depth = 1 + int_log(leaf_nodes, CHILDREN_PER_NODE); @@ -914,6 +872,57 @@ struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector) return &t->targets[(KEYS_PER_NODE * n) + k]; } +/* + * Establish the new table's queue_limits and validate them. + */ +int dm_calculate_queue_limits(struct dm_table *table, + struct queue_limits *limits) +{ + struct dm_target *uninitialized_var(ti); + struct queue_limits ti_limits; + unsigned i = 0; + + blk_set_default_limits(limits); + + while (i < dm_table_get_num_targets(table)) { + blk_set_default_limits(&ti_limits); + + ti = dm_table_get_target(table, i++); + + if (!ti->type->iterate_devices) + goto combine_limits; + + /* + * Combine queue limits of all the devices this target uses. + */ + ti->type->iterate_devices(ti, dm_set_device_limits, + &ti_limits); + + /* + * Check each device area is consistent with the target's + * overall queue limits. + */ + if (!ti->type->iterate_devices(ti, device_area_is_valid, + &ti_limits)) + return -EINVAL; + +combine_limits: + /* + * Merge this target's queue limits into the overall limits + * for the table. + */ + if (blk_stack_limits(limits, &ti_limits, 0) < 0) + DMWARN("%s: target device " + "(start sect %llu len %llu) " + "is misaligned", + dm_device_name(table->md), + (unsigned long long) ti->begin, + (unsigned long long) ti->len); + } + + return validate_hardware_logical_block_alignment(table, limits); +} + /* * Set the integrity profile for this device if all devices used have * matching profiles. @@ -953,14 +962,24 @@ no_integrity: return; } -void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q) +void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, + struct queue_limits *limits) { + /* + * Each target device in the table has a data area that should normally + * be aligned such that the DM device's alignment_offset is 0. + * FIXME: Propagate alignment_offsets up the stack and warn of + * sub-optimal or inconsistent settings. 
+ */ + limits->alignment_offset = 0; + limits->misaligned = 0; + /* * Copy table's limits to the DM device's request_queue */ - q->limits = t->limits; + q->limits = *limits; - if (t->limits.no_cluster) + if (limits->no_cluster) queue_flag_clear_unlocked(QUEUE_FLAG_CLUSTER, q); else queue_flag_set_unlocked(QUEUE_FLAG_CLUSTER, q); diff --git a/drivers/md/dm.c b/drivers/md/dm.c index a9210bb594e..f609793a92d 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -1313,7 +1313,8 @@ static void __set_size(struct mapped_device *md, sector_t size) mutex_unlock(&md->bdev->bd_inode->i_mutex); } -static int __bind(struct mapped_device *md, struct dm_table *t) +static int __bind(struct mapped_device *md, struct dm_table *t, + struct queue_limits *limits) { struct request_queue *q = md->queue; sector_t size; @@ -1337,7 +1338,7 @@ static int __bind(struct mapped_device *md, struct dm_table *t) write_lock(&md->map_lock); md->map = t; - dm_table_set_restrictions(t, q); + dm_table_set_restrictions(t, q, limits); write_unlock(&md->map_lock); return 0; @@ -1562,6 +1563,7 @@ static void dm_queue_flush(struct mapped_device *md) */ int dm_swap_table(struct mapped_device *md, struct dm_table *table) { + struct queue_limits limits; int r = -EINVAL; mutex_lock(&md->suspend_lock); @@ -1570,8 +1572,12 @@ int dm_swap_table(struct mapped_device *md, struct dm_table *table) if (!dm_suspended(md)) goto out; + r = dm_calculate_queue_limits(table, &limits); + if (r) + goto out; + __unbind(md); - r = __bind(md, table); + r = __bind(md, table, &limits); out: mutex_unlock(&md->suspend_lock); diff --git a/drivers/md/dm.h b/drivers/md/dm.h index b5935c610c4..604e85caadf 100644 --- a/drivers/md/dm.h +++ b/drivers/md/dm.h @@ -41,7 +41,10 @@ void dm_table_event_callback(struct dm_table *t, void (*fn)(void *), void *context); struct dm_target *dm_table_get_target(struct dm_table *t, unsigned int index); struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector); -void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q); +int dm_calculate_queue_limits(struct dm_table *table, + struct queue_limits *limits); +void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, + struct queue_limits *limits); struct list_head *dm_table_get_devices(struct dm_table *t); void dm_table_presuspend_targets(struct dm_table *t); void dm_table_postsuspend_targets(struct dm_table *t); diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h index deac3b4e5e1..e6bf3b8c7bf 100644 --- a/include/linux/device-mapper.h +++ b/include/linux/device-mapper.h @@ -103,7 +103,8 @@ void dm_error(const char *message); /* * Combine device limits. */ -void dm_set_device_limits(struct dm_target *ti, struct block_device *bdev); +int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev, + sector_t start, void *data); struct dm_dev { struct block_device *bdev; @@ -163,7 +164,6 @@ struct dm_target { sector_t begin; sector_t len; - /* FIXME: turn this into a mask, and merge with queue_limits */ /* Always a power of 2 */ sector_t split_io; @@ -177,12 +177,6 @@ struct dm_target { */ unsigned num_flush_requests; - /* - * These are automatically filled in by - * dm_table_get_device. 
- */ - struct queue_limits limits; - /* target specific data */ void *private; -- cgit v1.2.3-70-g09d2 From cec47e3d4a861e1d942b3a580d0bbef2700d2bb2 Mon Sep 17 00:00:00 2001 From: Kiyoshi Ueda Date: Mon, 22 Jun 2009 10:12:35 +0100 Subject: dm: prepare for request based option This patch adds core functions for request-based dm. When struct mapped device (md) is initialized, md->queue has an I/O scheduler and the following functions are used for request-based dm as the queue functions: make_request_fn: dm_make_request() prep_rq_fn: dm_prep_fn() request_fn: dm_request_fn() softirq_done_fn: dm_softirq_done() lld_busy_fn: dm_lld_busy() Actual initializations are done in another patch (PATCH 2). Below is a brief summary of how request-based dm behaves, including: - making requests from bios - cloning, mapping and dispatching requests - completing requests and bios - suspending the md - resuming the md bio to request ============== md->queue->make_request_fn() (dm_make_request()) calls __make_request() for a bio submitted to the md. Then, the bio is kept in the queue as a new request or merged into another request in the queue if possible. Cloning and Mapping =================== Cloning and mapping are done in md->queue->request_fn() (dm_request_fn()), when requests are dispatched after they are sorted by the I/O scheduler. dm_request_fn() checks the busy state of underlying devices using the target's busy() function and stops dispatching requests to keep them on the dm device's queue if busy. This helps I/O merging, since no merge is done for a request once it is dispatched to underlying devices. Actual cloning and mapping are done in dm_prep_fn() and map_request() called from dm_request_fn(). dm_prep_fn() clones not only the request but also its bios so that dm can hold bio completion in error cases and prevent the bio submitter from noticing the error. (See the "Completion" section below for details.) After the cloning, the clone is mapped by the target's map_rq() function and inserted into the underlying device's queue using blk_insert_cloned_request(). Completion ========== Request completion can be hooked by rq->end_io(), but then, all bios in the request will have been completed even in error cases, and the bio submitter will have noticed the error. To prevent the bio completion in error cases, request-based dm clones both bio and request and hooks both bio->bi_end_io() and rq->end_io(): bio->bi_end_io(): end_clone_bio() rq->end_io(): end_clone_request() A summary of the request completion flow is below: blk_end_request() for a clone request => blk_update_request() => bio->bi_end_io() == end_clone_bio() for each clone bio => Free the clone bio => Success: Complete the original bio (blk_update_request()) Error: Don't complete the original bio => blk_finish_request() => rq->end_io() == end_clone_request() => blk_complete_request() => dm_softirq_done() => Free the clone request => Success: Complete the original request (blk_end_request()) Error: Requeue the original request end_clone_bio() completes the original request on the size of the original bio in successful cases. Even if all bios in the original request are completed by that completion, the original request must not be completed yet to keep the ordering of request completion for the stacking. So end_clone_bio() uses blk_update_request() instead of blk_end_request(). In error cases, end_clone_bio() doesn't complete the original bio. It just frees the cloned bio and hands the error handling over to end_clone_request(). end_clone_request(), which is called with the queue lock held, completes the clone request and the original request in a softirq context (dm_softirq_done()), which has no queue lock, to avoid a deadlock on submission of another request during the completion: - the submitted request may be mapped to the same device - request submission requires the queue lock, which the completion path already holds The clone request has no clone bio when dm_softirq_done() is called, so target drivers can't resubmit it, even in error cases. Instead, they can ask dm core to requeue and remap the original request in that case. suspend ======= Request-based dm suspends the md by stopping md->queue. For a noflush suspend, it just stops md->queue. For a flush suspend, it inserts a marker request at the tail of md->queue and dispatches all requests in md->queue until the marker comes to the front of md->queue. Then it stops dispatching requests and waits for all dispatched requests to complete. After that, it completes the marker request, stops md->queue and wakes up the waiter on the suspend queue, md->wait. resume ====== Starts md->queue. Signed-off-by: Kiyoshi Ueda Signed-off-by: Jun'ichi Nomura Signed-off-by: Alasdair G Kergon
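Since this patch converts no target to request-based mode, here is a hedged sketch of the target-side hooks the message above describes, with hypothetical names: map_rq() points the clone at an underlying queue, and rq_end_io() decides how the original request completes.

/* hypothetical per-target context holding one underlying device */
struct example_c {
	struct dm_dev *dev;
};

/* map_rq: direct the clone at an underlying queue; on DM_MAPIO_REMAPPED,
 * dm core dispatches it via dm_dispatch_request() */
static int example_map_rq(struct dm_target *ti, struct request *clone,
			  union map_info *map_context)
{
	struct example_c *ec = ti->private;
	struct block_device *bdev = ec->dev->bdev;

	clone->q = bdev_get_queue(bdev);
	clone->rq_disk = bdev->bd_disk;

	return DM_MAPIO_REMAPPED;
}

/* rq_end_io: called from dm_softirq_done() once the clone completes */
static int example_rq_end_io(struct dm_target *ti, struct request *clone,
			     int error, union map_info *map_context)
{
	if (error == -EBUSY)
		return DM_ENDIO_REQUEUE;	/* requeue the original request */

	return error;	/* <= 0 completes the original with this status */
}

Returning DM_ENDIO_INCOMPLETE instead would tell dm core that the target has taken over completion of the I/O itself.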
--- drivers/md/dm-table.c | 14 + drivers/md/dm.c | 705 +++++++++++++++++++++++++++++++++++++++++- drivers/md/dm.h | 1 + include/linux/device-mapper.h | 9 + 4 files changed, 725 insertions(+), 4 deletions(-) (limited to 'include/linux/device-mapper.h') diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index 09a57113955..c5f784419f2 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -1080,6 +1080,20 @@ int dm_table_any_congested(struct dm_table *t, int bdi_bits) return r; } +int dm_table_any_busy_target(struct dm_table *t) +{ + unsigned i; + struct dm_target *ti; + + for (i = 0; i < t->num_targets; i++) { + ti = t->targets + i; + if (ti->type->busy && ti->type->busy(ti)) + return 1; + } + + return 0; +} + void dm_table_unplug_all(struct dm_table *t) { struct dm_dev_internal *dd; diff --git a/drivers/md/dm.c b/drivers/md/dm.c index f609793a92d..be003e5fea3 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -78,7 +78,7 @@ struct dm_rq_target_io { */ struct dm_rq_clone_bio_info { struct bio *orig; - struct request *rq; + struct dm_rq_target_io *tio; }; union map_info *dm_get_mapinfo(struct bio *bio) @@ -88,6 +88,14 @@ union map_info *dm_get_mapinfo(struct bio *bio) return NULL; } +union map_info *dm_get_rq_mapinfo(struct request *rq) +{ + if (rq && rq->end_io_data) + return &((struct dm_rq_target_io *)rq->end_io_data)->info; + return NULL; +} +EXPORT_SYMBOL_GPL(dm_get_rq_mapinfo); + #define MINOR_ALLOCED ((void *)-1) /* @@ -169,6 +177,12 @@ struct mapped_device { /* forced geometry settings */ struct hd_geometry geometry; + /* marker of flush suspend for request-based dm */ + struct request suspend_rq; + + /* For saving the address of __make_request for request based dm */ + make_request_fn *saved_make_request_fn; + /* sysfs handle */ struct kobject kobj; @@ -406,6 +420,26 @@ static void free_tio(struct mapped_device *md, struct dm_target_io *tio) mempool_free(tio, md->tio_pool); } +static struct dm_rq_target_io *alloc_rq_tio(struct mapped_device *md) +{ + return mempool_alloc(md->tio_pool, GFP_ATOMIC); +} + +static void free_rq_tio(struct dm_rq_target_io *tio) +{ + mempool_free(tio, tio->md->tio_pool); +} + +static struct dm_rq_clone_bio_info *alloc_bio_info(struct mapped_device *md) +{ + return mempool_alloc(md->io_pool,
GFP_ATOMIC); +} + +static void free_bio_info(struct dm_rq_clone_bio_info *info) +{ + mempool_free(info, info->tio->md->io_pool); +} + static void start_io_acct(struct dm_io *io) { struct mapped_device *md = io->md; @@ -615,6 +649,262 @@ static void clone_endio(struct bio *bio, int error) dec_pending(io, error); } +/* + * Partial completion handling for request-based dm + */ +static void end_clone_bio(struct bio *clone, int error) +{ + struct dm_rq_clone_bio_info *info = clone->bi_private; + struct dm_rq_target_io *tio = info->tio; + struct bio *bio = info->orig; + unsigned int nr_bytes = info->orig->bi_size; + + bio_put(clone); + + if (tio->error) + /* + * An error has already been detected on the request. + * Once error occurred, just let clone->end_io() handle + * the remainder. + */ + return; + else if (error) { + /* + * Don't notice the error to the upper layer yet. + * The error handling decision is made by the target driver, + * when the request is completed. + */ + tio->error = error; + return; + } + + /* + * I/O for the bio successfully completed. + * Notice the data completion to the upper layer. + */ + + /* + * bios are processed from the head of the list. + * So the completing bio should always be rq->bio. + * If it's not, something wrong is happening. + */ + if (tio->orig->bio != bio) + DMERR("bio completion is going in the middle of the request"); + + /* + * Update the original request. + * Do not use blk_end_request() here, because it may complete + * the original request before the clone, and break the ordering. + */ + blk_update_request(tio->orig, 0, nr_bytes); +} + +/* + * Don't touch any member of the md after calling this function because + * the md may be freed in dm_put() at the end of this function. + * Or do dm_get() before calling this function and dm_put() later. + */ +static void rq_completed(struct mapped_device *md, int run_queue) +{ + int wakeup_waiters = 0; + struct request_queue *q = md->queue; + unsigned long flags; + + spin_lock_irqsave(q->queue_lock, flags); + if (!queue_in_flight(q)) + wakeup_waiters = 1; + spin_unlock_irqrestore(q->queue_lock, flags); + + /* nudge anyone waiting on suspend queue */ + if (wakeup_waiters) + wake_up(&md->wait); + + if (run_queue) + blk_run_queue(q); + + /* + * dm_put() must be at the end of this function. See the comment above + */ + dm_put(md); +} + +static void dm_unprep_request(struct request *rq) +{ + struct request *clone = rq->special; + struct dm_rq_target_io *tio = clone->end_io_data; + + rq->special = NULL; + rq->cmd_flags &= ~REQ_DONTPREP; + + blk_rq_unprep_clone(clone); + free_rq_tio(tio); +} + +/* + * Requeue the original request of a clone. 
+ */ +void dm_requeue_unmapped_request(struct request *clone) +{ + struct dm_rq_target_io *tio = clone->end_io_data; + struct mapped_device *md = tio->md; + struct request *rq = tio->orig; + struct request_queue *q = rq->q; + unsigned long flags; + + dm_unprep_request(rq); + + spin_lock_irqsave(q->queue_lock, flags); + if (elv_queue_empty(q)) + blk_plug_device(q); + blk_requeue_request(q, rq); + spin_unlock_irqrestore(q->queue_lock, flags); + + rq_completed(md, 0); +} +EXPORT_SYMBOL_GPL(dm_requeue_unmapped_request); + +static void __stop_queue(struct request_queue *q) +{ + blk_stop_queue(q); +} + +static void stop_queue(struct request_queue *q) +{ + unsigned long flags; + + spin_lock_irqsave(q->queue_lock, flags); + __stop_queue(q); + spin_unlock_irqrestore(q->queue_lock, flags); +} + +static void __start_queue(struct request_queue *q) +{ + if (blk_queue_stopped(q)) + blk_start_queue(q); +} + +static void start_queue(struct request_queue *q) +{ + unsigned long flags; + + spin_lock_irqsave(q->queue_lock, flags); + __start_queue(q); + spin_unlock_irqrestore(q->queue_lock, flags); +} + +/* + * Complete the clone and the original request. + * Must be called without queue lock. + */ +static void dm_end_request(struct request *clone, int error) +{ + struct dm_rq_target_io *tio = clone->end_io_data; + struct mapped_device *md = tio->md; + struct request *rq = tio->orig; + + if (blk_pc_request(rq)) { + rq->errors = clone->errors; + rq->resid_len = clone->resid_len; + + if (rq->sense) + /* + * We are using the sense buffer of the original + * request. + * So setting the length of the sense data is enough. + */ + rq->sense_len = clone->sense_len; + } + + BUG_ON(clone->bio); + free_rq_tio(tio); + + blk_end_request_all(rq, error); + + rq_completed(md, 1); +} + +/* + * Request completion handler for request-based dm + */ +static void dm_softirq_done(struct request *rq) +{ + struct request *clone = rq->completion_data; + struct dm_rq_target_io *tio = clone->end_io_data; + dm_request_endio_fn rq_end_io = tio->ti->type->rq_end_io; + int error = tio->error; + + if (!(rq->cmd_flags & REQ_FAILED) && rq_end_io) + error = rq_end_io(tio->ti, clone, error, &tio->info); + + if (error <= 0) + /* The target wants to complete the I/O */ + dm_end_request(clone, error); + else if (error == DM_ENDIO_INCOMPLETE) + /* The target will handle the I/O */ + return; + else if (error == DM_ENDIO_REQUEUE) + /* The target wants to requeue the I/O */ + dm_requeue_unmapped_request(clone); + else { + DMWARN("unimplemented target endio return value: %d", error); + BUG(); + } +} + +/* + * Complete the clone and the original request with the error status + * through softirq context. + */ +static void dm_complete_request(struct request *clone, int error) +{ + struct dm_rq_target_io *tio = clone->end_io_data; + struct request *rq = tio->orig; + + tio->error = error; + rq->completion_data = clone; + blk_complete_request(rq); +} + +/* + * Complete the not-mapped clone and the original request with the error status + * through softirq context. + * Target's rq_end_io() function isn't called. + * This may be used when the target's map_rq() function fails. 
+ */ +void dm_kill_unmapped_request(struct request *clone, int error) +{ + struct dm_rq_target_io *tio = clone->end_io_data; + struct request *rq = tio->orig; + + rq->cmd_flags |= REQ_FAILED; + dm_complete_request(clone, error); +} +EXPORT_SYMBOL_GPL(dm_kill_unmapped_request); + +/* + * Called with the queue lock held + */ +static void end_clone_request(struct request *clone, int error) +{ + /* + * For just cleaning up the information of the queue in which + * the clone was dispatched. + * The clone is *NOT* freed actually here because it is alloced from + * dm own mempool and REQ_ALLOCED isn't set in clone->cmd_flags. + */ + __blk_put_request(clone->q, clone); + + /* + * Actual request completion is done in a softirq context which doesn't + * hold the queue lock. Otherwise, deadlock could occur because: + * - another request may be submitted by the upper level driver + * of the stacking during the completion + * - the submission which requires queue lock may be done + * against this queue + */ + dm_complete_request(clone, error); +} + static sector_t max_io_len(struct mapped_device *md, sector_t sector, struct dm_target *ti) { @@ -998,7 +1288,7 @@ out: * The request function that just remaps the bio built up by * dm_merge_bvec. */ -static int dm_request(struct request_queue *q, struct bio *bio) +static int _dm_request(struct request_queue *q, struct bio *bio) { int rw = bio_data_dir(bio); struct mapped_device *md = q->queuedata; @@ -1035,12 +1325,274 @@ static int dm_request(struct request_queue *q, struct bio *bio) return 0; } +static int dm_make_request(struct request_queue *q, struct bio *bio) +{ + struct mapped_device *md = q->queuedata; + + if (unlikely(bio_barrier(bio))) { + bio_endio(bio, -EOPNOTSUPP); + return 0; + } + + return md->saved_make_request_fn(q, bio); /* call __make_request() */ +} + +static int dm_request_based(struct mapped_device *md) +{ + return blk_queue_stackable(md->queue); +} + +static int dm_request(struct request_queue *q, struct bio *bio) +{ + struct mapped_device *md = q->queuedata; + + if (dm_request_based(md)) + return dm_make_request(q, bio); + + return _dm_request(q, bio); +} + +void dm_dispatch_request(struct request *rq) +{ + int r; + + if (blk_queue_io_stat(rq->q)) + rq->cmd_flags |= REQ_IO_STAT; + + rq->start_time = jiffies; + r = blk_insert_cloned_request(rq->q, rq); + if (r) + dm_complete_request(rq, r); +} +EXPORT_SYMBOL_GPL(dm_dispatch_request); + +static void dm_rq_bio_destructor(struct bio *bio) +{ + struct dm_rq_clone_bio_info *info = bio->bi_private; + struct mapped_device *md = info->tio->md; + + free_bio_info(info); + bio_free(bio, md->bs); +} + +static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig, + void *data) +{ + struct dm_rq_target_io *tio = data; + struct mapped_device *md = tio->md; + struct dm_rq_clone_bio_info *info = alloc_bio_info(md); + + if (!info) + return -ENOMEM; + + info->orig = bio_orig; + info->tio = tio; + bio->bi_end_io = end_clone_bio; + bio->bi_private = info; + bio->bi_destructor = dm_rq_bio_destructor; + + return 0; +} + +static int setup_clone(struct request *clone, struct request *rq, + struct dm_rq_target_io *tio) +{ + int r = blk_rq_prep_clone(clone, rq, tio->md->bs, GFP_ATOMIC, + dm_rq_bio_constructor, tio); + + if (r) + return r; + + clone->cmd = rq->cmd; + clone->cmd_len = rq->cmd_len; + clone->sense = rq->sense; + clone->buffer = rq->buffer; + clone->end_io = end_clone_request; + clone->end_io_data = tio; + + return 0; +} + +static int dm_rq_flush_suspending(struct mapped_device *md) +{ + 
return !md->suspend_rq.special; +} + +/* + * Called with the queue lock held. + */ +static int dm_prep_fn(struct request_queue *q, struct request *rq) +{ + struct mapped_device *md = q->queuedata; + struct dm_rq_target_io *tio; + struct request *clone; + + if (unlikely(rq == &md->suspend_rq)) { + if (dm_rq_flush_suspending(md)) + return BLKPREP_OK; + else + /* The flush suspend was interrupted */ + return BLKPREP_KILL; + } + + if (unlikely(rq->special)) { + DMWARN("Already has something in rq->special."); + return BLKPREP_KILL; + } + + tio = alloc_rq_tio(md); /* Only one for each original request */ + if (!tio) + /* -ENOMEM */ + return BLKPREP_DEFER; + + tio->md = md; + tio->ti = NULL; + tio->orig = rq; + tio->error = 0; + memset(&tio->info, 0, sizeof(tio->info)); + + clone = &tio->clone; + if (setup_clone(clone, rq, tio)) { + /* -ENOMEM */ + free_rq_tio(tio); + return BLKPREP_DEFER; + } + + rq->special = clone; + rq->cmd_flags |= REQ_DONTPREP; + + return BLKPREP_OK; +} + +static void map_request(struct dm_target *ti, struct request *rq, + struct mapped_device *md) +{ + int r; + struct request *clone = rq->special; + struct dm_rq_target_io *tio = clone->end_io_data; + + /* + * Hold the md reference here for the in-flight I/O. + * We can't rely on the reference count by device opener, + * because the device may be closed during the request completion + * when all bios are completed. + * See the comment in rq_completed() too. + */ + dm_get(md); + + tio->ti = ti; + r = ti->type->map_rq(ti, clone, &tio->info); + switch (r) { + case DM_MAPIO_SUBMITTED: + /* The target has taken the I/O to submit by itself later */ + break; + case DM_MAPIO_REMAPPED: + /* The target has remapped the I/O so dispatch it */ + dm_dispatch_request(clone); + break; + case DM_MAPIO_REQUEUE: + /* The target wants to requeue the I/O */ + dm_requeue_unmapped_request(clone); + break; + default: + if (r > 0) { + DMWARN("unimplemented target map return value: %d", r); + BUG(); + } + + /* The target wants to complete the I/O */ + dm_kill_unmapped_request(clone, r); + break; + } +} + +/* + * q->request_fn for request-based dm. + * Called with the queue lock held. + */ +static void dm_request_fn(struct request_queue *q) +{ + struct mapped_device *md = q->queuedata; + struct dm_table *map = dm_get_table(md); + struct dm_target *ti; + struct request *rq; + + /* + * For noflush suspend, check blk_queue_stopped() to immediately + * quit I/O dispatching. + */ + while (!blk_queue_plugged(q) && !blk_queue_stopped(q)) { + rq = blk_peek_request(q); + if (!rq) + goto plug_and_out; + + if (unlikely(rq == &md->suspend_rq)) { /* Flush suspend maker */ + if (queue_in_flight(q)) + /* Not quiet yet. 
Wait more */ + goto plug_and_out; + + /* This device should be quiet now */ + __stop_queue(q); + blk_start_request(rq); + __blk_end_request_all(rq, 0); + wake_up(&md->wait); + goto out; + } + + ti = dm_table_find_target(map, blk_rq_pos(rq)); + if (ti->type->busy && ti->type->busy(ti)) + goto plug_and_out; + + blk_start_request(rq); + spin_unlock(q->queue_lock); + map_request(ti, rq, md); + spin_lock_irq(q->queue_lock); + } + + goto out; + +plug_and_out: + if (!elv_queue_empty(q)) + /* Some requests still remain, retry later */ + blk_plug_device(q); + +out: + dm_table_put(map); + + return; +} + +int dm_underlying_device_busy(struct request_queue *q) +{ + return blk_lld_busy(q); +} +EXPORT_SYMBOL_GPL(dm_underlying_device_busy); + +static int dm_lld_busy(struct request_queue *q) +{ + int r; + struct mapped_device *md = q->queuedata; + struct dm_table *map = dm_get_table(md); + + if (!map || test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) + r = 1; + else + r = dm_table_any_busy_target(map); + + dm_table_put(map); + + return r; +} + static void dm_unplug_all(struct request_queue *q) { struct mapped_device *md = q->queuedata; struct dm_table *map = dm_get_table(md); if (map) { + if (dm_request_based(md)) + generic_unplug_device(q); + dm_table_unplug_all(map); dm_table_put(map); } @@ -1055,7 +1607,16 @@ static int dm_any_congested(void *congested_data, int bdi_bits) if (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) { map = dm_get_table(md); if (map) { - r = dm_table_any_congested(map, bdi_bits); + /* + * Request-based dm cares about only own queue for + * the query about congestion status of request_queue + */ + if (dm_request_based(md)) + r = md->queue->backing_dev_info.state & + bdi_bits; + else + r = dm_table_any_congested(map, bdi_bits); + dm_table_put(map); } } @@ -1458,6 +2019,8 @@ static int dm_wait_for_completion(struct mapped_device *md, int interruptible) { int r = 0; DECLARE_WAITQUEUE(wait, current); + struct request_queue *q = md->queue; + unsigned long flags; dm_unplug_all(md->queue); @@ -1467,7 +2030,14 @@ static int dm_wait_for_completion(struct mapped_device *md, int interruptible) set_current_state(interruptible); smp_mb(); - if (!atomic_read(&md->pending)) + if (dm_request_based(md)) { + spin_lock_irqsave(q->queue_lock, flags); + if (!queue_in_flight(q) && blk_queue_stopped(q)) { + spin_unlock_irqrestore(q->queue_lock, flags); + break; + } + spin_unlock_irqrestore(q->queue_lock, flags); + } else if (!atomic_read(&md->pending)) break; if (interruptible == TASK_INTERRUPTIBLE && @@ -1584,6 +2154,67 @@ out: return r; } +static void dm_rq_invalidate_suspend_marker(struct mapped_device *md) +{ + md->suspend_rq.special = (void *)0x1; +} + +static void dm_rq_abort_suspend(struct mapped_device *md, int noflush) +{ + struct request_queue *q = md->queue; + unsigned long flags; + + spin_lock_irqsave(q->queue_lock, flags); + if (!noflush) + dm_rq_invalidate_suspend_marker(md); + __start_queue(q); + spin_unlock_irqrestore(q->queue_lock, flags); +} + +static void dm_rq_start_suspend(struct mapped_device *md, int noflush) +{ + struct request *rq = &md->suspend_rq; + struct request_queue *q = md->queue; + + if (noflush) + stop_queue(q); + else { + blk_rq_init(q, rq); + blk_insert_request(q, rq, 0, NULL); + } +} + +static int dm_rq_suspend_available(struct mapped_device *md, int noflush) +{ + int r = 1; + struct request *rq = &md->suspend_rq; + struct request_queue *q = md->queue; + unsigned long flags; + + if (noflush) + return r; + + /* The marker must be protected by queue lock if it is in 
use */ + spin_lock_irqsave(q->queue_lock, flags); + if (unlikely(rq->ref_count)) { + /* + * This can happen, when the previous flush suspend was + * interrupted, the marker is still in the queue and + * this flush suspend has been invoked, because we don't + * remove the marker at the time of suspend interruption. + * We have only one marker per mapped_device, so we can't + * start another flush suspend while it is in use. + */ + BUG_ON(!rq->special); /* The marker should be invalidated */ + DMWARN("Invalidating the previous flush suspend is still in" + " progress. Please retry later."); + r = 0; + } + spin_unlock_irqrestore(q->queue_lock, flags); + + return r; +} + /* * Functions to lock and unlock any filesystem running on the * device. @@ -1623,6 +2254,53 @@ static void unlock_fs(struct mapped_device *md) * dm_bind_table, dm_suspend must be called to flush any in * flight bios and ensure that any further io gets deferred. */ +/* + * Suspend mechanism in request-based dm. + * + * After the suspend starts, further incoming requests are kept in + * the request_queue and deferred. + * Remaining requests in the request_queue at the start of suspend are flushed + * if it is flush suspend. + * The suspend completes when the following conditions have been satisfied, + * so wait for it: + * 1. q->in_flight is 0 (which means no in_flight request) + * 2. queue has been stopped (which means no request dispatching) + * + * + * Noflush suspend + * --------------- + * Noflush suspend doesn't need to dispatch remaining requests. + * So stop the queue immediately. Then, wait for all in_flight requests + * to be completed or requeued. + * + * To abort noflush suspend, start the queue. + * + * + * Flush suspend + * ------------- + * Flush suspend needs to dispatch remaining requests. So stop the queue + * after the remaining requests are completed. (Requeued request must be also + * re-dispatched and completed. Until then, we can't stop the queue.) + * + * During flushing the remaining requests, further incoming requests are also + * inserted to the same queue. To distinguish which requests are to be + * flushed, we insert a marker request to the queue at the time of starting + * flush suspend, like a barrier. + * The dispatching is blocked when the marker is found on the top of the queue. + * And the queue is stopped when all in_flight requests are completed, since + * that means the remaining requests are completely flushed. + * Then, the marker is removed from the queue. + * + * To abort flush suspend, we also need to take care of the marker, not only + * starting the queue. + * We don't remove the marker forcibly from the queue since it's against + * the block-layer manner. Instead, we put a invalidated mark on the marker. + * When the invalidated marker is found on the top of the queue, it is + * immediately removed from the queue, so it doesn't block dispatching. + * Because we have only one marker per mapped_device, we can't start another + * flush suspend until the invalidated marker is removed from the queue. + * So fail and return with -EBUSY in such a case. 
+ */ int dm_suspend(struct mapped_device *md, unsigned suspend_flags) { struct dm_table *map = NULL; @@ -1637,6 +2315,11 @@ int dm_suspend(struct mapped_device *md, unsigned suspend_flags) goto out_unlock; } + if (dm_request_based(md) && !dm_rq_suspend_available(md, noflush)) { + r = -EBUSY; + goto out_unlock; + } + map = dm_get_table(md); /* @@ -1682,6 +2365,9 @@ int dm_suspend(struct mapped_device *md, unsigned suspend_flags) flush_workqueue(md->wq); + if (dm_request_based(md)) + dm_rq_start_suspend(md, noflush); + /* * At this point no more requests are entering target request routines. * We call dm_wait_for_completion to wait for all existing requests @@ -1698,6 +2384,9 @@ int dm_suspend(struct mapped_device *md, unsigned suspend_flags) if (r < 0) { dm_queue_flush(md); + if (dm_request_based(md)) + dm_rq_abort_suspend(md, noflush); + unlock_fs(md); goto out; /* pushback list is already flushed, so skip flush */ } @@ -1739,6 +2428,14 @@ int dm_resume(struct mapped_device *md) dm_queue_flush(md); + /* + * Flushing deferred I/Os must be done after targets are resumed + * so that mapping of targets can work correctly. + * Request-based dm is queueing the deferred I/Os in its request_queue. + */ + if (dm_request_based(md)) + start_queue(md->queue); + unlock_fs(md); clear_bit(DMF_SUSPENDED, &md->flags); diff --git a/drivers/md/dm.h b/drivers/md/dm.h index 604e85caadf..8dcabb1caff 100644 --- a/drivers/md/dm.h +++ b/drivers/md/dm.h @@ -50,6 +50,7 @@ void dm_table_presuspend_targets(struct dm_table *t); void dm_table_postsuspend_targets(struct dm_table *t); int dm_table_resume_targets(struct dm_table *t); int dm_table_any_congested(struct dm_table *t, int bdi_bits); +int dm_table_any_busy_target(struct dm_table *t); /* * To check the return value from dm_table_find_target(). diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h index e6bf3b8c7bf..0d6310657f3 100644 --- a/include/linux/device-mapper.h +++ b/include/linux/device-mapper.h @@ -234,6 +234,7 @@ struct gendisk *dm_disk(struct mapped_device *md); int dm_suspended(struct mapped_device *md); int dm_noflush_suspending(struct dm_target *ti); union map_info *dm_get_mapinfo(struct bio *bio); +union map_info *dm_get_rq_mapinfo(struct request *rq); /* * Geometry functions. @@ -396,4 +397,12 @@ static inline unsigned long to_bytes(sector_t n) return (n << SECTOR_SHIFT); } +/*----------------------------------------------------------------- + * Helper for block layer and dm core operations + *---------------------------------------------------------------*/ +void dm_dispatch_request(struct request *rq); +void dm_requeue_unmapped_request(struct request *rq); +void dm_kill_unmapped_request(struct request *rq, int error); +int dm_underlying_device_busy(struct request_queue *q); + #endif /* _LINUX_DEVICE_MAPPER_H */ -- cgit v1.2.3-70-g09d2