-rw-r--r--   Documentation/device-mapper/thin-provisioning.txt |  5
-rw-r--r--   drivers/md/dm-cache-target.c                       |  2
-rw-r--r--   drivers/md/dm-mpath.c                              | 14
-rw-r--r--   drivers/md/dm-thin.c                               | 12
4 files changed, 23 insertions, 10 deletions
diff --git a/Documentation/device-mapper/thin-provisioning.txt b/Documentation/device-mapper/thin-provisioning.txt
index 05a27e9442b..2f5173500bd 100644
--- a/Documentation/device-mapper/thin-provisioning.txt
+++ b/Documentation/device-mapper/thin-provisioning.txt
@@ -309,7 +309,10 @@ ii) Status
     error_if_no_space|queue_if_no_space
 	If the pool runs out of data or metadata space, the pool will
 	either queue or error the IO destined to the data device.  The
-	default is to queue the IO until more space is added.
+	default is to queue the IO until more space is added or the
+	'no_space_timeout' expires.  The 'no_space_timeout' dm-thin-pool
+	module parameter can be used to change this timeout -- it
+	defaults to 60 seconds but may be disabled using a value of 0.
 
 iii) Messages
 
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 9380be7b189..5f054c44b48 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -2178,6 +2178,8 @@ static int cache_create(struct cache_args *ca, struct cache **result)
 	ti->num_discard_bios = 1;
 	ti->discards_supported = true;
 	ti->discard_zeroes_data_unsupported = true;
+	/* Discard bios must be split on a block boundary */
+	ti->split_discard_bios = true;
 
 	cache->features = ca->features;
 	ti->per_bio_data_size = get_per_bio_data_size(cache);
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index fa0f6cbd6a4..ebfa411d1a7 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -445,11 +445,11 @@ static int queue_if_no_path(struct multipath *m, unsigned queue_if_no_path,
 	else
 		m->saved_queue_if_no_path = queue_if_no_path;
 	m->queue_if_no_path = queue_if_no_path;
-	if (!m->queue_if_no_path)
-		dm_table_run_md_queue_async(m->ti->table);
-
 	spin_unlock_irqrestore(&m->lock, flags);
 
+	if (!queue_if_no_path)
+		dm_table_run_md_queue_async(m->ti->table);
+
 	return 0;
 }
 
@@ -954,7 +954,7 @@ out:
  */
 static int reinstate_path(struct pgpath *pgpath)
 {
-	int r = 0;
+	int r = 0, run_queue = 0;
 	unsigned long flags;
 	struct multipath *m = pgpath->pg->m;
 
@@ -978,7 +978,7 @@ static int reinstate_path(struct pgpath *pgpath)
 
 	if (!m->nr_valid_paths++) {
 		m->current_pgpath = NULL;
-		dm_table_run_md_queue_async(m->ti->table);
+		run_queue = 1;
 	} else if (m->hw_handler_name && (m->current_pg == pgpath->pg)) {
 		if (queue_work(kmpath_handlerd, &pgpath->activate_path.work))
 			m->pg_init_in_progress++;
@@ -991,6 +991,8 @@ static int reinstate_path(struct pgpath *pgpath)
 
 out:
 	spin_unlock_irqrestore(&m->lock, flags);
+	if (run_queue)
+		dm_table_run_md_queue_async(m->ti->table);
 
 	return r;
 }
@@ -1566,8 +1568,8 @@ static int multipath_ioctl(struct dm_target *ti, unsigned int cmd,
 		}
 		if (m->pg_init_required)
 			__pg_init_all_paths(m);
-		dm_table_run_md_queue_async(m->ti->table);
 		spin_unlock_irqrestore(&m->lock, flags);
+		dm_table_run_md_queue_async(m->ti->table);
 	}
 
 	return r ? : __blkdev_driver_ioctl(bdev, mode, cmd, arg);
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 2e71de8e004..242ac2ea5f2 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -27,7 +27,9 @@
 #define MAPPING_POOL_SIZE 1024
 #define PRISON_CELLS 1024
 #define COMMIT_PERIOD HZ
-#define NO_SPACE_TIMEOUT (HZ * 60)
+#define NO_SPACE_TIMEOUT_SECS 60
+
+static unsigned no_space_timeout_secs = NO_SPACE_TIMEOUT_SECS;
 
 DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(snapshot_copy_throttle,
 		"A percentage of time allocated for copy on write");
@@ -1670,6 +1672,7 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
 	struct pool_c *pt = pool->ti->private;
 	bool needs_check = dm_pool_metadata_needs_check(pool->pmd);
 	enum pool_mode old_mode = get_pool_mode(pool);
+	unsigned long no_space_timeout = ACCESS_ONCE(no_space_timeout_secs) * HZ;
 
 	/*
 	 * Never allow the pool to transition to PM_WRITE mode if user
@@ -1732,8 +1735,8 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
 		pool->process_prepared_mapping = process_prepared_mapping;
 		pool->process_prepared_discard = process_prepared_discard_passdown;
 
-		if (!pool->pf.error_if_no_space)
-			queue_delayed_work(pool->wq, &pool->no_space_timeout, NO_SPACE_TIMEOUT);
+		if (!pool->pf.error_if_no_space && no_space_timeout)
+			queue_delayed_work(pool->wq, &pool->no_space_timeout, no_space_timeout);
 		break;
 
 	case PM_WRITE:
@@ -3508,6 +3511,9 @@ static void dm_thin_exit(void)
 module_init(dm_thin_init);
 module_exit(dm_thin_exit);
 
+module_param_named(no_space_timeout, no_space_timeout_secs, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(no_space_timeout, "Out of data space queue IO timeout in seconds");
+
 MODULE_DESCRIPTION(DM_NAME " thin provisioning target");
 MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
 MODULE_LICENSE("GPL");
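
The documentation hunk above describes the new 'no_space_timeout' dm-thin-pool
module parameter.  As an illustrative sketch only (not part of this commit),
and assuming the usual sysfs layout for module parameters of the dm-thin-pool
module (exposed as dm_thin_pool), the timeout could be set at load time or
adjusted on a running system along these lines:

  # hypothetical example: disable the out-of-space timeout at module load
  modprobe dm-thin-pool no_space_timeout=0

  # hypothetical example: raise the timeout to 120 seconds at runtime
  # (writable because the parameter is declared with S_IRUGO | S_IWUSR)
  echo 120 > /sys/module/dm_thin_pool/parameters/no_space_timeout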