Diffstat (limited to 'drivers/md/dm-cache-target.c')
-rw-r--r--  drivers/md/dm-cache-target.c  |  59
1 file changed, 35 insertions(+), 24 deletions(-)
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 0df3ec085eb..29569768ffb 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -67,9 +67,11 @@ static void free_bitset(unsigned long *bits)
#define MIGRATION_COUNT_WINDOW 10
/*
- * The block size of the device holding cache data must be >= 32KB
+ * The block size of the device holding cache data must be
+ * between 32KB and 1GB.
*/
#define DATA_DEV_BLOCK_SIZE_MIN_SECTORS (32 * 1024 >> SECTOR_SHIFT)
+#define DATA_DEV_BLOCK_SIZE_MAX_SECTORS (1024 * 1024 * 1024 >> SECTOR_SHIFT)
/*
* FIXME: the cache is read/write for the time being.
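Both limits above are expressed in 512-byte sectors (SECTOR_SHIFT is 9 in the kernel), so they work out to 64 and 2097152 sectors. A minimal userspace sketch of that arithmetic, with SECTOR_SHIFT defined locally only for illustration:

#include <stdio.h>

#define SECTOR_SHIFT 9	/* 512-byte sectors, as in the kernel headers */

#define DATA_DEV_BLOCK_SIZE_MIN_SECTORS (32 * 1024 >> SECTOR_SHIFT)
#define DATA_DEV_BLOCK_SIZE_MAX_SECTORS (1024 * 1024 * 1024 >> SECTOR_SHIFT)

int main(void)
{
	/* 32KB / 512 = 64 sectors, 1GB / 512 = 2097152 sectors */
	printf("min = %d sectors, max = %d sectors\n",
	       DATA_DEV_BLOCK_SIZE_MIN_SECTORS,
	       DATA_DEV_BLOCK_SIZE_MAX_SECTORS);
	return 0;
}
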
@@ -101,6 +103,8 @@ struct cache {
struct dm_target *ti;
struct dm_target_callbacks callbacks;
+ struct dm_cache_metadata *cmd;
+
/*
* Metadata is written to this device.
*/
@@ -117,11 +121,6 @@ struct cache {
struct dm_dev *cache_dev;
/*
- * Cache features such as write-through.
- */
- struct cache_features features;
-
- /*
* Size of the origin device in _complete_ blocks and native sectors.
*/
dm_oblock_t origin_blocks;
@@ -138,8 +137,6 @@ struct cache {
uint32_t sectors_per_block;
int sectors_per_block_shift;
- struct dm_cache_metadata *cmd;
-
spinlock_t lock;
struct bio_list deferred_bios;
struct bio_list deferred_flush_bios;
@@ -148,8 +145,8 @@ struct cache {
struct list_head completed_migrations;
struct list_head need_commit_migrations;
sector_t migration_threshold;
- atomic_t nr_migrations;
wait_queue_head_t migration_wait;
+ atomic_t nr_migrations;
/*
* cache_size entries, dirty if set
@@ -160,9 +157,16 @@ struct cache {
/*
* origin_blocks entries, discarded if set.
*/
- uint32_t discard_block_size; /* a power of 2 times sectors per block */
dm_dblock_t discard_nr_blocks;
unsigned long *discard_bitset;
+ uint32_t discard_block_size; /* a power of 2 times sectors per block */
+
+ /*
+ * Rather than reconstructing the table line for the status we just
+ * save it and regurgitate.
+ */
+ unsigned nr_ctr_args;
+ const char **ctr_args;
struct dm_kcopyd_client *copier;
struct workqueue_struct *wq;
@@ -187,14 +191,12 @@ struct cache {
bool loaded_mappings:1;
bool loaded_discards:1;
- struct cache_stats stats;
-
/*
- * Rather than reconstructing the table line for the status we just
- * save it and regurgitate.
+ * Cache features such as write-through.
*/
- unsigned nr_ctr_args;
- const char **ctr_args;
+ struct cache_features features;
+
+ struct cache_stats stats;
};
struct per_bio_data {
@@ -1687,24 +1689,25 @@ static int parse_origin_dev(struct cache_args *ca, struct dm_arg_set *as,
static int parse_block_size(struct cache_args *ca, struct dm_arg_set *as,
char **error)
{
- unsigned long tmp;
+ unsigned long block_size;
if (!at_least_one_arg(as, error))
return -EINVAL;
- if (kstrtoul(dm_shift_arg(as), 10, &tmp) || !tmp ||
- tmp < DATA_DEV_BLOCK_SIZE_MIN_SECTORS ||
- tmp & (DATA_DEV_BLOCK_SIZE_MIN_SECTORS - 1)) {
+ if (kstrtoul(dm_shift_arg(as), 10, &block_size) || !block_size ||
+ block_size < DATA_DEV_BLOCK_SIZE_MIN_SECTORS ||
+ block_size > DATA_DEV_BLOCK_SIZE_MAX_SECTORS ||
+ block_size & (DATA_DEV_BLOCK_SIZE_MIN_SECTORS - 1)) {
*error = "Invalid data block size";
return -EINVAL;
}
- if (tmp > ca->cache_sectors) {
+ if (block_size > ca->cache_sectors) {
*error = "Data block size is larger than the cache device";
return -EINVAL;
}
- ca->block_size = tmp;
+ ca->block_size = block_size;
return 0;
}
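The reworked parse_block_size() now rejects anything below 32KB, above 1GB, or not a multiple of 32KB; the bitmask test works because DATA_DEV_BLOCK_SIZE_MIN_SECTORS is a power of two. A standalone sketch of the same checks, with a hypothetical block_size_valid() helper standing in for the device-mapper argument parsing:

#include <stdbool.h>
#include <stdio.h>

#define SECTOR_SHIFT 9	/* 512-byte sectors */
#define DATA_DEV_BLOCK_SIZE_MIN_SECTORS (32 * 1024 >> SECTOR_SHIFT)		/* 64 */
#define DATA_DEV_BLOCK_SIZE_MAX_SECTORS (1024 * 1024 * 1024 >> SECTOR_SHIFT)	/* 2097152 */

/* Hypothetical helper mirroring the checks in parse_block_size() above. */
static bool block_size_valid(unsigned long block_size, unsigned long cache_sectors)
{
	if (!block_size ||
	    block_size < DATA_DEV_BLOCK_SIZE_MIN_SECTORS ||
	    block_size > DATA_DEV_BLOCK_SIZE_MAX_SECTORS ||
	    block_size & (DATA_DEV_BLOCK_SIZE_MIN_SECTORS - 1))
		return false;	/* out of range, or not a multiple of 32KB */

	return block_size <= cache_sectors;	/* must fit on the cache device */
}

int main(void)
{
	printf("%d\n", block_size_valid(512, 1UL << 20));	/* 1: 256KB blocks, 512MB cache */
	printf("%d\n", block_size_valid(100, 1UL << 20));	/* 0: not a multiple of 64 sectors */
	printf("%d\n", block_size_valid(1UL << 22, 1UL << 20));	/* 0: 2GB exceeds the maximum */
	return 0;
}
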
@@ -2609,9 +2612,17 @@ static void set_discard_limits(struct cache *cache, struct queue_limits *limits)
static void cache_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
struct cache *cache = ti->private;
+ uint64_t io_opt_sectors = limits->io_opt >> SECTOR_SHIFT;
- blk_limits_io_min(limits, 0);
- blk_limits_io_opt(limits, cache->sectors_per_block << SECTOR_SHIFT);
+ /*
+ * If the system-determined stacked limits are compatible with the
+ * cache's blocksize (io_opt is a factor) do not override them.
+ */
+ if (io_opt_sectors < cache->sectors_per_block ||
+ do_div(io_opt_sectors, cache->sectors_per_block)) {
+ blk_limits_io_min(limits, 0);
+ blk_limits_io_opt(limits, cache->sectors_per_block << SECTOR_SHIFT);
+ }
set_discard_limits(cache, limits);
}
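The new check in cache_io_hints() only overrides the stacked io_min/io_opt when the already-stacked io_opt is smaller than, or not a whole multiple of, the cache block size; do_div() divides the 64-bit value in place and returns the remainder. A minimal userspace sketch of that decision, with do_div() replaced by an ordinary modulo for illustration:

#include <stdint.h>
#include <stdio.h>

#define SECTOR_SHIFT 9	/* 512-byte sectors */

/*
 * Return 1 when the stacked io_opt (in bytes) should be overridden with the
 * cache block size, mirroring the test added to cache_io_hints() above.
 */
static int should_override_io_opt(uint64_t io_opt_bytes, uint32_t sectors_per_block)
{
	uint64_t io_opt_sectors = io_opt_bytes >> SECTOR_SHIFT;

	/* Override unless io_opt is a whole multiple of the cache block size. */
	return io_opt_sectors < sectors_per_block ||
	       io_opt_sectors % sectors_per_block;
}

int main(void)
{
	uint32_t sectors_per_block = 512;	/* 256KB cache blocks */

	printf("%d\n", should_override_io_opt(0, sectors_per_block));		/* 1: nothing stacked yet */
	printf("%d\n", should_override_io_opt(512 * 1024, sectors_per_block));	/* 0: 512KB is two blocks */
	printf("%d\n", should_override_io_opt(192 * 1024, sectors_per_block));	/* 1: 192KB < one block */
	return 0;
}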