From c6eda5e81c4fcc77185117255c7419eda771f67f Mon Sep 17 00:00:00 2001
From: Mike Snitzer
Date: Fri, 31 Jan 2014 14:11:54 -0500
Subject: dm cache: move hook_info into common portion of per_bio_data structure

Commit c9d28d5d ("dm cache: promotion optimisation for writes")
incorrectly placed the 'hook_info' member in the writethrough-only
portion of the per_bio_data structure.

Given that the overwrite optimization may be used for writeback, the
'hook_info' member must be placed above the 'cache' member of the
per_bio_data structure.  Any members above 'cache' are available from
both writeback and writethrough modes' per_bio_data structure.

Signed-off-by: Mike Snitzer
Acked-by: Joe Thornber
Cc: stable@vger.kernel.org # 3.13+
---
 drivers/md/dm-cache-target.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'drivers/md/dm-cache-target.c')

diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index ffd472e015c..14256b7fce7 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -289,6 +289,7 @@ struct per_bio_data {
 	bool tick:1;
 	unsigned req_nr:2;
 	struct dm_deferred_entry *all_io_entry;
+	struct dm_hook_info hook_info;
 
 	/*
 	 * writethrough fields.  These MUST remain at the end of this
@@ -297,7 +298,6 @@ struct per_bio_data {
 	 */
 	struct cache *cache;
 	dm_cblock_t cblock;
-	struct dm_hook_info hook_info;
 	struct dm_bio_details bio_details;
 };
 
--
cgit v1.2.3-70-g09d2

From 80ae49aaed32af72630251eb06161b15cde15ac8 Mon Sep 17 00:00:00 2001
From: Mike Snitzer
Date: Fri, 31 Jan 2014 14:30:37 -0500
Subject: dm cache: do not add migration to completed list before unhooking bio

When completing an overwrite bio, in overwrite_endio(), the associated
migration should not be added to the 'completed_migrations' list until
the bio's fields are restored with dm_unhook_bio().  Otherwise,
do_worker() can race to process 'completed_migrations' before
dm_unhook_bio() -- so the bio would complete with the wrong bi_end_io.
This is unlikely to cause any problems given the current code but
should be fixed on the basis of correctness.

Also, the cache's spinlock only needs to be held when manipulating the
'completed_migrations' list -- other changes don't need protection.

Signed-off-by: Mike Snitzer
Acked-by: Joe Thornber
---
 drivers/md/dm-cache-target.c | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

(limited to 'drivers/md/dm-cache-target.c')

diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 14256b7fce7..db094446512 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -1010,13 +1010,15 @@ static void overwrite_endio(struct bio *bio, int err)
 	struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
 	unsigned long flags;
 
+	dm_unhook_bio(&pb->hook_info, bio);
+
 	if (err)
 		mg->err = true;
 
+	mg->requeue_holder = false;
+
 	spin_lock_irqsave(&cache->lock, flags);
 	list_add_tail(&mg->list, &cache->completed_migrations);
-	dm_unhook_bio(&pb->hook_info, bio);
-	mg->requeue_holder = false;
 	spin_unlock_irqrestore(&cache->lock, flags);
 
 	wake_worker(cache);
--
cgit v1.2.3-70-g09d2
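The rule behind the fix above generalizes: an object's fields must be
restored before the object is published on a list that another thread
consumes, and the lock only needs to cover the shared-list manipulation
itself.  Below is a minimal userspace sketch of that pattern -- pthreads
standing in for the kernel's spinlock and worker, and every name a
hypothetical stand-in rather than the dm-cache code:

#include <pthread.h>
#include <stdio.h>

struct migration {
	void (*end_io)(void);		/* plays the role of bio->bi_end_io */
	struct migration *next;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct migration *completed;	/* plays the role of completed_migrations */

static void restored_end_io(void)
{
	puts("worker ran the restored end_io");
}

static void complete_overwrite(struct migration *mg)
{
	/*
	 * Restore the hooked field first: the object is still private to
	 * this thread, so no lock is needed (the dm_unhook_bio() analogue).
	 */
	mg->end_io = restored_end_io;

	/*
	 * Publishing makes the object visible to the worker, so it must
	 * happen last -- and only this list manipulation needs the lock.
	 */
	pthread_mutex_lock(&list_lock);
	mg->next = completed;
	completed = mg;
	pthread_mutex_unlock(&list_lock);
}

static void *worker(void *arg)
{
	struct migration *mg = NULL;

	(void)arg;
	while (!mg) {			/* poll until something is published */
		pthread_mutex_lock(&list_lock);
		mg = completed;
		completed = NULL;
		pthread_mutex_unlock(&list_lock);
	}
	mg->end_io();			/* safe: restored before publish */
	return NULL;
}

int main(void)
{
	pthread_t t;
	struct migration mg = { 0 };

	pthread_create(&t, NULL, worker, NULL);
	complete_overwrite(&mg);
	pthread_join(t, NULL);
	return 0;
}

Had the publish come first, the worker could pop the migration and call
an end_io that had not yet been restored -- the same window the patch
closes in overwrite_endio().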
From e0d849fad746cb36a6822e4595d8ba9bf0adf7fa Mon Sep 17 00:00:00 2001
From: Heinz Mauelshagen
Date: Thu, 27 Feb 2014 22:46:48 +0100
Subject: dm cache: fix truncation bug when mapping I/O to >2TB fast device

When remapping a block to a cache fast device that is larger than 2TB,
we must not truncate the destination sector to 32 bits.  The 32-bit
temporary result of from_cblock() was being overflowed in
remap_to_cache() by the logical left shift.

Use an intermediate 64-bit type to store the 32-bit from_cblock()
result, fixing the overflow.

Signed-off-by: Heinz Mauelshagen
Signed-off-by: Mike Snitzer
Cc: stable@vger.kernel.org
---
 drivers/md/dm-cache-target.c | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

(limited to 'drivers/md/dm-cache-target.c')

diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index db094446512..1af70145fab 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -671,15 +671,16 @@ static void remap_to_cache(struct cache *cache, struct bio *bio,
 			   dm_cblock_t cblock)
 {
 	sector_t bi_sector = bio->bi_iter.bi_sector;
+	sector_t block = from_cblock(cblock);
 
 	bio->bi_bdev = cache->cache_dev->bdev;
 	if (!block_size_is_power_of_two(cache))
 		bio->bi_iter.bi_sector =
-			(from_cblock(cblock) * cache->sectors_per_block) +
+			(block * cache->sectors_per_block) +
 			sector_div(bi_sector, cache->sectors_per_block);
 	else
 		bio->bi_iter.bi_sector =
-			(from_cblock(cblock) << cache->sectors_per_block_shift) |
+			(block << cache->sectors_per_block_shift) |
 			(bi_sector & (cache->sectors_per_block - 1));
 }
 
--
cgit v1.2.3-70-g09d2
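To make the truncation concrete: with 64KiB cache blocks (128 sectors,
a shift of 7), any cache block from #33554432 upward maps to a
destination sector beyond 2^32.  The following standalone sketch uses
simplified stand-ins for sector_t, dm_cblock_t and from_cblock() --
assumptions for illustration, not the kernel definitions -- and shifts
the same block number once in 32-bit arithmetic and once through a
64-bit intermediate:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t sector_t;	/* assumed 64-bit, as on large-device configs */
typedef uint32_t dm_cblock_t;

static uint32_t from_cblock(dm_cblock_t b)
{
	return b;
}

int main(void)
{
	dm_cblock_t cblock = 70000000;	/* a block well past the 2TB boundary */
	unsigned shift = 7;		/* 64KiB blocks = 128 sectors */

	/*
	 * Buggy variant: the shift is evaluated in 32 bits, so the high
	 * bits are lost before the widening assignment to sector_t.
	 * Prints 4665032704 instead of 8960000000.
	 */
	sector_t bad = from_cblock(cblock) << shift;

	/* Fixed variant: widen to a 64-bit intermediate, then shift. */
	sector_t block = from_cblock(cblock);
	sector_t good = block << shift;

	printf("truncated sector: %llu\n", (unsigned long long)bad);
	printf("correct sector:   %llu\n", (unsigned long long)good);
	return 0;
}

The first result wraps modulo 2^32 because the helper's uint32_t return
value is shifted before it is widened; the second mirrors the kernel
fix, which widens through the 'block' intermediate before shifting.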