Diffstat (limited to 'drivers/md/dm-crypt.c')
-rw-r--r-- | drivers/md/dm-crypt.c | 96
1 file changed, 58 insertions, 38 deletions
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 4cba2d808af..fc93b9330af 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -59,7 +59,7 @@ struct dm_crypt_io {
 	int error;
 	sector_t sector;
 	struct dm_crypt_io *base_io;
-};
+} CRYPTO_MINALIGN_ATTR;
 
 struct dm_crypt_request {
 	struct convert_context *ctx;
@@ -162,6 +162,8 @@ struct crypt_config {
 	 */
 	unsigned int dmreq_start;
 
+	unsigned int per_bio_data_size;
+
 	unsigned long flags;
 	unsigned int key_size;
 	unsigned int key_parts;      /* independent parts in key buffer */
@@ -524,29 +526,26 @@ static int crypt_iv_lmk_one(struct crypt_config *cc, u8 *iv,
 			    u8 *data)
 {
 	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
-	struct {
-		struct shash_desc desc;
-		char ctx[crypto_shash_descsize(lmk->hash_tfm)];
-	} sdesc;
+	SHASH_DESC_ON_STACK(desc, lmk->hash_tfm);
 	struct md5_state md5state;
 	__le32 buf[4];
 	int i, r;
 
-	sdesc.desc.tfm = lmk->hash_tfm;
-	sdesc.desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+	desc->tfm = lmk->hash_tfm;
+	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
 
-	r = crypto_shash_init(&sdesc.desc);
+	r = crypto_shash_init(desc);
 	if (r)
 		return r;
 
 	if (lmk->seed) {
-		r = crypto_shash_update(&sdesc.desc, lmk->seed, LMK_SEED_SIZE);
+		r = crypto_shash_update(desc, lmk->seed, LMK_SEED_SIZE);
 		if (r)
 			return r;
 	}
 
 	/* Sector is always 512B, block size 16, add data of blocks 1-31 */
-	r = crypto_shash_update(&sdesc.desc, data + 16, 16 * 31);
+	r = crypto_shash_update(desc, data + 16, 16 * 31);
 	if (r)
 		return r;
 
@@ -555,12 +554,12 @@ static int crypt_iv_lmk_one(struct crypt_config *cc, u8 *iv,
 	buf[1] = cpu_to_le32((((u64)dmreq->iv_sector >> 32) & 0x00FFFFFF) | 0x80000000);
 	buf[2] = cpu_to_le32(4024);
 	buf[3] = 0;
-	r = crypto_shash_update(&sdesc.desc, (u8 *)buf, sizeof(buf));
+	r = crypto_shash_update(desc, (u8 *)buf, sizeof(buf));
 	if (r)
 		return r;
 
 	/* No MD5 padding here */
-	r = crypto_shash_export(&sdesc.desc, &md5state);
+	r = crypto_shash_export(desc, &md5state);
 	if (r)
 		return r;
 
@@ -677,10 +676,7 @@ static int crypt_iv_tcw_whitening(struct crypt_config *cc,
 	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
 	u64 sector = cpu_to_le64((u64)dmreq->iv_sector);
 	u8 buf[TCW_WHITENING_SIZE];
-	struct {
-		struct shash_desc desc;
-		char ctx[crypto_shash_descsize(tcw->crc32_tfm)];
-	} sdesc;
+	SHASH_DESC_ON_STACK(desc, tcw->crc32_tfm);
 	int i, r;
 
 	/* xor whitening with sector number */
@@ -689,16 +685,16 @@ static int crypt_iv_tcw_whitening(struct crypt_config *cc,
 	crypto_xor(&buf[8], (u8 *)&sector, 8);
 
 	/* calculate crc32 for every 32bit part and xor it */
-	sdesc.desc.tfm = tcw->crc32_tfm;
-	sdesc.desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+	desc->tfm = tcw->crc32_tfm;
+	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
 	for (i = 0; i < 4; i++) {
-		r = crypto_shash_init(&sdesc.desc);
+		r = crypto_shash_init(desc);
 		if (r)
 			goto out;
-		r = crypto_shash_update(&sdesc.desc, &buf[i * 4], 4);
+		r = crypto_shash_update(desc, &buf[i * 4], 4);
 		if (r)
 			goto out;
-		r = crypto_shash_final(&sdesc.desc, &buf[i * 4]);
+		r = crypto_shash_final(desc, &buf[i * 4]);
 		if (r)
 			goto out;
 	}
@@ -895,6 +891,15 @@ static void crypt_alloc_req(struct crypt_config *cc,
 			kcryptd_async_done, dmreq_of_req(cc, ctx->req));
 }
 
+static void crypt_free_req(struct crypt_config *cc,
+			   struct ablkcipher_request *req, struct bio *base_bio)
+{
+	struct dm_crypt_io *io = dm_per_bio_data(base_bio, cc->per_bio_data_size);
+
+	if ((struct ablkcipher_request *)(io + 1) != req)
+		mempool_free(req, cc->req_pool);
+}
+
 /*
  * Encrypt / decrypt data from one bio to another one (can be the same one)
  */
@@ -1008,12 +1013,9 @@ static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone)
 	}
 }
 
-static struct dm_crypt_io *crypt_io_alloc(struct crypt_config *cc,
-					  struct bio *bio, sector_t sector)
+static void crypt_io_init(struct dm_crypt_io *io, struct crypt_config *cc,
+			  struct bio *bio, sector_t sector)
 {
-	struct dm_crypt_io *io;
-
-	io = mempool_alloc(cc->io_pool, GFP_NOIO);
 	io->cc = cc;
 	io->base_bio = bio;
 	io->sector = sector;
@@ -1021,8 +1023,6 @@ static struct dm_crypt_io *crypt_io_alloc(struct crypt_config *cc,
 	io->base_io = NULL;
 	io->ctx.req = NULL;
 	atomic_set(&io->io_pending, 0);
-
-	return io;
 }
 
 static void crypt_inc_pending(struct dm_crypt_io *io)
@@ -1046,8 +1046,9 @@ static void crypt_dec_pending(struct dm_crypt_io *io)
 		return;
 
 	if (io->ctx.req)
-		mempool_free(io->ctx.req, cc->req_pool);
-	mempool_free(io, cc->io_pool);
+		crypt_free_req(cc, io->ctx.req, base_bio);
+	if (io != dm_per_bio_data(base_bio, cc->per_bio_data_size))
+		mempool_free(io, cc->io_pool);
 
 	if (likely(!base_io))
 		bio_endio(base_bio, error);
@@ -1255,8 +1256,8 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
 		 * between fragments, so switch to a new dm_crypt_io structure.
 		 */
 		if (unlikely(!crypt_finished && remaining)) {
-			new_io = crypt_io_alloc(io->cc, io->base_bio,
-						sector);
+			new_io = mempool_alloc(cc->io_pool, GFP_NOIO);
+			crypt_io_init(new_io, io->cc, io->base_bio, sector);
 			crypt_inc_pending(new_io);
 			crypt_convert_init(cc, &new_io->ctx, NULL,
 					   io->base_bio, sector);
@@ -1325,7 +1326,7 @@ static void kcryptd_async_done(struct crypto_async_request *async_req,
 	if (error < 0)
 		io->error = -EIO;
 
-	mempool_free(req_of_dmreq(cc, dmreq), cc->req_pool);
+	crypt_free_req(cc, req_of_dmreq(cc, dmreq), io->base_bio);
 
 	if (!atomic_dec_and_test(&ctx->cc_pending))
 		return;
@@ -1681,6 +1682,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 	unsigned int key_size, opt_params;
 	unsigned long long tmpll;
 	int ret;
+	size_t iv_size_padding;
 	struct dm_arg_set as;
 	const char *opt_string;
 	char dummy;
@@ -1717,17 +1719,33 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 
 	cc->dmreq_start = sizeof(struct ablkcipher_request);
 	cc->dmreq_start += crypto_ablkcipher_reqsize(any_tfm(cc));
-	cc->dmreq_start = ALIGN(cc->dmreq_start, crypto_tfm_ctx_alignment());
-	cc->dmreq_start += crypto_ablkcipher_alignmask(any_tfm(cc)) &
-			   ~(crypto_tfm_ctx_alignment() - 1);
+	cc->dmreq_start = ALIGN(cc->dmreq_start, __alignof__(struct dm_crypt_request));
+
+	if (crypto_ablkcipher_alignmask(any_tfm(cc)) < CRYPTO_MINALIGN) {
+		/* Allocate the padding exactly */
+		iv_size_padding = -(cc->dmreq_start + sizeof(struct dm_crypt_request))
+				& crypto_ablkcipher_alignmask(any_tfm(cc));
+	} else {
+		/*
+		 * If the cipher requires greater alignment than kmalloc
+		 * alignment, we don't know the exact position of the
+		 * initialization vector. We must assume worst case.
+		 */
+		iv_size_padding = crypto_ablkcipher_alignmask(any_tfm(cc));
+	}
 
 	cc->req_pool = mempool_create_kmalloc_pool(MIN_IOS, cc->dmreq_start +
-			sizeof(struct dm_crypt_request) + cc->iv_size);
+			sizeof(struct dm_crypt_request) + iv_size_padding + cc->iv_size);
 	if (!cc->req_pool) {
 		ti->error = "Cannot allocate crypt request mempool";
 		goto bad;
 	}
 
+	cc->per_bio_data_size = ti->per_bio_data_size =
+		ALIGN(sizeof(struct dm_crypt_io) + cc->dmreq_start +
+		      sizeof(struct dm_crypt_request) + iv_size_padding + cc->iv_size,
+		      ARCH_KMALLOC_MINALIGN);
+
 	cc->page_pool = mempool_create_page_pool(MIN_POOL_PAGES, 0);
 	if (!cc->page_pool) {
 		ti->error = "Cannot allocate page mempool";
@@ -1824,7 +1842,9 @@ static int crypt_map(struct dm_target *ti, struct bio *bio)
 		return DM_MAPIO_REMAPPED;
 	}
 
-	io = crypt_io_alloc(cc, bio, dm_target_offset(ti, bio->bi_iter.bi_sector));
+	io = dm_per_bio_data(bio, cc->per_bio_data_size);
+	crypt_io_init(io, cc, bio, dm_target_offset(ti, bio->bi_iter.bi_sector));
+	io->ctx.req = (struct ablkcipher_request *)(io + 1);
 
 	if (bio_data_dir(io->base_bio) == READ) {
 		if (kcryptd_io_read(io, GFP_NOWAIT))
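For reference, the iv_size_padding computation above relies on the identity that -(offset) & alignmask is the number of bytes needed to round offset up to the next (alignmask + 1) boundary. Below is a minimal userspace sketch of that arithmetic; the sizes are invented for illustration and are not the kernel's real structure sizes.

/* Illustration only: demonstrates the "-(offset) & alignmask" rounding trick. */
#include <stdio.h>
#include <stddef.h>

static size_t pad_to_alignmask(size_t offset, size_t alignmask)
{
	/* bytes to add so that (offset + padding) is a multiple of (alignmask + 1) */
	return -offset & alignmask;
}

int main(void)
{
	size_t dmreq_start = 200;	/* hypothetical ablkcipher_request + ctx size */
	size_t dmreq_size  = 36;	/* hypothetical sizeof(struct dm_crypt_request) */
	size_t alignmask   = 15;	/* hypothetical cipher alignmask (16-byte alignment) */
	size_t iv_size_padding = pad_to_alignmask(dmreq_start + dmreq_size, alignmask);

	printf("padding %zu, IV at offset %zu\n",
	       iv_size_padding, dmreq_start + dmreq_size + iv_size_padding);
	return 0;
}

This exact-padding path is only taken when the cipher's alignmask is below the kmalloc alignment guarantee; otherwise the patch reserves a full alignmask worth of padding, since the starting alignment of the per-bio buffer is then unknown, as the in-code comment explains.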
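The SHASH_DESC_ON_STACK conversions above replace a local struct containing a variable-length array (a GCC extension that other compilers reject) with a flat on-stack buffer plus a cast, which is essentially what the macro does. Below is a generic userspace sketch of that pattern; the names and the 64-byte context size are made up, and this is not the kernel macro itself.

/* Generic "header plus variable context on the stack" pattern, for illustration. */
#include <stdio.h>
#include <string.h>

struct desc_hdr {
	void *tfm;
	unsigned int flags;
};

/* Declare an aligned byte buffer big enough for the header plus ctxsize
 * bytes of private context, and a header pointer aliasing its start. */
#define DESC_ON_STACK(name, ctxsize)					\
	char __##name##_buf[sizeof(struct desc_hdr) + (ctxsize)]	\
		__attribute__((aligned(__alignof__(struct desc_hdr))));\
	struct desc_hdr *name = (struct desc_hdr *)__##name##_buf

int main(void)
{
	DESC_ON_STACK(desc, 64);	/* 64 bytes of algorithm-private state */

	memset(desc, 0, sizeof(struct desc_hdr) + 64);
	desc->flags = 1;
	printf("desc at %p, flags %u\n", (void *)desc, desc->flags);
	return 0;
}

The real macro in include/crypto/hash.h sizes the context with crypto_shash_descsize(), so the buffer matches whatever hash transform is passed in.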