Diffstat (limited to 'drivers/md')
-rw-r--r--  drivers/md/dm-crypt.c  41
1 file changed, 40 insertions(+), 1 deletion(-)
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index ad98ded3008..046ee516074 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -56,6 +56,7 @@ struct dm_crypt_io {
atomic_t pending;
int error;
sector_t sector;
+ struct dm_crypt_io *base_io;
};

struct dm_crypt_request {
@@ -534,6 +535,7 @@ static struct dm_crypt_io *crypt_io_alloc(struct dm_target *ti,
io->base_bio = bio;
io->sector = sector;
io->error = 0;
+ io->base_io = NULL;
atomic_set(&io->pending, 0);

return io;
@@ -547,6 +549,7 @@ static void crypt_inc_pending(struct dm_crypt_io *io)
/*
* One of the bios was finished. Check for completion of
* the whole request and correctly clean up the buffer.
+ * If base_io is set, wait for the last fragment to complete.
*/
static void crypt_dec_pending(struct dm_crypt_io *io)
{
@@ -555,7 +558,14 @@ static void crypt_dec_pending(struct dm_crypt_io *io)
if (!atomic_dec_and_test(&io->pending))
return;

- bio_endio(io->base_bio, io->error);
+ if (likely(!io->base_io))
+ bio_endio(io->base_bio, io->error);
+ else {
+ if (io->error && !io->base_io->error)
+ io->base_io->error = io->error;
+ crypt_dec_pending(io->base_io);
+ }
+
mempool_free(io, cc->io_pool);
}
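The completion hunk above implements a chained reference count: a fragment whose base_io is set never ends the original bio itself; it forwards the first error seen to the base and drops one pending reference on it, and only the base fragment finally calls bio_endio. Below is a minimal user-space sketch of the same pattern; struct fragment, frag_put and the printf completion are hypothetical stand-ins, not dm-crypt code.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for dm_crypt_io: completion state only. */
struct fragment {
	atomic_int pending;     /* outstanding sub-operations */
	int error;              /* first error observed, 0 if none */
	struct fragment *base;  /* NULL for the base (first) fragment */
};

/* Analogue of crypt_dec_pending(): drop one reference; on the last
 * one, either complete the whole request or forward to the base. */
static void frag_put(struct fragment *f)
{
	if (atomic_fetch_sub(&f->pending, 1) != 1)
		return;                /* other sub-operations pending */

	if (!f->base) {
		/* Base fragment: completes the request (bio_endio here). */
		printf("request done, error=%d\n", f->error);
	} else {
		/* Later fragment: propagate the first error, then drop
		 * the reference this fragment holds on the base. */
		if (f->error && !f->base->error)
			f->base->error = f->error;
		frag_put(f->base);
	}
	free(f);                       /* mempool_free analogue */
}

The recursion is at most one level deep: as the write path below shows, every later fragment points directly at the base fragment, never at another intermediate one.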
@@ -699,6 +709,7 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
{
struct crypt_config *cc = io->target->private;
struct bio *clone;
+ struct dm_crypt_io *new_io;
int crypt_finished;
unsigned out_of_pages = 0;
unsigned remaining = io->base_bio->bi_size;
@@ -753,6 +764,34 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
if (unlikely(out_of_pages))
congestion_wait(WRITE, HZ/100);

+ /*
+ * With async crypto it is unsafe to share the crypto context
+ * between fragments, so switch to a new dm_crypt_io structure.
+ */
+ if (unlikely(!crypt_finished && remaining)) {
+ new_io = crypt_io_alloc(io->target, io->base_bio,
+ sector);
+ crypt_inc_pending(new_io);
+ crypt_convert_init(cc, &new_io->ctx, NULL,
+ io->base_bio, sector);
+ new_io->ctx.idx_in = io->ctx.idx_in;
+ new_io->ctx.offset_in = io->ctx.offset_in;
+
+ /*
+ * Fragments after the first use the base_io
+ * pending count.
+ */
+ if (!io->base_io)
+ new_io->base_io = io;
+ else {
+ new_io->base_io = io->base_io;
+ crypt_inc_pending(io->base_io);
+ crypt_dec_pending(io);
+ }
+
+ io = new_io;
+ }
+
if (unlikely(remaining))
wait_event(cc->writeq, !atomic_read(&io->ctx.pending));
}
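The hand-off at the end of this hunk is the subtle part: when the current io is itself a non-base fragment, the new fragment is pointed at the original base, and a reference is taken on the base before the current fragment's reference is dropped, so the base's pending count cannot reach zero while a fragment still points at it. A sketch of that ordering, continuing the hypothetical frag_put() example given after the crypt_dec_pending hunk (frag_chain is likewise invented for illustration; allocation failure handling is omitted):

/* Analogue of the !crypt_finished path above: start a new fragment
 * for the unprocessed remainder and hand the base reference over. */
static struct fragment *frag_chain(struct fragment *cur)
{
	struct fragment *nf = calloc(1, sizeof(*nf));  /* crypt_io_alloc */

	atomic_init(&nf->pending, 1);    /* crypt_inc_pending(new_io) */

	if (!cur->base) {
		/* cur is the base: it keeps its own pending reference,
		 * which nf's completion will eventually release. */
		nf->base = cur;
	} else {
		/* cur is itself a later fragment: chain nf directly to
		 * the real base (keeping the chain one level deep) and
		 * take the base reference BEFORE dropping cur's, so
		 * the count cannot hit zero in between. */
		nf->base = cur->base;
		atomic_fetch_add(&nf->base->pending, 1);
		frag_put(cur);           /* crypt_dec_pending(io) */
	}
	return nf;                       /* caller continues with nf */
}

Reversing the increment and decrement would allow frag_put(cur) to release the base's last reference, and free it, while nf->base still points at it; the patch's crypt_inc_pending(io->base_io) before crypt_dec_pending(io) avoids exactly that.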