Diffstat (limited to 'drivers/md/dm-crypt.c')
 drivers/md/dm-crypt.c | 42 ++++++++++++++++--------------------------
 1 file changed, 16 insertions(+), 26 deletions(-)
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 1ae2c71bb383..02a2919f4e5a 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -59,6 +59,7 @@ struct convert_context {
 	struct bio *bio_out;
 	struct bvec_iter iter_out;
 	atomic_t cc_pending;
+	unsigned int tag_offset;
 	u64 cc_sector;
 	union {
 		struct skcipher_request *req;
@@ -1187,7 +1188,7 @@ static int dm_crypt_integrity_io_alloc(struct dm_crypt_io *io, struct bio *bio)
 
 	tag_len = io->cc->tuple_size * (bio_sectors(bio) >> io->cc->sector_shift);
 
-	bip->bip_iter.bi_sector = io->cc->start + io->sector;
+	bip->bip_iter.bi_sector = bio->bi_iter.bi_sector;
 
 	ret = bio_integrity_add_page(bio, virt_to_page(io->integrity_metadata),
 				     tag_len, offset_in_page(io->integrity_metadata));
@@ -1256,6 +1257,7 @@ static void crypt_convert_init(struct crypt_config *cc,
 	if (bio_out)
 		ctx->iter_out = bio_out->bi_iter;
 	ctx->cc_sector = sector + cc->iv_offset;
+	ctx->tag_offset = 0;
 	init_completion(&ctx->restart);
 }
 
@@ -1588,7 +1590,6 @@ static void crypt_free_req(struct crypt_config *cc, void *req, struct bio *base_
 static blk_status_t crypt_convert(struct crypt_config *cc,
 			 struct convert_context *ctx, bool atomic, bool reset_pending)
 {
-	unsigned int tag_offset = 0;
 	unsigned int sector_step = cc->sector_size >> SECTOR_SHIFT;
 	int r;
 
@@ -1611,9 +1612,9 @@ static blk_status_t crypt_convert(struct crypt_config *cc,
 		atomic_inc(&ctx->cc_pending);
 
 		if (crypt_integrity_aead(cc))
-			r = crypt_convert_block_aead(cc, ctx, ctx->r.req_aead, tag_offset);
+			r = crypt_convert_block_aead(cc, ctx, ctx->r.req_aead, ctx->tag_offset);
 		else
-			r = crypt_convert_block_skcipher(cc, ctx, ctx->r.req, tag_offset);
+			r = crypt_convert_block_skcipher(cc, ctx, ctx->r.req, ctx->tag_offset);
 
 		switch (r) {
 		/*
@@ -1633,8 +1634,8 @@ static blk_status_t crypt_convert(struct crypt_config *cc,
 					 * exit and continue processing in a workqueue
 					 */
 					ctx->r.req = NULL;
+					ctx->tag_offset++;
 					ctx->cc_sector += sector_step;
-					tag_offset++;
 					return BLK_STS_DEV_RESOURCE;
 				}
 			} else {
@@ -1648,8 +1649,8 @@ static blk_status_t crypt_convert(struct crypt_config *cc,
 		 */
 		case -EINPROGRESS:
 			ctx->r.req = NULL;
+			ctx->tag_offset++;
 			ctx->cc_sector += sector_step;
-			tag_offset++;
 			continue;
 		/*
 		 * The request was already processed (synchronously).
@@ -1657,7 +1658,7 @@ static blk_status_t crypt_convert(struct crypt_config *cc,
 		case 0:
 			atomic_dec(&ctx->cc_pending);
 			ctx->cc_sector += sector_step;
-			tag_offset++;
+			ctx->tag_offset++;
 			if (!atomic)
 				cond_resched();
 			continue;
@@ -1719,6 +1720,7 @@ retry:
 	clone->bi_private = io;
 	clone->bi_end_io = crypt_endio;
 	clone->bi_ioprio = io->base_bio->bi_ioprio;
+	clone->bi_iter.bi_sector = cc->start + io->sector;
 
 	remaining_size = size;
 
@@ -1909,7 +1911,6 @@ static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
 			crypt_dec_pending(io);
 			return 1;
 		}
-		clone->bi_iter.bi_sector = cc->start + io->sector;
 		crypt_convert_init(cc, &io->ctx, clone, clone, io->sector);
 		io->saved_bi_iter = clone->bi_iter;
 		dm_submit_bio_remap(io->base_bio, clone);
@@ -1925,13 +1926,13 @@ static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
 	clone = bio_alloc_clone(cc->dev->bdev, io->base_bio, gfp, &cc->bs);
 	if (!clone)
 		return 1;
+
+	clone->bi_iter.bi_sector = cc->start + io->sector;
 	clone->bi_private = io;
 	clone->bi_end_io = crypt_endio;
 
 	crypt_inc_pending(io);
 
-	clone->bi_iter.bi_sector = cc->start + io->sector;
-
 	if (dm_crypt_integrity_io_alloc(io, clone)) {
 		crypt_dec_pending(io);
 		bio_put(clone);
@@ -2039,8 +2040,6 @@ static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async)
 	/* crypt_convert should have filled the clone bio */
 	BUG_ON(io->ctx.iter_out.bi_size);
 
-	clone->bi_iter.bi_sector = cc->start + io->sector;
-
 	if ((likely(!async) && test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags)) ||
 	    test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags)) {
 		dm_submit_bio_remap(io->base_bio, clone);
@@ -2092,13 +2091,12 @@ static void kcryptd_crypt_write_continue(struct work_struct *work)
 	struct crypt_config *cc = io->cc;
 	struct convert_context *ctx = &io->ctx;
 	int crypt_finished;
-	sector_t sector = io->sector;
 	blk_status_t r;
 
 	wait_for_completion(&ctx->restart);
 	reinit_completion(&ctx->restart);
 
-	r = crypt_convert(cc, &io->ctx, true, false);
+	r = crypt_convert(cc, &io->ctx, false, false);
 	if (r)
 		io->error = r;
 	crypt_finished = atomic_dec_and_test(&ctx->cc_pending);
@@ -2109,10 +2107,8 @@ static void kcryptd_crypt_write_continue(struct work_struct *work)
 	}
 
 	/* Encryption was already finished, submit io now */
-	if (crypt_finished) {
+	if (crypt_finished)
 		kcryptd_crypt_write_io_submit(io, 0);
-		io->sector = sector;
-	}
 
 	crypt_dec_pending(io);
 }
@@ -2123,14 +2119,13 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
 	struct convert_context *ctx = &io->ctx;
 	struct bio *clone;
 	int crypt_finished;
-	sector_t sector = io->sector;
 	blk_status_t r;
 
 	/*
 	 * Prevent io from disappearing until this function completes.
 	 */
 	crypt_inc_pending(io);
-	crypt_convert_init(cc, ctx, NULL, io->base_bio, sector);
+	crypt_convert_init(cc, ctx, NULL, io->base_bio, io->sector);
 
 	clone = crypt_alloc_buffer(io, io->base_bio->bi_iter.bi_size);
 	if (unlikely(!clone)) {
@@ -2147,8 +2142,6 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
 		io->ctx.iter_in = clone->bi_iter;
 	}
 
-	sector += bio_sectors(clone);
-
 	crypt_inc_pending(io);
 	r = crypt_convert(cc, ctx,
 			  test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags), true);
@@ -2172,10 +2165,8 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
 	}
 
 	/* Encryption was already finished, submit io now */
-	if (crypt_finished) {
+	if (crypt_finished)
 		kcryptd_crypt_write_io_submit(io, 0);
-		io->sector = sector;
-	}
 
 dec:
 	crypt_dec_pending(io);
@@ -2203,7 +2194,7 @@ static void kcryptd_crypt_read_continue(struct work_struct *work)
 	wait_for_completion(&io->ctx.restart);
 	reinit_completion(&io->ctx.restart);
 
-	r = crypt_convert(cc, &io->ctx, true, false);
+	r = crypt_convert(cc, &io->ctx, false, false);
 	if (r)
 		io->error = r;
 
@@ -2221,7 +2212,6 @@ static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
 	crypt_inc_pending(io);
 
 	if (io->ctx.aead_recheck) {
-		io->ctx.cc_sector = io->sector + cc->iv_offset;
 		r = crypt_convert(cc, &io->ctx,
 				  test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags), true);
 	} else {