@@ -39,10 +39,8 @@ struct convert_context {
 	struct completion restart;
 	struct bio *bio_in;
 	struct bio *bio_out;
-	unsigned int offset_in;
-	unsigned int offset_out;
-	unsigned int idx_in;
-	unsigned int idx_out;
+	struct bvec_iter iter_in;
+	struct bvec_iter iter_out;
 	sector_t cc_sector;
 	atomic_t cc_pending;
 };
@@ -826,10 +824,10 @@ static void crypt_convert_init(struct crypt_config *cc,
 {
 	ctx->bio_in = bio_in;
 	ctx->bio_out = bio_out;
-	ctx->offset_in = 0;
-	ctx->offset_out = 0;
-	ctx->idx_in = bio_in ? bio_in->bi_iter.bi_idx : 0;
-	ctx->idx_out = bio_out ? bio_out->bi_iter.bi_idx : 0;
+	if (bio_in)
+		ctx->iter_in = bio_in->bi_iter;
+	if (bio_out)
+		ctx->iter_out = bio_out->bi_iter;
 	ctx->cc_sector = sector + cc->iv_offset;
 	init_completion(&ctx->restart);
 }
@@ -857,8 +855,8 @@ static int crypt_convert_block(struct crypt_config *cc,
 			       struct convert_context *ctx,
 			       struct ablkcipher_request *req)
 {
-	struct bio_vec *bv_in = bio_iovec_idx(ctx->bio_in, ctx->idx_in);
-	struct bio_vec *bv_out = bio_iovec_idx(ctx->bio_out, ctx->idx_out);
+	struct bio_vec bv_in = bio_iter_iovec(ctx->bio_in, ctx->iter_in);
+	struct bio_vec bv_out = bio_iter_iovec(ctx->bio_out, ctx->iter_out);
 	struct dm_crypt_request *dmreq;
 	u8 *iv;
 	int r;
@@ -869,24 +867,15 @@ static int crypt_convert_block(struct crypt_config *cc,
 	dmreq->iv_sector = ctx->cc_sector;
 	dmreq->ctx = ctx;
 	sg_init_table(&dmreq->sg_in, 1);
-	sg_set_page(&dmreq->sg_in, bv_in->bv_page, 1 << SECTOR_SHIFT,
-		    bv_in->bv_offset + ctx->offset_in);
+	sg_set_page(&dmreq->sg_in, bv_in.bv_page, 1 << SECTOR_SHIFT,
+		    bv_in.bv_offset);

 	sg_init_table(&dmreq->sg_out, 1);
-	sg_set_page(&dmreq->sg_out, bv_out->bv_page, 1 << SECTOR_SHIFT,
-		    bv_out->bv_offset + ctx->offset_out);
+	sg_set_page(&dmreq->sg_out, bv_out.bv_page, 1 << SECTOR_SHIFT,
+		    bv_out.bv_offset);

-	ctx->offset_in += 1 << SECTOR_SHIFT;
-	if (ctx->offset_in >= bv_in->bv_len) {
-		ctx->offset_in = 0;
-		ctx->idx_in++;
-	}
-
-	ctx->offset_out += 1 << SECTOR_SHIFT;
-	if (ctx->offset_out >= bv_out->bv_len) {
-		ctx->offset_out = 0;
-		ctx->idx_out++;
-	}
+	bio_advance_iter(ctx->bio_in, &ctx->iter_in, 1 << SECTOR_SHIFT);
+	bio_advance_iter(ctx->bio_out, &ctx->iter_out, 1 << SECTOR_SHIFT);

 	if (cc->iv_gen_ops) {
 		r = cc->iv_gen_ops->generator(cc, iv, dmreq);
@@ -937,8 +926,7 @@ static int crypt_convert(struct crypt_config *cc,

 	atomic_set(&ctx->cc_pending, 1);

-	while(ctx->idx_in < ctx->bio_in->bi_vcnt &&
-	      ctx->idx_out < ctx->bio_out->bi_vcnt) {
+	while (ctx->iter_in.bi_size && ctx->iter_out.bi_size) {

 		crypt_alloc_req(cc, ctx);

@@ -1207,7 +1195,7 @@ static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async)
 	}

 	/* crypt_convert should have filled the clone bio */
-	BUG_ON(io->ctx.idx_out < clone->bi_vcnt);
+	BUG_ON(io->ctx.iter_out.bi_size);

 	clone->bi_iter.bi_sector = cc->start + io->sector;

@@ -1246,7 +1234,7 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
 		}

 		io->ctx.bio_out = clone;
-		io->ctx.idx_out = 0;
+		io->ctx.iter_out = clone->bi_iter;

 		remaining -= clone->bi_iter.bi_size;
 		sector += bio_sectors(clone);
@@ -1290,8 +1278,7 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
 			crypt_inc_pending(new_io);
 			crypt_convert_init(cc, &new_io->ctx, NULL,
 					   io->base_bio, sector);
-			new_io->ctx.idx_in = io->ctx.idx_in;
-			new_io->ctx.offset_in = io->ctx.offset_in;
+			new_io->ctx.iter_in = io->ctx.iter_in;

 			/*
 			 * Fragments after the first use the base_io
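For reference, here is a minimal sketch (not part of the patch) of the iteration pattern the conversion adopts: rather than hand-maintaining a biovec index plus an intra-vector offset, the code takes a private copy of the bio's bvec_iter and walks it one 512-byte sector at a time with bio_iter_iovec() and bio_advance_iter(), which cross bvec boundaries automatically. The walk_bio_sectors() wrapper and its callback are hypothetical names used only for illustration; the helpers themselves come from <linux/bio.h>.

#include <linux/bio.h>
#include <linux/blkdev.h>

/*
 * Illustration only: visit each 512-byte sector of a bio via the
 * bvec_iter interface, the same pattern crypt_convert() now uses.
 */
static void walk_bio_sectors(struct bio *bio,
			     void (*fn)(struct page *page, unsigned int offset))
{
	struct bvec_iter iter = bio->bi_iter;	/* private copy; the bio itself is untouched */

	while (iter.bi_size) {
		/* bio_iter_iovec() yields the current bvec, clamped to the iterator */
		struct bio_vec bv = bio_iter_iovec(bio, iter);

		fn(bv.bv_page, bv.bv_offset);

		/* advancing past the end of a bvec steps into the next one */
		bio_advance_iter(bio, &iter, 1 << SECTOR_SHIFT);
	}
}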