@@ -19,7 +19,6 @@
 #include <linux/crypto.h>
 #include <linux/workqueue.h>
 #include <linux/backing-dev.h>
-#include <linux/percpu.h>
 #include <linux/atomic.h>
 #include <linux/scatterlist.h>
 #include <asm/page.h>
@@ -43,6 +42,7 @@ struct convert_context {
 	struct bvec_iter iter_out;
 	sector_t cc_sector;
 	atomic_t cc_pending;
+	struct ablkcipher_request *req;
 };
 
 /*
@@ -111,15 +111,7 @@ struct iv_tcw_private {
 enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID };
 
 /*
- * Duplicated per-CPU state for cipher.
- */
-struct crypt_cpu {
-	struct ablkcipher_request *req;
-};
-
-/*
- * The fields in here must be read only after initialization,
- * changing state should be in crypt_cpu.
+ * The fields in here must be read only after initialization.
  */
 struct crypt_config {
 	struct dm_dev *dev;
@@ -150,12 +142,6 @@ struct crypt_config {
 	sector_t iv_offset;
 	unsigned int iv_size;
 
-	/*
-	 * Duplicated per cpu state. Access through
-	 * per_cpu_ptr() only.
-	 */
-	struct crypt_cpu __percpu *cpu;
-
 	/* ESSIV: struct crypto_cipher *essiv_tfm */
 	void *iv_private;
 	struct crypto_ablkcipher **tfms;
@@ -192,11 +178,6 @@ static void clone_init(struct dm_crypt_io *, struct bio *);
 static void kcryptd_queue_crypt(struct dm_crypt_io *io);
 static u8 *iv_of_dmreq(struct crypt_config *cc, struct dm_crypt_request *dmreq);
 
-static struct crypt_cpu *this_crypt_config(struct crypt_config *cc)
-{
-	return this_cpu_ptr(cc->cpu);
-}
-
 /*
  * Use this to access cipher attributes that are the same for each CPU.
  */
@@ -903,16 +884,15 @@ static void kcryptd_async_done(struct crypto_async_request *async_req,
 static void crypt_alloc_req(struct crypt_config *cc,
 			    struct convert_context *ctx)
 {
-	struct crypt_cpu *this_cc = this_crypt_config(cc);
 	unsigned key_index = ctx->cc_sector & (cc->tfms_count - 1);
 
-	if (!this_cc->req)
-		this_cc->req = mempool_alloc(cc->req_pool, GFP_NOIO);
+	if (!ctx->req)
+		ctx->req = mempool_alloc(cc->req_pool, GFP_NOIO);
 
-	ablkcipher_request_set_tfm(this_cc->req, cc->tfms[key_index]);
-	ablkcipher_request_set_callback(this_cc->req,
+	ablkcipher_request_set_tfm(ctx->req, cc->tfms[key_index]);
+	ablkcipher_request_set_callback(ctx->req,
 	    CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
-	    kcryptd_async_done, dmreq_of_req(cc, this_cc->req));
+	    kcryptd_async_done, dmreq_of_req(cc, ctx->req));
 }
 
 /*
@@ -921,7 +901,6 @@ static void crypt_alloc_req(struct crypt_config *cc,
 static int crypt_convert(struct crypt_config *cc,
 			 struct convert_context *ctx)
 {
-	struct crypt_cpu *this_cc = this_crypt_config(cc);
 	int r;
 
 	atomic_set(&ctx->cc_pending, 1);
@@ -932,7 +911,7 @@ static int crypt_convert(struct crypt_config *cc,
 
 		atomic_inc(&ctx->cc_pending);
 
-		r = crypt_convert_block(cc, ctx, this_cc->req);
+		r = crypt_convert_block(cc, ctx, ctx->req);
 
 		switch (r) {
 		/* async */
@@ -941,7 +920,7 @@ static int crypt_convert(struct crypt_config *cc,
 			reinit_completion(&ctx->restart);
 			/* fall through*/
 		case -EINPROGRESS:
-			this_cc->req = NULL;
+			ctx->req = NULL;
 			ctx->cc_sector++;
 			continue;
 
@@ -1040,6 +1019,7 @@ static struct dm_crypt_io *crypt_io_alloc(struct crypt_config *cc,
 	io->sector = sector;
 	io->error = 0;
 	io->base_io = NULL;
+	io->ctx.req = NULL;
 	atomic_set(&io->io_pending, 0);
 
 	return io;
@@ -1065,6 +1045,8 @@ static void crypt_dec_pending(struct dm_crypt_io *io)
 	if (!atomic_dec_and_test(&io->io_pending))
 		return;
 
+	if (io->ctx.req)
+		mempool_free(io->ctx.req, cc->req_pool);
 	mempool_free(io, cc->io_pool);
 
 	if (likely(!base_io))
@@ -1492,8 +1474,6 @@ static int crypt_wipe_key(struct crypt_config *cc)
 static void crypt_dtr(struct dm_target *ti)
 {
 	struct crypt_config *cc = ti->private;
-	struct crypt_cpu *cpu_cc;
-	int cpu;
 
 	ti->private = NULL;
 
@@ -1505,13 +1485,6 @@ static void crypt_dtr(struct dm_target *ti)
 	if (cc->crypt_queue)
 		destroy_workqueue(cc->crypt_queue);
 
-	if (cc->cpu)
-		for_each_possible_cpu(cpu) {
-			cpu_cc = per_cpu_ptr(cc->cpu, cpu);
-			if (cpu_cc->req)
-				mempool_free(cpu_cc->req, cc->req_pool);
-		}
-
 	crypt_free_tfms(cc);
 
 	if (cc->bs)
@@ -1530,9 +1503,6 @@ static void crypt_dtr(struct dm_target *ti)
 	if (cc->dev)
 		dm_put_device(ti, cc->dev);
 
-	if (cc->cpu)
-		free_percpu(cc->cpu);
-
 	kzfree(cc->cipher);
 	kzfree(cc->cipher_string);
 
@@ -1588,13 +1558,6 @@ static int crypt_ctr_cipher(struct dm_target *ti,
 	if (tmp)
 		DMWARN("Ignoring unexpected additional cipher options");
 
-	cc->cpu = __alloc_percpu(sizeof(*(cc->cpu)),
-				 __alignof__(struct crypt_cpu));
-	if (!cc->cpu) {
-		ti->error = "Cannot allocate per cpu state";
-		goto bad_mem;
-	}
-
 	/*
	 * For compatibility with the original dm-crypt mapping format, if
	 * only the cipher name is supplied, use cbc-plain.