@@ -26,8 +26,6 @@
 #include <crypto/b128ops.h>
 #include <crypto/gf128mul.h>
 
-#define XTS_BUFFER_SIZE 128u
-
 struct priv {
         struct crypto_skcipher *child;
         struct crypto_cipher *tweak;
@@ -39,19 +37,7 @@ struct xts_instance_ctx {
 };
 
 struct rctx {
-        le128 buf[XTS_BUFFER_SIZE / sizeof(le128)];
-
         le128 t;
-
-        le128 *ext;
-
-        struct scatterlist srcbuf[2];
-        struct scatterlist dstbuf[2];
-        struct scatterlist *src;
-        struct scatterlist *dst;
-
-        unsigned int left;
-
         struct skcipher_request subreq;
 };
 
@@ -96,81 +82,27 @@ static int setkey(struct crypto_skcipher *parent, const u8 *key,
         return err;
 }
 
-static int post_crypt(struct skcipher_request *req)
+/*
+ * We compute the tweak masks twice (both before and after the ECB encryption or
+ * decryption) to avoid having to allocate a temporary buffer and/or make
+ * multiple calls to the 'ecb(..)' instance, which usually would be slower than
+ * just doing the gf128mul_x_ble() calls again.
+ */
+static int xor_tweak(struct skcipher_request *req, bool second_pass)
 {
         struct rctx *rctx = skcipher_request_ctx(req);
-        le128 *buf = rctx->ext ?: rctx->buf;
-        struct skcipher_request *subreq;
+        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
         const int bs = XTS_BLOCK_SIZE;
         struct skcipher_walk w;
-        struct scatterlist *sg;
-        unsigned offset;
+        le128 t = rctx->t;
         int err;
 
-        subreq = &rctx->subreq;
-        err = skcipher_walk_virt(&w, subreq, false);
-
-        while (w.nbytes) {
-                unsigned int avail = w.nbytes;
-                le128 *wdst;
-
-                wdst = w.dst.virt.addr;
-
-                do {
-                        le128_xor(wdst, buf++, wdst);
-                        wdst++;
-                } while ((avail -= bs) >= bs);
-
-                err = skcipher_walk_done(&w, avail);
+        if (second_pass) {
+                req = &rctx->subreq;
+                /* set to our TFM to enforce correct alignment: */
+                skcipher_request_set_tfm(req, tfm);
         }
-
-        rctx->left -= subreq->cryptlen;
-
-        if (err || !rctx->left)
-                goto out;
-
-        rctx->dst = rctx->dstbuf;
-
-        scatterwalk_done(&w.out, 0, 1);
-        sg = w.out.sg;
-        offset = w.out.offset;
-
-        if (rctx->dst != sg) {
-                rctx->dst[0] = *sg;
-                sg_unmark_end(rctx->dst);
-                scatterwalk_crypto_chain(rctx->dst, sg_next(sg), 2);
-        }
-        rctx->dst[0].length -= offset - sg->offset;
-        rctx->dst[0].offset = offset;
-
-out:
-        return err;
-}
-
-static int pre_crypt(struct skcipher_request *req)
-{
-        struct rctx *rctx = skcipher_request_ctx(req);
-        le128 *buf = rctx->ext ?: rctx->buf;
-        struct skcipher_request *subreq;
-        const int bs = XTS_BLOCK_SIZE;
-        struct skcipher_walk w;
-        struct scatterlist *sg;
-        unsigned cryptlen;
-        unsigned offset;
-        bool more;
-        int err;
-
-        subreq = &rctx->subreq;
-        cryptlen = subreq->cryptlen;
-
-        more = rctx->left > cryptlen;
-        if (!more)
-                cryptlen = rctx->left;
-
-        skcipher_request_set_crypt(subreq, rctx->src, rctx->dst,
-                                   cryptlen, NULL);
-
-        err = skcipher_walk_virt(&w, subreq, false);
+        err = skcipher_walk_virt(&w, req, false);
 
         while (w.nbytes) {
                 unsigned int avail = w.nbytes;
@@ -181,180 +113,71 @@ static int pre_crypt(struct skcipher_request *req)
                 wdst = w.dst.virt.addr;
 
                 do {
-                        *buf++ = rctx->t;
-                        le128_xor(wdst++, &rctx->t, wsrc++);
-                        gf128mul_x_ble(&rctx->t, &rctx->t);
+                        le128_xor(wdst++, &t, wsrc++);
+                        gf128mul_x_ble(&t, &t);
                 } while ((avail -= bs) >= bs);
 
                 err = skcipher_walk_done(&w, avail);
         }
 
-        skcipher_request_set_crypt(subreq, rctx->dst, rctx->dst,
-                                   cryptlen, NULL);
-
-        if (err || !more)
-                goto out;
-
-        rctx->src = rctx->srcbuf;
-
-        scatterwalk_done(&w.in, 0, 1);
-        sg = w.in.sg;
-        offset = w.in.offset;
-
-        if (rctx->src != sg) {
-                rctx->src[0] = *sg;
-                sg_unmark_end(rctx->src);
-                scatterwalk_crypto_chain(rctx->src, sg_next(sg), 2);
-        }
-        rctx->src[0].length -= offset - sg->offset;
-        rctx->src[0].offset = offset;
-
-out:
         return err;
 }
 
-static int init_crypt(struct skcipher_request *req, crypto_completion_t done)
+static int xor_tweak_pre(struct skcipher_request *req)
 {
-        struct priv *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
-        struct rctx *rctx = skcipher_request_ctx(req);
-        struct skcipher_request *subreq;
-        gfp_t gfp;
-
-        subreq = &rctx->subreq;
-        skcipher_request_set_tfm(subreq, ctx->child);
-        skcipher_request_set_callback(subreq, req->base.flags, done, req);
-
-        gfp = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
-                                                           GFP_ATOMIC;
-        rctx->ext = NULL;
-
-        subreq->cryptlen = XTS_BUFFER_SIZE;
-        if (req->cryptlen > XTS_BUFFER_SIZE) {
-                unsigned int n = min(req->cryptlen, (unsigned int)PAGE_SIZE);
-
-                rctx->ext = kmalloc(n, gfp);
-                if (rctx->ext)
-                        subreq->cryptlen = n;
-        }
-
-        rctx->src = req->src;
-        rctx->dst = req->dst;
-        rctx->left = req->cryptlen;
-
-        /* calculate first value of T */
-        crypto_cipher_encrypt_one(ctx->tweak, (u8 *)&rctx->t, req->iv);
-
-        return 0;
+        return xor_tweak(req, false);
 }
 
-static void exit_crypt(struct skcipher_request *req)
+static int xor_tweak_post(struct skcipher_request *req)
 {
-        struct rctx *rctx = skcipher_request_ctx(req);
-
-        rctx->left = 0;
-
-        if (rctx->ext)
-                kzfree(rctx->ext);
+        return xor_tweak(req, true);
 }
 
-static int do_encrypt(struct skcipher_request *req, int err)
-{
-        struct rctx *rctx = skcipher_request_ctx(req);
-        struct skcipher_request *subreq;
-
-        subreq = &rctx->subreq;
-
-        while (!err && rctx->left) {
-                err = pre_crypt(req) ?:
-                      crypto_skcipher_encrypt(subreq) ?:
-                      post_crypt(req);
-
-                if (err == -EINPROGRESS || err == -EBUSY)
-                        return err;
-        }
-
-        exit_crypt(req);
-        return err;
-}
-
-static void encrypt_done(struct crypto_async_request *areq, int err)
+static void crypt_done(struct crypto_async_request *areq, int err)
 {
         struct skcipher_request *req = areq->data;
-        struct skcipher_request *subreq;
-        struct rctx *rctx;
-
-        rctx = skcipher_request_ctx(req);
-
-        if (err == -EINPROGRESS) {
-                if (rctx->left != req->cryptlen)
-                        return;
-                goto out;
-        }
-
-        subreq = &rctx->subreq;
-        subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;
 
-        err = do_encrypt(req, err ?: post_crypt(req));
-        if (rctx->left)
-                return;
+        if (!err)
+                err = xor_tweak_post(req);
 
-out:
         skcipher_request_complete(req, err);
 }
 
-static int encrypt(struct skcipher_request *req)
-{
-        return do_encrypt(req, init_crypt(req, encrypt_done));
-}
-
-static int do_decrypt(struct skcipher_request *req, int err)
+static void init_crypt(struct skcipher_request *req)
 {
+        struct priv *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
         struct rctx *rctx = skcipher_request_ctx(req);
-        struct skcipher_request *subreq;
-
-        subreq = &rctx->subreq;
+        struct skcipher_request *subreq = &rctx->subreq;
 
-        while (!err && rctx->left) {
-                err = pre_crypt(req) ?:
-                      crypto_skcipher_decrypt(subreq) ?:
-                      post_crypt(req);
-
-                if (err == -EINPROGRESS || err == -EBUSY)
-                        return err;
-        }
+        skcipher_request_set_tfm(subreq, ctx->child);
+        skcipher_request_set_callback(subreq, req->base.flags, crypt_done, req);
+        skcipher_request_set_crypt(subreq, req->dst, req->dst,
+                                   req->cryptlen, NULL);
 
-        exit_crypt(req);
-        return err;
+        /* calculate first value of T */
+        crypto_cipher_encrypt_one(ctx->tweak, (u8 *)&rctx->t, req->iv);
 }
 
-static void decrypt_done(struct crypto_async_request *areq, int err)
+static int encrypt(struct skcipher_request *req)
 {
-        struct skcipher_request *req = areq->data;
-        struct skcipher_request *subreq;
-        struct rctx *rctx;
-
-        rctx = skcipher_request_ctx(req);
-
-        if (err == -EINPROGRESS) {
-                if (rctx->left != req->cryptlen)
-                        return;
-                goto out;
-        }
-
-        subreq = &rctx->subreq;
-        subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;
-
-        err = do_decrypt(req, err ?: post_crypt(req));
-        if (rctx->left)
-                return;
+        struct rctx *rctx = skcipher_request_ctx(req);
+        struct skcipher_request *subreq = &rctx->subreq;
 
-out:
-        skcipher_request_complete(req, err);
+        init_crypt(req);
+        return xor_tweak_pre(req) ?:
+                crypto_skcipher_encrypt(subreq) ?:
+                xor_tweak_post(req);
 }
 
 static int decrypt(struct skcipher_request *req)
 {
-        return do_decrypt(req, init_crypt(req, decrypt_done));
+        struct rctx *rctx = skcipher_request_ctx(req);
+        struct skcipher_request *subreq = &rctx->subreq;
+
+        init_crypt(req);
+        return xor_tweak_pre(req) ?:
+                crypto_skcipher_decrypt(subreq) ?:
+                xor_tweak_post(req);
 }
 
 static int init_tfm(struct crypto_skcipher *tfm)
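
For reference, the comment added in xor_tweak() above describes recomputing the tweak
masks on both sides of the 'ecb(..)' pass instead of caching them. The plain-C sketch
below is only an illustration of that two-pass structure and is not part of the patch:
ecb_block_fn, xor_tweak_stream() and xts_encrypt_sketch() are hypothetical userspace
stand-ins for the child skcipher and the kernel's le128/gf128mul_x_ble() helpers, and
ciphertext stealing as well as the tweak-key encryption of the IV (done by init_crypt()
in the patch) are deliberately left out.

#include <stdint.h>
#include <stddef.h>
#include <string.h>

/* hypothetical one-block ECB primitive standing in for the 'ecb(..)' child */
typedef void (*ecb_block_fn)(uint8_t out[16], const uint8_t in[16], const void *key);

/* multiply the 128-bit tweak by x in GF(2^128), XTS little-endian convention */
static void gf128_mul_x_ble(uint8_t t[16])
{
        uint8_t carry = 0;

        for (int i = 0; i < 16; i++) {
                uint8_t next = t[i] >> 7;

                t[i] = (uint8_t)((t[i] << 1) | carry);
                carry = next;
        }
        if (carry)
                t[0] ^= 0x87;   /* reduction by x^128 + x^7 + x^2 + x + 1 */
}

/* XOR the tweak stream T, T*x, T*x^2, ... into nblocks 16-byte blocks */
static void xor_tweak_stream(uint8_t *data, size_t nblocks, const uint8_t iv_tweak[16])
{
        uint8_t t[16];

        memcpy(t, iv_tweak, 16);
        for (size_t i = 0; i < nblocks; i++) {
                for (int j = 0; j < 16; j++)
                        data[16 * i + j] ^= t[j];
                gf128_mul_x_ble(t);
        }
}

/* XTS-style encryption of whole blocks: pre-XOR, ECB, post-XOR */
void xts_encrypt_sketch(uint8_t *buf, size_t nblocks, const uint8_t iv_tweak[16],
                        ecb_block_fn ecb, const void *data_key)
{
        xor_tweak_stream(buf, nblocks, iv_tweak);       /* first pass */
        for (size_t i = 0; i < nblocks; i++) {
                uint8_t tmp[16];

                ecb(tmp, buf + 16 * i, data_key);       /* 'ecb(..)' step */
                memcpy(buf + 16 * i, tmp, 16);
        }
        xor_tweak_stream(buf, nblocks, iv_tweak);       /* second pass */
}

Because the second xor_tweak_stream() call regenerates exactly the same tweak sequence,
no per-block mask needs to be kept between the two passes, which is what lets the patch
drop rctx->buf and rctx->ext entirely.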