@@ -99,9 +99,8 @@ static int __ecb_crypt(struct skcipher_request *req,
 	struct skcipher_walk walk;
 	int err;
 
-	err = skcipher_walk_virt(&walk, req, true);
+	err = skcipher_walk_virt(&walk, req, false);
 
-	kernel_neon_begin();
 	while (walk.nbytes >= AES_BLOCK_SIZE) {
 		unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE;
 
@@ -109,12 +108,13 @@ static int __ecb_crypt(struct skcipher_request *req,
 			blocks = round_down(blocks,
 					    walk.stride / AES_BLOCK_SIZE);
 
+		kernel_neon_begin();
 		fn(walk.dst.virt.addr, walk.src.virt.addr, ctx->rk,
 		   ctx->rounds, blocks);
+		kernel_neon_end();
 		err = skcipher_walk_done(&walk,
 					 walk.nbytes - blocks * AES_BLOCK_SIZE);
 	}
-	kernel_neon_end();
 
 	return err;
 }
@@ -158,19 +158,19 @@ static int cbc_encrypt(struct skcipher_request *req)
 	struct skcipher_walk walk;
 	int err;
 
-	err = skcipher_walk_virt(&walk, req, true);
+	err = skcipher_walk_virt(&walk, req, false);
 
-	kernel_neon_begin();
 	while (walk.nbytes >= AES_BLOCK_SIZE) {
 		unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE;
 
 		/* fall back to the non-bitsliced NEON implementation */
+		kernel_neon_begin();
 		neon_aes_cbc_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
 				     ctx->enc, ctx->key.rounds, blocks,
 				     walk.iv);
+		kernel_neon_end();
 		err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
 	}
-	kernel_neon_end();
 
 	return err;
 }
@@ -181,9 +181,8 @@ static int cbc_decrypt(struct skcipher_request *req)
 	struct skcipher_walk walk;
 	int err;
 
-	err = skcipher_walk_virt(&walk, req, true);
+	err = skcipher_walk_virt(&walk, req, false);
 
-	kernel_neon_begin();
 	while (walk.nbytes >= AES_BLOCK_SIZE) {
 		unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE;
 
@@ -191,13 +190,14 @@ static int cbc_decrypt(struct skcipher_request *req)
 			blocks = round_down(blocks,
 					    walk.stride / AES_BLOCK_SIZE);
 
+		kernel_neon_begin();
 		aesbs_cbc_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
 				  ctx->key.rk, ctx->key.rounds, blocks,
 				  walk.iv);
+		kernel_neon_end();
 		err = skcipher_walk_done(&walk,
 					 walk.nbytes - blocks * AES_BLOCK_SIZE);
 	}
-	kernel_neon_end();
 
 	return err;
 }
@@ -229,9 +229,8 @@ static int ctr_encrypt(struct skcipher_request *req)
 	u8 buf[AES_BLOCK_SIZE];
 	int err;
 
-	err = skcipher_walk_virt(&walk, req, true);
+	err = skcipher_walk_virt(&walk, req, false);
 
-	kernel_neon_begin();
 	while (walk.nbytes > 0) {
 		unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE;
 		u8 *final = (walk.total % AES_BLOCK_SIZE) ? buf : NULL;
@@ -242,8 +241,10 @@ static int ctr_encrypt(struct skcipher_request *req)
 			final = NULL;
 		}
 
+		kernel_neon_begin();
 		aesbs_ctr_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
 				  ctx->rk, ctx->rounds, blocks, walk.iv, final);
+		kernel_neon_end();
 
 		if (final) {
 			u8 *dst = walk.dst.virt.addr + blocks * AES_BLOCK_SIZE;
@@ -258,8 +259,6 @@ static int ctr_encrypt(struct skcipher_request *req)
 		err = skcipher_walk_done(&walk,
 					 walk.nbytes - blocks * AES_BLOCK_SIZE);
 	}
-	kernel_neon_end();
-
 	return err;
 }
 
@@ -304,12 +303,11 @@ static int __xts_crypt(struct skcipher_request *req,
 	struct skcipher_walk walk;
 	int err;
 
-	err = skcipher_walk_virt(&walk, req, true);
+	err = skcipher_walk_virt(&walk, req, false);
 
 	kernel_neon_begin();
-
-	neon_aes_ecb_encrypt(walk.iv, walk.iv, ctx->twkey,
-			     ctx->key.rounds, 1);
+	neon_aes_ecb_encrypt(walk.iv, walk.iv, ctx->twkey, ctx->key.rounds, 1);
+	kernel_neon_end();
 
 	while (walk.nbytes >= AES_BLOCK_SIZE) {
 		unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE;
@@ -318,13 +316,13 @@ static int __xts_crypt(struct skcipher_request *req,
 			blocks = round_down(blocks,
 					    walk.stride / AES_BLOCK_SIZE);
 
+		kernel_neon_begin();
 		fn(walk.dst.virt.addr, walk.src.virt.addr, ctx->key.rk,
 		   ctx->key.rounds, blocks, walk.iv);
+		kernel_neon_end();
 		err = skcipher_walk_done(&walk,
 					 walk.nbytes - blocks * AES_BLOCK_SIZE);
 	}
-	kernel_neon_end();
-
 	return err;
 }
 
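
Every hunk above applies the same transformation: the skcipher walk is started with atomic == false, so it is allowed to sleep, and the kernel_neon_begin()/kernel_neon_end() pair moves inside the loop so that the NEON unit (and the non-preemptible section it implies on arm64) is held only around the actual cipher call, never around skcipher_walk_done(). A minimal sketch of the resulting shape, with a hypothetical do_simd_blocks() standing in for the real aesbs_*/neon_aes_* routines (the driver's existing includes such as <asm/neon.h> and <crypto/internal/skcipher.h> already provide the APIs used here):

/*
 * Sketch only -- not the driver code itself. do_simd_blocks() is a
 * hypothetical stand-in for the real NEON routines (aesbs_ecb_encrypt(),
 * aesbs_cbc_decrypt(), ...).
 */
static int sketch_crypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct aesbs_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct skcipher_walk walk;
	int err;

	/* atomic == false: the walk may sleep, so it must not be set up
	 * or advanced inside a kernel_neon_begin()/end() section */
	err = skcipher_walk_virt(&walk, req, false);

	while (walk.nbytes >= AES_BLOCK_SIZE) {
		unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE;

		if (walk.nbytes < walk.total)
			blocks = round_down(blocks,
					    walk.stride / AES_BLOCK_SIZE);

		/* claim the NEON unit only for the actual cipher work */
		kernel_neon_begin();
		do_simd_blocks(walk.dst.virt.addr, walk.src.virt.addr,
			       ctx->rk, ctx->rounds, blocks);
		kernel_neon_end();

		/* walk bookkeeping runs with NEON released and preemption
		 * enabled again */
		err = skcipher_walk_done(&walk,
					 walk.nbytes - blocks * AES_BLOCK_SIZE);
	}

	return err;
}

The CTR and XTS hunks follow the same scoping; __xts_crypt additionally gives the tweak computation (neon_aes_ecb_encrypt() on walk.iv) its own begin/end pair ahead of the loop.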