@@ -63,11 +63,12 @@ static void (*pmull_ghash_update)(int blocks, u64 dg[], const char *src,
 
 asmlinkage void pmull_gcm_encrypt(int blocks, u64 dg[], u8 dst[],
				  const u8 src[], struct ghash_key const *k,
-				  u8 ctr[], int rounds, u8 ks[]);
+				  u8 ctr[], u32 const rk[], int rounds,
+				  u8 ks[]);
 
 asmlinkage void pmull_gcm_decrypt(int blocks, u64 dg[], u8 dst[],
				  const u8 src[], struct ghash_key const *k,
-				  u8 ctr[], int rounds);
+				  u8 ctr[], u32 const rk[], int rounds);
 
 asmlinkage void pmull_gcm_encrypt_block(u8 dst[], u8 const src[],
					u32 const rk[], int rounds);
 
@@ -368,26 +369,29 @@ static int gcm_encrypt(struct aead_request *req)
		pmull_gcm_encrypt_block(ks, iv, NULL,
					num_rounds(&ctx->aes_key));
		put_unaligned_be32(3, iv + GCM_IV_SIZE);
+		kernel_neon_end();
 
-		err = skcipher_walk_aead_encrypt(&walk, req, true);
+		err = skcipher_walk_aead_encrypt(&walk, req, false);
 
		while (walk.nbytes >= AES_BLOCK_SIZE) {
			int blocks = walk.nbytes / AES_BLOCK_SIZE;
 
+			kernel_neon_begin();
			pmull_gcm_encrypt(blocks, dg, walk.dst.virt.addr,
					  walk.src.virt.addr, &ctx->ghash_key,
-					  iv, num_rounds(&ctx->aes_key), ks);
+					  iv, ctx->aes_key.key_enc,
+					  num_rounds(&ctx->aes_key), ks);
+			kernel_neon_end();
 
			err = skcipher_walk_done(&walk,
						 walk.nbytes % AES_BLOCK_SIZE);
		}
-		kernel_neon_end();
	} else {
		__aes_arm64_encrypt(ctx->aes_key.key_enc, tag, iv,
				    num_rounds(&ctx->aes_key));
		put_unaligned_be32(2, iv + GCM_IV_SIZE);
 
-		err = skcipher_walk_aead_encrypt(&walk, req, true);
+		err = skcipher_walk_aead_encrypt(&walk, req, false);
 
		while (walk.nbytes >= AES_BLOCK_SIZE) {
			int blocks = walk.nbytes / AES_BLOCK_SIZE;
@@ -467,15 +471,19 @@ static int gcm_decrypt(struct aead_request *req)
		pmull_gcm_encrypt_block(tag, iv, ctx->aes_key.key_enc,
					num_rounds(&ctx->aes_key));
		put_unaligned_be32(2, iv + GCM_IV_SIZE);
+		kernel_neon_end();
 
-		err = skcipher_walk_aead_decrypt(&walk, req, true);
+		err = skcipher_walk_aead_decrypt(&walk, req, false);
 
		while (walk.nbytes >= AES_BLOCK_SIZE) {
			int blocks = walk.nbytes / AES_BLOCK_SIZE;
 
+			kernel_neon_begin();
			pmull_gcm_decrypt(blocks, dg, walk.dst.virt.addr,
					  walk.src.virt.addr, &ctx->ghash_key,
-					  iv, num_rounds(&ctx->aes_key));
+					  iv, ctx->aes_key.key_enc,
+					  num_rounds(&ctx->aes_key));
+			kernel_neon_end();
 
			err = skcipher_walk_done(&walk,
						 walk.nbytes % AES_BLOCK_SIZE);
@@ -483,14 +491,12 @@ static int gcm_decrypt(struct aead_request *req)
		if (walk.nbytes)
			pmull_gcm_encrypt_block(iv, iv, NULL,
						num_rounds(&ctx->aes_key));
-
-		kernel_neon_end();
	} else {
		__aes_arm64_encrypt(ctx->aes_key.key_enc, tag, iv,
				    num_rounds(&ctx->aes_key));
		put_unaligned_be32(2, iv + GCM_IV_SIZE);
 
-		err = skcipher_walk_aead_decrypt(&walk, req, true);
+		err = skcipher_walk_aead_decrypt(&walk, req, false);
 
		while (walk.nbytes >= AES_BLOCK_SIZE) {
			int blocks = walk.nbytes / AES_BLOCK_SIZE;
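
For reference, a minimal sketch of how the restructured inner loop of
gcm_encrypt() is intended to read once the hunks above are applied. All
names are taken from the hunks themselves; the surrounding context is
abbreviated.

	/* the walk may sleep now: it is no longer entered with NEON held */
	err = skcipher_walk_aead_encrypt(&walk, req, false);

	while (walk.nbytes >= AES_BLOCK_SIZE) {
		int blocks = walk.nbytes / AES_BLOCK_SIZE;

		/* claim the NEON unit only for the duration of one batch */
		kernel_neon_begin();
		pmull_gcm_encrypt(blocks, dg, walk.dst.virt.addr,
				  walk.src.virt.addr, &ctx->ghash_key,
				  iv, ctx->aes_key.key_enc,
				  num_rounds(&ctx->aes_key), ks);
		kernel_neon_end();

		err = skcipher_walk_done(&walk,
					 walk.nbytes % AES_BLOCK_SIZE);
	}

Because kernel_neon_begin()/kernel_neon_end() now bracket each batch rather
than the whole walk, the NEON registers holding the AES key schedule can no
longer be assumed to survive between calls, which appears to be why the round
keys (ctx->aes_key.key_enc) are passed down to pmull_gcm_encrypt() and
pmull_gcm_decrypt() as the new 'rk' argument.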