@@ -31,69 +31,29 @@
 #include <crypto/xts.h>
 #include <asm/cpacf.h>
 
-#define AES_KEYLEN_128 1
-#define AES_KEYLEN_192 2
-#define AES_KEYLEN_256 4
-
 static u8 *ctrblk;
 static DEFINE_SPINLOCK(ctrblk_lock);
-static char keylen_flag;
+
+static cpacf_mask_t km_functions, kmc_functions, kmctr_functions;
 
 struct s390_aes_ctx {
 	u8 key[AES_MAX_KEY_SIZE];
-	long enc;
-	long dec;
 	int key_len;
+	unsigned long fc;
 	union {
 		struct crypto_skcipher *blk;
 		struct crypto_cipher *cip;
 	} fallback;
 };
 
-struct pcc_param {
-	u8 key[32];
-	u8 tweak[16];
-	u8 block[16];
-	u8 bit[16];
-	u8 xts[16];
-};
-
 struct s390_xts_ctx {
 	u8 key[32];
 	u8 pcc_key[32];
-	long enc;
-	long dec;
 	int key_len;
+	unsigned long fc;
 	struct crypto_skcipher *fallback;
 };
 
-/*
- * Check if the key_len is supported by the HW.
- * Returns 0 if it is, a positive number if it is not and software fallback is
- * required or a negative number in case the key size is not valid
- */
-static int need_fallback(unsigned int key_len)
-{
-	switch (key_len) {
-	case 16:
-		if (!(keylen_flag & AES_KEYLEN_128))
-			return 1;
-		break;
-	case 24:
-		if (!(keylen_flag & AES_KEYLEN_192))
-			return 1;
-		break;
-	case 32:
-		if (!(keylen_flag & AES_KEYLEN_256))
-			return 1;
-		break;
-	default:
-		return -1;
-		break;
-	}
-	return 0;
-}
-
 static int setkey_fallback_cip(struct crypto_tfm *tfm, const u8 *in_key,
 			       unsigned int key_len)
 {
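
[Review note] The keylen_flag scheme above is replaced by querying each CPACF instruction once at init time and probing individual function codes on demand. A minimal stand-alone sketch of such a probe, assuming a 128-bit mask with bit n set when function code n is installed (the type and helper names here are illustrative, not the asm/cpacf.h definitions):

	typedef struct { unsigned char bytes[16]; } mask_sketch_t;

	/* test whether function code 'func' is flagged in the query mask */
	static int test_func_sketch(const mask_sketch_t *mask, unsigned int func)
	{
		func &= 0x7f;	/* ignore a decrypt-modifier bit, if set */
		return (mask->bytes[func >> 3] & (0x80 >> (func & 7))) != 0;
	}
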
@@ -117,72 +77,44 @@ static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
 			   unsigned int key_len)
 {
 	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
-	u32 *flags = &tfm->crt_flags;
-	int ret;
+	unsigned long fc;
 
-	ret = need_fallback(key_len);
-	if (ret < 0) {
-		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
-		return -EINVAL;
-	}
+	/* Pick the correct function code based on the key length */
+	fc = (key_len == 16) ? CPACF_KM_AES_128 :
+	     (key_len == 24) ? CPACF_KM_AES_192 :
+	     (key_len == 32) ? CPACF_KM_AES_256 : 0;
 
-	sctx->key_len = key_len;
-	if (!ret) {
-		memcpy(sctx->key, in_key, key_len);
-		return 0;
-	}
+	/* Check if the function code is available */
+	sctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
+	if (!sctx->fc)
+		return setkey_fallback_cip(tfm, in_key, key_len);
 
-	return setkey_fallback_cip(tfm, in_key, key_len);
+	sctx->key_len = key_len;
+	memcpy(sctx->key, in_key, key_len);
+	return 0;
 }
 
 static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
 {
 	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
 
-	if (unlikely(need_fallback(sctx->key_len))) {
+	if (unlikely(!sctx->fc)) {
 		crypto_cipher_encrypt_one(sctx->fallback.cip, out, in);
 		return;
 	}
-
-	switch (sctx->key_len) {
-	case 16:
-		cpacf_km(CPACF_KM_AES_128_ENC, &sctx->key, out, in,
-			 AES_BLOCK_SIZE);
-		break;
-	case 24:
-		cpacf_km(CPACF_KM_AES_192_ENC, &sctx->key, out, in,
-			 AES_BLOCK_SIZE);
-		break;
-	case 32:
-		cpacf_km(CPACF_KM_AES_256_ENC, &sctx->key, out, in,
-			 AES_BLOCK_SIZE);
-		break;
-	}
+	cpacf_km(sctx->fc, &sctx->key, out, in, AES_BLOCK_SIZE);
 }
 
 static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
 {
 	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
 
-	if (unlikely(need_fallback(sctx->key_len))) {
+	if (unlikely(!sctx->fc)) {
 		crypto_cipher_decrypt_one(sctx->fallback.cip, out, in);
 		return;
 	}
-
-	switch (sctx->key_len) {
-	case 16:
-		cpacf_km(CPACF_KM_AES_128_DEC, &sctx->key, out, in,
-			 AES_BLOCK_SIZE);
-		break;
-	case 24:
-		cpacf_km(CPACF_KM_AES_192_DEC, &sctx->key, out, in,
-			 AES_BLOCK_SIZE);
-		break;
-	case 32:
-		cpacf_km(CPACF_KM_AES_256_DEC, &sctx->key, out, in,
-			 AES_BLOCK_SIZE);
-		break;
-	}
+	cpacf_km(sctx->fc | CPACF_DECRYPT,
+		 &sctx->key, out, in, AES_BLOCK_SIZE);
 }
 
 static int fallback_init_cip(struct crypto_tfm *tfm)
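
[Review note] With one function code per context, decryption no longer needs a second opcode: the modifier bit is OR-ed in at call time, as in cpacf_km(sctx->fc | CPACF_DECRYPT, ...). A stand-alone sketch with illustrative constant values (the real encodings live in asm/cpacf.h):

	#include <stdio.h>

	#define DECRYPT_MODIFIER	0x80	/* stands in for CPACF_DECRYPT */
	#define KM_AES_256_SKETCH	0x14	/* illustrative function code */

	int main(void)
	{
		unsigned long fc = KM_AES_256_SKETCH;

		/* same function code, direction chosen by the modifier bit */
		printf("encrypt: 0x%02lx decrypt: 0x%02lx\n",
		       fc, fc | DECRYPT_MODIFIER);
		return 0;
	}
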
@@ -291,50 +223,37 @@ static int ecb_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
 			   unsigned int key_len)
 {
 	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
-	int ret;
+	unsigned long fc;
 
-	ret = need_fallback(key_len);
-	if (ret > 0) {
-		sctx->key_len = key_len;
-		return setkey_fallback_blk(tfm, in_key, key_len);
-	}
+	/* Pick the correct function code based on the key length */
+	fc = (key_len == 16) ? CPACF_KM_AES_128 :
+	     (key_len == 24) ? CPACF_KM_AES_192 :
+	     (key_len == 32) ? CPACF_KM_AES_256 : 0;
 
-	switch (key_len) {
-	case 16:
-		sctx->enc = CPACF_KM_AES_128_ENC;
-		sctx->dec = CPACF_KM_AES_128_DEC;
-		break;
-	case 24:
-		sctx->enc = CPACF_KM_AES_192_ENC;
-		sctx->dec = CPACF_KM_AES_192_DEC;
-		break;
-	case 32:
-		sctx->enc = CPACF_KM_AES_256_ENC;
-		sctx->dec = CPACF_KM_AES_256_DEC;
-		break;
-	}
+	/* Check if the function code is available */
+	sctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
+	if (!sctx->fc)
+		return setkey_fallback_blk(tfm, in_key, key_len);
 
-	return aes_set_key(tfm, in_key, key_len);
+	sctx->key_len = key_len;
+	memcpy(sctx->key, in_key, key_len);
+	return 0;
 }
 
-static int ecb_aes_crypt(struct blkcipher_desc *desc, long func, void *param,
+static int ecb_aes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
 			 struct blkcipher_walk *walk)
 {
-	int ret = blkcipher_walk_virt(desc, walk);
-	unsigned int nbytes;
+	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
+	unsigned int nbytes, n;
+	int ret;
 
-	while ((nbytes = walk->nbytes)) {
+	ret = blkcipher_walk_virt(desc, walk);
+	while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
 		/* only use complete blocks */
-		unsigned int n = nbytes & ~(AES_BLOCK_SIZE - 1);
-		u8 *out = walk->dst.virt.addr;
-		u8 *in = walk->src.virt.addr;
-
-		ret = cpacf_km(func, param, out, in, n);
-		if (ret < 0 || ret != n)
-			return -EIO;
-
-		nbytes &= AES_BLOCK_SIZE - 1;
-		ret = blkcipher_walk_done(desc, walk, nbytes);
+		n = nbytes & ~(AES_BLOCK_SIZE - 1);
+		cpacf_km(sctx->fc | modifier, sctx->key,
+			 walk->dst.virt.addr, walk->src.virt.addr, n);
+		ret = blkcipher_walk_done(desc, walk, nbytes - n);
 	}
 
 	return ret;
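
[Review note] The reworked walk loops all share the same "complete blocks only" arithmetic: n rounds nbytes down to a block multiple, and nbytes - n is handed back to the walker as the unprocessed tail. A tiny stand-alone check of that arithmetic:

	#include <assert.h>

	#define AES_BLOCK_SIZE 16

	int main(void)
	{
		unsigned int nbytes = 70;
		unsigned int n = nbytes & ~(AES_BLOCK_SIZE - 1);

		assert(n == 64);		/* four complete blocks */
		assert(nbytes - n == 6);	/* tail returned to the walker */
		return 0;
	}
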
@@ -347,11 +266,11 @@ static int ecb_aes_encrypt(struct blkcipher_desc *desc,
 	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
 	struct blkcipher_walk walk;
 
-	if (unlikely(need_fallback(sctx->key_len)))
+	if (unlikely(!sctx->fc))
 		return fallback_blk_enc(desc, dst, src, nbytes);
 
 	blkcipher_walk_init(&walk, dst, src, nbytes);
-	return ecb_aes_crypt(desc, sctx->enc, sctx->key, &walk);
+	return ecb_aes_crypt(desc, 0, &walk);
 }
 
 static int ecb_aes_decrypt(struct blkcipher_desc *desc,
@@ -361,11 +280,11 @@ static int ecb_aes_decrypt(struct blkcipher_desc *desc,
 	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
 	struct blkcipher_walk walk;
 
-	if (unlikely(need_fallback(sctx->key_len)))
+	if (unlikely(!sctx->fc))
 		return fallback_blk_dec(desc, dst, src, nbytes);
 
 	blkcipher_walk_init(&walk, dst, src, nbytes);
-	return ecb_aes_crypt(desc, sctx->dec, sctx->key, &walk);
+	return ecb_aes_crypt(desc, CPACF_DECRYPT, &walk);
 }
 
 static int fallback_init_blk(struct crypto_tfm *tfm)
@@ -420,64 +339,45 @@ static int cbc_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
 			   unsigned int key_len)
 {
 	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
-	int ret;
+	unsigned long fc;
 
-	ret = need_fallback(key_len);
-	if (ret > 0) {
-		sctx->key_len = key_len;
-		return setkey_fallback_blk(tfm, in_key, key_len);
-	}
+	/* Pick the correct function code based on the key length */
+	fc = (key_len == 16) ? CPACF_KMC_AES_128 :
+	     (key_len == 24) ? CPACF_KMC_AES_192 :
+	     (key_len == 32) ? CPACF_KMC_AES_256 : 0;
 
-	switch (key_len) {
-	case 16:
-		sctx->enc = CPACF_KMC_AES_128_ENC;
-		sctx->dec = CPACF_KMC_AES_128_DEC;
-		break;
-	case 24:
-		sctx->enc = CPACF_KMC_AES_192_ENC;
-		sctx->dec = CPACF_KMC_AES_192_DEC;
-		break;
-	case 32:
-		sctx->enc = CPACF_KMC_AES_256_ENC;
-		sctx->dec = CPACF_KMC_AES_256_DEC;
-		break;
-	}
+	/* Check if the function code is available */
+	sctx->fc = (fc && cpacf_test_func(&kmc_functions, fc)) ? fc : 0;
+	if (!sctx->fc)
+		return setkey_fallback_blk(tfm, in_key, key_len);
 
-	return aes_set_key(tfm, in_key, key_len);
+	sctx->key_len = key_len;
+	memcpy(sctx->key, in_key, key_len);
+	return 0;
 }
 
-static int cbc_aes_crypt(struct blkcipher_desc *desc, long func,
+static int cbc_aes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
 			 struct blkcipher_walk *walk)
 {
 	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
-	int ret = blkcipher_walk_virt(desc, walk);
-	unsigned int nbytes = walk->nbytes;
+	unsigned int nbytes, n;
+	int ret;
 	struct {
 		u8 iv[AES_BLOCK_SIZE];
 		u8 key[AES_MAX_KEY_SIZE];
 	} param;
 
-	if (!nbytes)
-		goto out;
-
+	ret = blkcipher_walk_virt(desc, walk);
 	memcpy(param.iv, walk->iv, AES_BLOCK_SIZE);
 	memcpy(param.key, sctx->key, sctx->key_len);
-	do {
+	while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
 		/* only use complete blocks */
-		unsigned int n = nbytes & ~(AES_BLOCK_SIZE - 1);
-		u8 *out = walk->dst.virt.addr;
-		u8 *in = walk->src.virt.addr;
-
-		ret = cpacf_kmc(func, &param, out, in, n);
-		if (ret < 0 || ret != n)
-			return -EIO;
-
-		nbytes &= AES_BLOCK_SIZE - 1;
-		ret = blkcipher_walk_done(desc, walk, nbytes);
-	} while ((nbytes = walk->nbytes));
+		n = nbytes & ~(AES_BLOCK_SIZE - 1);
+		cpacf_kmc(sctx->fc | modifier, &param,
+			  walk->dst.virt.addr, walk->src.virt.addr, n);
+		ret = blkcipher_walk_done(desc, walk, nbytes - n);
+	}
 	memcpy(walk->iv, param.iv, AES_BLOCK_SIZE);
-
-out:
 	return ret;
 }
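
[Review note] cbc_aes_crypt() builds an on-stack parameter block with the chaining value in front of the key; the hardware updates the chaining value in place, which is why param.iv is copied back to walk->iv after the loop. A stand-alone sketch of that layout (an illustration, not the documented CPACF format):

	#include <string.h>

	#define AES_BLOCK_SIZE		16
	#define AES_MAX_KEY_SIZE	32

	struct kmc_param_sketch {
		unsigned char iv[AES_BLOCK_SIZE];	/* updated by the instruction */
		unsigned char key[AES_MAX_KEY_SIZE];
	};

	static void param_init(struct kmc_param_sketch *p, const unsigned char *iv,
			       const unsigned char *key, unsigned int key_len)
	{
		memcpy(p->iv, iv, AES_BLOCK_SIZE);
		memcpy(p->key, key, key_len);
	}
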
@@ -488,11 +388,11 @@ static int cbc_aes_encrypt(struct blkcipher_desc *desc,
 	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
 	struct blkcipher_walk walk;
 
-	if (unlikely(need_fallback(sctx->key_len)))
+	if (unlikely(!sctx->fc))
 		return fallback_blk_enc(desc, dst, src, nbytes);
 
 	blkcipher_walk_init(&walk, dst, src, nbytes);
-	return cbc_aes_crypt(desc, sctx->enc, &walk);
+	return cbc_aes_crypt(desc, 0, &walk);
 }
 
 static int cbc_aes_decrypt(struct blkcipher_desc *desc,
@@ -502,11 +402,11 @@ static int cbc_aes_decrypt(struct blkcipher_desc *desc,
 	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
 	struct blkcipher_walk walk;
 
-	if (unlikely(need_fallback(sctx->key_len)))
+	if (unlikely(!sctx->fc))
 		return fallback_blk_dec(desc, dst, src, nbytes);
 
 	blkcipher_walk_init(&walk, dst, src, nbytes);
-	return cbc_aes_crypt(desc, sctx->dec, &walk);
+	return cbc_aes_crypt(desc, CPACF_DECRYPT, &walk);
 }
 
 static struct crypto_alg cbc_aes_alg = {
@@ -594,83 +494,67 @@ static int xts_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
 			   unsigned int key_len)
 {
 	struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);
-	u32 *flags = &tfm->crt_flags;
+	unsigned long fc;
 	int err;
 
 	err = xts_check_key(tfm, in_key, key_len);
 	if (err)
 		return err;
 
-	switch (key_len) {
-	case 32:
-		xts_ctx->enc = CPACF_KM_XTS_128_ENC;
-		xts_ctx->dec = CPACF_KM_XTS_128_DEC;
-		memcpy(xts_ctx->key + 16, in_key, 16);
-		memcpy(xts_ctx->pcc_key + 16, in_key + 16, 16);
-		break;
-	case 48:
-		xts_ctx->enc = 0;
-		xts_ctx->dec = 0;
-		xts_fallback_setkey(tfm, in_key, key_len);
-		break;
-	case 64:
-		xts_ctx->enc = CPACF_KM_XTS_256_ENC;
-		xts_ctx->dec = CPACF_KM_XTS_256_DEC;
-		memcpy(xts_ctx->key, in_key, 32);
-		memcpy(xts_ctx->pcc_key, in_key + 32, 32);
-		break;
-	default:
-		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
-		return -EINVAL;
-	}
+	/* Pick the correct function code based on the key length */
+	fc = (key_len == 32) ? CPACF_KM_XTS_128 :
+	     (key_len == 64) ? CPACF_KM_XTS_256 : 0;
+
+	/* Check if the function code is available */
+	xts_ctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
+	if (!xts_ctx->fc)
+		return xts_fallback_setkey(tfm, in_key, key_len);
+
+	/* Split the XTS key into the two subkeys */
+	key_len = key_len / 2;
 	xts_ctx->key_len = key_len;
+	memcpy(xts_ctx->key, in_key, key_len);
+	memcpy(xts_ctx->pcc_key, in_key + key_len, key_len);
 	return 0;
 }
 
-static int xts_aes_crypt(struct blkcipher_desc *desc, long func,
-			 struct s390_xts_ctx *xts_ctx,
+static int xts_aes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
 			 struct blkcipher_walk *walk)
 {
-	unsigned int offset = (xts_ctx->key_len >> 1) & 0x10;
-	int ret = blkcipher_walk_virt(desc, walk);
-	unsigned int nbytes = walk->nbytes;
-	unsigned int n;
-	u8 *in, *out;
-	struct pcc_param pcc_param;
+	struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm);
+	unsigned int offset, nbytes, n;
+	int ret;
+	struct {
+		u8 key[32];
+		u8 tweak[16];
+		u8 block[16];
+		u8 bit[16];
+		u8 xts[16];
+	} pcc_param;
 	struct {
 		u8 key[32];
 		u8 init[16];
 	} xts_param;
 
-	if (!nbytes)
-		goto out;
-
+	ret = blkcipher_walk_virt(desc, walk);
+	offset = xts_ctx->key_len & 0x10;
 	memset(pcc_param.block, 0, sizeof(pcc_param.block));
 	memset(pcc_param.bit, 0, sizeof(pcc_param.bit));
 	memset(pcc_param.xts, 0, sizeof(pcc_param.xts));
 	memcpy(pcc_param.tweak, walk->iv, sizeof(pcc_param.tweak));
-	memcpy(pcc_param.key, xts_ctx->pcc_key, 32);
-	/* remove decipher modifier bit from 'func' and call PCC */
-	ret = cpacf_pcc(func & 0x7f, &pcc_param.key[offset]);
-	if (ret < 0)
-		return -EIO;
+	memcpy(pcc_param.key + offset, xts_ctx->pcc_key, xts_ctx->key_len);
+	cpacf_pcc(xts_ctx->fc, pcc_param.key + offset);
 
-	memcpy(xts_param.key, xts_ctx->key, 32);
+	memcpy(xts_param.key + offset, xts_ctx->key, xts_ctx->key_len);
 	memcpy(xts_param.init, pcc_param.xts, 16);
-	do {
+
+	while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
 		/* only use complete blocks */
 		n = nbytes & ~(AES_BLOCK_SIZE - 1);
-		out = walk->dst.virt.addr;
-		in = walk->src.virt.addr;
-
-		ret = cpacf_km(func, &xts_param.key[offset], out, in, n);
-		if (ret < 0 || ret != n)
-			return -EIO;
-
-		nbytes &= AES_BLOCK_SIZE - 1;
-		ret = blkcipher_walk_done(desc, walk, nbytes);
-	} while ((nbytes = walk->nbytes));
-out:
+		cpacf_km(xts_ctx->fc | modifier, xts_param.key + offset,
+			 walk->dst.virt.addr, walk->src.virt.addr, n);
+		ret = blkcipher_walk_done(desc, walk, nbytes - n);
+	}
 	return ret;
 }
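
[Review note] In the new xts_aes_crypt(), key_len is already the halved XTS key, so offset = key_len & 0x10 right-aligns a 16-byte AES-128 subkey in the 32-byte key field and leaves an AES-256 subkey at offset 0. A stand-alone check of the two cases:

	#include <assert.h>

	int main(void)
	{
		assert((16 & 0x10) == 16);	/* AES-128: subkey stored at +16 */
		assert((32 & 0x10) == 0);	/* AES-256: subkey fills the field */
		return 0;
	}
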
@@ -681,11 +565,11 @@ static int xts_aes_encrypt(struct blkcipher_desc *desc,
 	struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm);
 	struct blkcipher_walk walk;
 
-	if (unlikely(xts_ctx->key_len == 48))
+	if (unlikely(!xts_ctx->fc))
 		return xts_fallback_encrypt(desc, dst, src, nbytes);
 
 	blkcipher_walk_init(&walk, dst, src, nbytes);
-	return xts_aes_crypt(desc, xts_ctx->enc, xts_ctx, &walk);
+	return xts_aes_crypt(desc, 0, &walk);
 }
 
 static int xts_aes_decrypt(struct blkcipher_desc *desc,
@@ -695,11 +579,11 @@ static int xts_aes_decrypt(struct blkcipher_desc *desc,
 	struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm);
 	struct blkcipher_walk walk;
 
-	if (unlikely(xts_ctx->key_len == 48))
+	if (unlikely(!xts_ctx->fc))
 		return xts_fallback_decrypt(desc, dst, src, nbytes);
 
 	blkcipher_walk_init(&walk, dst, src, nbytes);
-	return xts_aes_crypt(desc, xts_ctx->dec, xts_ctx, &walk);
+	return xts_aes_crypt(desc, CPACF_DECRYPT, &walk);
 }
 
 static int xts_fallback_init(struct crypto_tfm *tfm)
@@ -750,108 +634,79 @@ static struct crypto_alg xts_aes_alg = {
 	}
 };
 
-static int xts_aes_alg_reg;
-
 static int ctr_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
 			   unsigned int key_len)
 {
 	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
+	unsigned long fc;
 
-	switch (key_len) {
-	case 16:
-		sctx->enc = CPACF_KMCTR_AES_128_ENC;
-		sctx->dec = CPACF_KMCTR_AES_128_DEC;
-		break;
-	case 24:
-		sctx->enc = CPACF_KMCTR_AES_192_ENC;
-		sctx->dec = CPACF_KMCTR_AES_192_DEC;
-		break;
-	case 32:
-		sctx->enc = CPACF_KMCTR_AES_256_ENC;
-		sctx->dec = CPACF_KMCTR_AES_256_DEC;
-		break;
-	}
+	/* Pick the correct function code based on the key length */
+	fc = (key_len == 16) ? CPACF_KMCTR_AES_128 :
+	     (key_len == 24) ? CPACF_KMCTR_AES_192 :
+	     (key_len == 32) ? CPACF_KMCTR_AES_256 : 0;
+
+	/* Check if the function code is available */
+	sctx->fc = (fc && cpacf_test_func(&kmctr_functions, fc)) ? fc : 0;
+	if (!sctx->fc)
+		return setkey_fallback_blk(tfm, in_key, key_len);
 
-	return aes_set_key(tfm, in_key, key_len);
+	sctx->key_len = key_len;
+	memcpy(sctx->key, in_key, key_len);
+	return 0;
 }
 
-static unsigned int __ctrblk_init(u8 *ctrptr, unsigned int nbytes)
+static unsigned int __ctrblk_init(u8 *ctrptr, u8 *iv, unsigned int nbytes)
 {
 	unsigned int i, n;
 
 	/* only use complete blocks, max. PAGE_SIZE */
+	memcpy(ctrptr, iv, AES_BLOCK_SIZE);
 	n = (nbytes > PAGE_SIZE) ? PAGE_SIZE : nbytes & ~(AES_BLOCK_SIZE - 1);
-	for (i = AES_BLOCK_SIZE; i < n; i += AES_BLOCK_SIZE) {
-		memcpy(ctrptr + i, ctrptr + i - AES_BLOCK_SIZE,
-		       AES_BLOCK_SIZE);
-		crypto_inc(ctrptr + i, AES_BLOCK_SIZE);
+	for (i = (n / AES_BLOCK_SIZE) - 1; i > 0; i--) {
+		memcpy(ctrptr + AES_BLOCK_SIZE, ctrptr, AES_BLOCK_SIZE);
+		crypto_inc(ctrptr + AES_BLOCK_SIZE, AES_BLOCK_SIZE);
+		ctrptr += AES_BLOCK_SIZE;
 	}
 	return n;
 }
 
-static int ctr_aes_crypt(struct blkcipher_desc *desc, long func,
-			 struct s390_aes_ctx *sctx, struct blkcipher_walk *walk)
+static int ctr_aes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
			 struct blkcipher_walk *walk)
 {
-	int ret = blkcipher_walk_virt_block(desc, walk, AES_BLOCK_SIZE);
+	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
+	u8 buf[AES_BLOCK_SIZE], *ctrptr;
 	unsigned int n, nbytes;
-	u8 buf[AES_BLOCK_SIZE], ctrbuf[AES_BLOCK_SIZE];
-	u8 *out, *in, *ctrptr = ctrbuf;
-
-	if (!walk->nbytes)
-		return ret;
+	int ret, locked;
 
-	if (spin_trylock(&ctrblk_lock))
-		ctrptr = ctrblk;
+	locked = spin_trylock(&ctrblk_lock);
 
-	memcpy(ctrptr, walk->iv, AES_BLOCK_SIZE);
+	ret = blkcipher_walk_virt_block(desc, walk, AES_BLOCK_SIZE);
 	while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
-		out = walk->dst.virt.addr;
-		in = walk->src.virt.addr;
-		while (nbytes >= AES_BLOCK_SIZE) {
-			if (ctrptr == ctrblk)
-				n = __ctrblk_init(ctrptr, nbytes);
-			else
-				n = AES_BLOCK_SIZE;
-			ret = cpacf_kmctr(func, sctx->key, out, in, n, ctrptr);
-			if (ret < 0 || ret != n) {
-				if (ctrptr == ctrblk)
-					spin_unlock(&ctrblk_lock);
-				return -EIO;
-			}
-			if (n > AES_BLOCK_SIZE)
-				memcpy(ctrptr, ctrptr + n - AES_BLOCK_SIZE,
-				       AES_BLOCK_SIZE);
-			crypto_inc(ctrptr, AES_BLOCK_SIZE);
-			out += n;
-			in += n;
-			nbytes -= n;
-		}
-		ret = blkcipher_walk_done(desc, walk, nbytes);
+		n = AES_BLOCK_SIZE;
+		if (nbytes >= 2*AES_BLOCK_SIZE && locked)
+			n = __ctrblk_init(ctrblk, walk->iv, nbytes);
+		ctrptr = (n > AES_BLOCK_SIZE) ? ctrblk : walk->iv;
+		cpacf_kmctr(sctx->fc | modifier, sctx->key,
+			    walk->dst.virt.addr, walk->src.virt.addr,
+			    n, ctrptr);
+		if (ctrptr == ctrblk)
+			memcpy(walk->iv, ctrptr + n - AES_BLOCK_SIZE,
+			       AES_BLOCK_SIZE);
+		crypto_inc(walk->iv, AES_BLOCK_SIZE);
+		ret = blkcipher_walk_done(desc, walk, nbytes - n);
 	}
-	if (ctrptr == ctrblk) {
-		if (nbytes)
-			memcpy(ctrbuf, ctrptr, AES_BLOCK_SIZE);
-		else
-			memcpy(walk->iv, ctrptr, AES_BLOCK_SIZE);
+	if (locked)
 		spin_unlock(&ctrblk_lock);
-	} else {
-		if (!nbytes)
-			memcpy(walk->iv, ctrptr, AES_BLOCK_SIZE);
-	}
 	/*
 	 * final block may be < AES_BLOCK_SIZE, copy only nbytes
 	 */
 	if (nbytes) {
-		out = walk->dst.virt.addr;
-		in = walk->src.virt.addr;
-		ret = cpacf_kmctr(func, sctx->key, buf, in,
-				  AES_BLOCK_SIZE, ctrbuf);
-		if (ret < 0 || ret != AES_BLOCK_SIZE)
-			return -EIO;
-		memcpy(out, buf, nbytes);
-		crypto_inc(ctrbuf, AES_BLOCK_SIZE);
+		cpacf_kmctr(sctx->fc | modifier, sctx->key,
+			    buf, walk->src.virt.addr,
+			    AES_BLOCK_SIZE, walk->iv);
+		memcpy(walk->dst.virt.addr, buf, nbytes);
+		crypto_inc(walk->iv, AES_BLOCK_SIZE);
 		ret = blkcipher_walk_done(desc, walk, 0);
-		memcpy(walk->iv, ctrbuf, AES_BLOCK_SIZE);
 	}
 
 	return ret;
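
[Review note] The reworked __ctrblk_init() copies the IV into the shared page and then replicates and increments it block by block. A stand-alone sketch of the same expansion, with crypto_inc() replaced by a local big-endian increment for illustration:

	#include <string.h>

	#define BLOCK_SIZE 16

	static void block_inc(unsigned char *b, int len)
	{
		int i;

		for (i = len - 1; i >= 0 && ++b[i] == 0; i--)
			;	/* big-endian ripple carry */
	}

	static unsigned int ctrblk_init_sketch(unsigned char *ctrptr,
					       const unsigned char *iv,
					       unsigned int nbytes,
					       unsigned int page_size)
	{
		unsigned int i, n;

		memcpy(ctrptr, iv, BLOCK_SIZE);
		n = (nbytes > page_size) ? page_size
					 : nbytes & ~(BLOCK_SIZE - 1);
		for (i = (n / BLOCK_SIZE) - 1; i > 0; i--) {
			memcpy(ctrptr + BLOCK_SIZE, ctrptr, BLOCK_SIZE);
			block_inc(ctrptr + BLOCK_SIZE, BLOCK_SIZE);
			ctrptr += BLOCK_SIZE;
		}
		return n;
	}
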
@@ -864,8 +719,11 @@ static int ctr_aes_encrypt(struct blkcipher_desc *desc,
 	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
 	struct blkcipher_walk walk;
 
+	if (unlikely(!sctx->fc))
+		return fallback_blk_enc(desc, dst, src, nbytes);
+
 	blkcipher_walk_init(&walk, dst, src, nbytes);
-	return ctr_aes_crypt(desc, sctx->enc, sctx, &walk);
+	return ctr_aes_crypt(desc, 0, &walk);
 }
 
 static int ctr_aes_decrypt(struct blkcipher_desc *desc,
@@ -875,19 +733,25 @@ static int ctr_aes_decrypt(struct blkcipher_desc *desc,
 	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
 	struct blkcipher_walk walk;
 
+	if (unlikely(!sctx->fc))
+		return fallback_blk_dec(desc, dst, src, nbytes);
+
 	blkcipher_walk_init(&walk, dst, src, nbytes);
-	return ctr_aes_crypt(desc, sctx->dec, sctx, &walk);
+	return ctr_aes_crypt(desc, CPACF_DECRYPT, &walk);
 }
 
 static struct crypto_alg ctr_aes_alg = {
 	.cra_name		=	"ctr(aes)",
 	.cra_driver_name	=	"ctr-aes-s390",
 	.cra_priority		=	400,	/* combo: aes + ctr */
-	.cra_flags		=	CRYPTO_ALG_TYPE_BLKCIPHER,
+	.cra_flags		=	CRYPTO_ALG_TYPE_BLKCIPHER |
+					CRYPTO_ALG_NEED_FALLBACK,
 	.cra_blocksize		=	1,
 	.cra_ctxsize		=	sizeof(struct s390_aes_ctx),
 	.cra_type		=	&crypto_blkcipher_type,
 	.cra_module		=	THIS_MODULE,
+	.cra_init		=	fallback_init_blk,
+	.cra_exit		=	fallback_exit_blk,
 	.cra_u			=	{
 		.blkcipher = {
 			.min_keysize		=	AES_MIN_KEY_SIZE,
@@ -900,89 +764,79 @@ static struct crypto_alg ctr_aes_alg = {
 	}
 };
 
-static int ctr_aes_alg_reg;
+static struct crypto_alg *aes_s390_algs_ptr[5];
+static int aes_s390_algs_num;
 
-static int __init aes_s390_init(void)
+static int aes_s390_register_alg(struct crypto_alg *alg)
 {
 	int ret;
 
-	if (cpacf_query(CPACF_KM, CPACF_KM_AES_128_ENC))
-		keylen_flag |= AES_KEYLEN_128;
-	if (cpacf_query(CPACF_KM, CPACF_KM_AES_192_ENC))
-		keylen_flag |= AES_KEYLEN_192;
-	if (cpacf_query(CPACF_KM, CPACF_KM_AES_256_ENC))
-		keylen_flag |= AES_KEYLEN_256;
+	ret = crypto_register_alg(alg);
+	if (!ret)
+		aes_s390_algs_ptr[aes_s390_algs_num++] = alg;
+	return ret;
+}
 
-	if (!keylen_flag)
-		return -EOPNOTSUPP;
+static void aes_s390_fini(void)
+{
+	while (aes_s390_algs_num--)
+		crypto_unregister_alg(aes_s390_algs_ptr[aes_s390_algs_num]);
+	if (ctrblk)
+		free_page((unsigned long) ctrblk);
+}
 
-	/* z9 109 and z9 BC/EC only support 128 bit key length */
-	if (keylen_flag == AES_KEYLEN_128)
-		pr_info("AES hardware acceleration is only available for"
-			" 128-bit keys\n");
+static int __init aes_s390_init(void)
+{
+	int ret;
 
-	ret = crypto_register_alg(&aes_alg);
-	if (ret)
-		goto aes_err;
+	/* Query available functions for KM, KMC and KMCTR */
+	cpacf_query(CPACF_KM, &km_functions);
+	cpacf_query(CPACF_KMC, &kmc_functions);
+	cpacf_query(CPACF_KMCTR, &kmctr_functions);
 
-	ret = crypto_register_alg(&ecb_aes_alg);
-	if (ret)
-		goto ecb_aes_err;
+	if (cpacf_test_func(&km_functions, CPACF_KM_AES_128) ||
+	    cpacf_test_func(&km_functions, CPACF_KM_AES_192) ||
+	    cpacf_test_func(&km_functions, CPACF_KM_AES_256)) {
+		ret = aes_s390_register_alg(&aes_alg);
+		if (ret)
+			goto out_err;
+		ret = aes_s390_register_alg(&ecb_aes_alg);
+		if (ret)
+			goto out_err;
+	}
 
-	ret = crypto_register_alg(&cbc_aes_alg);
-	if (ret)
-		goto cbc_aes_err;
+	if (cpacf_test_func(&kmc_functions, CPACF_KMC_AES_128) ||
+	    cpacf_test_func(&kmc_functions, CPACF_KMC_AES_192) ||
+	    cpacf_test_func(&kmc_functions, CPACF_KMC_AES_256)) {
+		ret = aes_s390_register_alg(&cbc_aes_alg);
+		if (ret)
+			goto out_err;
+	}
 
-	if (cpacf_query(CPACF_KM, CPACF_KM_XTS_128_ENC) &&
-	    cpacf_query(CPACF_KM, CPACF_KM_XTS_256_ENC)) {
-		ret = crypto_register_alg(&xts_aes_alg);
+	if (cpacf_test_func(&km_functions, CPACF_KM_XTS_128) ||
+	    cpacf_test_func(&km_functions, CPACF_KM_XTS_256)) {
+		ret = aes_s390_register_alg(&xts_aes_alg);
 		if (ret)
-			goto xts_aes_err;
-		xts_aes_alg_reg = 1;
+			goto out_err;
 	}
 
-	if (cpacf_query(CPACF_KMCTR, CPACF_KMCTR_AES_128_ENC) &&
-	    cpacf_query(CPACF_KMCTR, CPACF_KMCTR_AES_192_ENC) &&
-	    cpacf_query(CPACF_KMCTR, CPACF_KMCTR_AES_256_ENC)) {
+	if (cpacf_test_func(&kmctr_functions, CPACF_KMCTR_AES_128) ||
+	    cpacf_test_func(&kmctr_functions, CPACF_KMCTR_AES_192) ||
+	    cpacf_test_func(&kmctr_functions, CPACF_KMCTR_AES_256)) {
 		ctrblk = (u8 *) __get_free_page(GFP_KERNEL);
 		if (!ctrblk) {
 			ret = -ENOMEM;
-			goto ctr_aes_err;
+			goto out_err;
 		}
-		ret = crypto_register_alg(&ctr_aes_alg);
-		if (ret) {
-			free_page((unsigned long) ctrblk);
-			goto ctr_aes_err;
-		}
-		ctr_aes_alg_reg = 1;
+		ret = aes_s390_register_alg(&ctr_aes_alg);
+		if (ret)
+			goto out_err;
 	}
 
-out:
+	return 0;
+out_err:
+	aes_s390_fini();
 	return ret;
-
-ctr_aes_err:
-	crypto_unregister_alg(&xts_aes_alg);
-xts_aes_err:
-	crypto_unregister_alg(&cbc_aes_alg);
-cbc_aes_err:
-	crypto_unregister_alg(&ecb_aes_alg);
-ecb_aes_err:
-	goto out;
-}
-
-static void __exit aes_s390_fini(void)
-{
-	if (ctr_aes_alg_reg) {
-		crypto_unregister_alg(&ctr_aes_alg);
-		free_page((unsigned long) ctrblk);
-	}
-	if (xts_aes_alg_reg)
-		crypto_unregister_alg(&xts_aes_alg);
-	crypto_unregister_alg(&cbc_aes_alg);
-	crypto_unregister_alg(&ecb_aes_alg);
-	crypto_unregister_alg(&aes_alg);
-}
 }
 
 module_cpu_feature_match(MSA, aes_s390_init);