@@ -41,6 +41,7 @@
 #include <linux/platform_data/crypto-atmel.h>
 #include <dt-bindings/dma/at91.h>
 #include "atmel-aes-regs.h"
+#include "atmel-authenc.h"

 #define ATMEL_AES_PRIORITY 300

@@ -78,6 +79,7 @@
 #define AES_FLAGS_INIT BIT(2)
 #define AES_FLAGS_BUSY BIT(3)
 #define AES_FLAGS_DUMP_REG BIT(4)
+#define AES_FLAGS_OWN_SHA BIT(5)

 #define AES_FLAGS_PERSISTENT (AES_FLAGS_INIT | AES_FLAGS_BUSY)

@@ -92,6 +94,7 @@ struct atmel_aes_caps {
         bool has_ctr32;
         bool has_gcm;
         bool has_xts;
+        bool has_authenc;
         u32 max_burst_size;
 };

@@ -144,10 +147,31 @@ struct atmel_aes_xts_ctx {
         u32 key2[AES_KEYSIZE_256 / sizeof(u32)];
 };

+#ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC
+struct atmel_aes_authenc_ctx {
+        struct atmel_aes_base_ctx base;
+        struct atmel_sha_authenc_ctx *auth;
+};
+#endif
+
 struct atmel_aes_reqctx {
         unsigned long mode;
 };

+#ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC
+struct atmel_aes_authenc_reqctx {
+        struct atmel_aes_reqctx base;
+
+        struct scatterlist src[2];
+        struct scatterlist dst[2];
+        size_t textlen;
+        u32 digest[SHA512_DIGEST_SIZE / sizeof(u32)];
+
+        /* auth_req MUST be placed last. */
+        struct ahash_request auth_req;
+};
+#endif
+
 struct atmel_aes_dma {
         struct dma_chan *chan;
         struct scatterlist *sg;
@@ -291,6 +315,9 @@ static const char *atmel_aes_reg_name(u32 offset, char *tmp, size_t sz)
                 snprintf(tmp, sz, "GCMHR[%u]", (offset - AES_GCMHR(0)) >> 2);
                 break;

+        case AES_EMR:
+                return "EMR";
+
         case AES_TWR(0):
         case AES_TWR(1):
         case AES_TWR(2):
@@ -463,8 +490,16 @@ static inline bool atmel_aes_is_encrypt(const struct atmel_aes_dev *dd)
         return (dd->flags & AES_FLAGS_ENCRYPT);
 }

+#ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC
+static void atmel_aes_authenc_complete(struct atmel_aes_dev *dd, int err);
+#endif
+
 static inline int atmel_aes_complete(struct atmel_aes_dev *dd, int err)
 {
+#ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC
+        atmel_aes_authenc_complete(dd, err);
+#endif
+
         clk_disable(dd->iclk);
         dd->flags &= ~AES_FLAGS_BUSY;

@@ -1931,6 +1966,384 @@ static struct crypto_alg aes_xts_alg = {
         }
 };

+#ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC
+/* authenc aead functions */
+
+static int atmel_aes_authenc_start(struct atmel_aes_dev *dd);
+static int atmel_aes_authenc_init(struct atmel_aes_dev *dd, int err,
+                                  bool is_async);
+static int atmel_aes_authenc_transfer(struct atmel_aes_dev *dd, int err,
+                                      bool is_async);
+static int atmel_aes_authenc_digest(struct atmel_aes_dev *dd);
+static int atmel_aes_authenc_final(struct atmel_aes_dev *dd, int err,
+                                   bool is_async);
+
+static void atmel_aes_authenc_complete(struct atmel_aes_dev *dd, int err)
+{
+        struct aead_request *req = aead_request_cast(dd->areq);
+        struct atmel_aes_authenc_reqctx *rctx = aead_request_ctx(req);
+
+        if (err && (dd->flags & AES_FLAGS_OWN_SHA))
+                atmel_sha_authenc_abort(&rctx->auth_req);
+        dd->flags &= ~AES_FLAGS_OWN_SHA;
+}
+
+static int atmel_aes_authenc_start(struct atmel_aes_dev *dd)
+{
+        struct aead_request *req = aead_request_cast(dd->areq);
+        struct atmel_aes_authenc_reqctx *rctx = aead_request_ctx(req);
+        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+        struct atmel_aes_authenc_ctx *ctx = crypto_aead_ctx(tfm);
+        int err;
+
+        atmel_aes_set_mode(dd, &rctx->base);
+
+        err = atmel_aes_hw_init(dd);
+        if (err)
+                return atmel_aes_complete(dd, err);
+
+        return atmel_sha_authenc_schedule(&rctx->auth_req, ctx->auth,
+                                          atmel_aes_authenc_init, dd);
+}
+
+static int atmel_aes_authenc_init(struct atmel_aes_dev *dd, int err,
+                                  bool is_async)
+{
+        struct aead_request *req = aead_request_cast(dd->areq);
+        struct atmel_aes_authenc_reqctx *rctx = aead_request_ctx(req);
+
+        if (is_async)
+                dd->is_async = true;
+        if (err)
+                return atmel_aes_complete(dd, err);
+
+        /* If here, we've got the ownership of the SHA device. */
+        dd->flags |= AES_FLAGS_OWN_SHA;
+
+        /* Configure the SHA device. */
+        return atmel_sha_authenc_init(&rctx->auth_req,
+                                      req->src, req->assoclen,
+                                      rctx->textlen,
+                                      atmel_aes_authenc_transfer, dd);
+}
+
+static int atmel_aes_authenc_transfer(struct atmel_aes_dev *dd, int err,
+                                      bool is_async)
+{
+        struct aead_request *req = aead_request_cast(dd->areq);
+        struct atmel_aes_authenc_reqctx *rctx = aead_request_ctx(req);
+        bool enc = atmel_aes_is_encrypt(dd);
+        struct scatterlist *src, *dst;
+        u32 iv[AES_BLOCK_SIZE / sizeof(u32)];
+        u32 emr;
+
+        if (is_async)
+                dd->is_async = true;
+        if (err)
+                return atmel_aes_complete(dd, err);
+
+        /* Prepare src and dst scatter-lists to transfer cipher/plain texts. */
+        src = scatterwalk_ffwd(rctx->src, req->src, req->assoclen);
+        dst = src;
+
+        if (req->src != req->dst)
+                dst = scatterwalk_ffwd(rctx->dst, req->dst, req->assoclen);
+
+        /* Configure the AES device. */
+        memcpy(iv, req->iv, sizeof(iv));
+
+        /*
+         * Here we always set the 2nd parameter of atmel_aes_write_ctrl() to
+         * 'true' even if the data transfer is actually performed by the CPU (so
+         * not by the DMA) because we must force the AES_MR_SMOD bitfield to the
+         * value AES_MR_SMOD_IDATAR0. Indeed, both AES_MR_SMOD and SHA_MR_SMOD
+         * must be set to *_MR_SMOD_IDATAR0.
+         */
+        atmel_aes_write_ctrl(dd, true, iv);
+        emr = AES_EMR_PLIPEN;
+        if (!enc)
+                emr |= AES_EMR_PLIPD;
+        atmel_aes_write(dd, AES_EMR, emr);
+
+        /* Transfer data. */
+        return atmel_aes_dma_start(dd, src, dst, rctx->textlen,
+                                   atmel_aes_authenc_digest);
+}
+
+static int atmel_aes_authenc_digest(struct atmel_aes_dev *dd)
+{
+        struct aead_request *req = aead_request_cast(dd->areq);
+        struct atmel_aes_authenc_reqctx *rctx = aead_request_ctx(req);
+
+        /* atmel_sha_authenc_final() releases the SHA device. */
+        dd->flags &= ~AES_FLAGS_OWN_SHA;
+        return atmel_sha_authenc_final(&rctx->auth_req,
+                                       rctx->digest, sizeof(rctx->digest),
+                                       atmel_aes_authenc_final, dd);
+}
+
+static int atmel_aes_authenc_final(struct atmel_aes_dev *dd, int err,
+                                   bool is_async)
+{
+        struct aead_request *req = aead_request_cast(dd->areq);
+        struct atmel_aes_authenc_reqctx *rctx = aead_request_ctx(req);
+        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+        bool enc = atmel_aes_is_encrypt(dd);
+        u32 idigest[SHA512_DIGEST_SIZE / sizeof(u32)], *odigest = rctx->digest;
+        u32 offs, authsize;
+
+        if (is_async)
+                dd->is_async = true;
+        if (err)
+                goto complete;
+
+        offs = req->assoclen + rctx->textlen;
+        authsize = crypto_aead_authsize(tfm);
+        if (enc) {
+                scatterwalk_map_and_copy(odigest, req->dst, offs, authsize, 1);
+        } else {
+                scatterwalk_map_and_copy(idigest, req->src, offs, authsize, 0);
+                if (crypto_memneq(idigest, odigest, authsize))
+                        err = -EBADMSG;
+        }
+
+complete:
+        return atmel_aes_complete(dd, err);
+}
+
+static int atmel_aes_authenc_setkey(struct crypto_aead *tfm, const u8 *key,
+                                    unsigned int keylen)
+{
+        struct atmel_aes_authenc_ctx *ctx = crypto_aead_ctx(tfm);
+        struct crypto_authenc_keys keys;
+        u32 flags;
+        int err;
+
+        if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
+                goto badkey;
+
+        if (keys.enckeylen > sizeof(ctx->base.key))
+                goto badkey;
+
+        /* Save auth key. */
+        flags = crypto_aead_get_flags(tfm);
+        err = atmel_sha_authenc_setkey(ctx->auth,
+                                       keys.authkey, keys.authkeylen,
+                                       &flags);
+        crypto_aead_set_flags(tfm, flags & CRYPTO_TFM_RES_MASK);
+        if (err) {
+                memzero_explicit(&keys, sizeof(keys));
+                return err;
+        }
+
+        /* Save enc key. */
+        ctx->base.keylen = keys.enckeylen;
+        memcpy(ctx->base.key, keys.enckey, keys.enckeylen);
+
+        memzero_explicit(&keys, sizeof(keys));
+        return 0;
+
+badkey:
+        crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+        memzero_explicit(&keys, sizeof(keys));
+        return -EINVAL;
+}
+
+static int atmel_aes_authenc_init_tfm(struct crypto_aead *tfm,
+                                      unsigned long auth_mode)
+{
+        struct atmel_aes_authenc_ctx *ctx = crypto_aead_ctx(tfm);
+        unsigned int auth_reqsize = atmel_sha_authenc_get_reqsize();
+
+        ctx->auth = atmel_sha_authenc_spawn(auth_mode);
+        if (IS_ERR(ctx->auth))
+                return PTR_ERR(ctx->auth);
+
+        crypto_aead_set_reqsize(tfm, (sizeof(struct atmel_aes_authenc_reqctx) +
+                                      auth_reqsize));
+        ctx->base.start = atmel_aes_authenc_start;
+
+        return 0;
+}
+
+static int atmel_aes_authenc_hmac_sha1_init_tfm(struct crypto_aead *tfm)
+{
+        return atmel_aes_authenc_init_tfm(tfm, SHA_FLAGS_HMAC_SHA1);
+}
+
+static int atmel_aes_authenc_hmac_sha224_init_tfm(struct crypto_aead *tfm)
+{
+        return atmel_aes_authenc_init_tfm(tfm, SHA_FLAGS_HMAC_SHA224);
+}
+
+static int atmel_aes_authenc_hmac_sha256_init_tfm(struct crypto_aead *tfm)
+{
+        return atmel_aes_authenc_init_tfm(tfm, SHA_FLAGS_HMAC_SHA256);
+}
+
+static int atmel_aes_authenc_hmac_sha384_init_tfm(struct crypto_aead *tfm)
+{
+        return atmel_aes_authenc_init_tfm(tfm, SHA_FLAGS_HMAC_SHA384);
+}
+
+static int atmel_aes_authenc_hmac_sha512_init_tfm(struct crypto_aead *tfm)
+{
+        return atmel_aes_authenc_init_tfm(tfm, SHA_FLAGS_HMAC_SHA512);
+}
+
+static void atmel_aes_authenc_exit_tfm(struct crypto_aead *tfm)
+{
+        struct atmel_aes_authenc_ctx *ctx = crypto_aead_ctx(tfm);
+
+        atmel_sha_authenc_free(ctx->auth);
+}
+
+static int atmel_aes_authenc_crypt(struct aead_request *req,
+                                   unsigned long mode)
+{
+        struct atmel_aes_authenc_reqctx *rctx = aead_request_ctx(req);
+        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+        struct atmel_aes_base_ctx *ctx = crypto_aead_ctx(tfm);
+        u32 authsize = crypto_aead_authsize(tfm);
+        bool enc = (mode & AES_FLAGS_ENCRYPT);
+        struct atmel_aes_dev *dd;
+
+        /* Compute text length. */
+        if (!enc && req->cryptlen < authsize)
+                return -EINVAL;
+        rctx->textlen = req->cryptlen - (enc ? 0 : authsize);
+
+        /*
+         * Empty messages are not supported yet: the SHA auto-padding can be
+         * used only on non-empty messages. Hence a special case would need
+         * to be implemented for empty messages.
+         */
+        if (!rctx->textlen && !req->assoclen)
+                return -EINVAL;
+
+        rctx->base.mode = mode;
+        ctx->block_size = AES_BLOCK_SIZE;
+
+        dd = atmel_aes_find_dev(ctx);
+        if (!dd)
+                return -ENODEV;
+
+        return atmel_aes_handle_queue(dd, &req->base);
+}
+
+static int atmel_aes_authenc_cbc_aes_encrypt(struct aead_request *req)
+{
+        return atmel_aes_authenc_crypt(req, AES_FLAGS_CBC | AES_FLAGS_ENCRYPT);
+}
+
+static int atmel_aes_authenc_cbc_aes_decrypt(struct aead_request *req)
+{
+        return atmel_aes_authenc_crypt(req, AES_FLAGS_CBC);
+}
+
+static struct aead_alg aes_authenc_algs[] = {
+{
+        .setkey = atmel_aes_authenc_setkey,
+        .encrypt = atmel_aes_authenc_cbc_aes_encrypt,
+        .decrypt = atmel_aes_authenc_cbc_aes_decrypt,
+        .init = atmel_aes_authenc_hmac_sha1_init_tfm,
+        .exit = atmel_aes_authenc_exit_tfm,
+        .ivsize = AES_BLOCK_SIZE,
+        .maxauthsize = SHA1_DIGEST_SIZE,
+
+        .base = {
+                .cra_name = "authenc(hmac(sha1),cbc(aes))",
+                .cra_driver_name = "atmel-authenc-hmac-sha1-cbc-aes",
+                .cra_priority = ATMEL_AES_PRIORITY,
+                .cra_flags = CRYPTO_ALG_ASYNC,
+                .cra_blocksize = AES_BLOCK_SIZE,
+                .cra_ctxsize = sizeof(struct atmel_aes_authenc_ctx),
+                .cra_alignmask = 0xf,
+                .cra_module = THIS_MODULE,
+        },
+},
+{
+        .setkey = atmel_aes_authenc_setkey,
+        .encrypt = atmel_aes_authenc_cbc_aes_encrypt,
+        .decrypt = atmel_aes_authenc_cbc_aes_decrypt,
+        .init = atmel_aes_authenc_hmac_sha224_init_tfm,
+        .exit = atmel_aes_authenc_exit_tfm,
+        .ivsize = AES_BLOCK_SIZE,
+        .maxauthsize = SHA224_DIGEST_SIZE,
+
+        .base = {
+                .cra_name = "authenc(hmac(sha224),cbc(aes))",
+                .cra_driver_name = "atmel-authenc-hmac-sha224-cbc-aes",
+                .cra_priority = ATMEL_AES_PRIORITY,
+                .cra_flags = CRYPTO_ALG_ASYNC,
+                .cra_blocksize = AES_BLOCK_SIZE,
+                .cra_ctxsize = sizeof(struct atmel_aes_authenc_ctx),
+                .cra_alignmask = 0xf,
+                .cra_module = THIS_MODULE,
+        },
+},
+{
+        .setkey = atmel_aes_authenc_setkey,
+        .encrypt = atmel_aes_authenc_cbc_aes_encrypt,
+        .decrypt = atmel_aes_authenc_cbc_aes_decrypt,
+        .init = atmel_aes_authenc_hmac_sha256_init_tfm,
+        .exit = atmel_aes_authenc_exit_tfm,
+        .ivsize = AES_BLOCK_SIZE,
+        .maxauthsize = SHA256_DIGEST_SIZE,
+
+        .base = {
+                .cra_name = "authenc(hmac(sha256),cbc(aes))",
+                .cra_driver_name = "atmel-authenc-hmac-sha256-cbc-aes",
+                .cra_priority = ATMEL_AES_PRIORITY,
+                .cra_flags = CRYPTO_ALG_ASYNC,
+                .cra_blocksize = AES_BLOCK_SIZE,
+                .cra_ctxsize = sizeof(struct atmel_aes_authenc_ctx),
+                .cra_alignmask = 0xf,
+                .cra_module = THIS_MODULE,
+        },
+},
+{
+        .setkey = atmel_aes_authenc_setkey,
+        .encrypt = atmel_aes_authenc_cbc_aes_encrypt,
+        .decrypt = atmel_aes_authenc_cbc_aes_decrypt,
+        .init = atmel_aes_authenc_hmac_sha384_init_tfm,
+        .exit = atmel_aes_authenc_exit_tfm,
+        .ivsize = AES_BLOCK_SIZE,
+        .maxauthsize = SHA384_DIGEST_SIZE,
+
+        .base = {
+                .cra_name = "authenc(hmac(sha384),cbc(aes))",
+                .cra_driver_name = "atmel-authenc-hmac-sha384-cbc-aes",
+                .cra_priority = ATMEL_AES_PRIORITY,
+                .cra_flags = CRYPTO_ALG_ASYNC,
+                .cra_blocksize = AES_BLOCK_SIZE,
+                .cra_ctxsize = sizeof(struct atmel_aes_authenc_ctx),
+                .cra_alignmask = 0xf,
+                .cra_module = THIS_MODULE,
+        },
+},
+{
+        .setkey = atmel_aes_authenc_setkey,
+        .encrypt = atmel_aes_authenc_cbc_aes_encrypt,
+        .decrypt = atmel_aes_authenc_cbc_aes_decrypt,
+        .init = atmel_aes_authenc_hmac_sha512_init_tfm,
+        .exit = atmel_aes_authenc_exit_tfm,
+        .ivsize = AES_BLOCK_SIZE,
+        .maxauthsize = SHA512_DIGEST_SIZE,
+
+        .base = {
+                .cra_name = "authenc(hmac(sha512),cbc(aes))",
+                .cra_driver_name = "atmel-authenc-hmac-sha512-cbc-aes",
+                .cra_priority = ATMEL_AES_PRIORITY,
+                .cra_flags = CRYPTO_ALG_ASYNC,
+                .cra_blocksize = AES_BLOCK_SIZE,
+                .cra_ctxsize = sizeof(struct atmel_aes_authenc_ctx),
+                .cra_alignmask = 0xf,
+                .cra_module = THIS_MODULE,
+        },
+},
+};
+#endif /* CONFIG_CRYPTO_DEV_ATMEL_AUTHENC */
+

 /* Probe functions */

@@ -2040,6 +2453,12 @@ static void atmel_aes_unregister_algs(struct atmel_aes_dev *dd)
 {
         int i;

+#ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC
+        if (dd->caps.has_authenc)
+                for (i = 0; i < ARRAY_SIZE(aes_authenc_algs); i++)
+                        crypto_unregister_aead(&aes_authenc_algs[i]);
+#endif
+
         if (dd->caps.has_xts)
                 crypto_unregister_alg(&aes_xts_alg);

@@ -2081,8 +2500,25 @@ static int atmel_aes_register_algs(struct atmel_aes_dev *dd)
                         goto err_aes_xts_alg;
         }

+#ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC
+        if (dd->caps.has_authenc) {
+                for (i = 0; i < ARRAY_SIZE(aes_authenc_algs); i++) {
+                        err = crypto_register_aead(&aes_authenc_algs[i]);
+                        if (err)
+                                goto err_aes_authenc_alg;
+                }
+        }
+#endif
+
         return 0;

+#ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC
+        /* i = ARRAY_SIZE(aes_authenc_algs); */
+err_aes_authenc_alg:
+        for (j = 0; j < i; j++)
+                crypto_unregister_aead(&aes_authenc_algs[j]);
+        crypto_unregister_alg(&aes_xts_alg);
+#endif
 err_aes_xts_alg:
         crypto_unregister_aead(&aes_gcm_alg);
 err_aes_gcm_alg:
@@ -2103,6 +2539,7 @@ static void atmel_aes_get_cap(struct atmel_aes_dev *dd)
         dd->caps.has_ctr32 = 0;
         dd->caps.has_gcm = 0;
         dd->caps.has_xts = 0;
+        dd->caps.has_authenc = 0;
         dd->caps.max_burst_size = 1;

         /* keep only major version number */
@@ -2113,6 +2550,7 @@ static void atmel_aes_get_cap(struct atmel_aes_dev *dd)
                 dd->caps.has_ctr32 = 1;
                 dd->caps.has_gcm = 1;
                 dd->caps.has_xts = 1;
+                dd->caps.has_authenc = 1;
                 dd->caps.max_burst_size = 4;
                 break;
         case 0x200:
@@ -2271,6 +2709,13 @@ static int atmel_aes_probe(struct platform_device *pdev)

         atmel_aes_get_cap(aes_dd);

+#ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC
+        if (aes_dd->caps.has_authenc && !atmel_sha_authenc_is_ready()) {
+                err = -EPROBE_DEFER;
+                goto iclk_unprepare;
+        }
+#endif
+
         err = atmel_aes_buff_init(aes_dd);
         if (err)
                 goto err_aes_buff;
@@ -2307,7 +2752,8 @@ res_err:
         tasklet_kill(&aes_dd->done_task);
         tasklet_kill(&aes_dd->queue_task);
 aes_dd_err:
-        dev_err(dev, "initialization failed.\n");
+        if (err != -EPROBE_DEFER)
+                dev_err(dev, "initialization failed.\n");

         return err;
 }