- /*
- * Copyright (C) 2017 Marvell
- *
- * Antoine Tenart <antoine.tenart@free-electrons.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
- #include <linux/device.h>
- #include <linux/dma-mapping.h>
- #include <linux/dmapool.h>
- #include <crypto/aes.h>
- #include <crypto/skcipher.h>
- #include <crypto/internal/skcipher.h>
- #include "safexcel.h"
- enum safexcel_cipher_direction {
- SAFEXCEL_ENCRYPT,
- SAFEXCEL_DECRYPT,
- };
- struct safexcel_cipher_ctx {
- struct safexcel_context base;
- struct safexcel_crypto_priv *priv;
- u32 mode;
- __le32 key[8];
- unsigned int key_len;
- };
- struct safexcel_cipher_req {
- enum safexcel_cipher_direction direction;
- bool needs_inv;
- };
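- /*
-  * Build the instruction token for a cipher request. For CBC the IV is
-  * copied into the first four token words, so the direction token is
-  * written right after it; for ECB it starts at offset 0.
-  */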
- static void safexcel_cipher_token(struct safexcel_cipher_ctx *ctx,
- struct crypto_async_request *async,
- struct safexcel_command_desc *cdesc,
- u32 length)
- {
- struct skcipher_request *req = skcipher_request_cast(async);
- struct safexcel_token *token;
- unsigned int offset = 0;
- if (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CBC) {
- offset = AES_BLOCK_SIZE / sizeof(u32);
- memcpy(cdesc->control_data.token, req->iv, AES_BLOCK_SIZE);
- cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD;
- }
- token = (struct safexcel_token *)(cdesc->control_data.token + offset);
- token[0].opcode = EIP197_TOKEN_OPCODE_DIRECTION;
- token[0].packet_length = length;
- token[0].stat = EIP197_TOKEN_STAT_LAST_PACKET;
- token[0].instructions = EIP197_TOKEN_INS_LAST |
- EIP197_TOKEN_INS_TYPE_CRYTO |
- EIP197_TOKEN_INS_TYPE_OUTPUT;
- }
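- /*
-  * Expand the key to validate its length, then store the encryption
-  * subkeys in the software context. On EIP197, if a context record was
-  * already pushed to the engine and the key changed, flag the record for
-  * invalidation so the stale cached copy is not reused.
-  */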
- static int safexcel_aes_setkey(struct crypto_skcipher *ctfm, const u8 *key,
- unsigned int len)
- {
- struct crypto_tfm *tfm = crypto_skcipher_tfm(ctfm);
- struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
- struct safexcel_crypto_priv *priv = ctx->priv;
- struct crypto_aes_ctx aes;
- int ret, i;
- ret = crypto_aes_expand_key(&aes, key, len);
- if (ret) {
- crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
- return ret;
- }
- if (priv->version == EIP197 && ctx->base.ctxr_dma) {
- for (i = 0; i < len / sizeof(u32); i++) {
- if (ctx->key[i] != cpu_to_le32(aes.key_enc[i])) {
- ctx->base.needs_inv = true;
- break;
- }
- }
- }
- for (i = 0; i < len / sizeof(u32); i++)
- ctx->key[i] = cpu_to_le32(aes.key_enc[i]);
- ctx->key_len = len;
- memzero_explicit(&aes, sizeof(aes));
- return 0;
- }
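- /*
-  * Fill the command descriptor control words: crypto direction, key
-  * enable, cipher mode, and the AES variant (plus context size) matching
-  * the stored key length.
-  */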
- static int safexcel_context_control(struct safexcel_cipher_ctx *ctx,
- struct crypto_async_request *async,
- struct safexcel_command_desc *cdesc)
- {
- struct safexcel_crypto_priv *priv = ctx->priv;
- struct skcipher_request *req = skcipher_request_cast(async);
- struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
- int ctrl_size;
- if (sreq->direction == SAFEXCEL_ENCRYPT)
- cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_CRYPTO_OUT;
- else
- cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_CRYPTO_IN;
- cdesc->control_data.control0 |= CONTEXT_CONTROL_KEY_EN;
- cdesc->control_data.control1 |= ctx->mode;
- switch (ctx->key_len) {
- case AES_KEYSIZE_128:
- cdesc->control_data.control0 |= CONTEXT_CONTROL_CRYPTO_ALG_AES128;
- ctrl_size = 4;
- break;
- case AES_KEYSIZE_192:
- cdesc->control_data.control0 |= CONTEXT_CONTROL_CRYPTO_ALG_AES192;
- ctrl_size = 6;
- break;
- case AES_KEYSIZE_256:
- cdesc->control_data.control0 |= CONTEXT_CONTROL_CRYPTO_ALG_AES256;
- ctrl_size = 8;
- break;
- default:
- dev_err(priv->dev, "aes keysize not supported: %u\n",
- ctx->key_len);
- return -EINVAL;
- }
- cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(ctrl_size);
- return 0;
- }
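- /*
-  * Walk the result descriptor ring up to the last segment of this
-  * request, report any descriptor error, then unmap the DMA buffers and
-  * tell the caller to complete the request.
-  */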
- static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int ring,
- struct crypto_async_request *async,
- bool *should_complete, int *ret)
- {
- struct skcipher_request *req = skcipher_request_cast(async);
- struct safexcel_result_desc *rdesc;
- int ndesc = 0;
- *ret = 0;
- spin_lock_bh(&priv->ring[ring].egress_lock);
- do {
- rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
- if (IS_ERR(rdesc)) {
- dev_err(priv->dev,
- "cipher: result: could not retrieve the result descriptor\n");
- *ret = PTR_ERR(rdesc);
- break;
- }
- if (rdesc->result_data.error_code) {
- dev_err(priv->dev,
- "cipher: result: result descriptor error (%d)\n",
- rdesc->result_data.error_code);
- *ret = -EIO;
- }
- ndesc++;
- } while (!rdesc->last_seg);
- safexcel_complete(priv, ring);
- spin_unlock_bh(&priv->ring[ring].egress_lock);
- if (req->src == req->dst) {
- dma_unmap_sg(priv->dev, req->src,
- sg_nents_for_len(req->src, req->cryptlen),
- DMA_BIDIRECTIONAL);
- } else {
- dma_unmap_sg(priv->dev, req->src,
- sg_nents_for_len(req->src, req->cryptlen),
- DMA_TO_DEVICE);
- dma_unmap_sg(priv->dev, req->dst,
- sg_nents_for_len(req->dst, req->cryptlen),
- DMA_FROM_DEVICE);
- }
- *should_complete = true;
- return ndesc;
- }
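- /*
-  * Map the source and destination scatterlists, then queue one command
-  * descriptor per source segment and one result descriptor per
-  * destination segment. The context control words and the token only go
-  * into the first command descriptor. On failure both rings are rolled
-  * back and the buffers unmapped.
-  */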
- static int safexcel_aes_send(struct crypto_async_request *async,
- int ring, struct safexcel_request *request,
- int *commands, int *results)
- {
- struct skcipher_request *req = skcipher_request_cast(async);
- struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
- struct safexcel_crypto_priv *priv = ctx->priv;
- struct safexcel_command_desc *cdesc;
- struct safexcel_result_desc *rdesc;
- struct scatterlist *sg;
- int nr_src, nr_dst, n_cdesc = 0, n_rdesc = 0, queued = req->cryptlen;
- int i, ret = 0;
- if (req->src == req->dst) {
- nr_src = dma_map_sg(priv->dev, req->src,
- sg_nents_for_len(req->src, req->cryptlen),
- DMA_BIDIRECTIONAL);
- nr_dst = nr_src;
- if (!nr_src)
- return -EINVAL;
- } else {
- nr_src = dma_map_sg(priv->dev, req->src,
- sg_nents_for_len(req->src, req->cryptlen),
- DMA_TO_DEVICE);
- if (!nr_src)
- return -EINVAL;
- nr_dst = dma_map_sg(priv->dev, req->dst,
- sg_nents_for_len(req->dst, req->cryptlen),
- DMA_FROM_DEVICE);
- if (!nr_dst) {
- dma_unmap_sg(priv->dev, req->src,
- sg_nents_for_len(req->src, req->cryptlen),
- DMA_TO_DEVICE);
- return -EINVAL;
- }
- }
- memcpy(ctx->base.ctxr->data, ctx->key, ctx->key_len);
- spin_lock_bh(&priv->ring[ring].egress_lock);
- /* command descriptors */
- for_each_sg(req->src, sg, nr_src, i) {
- int len = sg_dma_len(sg);
- /* Do not overflow the request */
- if (queued - len < 0)
- len = queued;
- cdesc = safexcel_add_cdesc(priv, ring, !n_cdesc, !(queued - len),
- sg_dma_address(sg), len, req->cryptlen,
- ctx->base.ctxr_dma);
- if (IS_ERR(cdesc)) {
- /* No space left in the command descriptor ring */
- ret = PTR_ERR(cdesc);
- goto cdesc_rollback;
- }
- n_cdesc++;
- if (n_cdesc == 1) {
- safexcel_context_control(ctx, async, cdesc);
- safexcel_cipher_token(ctx, async, cdesc, req->cryptlen);
- }
- queued -= len;
- if (!queued)
- break;
- }
- /* result descriptors */
- for_each_sg(req->dst, sg, nr_dst, i) {
- bool first = !i, last = (i == nr_dst - 1);
- u32 len = sg_dma_len(sg);
- rdesc = safexcel_add_rdesc(priv, ring, first, last,
- sg_dma_address(sg), len);
- if (IS_ERR(rdesc)) {
- /* No space left in the result descriptor ring */
- ret = PTR_ERR(rdesc);
- goto rdesc_rollback;
- }
- n_rdesc++;
- }
- spin_unlock_bh(&priv->ring[ring].egress_lock);
- request->req = &req->base;
- *commands = n_cdesc;
- *results = n_rdesc;
- return 0;
- rdesc_rollback:
- for (i = 0; i < n_rdesc; i++)
- safexcel_ring_rollback_wptr(priv, &priv->ring[ring].rdr);
- cdesc_rollback:
- for (i = 0; i < n_cdesc; i++)
- safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr);
- spin_unlock_bh(&priv->ring[ring].egress_lock);
- if (req->src == req->dst) {
- dma_unmap_sg(priv->dev, req->src,
- sg_nents_for_len(req->src, req->cryptlen),
- DMA_BIDIRECTIONAL);
- } else {
- dma_unmap_sg(priv->dev, req->src,
- sg_nents_for_len(req->src, req->cryptlen),
- DMA_TO_DEVICE);
- dma_unmap_sg(priv->dev, req->dst,
- sg_nents_for_len(req->dst, req->cryptlen),
- DMA_FROM_DEVICE);
- }
- return ret;
- }
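- /*
-  * Handle the result of a context invalidation. When the transform is
-  * being torn down, free the context record and complete; otherwise
-  * re-enqueue the request on a freshly selected ring so the actual
-  * cipher operation can now run against a clean context.
-  */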
- static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
- int ring,
- struct crypto_async_request *async,
- bool *should_complete, int *ret)
- {
- struct skcipher_request *req = skcipher_request_cast(async);
- struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
- struct safexcel_result_desc *rdesc;
- int ndesc = 0, enq_ret;
- *ret = 0;
- spin_lock_bh(&priv->ring[ring].egress_lock);
- do {
- rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
- if (IS_ERR(rdesc)) {
- dev_err(priv->dev,
- "cipher: invalidate: could not retrieve the result descriptor\n");
- *ret = PTR_ERR(rdesc);
- break;
- }
- if (rdesc->result_data.error_code) {
- dev_err(priv->dev, "cipher: invalidate: result descriptor error (%d)\n",
- rdesc->result_data.error_code);
- *ret = -EIO;
- }
- ndesc++;
- } while (!rdesc->last_seg);
- safexcel_complete(priv, ring);
- spin_unlock_bh(&priv->ring[ring].egress_lock);
- if (ctx->base.exit_inv) {
- dma_pool_free(priv->context_pool, ctx->base.ctxr,
- ctx->base.ctxr_dma);
- *should_complete = true;
- return ndesc;
- }
- ring = safexcel_select_ring(priv);
- ctx->base.ring = ring;
- spin_lock_bh(&priv->ring[ring].queue_lock);
- enq_ret = crypto_enqueue_request(&priv->ring[ring].queue, async);
- spin_unlock_bh(&priv->ring[ring].queue_lock);
- if (enq_ret != -EINPROGRESS)
- *ret = enq_ret;
- queue_work(priv->ring[ring].workqueue,
- &priv->ring[ring].work_data.work);
- *should_complete = false;
- return ndesc;
- }
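- /*
-  * Dispatch a completed descriptor chain to the invalidation or the
-  * regular result handler, depending on what the request carried.
-  */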
- static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring,
- struct crypto_async_request *async,
- bool *should_complete, int *ret)
- {
- struct skcipher_request *req = skcipher_request_cast(async);
- struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
- int err;
- if (sreq->needs_inv) {
- sreq->needs_inv = false;
- err = safexcel_handle_inv_result(priv, ring, async,
- should_complete, ret);
- } else {
- err = safexcel_handle_req_result(priv, ring, async,
- should_complete, ret);
- }
- return err;
- }
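- /*
-  * Queue a cache invalidation command for this transform's context
-  * record; it consumes exactly one command and one result descriptor.
-  */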
- static int safexcel_cipher_send_inv(struct crypto_async_request *async,
- int ring, struct safexcel_request *request,
- int *commands, int *results)
- {
- struct skcipher_request *req = skcipher_request_cast(async);
- struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
- struct safexcel_crypto_priv *priv = ctx->priv;
- int ret;
- ret = safexcel_invalidate_cache(async, priv,
- ctx->base.ctxr_dma, ring, request);
- if (unlikely(ret))
- return ret;
- *commands = 1;
- *results = 1;
- return 0;
- }
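- /*
-  * Request send entry point. EIP97 has no context cache to invalidate,
-  * so an invalidation request must never reach it; otherwise pick the
-  * invalidation or the AES data path.
-  */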
- static int safexcel_send(struct crypto_async_request *async,
- int ring, struct safexcel_request *request,
- int *commands, int *results)
- {
- struct skcipher_request *req = skcipher_request_cast(async);
- struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
- struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
- struct safexcel_crypto_priv *priv = ctx->priv;
- int ret;
- BUG_ON(priv->version == EIP97 && sreq->needs_inv);
- if (sreq->needs_inv)
- ret = safexcel_cipher_send_inv(async, ring, request,
- commands, results);
- else
- ret = safexcel_aes_send(async, ring, request,
- commands, results);
- return ret;
- }
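- /*
-  * Synchronously invalidate the context record: build an invalidation
-  * request on the stack, queue it on the transform's current ring and
-  * wait for the completion callback before returning.
-  */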
- static int safexcel_cipher_exit_inv(struct crypto_tfm *tfm)
- {
- struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
- struct safexcel_crypto_priv *priv = ctx->priv;
- SKCIPHER_REQUEST_ON_STACK(req, __crypto_skcipher_cast(tfm));
- struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
- struct safexcel_inv_result result = {};
- int ring = ctx->base.ring;
- memset(req, 0, sizeof(struct skcipher_request));
- /* create invalidation request */
- init_completion(&result.completion);
- skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
- safexcel_inv_complete, &result);
- skcipher_request_set_tfm(req, __crypto_skcipher_cast(tfm));
- ctx = crypto_tfm_ctx(req->base.tfm);
- ctx->base.exit_inv = true;
- sreq->needs_inv = true;
- spin_lock_bh(&priv->ring[ring].queue_lock);
- crypto_enqueue_request(&priv->ring[ring].queue, &req->base);
- spin_unlock_bh(&priv->ring[ring].queue_lock);
- queue_work(priv->ring[ring].workqueue,
- &priv->ring[ring].work_data.work);
- wait_for_completion(&result.completion);
- if (result.error) {
- dev_warn(priv->dev,
- "cipher: sync: invalidate: completion error %d\n",
- result.error);
- return result.error;
- }
- return 0;
- }
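- /*
-  * Common encrypt/decrypt entry point: allocate the context record on
-  * first use, or (EIP197 only) flag the request for invalidation if the
-  * key changed since the record was written, then enqueue the request
-  * and kick the ring's worker.
-  */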
- static int safexcel_aes(struct skcipher_request *req,
- enum safexcel_cipher_direction dir, u32 mode)
- {
- struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
- struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
- struct safexcel_crypto_priv *priv = ctx->priv;
- int ret, ring;
- sreq->needs_inv = false;
- sreq->direction = dir;
- ctx->mode = mode;
- if (ctx->base.ctxr) {
- if (priv->version == EIP197 && ctx->base.needs_inv) {
- sreq->needs_inv = true;
- ctx->base.needs_inv = false;
- }
- } else {
- ctx->base.ring = safexcel_select_ring(priv);
- ctx->base.ctxr = dma_pool_zalloc(priv->context_pool,
- EIP197_GFP_FLAGS(req->base),
- &ctx->base.ctxr_dma);
- if (!ctx->base.ctxr)
- return -ENOMEM;
- }
- ring = ctx->base.ring;
- spin_lock_bh(&priv->ring[ring].queue_lock);
- ret = crypto_enqueue_request(&priv->ring[ring].queue, &req->base);
- spin_unlock_bh(&priv->ring[ring].queue_lock);
- queue_work(priv->ring[ring].workqueue,
- &priv->ring[ring].work_data.work);
- return ret;
- }
- static int safexcel_ecb_aes_encrypt(struct skcipher_request *req)
- {
- return safexcel_aes(req, SAFEXCEL_ENCRYPT,
- CONTEXT_CONTROL_CRYPTO_MODE_ECB);
- }
- static int safexcel_ecb_aes_decrypt(struct skcipher_request *req)
- {
- return safexcel_aes(req, SAFEXCEL_DECRYPT,
- CONTEXT_CONTROL_CRYPTO_MODE_ECB);
- }
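- /*
-  * Bind the transform to its device instance, hook up the send/result
-  * handlers and size the per-request context.
-  */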
- static int safexcel_skcipher_cra_init(struct crypto_tfm *tfm)
- {
- struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
- struct safexcel_alg_template *tmpl =
- container_of(tfm->__crt_alg, struct safexcel_alg_template,
- alg.skcipher.base);
- ctx->priv = tmpl->priv;
- ctx->base.send = safexcel_send;
- ctx->base.handle_result = safexcel_handle_result;
- crypto_skcipher_set_reqsize(__crypto_skcipher_cast(tfm),
- sizeof(struct safexcel_cipher_req));
- return 0;
- }
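- /*
-  * On transform exit, wipe the key material from both the software and
-  * the hardware context. EIP197 must invalidate the record before it is
-  * freed (the free happens in the invalidation result handler); EIP97
-  * can free it directly.
-  */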
- static void safexcel_skcipher_cra_exit(struct crypto_tfm *tfm)
- {
- struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
- struct safexcel_crypto_priv *priv = ctx->priv;
- int ret;
- memzero_explicit(ctx->key, 8 * sizeof(u32));
- /* context not allocated, skip invalidation */
- if (!ctx->base.ctxr)
- return;
- memzero_explicit(ctx->base.ctxr->data, 8 * sizeof(u32));
- if (priv->version == EIP197) {
- ret = safexcel_cipher_exit_inv(tfm);
- if (ret)
- dev_warn(priv->dev, "cipher: invalidation error %d\n", ret);
- } else {
- dma_pool_free(priv->context_pool, ctx->base.ctxr,
- ctx->base.ctxr_dma);
- }
- }
- struct safexcel_alg_template safexcel_alg_ecb_aes = {
- .type = SAFEXCEL_ALG_TYPE_SKCIPHER,
- .alg.skcipher = {
- .setkey = safexcel_aes_setkey,
- .encrypt = safexcel_ecb_aes_encrypt,
- .decrypt = safexcel_ecb_aes_decrypt,
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .base = {
- .cra_name = "ecb(aes)",
- .cra_driver_name = "safexcel-ecb-aes",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_KERN_DRIVER_ONLY,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
- .cra_alignmask = 0,
- .cra_init = safexcel_skcipher_cra_init,
- .cra_exit = safexcel_skcipher_cra_exit,
- .cra_module = THIS_MODULE,
- },
- },
- };
- static int safexcel_cbc_aes_encrypt(struct skcipher_request *req)
- {
- return safexcel_aes(req, SAFEXCEL_ENCRYPT,
- CONTEXT_CONTROL_CRYPTO_MODE_CBC);
- }
- static int safexcel_cbc_aes_decrypt(struct skcipher_request *req)
- {
- return safexcel_aes(req, SAFEXCEL_DECRYPT,
- CONTEXT_CONTROL_CRYPTO_MODE_CBC);
- }
- struct safexcel_alg_template safexcel_alg_cbc_aes = {
- .type = SAFEXCEL_ALG_TYPE_SKCIPHER,
- .alg.skcipher = {
- .setkey = safexcel_aes_setkey,
- .encrypt = safexcel_cbc_aes_encrypt,
- .decrypt = safexcel_cbc_aes_decrypt,
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .base = {
- .cra_name = "cbc(aes)",
- .cra_driver_name = "safexcel-cbc-aes",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_KERN_DRIVER_ONLY,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
- .cra_alignmask = 0,
- .cra_init = safexcel_skcipher_cra_init,
- .cra_exit = safexcel_skcipher_cra_exit,
- .cra_module = THIS_MODULE,
- },
- },
- };
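- /*
-  * A minimal usage sketch (not part of this driver): once registered,
-  * these transforms are reached through the standard kernel crypto API,
-  * e.g.:
-  *
-  *	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
-  *	crypto_skcipher_setkey(tfm, key, AES_KEYSIZE_128);
-  *	... fill an skcipher_request with src/dst scatterlists and an IV,
-  *	then call crypto_skcipher_encrypt() or crypto_skcipher_decrypt() ...
-  *	crypto_free_skcipher(tfm);
-  */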