safexcel_cipher.c

/*
 * Copyright (C) 2017 Marvell
 *
 * Antoine Tenart <antoine.tenart@free-electrons.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>

#include <crypto/aes.h>
#include <crypto/skcipher.h>
#include <crypto/internal/skcipher.h>

#include "safexcel.h"

enum safexcel_cipher_direction {
        SAFEXCEL_ENCRYPT,
        SAFEXCEL_DECRYPT,
};

struct safexcel_cipher_ctx {
        struct safexcel_context base;
        struct safexcel_crypto_priv *priv;

        u32 mode;

        __le32 key[8];
        unsigned int key_len;
};

struct safexcel_cipher_req {
        enum safexcel_cipher_direction direction;
        bool needs_inv;
};

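/*
 * Build the engine token in the command descriptor: a single DIRECTION
 * instruction covering the whole payload. In CBC mode the IV is first
 * copied into the token area of the context control data and the token
 * itself is written after it.
 */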
static void safexcel_cipher_token(struct safexcel_cipher_ctx *ctx,
                                  struct crypto_async_request *async,
                                  struct safexcel_command_desc *cdesc,
                                  u32 length)
{
        struct skcipher_request *req = skcipher_request_cast(async);
        struct safexcel_token *token;
        unsigned int offset = 0;

        if (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CBC) {
                offset = AES_BLOCK_SIZE / sizeof(u32);
                memcpy(cdesc->control_data.token, req->iv, AES_BLOCK_SIZE);

                cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD;
        }

        token = (struct safexcel_token *)(cdesc->control_data.token + offset);

        token[0].opcode = EIP197_TOKEN_OPCODE_DIRECTION;
        token[0].packet_length = length;
        token[0].stat = EIP197_TOKEN_STAT_LAST_PACKET;
        token[0].instructions = EIP197_TOKEN_INS_LAST |
                                EIP197_TOKEN_INS_TYPE_CRYTO |
                                EIP197_TOKEN_INS_TYPE_OUTPUT;
}

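/*
 * Expand and cache the AES key. On EIP197, if a context record already
 * exists and the key changed, flag the context for invalidation so the
 * engine does not keep using the stale cached copy.
 */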
static int safexcel_aes_setkey(struct crypto_skcipher *ctfm, const u8 *key,
                               unsigned int len)
{
        struct crypto_tfm *tfm = crypto_skcipher_tfm(ctfm);
        struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
        struct safexcel_crypto_priv *priv = ctx->priv;
        struct crypto_aes_ctx aes;
        int ret, i;

        ret = crypto_aes_expand_key(&aes, key, len);
        if (ret) {
                crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
                return ret;
        }

        if (priv->version == EIP197 && ctx->base.ctxr_dma) {
                for (i = 0; i < len / sizeof(u32); i++) {
                        if (ctx->key[i] != cpu_to_le32(aes.key_enc[i])) {
                                ctx->base.needs_inv = true;
                                break;
                        }
                }
        }

        for (i = 0; i < len / sizeof(u32); i++)
                ctx->key[i] = cpu_to_le32(aes.key_enc[i]);

        ctx->key_len = len;

        memzero_explicit(&aes, sizeof(aes));
        return 0;
}

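/*
 * Fill in the control words of the command descriptor: crypto direction,
 * key enable, cipher mode, and the AES variant plus context size derived
 * from the key length.
 */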
static int safexcel_context_control(struct safexcel_cipher_ctx *ctx,
                                    struct crypto_async_request *async,
                                    struct safexcel_command_desc *cdesc)
{
        struct safexcel_crypto_priv *priv = ctx->priv;
        struct skcipher_request *req = skcipher_request_cast(async);
        struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
        int ctrl_size;

        if (sreq->direction == SAFEXCEL_ENCRYPT)
                cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_CRYPTO_OUT;
        else
                cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_CRYPTO_IN;

        cdesc->control_data.control0 |= CONTEXT_CONTROL_KEY_EN;
        cdesc->control_data.control1 |= ctx->mode;

        switch (ctx->key_len) {
        case AES_KEYSIZE_128:
                cdesc->control_data.control0 |= CONTEXT_CONTROL_CRYPTO_ALG_AES128;
                ctrl_size = 4;
                break;
        case AES_KEYSIZE_192:
                cdesc->control_data.control0 |= CONTEXT_CONTROL_CRYPTO_ALG_AES192;
                ctrl_size = 6;
                break;
        case AES_KEYSIZE_256:
                cdesc->control_data.control0 |= CONTEXT_CONTROL_CRYPTO_ALG_AES256;
                ctrl_size = 8;
                break;
        default:
                dev_err(priv->dev, "aes keysize not supported: %u\n",
                        ctx->key_len);
                return -EINVAL;
        }

        cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(ctrl_size);

        return 0;
}

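/*
 * Completion path for a regular cipher request: consume the result
 * descriptors belonging to this request, report any engine error and
 * unmap the source/destination scatterlists.
 */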
static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int ring,
                                      struct crypto_async_request *async,
                                      bool *should_complete, int *ret)
{
        struct skcipher_request *req = skcipher_request_cast(async);
        struct safexcel_result_desc *rdesc;
        int ndesc = 0;

        *ret = 0;

        spin_lock_bh(&priv->ring[ring].egress_lock);
        do {
                rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
                if (IS_ERR(rdesc)) {
                        dev_err(priv->dev,
                                "cipher: result: could not retrieve the result descriptor\n");
                        *ret = PTR_ERR(rdesc);
                        break;
                }

                if (rdesc->result_data.error_code) {
                        dev_err(priv->dev,
                                "cipher: result: result descriptor error (%d)\n",
                                rdesc->result_data.error_code);
                        *ret = -EIO;
                }

                ndesc++;
        } while (!rdesc->last_seg);

        safexcel_complete(priv, ring);
        spin_unlock_bh(&priv->ring[ring].egress_lock);

        if (req->src == req->dst) {
                dma_unmap_sg(priv->dev, req->src,
                             sg_nents_for_len(req->src, req->cryptlen),
                             DMA_BIDIRECTIONAL);
        } else {
                dma_unmap_sg(priv->dev, req->src,
                             sg_nents_for_len(req->src, req->cryptlen),
                             DMA_TO_DEVICE);
                dma_unmap_sg(priv->dev, req->dst,
                             sg_nents_for_len(req->dst, req->cryptlen),
                             DMA_FROM_DEVICE);
        }

        *should_complete = true;

        return ndesc;
}

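/*
 * Prepare a cipher request for the engine: DMA-map the scatterlists,
 * copy the key into the context record, then queue one command
 * descriptor per source segment and one result descriptor per
 * destination segment, rolling everything back if a ring is full.
 */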
static int safexcel_aes_send(struct crypto_async_request *async,
                             int ring, struct safexcel_request *request,
                             int *commands, int *results)
{
        struct skcipher_request *req = skcipher_request_cast(async);
        struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
        struct safexcel_crypto_priv *priv = ctx->priv;
        struct safexcel_command_desc *cdesc;
        struct safexcel_result_desc *rdesc;
        struct scatterlist *sg;
        int nr_src, nr_dst, n_cdesc = 0, n_rdesc = 0, queued = req->cryptlen;
        int i, ret = 0;

        if (req->src == req->dst) {
                nr_src = dma_map_sg(priv->dev, req->src,
                                    sg_nents_for_len(req->src, req->cryptlen),
                                    DMA_BIDIRECTIONAL);
                nr_dst = nr_src;
                if (!nr_src)
                        return -EINVAL;
        } else {
                nr_src = dma_map_sg(priv->dev, req->src,
                                    sg_nents_for_len(req->src, req->cryptlen),
                                    DMA_TO_DEVICE);
                if (!nr_src)
                        return -EINVAL;

                nr_dst = dma_map_sg(priv->dev, req->dst,
                                    sg_nents_for_len(req->dst, req->cryptlen),
                                    DMA_FROM_DEVICE);
                if (!nr_dst) {
                        dma_unmap_sg(priv->dev, req->src,
                                     sg_nents_for_len(req->src, req->cryptlen),
                                     DMA_TO_DEVICE);
                        return -EINVAL;
                }
        }

        memcpy(ctx->base.ctxr->data, ctx->key, ctx->key_len);

        spin_lock_bh(&priv->ring[ring].egress_lock);

        /* command descriptors */
        for_each_sg(req->src, sg, nr_src, i) {
                int len = sg_dma_len(sg);

                /* Do not overflow the request */
                if (queued - len < 0)
                        len = queued;

                cdesc = safexcel_add_cdesc(priv, ring, !n_cdesc, !(queued - len),
                                           sg_dma_address(sg), len, req->cryptlen,
                                           ctx->base.ctxr_dma);
                if (IS_ERR(cdesc)) {
                        /* No space left in the command descriptor ring */
                        ret = PTR_ERR(cdesc);
                        goto cdesc_rollback;
                }
                n_cdesc++;

                if (n_cdesc == 1) {
                        safexcel_context_control(ctx, async, cdesc);
                        safexcel_cipher_token(ctx, async, cdesc, req->cryptlen);
                }

                queued -= len;
                if (!queued)
                        break;
        }

        /* result descriptors */
        for_each_sg(req->dst, sg, nr_dst, i) {
                bool first = !i, last = (i == nr_dst - 1);
                u32 len = sg_dma_len(sg);

                rdesc = safexcel_add_rdesc(priv, ring, first, last,
                                           sg_dma_address(sg), len);
                if (IS_ERR(rdesc)) {
                        /* No space left in the result descriptor ring */
                        ret = PTR_ERR(rdesc);
                        goto rdesc_rollback;
                }
                n_rdesc++;
        }

        spin_unlock_bh(&priv->ring[ring].egress_lock);

        request->req = &req->base;

        *commands = n_cdesc;
        *results = n_rdesc;
        return 0;

rdesc_rollback:
        for (i = 0; i < n_rdesc; i++)
                safexcel_ring_rollback_wptr(priv, &priv->ring[ring].rdr);
cdesc_rollback:
        for (i = 0; i < n_cdesc; i++)
                safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr);
        spin_unlock_bh(&priv->ring[ring].egress_lock);

        if (req->src == req->dst) {
                dma_unmap_sg(priv->dev, req->src,
                             sg_nents_for_len(req->src, req->cryptlen),
                             DMA_BIDIRECTIONAL);
        } else {
                dma_unmap_sg(priv->dev, req->src,
                             sg_nents_for_len(req->src, req->cryptlen),
                             DMA_TO_DEVICE);
                dma_unmap_sg(priv->dev, req->dst,
                             sg_nents_for_len(req->dst, req->cryptlen),
                             DMA_FROM_DEVICE);
        }

        return ret;
}

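/*
 * Completion path for a context-invalidation request: if the tfm is
 * being torn down, free the context record; otherwise move the context
 * to a freshly selected ring and re-queue the original request.
 */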
static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
                                      int ring,
                                      struct crypto_async_request *async,
                                      bool *should_complete, int *ret)
{
        struct skcipher_request *req = skcipher_request_cast(async);
        struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
        struct safexcel_result_desc *rdesc;
        int ndesc = 0, enq_ret;

        *ret = 0;

        spin_lock_bh(&priv->ring[ring].egress_lock);
        do {
                rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
                if (IS_ERR(rdesc)) {
                        dev_err(priv->dev,
                                "cipher: invalidate: could not retrieve the result descriptor\n");
                        *ret = PTR_ERR(rdesc);
                        break;
                }

                if (rdesc->result_data.error_code) {
                        dev_err(priv->dev, "cipher: invalidate: result descriptor error (%d)\n",
                                rdesc->result_data.error_code);
                        *ret = -EIO;
                }

                ndesc++;
        } while (!rdesc->last_seg);

        safexcel_complete(priv, ring);
        spin_unlock_bh(&priv->ring[ring].egress_lock);

        if (ctx->base.exit_inv) {
                dma_pool_free(priv->context_pool, ctx->base.ctxr,
                              ctx->base.ctxr_dma);

                *should_complete = true;

                return ndesc;
        }

        ring = safexcel_select_ring(priv);
        ctx->base.ring = ring;

        spin_lock_bh(&priv->ring[ring].queue_lock);
        enq_ret = crypto_enqueue_request(&priv->ring[ring].queue, async);
        spin_unlock_bh(&priv->ring[ring].queue_lock);

        if (enq_ret != -EINPROGRESS)
                *ret = enq_ret;

        queue_work(priv->ring[ring].workqueue,
                   &priv->ring[ring].work_data.work);

        *should_complete = false;

        return ndesc;
}

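/*
 * Result dispatcher: route the completion to the invalidation or the
 * regular handler depending on what was sent for this request.
 */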
static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring,
                                  struct crypto_async_request *async,
                                  bool *should_complete, int *ret)
{
        struct skcipher_request *req = skcipher_request_cast(async);
        struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
        int err;

        if (sreq->needs_inv) {
                sreq->needs_inv = false;
                err = safexcel_handle_inv_result(priv, ring, async,
                                                 should_complete, ret);
        } else {
                err = safexcel_handle_req_result(priv, ring, async,
                                                 should_complete, ret);
        }

        return err;
}

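/* Queue a context-invalidation command: one command and one result descriptor. */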
static int safexcel_cipher_send_inv(struct crypto_async_request *async,
                                    int ring, struct safexcel_request *request,
                                    int *commands, int *results)
{
        struct skcipher_request *req = skcipher_request_cast(async);
        struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
        struct safexcel_crypto_priv *priv = ctx->priv;
        int ret;

        ret = safexcel_invalidate_cache(async, priv,
                                        ctx->base.ctxr_dma, ring, request);
        if (unlikely(ret))
                return ret;

        *commands = 1;
        *results = 1;

        return 0;
}

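/*
 * Send dispatcher: context invalidation is only ever flagged on EIP197,
 * so seeing it on EIP97 is a bug; otherwise pick the invalidation or the
 * regular AES path.
 */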
static int safexcel_send(struct crypto_async_request *async,
                         int ring, struct safexcel_request *request,
                         int *commands, int *results)
{
        struct skcipher_request *req = skcipher_request_cast(async);
        struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
        struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
        struct safexcel_crypto_priv *priv = ctx->priv;
        int ret;

        BUG_ON(priv->version == EIP97 && sreq->needs_inv);

        if (sreq->needs_inv)
                ret = safexcel_cipher_send_inv(async, ring, request,
                                               commands, results);
        else
                ret = safexcel_aes_send(async, ring, request,
                                        commands, results);
        return ret;
}

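/*
 * Synchronously invalidate the context record: build an on-stack
 * skcipher request marked as an invalidation, queue it on the context's
 * ring and wait for its completion callback.
 */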
static int safexcel_cipher_exit_inv(struct crypto_tfm *tfm)
{
        struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
        struct safexcel_crypto_priv *priv = ctx->priv;
        SKCIPHER_REQUEST_ON_STACK(req, __crypto_skcipher_cast(tfm));
        struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
        struct safexcel_inv_result result = {};
        int ring = ctx->base.ring;

        memset(req, 0, sizeof(struct skcipher_request));

        /* create invalidation request */
        init_completion(&result.completion);
        skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                      safexcel_inv_complete, &result);

        skcipher_request_set_tfm(req, __crypto_skcipher_cast(tfm));
        ctx = crypto_tfm_ctx(req->base.tfm);
        ctx->base.exit_inv = true;
        sreq->needs_inv = true;

        spin_lock_bh(&priv->ring[ring].queue_lock);
        crypto_enqueue_request(&priv->ring[ring].queue, &req->base);
        spin_unlock_bh(&priv->ring[ring].queue_lock);

        queue_work(priv->ring[ring].workqueue,
                   &priv->ring[ring].work_data.work);

        wait_for_completion(&result.completion);

        if (result.error) {
                dev_warn(priv->dev,
                         "cipher: sync: invalidate: completion error %d\n",
                         result.error);
                return result.error;
        }

        return 0;
}

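/*
 * Common entry point for AES encryption/decryption: allocate the per-tfm
 * context record on first use, request an invalidation if the key
 * changed, then enqueue the request and kick the ring worker.
 */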
static int safexcel_aes(struct skcipher_request *req,
                        enum safexcel_cipher_direction dir, u32 mode)
{
        struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
        struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
        struct safexcel_crypto_priv *priv = ctx->priv;
        int ret, ring;

        sreq->needs_inv = false;
        sreq->direction = dir;
        ctx->mode = mode;

        if (ctx->base.ctxr) {
                if (priv->version == EIP197 && ctx->base.needs_inv) {
                        sreq->needs_inv = true;
                        ctx->base.needs_inv = false;
                }
        } else {
                ctx->base.ring = safexcel_select_ring(priv);
                ctx->base.ctxr = dma_pool_zalloc(priv->context_pool,
                                                 EIP197_GFP_FLAGS(req->base),
                                                 &ctx->base.ctxr_dma);
                if (!ctx->base.ctxr)
                        return -ENOMEM;
        }

        ring = ctx->base.ring;

        spin_lock_bh(&priv->ring[ring].queue_lock);
        ret = crypto_enqueue_request(&priv->ring[ring].queue, &req->base);
        spin_unlock_bh(&priv->ring[ring].queue_lock);

        queue_work(priv->ring[ring].workqueue,
                   &priv->ring[ring].work_data.work);

        return ret;
}

static int safexcel_ecb_aes_encrypt(struct skcipher_request *req)
{
        return safexcel_aes(req, SAFEXCEL_ENCRYPT,
                            CONTEXT_CONTROL_CRYPTO_MODE_ECB);
}

static int safexcel_ecb_aes_decrypt(struct skcipher_request *req)
{
        return safexcel_aes(req, SAFEXCEL_DECRYPT,
                            CONTEXT_CONTROL_CRYPTO_MODE_ECB);
}

static int safexcel_skcipher_cra_init(struct crypto_tfm *tfm)
{
        struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
        struct safexcel_alg_template *tmpl =
                container_of(tfm->__crt_alg, struct safexcel_alg_template,
                             alg.skcipher.base);

        ctx->priv = tmpl->priv;
        ctx->base.send = safexcel_send;
        ctx->base.handle_result = safexcel_handle_result;

        crypto_skcipher_set_reqsize(__crypto_skcipher_cast(tfm),
                                    sizeof(struct safexcel_cipher_req));

        return 0;
}

static void safexcel_skcipher_cra_exit(struct crypto_tfm *tfm)
{
        struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
        struct safexcel_crypto_priv *priv = ctx->priv;
        int ret;

        memzero_explicit(ctx->key, 8 * sizeof(u32));

        /* context not allocated, skip invalidation */
        if (!ctx->base.ctxr)
                return;

        memzero_explicit(ctx->base.ctxr->data, 8 * sizeof(u32));

        if (priv->version == EIP197) {
                ret = safexcel_cipher_exit_inv(tfm);
                if (ret)
                        dev_warn(priv->dev, "cipher: invalidation error %d\n", ret);
        } else {
                dma_pool_free(priv->context_pool, ctx->base.ctxr,
                              ctx->base.ctxr_dma);
        }
}

struct safexcel_alg_template safexcel_alg_ecb_aes = {
        .type = SAFEXCEL_ALG_TYPE_SKCIPHER,
        .alg.skcipher = {
                .setkey = safexcel_aes_setkey,
                .encrypt = safexcel_ecb_aes_encrypt,
                .decrypt = safexcel_ecb_aes_decrypt,
                .min_keysize = AES_MIN_KEY_SIZE,
                .max_keysize = AES_MAX_KEY_SIZE,
                .base = {
                        .cra_name = "ecb(aes)",
                        .cra_driver_name = "safexcel-ecb-aes",
                        .cra_priority = 300,
                        .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC |
                                     CRYPTO_ALG_KERN_DRIVER_ONLY,
                        .cra_blocksize = AES_BLOCK_SIZE,
                        .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
                        .cra_alignmask = 0,
                        .cra_init = safexcel_skcipher_cra_init,
                        .cra_exit = safexcel_skcipher_cra_exit,
                        .cra_module = THIS_MODULE,
                },
        },
};

static int safexcel_cbc_aes_encrypt(struct skcipher_request *req)
{
        return safexcel_aes(req, SAFEXCEL_ENCRYPT,
                            CONTEXT_CONTROL_CRYPTO_MODE_CBC);
}

static int safexcel_cbc_aes_decrypt(struct skcipher_request *req)
{
        return safexcel_aes(req, SAFEXCEL_DECRYPT,
                            CONTEXT_CONTROL_CRYPTO_MODE_CBC);
}

struct safexcel_alg_template safexcel_alg_cbc_aes = {
        .type = SAFEXCEL_ALG_TYPE_SKCIPHER,
        .alg.skcipher = {
                .setkey = safexcel_aes_setkey,
                .encrypt = safexcel_cbc_aes_encrypt,
                .decrypt = safexcel_cbc_aes_decrypt,
                .min_keysize = AES_MIN_KEY_SIZE,
                .max_keysize = AES_MAX_KEY_SIZE,
                .ivsize = AES_BLOCK_SIZE,
                .base = {
                        .cra_name = "cbc(aes)",
                        .cra_driver_name = "safexcel-cbc-aes",
                        .cra_priority = 300,
                        .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC |
                                     CRYPTO_ALG_KERN_DRIVER_ONLY,
                        .cra_blocksize = AES_BLOCK_SIZE,
                        .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
                        .cra_alignmask = 0,
                        .cra_init = safexcel_skcipher_cra_init,
                        .cra_exit = safexcel_skcipher_cra_exit,
                        .cra_module = THIS_MODULE,
                },
        },
};