safexcel_cipher.c

/*
 * Copyright (C) 2017 Marvell
 *
 * Antoine Tenart <antoine.tenart@free-electrons.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>

#include <crypto/aes.h>
#include <crypto/skcipher.h>
#include <crypto/internal/skcipher.h>

#include "safexcel.h"

enum safexcel_cipher_direction {
	SAFEXCEL_ENCRYPT,
	SAFEXCEL_DECRYPT,
};

struct safexcel_cipher_ctx {
	struct safexcel_context base;
	struct safexcel_crypto_priv *priv;

	u32 mode;

	__le32 key[8];
	unsigned int key_len;
};

struct safexcel_cipher_req {
	enum safexcel_cipher_direction direction;
	bool needs_inv;
};
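
/*
 * Build the processing token for a cipher request: for CBC mode, copy the IV
 * into the command descriptor token area, then emit a single DIRECTION
 * instruction covering the whole payload.
 */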
static void safexcel_cipher_token(struct safexcel_cipher_ctx *ctx,
				  struct crypto_async_request *async,
				  struct safexcel_command_desc *cdesc,
				  u32 length)
{
	struct skcipher_request *req = skcipher_request_cast(async);
	struct safexcel_token *token;
	unsigned offset = 0;

	if (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CBC) {
		offset = AES_BLOCK_SIZE / sizeof(u32);
		memcpy(cdesc->control_data.token, req->iv, AES_BLOCK_SIZE);

		cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD;
	}

	token = (struct safexcel_token *)(cdesc->control_data.token + offset);

	token[0].opcode = EIP197_TOKEN_OPCODE_DIRECTION;
	token[0].packet_length = length;
	token[0].stat = EIP197_TOKEN_STAT_LAST_PACKET |
			EIP197_TOKEN_STAT_LAST_HASH;
	token[0].instructions = EIP197_TOKEN_INS_LAST |
				EIP197_TOKEN_INS_TYPE_CRYTO |
				EIP197_TOKEN_INS_TYPE_OUTPUT;
}
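
/*
 * Expand and install the AES key. On EIP197, if a context record is already
 * mapped for this transform and the key changes, flag the context for
 * invalidation so the stale record is not reused.
 */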
static int safexcel_aes_setkey(struct crypto_skcipher *ctfm, const u8 *key,
			       unsigned int len)
{
	struct crypto_tfm *tfm = crypto_skcipher_tfm(ctfm);
	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct safexcel_crypto_priv *priv = ctx->priv;
	struct crypto_aes_ctx aes;
	int ret, i;

	ret = crypto_aes_expand_key(&aes, key, len);
	if (ret) {
		crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return ret;
	}

	if (priv->version == EIP197 && ctx->base.ctxr_dma) {
		for (i = 0; i < len / sizeof(u32); i++) {
			if (ctx->key[i] != cpu_to_le32(aes.key_enc[i])) {
				ctx->base.needs_inv = true;
				break;
			}
		}
	}

	for (i = 0; i < len / sizeof(u32); i++)
		ctx->key[i] = cpu_to_le32(aes.key_enc[i]);

	ctx->key_len = len;

	memzero_explicit(&aes, sizeof(aes));
	return 0;
}
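
/*
 * Fill in the context control words of the first command descriptor: crypto
 * direction, key enable, cipher mode and the AES variant matching the
 * installed key length.
 */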
static int safexcel_context_control(struct safexcel_cipher_ctx *ctx,
				    struct crypto_async_request *async,
				    struct safexcel_command_desc *cdesc)
{
	struct safexcel_crypto_priv *priv = ctx->priv;
	struct skcipher_request *req = skcipher_request_cast(async);
	struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
	int ctrl_size;

	if (sreq->direction == SAFEXCEL_ENCRYPT)
		cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_CRYPTO_OUT;
	else
		cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_CRYPTO_IN;

	cdesc->control_data.control0 |= CONTEXT_CONTROL_KEY_EN;
	cdesc->control_data.control1 |= ctx->mode;

	switch (ctx->key_len) {
	case AES_KEYSIZE_128:
		cdesc->control_data.control0 |= CONTEXT_CONTROL_CRYPTO_ALG_AES128;
		ctrl_size = 4;
		break;
	case AES_KEYSIZE_192:
		cdesc->control_data.control0 |= CONTEXT_CONTROL_CRYPTO_ALG_AES192;
		ctrl_size = 6;
		break;
	case AES_KEYSIZE_256:
		cdesc->control_data.control0 |= CONTEXT_CONTROL_CRYPTO_ALG_AES256;
		ctrl_size = 8;
		break;
	default:
		dev_err(priv->dev, "aes keysize not supported: %u\n",
			ctx->key_len);
		return -EINVAL;
	}
	cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(ctrl_size);

	return 0;
}
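
/*
 * Collect the result descriptors of a completed cipher request, report any
 * per-descriptor error, and unmap the source/destination scatterlists.
 */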
static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int ring,
				      struct crypto_async_request *async,
				      bool *should_complete, int *ret)
{
	struct skcipher_request *req = skcipher_request_cast(async);
	struct safexcel_result_desc *rdesc;
	int ndesc = 0;

	*ret = 0;

	spin_lock_bh(&priv->ring[ring].egress_lock);
	do {
		rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
		if (IS_ERR(rdesc)) {
			dev_err(priv->dev,
				"cipher: result: could not retrieve the result descriptor\n");
			*ret = PTR_ERR(rdesc);
			break;
		}

		if (rdesc->result_data.error_code) {
			dev_err(priv->dev,
				"cipher: result: result descriptor error (%d)\n",
				rdesc->result_data.error_code);
			*ret = -EIO;
		}

		ndesc++;
	} while (!rdesc->last_seg);

	safexcel_complete(priv, ring);
	spin_unlock_bh(&priv->ring[ring].egress_lock);

	if (req->src == req->dst) {
		dma_unmap_sg(priv->dev, req->src,
			     sg_nents_for_len(req->src, req->cryptlen),
			     DMA_BIDIRECTIONAL);
	} else {
		dma_unmap_sg(priv->dev, req->src,
			     sg_nents_for_len(req->src, req->cryptlen),
			     DMA_TO_DEVICE);
		dma_unmap_sg(priv->dev, req->dst,
			     sg_nents_for_len(req->dst, req->cryptlen),
			     DMA_FROM_DEVICE);
	}

	*should_complete = true;

	return ndesc;
}
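
/*
 * Map the scatterlists and queue one command descriptor per source segment
 * and one result descriptor per destination segment. The first command
 * descriptor also carries the context control words and the token. On a
 * ring-full error, roll back the descriptors already reserved and unmap.
 */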
static int safexcel_aes_send(struct crypto_async_request *async,
			     int ring, struct safexcel_request *request,
			     int *commands, int *results)
{
	struct skcipher_request *req = skcipher_request_cast(async);
	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct safexcel_crypto_priv *priv = ctx->priv;
	struct safexcel_command_desc *cdesc;
	struct safexcel_result_desc *rdesc;
	struct scatterlist *sg;
	int nr_src, nr_dst, n_cdesc = 0, n_rdesc = 0, queued = req->cryptlen;
	int i, ret = 0;

	if (req->src == req->dst) {
		nr_src = dma_map_sg(priv->dev, req->src,
				    sg_nents_for_len(req->src, req->cryptlen),
				    DMA_BIDIRECTIONAL);
		nr_dst = nr_src;
		if (!nr_src)
			return -EINVAL;
	} else {
		nr_src = dma_map_sg(priv->dev, req->src,
				    sg_nents_for_len(req->src, req->cryptlen),
				    DMA_TO_DEVICE);
		if (!nr_src)
			return -EINVAL;

		nr_dst = dma_map_sg(priv->dev, req->dst,
				    sg_nents_for_len(req->dst, req->cryptlen),
				    DMA_FROM_DEVICE);
		if (!nr_dst) {
			dma_unmap_sg(priv->dev, req->src,
				     sg_nents_for_len(req->src, req->cryptlen),
				     DMA_TO_DEVICE);
			return -EINVAL;
		}
	}

	memcpy(ctx->base.ctxr->data, ctx->key, ctx->key_len);

	spin_lock_bh(&priv->ring[ring].egress_lock);

	/* command descriptors */
	for_each_sg(req->src, sg, nr_src, i) {
		int len = sg_dma_len(sg);

		/* Do not overflow the request */
		if (queued - len < 0)
			len = queued;

		cdesc = safexcel_add_cdesc(priv, ring, !n_cdesc, !(queued - len),
					   sg_dma_address(sg), len, req->cryptlen,
					   ctx->base.ctxr_dma);
		if (IS_ERR(cdesc)) {
			/* No space left in the command descriptor ring */
			ret = PTR_ERR(cdesc);
			goto cdesc_rollback;
		}
		n_cdesc++;

		if (n_cdesc == 1) {
			safexcel_context_control(ctx, async, cdesc);
			safexcel_cipher_token(ctx, async, cdesc, req->cryptlen);
		}

		queued -= len;
		if (!queued)
			break;
	}

	/* result descriptors */
	for_each_sg(req->dst, sg, nr_dst, i) {
		bool first = !i, last = (i == nr_dst - 1);
		u32 len = sg_dma_len(sg);

		rdesc = safexcel_add_rdesc(priv, ring, first, last,
					   sg_dma_address(sg), len);
		if (IS_ERR(rdesc)) {
			/* No space left in the result descriptor ring */
			ret = PTR_ERR(rdesc);
			goto rdesc_rollback;
		}
		n_rdesc++;
	}

	spin_unlock_bh(&priv->ring[ring].egress_lock);

	request->req = &req->base;

	*commands = n_cdesc;
	*results = n_rdesc;
	return 0;

rdesc_rollback:
	for (i = 0; i < n_rdesc; i++)
		safexcel_ring_rollback_wptr(priv, &priv->ring[ring].rdr);
cdesc_rollback:
	for (i = 0; i < n_cdesc; i++)
		safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr);
	spin_unlock_bh(&priv->ring[ring].egress_lock);

	if (req->src == req->dst) {
		dma_unmap_sg(priv->dev, req->src,
			     sg_nents_for_len(req->src, req->cryptlen),
			     DMA_BIDIRECTIONAL);
	} else {
		dma_unmap_sg(priv->dev, req->src,
			     sg_nents_for_len(req->src, req->cryptlen),
			     DMA_TO_DEVICE);
		dma_unmap_sg(priv->dev, req->dst,
			     sg_nents_for_len(req->dst, req->cryptlen),
			     DMA_FROM_DEVICE);
	}

	return ret;
}
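
/*
 * Handle the completion of a context invalidation request. If the transform
 * is being torn down, free the context record; otherwise move the context to
 * a freshly selected ring and re-queue the original request there.
 */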
static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
				      int ring,
				      struct crypto_async_request *async,
				      bool *should_complete, int *ret)
{
	struct skcipher_request *req = skcipher_request_cast(async);
	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct safexcel_result_desc *rdesc;
	int ndesc = 0, enq_ret;

	*ret = 0;

	spin_lock_bh(&priv->ring[ring].egress_lock);
	do {
		rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
		if (IS_ERR(rdesc)) {
			dev_err(priv->dev,
				"cipher: invalidate: could not retrieve the result descriptor\n");
			*ret = PTR_ERR(rdesc);
			break;
		}

		if (rdesc->result_data.error_code) {
			dev_err(priv->dev, "cipher: invalidate: result descriptor error (%d)\n",
				rdesc->result_data.error_code);
			*ret = -EIO;
		}

		ndesc++;
	} while (!rdesc->last_seg);

	safexcel_complete(priv, ring);
	spin_unlock_bh(&priv->ring[ring].egress_lock);

	if (ctx->base.exit_inv) {
		dma_pool_free(priv->context_pool, ctx->base.ctxr,
			      ctx->base.ctxr_dma);

		*should_complete = true;

		return ndesc;
	}

	ring = safexcel_select_ring(priv);
	ctx->base.ring = ring;

	spin_lock_bh(&priv->ring[ring].queue_lock);
	enq_ret = crypto_enqueue_request(&priv->ring[ring].queue, async);
	spin_unlock_bh(&priv->ring[ring].queue_lock);

	if (enq_ret != -EINPROGRESS)
		*ret = enq_ret;

	queue_work(priv->ring[ring].workqueue,
		   &priv->ring[ring].work_data.work);

	*should_complete = false;

	return ndesc;
}

static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring,
				  struct crypto_async_request *async,
				  bool *should_complete, int *ret)
{
	struct skcipher_request *req = skcipher_request_cast(async);
	struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
	int err;

	if (sreq->needs_inv) {
		sreq->needs_inv = false;
		err = safexcel_handle_inv_result(priv, ring, async,
						 should_complete, ret);
	} else {
		err = safexcel_handle_req_result(priv, ring, async,
						 should_complete, ret);
	}

	return err;
}

static int safexcel_cipher_send_inv(struct crypto_async_request *async,
				    int ring, struct safexcel_request *request,
				    int *commands, int *results)
{
	struct skcipher_request *req = skcipher_request_cast(async);
	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct safexcel_crypto_priv *priv = ctx->priv;
	int ret;

	ret = safexcel_invalidate_cache(async, priv,
					ctx->base.ctxr_dma, ring, request);
	if (unlikely(ret))
		return ret;

	*commands = 1;
	*results = 1;

	return 0;
}

static int safexcel_send(struct crypto_async_request *async,
			 int ring, struct safexcel_request *request,
			 int *commands, int *results)
{
	struct skcipher_request *req = skcipher_request_cast(async);
	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
	struct safexcel_crypto_priv *priv = ctx->priv;
	int ret;

	BUG_ON(priv->version == EIP97 && sreq->needs_inv);

	if (sreq->needs_inv)
		ret = safexcel_cipher_send_inv(async, ring, request,
					       commands, results);
	else
		ret = safexcel_aes_send(async, ring, request,
					commands, results);
	return ret;
}
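
/*
 * Synchronously invalidate the context record of a transform that is going
 * away: queue an invalidation request on the context's ring and wait for its
 * completion callback.
 */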
static int safexcel_cipher_exit_inv(struct crypto_tfm *tfm)
{
	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct safexcel_crypto_priv *priv = ctx->priv;
	SKCIPHER_REQUEST_ON_STACK(req, __crypto_skcipher_cast(tfm));
	struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
	struct safexcel_inv_result result = {};
	int ring = ctx->base.ring;

	memset(req, 0, sizeof(struct skcipher_request));

	/* create invalidation request */
	init_completion(&result.completion);
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				      safexcel_inv_complete, &result);

	skcipher_request_set_tfm(req, __crypto_skcipher_cast(tfm));
	ctx = crypto_tfm_ctx(req->base.tfm);
	ctx->base.exit_inv = true;
	sreq->needs_inv = true;

	spin_lock_bh(&priv->ring[ring].queue_lock);
	crypto_enqueue_request(&priv->ring[ring].queue, &req->base);
	spin_unlock_bh(&priv->ring[ring].queue_lock);

	queue_work(priv->ring[ring].workqueue,
		   &priv->ring[ring].work_data.work);

	wait_for_completion(&result.completion);

	if (result.error) {
		dev_warn(priv->dev,
			 "cipher: sync: invalidate: completion error %d\n",
			 result.error);
		return result.error;
	}

	return 0;
}
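
/*
 * Common entry point for encrypt/decrypt: record the direction and cipher
 * mode, allocate the per-transform context record if needed (or flag it for
 * invalidation), then enqueue the request and kick the ring worker.
 */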
static int safexcel_aes(struct skcipher_request *req,
			enum safexcel_cipher_direction dir, u32 mode)
{
	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
	struct safexcel_crypto_priv *priv = ctx->priv;
	int ret, ring;

	sreq->needs_inv = false;
	sreq->direction = dir;
	ctx->mode = mode;

	if (ctx->base.ctxr) {
		if (priv->version == EIP197 && ctx->base.needs_inv) {
			sreq->needs_inv = true;
			ctx->base.needs_inv = false;
		}
	} else {
		ctx->base.ring = safexcel_select_ring(priv);
		ctx->base.ctxr = dma_pool_zalloc(priv->context_pool,
						 EIP197_GFP_FLAGS(req->base),
						 &ctx->base.ctxr_dma);
		if (!ctx->base.ctxr)
			return -ENOMEM;
	}

	ring = ctx->base.ring;

	spin_lock_bh(&priv->ring[ring].queue_lock);
	ret = crypto_enqueue_request(&priv->ring[ring].queue, &req->base);
	spin_unlock_bh(&priv->ring[ring].queue_lock);

	queue_work(priv->ring[ring].workqueue,
		   &priv->ring[ring].work_data.work);

	return ret;
}

static int safexcel_ecb_aes_encrypt(struct skcipher_request *req)
{
	return safexcel_aes(req, SAFEXCEL_ENCRYPT,
			    CONTEXT_CONTROL_CRYPTO_MODE_ECB);
}

static int safexcel_ecb_aes_decrypt(struct skcipher_request *req)
{
	return safexcel_aes(req, SAFEXCEL_DECRYPT,
			    CONTEXT_CONTROL_CRYPTO_MODE_ECB);
}

static int safexcel_skcipher_cra_init(struct crypto_tfm *tfm)
{
	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct safexcel_alg_template *tmpl =
		container_of(tfm->__crt_alg, struct safexcel_alg_template,
			     alg.skcipher.base);

	ctx->priv = tmpl->priv;
	ctx->base.send = safexcel_send;
	ctx->base.handle_result = safexcel_handle_result;

	crypto_skcipher_set_reqsize(__crypto_skcipher_cast(tfm),
				    sizeof(struct safexcel_cipher_req));

	return 0;
}
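
/*
 * Transform teardown: scrub the key material and the context record data,
 * then invalidate the context on EIP197, or free it directly on EIP97 where
 * no invalidation is performed.
 */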
static void safexcel_skcipher_cra_exit(struct crypto_tfm *tfm)
{
	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct safexcel_crypto_priv *priv = ctx->priv;
	int ret;

	memzero_explicit(ctx->key, 8 * sizeof(u32));

	/* context not allocated, skip invalidation */
	if (!ctx->base.ctxr)
		return;

	memzero_explicit(ctx->base.ctxr->data, 8 * sizeof(u32));

	if (priv->version == EIP197) {
		ret = safexcel_cipher_exit_inv(tfm);
		if (ret)
			dev_warn(priv->dev, "cipher: invalidation error %d\n", ret);
	} else {
		dma_pool_free(priv->context_pool, ctx->base.ctxr,
			      ctx->base.ctxr_dma);
	}
}

struct safexcel_alg_template safexcel_alg_ecb_aes = {
	.type = SAFEXCEL_ALG_TYPE_SKCIPHER,
	.alg.skcipher = {
		.setkey = safexcel_aes_setkey,
		.encrypt = safexcel_ecb_aes_encrypt,
		.decrypt = safexcel_ecb_aes_decrypt,
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.base = {
			.cra_name = "ecb(aes)",
			.cra_driver_name = "safexcel-ecb-aes",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
			.cra_alignmask = 0,
			.cra_init = safexcel_skcipher_cra_init,
			.cra_exit = safexcel_skcipher_cra_exit,
			.cra_module = THIS_MODULE,
		},
	},
};

static int safexcel_cbc_aes_encrypt(struct skcipher_request *req)
{
	return safexcel_aes(req, SAFEXCEL_ENCRYPT,
			    CONTEXT_CONTROL_CRYPTO_MODE_CBC);
}

static int safexcel_cbc_aes_decrypt(struct skcipher_request *req)
{
	return safexcel_aes(req, SAFEXCEL_DECRYPT,
			    CONTEXT_CONTROL_CRYPTO_MODE_CBC);
}

struct safexcel_alg_template safexcel_alg_cbc_aes = {
	.type = SAFEXCEL_ALG_TYPE_SKCIPHER,
	.alg.skcipher = {
		.setkey = safexcel_aes_setkey,
		.encrypt = safexcel_cbc_aes_encrypt,
		.decrypt = safexcel_cbc_aes_decrypt,
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.ivsize = AES_BLOCK_SIZE,
		.base = {
			.cra_name = "cbc(aes)",
			.cra_driver_name = "safexcel-cbc-aes",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
			.cra_alignmask = 0,
			.cra_init = safexcel_skcipher_cra_init,
			.cra_exit = safexcel_skcipher_cra_exit,
			.cra_module = THIS_MODULE,
		},
	},
};