safexcel_cipher.c

/*
 * Copyright (C) 2017 Marvell
 *
 * Antoine Tenart <antoine.tenart@free-electrons.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>

#include <crypto/aes.h>
#include <crypto/skcipher.h>
#include <crypto/internal/skcipher.h>

#include "safexcel.h"

enum safexcel_cipher_direction {
	SAFEXCEL_ENCRYPT,
	SAFEXCEL_DECRYPT,
};

struct safexcel_cipher_ctx {
	struct safexcel_context base;
	struct safexcel_crypto_priv *priv;

	u32 mode;

	__le32 key[8];
	unsigned int key_len;
};

struct safexcel_cipher_req {
	enum safexcel_cipher_direction direction;
	bool needs_inv;
};
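
/*
 * Fill in the token instructions of the first command descriptor. For CBC
 * mode the request IV is copied into the descriptor's control data first,
 * and the single DIRECTION token placed after it covers the whole request.
 */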
static void safexcel_cipher_token(struct safexcel_cipher_ctx *ctx,
				  struct crypto_async_request *async,
				  struct safexcel_command_desc *cdesc,
				  u32 length)
{
	struct skcipher_request *req = skcipher_request_cast(async);
	struct safexcel_token *token;
	unsigned offset = 0;

	if (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CBC) {
		offset = AES_BLOCK_SIZE / sizeof(u32);
		memcpy(cdesc->control_data.token, req->iv, AES_BLOCK_SIZE);

		cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD;
	}

	token = (struct safexcel_token *)(cdesc->control_data.token + offset);

	token[0].opcode = EIP197_TOKEN_OPCODE_DIRECTION;
	token[0].packet_length = length;
	token[0].stat = EIP197_TOKEN_STAT_LAST_PACKET;
	token[0].instructions = EIP197_TOKEN_INS_LAST |
				EIP197_TOKEN_INS_TYPE_CRYTO |
				EIP197_TOKEN_INS_TYPE_OUTPUT;
}
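
/*
 * Expand and cache the AES key. If a context record has already been pushed
 * to the engine and the key changed, flag the context for invalidation so
 * the old key cannot be reused.
 */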
static int safexcel_aes_setkey(struct crypto_skcipher *ctfm, const u8 *key,
			       unsigned int len)
{
	struct crypto_tfm *tfm = crypto_skcipher_tfm(ctfm);
	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_aes_ctx aes;
	int ret, i;

	ret = crypto_aes_expand_key(&aes, key, len);
	if (ret) {
		crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return ret;
	}

	if (ctx->base.ctxr_dma) {
		for (i = 0; i < len / sizeof(u32); i++) {
			if (ctx->key[i] != cpu_to_le32(aes.key_enc[i])) {
				ctx->base.needs_inv = true;
				break;
			}
		}
	}

	for (i = 0; i < len / sizeof(u32); i++)
		ctx->key[i] = cpu_to_le32(aes.key_enc[i]);

	ctx->key_len = len;

	memzero_explicit(&aes, sizeof(aes));

	return 0;
}
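
/*
 * Program the command descriptor control words: crypto direction, cipher
 * mode, key enable and a context record size matching the AES key length.
 */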
static int safexcel_context_control(struct safexcel_cipher_ctx *ctx,
				    struct crypto_async_request *async,
				    struct safexcel_command_desc *cdesc)
{
	struct safexcel_crypto_priv *priv = ctx->priv;
	struct skcipher_request *req = skcipher_request_cast(async);
	struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
	int ctrl_size;

	if (sreq->direction == SAFEXCEL_ENCRYPT)
		cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_CRYPTO_OUT;
	else
		cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_CRYPTO_IN;

	cdesc->control_data.control0 |= CONTEXT_CONTROL_KEY_EN;
	cdesc->control_data.control1 |= ctx->mode;

	switch (ctx->key_len) {
	case AES_KEYSIZE_128:
		cdesc->control_data.control0 |= CONTEXT_CONTROL_CRYPTO_ALG_AES128;
		ctrl_size = 4;
		break;
	case AES_KEYSIZE_192:
		cdesc->control_data.control0 |= CONTEXT_CONTROL_CRYPTO_ALG_AES192;
		ctrl_size = 6;
		break;
	case AES_KEYSIZE_256:
		cdesc->control_data.control0 |= CONTEXT_CONTROL_CRYPTO_ALG_AES256;
		ctrl_size = 8;
		break;
	default:
		dev_err(priv->dev, "aes keysize not supported: %u\n",
			ctx->key_len);
		return -EINVAL;
	}
	cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(ctrl_size);

	return 0;
}
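
/*
 * Completion path for a regular cipher request: walk the result descriptors
 * belonging to this request, report any error code, then unmap the source
 * and destination scatterlists and let the caller complete the request.
 */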
static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int ring,
				      struct crypto_async_request *async,
				      bool *should_complete, int *ret)
{
	struct skcipher_request *req = skcipher_request_cast(async);
	struct safexcel_result_desc *rdesc;
	int ndesc = 0;

	*ret = 0;

	spin_lock_bh(&priv->ring[ring].egress_lock);
	do {
		rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
		if (IS_ERR(rdesc)) {
			dev_err(priv->dev,
				"cipher: result: could not retrieve the result descriptor\n");
			*ret = PTR_ERR(rdesc);
			break;
		}

		if (rdesc->result_data.error_code) {
			dev_err(priv->dev,
				"cipher: result: result descriptor error (%d)\n",
				rdesc->result_data.error_code);
			*ret = -EIO;
		}

		ndesc++;
	} while (!rdesc->last_seg);

	safexcel_complete(priv, ring);
	spin_unlock_bh(&priv->ring[ring].egress_lock);

	if (req->src == req->dst) {
		dma_unmap_sg(priv->dev, req->src,
			     sg_nents_for_len(req->src, req->cryptlen),
			     DMA_BIDIRECTIONAL);
	} else {
		dma_unmap_sg(priv->dev, req->src,
			     sg_nents_for_len(req->src, req->cryptlen),
			     DMA_TO_DEVICE);
		dma_unmap_sg(priv->dev, req->dst,
			     sg_nents_for_len(req->dst, req->cryptlen),
			     DMA_FROM_DEVICE);
	}

	*should_complete = true;

	return ndesc;
}
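
/*
 * Build the descriptor chains for one cipher request: DMA-map the
 * scatterlists, copy the key into the context record, then add one command
 * descriptor per source segment (the first also carrying the context control
 * words and the token) and one result descriptor per destination segment.
 * If either ring runs out of space, everything queued so far is rolled back.
 */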
static int safexcel_aes_send(struct crypto_async_request *async,
			     int ring, struct safexcel_request *request,
			     int *commands, int *results)
{
	struct skcipher_request *req = skcipher_request_cast(async);
	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct safexcel_crypto_priv *priv = ctx->priv;
	struct safexcel_command_desc *cdesc;
	struct safexcel_result_desc *rdesc;
	struct scatterlist *sg;
	int nr_src, nr_dst, n_cdesc = 0, n_rdesc = 0, queued = req->cryptlen;
	int i, ret = 0;

	if (req->src == req->dst) {
		nr_src = dma_map_sg(priv->dev, req->src,
				    sg_nents_for_len(req->src, req->cryptlen),
				    DMA_BIDIRECTIONAL);
		nr_dst = nr_src;
		if (!nr_src)
			return -EINVAL;
	} else {
		nr_src = dma_map_sg(priv->dev, req->src,
				    sg_nents_for_len(req->src, req->cryptlen),
				    DMA_TO_DEVICE);
		if (!nr_src)
			return -EINVAL;

		nr_dst = dma_map_sg(priv->dev, req->dst,
				    sg_nents_for_len(req->dst, req->cryptlen),
				    DMA_FROM_DEVICE);
		if (!nr_dst) {
			dma_unmap_sg(priv->dev, req->src,
				     sg_nents_for_len(req->src, req->cryptlen),
				     DMA_TO_DEVICE);
			return -EINVAL;
		}
	}

	memcpy(ctx->base.ctxr->data, ctx->key, ctx->key_len);

	spin_lock_bh(&priv->ring[ring].egress_lock);

	/* command descriptors */
	for_each_sg(req->src, sg, nr_src, i) {
		int len = sg_dma_len(sg);

		/* Do not overflow the request */
		if (queued - len < 0)
			len = queued;

		cdesc = safexcel_add_cdesc(priv, ring, !n_cdesc, !(queued - len),
					   sg_dma_address(sg), len, req->cryptlen,
					   ctx->base.ctxr_dma);
		if (IS_ERR(cdesc)) {
			/* No space left in the command descriptor ring */
			ret = PTR_ERR(cdesc);
			goto cdesc_rollback;
		}
		n_cdesc++;

		if (n_cdesc == 1) {
			safexcel_context_control(ctx, async, cdesc);
			safexcel_cipher_token(ctx, async, cdesc, req->cryptlen);
		}

		queued -= len;
		if (!queued)
			break;
	}

	/* result descriptors */
	for_each_sg(req->dst, sg, nr_dst, i) {
		bool first = !i, last = (i == nr_dst - 1);
		u32 len = sg_dma_len(sg);

		rdesc = safexcel_add_rdesc(priv, ring, first, last,
					   sg_dma_address(sg), len);
		if (IS_ERR(rdesc)) {
			/* No space left in the result descriptor ring */
			ret = PTR_ERR(rdesc);
			goto rdesc_rollback;
		}
		n_rdesc++;
	}

	spin_unlock_bh(&priv->ring[ring].egress_lock);

	request->req = &req->base;

	*commands = n_cdesc;
	*results = n_rdesc;
	return 0;

rdesc_rollback:
	for (i = 0; i < n_rdesc; i++)
		safexcel_ring_rollback_wptr(priv, &priv->ring[ring].rdr);
cdesc_rollback:
	for (i = 0; i < n_cdesc; i++)
		safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr);

	spin_unlock_bh(&priv->ring[ring].egress_lock);

	if (req->src == req->dst) {
		dma_unmap_sg(priv->dev, req->src,
			     sg_nents_for_len(req->src, req->cryptlen),
			     DMA_BIDIRECTIONAL);
	} else {
		dma_unmap_sg(priv->dev, req->src,
			     sg_nents_for_len(req->src, req->cryptlen),
			     DMA_TO_DEVICE);
		dma_unmap_sg(priv->dev, req->dst,
			     sg_nents_for_len(req->dst, req->cryptlen),
			     DMA_FROM_DEVICE);
	}

	return ret;
}
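
/*
 * Completion path for a context invalidation request. When the tfm is being
 * torn down (exit_inv) the context record is freed; otherwise the original
 * request is re-queued, possibly on a different ring.
 */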
static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
				      int ring,
				      struct crypto_async_request *async,
				      bool *should_complete, int *ret)
{
	struct skcipher_request *req = skcipher_request_cast(async);
	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct safexcel_result_desc *rdesc;
	int ndesc = 0, enq_ret;

	*ret = 0;

	spin_lock_bh(&priv->ring[ring].egress_lock);
	do {
		rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
		if (IS_ERR(rdesc)) {
			dev_err(priv->dev,
				"cipher: invalidate: could not retrieve the result descriptor\n");
			*ret = PTR_ERR(rdesc);
			break;
		}

		if (rdesc->result_data.error_code) {
			dev_err(priv->dev, "cipher: invalidate: result descriptor error (%d)\n",
				rdesc->result_data.error_code);
			*ret = -EIO;
		}

		ndesc++;
	} while (!rdesc->last_seg);

	safexcel_complete(priv, ring);
	spin_unlock_bh(&priv->ring[ring].egress_lock);

	if (ctx->base.exit_inv) {
		dma_pool_free(priv->context_pool, ctx->base.ctxr,
			      ctx->base.ctxr_dma);

		*should_complete = true;

		return ndesc;
	}

	ring = safexcel_select_ring(priv);
	ctx->base.ring = ring;

	spin_lock_bh(&priv->ring[ring].queue_lock);
	enq_ret = crypto_enqueue_request(&priv->ring[ring].queue, async);
	spin_unlock_bh(&priv->ring[ring].queue_lock);

	if (enq_ret != -EINPROGRESS)
		*ret = enq_ret;

	if (!priv->ring[ring].need_dequeue)
		safexcel_dequeue(priv, ring);

	*should_complete = false;

	return ndesc;
}
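
/* Dispatch completion handling to the invalidation or regular result path. */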
static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring,
				  struct crypto_async_request *async,
				  bool *should_complete, int *ret)
{
	struct skcipher_request *req = skcipher_request_cast(async);
	struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
	int err;

	if (sreq->needs_inv) {
		sreq->needs_inv = false;
		err = safexcel_handle_inv_result(priv, ring, async,
						 should_complete, ret);
	} else {
		err = safexcel_handle_req_result(priv, ring, async,
						 should_complete, ret);
	}

	return err;
}
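
/*
 * Queue a cache invalidation command for this context record; it consumes
 * exactly one command and one result descriptor.
 */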
static int safexcel_cipher_send_inv(struct crypto_async_request *async,
				    int ring, struct safexcel_request *request,
				    int *commands, int *results)
{
	struct skcipher_request *req = skcipher_request_cast(async);
	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct safexcel_crypto_priv *priv = ctx->priv;
	int ret;

	ret = safexcel_invalidate_cache(async, priv,
					ctx->base.ctxr_dma, ring, request);
	if (unlikely(ret))
		return ret;

	*commands = 1;
	*results = 1;

	return 0;
}
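
/* Send either an invalidation command or the actual cipher request. */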
static int safexcel_send(struct crypto_async_request *async,
			 int ring, struct safexcel_request *request,
			 int *commands, int *results)
{
	struct skcipher_request *req = skcipher_request_cast(async);
	struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
	int ret;

	if (sreq->needs_inv)
		ret = safexcel_cipher_send_inv(async, ring, request,
					       commands, results);
	else
		ret = safexcel_aes_send(async, ring, request,
					commands, results);

	return ret;
}
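
/*
 * Synchronously invalidate the context record: build an on-stack request
 * flagged with needs_inv/exit_inv, queue it on the context's ring and wait
 * for the completion callback.
 */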
static int safexcel_cipher_exit_inv(struct crypto_tfm *tfm)
{
	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct safexcel_crypto_priv *priv = ctx->priv;
	SKCIPHER_REQUEST_ON_STACK(req, __crypto_skcipher_cast(tfm));
	struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
	struct safexcel_inv_result result = {};
	int ring = ctx->base.ring;

	memset(req, 0, sizeof(struct skcipher_request));

	/* create invalidation request */
	init_completion(&result.completion);
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				      safexcel_inv_complete, &result);

	skcipher_request_set_tfm(req, __crypto_skcipher_cast(tfm));
	ctx = crypto_tfm_ctx(req->base.tfm);
	ctx->base.exit_inv = true;
	sreq->needs_inv = true;

	spin_lock_bh(&priv->ring[ring].queue_lock);
	crypto_enqueue_request(&priv->ring[ring].queue, &req->base);
	spin_unlock_bh(&priv->ring[ring].queue_lock);

	if (!priv->ring[ring].need_dequeue)
		safexcel_dequeue(priv, ring);

	wait_for_completion_interruptible(&result.completion);

	if (result.error) {
		dev_warn(priv->dev,
			 "cipher: sync: invalidate: completion error %d\n",
			 result.error);
		return result.error;
	}

	return 0;
}
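
/*
 * Common entry point for the encrypt/decrypt hooks: allocate a context
 * record on first use, mark the request for invalidation if the key changed,
 * then queue it on the selected ring.
 */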
static int safexcel_aes(struct skcipher_request *req,
			enum safexcel_cipher_direction dir, u32 mode)
{
	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
	struct safexcel_crypto_priv *priv = ctx->priv;
	int ret, ring;

	sreq->needs_inv = false;
	sreq->direction = dir;
	ctx->mode = mode;

	if (ctx->base.ctxr) {
		if (ctx->base.needs_inv) {
			sreq->needs_inv = true;
			ctx->base.needs_inv = false;
		}
	} else {
		ctx->base.ring = safexcel_select_ring(priv);
		ctx->base.ctxr = dma_pool_zalloc(priv->context_pool,
						 EIP197_GFP_FLAGS(req->base),
						 &ctx->base.ctxr_dma);
		if (!ctx->base.ctxr)
			return -ENOMEM;
	}

	ring = ctx->base.ring;

	spin_lock_bh(&priv->ring[ring].queue_lock);
	ret = crypto_enqueue_request(&priv->ring[ring].queue, &req->base);
	spin_unlock_bh(&priv->ring[ring].queue_lock);

	if (!priv->ring[ring].need_dequeue)
		safexcel_dequeue(priv, ring);

	return ret;
}

static int safexcel_ecb_aes_encrypt(struct skcipher_request *req)
{
	return safexcel_aes(req, SAFEXCEL_ENCRYPT,
			    CONTEXT_CONTROL_CRYPTO_MODE_ECB);
}

static int safexcel_ecb_aes_decrypt(struct skcipher_request *req)
{
	return safexcel_aes(req, SAFEXCEL_DECRYPT,
			    CONTEXT_CONTROL_CRYPTO_MODE_ECB);
}
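
/* Per-tfm init: hook up the send/result handlers and the request context size. */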
static int safexcel_skcipher_cra_init(struct crypto_tfm *tfm)
{
	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct safexcel_alg_template *tmpl =
		container_of(tfm->__crt_alg, struct safexcel_alg_template,
			     alg.skcipher.base);

	ctx->priv = tmpl->priv;
	ctx->base.send = safexcel_send;
	ctx->base.handle_result = safexcel_handle_result;

	crypto_skcipher_set_reqsize(__crypto_skcipher_cast(tfm),
				    sizeof(struct safexcel_cipher_req));

	return 0;
}
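
/*
 * Per-tfm exit: wipe the cached key and, if a context record was pushed to
 * the engine, invalidate it before the memory can be reused.
 */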
static void safexcel_skcipher_cra_exit(struct crypto_tfm *tfm)
{
	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct safexcel_crypto_priv *priv = ctx->priv;
	int ret;

	memzero_explicit(ctx->key, 8 * sizeof(u32));

	/* context not allocated, skip invalidation */
	if (!ctx->base.ctxr)
		return;

	memzero_explicit(ctx->base.ctxr->data, 8 * sizeof(u32));

	ret = safexcel_cipher_exit_inv(tfm);
	if (ret)
		dev_warn(priv->dev, "cipher: invalidation error %d\n", ret);
}

struct safexcel_alg_template safexcel_alg_ecb_aes = {
	.type = SAFEXCEL_ALG_TYPE_SKCIPHER,
	.alg.skcipher = {
		.setkey = safexcel_aes_setkey,
		.encrypt = safexcel_ecb_aes_encrypt,
		.decrypt = safexcel_ecb_aes_decrypt,
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.base = {
			.cra_name = "ecb(aes)",
			.cra_driver_name = "safexcel-ecb-aes",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
			.cra_alignmask = 0,
			.cra_init = safexcel_skcipher_cra_init,
			.cra_exit = safexcel_skcipher_cra_exit,
			.cra_module = THIS_MODULE,
		},
	},
};

static int safexcel_cbc_aes_encrypt(struct skcipher_request *req)
{
	return safexcel_aes(req, SAFEXCEL_ENCRYPT,
			    CONTEXT_CONTROL_CRYPTO_MODE_CBC);
}

static int safexcel_cbc_aes_decrypt(struct skcipher_request *req)
{
	return safexcel_aes(req, SAFEXCEL_DECRYPT,
			    CONTEXT_CONTROL_CRYPTO_MODE_CBC);
}

struct safexcel_alg_template safexcel_alg_cbc_aes = {
	.type = SAFEXCEL_ALG_TYPE_SKCIPHER,
	.alg.skcipher = {
		.setkey = safexcel_aes_setkey,
		.encrypt = safexcel_cbc_aes_encrypt,
		.decrypt = safexcel_cbc_aes_decrypt,
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.ivsize = AES_BLOCK_SIZE,
		.base = {
			.cra_name = "cbc(aes)",
			.cra_driver_name = "safexcel-cbc-aes",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
			.cra_alignmask = 0,
			.cra_init = safexcel_skcipher_cra_init,
			.cra_exit = safexcel_skcipher_cra_exit,
			.cra_module = THIS_MODULE,
		},
	},
};