/*
 * sun4i-ss-cipher.c - hardware cryptographic accelerator for Allwinner A20 SoC
 *
 * Copyright (C) 2013-2015 Corentin LABBE <clabbe.montjoie@gmail.com>
 *
 * This file adds support for the AES cipher with 128, 192 and 256 bit
 * keysizes in CBC and ECB mode.
 * It also adds support for DES and 3DES in CBC and ECB mode.
 *
 * You can find the datasheet in Documentation/arm/sunxi/README
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
#include "sun4i-ss.h"

static int sun4i_ss_opti_poll(struct ablkcipher_request *areq)
{
        struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
        struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
        struct sun4i_ss_ctx *ss = op->ss;
        unsigned int ivsize = crypto_ablkcipher_ivsize(tfm);
        struct sun4i_cipher_req_ctx *ctx = ablkcipher_request_ctx(areq);
        u32 mode = ctx->mode;
        /* when activating SS, the default FIFO space is SS_RX_DEFAULT(32) */
        u32 rx_cnt = SS_RX_DEFAULT;
        u32 tx_cnt = 0;
        u32 spaces;
        u32 v;
        int i, err = 0;
        unsigned int ileft = areq->nbytes;
        unsigned int oleft = areq->nbytes;
        unsigned int todo;
        struct sg_mapping_iter mi, mo;
        unsigned int oi, oo; /* offset for in and out */

        if (areq->nbytes == 0)
                return 0;

        if (!areq->info) {
                dev_err_ratelimited(ss->dev, "ERROR: Empty IV\n");
                return -EINVAL;
        }

        if (!areq->src || !areq->dst) {
                dev_err_ratelimited(ss->dev, "ERROR: Some SGs are NULL\n");
                return -EINVAL;
        }

        spin_lock_bh(&ss->slock);
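
        /*
         * Program the SS: load the key words into SS_KEY0.., load the IV
         * into SS_IV0.. and write the algorithm/mode/direction word
         * (which includes SS_ENABLED) to SS_CTL to start the engine.
         */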
        for (i = 0; i < op->keylen; i += 4)
                writel(*(op->key + i / 4), ss->base + SS_KEY0 + i);

        if (areq->info) {
                for (i = 0; i < 4 && i < ivsize / 4; i++) {
                        v = *(u32 *)(areq->info + i * 4);
                        writel(v, ss->base + SS_IV0 + i * 4);
                }
        }
        writel(mode, ss->base + SS_CTL);

        sg_miter_start(&mi, areq->src, sg_nents(areq->src),
                       SG_MITER_FROM_SG | SG_MITER_ATOMIC);
        sg_miter_start(&mo, areq->dst, sg_nents(areq->dst),
                       SG_MITER_TO_SG | SG_MITER_ATOMIC);
        sg_miter_next(&mi);
        sg_miter_next(&mo);
        if (!mi.addr || !mo.addr) {
                dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n");
                err = -EINVAL;
                goto release_ss;
        }

        ileft = areq->nbytes / 4;
        oleft = areq->nbytes / 4;
        oi = 0;
        oo = 0;
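
        /*
         * Simple polling loop for the aligned case: write up to rx_cnt
         * 4-byte words from the source SG into the RX FIFO, re-read
         * SS_FCSR to learn how much FIFO space and data are available,
         * then drain up to tx_cnt words from the TX FIFO into the
         * destination SG. Loop until every output word has been read.
         */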
        do {
                todo = min3(rx_cnt, ileft, (mi.length - oi) / 4);
                if (todo > 0) {
                        ileft -= todo;
                        writesl(ss->base + SS_RXFIFO, mi.addr + oi, todo);
                        oi += todo * 4;
                }
                if (oi == mi.length) {
                        sg_miter_next(&mi);
                        oi = 0;
                }

                spaces = readl(ss->base + SS_FCSR);
                rx_cnt = SS_RXFIFO_SPACES(spaces);
                tx_cnt = SS_TXFIFO_SPACES(spaces);

                todo = min3(tx_cnt, oleft, (mo.length - oo) / 4);
                if (todo > 0) {
                        oleft -= todo;
                        readsl(ss->base + SS_TXFIFO, mo.addr + oo, todo);
                        oo += todo * 4;
                }
                if (oo == mo.length) {
                        sg_miter_next(&mo);
                        oo = 0;
                }
        } while (oleft > 0);
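
        /*
         * Copy the IV registers back into the request: the hardware is
         * expected to have updated them with the next chaining value
         * (for CBC, the last ciphertext block), which the ablkcipher API
         * wants back in areq->info after completion.
         */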
        if (areq->info) {
                for (i = 0; i < 4 && i < ivsize / 4; i++) {
                        v = readl(ss->base + SS_IV0 + i * 4);
                        *(u32 *)(areq->info + i * 4) = v;
                }
        }

release_ss:
        sg_miter_stop(&mi);
        sg_miter_stop(&mo);
        writel(0, ss->base + SS_CTL);
        spin_unlock_bh(&ss->slock);
        return err;
}

/* Generic function that supports SGs whose size is not a multiple of 4 */
static int sun4i_ss_cipher_poll(struct ablkcipher_request *areq)
{
        struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
        struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
        struct sun4i_ss_ctx *ss = op->ss;
        int no_chunk = 1;
        struct scatterlist *in_sg = areq->src;
        struct scatterlist *out_sg = areq->dst;
        unsigned int ivsize = crypto_ablkcipher_ivsize(tfm);
        struct sun4i_cipher_req_ctx *ctx = ablkcipher_request_ctx(areq);
        u32 mode = ctx->mode;
        /* when activating SS, the default FIFO space is SS_RX_DEFAULT(32) */
        u32 rx_cnt = SS_RX_DEFAULT;
        u32 tx_cnt = 0;
        u32 v;
        u32 spaces;
        int i, err = 0;
        unsigned int ileft = areq->nbytes;
        unsigned int oleft = areq->nbytes;
        unsigned int todo;
        struct sg_mapping_iter mi, mo;
        unsigned int oi, oo; /* offset for in and out */
        char buf[4 * SS_RX_MAX];  /* buffer for linearizing the source SG */
        char bufo[4 * SS_TX_MAX]; /* buffer for linearizing the destination SG */
        unsigned int ob = 0;  /* offset in buf */
        unsigned int obo = 0; /* offset in bufo */
        unsigned int obl = 0; /* length of data in bufo */

        if (areq->nbytes == 0)
                return 0;

        if (!areq->info) {
                dev_err_ratelimited(ss->dev, "ERROR: Empty IV\n");
                return -EINVAL;
        }

        if (!areq->src || !areq->dst) {
                dev_err_ratelimited(ss->dev, "ERROR: Some SGs are NULL\n");
                return -EINVAL;
        }

        /*
         * If we only have SGs whose size is a multiple of 4,
         * we can use the SS optimized function.
         */
        while (in_sg && no_chunk == 1) {
                if ((in_sg->length % 4) != 0)
                        no_chunk = 0;
                in_sg = sg_next(in_sg);
        }
        while (out_sg && no_chunk == 1) {
                if ((out_sg->length % 4) != 0)
                        no_chunk = 0;
                out_sg = sg_next(out_sg);
        }

        if (no_chunk == 1)
                return sun4i_ss_opti_poll(areq);

        spin_lock_bh(&ss->slock);
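
        /* Program the key, IV and mode exactly as in sun4i_ss_opti_poll() */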
        for (i = 0; i < op->keylen; i += 4)
                writel(*(op->key + i / 4), ss->base + SS_KEY0 + i);

        if (areq->info) {
                for (i = 0; i < 4 && i < ivsize / 4; i++) {
                        v = *(u32 *)(areq->info + i * 4);
                        writel(v, ss->base + SS_IV0 + i * 4);
                }
        }
        writel(mode, ss->base + SS_CTL);

        sg_miter_start(&mi, areq->src, sg_nents(areq->src),
                       SG_MITER_FROM_SG | SG_MITER_ATOMIC);
        sg_miter_start(&mo, areq->dst, sg_nents(areq->dst),
                       SG_MITER_TO_SG | SG_MITER_ATOMIC);
        sg_miter_next(&mi);
        sg_miter_next(&mo);
        if (!mi.addr || !mo.addr) {
                dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n");
                err = -EINVAL;
                goto release_ss;
        }

        ileft = areq->nbytes;
        oleft = areq->nbytes;
        oi = 0;
        oo = 0;
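
        /*
         * Main loop for the unaligned case: runs of whole 4-byte words
         * are pushed straight from the source SG to the RX FIFO, while
         * leftover bytes are staged in buf until a full word can be
         * written. On the way out, whole words go straight into the
         * destination SG when it has room for them; otherwise the TX
         * FIFO is drained into bufo and copied out byte by byte.
         */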
        while (oleft > 0) {
                if (ileft > 0) {
                        /*
                         * todo is the number of consecutive 4-byte words
                         * that we can read from the current SG.
                         */
                        todo = min3(rx_cnt, ileft / 4, (mi.length - oi) / 4);
                        if (todo > 0 && ob == 0) {
                                writesl(ss->base + SS_RXFIFO, mi.addr + oi,
                                        todo);
                                ileft -= todo * 4;
                                oi += todo * 4;
                        } else {
                                /*
                                 * Not enough consecutive bytes, so linearize
                                 * them in buf; here todo is in bytes. After
                                 * the copy, once buf holds a multiple of 4
                                 * bytes we must be able to write all of it
                                 * in one pass, which is why we min() with
                                 * rx_cnt.
                                 */
                                todo = min3(rx_cnt * 4 - ob, ileft,
                                            mi.length - oi);
                                memcpy(buf + ob, mi.addr + oi, todo);
                                ileft -= todo;
                                oi += todo;
                                ob += todo;
                                if (ob % 4 == 0) {
                                        writesl(ss->base + SS_RXFIFO, buf,
                                                ob / 4);
                                        ob = 0;
                                }
                        }
                        if (oi == mi.length) {
                                sg_miter_next(&mi);
                                oi = 0;
                        }
                }

                spaces = readl(ss->base + SS_FCSR);
                rx_cnt = SS_RXFIFO_SPACES(spaces);
                tx_cnt = SS_TXFIFO_SPACES(spaces);
                dev_dbg(ss->dev, "%x %u/%u %u/%u cnt=%u %u/%u %u/%u cnt=%u %u\n",
                        mode,
                        oi, mi.length, ileft, areq->nbytes, rx_cnt,
                        oo, mo.length, oleft, areq->nbytes, tx_cnt, ob);
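
                /* nothing is ready in the TX FIFO yet, keep polling */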
                if (tx_cnt == 0)
                        continue;

                /* todo is in 4-byte words */
                todo = min3(tx_cnt, oleft / 4, (mo.length - oo) / 4);
                if (todo > 0) {
                        readsl(ss->base + SS_TXFIFO, mo.addr + oo, todo);
                        oleft -= todo * 4;
                        oo += todo * 4;
                        if (oo == mo.length) {
                                sg_miter_next(&mo);
                                oo = 0;
                        }
                } else {
                        /*
                         * Read obl bytes into bufo, reading as much as
                         * possible so that the device is emptied.
                         */
                        readsl(ss->base + SS_TXFIFO, bufo, tx_cnt);
                        obl = tx_cnt * 4;
                        obo = 0;
                        do {
                                /*
                                 * How many bytes can we copy?
                                 * No more than the remaining SG size and
                                 * no more than the remaining buffer;
                                 * no need to test against oleft.
                                 */
                                todo = min(mo.length - oo, obl - obo);
                                memcpy(mo.addr + oo, bufo + obo, todo);
                                oleft -= todo;
                                obo += todo;
                                oo += todo;
                                if (oo == mo.length) {
                                        sg_miter_next(&mo);
                                        oo = 0;
                                }
                        } while (obo < obl);
                        /* bufo must be fully used here */
                }
        }
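
        /* as in the optimized path, copy the updated IV back for chaining */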
        if (areq->info) {
                for (i = 0; i < 4 && i < ivsize / 4; i++) {
                        v = readl(ss->base + SS_IV0 + i * 4);
                        *(u32 *)(areq->info + i * 4) = v;
                }
        }

release_ss:
        sg_miter_stop(&mi);
        sg_miter_stop(&mo);
        writel(0, ss->base + SS_CTL);
        spin_unlock_bh(&ss->slock);
        return err;
}
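
/*
 * The entry points below only select the algorithm, chaining mode and
 * direction bits for the request; all data handling is delegated to
 * sun4i_ss_cipher_poll().
 */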

/* CBC AES */
int sun4i_ss_cbc_aes_encrypt(struct ablkcipher_request *areq)
{
        struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
        struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
        struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);

        rctx->mode = SS_OP_AES | SS_CBC | SS_ENABLED | SS_ENCRYPTION |
                op->keymode;
        return sun4i_ss_cipher_poll(areq);
}

int sun4i_ss_cbc_aes_decrypt(struct ablkcipher_request *areq)
{
        struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
        struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
        struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);

        rctx->mode = SS_OP_AES | SS_CBC | SS_ENABLED | SS_DECRYPTION |
                op->keymode;
        return sun4i_ss_cipher_poll(areq);
}

/* ECB AES */
int sun4i_ss_ecb_aes_encrypt(struct ablkcipher_request *areq)
{
        struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
        struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
        struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);

        rctx->mode = SS_OP_AES | SS_ECB | SS_ENABLED | SS_ENCRYPTION |
                op->keymode;
        return sun4i_ss_cipher_poll(areq);
}

int sun4i_ss_ecb_aes_decrypt(struct ablkcipher_request *areq)
{
        struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
        struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
        struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);

        rctx->mode = SS_OP_AES | SS_ECB | SS_ENABLED | SS_DECRYPTION |
                op->keymode;
        return sun4i_ss_cipher_poll(areq);
}

/* CBC DES */
int sun4i_ss_cbc_des_encrypt(struct ablkcipher_request *areq)
{
        struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
        struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
        struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);

        rctx->mode = SS_OP_DES | SS_CBC | SS_ENABLED | SS_ENCRYPTION |
                op->keymode;
        return sun4i_ss_cipher_poll(areq);
}

int sun4i_ss_cbc_des_decrypt(struct ablkcipher_request *areq)
{
        struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
        struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
        struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);

        rctx->mode = SS_OP_DES | SS_CBC | SS_ENABLED | SS_DECRYPTION |
                op->keymode;
        return sun4i_ss_cipher_poll(areq);
}

/* ECB DES */
int sun4i_ss_ecb_des_encrypt(struct ablkcipher_request *areq)
{
        struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
        struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
        struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);

        rctx->mode = SS_OP_DES | SS_ECB | SS_ENABLED | SS_ENCRYPTION |
                op->keymode;
        return sun4i_ss_cipher_poll(areq);
}

int sun4i_ss_ecb_des_decrypt(struct ablkcipher_request *areq)
{
        struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
        struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
        struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);

        rctx->mode = SS_OP_DES | SS_ECB | SS_ENABLED | SS_DECRYPTION |
                op->keymode;
        return sun4i_ss_cipher_poll(areq);
}

/* CBC 3DES */
int sun4i_ss_cbc_des3_encrypt(struct ablkcipher_request *areq)
{
        struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
        struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
        struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);

        rctx->mode = SS_OP_3DES | SS_CBC | SS_ENABLED | SS_ENCRYPTION |
                op->keymode;
        return sun4i_ss_cipher_poll(areq);
}

int sun4i_ss_cbc_des3_decrypt(struct ablkcipher_request *areq)
{
        struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
        struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
        struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);

        rctx->mode = SS_OP_3DES | SS_CBC | SS_ENABLED | SS_DECRYPTION |
                op->keymode;
        return sun4i_ss_cipher_poll(areq);
}

/* ECB 3DES */
int sun4i_ss_ecb_des3_encrypt(struct ablkcipher_request *areq)
{
        struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
        struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
        struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);

        rctx->mode = SS_OP_3DES | SS_ECB | SS_ENABLED | SS_ENCRYPTION |
                op->keymode;
        return sun4i_ss_cipher_poll(areq);
}

int sun4i_ss_ecb_des3_decrypt(struct ablkcipher_request *areq)
{
        struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
        struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
        struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);

        rctx->mode = SS_OP_3DES | SS_ECB | SS_ENABLED | SS_DECRYPTION |
                op->keymode;
        return sun4i_ss_cipher_poll(areq);
}
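
/*
 * Set up the transform context: link it to its SS device and declare the
 * size of the per-request context used by the poll functions above.
 */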
int sun4i_ss_cipher_init(struct crypto_tfm *tfm)
{
        struct sun4i_tfm_ctx *op = crypto_tfm_ctx(tfm);
        struct crypto_alg *alg = tfm->__crt_alg;
        struct sun4i_ss_alg_template *algt;

        memset(op, 0, sizeof(struct sun4i_tfm_ctx));

        algt = container_of(alg, struct sun4i_ss_alg_template, alg.crypto);
        op->ss = algt->ss;

        tfm->crt_ablkcipher.reqsize = sizeof(struct sun4i_cipher_req_ctx);

        return 0;
}

/* check and set the AES key, prepare the mode to be used */
int sun4i_ss_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
                        unsigned int keylen)
{
        struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
        struct sun4i_ss_ctx *ss = op->ss;

        switch (keylen) {
        case 128 / 8:
                op->keymode = SS_AES_128BITS;
                break;
        case 192 / 8:
                op->keymode = SS_AES_192BITS;
                break;
        case 256 / 8:
                op->keymode = SS_AES_256BITS;
                break;
        default:
                dev_err(ss->dev, "ERROR: Invalid keylen %u\n", keylen);
                crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
                return -EINVAL;
        }
        op->keylen = keylen;
        memcpy(op->key, key, keylen);
        return 0;
}

/* check and set the DES key, prepare the mode to be used */
int sun4i_ss_des_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
                        unsigned int keylen)
{
        struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
        struct sun4i_ss_ctx *ss = op->ss;
        u32 flags;
        u32 tmp[DES_EXPKEY_WORDS];
        int ret;

        if (unlikely(keylen != DES_KEY_SIZE)) {
                dev_err(ss->dev, "Invalid keylen %u\n", keylen);
                crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
                return -EINVAL;
        }
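
        /*
         * des_ekey() returns 0 when the key is weak; honour
         * CRYPTO_TFM_REQ_WEAK_KEY by rejecting such keys.
         */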
        flags = crypto_ablkcipher_get_flags(tfm);
        ret = des_ekey(tmp, key);
        if (unlikely(ret == 0) && (flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
                crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_WEAK_KEY);
                dev_dbg(ss->dev, "Weak key %u\n", keylen);
                return -EINVAL;
        }

        op->keylen = keylen;
        memcpy(op->key, key, keylen);
        return 0;
}

/* check and set the 3DES key, prepare the mode to be used */
int sun4i_ss_des3_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
                         unsigned int keylen)
{
        struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
        struct sun4i_ss_ctx *ss = op->ss;

        if (unlikely(keylen != 3 * DES_KEY_SIZE)) {
                dev_err(ss->dev, "Invalid keylen %u\n", keylen);
                crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
                return -EINVAL;
        }
        op->keylen = keylen;
        memcpy(op->key, key, keylen);
        return 0;
}