/*
 * sun4i-ss-cipher.c - hardware cryptographic accelerator for Allwinner A20 SoC
 *
 * Copyright (C) 2013-2015 Corentin LABBE <clabbe.montjoie@gmail.com>
 *
 * This file adds support for the AES cipher with 128, 192 and 256 bit
 * keysizes in CBC and ECB mode.
 * It also adds support for DES and 3DES in CBC and ECB mode.
 *
 * You can find the datasheet in Documentation/arm/sunxi/README
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
#include "sun4i-ss.h"

static int sun4i_ss_opti_poll(struct ablkcipher_request *areq)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
	struct sun4i_ss_ctx *ss = op->ss;
	unsigned int ivsize = crypto_ablkcipher_ivsize(tfm);
	struct sun4i_cipher_req_ctx *ctx = ablkcipher_request_ctx(areq);
	u32 mode = ctx->mode;
	/* when activating SS, the default FIFO space is SS_RX_DEFAULT(32) */
	u32 rx_cnt = SS_RX_DEFAULT;
	u32 tx_cnt = 0;
	u32 spaces;
	u32 v;
	int i, err = 0;
	unsigned int ileft = areq->nbytes;
	unsigned int oleft = areq->nbytes;
	unsigned int todo;
	struct sg_mapping_iter mi, mo;
	unsigned int oi, oo; /* offset for in and out */

	if (areq->nbytes == 0)
		return 0;

	if (!areq->info) {
		dev_err_ratelimited(ss->dev, "ERROR: Empty IV\n");
		return -EINVAL;
	}

	if (!areq->src || !areq->dst) {
		dev_err_ratelimited(ss->dev, "ERROR: Some SGs are NULL\n");
		return -EINVAL;
	}
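
	/*
	 * Everything below runs with the SS lock held: program the key,
	 * the IV and the operation mode (SS_CTL), then feed and drain
	 * the FIFOs by polling.
	 */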
	spin_lock_bh(&ss->slock);

	for (i = 0; i < op->keylen; i += 4)
		writel(*(op->key + i / 4), ss->base + SS_KEY0 + i);

	if (areq->info) {
		for (i = 0; i < 4 && i < ivsize / 4; i++) {
			v = *(u32 *)(areq->info + i * 4);
			writel(v, ss->base + SS_IV0 + i * 4);
		}
	}
	writel(mode, ss->base + SS_CTL);

	sg_miter_start(&mi, areq->src, sg_nents(areq->src),
		       SG_MITER_FROM_SG | SG_MITER_ATOMIC);
	sg_miter_start(&mo, areq->dst, sg_nents(areq->dst),
		       SG_MITER_TO_SG | SG_MITER_ATOMIC);
	sg_miter_next(&mi);
	sg_miter_next(&mo);
	if (!mi.addr || !mo.addr) {
		dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n");
		err = -EINVAL;
		goto release_ss;
	}

	ileft = areq->nbytes / 4;
	oleft = areq->nbytes / 4;
	oi = 0;
	oo = 0;
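
	/*
	 * Main polling loop: push up to rx_cnt 32-bit words from the source
	 * SG into the RX FIFO, re-read SS_FCSR to learn how much FIFO space
	 * is available, then drain up to tx_cnt words from the TX FIFO into
	 * the destination SG. ileft/oleft count the remaining words on each
	 * side.
	 */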
	do {
		todo = min3(rx_cnt, ileft, (mi.length - oi) / 4);
		if (todo > 0) {
			ileft -= todo;
			writesl(ss->base + SS_RXFIFO, mi.addr + oi, todo);
			oi += todo * 4;
		}
		if (oi == mi.length) {
			sg_miter_next(&mi);
			oi = 0;
		}

		spaces = readl(ss->base + SS_FCSR);
		rx_cnt = SS_RXFIFO_SPACES(spaces);
		tx_cnt = SS_TXFIFO_SPACES(spaces);

		todo = min3(tx_cnt, oleft, (mo.length - oo) / 4);
		if (todo > 0) {
			oleft -= todo;
			readsl(ss->base + SS_TXFIFO, mo.addr + oo, todo);
			oo += todo * 4;
		}
		if (oo == mo.length) {
			sg_miter_next(&mo);
			oo = 0;
		}
	} while (oleft > 0);

	if (areq->info) {
		for (i = 0; i < 4 && i < ivsize / 4; i++) {
			v = readl(ss->base + SS_IV0 + i * 4);
			*(u32 *)(areq->info + i * 4) = v;
		}
	}

release_ss:
	sg_miter_stop(&mi);
	sg_miter_stop(&mo);
	writel(0, ss->base + SS_CTL);
	spin_unlock_bh(&ss->slock);
	return err;
}

/* Generic function that supports SGs whose sizes are not a multiple of 4 */
static int sun4i_ss_cipher_poll(struct ablkcipher_request *areq)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
	struct sun4i_ss_ctx *ss = op->ss;
	int no_chunk = 1;
	struct scatterlist *in_sg = areq->src;
	struct scatterlist *out_sg = areq->dst;
	unsigned int ivsize = crypto_ablkcipher_ivsize(tfm);
	struct sun4i_cipher_req_ctx *ctx = ablkcipher_request_ctx(areq);
	u32 mode = ctx->mode;
	/* when activating SS, the default FIFO space is SS_RX_DEFAULT(32) */
	u32 rx_cnt = SS_RX_DEFAULT;
	u32 tx_cnt = 0;
	u32 v;
	u32 spaces;
	int i, err = 0;
	unsigned int ileft = areq->nbytes;
	unsigned int oleft = areq->nbytes;
	unsigned int todo;
	struct sg_mapping_iter mi, mo;
	unsigned int oi, oo;	/* offset for in and out */
	char buf[4 * SS_RX_MAX];	/* buffer for linearizing the src SG */
	char bufo[4 * SS_TX_MAX];	/* buffer for linearizing the dst SG */
	unsigned int ob = 0;	/* offset in buf */
	unsigned int obo = 0;	/* offset in bufo */
	unsigned int obl = 0;	/* length of data in bufo */

	if (areq->nbytes == 0)
		return 0;

	if (!areq->info) {
		dev_err_ratelimited(ss->dev, "ERROR: Empty IV\n");
		return -EINVAL;
	}

	if (!areq->src || !areq->dst) {
		dev_err_ratelimited(ss->dev, "ERROR: Some SGs are NULL\n");
		return -EINVAL;
	}

	/*
	 * If we only have SGs whose sizes are a multiple of 4,
	 * we can use the SS optimized function.
	 */
	while (in_sg && no_chunk == 1) {
		if ((in_sg->length % 4) != 0)
			no_chunk = 0;
		in_sg = sg_next(in_sg);
	}
	while (out_sg && no_chunk == 1) {
		if ((out_sg->length % 4) != 0)
			no_chunk = 0;
		out_sg = sg_next(out_sg);
	}

	if (no_chunk == 1)
		return sun4i_ss_opti_poll(areq);

	spin_lock_bh(&ss->slock);

	for (i = 0; i < op->keylen; i += 4)
		writel(*(op->key + i / 4), ss->base + SS_KEY0 + i);

	if (areq->info) {
		for (i = 0; i < 4 && i < ivsize / 4; i++) {
			v = *(u32 *)(areq->info + i * 4);
			writel(v, ss->base + SS_IV0 + i * 4);
		}
	}
	writel(mode, ss->base + SS_CTL);

	sg_miter_start(&mi, areq->src, sg_nents(areq->src),
		       SG_MITER_FROM_SG | SG_MITER_ATOMIC);
	sg_miter_start(&mo, areq->dst, sg_nents(areq->dst),
		       SG_MITER_TO_SG | SG_MITER_ATOMIC);
	sg_miter_next(&mi);
	sg_miter_next(&mo);
	if (!mi.addr || !mo.addr) {
		dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n");
		err = -EINVAL;
		goto release_ss;
	}
	ileft = areq->nbytes;
	oleft = areq->nbytes;
	oi = 0;
	oo = 0;
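
	/*
	 * Main loop for the unaligned case: whenever a whole 4-byte word
	 * cannot be read or written directly from/to the current SG entry,
	 * the data is staged through the local buf/bufo buffers so that the
	 * FIFOs are still accessed one 32-bit word at a time.
	 */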
	while (oleft > 0) {
		if (ileft > 0) {
			/*
			 * todo is the number of consecutive 4-byte words
			 * that we can read from the current SG
			 */
			todo = min3(rx_cnt, ileft / 4, (mi.length - oi) / 4);
			if (todo > 0 && ob == 0) {
				writesl(ss->base + SS_RXFIFO, mi.addr + oi,
					todo);
				ileft -= todo * 4;
				oi += todo * 4;
			} else {
				/*
				 * Not enough consecutive bytes, so we need to
				 * linearize them in buf. todo is in bytes.
				 * After that copy, if we have a multiple of 4
				 * we need to be able to write all of buf in
				 * one pass, which is why we min() with rx_cnt.
				 */
				todo = min3(rx_cnt * 4 - ob, ileft,
					    mi.length - oi);
				memcpy(buf + ob, mi.addr + oi, todo);
				ileft -= todo;
				oi += todo;
				ob += todo;
				if (ob % 4 == 0) {
					writesl(ss->base + SS_RXFIFO, buf,
						ob / 4);
					ob = 0;
				}
			}
			if (oi == mi.length) {
				sg_miter_next(&mi);
				oi = 0;
			}
		}

		spaces = readl(ss->base + SS_FCSR);
		rx_cnt = SS_RXFIFO_SPACES(spaces);
		tx_cnt = SS_TXFIFO_SPACES(spaces);
		dev_dbg(ss->dev, "%x %u/%u %u/%u cnt=%u %u/%u %u/%u cnt=%u %u %u\n",
			mode,
			oi, mi.length, ileft, areq->nbytes, rx_cnt,
			oo, mo.length, oleft, areq->nbytes, tx_cnt,
			todo, ob);

		if (tx_cnt == 0)
			continue;

		/* todo is in 4-byte words */
		todo = min3(tx_cnt, oleft / 4, (mo.length - oo) / 4);
		if (todo > 0) {
			readsl(ss->base + SS_TXFIFO, mo.addr + oo, todo);
			oleft -= todo * 4;
			oo += todo * 4;
			if (oo == mo.length) {
				sg_miter_next(&mo);
				oo = 0;
			}
		} else {
			/*
			 * Read obl bytes into bufo; read as much as
			 * possible in order to empty the device.
			 */
			readsl(ss->base + SS_TXFIFO, bufo, tx_cnt);
			obl = tx_cnt * 4;
			obo = 0;
			do {
				/*
				 * How many bytes can we copy?
				 * No more than the remaining SG size,
				 * no more than the remaining buffer;
				 * no need to test against oleft.
				 */
				todo = min(mo.length - oo, obl - obo);
				memcpy(mo.addr + oo, bufo + obo, todo);
				oleft -= todo;
				obo += todo;
				oo += todo;
				if (oo == mo.length) {
					sg_miter_next(&mo);
					oo = 0;
				}
			} while (obo < obl);
			/* bufo must be fully used here */
		}
	}
	if (areq->info) {
		for (i = 0; i < 4 && i < ivsize / 4; i++) {
			v = readl(ss->base + SS_IV0 + i * 4);
			*(u32 *)(areq->info + i * 4) = v;
		}
	}

release_ss:
	sg_miter_stop(&mi);
	sg_miter_stop(&mo);
	writel(0, ss->base + SS_CTL);
	spin_unlock_bh(&ss->slock);
	return err;
}
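
/*
 * The per-algorithm entry points below only fill rctx->mode with the
 * proper SS control bits (algorithm, chaining mode, direction, keymode)
 * and then delegate the actual work to sun4i_ss_cipher_poll().
 */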
/* CBC AES */
int sun4i_ss_cbc_aes_encrypt(struct ablkcipher_request *areq)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);

	rctx->mode = SS_OP_AES | SS_CBC | SS_ENABLED | SS_ENCRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

int sun4i_ss_cbc_aes_decrypt(struct ablkcipher_request *areq)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);

	rctx->mode = SS_OP_AES | SS_CBC | SS_ENABLED | SS_DECRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

/* ECB AES */
int sun4i_ss_ecb_aes_encrypt(struct ablkcipher_request *areq)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);

	rctx->mode = SS_OP_AES | SS_ECB | SS_ENABLED | SS_ENCRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

int sun4i_ss_ecb_aes_decrypt(struct ablkcipher_request *areq)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);

	rctx->mode = SS_OP_AES | SS_ECB | SS_ENABLED | SS_DECRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

/* CBC DES */
int sun4i_ss_cbc_des_encrypt(struct ablkcipher_request *areq)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);

	rctx->mode = SS_OP_DES | SS_CBC | SS_ENABLED | SS_ENCRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

int sun4i_ss_cbc_des_decrypt(struct ablkcipher_request *areq)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);

	rctx->mode = SS_OP_DES | SS_CBC | SS_ENABLED | SS_DECRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

/* ECB DES */
int sun4i_ss_ecb_des_encrypt(struct ablkcipher_request *areq)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);

	rctx->mode = SS_OP_DES | SS_ECB | SS_ENABLED | SS_ENCRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

int sun4i_ss_ecb_des_decrypt(struct ablkcipher_request *areq)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);

	rctx->mode = SS_OP_DES | SS_ECB | SS_ENABLED | SS_DECRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

/* CBC 3DES */
int sun4i_ss_cbc_des3_encrypt(struct ablkcipher_request *areq)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);

	rctx->mode = SS_OP_3DES | SS_CBC | SS_ENABLED | SS_ENCRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

int sun4i_ss_cbc_des3_decrypt(struct ablkcipher_request *areq)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);

	rctx->mode = SS_OP_3DES | SS_CBC | SS_ENABLED | SS_DECRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

/* ECB 3DES */
int sun4i_ss_ecb_des3_encrypt(struct ablkcipher_request *areq)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);

	rctx->mode = SS_OP_3DES | SS_ECB | SS_ENABLED | SS_ENCRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

int sun4i_ss_ecb_des3_decrypt(struct ablkcipher_request *areq)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);

	rctx->mode = SS_OP_3DES | SS_ECB | SS_ENABLED | SS_DECRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

int sun4i_ss_cipher_init(struct crypto_tfm *tfm)
{
	struct sun4i_tfm_ctx *op = crypto_tfm_ctx(tfm);
	struct crypto_alg *alg = tfm->__crt_alg;
	struct sun4i_ss_alg_template *algt;

	memset(op, 0, sizeof(struct sun4i_tfm_ctx));

	algt = container_of(alg, struct sun4i_ss_alg_template, alg.crypto);
	op->ss = algt->ss;

	tfm->crt_ablkcipher.reqsize = sizeof(struct sun4i_cipher_req_ctx);

	return 0;
}

/* check and set the AES key, prepare the mode to be used */
int sun4i_ss_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			unsigned int keylen)
{
	struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
	struct sun4i_ss_ctx *ss = op->ss;

	switch (keylen) {
	case 128 / 8:
		op->keymode = SS_AES_128BITS;
		break;
	case 192 / 8:
		op->keymode = SS_AES_192BITS;
		break;
	case 256 / 8:
		op->keymode = SS_AES_256BITS;
		break;
	default:
		dev_err(ss->dev, "ERROR: Invalid keylen %u\n", keylen);
		crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}
	op->keylen = keylen;
	memcpy(op->key, key, keylen);
	return 0;
}

/* check and set the DES key, prepare the mode to be used */
int sun4i_ss_des_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			unsigned int keylen)
{
	struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
	struct sun4i_ss_ctx *ss = op->ss;
	u32 flags;
	u32 tmp[DES_EXPKEY_WORDS];
	int ret;

	if (unlikely(keylen != DES_KEY_SIZE)) {
		dev_err(ss->dev, "Invalid keylen %u\n", keylen);
		crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}
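
	/*
	 * des_ekey() returns 0 when the key is a weak DES key; if the
	 * transform requested weak-key checking, reject such keys here.
	 */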
	flags = crypto_ablkcipher_get_flags(tfm);

	ret = des_ekey(tmp, key);
	if (unlikely(ret == 0) && (flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
		crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_WEAK_KEY);
		dev_dbg(ss->dev, "Weak key %u\n", keylen);
		return -EINVAL;
	}

	op->keylen = keylen;
	memcpy(op->key, key, keylen);
	return 0;
}

/* check and set the 3DES key, prepare the mode to be used */
int sun4i_ss_des3_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			 unsigned int keylen)
{
	struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
	struct sun4i_ss_ctx *ss = op->ss;

	if (unlikely(keylen != 3 * DES_KEY_SIZE)) {
		dev_err(ss->dev, "Invalid keylen %u\n", keylen);
		crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}
	op->keylen = keylen;
	memcpy(op->key, key, keylen);
	return 0;
}