// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2016-2017 Hisilicon Limited. */
#include <linux/crypto.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>

#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/des.h>
#include <crypto/skcipher.h>
#include <crypto/xts.h>
#include <crypto/internal/skcipher.h>

#include "sec_drv.h"

#define SEC_MAX_CIPHER_KEY		64
#define SEC_REQ_LIMIT			SZ_32M

struct sec_c_alg_cfg {
        unsigned c_alg    : 3;
        unsigned c_mode   : 3;
        unsigned key_len  : 2;
        unsigned c_width  : 2;
};
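
/*
 * Per-algorithm cipher configuration, indexed by enum sec_cipher_alg.  Each
 * entry selects the algorithm, block mode and key-length encoding that is
 * programmed into the hardware buffer descriptor (BD) template for that alg.
 */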
static const struct sec_c_alg_cfg sec_c_alg_cfgs[] = {
        [SEC_C_DES_ECB_64] = {
                .c_alg = SEC_C_ALG_DES,
                .c_mode = SEC_C_MODE_ECB,
                .key_len = SEC_KEY_LEN_DES,
        },
        [SEC_C_DES_CBC_64] = {
                .c_alg = SEC_C_ALG_DES,
                .c_mode = SEC_C_MODE_CBC,
                .key_len = SEC_KEY_LEN_DES,
        },
        [SEC_C_3DES_ECB_192_3KEY] = {
                .c_alg = SEC_C_ALG_3DES,
                .c_mode = SEC_C_MODE_ECB,
                .key_len = SEC_KEY_LEN_3DES_3_KEY,
        },
        [SEC_C_3DES_ECB_192_2KEY] = {
                .c_alg = SEC_C_ALG_3DES,
                .c_mode = SEC_C_MODE_ECB,
                .key_len = SEC_KEY_LEN_3DES_2_KEY,
        },
        [SEC_C_3DES_CBC_192_3KEY] = {
                .c_alg = SEC_C_ALG_3DES,
                .c_mode = SEC_C_MODE_CBC,
                .key_len = SEC_KEY_LEN_3DES_3_KEY,
        },
        [SEC_C_3DES_CBC_192_2KEY] = {
                .c_alg = SEC_C_ALG_3DES,
                .c_mode = SEC_C_MODE_CBC,
                .key_len = SEC_KEY_LEN_3DES_2_KEY,
        },
        [SEC_C_AES_ECB_128] = {
                .c_alg = SEC_C_ALG_AES,
                .c_mode = SEC_C_MODE_ECB,
                .key_len = SEC_KEY_LEN_AES_128,
        },
        [SEC_C_AES_ECB_192] = {
                .c_alg = SEC_C_ALG_AES,
                .c_mode = SEC_C_MODE_ECB,
                .key_len = SEC_KEY_LEN_AES_192,
        },
        [SEC_C_AES_ECB_256] = {
                .c_alg = SEC_C_ALG_AES,
                .c_mode = SEC_C_MODE_ECB,
                .key_len = SEC_KEY_LEN_AES_256,
        },
        [SEC_C_AES_CBC_128] = {
                .c_alg = SEC_C_ALG_AES,
                .c_mode = SEC_C_MODE_CBC,
                .key_len = SEC_KEY_LEN_AES_128,
        },
        [SEC_C_AES_CBC_192] = {
                .c_alg = SEC_C_ALG_AES,
                .c_mode = SEC_C_MODE_CBC,
                .key_len = SEC_KEY_LEN_AES_192,
        },
        [SEC_C_AES_CBC_256] = {
                .c_alg = SEC_C_ALG_AES,
                .c_mode = SEC_C_MODE_CBC,
                .key_len = SEC_KEY_LEN_AES_256,
        },
        [SEC_C_AES_CTR_128] = {
                .c_alg = SEC_C_ALG_AES,
                .c_mode = SEC_C_MODE_CTR,
                .key_len = SEC_KEY_LEN_AES_128,
        },
        [SEC_C_AES_CTR_192] = {
                .c_alg = SEC_C_ALG_AES,
                .c_mode = SEC_C_MODE_CTR,
                .key_len = SEC_KEY_LEN_AES_192,
        },
        [SEC_C_AES_CTR_256] = {
                .c_alg = SEC_C_ALG_AES,
                .c_mode = SEC_C_MODE_CTR,
                .key_len = SEC_KEY_LEN_AES_256,
        },
        [SEC_C_AES_XTS_128] = {
                .c_alg = SEC_C_ALG_AES,
                .c_mode = SEC_C_MODE_XTS,
                .key_len = SEC_KEY_LEN_AES_128,
        },
        [SEC_C_AES_XTS_256] = {
                .c_alg = SEC_C_ALG_AES,
                .c_mode = SEC_C_MODE_XTS,
                .key_len = SEC_KEY_LEN_AES_256,
        },
        [SEC_C_NULL] = {
        },
};

/*
 * Mutex used to ensure safe operation of the reference count of
 * alg providers
 */
static DEFINE_MUTEX(algs_lock);
static unsigned int active_devs;
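
/*
 * Fill in the buffer descriptor template for this transform: cipher
 * algorithm, block mode, key length and the DMA address of the key buffer.
 * Every BD subsequently built for the tfm starts from this template.
 */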
static void sec_alg_skcipher_init_template(struct sec_alg_tfm_ctx *ctx,
                                           struct sec_bd_info *req,
                                           enum sec_cipher_alg alg)
{
        const struct sec_c_alg_cfg *cfg = &sec_c_alg_cfgs[alg];

        memset(req, 0, sizeof(*req));
        req->w0 |= cfg->c_mode << SEC_BD_W0_C_MODE_S;
        req->w1 |= cfg->c_alg << SEC_BD_W1_C_ALG_S;
        req->w3 |= cfg->key_len << SEC_BD_W3_C_KEY_LEN_S;
        req->w0 |= cfg->c_width << SEC_BD_W0_C_WIDTH_S;

        req->cipher_key_addr_lo = lower_32_bits(ctx->pkey);
        req->cipher_key_addr_hi = upper_32_bits(ctx->pkey);
}

static void sec_alg_skcipher_init_context(struct crypto_skcipher *atfm,
                                          const u8 *key,
                                          unsigned int keylen,
                                          enum sec_cipher_alg alg)
{
        struct crypto_tfm *tfm = crypto_skcipher_tfm(atfm);
        struct sec_alg_tfm_ctx *ctx = crypto_tfm_ctx(tfm);

        ctx->cipher_alg = alg;
        memcpy(ctx->key, key, keylen);
        sec_alg_skcipher_init_template(ctx, &ctx->req_template,
                                       ctx->cipher_alg);
}
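
/*
 * Convert a DMA-mapped scatterlist into the hardware SGL format.  Hardware
 * SGL blocks take up to SEC_MAX_SGE_NUM entries each and are allocated from
 * the device DMA pool, chained together through their next/next_sgl fields.
 */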
static int sec_alloc_and_fill_hw_sgl(struct sec_hw_sgl **sec_sgl,
                                     dma_addr_t *psec_sgl,
                                     struct scatterlist *sgl,
                                     int count,
                                     struct sec_dev_info *info)
{
        struct sec_hw_sgl *sgl_current = NULL;
        struct sec_hw_sgl *sgl_next;
        dma_addr_t sgl_next_dma;
        struct scatterlist *sg;
        int ret, sge_index, i;

        if (!count)
                return -EINVAL;

        for_each_sg(sgl, sg, count, i) {
                sge_index = i % SEC_MAX_SGE_NUM;
                if (sge_index == 0) {
                        sgl_next = dma_pool_zalloc(info->hw_sgl_pool,
                                                   GFP_KERNEL, &sgl_next_dma);
                        if (!sgl_next) {
                                ret = -ENOMEM;
                                goto err_free_hw_sgls;
                        }

                        if (!sgl_current) { /* First one */
                                *psec_sgl = sgl_next_dma;
                                *sec_sgl = sgl_next;
                        } else { /* Chained */
                                sgl_current->entry_sum_in_sgl = SEC_MAX_SGE_NUM;
                                sgl_current->next_sgl = sgl_next_dma;
                                sgl_current->next = sgl_next;
                        }
                        sgl_current = sgl_next;
                }
                sgl_current->sge_entries[sge_index].buf = sg_dma_address(sg);
                sgl_current->sge_entries[sge_index].len = sg_dma_len(sg);
                sgl_current->data_bytes_in_sgl += sg_dma_len(sg);
        }
        sgl_current->entry_sum_in_sgl = count % SEC_MAX_SGE_NUM;
        sgl_current->next_sgl = 0;
        (*sec_sgl)->entry_sum_in_chain = count;

        return 0;

err_free_hw_sgls:
        /*
         * Free each block with the DMA handle it was actually allocated
         * with: the head's handle is *psec_sgl, later handles come from the
         * previous block's next_sgl field.
         */
        sgl_current = *sec_sgl;
        sgl_next_dma = *psec_sgl;
        while (sgl_current) {
                dma_addr_t sgl_current_dma = sgl_next_dma;

                sgl_next = sgl_current->next;
                sgl_next_dma = sgl_current->next_sgl;
                dma_pool_free(info->hw_sgl_pool, sgl_current, sgl_current_dma);
                sgl_current = sgl_next;
        }
        *psec_sgl = 0;

        return ret;
}
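
/*
 * Walk a hardware SGL chain and return every block to the DMA pool.  Each
 * block is freed with its own DMA handle: @psec_sgl for the head, then the
 * next_sgl value recorded in the block just visited.
 */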
static void sec_free_hw_sgl(struct sec_hw_sgl *hw_sgl,
                            dma_addr_t psec_sgl, struct sec_dev_info *info)
{
        struct sec_hw_sgl *sgl_current, *sgl_next;
        dma_addr_t sgl_next_dma;

        sgl_current = hw_sgl;
        while (sgl_current) {
                sgl_next = sgl_current->next;
                sgl_next_dma = sgl_current->next_sgl;

                dma_pool_free(info->hw_sgl_pool, sgl_current, psec_sgl);

                sgl_current = sgl_next;
                psec_sgl = sgl_next_dma;
        }
}
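
/*
 * Common setkey path: allocate (or clear and reuse) the DMA-coherent key
 * buffer shared with the device, then rebuild the BD template for @alg.
 */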
static int sec_alg_skcipher_setkey(struct crypto_skcipher *tfm,
                                   const u8 *key, unsigned int keylen,
                                   enum sec_cipher_alg alg)
{
        struct sec_alg_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
        struct device *dev = ctx->queue->dev_info->dev;

        mutex_lock(&ctx->lock);
        if (ctx->key) {
                /* rekeying */
                memset(ctx->key, 0, SEC_MAX_CIPHER_KEY);
        } else {
                /* new key */
                ctx->key = dma_zalloc_coherent(dev, SEC_MAX_CIPHER_KEY,
                                               &ctx->pkey, GFP_KERNEL);
                if (!ctx->key) {
                        mutex_unlock(&ctx->lock);
                        return -ENOMEM;
                }
        }
        mutex_unlock(&ctx->lock);
        sec_alg_skcipher_init_context(tfm, key, keylen, alg);

        return 0;
}

static int sec_alg_skcipher_setkey_aes_ecb(struct crypto_skcipher *tfm,
                                           const u8 *key, unsigned int keylen)
{
        enum sec_cipher_alg alg;

        switch (keylen) {
        case AES_KEYSIZE_128:
                alg = SEC_C_AES_ECB_128;
                break;
        case AES_KEYSIZE_192:
                alg = SEC_C_AES_ECB_192;
                break;
        case AES_KEYSIZE_256:
                alg = SEC_C_AES_ECB_256;
                break;
        default:
                return -EINVAL;
        }

        return sec_alg_skcipher_setkey(tfm, key, keylen, alg);
}

static int sec_alg_skcipher_setkey_aes_cbc(struct crypto_skcipher *tfm,
                                           const u8 *key, unsigned int keylen)
{
        enum sec_cipher_alg alg;

        switch (keylen) {
        case AES_KEYSIZE_128:
                alg = SEC_C_AES_CBC_128;
                break;
        case AES_KEYSIZE_192:
                alg = SEC_C_AES_CBC_192;
                break;
        case AES_KEYSIZE_256:
                alg = SEC_C_AES_CBC_256;
                break;
        default:
                return -EINVAL;
        }

        return sec_alg_skcipher_setkey(tfm, key, keylen, alg);
}

static int sec_alg_skcipher_setkey_aes_ctr(struct crypto_skcipher *tfm,
                                           const u8 *key, unsigned int keylen)
{
        enum sec_cipher_alg alg;

        switch (keylen) {
        case AES_KEYSIZE_128:
                alg = SEC_C_AES_CTR_128;
                break;
        case AES_KEYSIZE_192:
                alg = SEC_C_AES_CTR_192;
                break;
        case AES_KEYSIZE_256:
                alg = SEC_C_AES_CTR_256;
                break;
        default:
                return -EINVAL;
        }

        return sec_alg_skcipher_setkey(tfm, key, keylen, alg);
}

static int sec_alg_skcipher_setkey_aes_xts(struct crypto_skcipher *tfm,
                                           const u8 *key, unsigned int keylen)
{
        enum sec_cipher_alg alg;
        int ret;

        ret = xts_verify_key(tfm, key, keylen);
        if (ret)
                return ret;

        switch (keylen) {
        case AES_KEYSIZE_128 * 2:
                alg = SEC_C_AES_XTS_128;
                break;
        case AES_KEYSIZE_256 * 2:
                alg = SEC_C_AES_XTS_256;
                break;
        default:
                return -EINVAL;
        }

        return sec_alg_skcipher_setkey(tfm, key, keylen, alg);
}

static int sec_alg_skcipher_setkey_des_ecb(struct crypto_skcipher *tfm,
                                           const u8 *key, unsigned int keylen)
{
        if (keylen != DES_KEY_SIZE)
                return -EINVAL;

        return sec_alg_skcipher_setkey(tfm, key, keylen, SEC_C_DES_ECB_64);
}

static int sec_alg_skcipher_setkey_des_cbc(struct crypto_skcipher *tfm,
                                           const u8 *key, unsigned int keylen)
{
        if (keylen != DES_KEY_SIZE)
                return -EINVAL;

        return sec_alg_skcipher_setkey(tfm, key, keylen, SEC_C_DES_CBC_64);
}

static int sec_alg_skcipher_setkey_3des_ecb(struct crypto_skcipher *tfm,
                                            const u8 *key, unsigned int keylen)
{
        if (keylen != DES_KEY_SIZE * 3)
                return -EINVAL;

        return sec_alg_skcipher_setkey(tfm, key, keylen,
                                       SEC_C_3DES_ECB_192_3KEY);
}

static int sec_alg_skcipher_setkey_3des_cbc(struct crypto_skcipher *tfm,
                                            const u8 *key, unsigned int keylen)
{
        if (keylen != DES3_EDE_KEY_SIZE)
                return -EINVAL;

        return sec_alg_skcipher_setkey(tfm, key, keylen,
                                       SEC_C_3DES_CBC_192_3KEY);
}
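
/*
 * Free one request element: its hardware SGLs, the split scatterlists it
 * references and the element itself.
 */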
static void sec_alg_free_el(struct sec_request_el *el,
                            struct sec_dev_info *info)
{
        sec_free_hw_sgl(el->out, el->dma_out, info);
        sec_free_hw_sgl(el->in, el->dma_in, info);
        kfree(el->sgl_in);
        kfree(el->sgl_out);
        kfree(el);
}

/* queuelock must be held */
static int sec_send_request(struct sec_request *sec_req, struct sec_queue *queue)
{
        struct sec_request_el *el, *temp;
        int ret = 0;

        mutex_lock(&sec_req->lock);
        list_for_each_entry_safe(el, temp, &sec_req->elements, head) {
                /*
                 * Add to hardware queue only under the following circumstances:
                 * 1) Software and hardware queue empty so no chain dependencies
                 * 2) No dependencies as new IV - (check software queue empty
                 *    to maintain order)
                 * 3) No dependencies because the mode does no chaining.
                 *
                 * In other cases first insert onto the software queue which
                 * is then emptied as requests complete.
                 */
                if (!queue->havesoftqueue ||
                    (kfifo_is_empty(&queue->softqueue) &&
                     sec_queue_empty(queue))) {
                        ret = sec_queue_send(queue, &el->req, sec_req);
                        if (ret == -EAGAIN) {
                                /* Wait until we can send then try again */
                                /* DEAD if here - should not happen */
                                ret = -EBUSY;
                                goto err_unlock;
                        }
                } else {
                        kfifo_put(&queue->softqueue, el);
                }
        }
err_unlock:
        mutex_unlock(&sec_req->lock);

        return ret;
}
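
/*
 * Completion handler for a single buffer descriptor of a skcipher request:
 * check the response, update the IV for chained modes, feed the next element
 * from the software queue or backlog into the hardware, and complete the
 * crypto request once all of its elements have been processed.
 */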
static void sec_skcipher_alg_callback(struct sec_bd_info *sec_resp,
                                      struct crypto_async_request *req_base)
{
        struct skcipher_request *skreq = container_of(req_base,
                                                      struct skcipher_request,
                                                      base);
        struct sec_request *sec_req = skcipher_request_ctx(skreq);
        struct sec_request *backlog_req;
        struct sec_request_el *sec_req_el, *nextrequest;
        struct sec_alg_tfm_ctx *ctx = sec_req->tfm_ctx;
        struct crypto_skcipher *atfm = crypto_skcipher_reqtfm(skreq);
        struct device *dev = ctx->queue->dev_info->dev;
        int icv_or_skey_en, ret;
        bool done;

        sec_req_el = list_first_entry(&sec_req->elements, struct sec_request_el,
                                      head);
        icv_or_skey_en = (sec_resp->w0 & SEC_BD_W0_ICV_OR_SKEY_EN_M) >>
                SEC_BD_W0_ICV_OR_SKEY_EN_S;
        if (sec_resp->w1 & SEC_BD_W1_BD_INVALID || icv_or_skey_en == 3) {
                dev_err(dev, "Got an invalid answer %lu %d\n",
                        sec_resp->w1 & SEC_BD_W1_BD_INVALID,
                        icv_or_skey_en);
                sec_req->err = -EINVAL;
                /*
                 * We need to muddle on to avoid getting stuck with elements
                 * on the queue. The error will be reported to the requester
                 * so it should be able to handle it appropriately.
                 */
        }

        mutex_lock(&ctx->queue->queuelock);
        /* Put the IV in place for chained cases */
        switch (ctx->cipher_alg) {
        case SEC_C_AES_CBC_128:
        case SEC_C_AES_CBC_192:
        case SEC_C_AES_CBC_256:
                if (sec_req_el->req.w0 & SEC_BD_W0_DE)
                        sg_pcopy_to_buffer(sec_req_el->sgl_out,
                                           sg_nents(sec_req_el->sgl_out),
                                           skreq->iv,
                                           crypto_skcipher_ivsize(atfm),
                                           sec_req_el->el_length -
                                           crypto_skcipher_ivsize(atfm));
                else
                        sg_pcopy_to_buffer(sec_req_el->sgl_in,
                                           sg_nents(sec_req_el->sgl_in),
                                           skreq->iv,
                                           crypto_skcipher_ivsize(atfm),
                                           sec_req_el->el_length -
                                           crypto_skcipher_ivsize(atfm));
                /* No need to sync to the device as coherent DMA */
                break;
        case SEC_C_AES_CTR_128:
        case SEC_C_AES_CTR_192:
        case SEC_C_AES_CTR_256:
                crypto_inc(skreq->iv, 16);
                break;
        default:
                /* Do not update */
                break;
        }

        if (ctx->queue->havesoftqueue &&
            !kfifo_is_empty(&ctx->queue->softqueue) &&
            sec_queue_empty(ctx->queue)) {
                ret = kfifo_get(&ctx->queue->softqueue, &nextrequest);
                if (ret <= 0)
                        dev_err(dev,
                                "Error getting next element from kfifo %d\n",
                                ret);
                else
                        /* We know there is space so this cannot fail */
                        sec_queue_send(ctx->queue, &nextrequest->req,
                                       nextrequest->sec_req);
        } else if (!list_empty(&ctx->backlog)) {
                /* Need to verify there is room first */
                backlog_req = list_first_entry(&ctx->backlog,
                                               typeof(*backlog_req),
                                               backlog_head);
                if (sec_queue_can_enqueue(ctx->queue,
                    backlog_req->num_elements) ||
                    (ctx->queue->havesoftqueue &&
                     kfifo_avail(&ctx->queue->softqueue) >
                     backlog_req->num_elements)) {
                        sec_send_request(backlog_req, ctx->queue);
                        backlog_req->req_base->complete(backlog_req->req_base,
                                                        -EINPROGRESS);
                        list_del(&backlog_req->backlog_head);
                }
        }
        mutex_unlock(&ctx->queue->queuelock);

        mutex_lock(&sec_req->lock);
        list_del(&sec_req_el->head);
        mutex_unlock(&sec_req->lock);
        sec_alg_free_el(sec_req_el, ctx->queue->dev_info);

        /*
         * Request is done.
         * The dance is needed as the lock is freed in the completion.
         */
        mutex_lock(&sec_req->lock);
        done = list_empty(&sec_req->elements);
        mutex_unlock(&sec_req->lock);
        if (done) {
                if (crypto_skcipher_ivsize(atfm)) {
                        dma_unmap_single(dev, sec_req->dma_iv,
                                         crypto_skcipher_ivsize(atfm),
                                         DMA_TO_DEVICE);
                }
                dma_unmap_sg(dev, skreq->src, sec_req->len_in,
                             DMA_BIDIRECTIONAL);
                if (skreq->src != skreq->dst)
                        dma_unmap_sg(dev, skreq->dst, sec_req->len_out,
                                     DMA_BIDIRECTIONAL);
                skreq->base.complete(&skreq->base, sec_req->err);
        }
}

void sec_alg_callback(struct sec_bd_info *resp, void *shadow)
{
        struct sec_request *sec_req = shadow;

        sec_req->cb(resp, sec_req->req_base);
}
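
/*
 * Split a request of @length bytes into chunks of at most SEC_REQ_LIMIT
 * bytes.  Returns the number of chunks in *steps and a newly allocated array
 * of per-chunk sizes in *split_sizes, which the caller must free.
 */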
static int sec_alg_alloc_and_calc_split_sizes(int length, size_t **split_sizes,
                                              int *steps)
{
        size_t *sizes;
        int i;

        /* Split into suitable sized blocks */
        *steps = roundup(length, SEC_REQ_LIMIT) / SEC_REQ_LIMIT;
        sizes = kcalloc(*steps, sizeof(*sizes), GFP_KERNEL);
        if (!sizes)
                return -ENOMEM;

        for (i = 0; i < *steps - 1; i++)
                sizes[i] = SEC_REQ_LIMIT;
        sizes[*steps - 1] = length - SEC_REQ_LIMIT * (*steps - 1);
        *split_sizes = sizes;

        return 0;
}
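
/*
 * DMA map @sgl and divide the mapped list into @steps sub-scatterlists of
 * the sizes given in @split_sizes using sg_split().  On error everything
 * mapped or allocated here is undone.
 */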
static int sec_map_and_split_sg(struct scatterlist *sgl, size_t *split_sizes,
                                int steps, struct scatterlist ***splits,
                                int **splits_nents,
                                int sgl_len_in,
                                struct device *dev)
{
        int ret, count;

        count = dma_map_sg(dev, sgl, sgl_len_in, DMA_BIDIRECTIONAL);
        if (!count)
                return -EINVAL;

        *splits = kcalloc(steps, sizeof(struct scatterlist *), GFP_KERNEL);
        if (!*splits) {
                ret = -ENOMEM;
                goto err_unmap_sg;
        }
        *splits_nents = kcalloc(steps, sizeof(int), GFP_KERNEL);
        if (!*splits_nents) {
                ret = -ENOMEM;
                goto err_free_splits;
        }

        /* output the scatter list before and after this */
        ret = sg_split(sgl, count, 0, steps, split_sizes,
                       *splits, *splits_nents, GFP_KERNEL);
        if (ret) {
                ret = -ENOMEM;
                goto err_free_splits_nents;
        }

        return 0;

err_free_splits_nents:
        kfree(*splits_nents);
err_free_splits:
        kfree(*splits);
err_unmap_sg:
        dma_unmap_sg(dev, sgl, sgl_len_in, DMA_BIDIRECTIONAL);

        return ret;
}

/*
 * Reverses the sec_map_and_split_sg call for messages not yet added to
 * the queues.
 */
static void sec_unmap_sg_on_err(struct scatterlist *sgl, int steps,
                                struct scatterlist **splits, int *splits_nents,
                                int sgl_len_in, struct device *dev)
{
        int i;

        for (i = 0; i < steps; i++)
                kfree(splits[i]);
        kfree(splits_nents);
        kfree(splits);

        dma_unmap_sg(dev, sgl, sgl_len_in, DMA_BIDIRECTIONAL);
}
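
/*
 * Build one request element (one buffer descriptor plus its hardware SGLs)
 * for a chunk of @el_size bytes: copy the tfm's BD template, set the cipher
 * direction and granule-size fields, then attach the source hardware SGL
 * and, when src and dst differ, a separate destination hardware SGL.
 */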
static struct sec_request_el
*sec_alg_alloc_and_fill_el(struct sec_bd_info *template, int encrypt,
                           int el_size, bool different_dest,
                           struct scatterlist *sgl_in, int n_ents_in,
                           struct scatterlist *sgl_out, int n_ents_out,
                           struct sec_dev_info *info)
{
        struct sec_request_el *el;
        struct sec_bd_info *req;
        int ret;

        el = kzalloc(sizeof(*el), GFP_KERNEL);
        if (!el)
                return ERR_PTR(-ENOMEM);
        el->el_length = el_size;
        req = &el->req;
        memcpy(req, template, sizeof(*req));

        req->w0 &= ~SEC_BD_W0_CIPHER_M;
        if (encrypt)
                req->w0 |= SEC_CIPHER_ENCRYPT << SEC_BD_W0_CIPHER_S;
        else
                req->w0 |= SEC_CIPHER_DECRYPT << SEC_BD_W0_CIPHER_S;

        req->w0 &= ~SEC_BD_W0_C_GRAN_SIZE_19_16_M;
        req->w0 |= ((el_size >> 16) << SEC_BD_W0_C_GRAN_SIZE_19_16_S) &
                SEC_BD_W0_C_GRAN_SIZE_19_16_M;

        req->w0 &= ~SEC_BD_W0_C_GRAN_SIZE_21_20_M;
        req->w0 |= ((el_size >> 20) << SEC_BD_W0_C_GRAN_SIZE_21_20_S) &
                SEC_BD_W0_C_GRAN_SIZE_21_20_M;

        /* Writing whole u32 so no need to take care of masking */
        req->w2 = ((1 << SEC_BD_W2_GRAN_NUM_S) & SEC_BD_W2_GRAN_NUM_M) |
                ((el_size << SEC_BD_W2_C_GRAN_SIZE_15_0_S) &
                 SEC_BD_W2_C_GRAN_SIZE_15_0_M);

        req->w3 &= ~SEC_BD_W3_CIPHER_LEN_OFFSET_M;
        req->w1 |= SEC_BD_W1_ADDR_TYPE;

        el->sgl_in = sgl_in;

        ret = sec_alloc_and_fill_hw_sgl(&el->in, &el->dma_in, el->sgl_in,
                                        n_ents_in, info);
        if (ret)
                goto err_free_el;

        req->data_addr_lo = lower_32_bits(el->dma_in);
        req->data_addr_hi = upper_32_bits(el->dma_in);

        if (different_dest) {
                el->sgl_out = sgl_out;
                ret = sec_alloc_and_fill_hw_sgl(&el->out, &el->dma_out,
                                                el->sgl_out,
                                                n_ents_out, info);
                if (ret)
                        goto err_free_hw_sgl_in;

                req->w0 |= SEC_BD_W0_DE;
                req->cipher_destin_addr_lo = lower_32_bits(el->dma_out);
                req->cipher_destin_addr_hi = upper_32_bits(el->dma_out);

        } else {
                req->w0 &= ~SEC_BD_W0_DE;
                req->cipher_destin_addr_lo = lower_32_bits(el->dma_in);
                req->cipher_destin_addr_hi = upper_32_bits(el->dma_in);
        }

        return el;

err_free_hw_sgl_in:
        sec_free_hw_sgl(el->in, el->dma_in, info);
err_free_el:
        kfree(el);

        return ERR_PTR(ret);
}
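
/*
 * Main skcipher path: split the request into SEC_REQ_LIMIT sized chunks,
 * DMA map the scatterlists, build one request element per chunk and queue
 * the whole set atomically - to the hardware queue, the software queue, or
 * the backlog when CRYPTO_TFM_REQ_MAY_BACKLOG is set and there is no room.
 */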
static int sec_alg_skcipher_crypto(struct skcipher_request *skreq,
                                   bool encrypt)
{
        struct crypto_skcipher *atfm = crypto_skcipher_reqtfm(skreq);
        struct crypto_tfm *tfm = crypto_skcipher_tfm(atfm);
        struct sec_alg_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
        struct sec_queue *queue = ctx->queue;
        struct sec_request *sec_req = skcipher_request_ctx(skreq);
        struct sec_dev_info *info = queue->dev_info;
        int i, ret, steps;
        size_t *split_sizes;
        struct scatterlist **splits_in;
        struct scatterlist **splits_out = NULL;
        int *splits_in_nents;
        int *splits_out_nents = NULL;
        struct sec_request_el *el, *temp;
        bool split = skreq->src != skreq->dst;

        mutex_init(&sec_req->lock);
        sec_req->req_base = &skreq->base;
        sec_req->err = 0;
        /* SGL mapping out here to allow us to break it up as necessary */
        sec_req->len_in = sg_nents(skreq->src);

        ret = sec_alg_alloc_and_calc_split_sizes(skreq->cryptlen, &split_sizes,
                                                 &steps);
        if (ret)
                return ret;
        sec_req->num_elements = steps;
        ret = sec_map_and_split_sg(skreq->src, split_sizes, steps, &splits_in,
                                   &splits_in_nents, sec_req->len_in,
                                   info->dev);
        if (ret)
                goto err_free_split_sizes;

        if (split) {
                sec_req->len_out = sg_nents(skreq->dst);
                ret = sec_map_and_split_sg(skreq->dst, split_sizes, steps,
                                           &splits_out, &splits_out_nents,
                                           sec_req->len_out, info->dev);
                if (ret)
                        goto err_unmap_in_sg;
        }
        /* Shared info stored in sec_req - applies to all BDs */
        sec_req->tfm_ctx = ctx;
        sec_req->cb = sec_skcipher_alg_callback;
        INIT_LIST_HEAD(&sec_req->elements);

        /*
         * Future optimization.
         * In the chaining case we can't use a dma pool bounce buffer
         * but in the case where we know there is no chaining we can.
         */
        if (crypto_skcipher_ivsize(atfm)) {
                sec_req->dma_iv = dma_map_single(info->dev, skreq->iv,
                                                 crypto_skcipher_ivsize(atfm),
                                                 DMA_TO_DEVICE);
                if (dma_mapping_error(info->dev, sec_req->dma_iv)) {
                        ret = -ENOMEM;
                        goto err_unmap_out_sg;
                }
        }

        /* Set them all up then queue - cleaner error handling. */
        for (i = 0; i < steps; i++) {
                el = sec_alg_alloc_and_fill_el(&ctx->req_template,
                                               encrypt ? 1 : 0,
                                               split_sizes[i],
                                               skreq->src != skreq->dst,
                                               splits_in[i], splits_in_nents[i],
                                               split ? splits_out[i] : NULL,
                                               split ? splits_out_nents[i] : 0,
                                               info);
                if (IS_ERR(el)) {
                        ret = PTR_ERR(el);
                        goto err_free_elements;
                }
                el->req.cipher_iv_addr_lo = lower_32_bits(sec_req->dma_iv);
                el->req.cipher_iv_addr_hi = upper_32_bits(sec_req->dma_iv);
                el->sec_req = sec_req;
                list_add_tail(&el->head, &sec_req->elements);
        }

        /*
         * Only attempt to queue if the whole lot can fit in the queue -
         * we can't successfully cleanup after a partial queueing so this
         * must succeed or fail atomically.
         *
         * Big hammer test of both software and hardware queues - could be
         * more refined but this is unlikely to happen so no need.
         */

        /* Grab a big lock for a long time to avoid concurrency issues */
        mutex_lock(&queue->queuelock);

        /*
         * Can go on to queue if we have space in either:
         * 1) The hardware queue and no software queue
         * 2) The software queue
         * AND there is nothing in the backlog. If there is backlog we
         * have to only queue to the backlog queue and return busy.
         */
        if ((!sec_queue_can_enqueue(queue, steps) &&
             (!queue->havesoftqueue ||
              kfifo_avail(&queue->softqueue) <= steps)) ||
            !list_empty(&ctx->backlog)) {
                ret = -EBUSY;
                if ((skreq->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
                        list_add_tail(&sec_req->backlog_head, &ctx->backlog);
                        mutex_unlock(&queue->queuelock);
                        goto out;
                }

                mutex_unlock(&queue->queuelock);
                goto err_free_elements;
        }
        ret = sec_send_request(sec_req, queue);
        mutex_unlock(&queue->queuelock);
        if (ret)
                goto err_free_elements;

        ret = -EINPROGRESS;
out:
        /* Cleanup - all elements in pointer arrays have been copied */
        kfree(splits_in_nents);
        kfree(splits_in);
        kfree(splits_out_nents);
        kfree(splits_out);
        kfree(split_sizes);

        return ret;

err_free_elements:
        list_for_each_entry_safe(el, temp, &sec_req->elements, head) {
                list_del(&el->head);
                sec_alg_free_el(el, info);
        }
        if (crypto_skcipher_ivsize(atfm))
                dma_unmap_single(info->dev, sec_req->dma_iv,
                                 crypto_skcipher_ivsize(atfm),
                                 DMA_TO_DEVICE);
err_unmap_out_sg:
        if (split)
                sec_unmap_sg_on_err(skreq->dst, steps, splits_out,
                                    splits_out_nents, sec_req->len_out,
                                    info->dev);
err_unmap_in_sg:
        sec_unmap_sg_on_err(skreq->src, steps, splits_in, splits_in_nents,
                            sec_req->len_in, info->dev);
err_free_split_sizes:
        kfree(split_sizes);

        return ret;
}

static int sec_alg_skcipher_encrypt(struct skcipher_request *req)
{
        return sec_alg_skcipher_crypto(req, true);
}

static int sec_alg_skcipher_decrypt(struct skcipher_request *req)
{
        return sec_alg_skcipher_crypto(req, false);
}
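
/*
 * Each transform takes its own hardware queue.  The plain init/exit pair is
 * used for modes with no chaining (ECB, XTS); the _with_queue variants also
 * set up a software overflow queue so that IV-chained modes (CBC, CTR) keep
 * their elements ordered.
 */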
static int sec_alg_skcipher_init(struct crypto_skcipher *tfm)
{
        struct sec_alg_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);

        mutex_init(&ctx->lock);
        INIT_LIST_HEAD(&ctx->backlog);
        crypto_skcipher_set_reqsize(tfm, sizeof(struct sec_request));

        ctx->queue = sec_queue_alloc_start_safe();
        if (IS_ERR(ctx->queue))
                return PTR_ERR(ctx->queue);

        mutex_init(&ctx->queue->queuelock);
        ctx->queue->havesoftqueue = false;

        return 0;
}

static void sec_alg_skcipher_exit(struct crypto_skcipher *tfm)
{
        struct sec_alg_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
        struct device *dev = ctx->queue->dev_info->dev;

        if (ctx->key) {
                memzero_explicit(ctx->key, SEC_MAX_CIPHER_KEY);
                dma_free_coherent(dev, SEC_MAX_CIPHER_KEY, ctx->key,
                                  ctx->pkey);
        }
        sec_queue_stop_release(ctx->queue);
}

static int sec_alg_skcipher_init_with_queue(struct crypto_skcipher *tfm)
{
        struct sec_alg_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
        int ret;

        ret = sec_alg_skcipher_init(tfm);
        if (ret)
                return ret;

        INIT_KFIFO(ctx->queue->softqueue);
        ret = kfifo_alloc(&ctx->queue->softqueue, 512, GFP_KERNEL);
        if (ret) {
                sec_alg_skcipher_exit(tfm);
                return ret;
        }
        ctx->queue->havesoftqueue = true;

        return 0;
}

static void sec_alg_skcipher_exit_with_queue(struct crypto_skcipher *tfm)
{
        struct sec_alg_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);

        kfifo_free(&ctx->queue->softqueue);
        sec_alg_skcipher_exit(tfm);
}
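
/*
 * Algorithms exposed to the crypto API.  Note that the chained modes use
 * sec_alg_skcipher_init_with_queue()/_exit_with_queue() while ECB and XTS
 * use the plain init/exit pair.
 */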
static struct skcipher_alg sec_algs[] = {
        {
                .base = {
                        .cra_name = "ecb(aes)",
                        .cra_driver_name = "hisi_sec_aes_ecb",
                        .cra_priority = 4001,
                        .cra_flags = CRYPTO_ALG_ASYNC,
                        .cra_blocksize = AES_BLOCK_SIZE,
                        .cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
                        .cra_alignmask = 0,
                        .cra_module = THIS_MODULE,
                },
                .init = sec_alg_skcipher_init,
                .exit = sec_alg_skcipher_exit,
                .setkey = sec_alg_skcipher_setkey_aes_ecb,
                .decrypt = sec_alg_skcipher_decrypt,
                .encrypt = sec_alg_skcipher_encrypt,
                .min_keysize = AES_MIN_KEY_SIZE,
                .max_keysize = AES_MAX_KEY_SIZE,
                .ivsize = 0,
        }, {
                .base = {
                        .cra_name = "cbc(aes)",
                        .cra_driver_name = "hisi_sec_aes_cbc",
                        .cra_priority = 4001,
                        .cra_flags = CRYPTO_ALG_ASYNC,
                        .cra_blocksize = AES_BLOCK_SIZE,
                        .cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
                        .cra_alignmask = 0,
                        .cra_module = THIS_MODULE,
                },
                .init = sec_alg_skcipher_init_with_queue,
                .exit = sec_alg_skcipher_exit_with_queue,
                .setkey = sec_alg_skcipher_setkey_aes_cbc,
                .decrypt = sec_alg_skcipher_decrypt,
                .encrypt = sec_alg_skcipher_encrypt,
                .min_keysize = AES_MIN_KEY_SIZE,
                .max_keysize = AES_MAX_KEY_SIZE,
                .ivsize = AES_BLOCK_SIZE,
        }, {
                .base = {
                        .cra_name = "ctr(aes)",
                        .cra_driver_name = "hisi_sec_aes_ctr",
                        .cra_priority = 4001,
                        .cra_flags = CRYPTO_ALG_ASYNC,
                        .cra_blocksize = AES_BLOCK_SIZE,
                        .cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
                        .cra_alignmask = 0,
                        .cra_module = THIS_MODULE,
                },
                .init = sec_alg_skcipher_init_with_queue,
                .exit = sec_alg_skcipher_exit_with_queue,
                .setkey = sec_alg_skcipher_setkey_aes_ctr,
                .decrypt = sec_alg_skcipher_decrypt,
                .encrypt = sec_alg_skcipher_encrypt,
                .min_keysize = AES_MIN_KEY_SIZE,
                .max_keysize = AES_MAX_KEY_SIZE,
                .ivsize = AES_BLOCK_SIZE,
        }, {
                .base = {
                        .cra_name = "xts(aes)",
                        .cra_driver_name = "hisi_sec_aes_xts",
                        .cra_priority = 4001,
                        .cra_flags = CRYPTO_ALG_ASYNC,
                        .cra_blocksize = AES_BLOCK_SIZE,
                        .cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
                        .cra_alignmask = 0,
                        .cra_module = THIS_MODULE,
                },
                .init = sec_alg_skcipher_init,
                .exit = sec_alg_skcipher_exit,
                .setkey = sec_alg_skcipher_setkey_aes_xts,
                .decrypt = sec_alg_skcipher_decrypt,
                .encrypt = sec_alg_skcipher_encrypt,
                .min_keysize = 2 * AES_MIN_KEY_SIZE,
                .max_keysize = 2 * AES_MAX_KEY_SIZE,
                .ivsize = AES_BLOCK_SIZE,
        }, {
                /* Unable to find any test vectors so untested */
                .base = {
                        .cra_name = "ecb(des)",
                        .cra_driver_name = "hisi_sec_des_ecb",
                        .cra_priority = 4001,
                        .cra_flags = CRYPTO_ALG_ASYNC,
                        .cra_blocksize = DES_BLOCK_SIZE,
                        .cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
                        .cra_alignmask = 0,
                        .cra_module = THIS_MODULE,
                },
                .init = sec_alg_skcipher_init,
                .exit = sec_alg_skcipher_exit,
                .setkey = sec_alg_skcipher_setkey_des_ecb,
                .decrypt = sec_alg_skcipher_decrypt,
                .encrypt = sec_alg_skcipher_encrypt,
                .min_keysize = DES_KEY_SIZE,
                .max_keysize = DES_KEY_SIZE,
                .ivsize = 0,
        }, {
                .base = {
                        .cra_name = "cbc(des)",
                        .cra_driver_name = "hisi_sec_des_cbc",
                        .cra_priority = 4001,
                        .cra_flags = CRYPTO_ALG_ASYNC,
                        .cra_blocksize = DES_BLOCK_SIZE,
                        .cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
                        .cra_alignmask = 0,
                        .cra_module = THIS_MODULE,
                },
                .init = sec_alg_skcipher_init_with_queue,
                .exit = sec_alg_skcipher_exit_with_queue,
                .setkey = sec_alg_skcipher_setkey_des_cbc,
                .decrypt = sec_alg_skcipher_decrypt,
                .encrypt = sec_alg_skcipher_encrypt,
                .min_keysize = DES_KEY_SIZE,
                .max_keysize = DES_KEY_SIZE,
                .ivsize = DES_BLOCK_SIZE,
        }, {
                .base = {
                        .cra_name = "cbc(des3_ede)",
                        .cra_driver_name = "hisi_sec_3des_cbc",
                        .cra_priority = 4001,
                        .cra_flags = CRYPTO_ALG_ASYNC,
                        .cra_blocksize = DES3_EDE_BLOCK_SIZE,
                        .cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
                        .cra_alignmask = 0,
                        .cra_module = THIS_MODULE,
                },
                .init = sec_alg_skcipher_init_with_queue,
                .exit = sec_alg_skcipher_exit_with_queue,
                .setkey = sec_alg_skcipher_setkey_3des_cbc,
                .decrypt = sec_alg_skcipher_decrypt,
                .encrypt = sec_alg_skcipher_encrypt,
                .min_keysize = DES3_EDE_KEY_SIZE,
                .max_keysize = DES3_EDE_KEY_SIZE,
                .ivsize = DES3_EDE_BLOCK_SIZE,
        }, {
                .base = {
                        .cra_name = "ecb(des3_ede)",
                        .cra_driver_name = "hisi_sec_3des_ecb",
                        .cra_priority = 4001,
                        .cra_flags = CRYPTO_ALG_ASYNC,
                        .cra_blocksize = DES3_EDE_BLOCK_SIZE,
                        .cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
                        .cra_alignmask = 0,
                        .cra_module = THIS_MODULE,
                },
                .init = sec_alg_skcipher_init,
                .exit = sec_alg_skcipher_exit,
                .setkey = sec_alg_skcipher_setkey_3des_ecb,
                .decrypt = sec_alg_skcipher_decrypt,
                .encrypt = sec_alg_skcipher_encrypt,
                .min_keysize = DES3_EDE_KEY_SIZE,
                .max_keysize = DES3_EDE_KEY_SIZE,
                .ivsize = 0,
        }
};
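
/*
 * Registration is reference counted across devices: the algorithms are
 * registered with the crypto API when the first device comes up and only
 * unregistered when the last device goes away.
 */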
int sec_algs_register(void)
{
        int ret = 0;

        mutex_lock(&algs_lock);
        if (++active_devs != 1)
                goto unlock;

        ret = crypto_register_skciphers(sec_algs, ARRAY_SIZE(sec_algs));
        if (ret)
                --active_devs;
unlock:
        mutex_unlock(&algs_lock);

        return ret;
}

void sec_algs_unregister(void)
{
        mutex_lock(&algs_lock);
        if (--active_devs != 0)
                goto unlock;

        crypto_unregister_skciphers(sec_algs, ARRAY_SIZE(sec_algs));
unlock:
        mutex_unlock(&algs_lock);
}