
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2016-2017 Hisilicon Limited. */
#include <linux/crypto.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>

#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/des.h>
#include <crypto/skcipher.h>
#include <crypto/xts.h>
#include <crypto/internal/skcipher.h>

#include "sec_drv.h"

#define SEC_MAX_CIPHER_KEY	64
#define SEC_REQ_LIMIT		SZ_32M

struct sec_c_alg_cfg {
	unsigned c_alg		: 3;
	unsigned c_mode		: 3;
	unsigned key_len	: 2;
	unsigned c_width	: 2;
};

static const struct sec_c_alg_cfg sec_c_alg_cfgs[] = {
	[SEC_C_DES_ECB_64] = {
		.c_alg = SEC_C_ALG_DES,
		.c_mode = SEC_C_MODE_ECB,
		.key_len = SEC_KEY_LEN_DES,
	},
	[SEC_C_DES_CBC_64] = {
		.c_alg = SEC_C_ALG_DES,
		.c_mode = SEC_C_MODE_CBC,
		.key_len = SEC_KEY_LEN_DES,
	},
	[SEC_C_3DES_ECB_192_3KEY] = {
		.c_alg = SEC_C_ALG_3DES,
		.c_mode = SEC_C_MODE_ECB,
		.key_len = SEC_KEY_LEN_3DES_3_KEY,
	},
	[SEC_C_3DES_ECB_192_2KEY] = {
		.c_alg = SEC_C_ALG_3DES,
		.c_mode = SEC_C_MODE_ECB,
		.key_len = SEC_KEY_LEN_3DES_2_KEY,
	},
	[SEC_C_3DES_CBC_192_3KEY] = {
		.c_alg = SEC_C_ALG_3DES,
		.c_mode = SEC_C_MODE_CBC,
		.key_len = SEC_KEY_LEN_3DES_3_KEY,
	},
	[SEC_C_3DES_CBC_192_2KEY] = {
		.c_alg = SEC_C_ALG_3DES,
		.c_mode = SEC_C_MODE_CBC,
		.key_len = SEC_KEY_LEN_3DES_2_KEY,
	},
	[SEC_C_AES_ECB_128] = {
		.c_alg = SEC_C_ALG_AES,
		.c_mode = SEC_C_MODE_ECB,
		.key_len = SEC_KEY_LEN_AES_128,
	},
	[SEC_C_AES_ECB_192] = {
		.c_alg = SEC_C_ALG_AES,
		.c_mode = SEC_C_MODE_ECB,
		.key_len = SEC_KEY_LEN_AES_192,
	},
	[SEC_C_AES_ECB_256] = {
		.c_alg = SEC_C_ALG_AES,
		.c_mode = SEC_C_MODE_ECB,
		.key_len = SEC_KEY_LEN_AES_256,
	},
	[SEC_C_AES_CBC_128] = {
		.c_alg = SEC_C_ALG_AES,
		.c_mode = SEC_C_MODE_CBC,
		.key_len = SEC_KEY_LEN_AES_128,
	},
	[SEC_C_AES_CBC_192] = {
		.c_alg = SEC_C_ALG_AES,
		.c_mode = SEC_C_MODE_CBC,
		.key_len = SEC_KEY_LEN_AES_192,
	},
	[SEC_C_AES_CBC_256] = {
		.c_alg = SEC_C_ALG_AES,
		.c_mode = SEC_C_MODE_CBC,
		.key_len = SEC_KEY_LEN_AES_256,
	},
	[SEC_C_AES_CTR_128] = {
		.c_alg = SEC_C_ALG_AES,
		.c_mode = SEC_C_MODE_CTR,
		.key_len = SEC_KEY_LEN_AES_128,
	},
	[SEC_C_AES_CTR_192] = {
		.c_alg = SEC_C_ALG_AES,
		.c_mode = SEC_C_MODE_CTR,
		.key_len = SEC_KEY_LEN_AES_192,
	},
	[SEC_C_AES_CTR_256] = {
		.c_alg = SEC_C_ALG_AES,
		.c_mode = SEC_C_MODE_CTR,
		.key_len = SEC_KEY_LEN_AES_256,
	},
	[SEC_C_AES_XTS_128] = {
		.c_alg = SEC_C_ALG_AES,
		.c_mode = SEC_C_MODE_XTS,
		.key_len = SEC_KEY_LEN_AES_128,
	},
	[SEC_C_AES_XTS_256] = {
		.c_alg = SEC_C_ALG_AES,
		.c_mode = SEC_C_MODE_XTS,
		.key_len = SEC_KEY_LEN_AES_256,
	},
	[SEC_C_NULL] = {
	},
};

/*
 * Mutex used to ensure safe operation of the reference count of
 * alg providers
 */
static DEFINE_MUTEX(algs_lock);
static unsigned int active_devs;
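
/*
 * Fill in the parts of a struct sec_bd_info request template that depend
 * only on the selected cipher: algorithm, mode, key length, width and the
 * DMA address of the key buffer held in the tfm context.
 */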
static void sec_alg_skcipher_init_template(struct sec_alg_tfm_ctx *ctx,
					   struct sec_bd_info *req,
					   enum sec_cipher_alg alg)
{
	const struct sec_c_alg_cfg *cfg = &sec_c_alg_cfgs[alg];

	memset(req, 0, sizeof(*req));
	req->w0 |= cfg->c_mode << SEC_BD_W0_C_MODE_S;
	req->w1 |= cfg->c_alg << SEC_BD_W1_C_ALG_S;
	req->w3 |= cfg->key_len << SEC_BD_W3_C_KEY_LEN_S;
	req->w0 |= cfg->c_width << SEC_BD_W0_C_WIDTH_S;

	req->cipher_key_addr_lo = lower_32_bits(ctx->pkey);
	req->cipher_key_addr_hi = upper_32_bits(ctx->pkey);
}

static void sec_alg_skcipher_init_context(struct crypto_skcipher *atfm,
					  const u8 *key,
					  unsigned int keylen,
					  enum sec_cipher_alg alg)
{
	struct crypto_tfm *tfm = crypto_skcipher_tfm(atfm);
	struct sec_alg_tfm_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->cipher_alg = alg;
	memcpy(ctx->key, key, keylen);
	sec_alg_skcipher_init_template(ctx, &ctx->req_template,
				       ctx->cipher_alg);
}
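
/*
 * Build a chain of hardware scatter gather lists (struct sec_hw_sgl) from
 * the device DMA pool describing an already DMA-mapped scatterlist. On
 * success *sec_sgl and *psec_sgl hold the CPU and DMA addresses of the
 * first element in the chain.
 */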
static int sec_alloc_and_fill_hw_sgl(struct sec_hw_sgl **sec_sgl,
				     dma_addr_t *psec_sgl,
				     struct scatterlist *sgl,
				     int count,
				     struct sec_dev_info *info)
{
	struct sec_hw_sgl *sgl_current = NULL;
	struct sec_hw_sgl *sgl_next;
	dma_addr_t sgl_next_dma;
	struct scatterlist *sg;
	int ret, sge_index, i;

	if (!count)
		return -EINVAL;

	for_each_sg(sgl, sg, count, i) {
		sge_index = i % SEC_MAX_SGE_NUM;
		if (sge_index == 0) {
			sgl_next = dma_pool_zalloc(info->hw_sgl_pool,
						   GFP_KERNEL, &sgl_next_dma);
			if (!sgl_next) {
				ret = -ENOMEM;
				goto err_free_hw_sgls;
			}

			if (!sgl_current) { /* First one */
				*psec_sgl = sgl_next_dma;
				*sec_sgl = sgl_next;
			} else { /* Chained */
				sgl_current->entry_sum_in_sgl = SEC_MAX_SGE_NUM;
				sgl_current->next_sgl = sgl_next_dma;
				sgl_current->next = sgl_next;
			}
			sgl_current = sgl_next;
		}
		sgl_current->sge_entries[sge_index].buf = sg_dma_address(sg);
		sgl_current->sge_entries[sge_index].len = sg_dma_len(sg);
		sgl_current->data_bytes_in_sgl += sg_dma_len(sg);
	}
	sgl_current->entry_sum_in_sgl = count % SEC_MAX_SGE_NUM;
	sgl_current->next_sgl = 0;
	(*sec_sgl)->entry_sum_in_chain = count;

	return 0;

err_free_hw_sgls:
	/*
	 * Free what has been built so far. dma_pool_free() needs the DMA
	 * handle of the element being freed: the first element's handle is
	 * in *psec_sgl, each subsequent one is held in the previous
	 * element's next_sgl field.
	 */
	sgl_current = *sec_sgl;
	while (sgl_current) {
		sgl_next = sgl_current->next;
		sgl_next_dma = sgl_current->next_sgl;
		dma_pool_free(info->hw_sgl_pool, sgl_current, *psec_sgl);
		*psec_sgl = sgl_next_dma;
		sgl_current = sgl_next;
	}
	*psec_sgl = 0;

	return ret;
}
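
/*
 * Free a chain built by sec_alloc_and_fill_hw_sgl(). psec_sgl is the DMA
 * handle of the first element; each element stores the DMA handle of its
 * successor in next_sgl.
 */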
static void sec_free_hw_sgl(struct sec_hw_sgl *hw_sgl,
			    dma_addr_t psec_sgl, struct sec_dev_info *info)
{
	struct sec_hw_sgl *sgl_current, *sgl_next;
	dma_addr_t sgl_next_dma;

	sgl_current = hw_sgl;
	while (sgl_current) {
		sgl_next = sgl_current->next;
		sgl_next_dma = sgl_current->next_sgl;

		/* Free each element with its own DMA handle */
		dma_pool_free(info->hw_sgl_pool, sgl_current, psec_sgl);

		sgl_current = sgl_next;
		psec_sgl = sgl_next_dma;
	}
}
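
/*
 * Common setkey handler: allocate the DMA-coherent key buffer on first use
 * (or wipe it when rekeying), then rebuild the request template for the
 * chosen algorithm.
 */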
static int sec_alg_skcipher_setkey(struct crypto_skcipher *tfm,
				   const u8 *key, unsigned int keylen,
				   enum sec_cipher_alg alg)
{
	struct sec_alg_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct device *dev = ctx->queue->dev_info->dev;

	mutex_lock(&ctx->lock);
	if (ctx->key) {
		/* rekeying */
		memset(ctx->key, 0, SEC_MAX_CIPHER_KEY);
	} else {
		/* new key */
		ctx->key = dma_zalloc_coherent(dev, SEC_MAX_CIPHER_KEY,
					       &ctx->pkey, GFP_KERNEL);
		if (!ctx->key) {
			mutex_unlock(&ctx->lock);
			return -ENOMEM;
		}
	}
	mutex_unlock(&ctx->lock);
	sec_alg_skcipher_init_context(tfm, key, keylen, alg);

	return 0;
}

static int sec_alg_skcipher_setkey_aes_ecb(struct crypto_skcipher *tfm,
					   const u8 *key, unsigned int keylen)
{
	enum sec_cipher_alg alg;

	switch (keylen) {
	case AES_KEYSIZE_128:
		alg = SEC_C_AES_ECB_128;
		break;
	case AES_KEYSIZE_192:
		alg = SEC_C_AES_ECB_192;
		break;
	case AES_KEYSIZE_256:
		alg = SEC_C_AES_ECB_256;
		break;
	default:
		return -EINVAL;
	}

	return sec_alg_skcipher_setkey(tfm, key, keylen, alg);
}

static int sec_alg_skcipher_setkey_aes_cbc(struct crypto_skcipher *tfm,
					   const u8 *key, unsigned int keylen)
{
	enum sec_cipher_alg alg;

	switch (keylen) {
	case AES_KEYSIZE_128:
		alg = SEC_C_AES_CBC_128;
		break;
	case AES_KEYSIZE_192:
		alg = SEC_C_AES_CBC_192;
		break;
	case AES_KEYSIZE_256:
		alg = SEC_C_AES_CBC_256;
		break;
	default:
		return -EINVAL;
	}

	return sec_alg_skcipher_setkey(tfm, key, keylen, alg);
}

static int sec_alg_skcipher_setkey_aes_ctr(struct crypto_skcipher *tfm,
					   const u8 *key, unsigned int keylen)
{
	enum sec_cipher_alg alg;

	switch (keylen) {
	case AES_KEYSIZE_128:
		alg = SEC_C_AES_CTR_128;
		break;
	case AES_KEYSIZE_192:
		alg = SEC_C_AES_CTR_192;
		break;
	case AES_KEYSIZE_256:
		alg = SEC_C_AES_CTR_256;
		break;
	default:
		return -EINVAL;
	}

	return sec_alg_skcipher_setkey(tfm, key, keylen, alg);
}

static int sec_alg_skcipher_setkey_aes_xts(struct crypto_skcipher *tfm,
					   const u8 *key, unsigned int keylen)
{
	enum sec_cipher_alg alg;
	int ret;

	ret = xts_verify_key(tfm, key, keylen);
	if (ret)
		return ret;

	switch (keylen) {
	case AES_KEYSIZE_128 * 2:
		alg = SEC_C_AES_XTS_128;
		break;
	case AES_KEYSIZE_256 * 2:
		alg = SEC_C_AES_XTS_256;
		break;
	default:
		return -EINVAL;
	}

	return sec_alg_skcipher_setkey(tfm, key, keylen, alg);
}

static int sec_alg_skcipher_setkey_des_ecb(struct crypto_skcipher *tfm,
					   const u8 *key, unsigned int keylen)
{
	if (keylen != DES_KEY_SIZE)
		return -EINVAL;

	return sec_alg_skcipher_setkey(tfm, key, keylen, SEC_C_DES_ECB_64);
}

static int sec_alg_skcipher_setkey_des_cbc(struct crypto_skcipher *tfm,
					   const u8 *key, unsigned int keylen)
{
	if (keylen != DES_KEY_SIZE)
		return -EINVAL;

	return sec_alg_skcipher_setkey(tfm, key, keylen, SEC_C_DES_CBC_64);
}

static int sec_alg_skcipher_setkey_3des_ecb(struct crypto_skcipher *tfm,
					    const u8 *key, unsigned int keylen)
{
	if (keylen != DES_KEY_SIZE * 3)
		return -EINVAL;

	return sec_alg_skcipher_setkey(tfm, key, keylen,
				       SEC_C_3DES_ECB_192_3KEY);
}

static int sec_alg_skcipher_setkey_3des_cbc(struct crypto_skcipher *tfm,
					    const u8 *key, unsigned int keylen)
{
	if (keylen != DES3_EDE_KEY_SIZE)
		return -EINVAL;

	return sec_alg_skcipher_setkey(tfm, key, keylen,
				       SEC_C_3DES_CBC_192_3KEY);
}

static void sec_alg_free_el(struct sec_request_el *el,
			    struct sec_dev_info *info)
{
	sec_free_hw_sgl(el->out, el->dma_out, info);
	sec_free_hw_sgl(el->in, el->dma_in, info);
	kfree(el->sgl_in);
	kfree(el->sgl_out);
	kfree(el);
}

/* queuelock must be held */
static int sec_send_request(struct sec_request *sec_req, struct sec_queue *queue)
{
	struct sec_request_el *el, *temp;
	int ret = 0;

	mutex_lock(&sec_req->lock);
	list_for_each_entry_safe(el, temp, &sec_req->elements, head) {
		/*
		 * Add to hardware queue only under following circumstances
		 * 1) Software and hardware queue empty so no chain dependencies
		 * 2) No dependencies as new IV - (check software queue empty
		 *    to maintain order)
		 * 3) No dependencies because the mode does no chaining.
		 *
		 * In other cases first insert onto the software queue which
		 * is then emptied as requests complete
		 */
		if (!queue->havesoftqueue ||
		    (kfifo_is_empty(&queue->softqueue) &&
		     sec_queue_empty(queue))) {
			ret = sec_queue_send(queue, &el->req, sec_req);
			if (ret == -EAGAIN) {
				/* Wait until we can send then try again */
				/* DEAD if here - should not happen */
				ret = -EBUSY;
				goto err_unlock;
			}
		} else {
			kfifo_put(&queue->softqueue, el);
		}
	}

err_unlock:
	mutex_unlock(&sec_req->lock);

	return ret;
}
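
/*
 * Completion handler for a single request element. It updates the IV for
 * chained modes, feeds the next piece of work from the software queue or
 * the backlog into the hardware queue, and completes the skcipher request
 * once its last element has returned.
 */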
static void sec_skcipher_alg_callback(struct sec_bd_info *sec_resp,
				      struct crypto_async_request *req_base)
{
	struct skcipher_request *skreq = container_of(req_base,
						      struct skcipher_request,
						      base);
	struct sec_request *sec_req = skcipher_request_ctx(skreq);
	struct sec_request *backlog_req;
	struct sec_request_el *sec_req_el, *nextrequest;
	struct sec_alg_tfm_ctx *ctx = sec_req->tfm_ctx;
	struct crypto_skcipher *atfm = crypto_skcipher_reqtfm(skreq);
	struct device *dev = ctx->queue->dev_info->dev;
	int icv_or_skey_en, ret;
	bool done;

	sec_req_el = list_first_entry(&sec_req->elements, struct sec_request_el,
				      head);
	icv_or_skey_en = (sec_resp->w0 & SEC_BD_W0_ICV_OR_SKEY_EN_M) >>
		SEC_BD_W0_ICV_OR_SKEY_EN_S;
	if (sec_resp->w1 & SEC_BD_W1_BD_INVALID || icv_or_skey_en == 3) {
		dev_err(dev, "Got an invalid answer %lu %d\n",
			sec_resp->w1 & SEC_BD_W1_BD_INVALID,
			icv_or_skey_en);
		sec_req->err = -EINVAL;
		/*
		 * We need to muddle on to avoid getting stuck with elements
		 * on the queue. Error will be reported to the requester so
		 * it should be able to handle appropriately.
		 */
	}

	mutex_lock(&ctx->queue->queuelock);
	/* Put the IV in place for chained cases */
	switch (ctx->cipher_alg) {
	case SEC_C_AES_CBC_128:
	case SEC_C_AES_CBC_192:
	case SEC_C_AES_CBC_256:
		if (sec_req_el->req.w0 & SEC_BD_W0_DE)
			sg_pcopy_to_buffer(sec_req_el->sgl_out,
					   sg_nents(sec_req_el->sgl_out),
					   skreq->iv,
					   crypto_skcipher_ivsize(atfm),
					   sec_req_el->el_length -
					   crypto_skcipher_ivsize(atfm));
		else
			sg_pcopy_to_buffer(sec_req_el->sgl_in,
					   sg_nents(sec_req_el->sgl_in),
					   skreq->iv,
					   crypto_skcipher_ivsize(atfm),
					   sec_req_el->el_length -
					   crypto_skcipher_ivsize(atfm));
		/* No need to sync to the device as coherent DMA */
		break;
	case SEC_C_AES_CTR_128:
	case SEC_C_AES_CTR_192:
	case SEC_C_AES_CTR_256:
		crypto_inc(skreq->iv, 16);
		break;
	default:
		/* Do not update */
		break;
	}

	if (ctx->queue->havesoftqueue &&
	    !kfifo_is_empty(&ctx->queue->softqueue) &&
	    sec_queue_empty(ctx->queue)) {
		ret = kfifo_get(&ctx->queue->softqueue, &nextrequest);
		if (ret <= 0)
			dev_err(dev,
				"Error getting next element from kfifo %d\n",
				ret);
		else
			/* We know there is space so this cannot fail */
			sec_queue_send(ctx->queue, &nextrequest->req,
				       nextrequest->sec_req);
	} else if (!list_empty(&ctx->backlog)) {
		/* Need to verify there is room first */
		backlog_req = list_first_entry(&ctx->backlog,
					       typeof(*backlog_req),
					       backlog_head);
		if (sec_queue_can_enqueue(ctx->queue,
					  backlog_req->num_elements) ||
		    (ctx->queue->havesoftqueue &&
		     kfifo_avail(&ctx->queue->softqueue) >
		     backlog_req->num_elements)) {
			sec_send_request(backlog_req, ctx->queue);
			backlog_req->req_base->complete(backlog_req->req_base,
							-EINPROGRESS);
			list_del(&backlog_req->backlog_head);
		}
	}
	mutex_unlock(&ctx->queue->queuelock);

	mutex_lock(&sec_req->lock);
	list_del(&sec_req_el->head);
	mutex_unlock(&sec_req->lock);
	sec_alg_free_el(sec_req_el, ctx->queue->dev_info);

	/*
	 * Request is done.
	 * The dance is needed as the lock is freed in the completion
	 */
	mutex_lock(&sec_req->lock);
	done = list_empty(&sec_req->elements);
	mutex_unlock(&sec_req->lock);
	if (done) {
		if (crypto_skcipher_ivsize(atfm)) {
			dma_unmap_single(dev, sec_req->dma_iv,
					 crypto_skcipher_ivsize(atfm),
					 DMA_TO_DEVICE);
		}
		dma_unmap_sg(dev, skreq->src, sec_req->len_in,
			     DMA_BIDIRECTIONAL);
		if (skreq->src != skreq->dst)
			dma_unmap_sg(dev, skreq->dst, sec_req->len_out,
				     DMA_BIDIRECTIONAL);
		skreq->base.complete(&skreq->base, sec_req->err);
	}
}

void sec_alg_callback(struct sec_bd_info *resp, void *shadow)
{
	struct sec_request *sec_req = shadow;

	sec_req->cb(resp, sec_req->req_base);
}
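
/*
 * Work out how many SEC_REQ_LIMIT sized chunks are needed to cover a
 * request of the given length and return an array holding the size of
 * each chunk.
 */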
static int sec_alg_alloc_and_calc_split_sizes(int length, size_t **split_sizes,
					      int *steps)
{
	size_t *sizes;
	int i;

	/* Split into suitable sized blocks */
	*steps = roundup(length, SEC_REQ_LIMIT) / SEC_REQ_LIMIT;
	sizes = kcalloc(*steps, sizeof(*sizes), GFP_KERNEL);
	if (!sizes)
		return -ENOMEM;

	for (i = 0; i < *steps - 1; i++)
		sizes[i] = SEC_REQ_LIMIT;
	sizes[*steps - 1] = length - SEC_REQ_LIMIT * (*steps - 1);
	*split_sizes = sizes;

	return 0;
}
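
/*
 * DMA map a scatterlist and split it with sg_split() into one scatterlist
 * per chunk so that each chunk can be described by its own hardware SGL.
 */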
static int sec_map_and_split_sg(struct scatterlist *sgl, size_t *split_sizes,
				int steps, struct scatterlist ***splits,
				int **splits_nents,
				int sgl_len_in,
				struct device *dev)
{
	int ret, count;

	count = dma_map_sg(dev, sgl, sgl_len_in, DMA_BIDIRECTIONAL);
	if (!count)
		return -EINVAL;

	*splits = kcalloc(steps, sizeof(struct scatterlist *), GFP_KERNEL);
	if (!*splits) {
		ret = -ENOMEM;
		goto err_unmap_sg;
	}
	*splits_nents = kcalloc(steps, sizeof(int), GFP_KERNEL);
	if (!*splits_nents) {
		ret = -ENOMEM;
		goto err_free_splits;
	}

	/* output the scatter list before and after this */
	ret = sg_split(sgl, count, 0, steps, split_sizes,
		       *splits, *splits_nents, GFP_KERNEL);
	if (ret) {
		ret = -ENOMEM;
		goto err_free_splits_nents;
	}

	return 0;

err_free_splits_nents:
	kfree(*splits_nents);
err_free_splits:
	kfree(*splits);
err_unmap_sg:
	dma_unmap_sg(dev, sgl, sgl_len_in, DMA_BIDIRECTIONAL);

	return ret;
}

/*
 * Reverses the sec_map_and_split_sg call for messages not yet added to
 * the queues.
 */
static void sec_unmap_sg_on_err(struct scatterlist *sgl, int steps,
				struct scatterlist **splits, int *splits_nents,
				int sgl_len_in, struct device *dev)
{
	int i;

	for (i = 0; i < steps; i++)
		kfree(splits[i]);
	kfree(splits_nents);
	kfree(splits);

	dma_unmap_sg(dev, sgl, sgl_len_in, DMA_BIDIRECTIONAL);
}
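
/*
 * Build a single request element (BD plus hardware SGLs) for one chunk of
 * the request, starting from the per-tfm template. different_dest selects
 * whether a separate destination SGL is used (out-of-place operation).
 */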
static struct sec_request_el
*sec_alg_alloc_and_fill_el(struct sec_bd_info *template, int encrypt,
			   int el_size, bool different_dest,
			   struct scatterlist *sgl_in, int n_ents_in,
			   struct scatterlist *sgl_out, int n_ents_out,
			   struct sec_dev_info *info)
{
	struct sec_request_el *el;
	struct sec_bd_info *req;
	int ret;

	el = kzalloc(sizeof(*el), GFP_KERNEL);
	if (!el)
		return ERR_PTR(-ENOMEM);
	el->el_length = el_size;
	req = &el->req;
	memcpy(req, template, sizeof(*req));

	req->w0 &= ~SEC_BD_W0_CIPHER_M;
	if (encrypt)
		req->w0 |= SEC_CIPHER_ENCRYPT << SEC_BD_W0_CIPHER_S;
	else
		req->w0 |= SEC_CIPHER_DECRYPT << SEC_BD_W0_CIPHER_S;

	req->w0 &= ~SEC_BD_W0_C_GRAN_SIZE_19_16_M;
	req->w0 |= ((el_size >> 16) << SEC_BD_W0_C_GRAN_SIZE_19_16_S) &
		SEC_BD_W0_C_GRAN_SIZE_19_16_M;

	req->w0 &= ~SEC_BD_W0_C_GRAN_SIZE_21_20_M;
	req->w0 |= ((el_size >> 20) << SEC_BD_W0_C_GRAN_SIZE_21_20_S) &
		SEC_BD_W0_C_GRAN_SIZE_21_20_M;

	/* Writing whole u32 so no need to take care of masking */
	req->w2 = ((1 << SEC_BD_W2_GRAN_NUM_S) & SEC_BD_W2_GRAN_NUM_M) |
		((el_size << SEC_BD_W2_C_GRAN_SIZE_15_0_S) &
		 SEC_BD_W2_C_GRAN_SIZE_15_0_M);

	req->w3 &= ~SEC_BD_W3_CIPHER_LEN_OFFSET_M;
	req->w1 |= SEC_BD_W1_ADDR_TYPE;

	el->sgl_in = sgl_in;

	ret = sec_alloc_and_fill_hw_sgl(&el->in, &el->dma_in, el->sgl_in,
					n_ents_in, info);
	if (ret)
		goto err_free_el;

	req->data_addr_lo = lower_32_bits(el->dma_in);
	req->data_addr_hi = upper_32_bits(el->dma_in);

	if (different_dest) {
		el->sgl_out = sgl_out;
		ret = sec_alloc_and_fill_hw_sgl(&el->out, &el->dma_out,
						el->sgl_out,
						n_ents_out, info);
		if (ret)
			goto err_free_hw_sgl_in;

		req->w0 |= SEC_BD_W0_DE;
		req->cipher_destin_addr_lo = lower_32_bits(el->dma_out);
		req->cipher_destin_addr_hi = upper_32_bits(el->dma_out);
	} else {
		req->w0 &= ~SEC_BD_W0_DE;
		req->cipher_destin_addr_lo = lower_32_bits(el->dma_in);
		req->cipher_destin_addr_hi = upper_32_bits(el->dma_in);
	}

	return el;

err_free_hw_sgl_in:
	sec_free_hw_sgl(el->in, el->dma_in, info);
err_free_el:
	kfree(el);

	return ERR_PTR(ret);
}
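
/*
 * Common submission path for encrypt and decrypt. The request is split
 * into SEC_REQ_LIMIT sized elements, everything is mapped for DMA, and
 * the whole set is then queued to the hardware queue, the software queue
 * or the backlog as space allows.
 */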
static int sec_alg_skcipher_crypto(struct skcipher_request *skreq,
				   bool encrypt)
{
	struct crypto_skcipher *atfm = crypto_skcipher_reqtfm(skreq);
	struct crypto_tfm *tfm = crypto_skcipher_tfm(atfm);
	struct sec_alg_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
	struct sec_queue *queue = ctx->queue;
	struct sec_request *sec_req = skcipher_request_ctx(skreq);
	struct sec_dev_info *info = queue->dev_info;
	int i, ret, steps;
	size_t *split_sizes;
	struct scatterlist **splits_in;
	struct scatterlist **splits_out = NULL;
	int *splits_in_nents;
	int *splits_out_nents = NULL;
	struct sec_request_el *el, *temp;

	mutex_init(&sec_req->lock);
	sec_req->req_base = &skreq->base;
	sec_req->err = 0;
	/* SGL mapping out here to allow us to break it up as necessary */
	sec_req->len_in = sg_nents(skreq->src);

	ret = sec_alg_alloc_and_calc_split_sizes(skreq->cryptlen, &split_sizes,
						 &steps);
	if (ret)
		return ret;
	sec_req->num_elements = steps;
	ret = sec_map_and_split_sg(skreq->src, split_sizes, steps, &splits_in,
				   &splits_in_nents, sec_req->len_in,
				   info->dev);
	if (ret)
		goto err_free_split_sizes;

	if (skreq->src != skreq->dst) {
		sec_req->len_out = sg_nents(skreq->dst);
		ret = sec_map_and_split_sg(skreq->dst, split_sizes, steps,
					   &splits_out, &splits_out_nents,
					   sec_req->len_out, info->dev);
		if (ret)
			goto err_unmap_in_sg;
	}
	/* Shared info stored in sec_req - applies to all BDs */
	sec_req->tfm_ctx = ctx;
	sec_req->cb = sec_skcipher_alg_callback;
	INIT_LIST_HEAD(&sec_req->elements);

	/*
	 * Future optimization.
	 * In the chaining case we can't use a dma pool bounce buffer
	 * but in the case where we know there is no chaining we can
	 */
	if (crypto_skcipher_ivsize(atfm)) {
		sec_req->dma_iv = dma_map_single(info->dev, skreq->iv,
						 crypto_skcipher_ivsize(atfm),
						 DMA_TO_DEVICE);
		if (dma_mapping_error(info->dev, sec_req->dma_iv)) {
			ret = -ENOMEM;
			goto err_unmap_out_sg;
		}
	}

	/* Set them all up then queue - cleaner error handling. */
	for (i = 0; i < steps; i++) {
		/*
		 * For in-place operation (src == dst) no output split was
		 * made, so pass NULL/0 for the output scatterlist; it is
		 * only used when the destination differs from the source.
		 */
		el = sec_alg_alloc_and_fill_el(&ctx->req_template,
					       encrypt ? 1 : 0,
					       split_sizes[i],
					       skreq->src != skreq->dst,
					       splits_in[i], splits_in_nents[i],
					       splits_out ? splits_out[i] : NULL,
					       splits_out_nents ?
						       splits_out_nents[i] : 0,
					       info);
		if (IS_ERR(el)) {
			ret = PTR_ERR(el);
			goto err_free_elements;
		}
		el->req.cipher_iv_addr_lo = lower_32_bits(sec_req->dma_iv);
		el->req.cipher_iv_addr_hi = upper_32_bits(sec_req->dma_iv);
		el->sec_req = sec_req;
		list_add_tail(&el->head, &sec_req->elements);
	}

	/*
	 * Only attempt to queue if the whole lot can fit in the queue -
	 * we can't successfully cleanup after a partial queueing so this
	 * must succeed or fail atomically.
	 *
	 * Big hammer test of both software and hardware queues - could be
	 * more refined but this is unlikely to happen so no need.
	 */

	/* Cleanup - all elements in pointer arrays have been copied */
	kfree(splits_in_nents);
	kfree(splits_in);
	kfree(splits_out_nents);
	kfree(splits_out);
	kfree(split_sizes);

	/* Grab a big lock for a long time to avoid concurrency issues */
	mutex_lock(&queue->queuelock);

	/*
	 * Can go on to queue if we have space in either:
	 * 1) The hardware queue and no software queue
	 * 2) The software queue
	 * AND there is nothing in the backlog. If there is a backlog we
	 * have to only queue to the backlog queue and return busy.
	 */
	if ((!sec_queue_can_enqueue(queue, steps) &&
	     (!queue->havesoftqueue ||
	      kfifo_avail(&queue->softqueue) > steps)) ||
	    !list_empty(&ctx->backlog)) {
		if ((skreq->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
			list_add_tail(&sec_req->backlog_head, &ctx->backlog);
			mutex_unlock(&queue->queuelock);
			return -EBUSY;
		}

		ret = -EBUSY;
		mutex_unlock(&queue->queuelock);
		goto err_free_elements;
	}
	ret = sec_send_request(sec_req, queue);
	mutex_unlock(&queue->queuelock);
	if (ret)
		goto err_free_elements;

	return -EINPROGRESS;

err_free_elements:
	list_for_each_entry_safe(el, temp, &sec_req->elements, head) {
		list_del(&el->head);
		sec_alg_free_el(el, info);
	}
	if (crypto_skcipher_ivsize(atfm))
		dma_unmap_single(info->dev, sec_req->dma_iv,
				 crypto_skcipher_ivsize(atfm),
				 DMA_BIDIRECTIONAL);
err_unmap_out_sg:
	if (skreq->src != skreq->dst)
		sec_unmap_sg_on_err(skreq->dst, steps, splits_out,
				    splits_out_nents, sec_req->len_out,
				    info->dev);
err_unmap_in_sg:
	sec_unmap_sg_on_err(skreq->src, steps, splits_in, splits_in_nents,
			    sec_req->len_in, info->dev);
err_free_split_sizes:
	kfree(split_sizes);

	return ret;
}

static int sec_alg_skcipher_encrypt(struct skcipher_request *req)
{
	return sec_alg_skcipher_crypto(req, true);
}

static int sec_alg_skcipher_decrypt(struct skcipher_request *req)
{
	return sec_alg_skcipher_crypto(req, false);
}

static int sec_alg_skcipher_init(struct crypto_skcipher *tfm)
{
	struct sec_alg_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);

	mutex_init(&ctx->lock);
	INIT_LIST_HEAD(&ctx->backlog);
	crypto_skcipher_set_reqsize(tfm, sizeof(struct sec_request));

	ctx->queue = sec_queue_alloc_start_safe();
	if (IS_ERR(ctx->queue))
		return PTR_ERR(ctx->queue);

	mutex_init(&ctx->queue->queuelock);
	ctx->queue->havesoftqueue = false;

	return 0;
}

static void sec_alg_skcipher_exit(struct crypto_skcipher *tfm)
{
	struct sec_alg_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct device *dev = ctx->queue->dev_info->dev;

	if (ctx->key) {
		memzero_explicit(ctx->key, SEC_MAX_CIPHER_KEY);
		dma_free_coherent(dev, SEC_MAX_CIPHER_KEY, ctx->key,
				  ctx->pkey);
	}
	sec_queue_stop_release(ctx->queue);
}

static int sec_alg_skcipher_init_with_queue(struct crypto_skcipher *tfm)
{
	struct sec_alg_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
	int ret;

	ret = sec_alg_skcipher_init(tfm);
	if (ret)
		return ret;

	INIT_KFIFO(ctx->queue->softqueue);
	ret = kfifo_alloc(&ctx->queue->softqueue, 512, GFP_KERNEL);
	if (ret) {
		sec_alg_skcipher_exit(tfm);
		return ret;
	}
	ctx->queue->havesoftqueue = true;

	return 0;
}

static void sec_alg_skcipher_exit_with_queue(struct crypto_skcipher *tfm)
{
	struct sec_alg_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);

	kfifo_free(&ctx->queue->softqueue);
	sec_alg_skcipher_exit(tfm);
}
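
/*
 * skcipher algorithms exposed by this driver. The CBC and CTR variants use
 * the *_with_queue init/exit helpers so a software queue is available to
 * keep chained requests ordered.
 */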
static struct skcipher_alg sec_algs[] = {
	{
		.base = {
			.cra_name = "ecb(aes)",
			.cra_driver_name = "hisi_sec_aes_ecb",
			.cra_priority = 4001,
			.cra_flags = CRYPTO_ALG_ASYNC,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
			.cra_alignmask = 0,
			.cra_module = THIS_MODULE,
		},
		.init = sec_alg_skcipher_init,
		.exit = sec_alg_skcipher_exit,
		.setkey = sec_alg_skcipher_setkey_aes_ecb,
		.decrypt = sec_alg_skcipher_decrypt,
		.encrypt = sec_alg_skcipher_encrypt,
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.ivsize = 0,
	}, {
		.base = {
			.cra_name = "cbc(aes)",
			.cra_driver_name = "hisi_sec_aes_cbc",
			.cra_priority = 4001,
			.cra_flags = CRYPTO_ALG_ASYNC,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
			.cra_alignmask = 0,
			.cra_module = THIS_MODULE,
		},
		.init = sec_alg_skcipher_init_with_queue,
		.exit = sec_alg_skcipher_exit_with_queue,
		.setkey = sec_alg_skcipher_setkey_aes_cbc,
		.decrypt = sec_alg_skcipher_decrypt,
		.encrypt = sec_alg_skcipher_encrypt,
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.ivsize = AES_BLOCK_SIZE,
	}, {
		.base = {
			.cra_name = "ctr(aes)",
			.cra_driver_name = "hisi_sec_aes_ctr",
			.cra_priority = 4001,
			.cra_flags = CRYPTO_ALG_ASYNC,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
			.cra_alignmask = 0,
			.cra_module = THIS_MODULE,
		},
		.init = sec_alg_skcipher_init_with_queue,
		.exit = sec_alg_skcipher_exit_with_queue,
		.setkey = sec_alg_skcipher_setkey_aes_ctr,
		.decrypt = sec_alg_skcipher_decrypt,
		.encrypt = sec_alg_skcipher_encrypt,
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.ivsize = AES_BLOCK_SIZE,
	}, {
		.base = {
			.cra_name = "xts(aes)",
			.cra_driver_name = "hisi_sec_aes_xts",
			.cra_priority = 4001,
			.cra_flags = CRYPTO_ALG_ASYNC,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
			.cra_alignmask = 0,
			.cra_module = THIS_MODULE,
		},
		.init = sec_alg_skcipher_init,
		.exit = sec_alg_skcipher_exit,
		.setkey = sec_alg_skcipher_setkey_aes_xts,
		.decrypt = sec_alg_skcipher_decrypt,
		.encrypt = sec_alg_skcipher_encrypt,
		.min_keysize = 2 * AES_MIN_KEY_SIZE,
		.max_keysize = 2 * AES_MAX_KEY_SIZE,
		.ivsize = AES_BLOCK_SIZE,
	}, {
		/* Unable to find any test vectors so untested */
		.base = {
			.cra_name = "ecb(des)",
			.cra_driver_name = "hisi_sec_des_ecb",
			.cra_priority = 4001,
			.cra_flags = CRYPTO_ALG_ASYNC,
			.cra_blocksize = DES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
			.cra_alignmask = 0,
			.cra_module = THIS_MODULE,
		},
		.init = sec_alg_skcipher_init,
		.exit = sec_alg_skcipher_exit,
		.setkey = sec_alg_skcipher_setkey_des_ecb,
		.decrypt = sec_alg_skcipher_decrypt,
		.encrypt = sec_alg_skcipher_encrypt,
		.min_keysize = DES_KEY_SIZE,
		.max_keysize = DES_KEY_SIZE,
		.ivsize = 0,
	}, {
		.base = {
			.cra_name = "cbc(des)",
			.cra_driver_name = "hisi_sec_des_cbc",
			.cra_priority = 4001,
			.cra_flags = CRYPTO_ALG_ASYNC,
			.cra_blocksize = DES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
			.cra_alignmask = 0,
			.cra_module = THIS_MODULE,
		},
		.init = sec_alg_skcipher_init_with_queue,
		.exit = sec_alg_skcipher_exit_with_queue,
		.setkey = sec_alg_skcipher_setkey_des_cbc,
		.decrypt = sec_alg_skcipher_decrypt,
		.encrypt = sec_alg_skcipher_encrypt,
		.min_keysize = DES_KEY_SIZE,
		.max_keysize = DES_KEY_SIZE,
		.ivsize = DES_BLOCK_SIZE,
	}, {
		.base = {
			.cra_name = "cbc(des3_ede)",
			.cra_driver_name = "hisi_sec_3des_cbc",
			.cra_priority = 4001,
			.cra_flags = CRYPTO_ALG_ASYNC,
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
			.cra_alignmask = 0,
			.cra_module = THIS_MODULE,
		},
		.init = sec_alg_skcipher_init_with_queue,
		.exit = sec_alg_skcipher_exit_with_queue,
		.setkey = sec_alg_skcipher_setkey_3des_cbc,
		.decrypt = sec_alg_skcipher_decrypt,
		.encrypt = sec_alg_skcipher_encrypt,
		.min_keysize = DES3_EDE_KEY_SIZE,
		.max_keysize = DES3_EDE_KEY_SIZE,
		.ivsize = DES3_EDE_BLOCK_SIZE,
	}, {
		.base = {
			.cra_name = "ecb(des3_ede)",
			.cra_driver_name = "hisi_sec_3des_ecb",
			.cra_priority = 4001,
			.cra_flags = CRYPTO_ALG_ASYNC,
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
			.cra_alignmask = 0,
			.cra_module = THIS_MODULE,
		},
		.init = sec_alg_skcipher_init,
		.exit = sec_alg_skcipher_exit,
		.setkey = sec_alg_skcipher_setkey_3des_ecb,
		.decrypt = sec_alg_skcipher_decrypt,
		.encrypt = sec_alg_skcipher_encrypt,
		.min_keysize = DES3_EDE_KEY_SIZE,
		.max_keysize = DES3_EDE_KEY_SIZE,
		.ivsize = 0,
	}
};
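
/*
 * Algorithm registration is reference counted across devices: register on
 * the first device, unregister when the last one goes away.
 */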
int sec_algs_register(void)
{
	int ret = 0;

	mutex_lock(&algs_lock);
	if (++active_devs != 1)
		goto unlock;

	ret = crypto_register_skciphers(sec_algs, ARRAY_SIZE(sec_algs));
	if (ret)
		--active_devs;
unlock:
	mutex_unlock(&algs_lock);

	return ret;
}

void sec_algs_unregister(void)
{
	mutex_lock(&algs_lock);
	if (--active_devs != 0)
		goto unlock;

	crypto_unregister_skciphers(sec_algs, ARRAY_SIZE(sec_algs));

unlock:
	mutex_unlock(&algs_lock);
}