/*
 * Software multibuffer async crypto daemon.
 *
 * Copyright (c) 2014 Tim Chen <tim.c.chen@linux.intel.com>
 *
 * Adapted from crypto daemon.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */
#include <crypto/algapi.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/aead.h>
#include <crypto/mcryptd.h>
#include <crypto/crypto_wq.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/hardirq.h>

#define MCRYPTD_MAX_CPU_QLEN 100
#define MCRYPTD_BATCH 9
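
/*
 * MCRYPTD_MAX_CPU_QLEN bounds each per-CPU request queue, and
 * MCRYPTD_BATCH bounds how many requests mcryptd_queue_worker()
 * completes per invocation before rescheduling itself (unless the
 * CPU has nothing else runnable; see single_task_running() below).
 * Both are tuning heuristics, not hard architectural limits.
 */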

static void *mcryptd_alloc_instance(struct crypto_alg *alg, unsigned int head,
				    unsigned int tail);

struct mcryptd_flush_list {
	struct list_head list;
	struct mutex lock;
};

static struct mcryptd_flush_list __percpu *mcryptd_flist;

struct hashd_instance_ctx {
	struct crypto_shash_spawn spawn;
	struct mcryptd_queue *queue;
};

static void mcryptd_queue_worker(struct work_struct *work);

void mcryptd_arm_flusher(struct mcryptd_alg_cstate *cstate, unsigned long delay)
{
	struct mcryptd_flush_list *flist;

	if (!cstate->flusher_engaged) {
		/* put the flusher on the flush list */
		flist = per_cpu_ptr(mcryptd_flist, smp_processor_id());
		mutex_lock(&flist->lock);
		list_add_tail(&cstate->flush_list, &flist->list);
		cstate->flusher_engaged = true;
		cstate->next_flush = jiffies + delay;
		queue_delayed_work_on(smp_processor_id(), kcrypto_wq,
				      &cstate->flush, delay);
		mutex_unlock(&flist->lock);
	}
}
EXPORT_SYMBOL(mcryptd_arm_flusher);
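
/*
 * Usage sketch (illustrative only, not part of this file): a
 * multibuffer algorithm typically arms the flusher right after
 * staging a partial job, so the job cannot wait indefinitely for
 * lane-mates. my_mb_add_list() and FLUSH_INTERVAL below are the
 * caller's own hypothetical names; only mcryptd_arm_flusher() is
 * provided here.
 *
 *	static void my_mb_add_list(struct mcryptd_hash_request_ctx *rctx,
 *				   struct mcryptd_alg_cstate *cstate)
 *	{
 *		unsigned long delay = usecs_to_jiffies(FLUSH_INTERVAL);
 *
 *		list_add_tail(&rctx->waiter, &cstate->work_list);
 *		mcryptd_arm_flusher(cstate, delay);
 *	}
 */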

static int mcryptd_init_queue(struct mcryptd_queue *queue,
			      unsigned int max_cpu_qlen)
{
	int cpu;
	struct mcryptd_cpu_queue *cpu_queue;

	queue->cpu_queue = alloc_percpu(struct mcryptd_cpu_queue);
	pr_debug("mqueue:%p mcryptd_cpu_queue %p\n", queue, queue->cpu_queue);
	if (!queue->cpu_queue)
		return -ENOMEM;
	for_each_possible_cpu(cpu) {
		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
		pr_debug("cpu_queue #%d %p\n", cpu, queue->cpu_queue);
		crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
		INIT_WORK(&cpu_queue->work, mcryptd_queue_worker);
	}
	return 0;
}

static void mcryptd_fini_queue(struct mcryptd_queue *queue)
{
	int cpu;
	struct mcryptd_cpu_queue *cpu_queue;

	for_each_possible_cpu(cpu) {
		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
		BUG_ON(cpu_queue->queue.qlen);
	}
	free_percpu(queue->cpu_queue);
}

static int mcryptd_enqueue_request(struct mcryptd_queue *queue,
				   struct crypto_async_request *request,
				   struct mcryptd_hash_request_ctx *rctx)
{
	int cpu, err;
	struct mcryptd_cpu_queue *cpu_queue;

	cpu = get_cpu();
	cpu_queue = this_cpu_ptr(queue->cpu_queue);
	rctx->tag.cpu = cpu;

	err = crypto_enqueue_request(&cpu_queue->queue, request);
	pr_debug("enqueue request: cpu %d cpu_queue %p request %p\n",
		 cpu, cpu_queue, request);
	queue_work_on(cpu, kcrypto_wq, &cpu_queue->work);
	put_cpu();

	return err;
}

/*
 * Try to opportunistically flush partially completed jobs if the
 * crypto daemon is the only task running.
 */
static void mcryptd_opportunistic_flush(void)
{
	struct mcryptd_flush_list *flist;
	struct mcryptd_alg_cstate *cstate;

	flist = per_cpu_ptr(mcryptd_flist, smp_processor_id());
	while (single_task_running()) {
		mutex_lock(&flist->lock);
		if (list_empty(&flist->list)) {
			mutex_unlock(&flist->lock);
			return;
		}
		cstate = list_entry(flist->list.next,
				    struct mcryptd_alg_cstate, flush_list);
		if (!cstate->flusher_engaged) {
			mutex_unlock(&flist->lock);
			return;
		}
		list_del(&cstate->flush_list);
		cstate->flusher_engaged = false;
		mutex_unlock(&flist->lock);
		cstate->alg_state->flusher(cstate);
	}
}

/*
 * Called in workqueue context: perform one piece of real crypto work
 * (via req->complete) and reschedule itself if there is more work to
 * do.
 */
static void mcryptd_queue_worker(struct work_struct *work)
{
	struct mcryptd_cpu_queue *cpu_queue;
	struct crypto_async_request *req, *backlog;
	int i;

	/*
	 * Need to loop through more than once for multi-buffer to
	 * be effective.
	 */
	cpu_queue = container_of(work, struct mcryptd_cpu_queue, work);
	i = 0;
	while (i < MCRYPTD_BATCH || single_task_running()) {
		/*
		 * preempt_disable/enable is used to prevent
		 * being preempted by mcryptd_enqueue_request()
		 */
		local_bh_disable();
		preempt_disable();
		backlog = crypto_get_backlog(&cpu_queue->queue);
		req = crypto_dequeue_request(&cpu_queue->queue);
		preempt_enable();
		local_bh_enable();

		if (!req) {
			mcryptd_opportunistic_flush();
			return;
		}

		if (backlog)
			backlog->complete(backlog, -EINPROGRESS);
		req->complete(req, 0);
		if (!cpu_queue->queue.qlen)
			return;
		++i;
	}
	if (cpu_queue->queue.qlen)
		queue_work(kcrypto_wq, &cpu_queue->work);
}
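
/*
 * Batching in practice: with MCRYPTD_BATCH == 9, a busy CPU completes
 * at most nine requests per worker invocation and then requeues the
 * work item so other tasks can run; a CPU with nothing else runnable
 * (single_task_running()) keeps draining, and an empty queue triggers
 * an opportunistic flush of partially filled multibuffer lanes.
 */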

void mcryptd_flusher(struct work_struct *__work)
{
	struct mcryptd_alg_cstate *alg_cpu_state;
	struct mcryptd_alg_state *alg_state;
	struct mcryptd_flush_list *flist;
	int cpu;

	cpu = smp_processor_id();
	alg_cpu_state = container_of(to_delayed_work(__work),
				     struct mcryptd_alg_cstate, flush);
	alg_state = alg_cpu_state->alg_state;
	if (alg_cpu_state->cpu != cpu)
		pr_debug("mcryptd error: work on cpu %d, should be cpu %d\n",
			 cpu, alg_cpu_state->cpu);

	if (alg_cpu_state->flusher_engaged) {
		flist = per_cpu_ptr(mcryptd_flist, cpu);
		mutex_lock(&flist->lock);
		list_del(&alg_cpu_state->flush_list);
		alg_cpu_state->flusher_engaged = false;
		mutex_unlock(&flist->lock);
		alg_state->flusher(alg_cpu_state);
	}
}
EXPORT_SYMBOL_GPL(mcryptd_flusher);

static inline struct mcryptd_queue *mcryptd_get_queue(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct mcryptd_instance_ctx *ictx = crypto_instance_ctx(inst);

	return ictx->queue;
}

static void *mcryptd_alloc_instance(struct crypto_alg *alg, unsigned int head,
				    unsigned int tail)
{
	char *p;
	struct crypto_instance *inst;
	int err;

	p = kzalloc(head + sizeof(*inst) + tail, GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	inst = (void *)(p + head);

	err = -ENAMETOOLONG;
	if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
		     "mcryptd(%s)", alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
		goto out_free_inst;

	memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);

	inst->alg.cra_priority = alg->cra_priority + 50;
	inst->alg.cra_blocksize = alg->cra_blocksize;
	inst->alg.cra_alignmask = alg->cra_alignmask;

out:
	return p;

out_free_inst:
	kfree(p);
	p = ERR_PTR(err);
	goto out;
}

static int mcryptd_hash_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct hashd_instance_ctx *ictx = crypto_instance_ctx(inst);
	struct crypto_shash_spawn *spawn = &ictx->spawn;
	struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_shash *hash;

	hash = crypto_spawn_shash(spawn);
	if (IS_ERR(hash))
		return PTR_ERR(hash);

	ctx->child = hash;
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct mcryptd_hash_request_ctx) +
				 crypto_shash_descsize(hash));
	return 0;
}

static void mcryptd_hash_exit_tfm(struct crypto_tfm *tfm)
{
	struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_shash(ctx->child);
}

static int mcryptd_hash_setkey(struct crypto_ahash *parent,
			       const u8 *key, unsigned int keylen)
{
	struct mcryptd_hash_ctx *ctx = crypto_ahash_ctx(parent);
	struct crypto_shash *child = ctx->child;
	int err;

	crypto_shash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_shash_set_flags(child, crypto_ahash_get_flags(parent) &
			       CRYPTO_TFM_REQ_MASK);
	err = crypto_shash_setkey(child, key, keylen);
	crypto_ahash_set_flags(parent, crypto_shash_get_flags(child) &
			       CRYPTO_TFM_RES_MASK);
	return err;
}

static int mcryptd_hash_enqueue(struct ahash_request *req,
				crypto_completion_t complete)
{
	int ret;

	struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct mcryptd_queue *queue =
		mcryptd_get_queue(crypto_ahash_tfm(tfm));

	rctx->complete = req->base.complete;
	req->base.complete = complete;

	ret = mcryptd_enqueue_request(queue, &req->base, rctx);

	return ret;
}

static void mcryptd_hash_init(struct crypto_async_request *req_async, int err)
{
	struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
	struct crypto_shash *child = ctx->child;
	struct ahash_request *req = ahash_request_cast(req_async);
	struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct shash_desc *desc = &rctx->desc;

	if (unlikely(err == -EINPROGRESS))
		goto out;

	desc->tfm = child;
	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = crypto_shash_init(desc);

	req->base.complete = rctx->complete;

out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int mcryptd_hash_init_enqueue(struct ahash_request *req)
{
	return mcryptd_hash_enqueue(req, mcryptd_hash_init);
}

static void mcryptd_hash_update(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	err = shash_ahash_mcryptd_update(req, &rctx->desc);
	if (err) {
		req->base.complete = rctx->complete;
		goto out;
	}

	return;
out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int mcryptd_hash_update_enqueue(struct ahash_request *req)
{
	return mcryptd_hash_enqueue(req, mcryptd_hash_update);
}

static void mcryptd_hash_final(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	err = shash_ahash_mcryptd_final(req, &rctx->desc);
	if (err) {
		req->base.complete = rctx->complete;
		goto out;
	}

	return;
out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int mcryptd_hash_final_enqueue(struct ahash_request *req)
{
	return mcryptd_hash_enqueue(req, mcryptd_hash_final);
}

static void mcryptd_hash_finup(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	err = shash_ahash_mcryptd_finup(req, &rctx->desc);
	if (err) {
		req->base.complete = rctx->complete;
		goto out;
	}

	return;
out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int mcryptd_hash_finup_enqueue(struct ahash_request *req)
{
	return mcryptd_hash_enqueue(req, mcryptd_hash_finup);
}

static void mcryptd_hash_digest(struct crypto_async_request *req_async, int err)
{
	struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
	struct crypto_shash *child = ctx->child;
	struct ahash_request *req = ahash_request_cast(req_async);
	struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct shash_desc *desc = &rctx->desc;

	if (unlikely(err == -EINPROGRESS))
		goto out;

	desc->tfm = child;
	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP; /* check this again */

	err = shash_ahash_mcryptd_digest(req, desc);

	if (err) {
		req->base.complete = rctx->complete;
		goto out;
	}

	return;
out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int mcryptd_hash_digest_enqueue(struct ahash_request *req)
{
	return mcryptd_hash_enqueue(req, mcryptd_hash_digest);
}

static int mcryptd_hash_export(struct ahash_request *req, void *out)
{
	struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	return crypto_shash_export(&rctx->desc, out);
}

static int mcryptd_hash_import(struct ahash_request *req, const void *in)
{
	struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	return crypto_shash_import(&rctx->desc, in);
}

static int mcryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
			       struct mcryptd_queue *queue)
{
	struct hashd_instance_ctx *ctx;
	struct ahash_instance *inst;
	struct shash_alg *salg;
	struct crypto_alg *alg;
	int err;

	salg = shash_attr_alg(tb[1], 0, 0);
	if (IS_ERR(salg))
		return PTR_ERR(salg);

	alg = &salg->base;
	pr_debug("crypto: mcryptd hash alg: %s\n", alg->cra_name);
	inst = mcryptd_alloc_instance(alg, ahash_instance_headroom(),
				      sizeof(*ctx));
	err = PTR_ERR(inst);
	if (IS_ERR(inst))
		goto out_put_alg;

	ctx = ahash_instance_ctx(inst);
	ctx->queue = queue;

	err = crypto_init_shash_spawn(&ctx->spawn, salg,
				      ahash_crypto_instance(inst));
	if (err)
		goto out_free_inst;

	inst->alg.halg.base.cra_flags = CRYPTO_ALG_ASYNC;

	inst->alg.halg.digestsize = salg->digestsize;
	inst->alg.halg.base.cra_ctxsize = sizeof(struct mcryptd_hash_ctx);

	inst->alg.halg.base.cra_init = mcryptd_hash_init_tfm;
	inst->alg.halg.base.cra_exit = mcryptd_hash_exit_tfm;

	inst->alg.init   = mcryptd_hash_init_enqueue;
	inst->alg.update = mcryptd_hash_update_enqueue;
	inst->alg.final  = mcryptd_hash_final_enqueue;
	inst->alg.finup  = mcryptd_hash_finup_enqueue;
	inst->alg.export = mcryptd_hash_export;
	inst->alg.import = mcryptd_hash_import;
	inst->alg.setkey = mcryptd_hash_setkey;
	inst->alg.digest = mcryptd_hash_digest_enqueue;

	err = ahash_register_instance(tmpl, inst);
	if (err) {
		crypto_drop_shash(&ctx->spawn);
out_free_inst:
		kfree(inst);
	}
out_put_alg:
	crypto_mod_put(alg);
	return err;
}

static struct mcryptd_queue mqueue;

static int mcryptd_create(struct crypto_template *tmpl, struct rtattr **tb)
{
	struct crypto_attr_type *algt;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return PTR_ERR(algt);

	switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_DIGEST:
		return mcryptd_create_hash(tmpl, tb, &mqueue);
	}

	return -EINVAL;
}

static void mcryptd_free(struct crypto_instance *inst)
{
	struct mcryptd_instance_ctx *ctx = crypto_instance_ctx(inst);
	struct hashd_instance_ctx *hctx = crypto_instance_ctx(inst);

	switch (inst->alg.cra_flags & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_AHASH:
		crypto_drop_shash(&hctx->spawn);
		kfree(ahash_instance(inst));
		return;
	default:
		crypto_drop_spawn(&ctx->spawn);
		kfree(inst);
	}
}

static struct crypto_template mcryptd_tmpl = {
	.name = "mcryptd",
	.create = mcryptd_create,
	.free = mcryptd_free,
	.module = THIS_MODULE,
};

struct mcryptd_ahash *mcryptd_alloc_ahash(const char *alg_name,
					  u32 type, u32 mask)
{
	char mcryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct crypto_ahash *tfm;

	if (snprintf(mcryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "mcryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);
	tfm = crypto_alloc_ahash(mcryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);
	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_ahash(tfm);
		return ERR_PTR(-EINVAL);
	}

	return __mcryptd_ahash_cast(tfm);
}
EXPORT_SYMBOL_GPL(mcryptd_alloc_ahash);
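
/*
 * Usage sketch (illustrative only; "__intel_sha1-mb" stands in for
 * whatever inner multibuffer driver name the caller wraps, and error
 * handling is abbreviated):
 *
 *	struct mcryptd_ahash *mtfm;
 *
 *	mtfm = mcryptd_alloc_ahash("__intel_sha1-mb", 0, 0);
 *	if (IS_ERR(mtfm))
 *		return PTR_ERR(mtfm);
 *	... submit ahash requests against &mtfm->base ...
 *	mcryptd_free_ahash(mtfm);
 */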

int shash_ahash_mcryptd_digest(struct ahash_request *req,
			       struct shash_desc *desc)
{
	int err;

	err = crypto_shash_init(desc) ?:
	      shash_ahash_mcryptd_finup(req, desc);

	return err;
}
EXPORT_SYMBOL_GPL(shash_ahash_mcryptd_digest);

int shash_ahash_mcryptd_update(struct ahash_request *req,
			       struct shash_desc *desc)
{
	struct crypto_shash *tfm = desc->tfm;
	struct shash_alg *shash = crypto_shash_alg(tfm);

	/* alignment is to be done by multi-buffer crypto algorithm if needed */

	return shash->update(desc, NULL, 0);
}
EXPORT_SYMBOL_GPL(shash_ahash_mcryptd_update);

int shash_ahash_mcryptd_finup(struct ahash_request *req,
			      struct shash_desc *desc)
{
	struct crypto_shash *tfm = desc->tfm;
	struct shash_alg *shash = crypto_shash_alg(tfm);

	/* alignment is to be done by multi-buffer crypto algorithm if needed */

	return shash->finup(desc, NULL, 0, req->result);
}
EXPORT_SYMBOL_GPL(shash_ahash_mcryptd_finup);

int shash_ahash_mcryptd_final(struct ahash_request *req,
			      struct shash_desc *desc)
{
	struct crypto_shash *tfm = desc->tfm;
	struct shash_alg *shash = crypto_shash_alg(tfm);

	/* alignment is to be done by multi-buffer crypto algorithm if needed */

	return shash->final(desc, req->result);
}
EXPORT_SYMBOL_GPL(shash_ahash_mcryptd_final);

struct crypto_shash *mcryptd_ahash_child(struct mcryptd_ahash *tfm)
{
	struct mcryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

	return ctx->child;
}
EXPORT_SYMBOL_GPL(mcryptd_ahash_child);

struct shash_desc *mcryptd_shash_desc(struct ahash_request *req)
{
	struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	return &rctx->desc;
}
EXPORT_SYMBOL_GPL(mcryptd_shash_desc);

void mcryptd_free_ahash(struct mcryptd_ahash *tfm)
{
	crypto_free_ahash(&tfm->base);
}
EXPORT_SYMBOL_GPL(mcryptd_free_ahash);
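
/*
 * The accessors above let a wrapping algorithm reach mcryptd's inner
 * state: mcryptd_ahash_child() returns the inner shash transform and
 * mcryptd_shash_desc() the per-request descriptor. A minimal sketch,
 * assuming mtfm was allocated by mcryptd_alloc_ahash() as shown
 * earlier:
 *
 *	struct crypto_shash *child = mcryptd_ahash_child(mtfm);
 *	unsigned int dsize = crypto_shash_descsize(child);
 */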

static int __init mcryptd_init(void)
{
	int err, cpu;
	struct mcryptd_flush_list *flist;

	mcryptd_flist = alloc_percpu(struct mcryptd_flush_list);
	if (!mcryptd_flist)
		return -ENOMEM;
	for_each_possible_cpu(cpu) {
		flist = per_cpu_ptr(mcryptd_flist, cpu);
		INIT_LIST_HEAD(&flist->list);
		mutex_init(&flist->lock);
	}

	err = mcryptd_init_queue(&mqueue, MCRYPTD_MAX_CPU_QLEN);
	if (err) {
		free_percpu(mcryptd_flist);
		return err;
	}

	err = crypto_register_template(&mcryptd_tmpl);
	if (err) {
		mcryptd_fini_queue(&mqueue);
		free_percpu(mcryptd_flist);
	}

	return err;
}

static void __exit mcryptd_exit(void)
{
	mcryptd_fini_queue(&mqueue);
	crypto_unregister_template(&mcryptd_tmpl);
	free_percpu(mcryptd_flist);
}

subsys_initcall(mcryptd_init);
module_exit(mcryptd_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Software async multibuffer crypto daemon");
MODULE_ALIAS_CRYPTO("mcryptd");