mcryptd.c

/*
 * Software multibuffer async crypto daemon.
 *
 * Copyright (c) 2014 Tim Chen <tim.c.chen@linux.intel.com>
 *
 * Adapted from crypto daemon.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/algapi.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/aead.h>
#include <crypto/mcryptd.h>
#include <crypto/crypto_wq.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/hardirq.h>

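/*
 * MCRYPTD_MAX_CPU_QLEN bounds the length of each per-CPU request
 * queue. MCRYPTD_BATCH is the number of requests the queue worker
 * handles per invocation before rescheduling itself, unless it is
 * the only task running.
 */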
#define MCRYPTD_MAX_CPU_QLEN 100
#define MCRYPTD_BATCH 9

static void *mcryptd_alloc_instance(struct crypto_alg *alg, unsigned int head,
                                    unsigned int tail);

struct mcryptd_flush_list {
        struct list_head list;
        struct mutex lock;
};

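/*
 * Per-CPU list of engaged flushers, protected by the list's mutex.
 * A flusher pushes out partially completed multi-buffer jobs once the
 * delay set by mcryptd_arm_flusher() expires or the CPU becomes idle
 * enough for an opportunistic flush.
 */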
static struct mcryptd_flush_list __percpu *mcryptd_flist;

struct hashd_instance_ctx {
        struct crypto_ahash_spawn spawn;
        struct mcryptd_queue *queue;
};

static void mcryptd_queue_worker(struct work_struct *work);

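/*
 * Put @cstate's flusher on this CPU's flush list, unless it is already
 * engaged, and schedule it to run after @delay jiffies.
 */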
void mcryptd_arm_flusher(struct mcryptd_alg_cstate *cstate, unsigned long delay)
{
        struct mcryptd_flush_list *flist;

        if (!cstate->flusher_engaged) {
                /* put the flusher on the flush list */
                flist = per_cpu_ptr(mcryptd_flist, smp_processor_id());
                mutex_lock(&flist->lock);
                list_add_tail(&cstate->flush_list, &flist->list);
                cstate->flusher_engaged = true;
                cstate->next_flush = jiffies + delay;
                queue_delayed_work_on(smp_processor_id(), kcrypto_wq,
                                      &cstate->flush, delay);
                mutex_unlock(&flist->lock);
        }
}
EXPORT_SYMBOL(mcryptd_arm_flusher);

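/* Allocate the per-CPU request queues and attach a worker to each. */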
static int mcryptd_init_queue(struct mcryptd_queue *queue,
                              unsigned int max_cpu_qlen)
{
        int cpu;
        struct mcryptd_cpu_queue *cpu_queue;

        queue->cpu_queue = alloc_percpu(struct mcryptd_cpu_queue);
        pr_debug("mqueue:%p mcryptd_cpu_queue %p\n", queue, queue->cpu_queue);
        if (!queue->cpu_queue)
                return -ENOMEM;
        for_each_possible_cpu(cpu) {
                cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
                pr_debug("cpu_queue #%d %p\n", cpu, queue->cpu_queue);
                crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
                INIT_WORK(&cpu_queue->work, mcryptd_queue_worker);
        }
        return 0;
}

static void mcryptd_fini_queue(struct mcryptd_queue *queue)
{
        int cpu;
        struct mcryptd_cpu_queue *cpu_queue;

        for_each_possible_cpu(cpu) {
                cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
                BUG_ON(cpu_queue->queue.qlen);
        }
        free_percpu(queue->cpu_queue);
}

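/*
 * Queue @request on the current CPU's queue and kick that CPU's
 * worker; the submitting CPU is recorded in the request context's tag.
 */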
static int mcryptd_enqueue_request(struct mcryptd_queue *queue,
                                   struct crypto_async_request *request,
                                   struct mcryptd_hash_request_ctx *rctx)
{
        int cpu, err;
        struct mcryptd_cpu_queue *cpu_queue;

        cpu = get_cpu();
        cpu_queue = this_cpu_ptr(queue->cpu_queue);
        rctx->tag.cpu = cpu;

        err = crypto_enqueue_request(&cpu_queue->queue, request);
        pr_debug("enqueue request: cpu %d cpu_queue %p request %p\n",
                 cpu, cpu_queue, request);
        queue_work_on(cpu, kcrypto_wq, &cpu_queue->work);
        put_cpu();

        return err;
}

/*
 * Try to opportunistically flush the partially completed jobs if
 * the crypto daemon is the only task running.
 */
static void mcryptd_opportunistic_flush(void)
{
        struct mcryptd_flush_list *flist;
        struct mcryptd_alg_cstate *cstate;

        flist = per_cpu_ptr(mcryptd_flist, smp_processor_id());
        while (single_task_running()) {
                mutex_lock(&flist->lock);
                cstate = list_first_entry_or_null(&flist->list,
                                struct mcryptd_alg_cstate, flush_list);
                if (!cstate || !cstate->flusher_engaged) {
                        mutex_unlock(&flist->lock);
                        return;
                }
                list_del(&cstate->flush_list);
                cstate->flusher_engaged = false;
                mutex_unlock(&flist->lock);
                cstate->alg_state->flusher(cstate);
        }
}

/*
 * Called in workqueue context; do one piece of real crypto work (via
 * req->complete) and reschedule itself if there is more work to do.
 */
static void mcryptd_queue_worker(struct work_struct *work)
{
        struct mcryptd_cpu_queue *cpu_queue;
        struct crypto_async_request *req, *backlog;
        int i;

        /*
         * Need to loop through more than once for multi-buffer to
         * be effective.
         */
        cpu_queue = container_of(work, struct mcryptd_cpu_queue, work);
        i = 0;
        while (i < MCRYPTD_BATCH || single_task_running()) {
                /*
                 * preempt_disable/enable is used to prevent
                 * being preempted by mcryptd_enqueue_request()
                 */
                local_bh_disable();
                preempt_disable();
                backlog = crypto_get_backlog(&cpu_queue->queue);
                req = crypto_dequeue_request(&cpu_queue->queue);
                preempt_enable();
                local_bh_enable();

                if (!req) {
                        mcryptd_opportunistic_flush();
                        return;
                }

                if (backlog)
                        backlog->complete(backlog, -EINPROGRESS);
                req->complete(req, 0);
                if (!cpu_queue->queue.qlen)
                        return;
                ++i;
        }
        if (cpu_queue->queue.qlen)
                queue_work(kcrypto_wq, &cpu_queue->work);
}

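/*
 * Delayed-work handler: if this CPU's flusher is still engaged, take
 * it off the flush list and flush the partially completed jobs.
 */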
void mcryptd_flusher(struct work_struct *__work)
{
        struct mcryptd_alg_cstate *alg_cpu_state;
        struct mcryptd_alg_state *alg_state;
        struct mcryptd_flush_list *flist;
        int cpu;

        cpu = smp_processor_id();
        alg_cpu_state = container_of(to_delayed_work(__work),
                                     struct mcryptd_alg_cstate, flush);
        alg_state = alg_cpu_state->alg_state;
        if (alg_cpu_state->cpu != cpu)
                pr_debug("mcryptd error: work on cpu %d, should be cpu %d\n",
                         cpu, alg_cpu_state->cpu);

        if (alg_cpu_state->flusher_engaged) {
                flist = per_cpu_ptr(mcryptd_flist, cpu);
                mutex_lock(&flist->lock);
                list_del(&alg_cpu_state->flush_list);
                alg_cpu_state->flusher_engaged = false;
                mutex_unlock(&flist->lock);
                alg_state->flusher(alg_cpu_state);
        }
}
EXPORT_SYMBOL_GPL(mcryptd_flusher);

static inline struct mcryptd_queue *mcryptd_get_queue(struct crypto_tfm *tfm)
{
        struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
        struct mcryptd_instance_ctx *ictx = crypto_instance_ctx(inst);

        return ictx->queue;
}

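/*
 * Allocate a crypto instance with @head bytes of headroom in front of
 * it and @tail bytes of context behind it, deriving the instance names
 * from @alg.
 */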
static void *mcryptd_alloc_instance(struct crypto_alg *alg, unsigned int head,
                                    unsigned int tail)
{
        char *p;
        struct crypto_instance *inst;
        int err;

        p = kzalloc(head + sizeof(*inst) + tail, GFP_KERNEL);
        if (!p)
                return ERR_PTR(-ENOMEM);

        inst = (void *)(p + head);

        err = -ENAMETOOLONG;
        if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
                     "mcryptd(%s)", alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
                goto out_free_inst;

        memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);

        inst->alg.cra_priority = alg->cra_priority + 50;
        inst->alg.cra_blocksize = alg->cra_blocksize;
        inst->alg.cra_alignmask = alg->cra_alignmask;

out:
        return p;

out_free_inst:
        kfree(p);
        p = ERR_PTR(err);
        goto out;
}

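/* Propagate the CRYPTO_ALG_INTERNAL bit from the template attributes. */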
static inline void mcryptd_check_internal(struct rtattr **tb, u32 *type,
                                          u32 *mask)
{
        struct crypto_attr_type *algt;

        algt = crypto_get_attr_type(tb);
        if (IS_ERR(algt))
                return;
        if ((algt->type & CRYPTO_ALG_INTERNAL))
                *type |= CRYPTO_ALG_INTERNAL;
        if ((algt->mask & CRYPTO_ALG_INTERNAL))
                *mask |= CRYPTO_ALG_INTERNAL;
}

static int mcryptd_hash_init_tfm(struct crypto_tfm *tfm)
{
        struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
        struct hashd_instance_ctx *ictx = crypto_instance_ctx(inst);
        struct crypto_ahash_spawn *spawn = &ictx->spawn;
        struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
        struct crypto_ahash *hash;

        hash = crypto_spawn_ahash(spawn);
        if (IS_ERR(hash))
                return PTR_ERR(hash);

        ctx->child = hash;
        crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
                                 sizeof(struct mcryptd_hash_request_ctx) +
                                 crypto_ahash_reqsize(hash));
        return 0;
}

static void mcryptd_hash_exit_tfm(struct crypto_tfm *tfm)
{
        struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);

        crypto_free_ahash(ctx->child);
}

static int mcryptd_hash_setkey(struct crypto_ahash *parent,
                               const u8 *key, unsigned int keylen)
{
        struct mcryptd_hash_ctx *ctx = crypto_ahash_ctx(parent);
        struct crypto_ahash *child = ctx->child;
        int err;

        crypto_ahash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
        crypto_ahash_set_flags(child, crypto_ahash_get_flags(parent) &
                               CRYPTO_TFM_REQ_MASK);
        err = crypto_ahash_setkey(child, key, keylen);
        crypto_ahash_set_flags(parent, crypto_ahash_get_flags(child) &
                               CRYPTO_TFM_RES_MASK);
        return err;
}

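/*
 * Swap in @complete as the request's completion handler, stash the
 * original handler in the request context, and hand the request to
 * the daemon's queue.
 */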
static int mcryptd_hash_enqueue(struct ahash_request *req,
                                crypto_completion_t complete)
{
        int ret;

        struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct mcryptd_queue *queue =
                mcryptd_get_queue(crypto_ahash_tfm(tfm));

        rctx->complete = req->base.complete;
        req->base.complete = complete;

        ret = mcryptd_enqueue_request(queue, &req->base, rctx);

        return ret;
}

static void mcryptd_hash_init(struct crypto_async_request *req_async, int err)
{
        struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
        struct crypto_ahash *child = ctx->child;
        struct ahash_request *req = ahash_request_cast(req_async);
        struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
        struct ahash_request *desc = &rctx->areq;

        if (unlikely(err == -EINPROGRESS))
                goto out;

        ahash_request_set_tfm(desc, child);
        ahash_request_set_callback(desc, CRYPTO_TFM_REQ_MAY_SLEEP,
                                   rctx->complete, req_async);

        rctx->out = req->result;
        err = crypto_ahash_init(desc);

out:
        local_bh_disable();
        rctx->complete(&req->base, err);
        local_bh_enable();
}

static int mcryptd_hash_init_enqueue(struct ahash_request *req)
{
        return mcryptd_hash_enqueue(req, mcryptd_hash_init);
}

static void mcryptd_hash_update(struct crypto_async_request *req_async, int err)
{
        struct ahash_request *req = ahash_request_cast(req_async);
        struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

        if (unlikely(err == -EINPROGRESS))
                goto out;

        rctx->out = req->result;
        err = ahash_mcryptd_update(&rctx->areq);
        if (err) {
                req->base.complete = rctx->complete;
                goto out;
        }

        return;
out:
        local_bh_disable();
        rctx->complete(&req->base, err);
        local_bh_enable();
}

static int mcryptd_hash_update_enqueue(struct ahash_request *req)
{
        return mcryptd_hash_enqueue(req, mcryptd_hash_update);
}

static void mcryptd_hash_final(struct crypto_async_request *req_async, int err)
{
        struct ahash_request *req = ahash_request_cast(req_async);
        struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

        if (unlikely(err == -EINPROGRESS))
                goto out;

        rctx->out = req->result;
        err = ahash_mcryptd_final(&rctx->areq);
        if (err) {
                req->base.complete = rctx->complete;
                goto out;
        }

        return;
out:
        local_bh_disable();
        rctx->complete(&req->base, err);
        local_bh_enable();
}

static int mcryptd_hash_final_enqueue(struct ahash_request *req)
{
        return mcryptd_hash_enqueue(req, mcryptd_hash_final);
}

static void mcryptd_hash_finup(struct crypto_async_request *req_async, int err)
{
        struct ahash_request *req = ahash_request_cast(req_async);
        struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

        if (unlikely(err == -EINPROGRESS))
                goto out;

        rctx->out = req->result;
        err = ahash_mcryptd_finup(&rctx->areq);
        if (err) {
                req->base.complete = rctx->complete;
                goto out;
        }

        return;
out:
        local_bh_disable();
        rctx->complete(&req->base, err);
        local_bh_enable();
}

static int mcryptd_hash_finup_enqueue(struct ahash_request *req)
{
        return mcryptd_hash_enqueue(req, mcryptd_hash_finup);
}

static void mcryptd_hash_digest(struct crypto_async_request *req_async, int err)
{
        struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
        struct crypto_ahash *child = ctx->child;
        struct ahash_request *req = ahash_request_cast(req_async);
        struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
        struct ahash_request *desc = &rctx->areq;

        if (unlikely(err == -EINPROGRESS))
                goto out;

        ahash_request_set_tfm(desc, child);
        ahash_request_set_callback(desc, CRYPTO_TFM_REQ_MAY_SLEEP,
                                   rctx->complete, req_async);

        rctx->out = req->result;
        err = ahash_mcryptd_digest(desc);

out:
        local_bh_disable();
        rctx->complete(&req->base, err);
        local_bh_enable();
}

static int mcryptd_hash_digest_enqueue(struct ahash_request *req)
{
        return mcryptd_hash_enqueue(req, mcryptd_hash_digest);
}

static int mcryptd_hash_export(struct ahash_request *req, void *out)
{
        struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

        return crypto_ahash_export(&rctx->areq, out);
}

static int mcryptd_hash_import(struct ahash_request *req, const void *in)
{
        struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

        return crypto_ahash_import(&rctx->areq, in);
}

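/*
 * Instantiate an "mcryptd(...)" wrapper around the ahash algorithm
 * named by the template parameters in @tb.
 */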
static int mcryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
                               struct mcryptd_queue *queue)
{
        struct hashd_instance_ctx *ctx;
        struct ahash_instance *inst;
        struct hash_alg_common *halg;
        struct crypto_alg *alg;
        u32 type = 0;
        u32 mask = 0;
        int err;

        mcryptd_check_internal(tb, &type, &mask);

        halg = ahash_attr_alg(tb[1], type, mask);
        if (IS_ERR(halg))
                return PTR_ERR(halg);

        alg = &halg->base;
        pr_debug("crypto: mcryptd hash alg: %s\n", alg->cra_name);
        inst = mcryptd_alloc_instance(alg, ahash_instance_headroom(),
                                      sizeof(*ctx));
        err = PTR_ERR(inst);
        if (IS_ERR(inst))
                goto out_put_alg;

        ctx = ahash_instance_ctx(inst);
        ctx->queue = queue;

        err = crypto_init_ahash_spawn(&ctx->spawn, halg,
                                      ahash_crypto_instance(inst));
        if (err)
                goto out_free_inst;

        type = CRYPTO_ALG_ASYNC;
        if (alg->cra_flags & CRYPTO_ALG_INTERNAL)
                type |= CRYPTO_ALG_INTERNAL;
        inst->alg.halg.base.cra_flags = type;

        inst->alg.halg.digestsize = halg->digestsize;
        inst->alg.halg.statesize = halg->statesize;
        inst->alg.halg.base.cra_ctxsize = sizeof(struct mcryptd_hash_ctx);

        inst->alg.halg.base.cra_init = mcryptd_hash_init_tfm;
        inst->alg.halg.base.cra_exit = mcryptd_hash_exit_tfm;

        inst->alg.init   = mcryptd_hash_init_enqueue;
        inst->alg.update = mcryptd_hash_update_enqueue;
        inst->alg.final  = mcryptd_hash_final_enqueue;
        inst->alg.finup  = mcryptd_hash_finup_enqueue;
        inst->alg.export = mcryptd_hash_export;
        inst->alg.import = mcryptd_hash_import;
        inst->alg.setkey = mcryptd_hash_setkey;
        inst->alg.digest = mcryptd_hash_digest_enqueue;

        err = ahash_register_instance(tmpl, inst);
        if (err) {
                crypto_drop_ahash(&ctx->spawn);
out_free_inst:
                kfree(inst);
        }
out_put_alg:
        crypto_mod_put(alg);
        return err;
}

static struct mcryptd_queue mqueue;

static int mcryptd_create(struct crypto_template *tmpl, struct rtattr **tb)
{
        struct crypto_attr_type *algt;

        algt = crypto_get_attr_type(tb);
        if (IS_ERR(algt))
                return PTR_ERR(algt);

        switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
        case CRYPTO_ALG_TYPE_DIGEST:
                return mcryptd_create_hash(tmpl, tb, &mqueue);
        }

        return -EINVAL;
}

static void mcryptd_free(struct crypto_instance *inst)
{
        struct mcryptd_instance_ctx *ctx = crypto_instance_ctx(inst);
        struct hashd_instance_ctx *hctx = crypto_instance_ctx(inst);

        switch (inst->alg.cra_flags & CRYPTO_ALG_TYPE_MASK) {
        case CRYPTO_ALG_TYPE_AHASH:
                crypto_drop_ahash(&hctx->spawn);
                kfree(ahash_instance(inst));
                return;
        default:
                crypto_drop_spawn(&ctx->spawn);
                kfree(inst);
        }
}

static struct crypto_template mcryptd_tmpl = {
        .name = "mcryptd",
        .create = mcryptd_create,
        .free = mcryptd_free,
        .module = THIS_MODULE,
};

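/**
 * mcryptd_alloc_ahash - allocate an mcryptd-wrapped ahash transform
 * @alg_name: name of the algorithm to wrap
 * @type: algorithm type flags
 * @mask: algorithm type mask
 *
 * The cra_module check below rejects any "mcryptd(...)"-named
 * algorithm that was not actually instantiated by this template.
 */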
struct mcryptd_ahash *mcryptd_alloc_ahash(const char *alg_name,
                                          u32 type, u32 mask)
{
        char mcryptd_alg_name[CRYPTO_MAX_ALG_NAME];
        struct crypto_ahash *tfm;

        if (snprintf(mcryptd_alg_name, CRYPTO_MAX_ALG_NAME,
                     "mcryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
                return ERR_PTR(-EINVAL);
        tfm = crypto_alloc_ahash(mcryptd_alg_name, type, mask);
        if (IS_ERR(tfm))
                return ERR_CAST(tfm);
        if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
                crypto_free_ahash(tfm);
                return ERR_PTR(-EINVAL);
        }

        return __mcryptd_ahash_cast(tfm);
}
EXPORT_SYMBOL_GPL(mcryptd_alloc_ahash);

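/*
 * Illustrative usage sketch, not part of this file. The driver name
 * "__sha1_mb" is an assumption for illustration only; callers wrap an
 * internal multi-buffer implementation and later release it:
 *
 *        struct mcryptd_ahash *mtfm;
 *
 *        mtfm = mcryptd_alloc_ahash("__sha1_mb", CRYPTO_ALG_INTERNAL,
 *                                   CRYPTO_ALG_INTERNAL);
 *        if (IS_ERR(mtfm))
 *                return PTR_ERR(mtfm);
 *        ...
 *        mcryptd_free_ahash(mtfm);
 */
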
int ahash_mcryptd_digest(struct ahash_request *desc)
{
        return crypto_ahash_init(desc) ?: ahash_mcryptd_finup(desc);
}

int ahash_mcryptd_update(struct ahash_request *desc)
{
        /* alignment is to be done by multi-buffer crypto algorithm if needed */

        return crypto_ahash_update(desc);
}

int ahash_mcryptd_finup(struct ahash_request *desc)
{
        /* alignment is to be done by multi-buffer crypto algorithm if needed */

        return crypto_ahash_finup(desc);
}

int ahash_mcryptd_final(struct ahash_request *desc)
{
        /* alignment is to be done by multi-buffer crypto algorithm if needed */

        return crypto_ahash_final(desc);
}

struct crypto_ahash *mcryptd_ahash_child(struct mcryptd_ahash *tfm)
{
        struct mcryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

        return ctx->child;
}
EXPORT_SYMBOL_GPL(mcryptd_ahash_child);

struct ahash_request *mcryptd_ahash_desc(struct ahash_request *req)
{
        struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

        return &rctx->areq;
}
EXPORT_SYMBOL_GPL(mcryptd_ahash_desc);

void mcryptd_free_ahash(struct mcryptd_ahash *tfm)
{
        crypto_free_ahash(&tfm->base);
}
EXPORT_SYMBOL_GPL(mcryptd_free_ahash);

static int __init mcryptd_init(void)
{
        int err, cpu;
        struct mcryptd_flush_list *flist;

        mcryptd_flist = alloc_percpu(struct mcryptd_flush_list);
        if (!mcryptd_flist)
                return -ENOMEM;

        for_each_possible_cpu(cpu) {
                flist = per_cpu_ptr(mcryptd_flist, cpu);
                INIT_LIST_HEAD(&flist->list);
                mutex_init(&flist->lock);
        }

        err = mcryptd_init_queue(&mqueue, MCRYPTD_MAX_CPU_QLEN);
        if (err) {
                free_percpu(mcryptd_flist);
                return err;
        }

        err = crypto_register_template(&mcryptd_tmpl);
        if (err) {
                mcryptd_fini_queue(&mqueue);
                free_percpu(mcryptd_flist);
        }

        return err;
}

static void __exit mcryptd_exit(void)
{
        mcryptd_fini_queue(&mqueue);
        crypto_unregister_template(&mcryptd_tmpl);
        free_percpu(mcryptd_flist);
}

subsys_initcall(mcryptd_init);
module_exit(mcryptd_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Software async multibuffer crypto daemon");
MODULE_ALIAS_CRYPTO("mcryptd");