cryptd.c

/*
 * Software async crypto daemon.
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * Added AEAD support to cryptd.
 *    Authors: Tadeusz Struk (tadeusz.struk@intel.com)
 *             Adrian Hoban <adrian.hoban@intel.com>
 *             Gabriele Paoloni <gabriele.paoloni@intel.com>
 *             Aidan O'Mahony (aidan.o.mahony@intel.com)
 *    Copyright (c) 2010, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/internal/hash.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/cryptd.h>
#include <crypto/crypto_wq.h>
#include <linux/atomic.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>

#define CRYPTD_MAX_CPU_QLEN 1000
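
/*
 * Each CPU owns one cryptd_cpu_queue: a crypto request queue plus the
 * work item that drains it on that CPU.  struct cryptd_queue is simply
 * the per-CPU collection of those queues.
 */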
struct cryptd_cpu_queue {
	struct crypto_queue queue;
	struct work_struct work;
};

struct cryptd_queue {
	struct cryptd_cpu_queue __percpu *cpu_queue;
};

struct cryptd_instance_ctx {
	struct crypto_spawn spawn;
	struct cryptd_queue *queue;
};

struct skcipherd_instance_ctx {
	struct crypto_skcipher_spawn spawn;
	struct cryptd_queue *queue;
};

struct hashd_instance_ctx {
	struct crypto_shash_spawn spawn;
	struct cryptd_queue *queue;
};

struct aead_instance_ctx {
	struct crypto_aead_spawn aead_spawn;
	struct cryptd_queue *queue;
};

struct cryptd_blkcipher_ctx {
	atomic_t refcnt;
	struct crypto_blkcipher *child;
};

struct cryptd_blkcipher_request_ctx {
	crypto_completion_t complete;
};

struct cryptd_skcipher_ctx {
	atomic_t refcnt;
	struct crypto_skcipher *child;
};

struct cryptd_skcipher_request_ctx {
	crypto_completion_t complete;
};

struct cryptd_hash_ctx {
	atomic_t refcnt;
	struct crypto_shash *child;
};

struct cryptd_hash_request_ctx {
	crypto_completion_t complete;
	struct shash_desc desc;
};

struct cryptd_aead_ctx {
	atomic_t refcnt;
	struct crypto_aead *child;
};

struct cryptd_aead_request_ctx {
	crypto_completion_t complete;
};

static void cryptd_queue_worker(struct work_struct *work);
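
/*
 * Allocate one request queue and work item per possible CPU.  Returns 0
 * on success or -ENOMEM if the per-CPU allocation fails.
 */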
static int cryptd_init_queue(struct cryptd_queue *queue,
			     unsigned int max_cpu_qlen)
{
	int cpu;
	struct cryptd_cpu_queue *cpu_queue;

	queue->cpu_queue = alloc_percpu(struct cryptd_cpu_queue);
	if (!queue->cpu_queue)
		return -ENOMEM;
	for_each_possible_cpu(cpu) {
		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
		crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
		INIT_WORK(&cpu_queue->work, cryptd_queue_worker);
	}
	return 0;
}

static void cryptd_fini_queue(struct cryptd_queue *queue)
{
	int cpu;
	struct cryptd_cpu_queue *cpu_queue;

	for_each_possible_cpu(cpu) {
		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
		BUG_ON(cpu_queue->queue.qlen);
	}
	free_percpu(queue->cpu_queue);
}
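
/*
 * Queue a request on the current CPU's queue and kick that CPU's work
 * item.  If the queue is full and the request may not be backlogged,
 * the request is rejected without scheduling the work.  The refcnt is
 * read through crypto_tfm_ctx(), which relies on refcnt being the first
 * member of every cryptd_*_ctx structure; a reference is only taken
 * when the counter is already non-zero, i.e. for tfms obtained through
 * the cryptd_alloc_*() helpers below.
 */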
static int cryptd_enqueue_request(struct cryptd_queue *queue,
				  struct crypto_async_request *request)
{
	int cpu, err;
	struct cryptd_cpu_queue *cpu_queue;
	atomic_t *refcnt;
	bool may_backlog;

	cpu = get_cpu();
	cpu_queue = this_cpu_ptr(queue->cpu_queue);
	err = crypto_enqueue_request(&cpu_queue->queue, request);

	refcnt = crypto_tfm_ctx(request->tfm);
	may_backlog = request->flags & CRYPTO_TFM_REQ_MAY_BACKLOG;

	if (err == -EBUSY && !may_backlog)
		goto out_put_cpu;

	queue_work_on(cpu, kcrypto_wq, &cpu_queue->work);

	if (!atomic_read(refcnt))
		goto out_put_cpu;

	atomic_inc(refcnt);

out_put_cpu:
	put_cpu();

	return err;
}

/*
 * Called in workqueue context: perform one real crypto operation (via
 * req->complete) and reschedule the work item if there is more work to do.
 */
static void cryptd_queue_worker(struct work_struct *work)
{
	struct cryptd_cpu_queue *cpu_queue;
	struct crypto_async_request *req, *backlog;

	cpu_queue = container_of(work, struct cryptd_cpu_queue, work);
	/*
	 * Only handle one request at a time to avoid hogging the crypto
	 * workqueue.  preempt_disable/enable is used to prevent being
	 * preempted by cryptd_enqueue_request().  local_bh_disable/enable
	 * is used to prevent cryptd_enqueue_request() from being called
	 * from software interrupts.
	 */
	local_bh_disable();
	preempt_disable();
	backlog = crypto_get_backlog(&cpu_queue->queue);
	req = crypto_dequeue_request(&cpu_queue->queue);
	preempt_enable();
	local_bh_enable();

	if (!req)
		return;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);
	req->complete(req, 0);

	if (cpu_queue->queue.qlen)
		queue_work(kcrypto_wq, &cpu_queue->work);
}

static inline struct cryptd_queue *cryptd_get_queue(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);

	return ictx->queue;
}
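
/*
 * Carry the CRYPTO_ALG_INTERNAL bit from the template's type/mask
 * attributes into the lookup type and mask, so that cryptd can wrap
 * internal-only implementations when explicitly asked to.
 */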
static inline void cryptd_check_internal(struct rtattr **tb, u32 *type,
					 u32 *mask)
{
	struct crypto_attr_type *algt;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return;

	*type |= algt->type & CRYPTO_ALG_INTERNAL;
	*mask |= algt->mask & CRYPTO_ALG_INTERNAL;
}
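
/*
 * Legacy blkcipher path: a synchronous blkcipher is wrapped and exposed
 * as an asynchronous ablkcipher whose requests are executed from the
 * cryptd workqueue.
 */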
static int cryptd_blkcipher_setkey(struct crypto_ablkcipher *parent,
				   const u8 *key, unsigned int keylen)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(parent);
	struct crypto_blkcipher *child = ctx->child;
	int err;

	crypto_blkcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_blkcipher_set_flags(child, crypto_ablkcipher_get_flags(parent) &
					  CRYPTO_TFM_REQ_MASK);
	err = crypto_blkcipher_setkey(child, key, keylen);
	crypto_ablkcipher_set_flags(parent, crypto_blkcipher_get_flags(child) &
					    CRYPTO_TFM_RES_MASK);
	return err;
}

static void cryptd_blkcipher_crypt(struct ablkcipher_request *req,
				   struct crypto_blkcipher *child,
				   int err,
				   int (*crypt)(struct blkcipher_desc *desc,
						struct scatterlist *dst,
						struct scatterlist *src,
						unsigned int len))
{
	struct cryptd_blkcipher_request_ctx *rctx;
	struct cryptd_blkcipher_ctx *ctx;
	struct crypto_ablkcipher *tfm;
	struct blkcipher_desc desc;
	int refcnt;

	rctx = ablkcipher_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	desc.tfm = child;
	desc.info = req->info;
	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = crypt(&desc, req->dst, req->src, req->nbytes);

	req->base.complete = rctx->complete;

out:
	tfm = crypto_ablkcipher_reqtfm(req);
	ctx = crypto_ablkcipher_ctx(tfm);
	refcnt = atomic_read(&ctx->refcnt);

	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();

	if (err != -EINPROGRESS && refcnt && atomic_dec_and_test(&ctx->refcnt))
		crypto_free_ablkcipher(tfm);
}

static void cryptd_blkcipher_encrypt(struct crypto_async_request *req, int err)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(req->tfm);
	struct crypto_blkcipher *child = ctx->child;

	cryptd_blkcipher_crypt(ablkcipher_request_cast(req), child, err,
			       crypto_blkcipher_crt(child)->encrypt);
}

static void cryptd_blkcipher_decrypt(struct crypto_async_request *req, int err)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(req->tfm);
	struct crypto_blkcipher *child = ctx->child;

	cryptd_blkcipher_crypt(ablkcipher_request_cast(req), child, err,
			       crypto_blkcipher_crt(child)->decrypt);
}

static int cryptd_blkcipher_enqueue(struct ablkcipher_request *req,
				    crypto_completion_t compl)
{
	struct cryptd_blkcipher_request_ctx *rctx = ablkcipher_request_ctx(req);
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct cryptd_queue *queue;

	queue = cryptd_get_queue(crypto_ablkcipher_tfm(tfm));
	rctx->complete = req->base.complete;
	req->base.complete = compl;

	return cryptd_enqueue_request(queue, &req->base);
}

static int cryptd_blkcipher_encrypt_enqueue(struct ablkcipher_request *req)
{
	return cryptd_blkcipher_enqueue(req, cryptd_blkcipher_encrypt);
}

static int cryptd_blkcipher_decrypt_enqueue(struct ablkcipher_request *req)
{
	return cryptd_blkcipher_enqueue(req, cryptd_blkcipher_decrypt);
}

static int cryptd_blkcipher_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
	struct crypto_spawn *spawn = &ictx->spawn;
	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_blkcipher *cipher;

	cipher = crypto_spawn_blkcipher(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->child = cipher;
	tfm->crt_ablkcipher.reqsize =
		sizeof(struct cryptd_blkcipher_request_ctx);
	return 0;
}

static void cryptd_blkcipher_exit_tfm(struct crypto_tfm *tfm)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_blkcipher(ctx->child);
}
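
/*
 * Fill in the common fields of a cryptd instance: the driver name becomes
 * "cryptd(<child driver name>)", and priority, blocksize and alignmask are
 * inherited from the child, with the priority raised by 50.
 */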
static int cryptd_init_instance(struct crypto_instance *inst,
				struct crypto_alg *alg)
{
	if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)",
		     alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
		return -ENAMETOOLONG;

	memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);

	inst->alg.cra_priority = alg->cra_priority + 50;
	inst->alg.cra_blocksize = alg->cra_blocksize;
	inst->alg.cra_alignmask = alg->cra_alignmask;

	return 0;
}

static void *cryptd_alloc_instance(struct crypto_alg *alg, unsigned int head,
				   unsigned int tail)
{
	char *p;
	struct crypto_instance *inst;
	int err;

	p = kzalloc(head + sizeof(*inst) + tail, GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	inst = (void *)(p + head);

	err = cryptd_init_instance(inst, alg);
	if (err)
		goto out_free_inst;

out:
	return p;

out_free_inst:
	kfree(p);
	p = ERR_PTR(err);
	goto out;
}
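
/*
 * Instantiate "cryptd(<blkcipher>)": an asynchronous ablkcipher built
 * around a synchronous blkcipher spawn, with all handlers deferring to
 * the cryptd queue.
 */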
static int cryptd_create_blkcipher(struct crypto_template *tmpl,
				   struct rtattr **tb,
				   struct cryptd_queue *queue)
{
	struct cryptd_instance_ctx *ctx;
	struct crypto_instance *inst;
	struct crypto_alg *alg;
	u32 type = CRYPTO_ALG_TYPE_BLKCIPHER;
	u32 mask = CRYPTO_ALG_TYPE_MASK;
	int err;

	cryptd_check_internal(tb, &type, &mask);

	alg = crypto_get_attr_alg(tb, type, mask);
	if (IS_ERR(alg))
		return PTR_ERR(alg);

	inst = cryptd_alloc_instance(alg, 0, sizeof(*ctx));
	err = PTR_ERR(inst);
	if (IS_ERR(inst))
		goto out_put_alg;

	ctx = crypto_instance_ctx(inst);
	ctx->queue = queue;

	err = crypto_init_spawn(&ctx->spawn, alg, inst,
				CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC);
	if (err)
		goto out_free_inst;

	type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;
	if (alg->cra_flags & CRYPTO_ALG_INTERNAL)
		type |= CRYPTO_ALG_INTERNAL;
	inst->alg.cra_flags = type;
	inst->alg.cra_type = &crypto_ablkcipher_type;

	inst->alg.cra_ablkcipher.ivsize = alg->cra_blkcipher.ivsize;
	inst->alg.cra_ablkcipher.min_keysize = alg->cra_blkcipher.min_keysize;
	inst->alg.cra_ablkcipher.max_keysize = alg->cra_blkcipher.max_keysize;
	inst->alg.cra_ablkcipher.geniv = alg->cra_blkcipher.geniv;

	inst->alg.cra_ctxsize = sizeof(struct cryptd_blkcipher_ctx);

	inst->alg.cra_init = cryptd_blkcipher_init_tfm;
	inst->alg.cra_exit = cryptd_blkcipher_exit_tfm;

	inst->alg.cra_ablkcipher.setkey = cryptd_blkcipher_setkey;
	inst->alg.cra_ablkcipher.encrypt = cryptd_blkcipher_encrypt_enqueue;
	inst->alg.cra_ablkcipher.decrypt = cryptd_blkcipher_decrypt_enqueue;

	err = crypto_register_instance(tmpl, inst);
	if (err) {
		crypto_drop_spawn(&ctx->spawn);
out_free_inst:
		kfree(inst);
	}

out_put_alg:
	crypto_mod_put(alg);
	return err;
}
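
/*
 * skcipher path: requests are queued and then run on the child skcipher
 * from workqueue context, where sleeping is allowed.
 */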
static int cryptd_skcipher_setkey(struct crypto_skcipher *parent,
				  const u8 *key, unsigned int keylen)
{
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(parent);
	struct crypto_skcipher *child = ctx->child;
	int err;

	crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(parent) &
					 CRYPTO_TFM_REQ_MASK);
	err = crypto_skcipher_setkey(child, key, keylen);
	crypto_skcipher_set_flags(parent, crypto_skcipher_get_flags(child) &
					  CRYPTO_TFM_RES_MASK);
	return err;
}

static void cryptd_skcipher_complete(struct skcipher_request *req, int err)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
	int refcnt = atomic_read(&ctx->refcnt);

	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();

	if (err != -EINPROGRESS && refcnt && atomic_dec_and_test(&ctx->refcnt))
		crypto_free_skcipher(tfm);
}

static void cryptd_skcipher_encrypt(struct crypto_async_request *base,
				    int err)
{
	struct skcipher_request *req = skcipher_request_cast(base);
	struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_skcipher *child = ctx->child;
	SKCIPHER_REQUEST_ON_STACK(subreq, child);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	skcipher_request_set_tfm(subreq, child);
	skcipher_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP,
				      NULL, NULL);
	skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
				   req->iv);

	err = crypto_skcipher_encrypt(subreq);
	skcipher_request_zero(subreq);

	req->base.complete = rctx->complete;

out:
	cryptd_skcipher_complete(req, err);
}

static void cryptd_skcipher_decrypt(struct crypto_async_request *base,
				    int err)
{
	struct skcipher_request *req = skcipher_request_cast(base);
	struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_skcipher *child = ctx->child;
	SKCIPHER_REQUEST_ON_STACK(subreq, child);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	skcipher_request_set_tfm(subreq, child);
	skcipher_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP,
				      NULL, NULL);
	skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
				   req->iv);

	err = crypto_skcipher_decrypt(subreq);
	skcipher_request_zero(subreq);

	req->base.complete = rctx->complete;

out:
	cryptd_skcipher_complete(req, err);
}

static int cryptd_skcipher_enqueue(struct skcipher_request *req,
				   crypto_completion_t compl)
{
	struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct cryptd_queue *queue;

	queue = cryptd_get_queue(crypto_skcipher_tfm(tfm));
	rctx->complete = req->base.complete;
	req->base.complete = compl;

	return cryptd_enqueue_request(queue, &req->base);
}

static int cryptd_skcipher_encrypt_enqueue(struct skcipher_request *req)
{
	return cryptd_skcipher_enqueue(req, cryptd_skcipher_encrypt);
}

static int cryptd_skcipher_decrypt_enqueue(struct skcipher_request *req)
{
	return cryptd_skcipher_enqueue(req, cryptd_skcipher_decrypt);
}

static int cryptd_skcipher_init_tfm(struct crypto_skcipher *tfm)
{
	struct skcipher_instance *inst = skcipher_alg_instance(tfm);
	struct skcipherd_instance_ctx *ictx = skcipher_instance_ctx(inst);
	struct crypto_skcipher_spawn *spawn = &ictx->spawn;
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_skcipher *cipher;

	cipher = crypto_spawn_skcipher(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->child = cipher;
	crypto_skcipher_set_reqsize(
		tfm, sizeof(struct cryptd_skcipher_request_ctx));
	return 0;
}

static void cryptd_skcipher_exit_tfm(struct crypto_skcipher *tfm)
{
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);

	crypto_free_skcipher(ctx->child);
}

static void cryptd_skcipher_free(struct skcipher_instance *inst)
{
	struct skcipherd_instance_ctx *ctx = skcipher_instance_ctx(inst);

	crypto_drop_skcipher(&ctx->spawn);
}

static int cryptd_create_skcipher(struct crypto_template *tmpl,
				  struct rtattr **tb,
				  struct cryptd_queue *queue)
{
	struct skcipherd_instance_ctx *ctx;
	struct skcipher_instance *inst;
	struct skcipher_alg *alg;
	const char *name;
	u32 type;
	u32 mask;
	int err;

	type = 0;
	mask = CRYPTO_ALG_ASYNC;

	cryptd_check_internal(tb, &type, &mask);

	name = crypto_attr_alg_name(tb[1]);
	if (IS_ERR(name))
		return PTR_ERR(name);

	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;

	ctx = skcipher_instance_ctx(inst);
	ctx->queue = queue;

	crypto_set_skcipher_spawn(&ctx->spawn, skcipher_crypto_instance(inst));
	err = crypto_grab_skcipher(&ctx->spawn, name, type, mask);
	if (err)
		goto out_free_inst;

	alg = crypto_spawn_skcipher_alg(&ctx->spawn);
	err = cryptd_init_instance(skcipher_crypto_instance(inst), &alg->base);
	if (err)
		goto out_drop_skcipher;

	inst->alg.base.cra_flags = CRYPTO_ALG_ASYNC |
				   (alg->base.cra_flags & CRYPTO_ALG_INTERNAL);

	inst->alg.ivsize = crypto_skcipher_alg_ivsize(alg);
	inst->alg.chunksize = crypto_skcipher_alg_chunksize(alg);
	inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg);
	inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg);

	inst->alg.base.cra_ctxsize = sizeof(struct cryptd_skcipher_ctx);

	inst->alg.init = cryptd_skcipher_init_tfm;
	inst->alg.exit = cryptd_skcipher_exit_tfm;

	inst->alg.setkey = cryptd_skcipher_setkey;
	inst->alg.encrypt = cryptd_skcipher_encrypt_enqueue;
	inst->alg.decrypt = cryptd_skcipher_decrypt_enqueue;

	inst->free = cryptd_skcipher_free;

	err = skcipher_register_instance(tmpl, inst);
	if (err) {
out_drop_skcipher:
		crypto_drop_skcipher(&ctx->spawn);
out_free_inst:
		kfree(inst);
	}
	return err;
}
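
/*
 * Hash path: a synchronous shash is exposed as an asynchronous ahash.
 * The shash descriptor is kept in the ahash request context, so the
 * hash state travels with the request through the queue.
 */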
static int cryptd_hash_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct hashd_instance_ctx *ictx = crypto_instance_ctx(inst);
	struct crypto_shash_spawn *spawn = &ictx->spawn;
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_shash *hash;

	hash = crypto_spawn_shash(spawn);
	if (IS_ERR(hash))
		return PTR_ERR(hash);

	ctx->child = hash;
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct cryptd_hash_request_ctx) +
				 crypto_shash_descsize(hash));
	return 0;
}

static void cryptd_hash_exit_tfm(struct crypto_tfm *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_shash(ctx->child);
}

static int cryptd_hash_setkey(struct crypto_ahash *parent,
			      const u8 *key, unsigned int keylen)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(parent);
	struct crypto_shash *child = ctx->child;
	int err;

	crypto_shash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_shash_set_flags(child, crypto_ahash_get_flags(parent) &
				      CRYPTO_TFM_REQ_MASK);
	err = crypto_shash_setkey(child, key, keylen);
	crypto_ahash_set_flags(parent, crypto_shash_get_flags(child) &
				       CRYPTO_TFM_RES_MASK);
	return err;
}

static int cryptd_hash_enqueue(struct ahash_request *req,
			       crypto_completion_t compl)
{
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cryptd_queue *queue =
		cryptd_get_queue(crypto_ahash_tfm(tfm));

	rctx->complete = req->base.complete;
	req->base.complete = compl;

	return cryptd_enqueue_request(queue, &req->base);
}

static void cryptd_hash_complete(struct ahash_request *req, int err)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	int refcnt = atomic_read(&ctx->refcnt);

	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();

	if (err != -EINPROGRESS && refcnt && atomic_dec_and_test(&ctx->refcnt))
		crypto_free_ahash(tfm);
}

static void cryptd_hash_init(struct crypto_async_request *req_async, int err)
{
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
	struct crypto_shash *child = ctx->child;
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct shash_desc *desc = &rctx->desc;

	if (unlikely(err == -EINPROGRESS))
		goto out;

	desc->tfm = child;
	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = crypto_shash_init(desc);

	req->base.complete = rctx->complete;

out:
	cryptd_hash_complete(req, err);
}

static int cryptd_hash_init_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_init);
}

static void cryptd_hash_update(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx;

	rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	err = shash_ahash_update(req, &rctx->desc);

	req->base.complete = rctx->complete;

out:
	cryptd_hash_complete(req, err);
}

static int cryptd_hash_update_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_update);
}

static void cryptd_hash_final(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	err = crypto_shash_final(&rctx->desc, req->result);

	req->base.complete = rctx->complete;

out:
	cryptd_hash_complete(req, err);
}

static int cryptd_hash_final_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_final);
}

static void cryptd_hash_finup(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	err = shash_ahash_finup(req, &rctx->desc);

	req->base.complete = rctx->complete;

out:
	cryptd_hash_complete(req, err);
}

static int cryptd_hash_finup_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_finup);
}

static void cryptd_hash_digest(struct crypto_async_request *req_async, int err)
{
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
	struct crypto_shash *child = ctx->child;
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct shash_desc *desc = &rctx->desc;

	if (unlikely(err == -EINPROGRESS))
		goto out;

	desc->tfm = child;
	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = shash_ahash_digest(req, desc);

	req->base.complete = rctx->complete;

out:
	cryptd_hash_complete(req, err);
}

static int cryptd_hash_digest_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_digest);
}

static int cryptd_hash_export(struct ahash_request *req, void *out)
{
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	return crypto_shash_export(&rctx->desc, out);
}

static int cryptd_hash_import(struct ahash_request *req, const void *in)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct shash_desc *desc = cryptd_shash_desc(req);

	desc->tfm = ctx->child;
	desc->flags = req->base.flags;

	return crypto_shash_import(desc, in);
}

static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
			      struct cryptd_queue *queue)
{
	struct hashd_instance_ctx *ctx;
	struct ahash_instance *inst;
	struct shash_alg *salg;
	struct crypto_alg *alg;
	u32 type = 0;
	u32 mask = 0;
	int err;

	cryptd_check_internal(tb, &type, &mask);

	salg = shash_attr_alg(tb[1], type, mask);
	if (IS_ERR(salg))
		return PTR_ERR(salg);

	alg = &salg->base;
	inst = cryptd_alloc_instance(alg, ahash_instance_headroom(),
				     sizeof(*ctx));
	err = PTR_ERR(inst);
	if (IS_ERR(inst))
		goto out_put_alg;

	ctx = ahash_instance_ctx(inst);
	ctx->queue = queue;

	err = crypto_init_shash_spawn(&ctx->spawn, salg,
				      ahash_crypto_instance(inst));
	if (err)
		goto out_free_inst;

	type = CRYPTO_ALG_ASYNC;
	if (alg->cra_flags & CRYPTO_ALG_INTERNAL)
		type |= CRYPTO_ALG_INTERNAL;
	inst->alg.halg.base.cra_flags = type;

	inst->alg.halg.digestsize = salg->digestsize;
	inst->alg.halg.statesize = salg->statesize;
	inst->alg.halg.base.cra_ctxsize = sizeof(struct cryptd_hash_ctx);

	inst->alg.halg.base.cra_init = cryptd_hash_init_tfm;
	inst->alg.halg.base.cra_exit = cryptd_hash_exit_tfm;

	inst->alg.init = cryptd_hash_init_enqueue;
	inst->alg.update = cryptd_hash_update_enqueue;
	inst->alg.final = cryptd_hash_final_enqueue;
	inst->alg.finup = cryptd_hash_finup_enqueue;
	inst->alg.export = cryptd_hash_export;
	inst->alg.import = cryptd_hash_import;
	inst->alg.setkey = cryptd_hash_setkey;
	inst->alg.digest = cryptd_hash_digest_enqueue;

	err = ahash_register_instance(tmpl, inst);
	if (err) {
		crypto_drop_shash(&ctx->spawn);
out_free_inst:
		kfree(inst);
	}

out_put_alg:
	crypto_mod_put(alg);
	return err;
}
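
/*
 * AEAD path: setkey and setauthsize are forwarded straight to the child;
 * encrypt and decrypt requests are queued and then run on the child from
 * workqueue context.
 */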
static int cryptd_aead_setkey(struct crypto_aead *parent,
			      const u8 *key, unsigned int keylen)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(parent);
	struct crypto_aead *child = ctx->child;

	return crypto_aead_setkey(child, key, keylen);
}

static int cryptd_aead_setauthsize(struct crypto_aead *parent,
				   unsigned int authsize)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(parent);
	struct crypto_aead *child = ctx->child;

	return crypto_aead_setauthsize(child, authsize);
}

static void cryptd_aead_crypt(struct aead_request *req,
			      struct crypto_aead *child,
			      int err,
			      int (*crypt)(struct aead_request *req))
{
	struct cryptd_aead_request_ctx *rctx;
	struct cryptd_aead_ctx *ctx;
	crypto_completion_t compl;
	struct crypto_aead *tfm;
	int refcnt;

	rctx = aead_request_ctx(req);
	compl = rctx->complete;

	tfm = crypto_aead_reqtfm(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;
	aead_request_set_tfm(req, child);
	err = crypt(req);

out:
	ctx = crypto_aead_ctx(tfm);
	refcnt = atomic_read(&ctx->refcnt);

	local_bh_disable();
	compl(&req->base, err);
	local_bh_enable();

	if (err != -EINPROGRESS && refcnt && atomic_dec_and_test(&ctx->refcnt))
		crypto_free_aead(tfm);
}

static void cryptd_aead_encrypt(struct crypto_async_request *areq, int err)
{
	struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(areq->tfm);
	struct crypto_aead *child = ctx->child;
	struct aead_request *req;

	req = container_of(areq, struct aead_request, base);
	cryptd_aead_crypt(req, child, err, crypto_aead_alg(child)->encrypt);
}

static void cryptd_aead_decrypt(struct crypto_async_request *areq, int err)
{
	struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(areq->tfm);
	struct crypto_aead *child = ctx->child;
	struct aead_request *req;

	req = container_of(areq, struct aead_request, base);
	cryptd_aead_crypt(req, child, err, crypto_aead_alg(child)->decrypt);
}

static int cryptd_aead_enqueue(struct aead_request *req,
			       crypto_completion_t compl)
{
	struct cryptd_aead_request_ctx *rctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct cryptd_queue *queue = cryptd_get_queue(crypto_aead_tfm(tfm));

	rctx->complete = req->base.complete;
	req->base.complete = compl;
	return cryptd_enqueue_request(queue, &req->base);
}

static int cryptd_aead_encrypt_enqueue(struct aead_request *req)
{
	return cryptd_aead_enqueue(req, cryptd_aead_encrypt);
}

static int cryptd_aead_decrypt_enqueue(struct aead_request *req)
{
	return cryptd_aead_enqueue(req, cryptd_aead_decrypt);
}

static int cryptd_aead_init_tfm(struct crypto_aead *tfm)
{
	struct aead_instance *inst = aead_alg_instance(tfm);
	struct aead_instance_ctx *ictx = aead_instance_ctx(inst);
	struct crypto_aead_spawn *spawn = &ictx->aead_spawn;
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct crypto_aead *cipher;

	cipher = crypto_spawn_aead(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->child = cipher;
	crypto_aead_set_reqsize(
		tfm, max((unsigned)sizeof(struct cryptd_aead_request_ctx),
			 crypto_aead_reqsize(cipher)));
	return 0;
}

static void cryptd_aead_exit_tfm(struct crypto_aead *tfm)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(tfm);

	crypto_free_aead(ctx->child);
}

static int cryptd_create_aead(struct crypto_template *tmpl,
			      struct rtattr **tb,
			      struct cryptd_queue *queue)
{
	struct aead_instance_ctx *ctx;
	struct aead_instance *inst;
	struct aead_alg *alg;
	const char *name;
	u32 type = 0;
	u32 mask = CRYPTO_ALG_ASYNC;
	int err;

	cryptd_check_internal(tb, &type, &mask);

	name = crypto_attr_alg_name(tb[1]);
	if (IS_ERR(name))
		return PTR_ERR(name);

	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;

	ctx = aead_instance_ctx(inst);
	ctx->queue = queue;

	crypto_set_aead_spawn(&ctx->aead_spawn, aead_crypto_instance(inst));
	err = crypto_grab_aead(&ctx->aead_spawn, name, type, mask);
	if (err)
		goto out_free_inst;

	alg = crypto_spawn_aead_alg(&ctx->aead_spawn);
	err = cryptd_init_instance(aead_crypto_instance(inst), &alg->base);
	if (err)
		goto out_drop_aead;

	inst->alg.base.cra_flags = CRYPTO_ALG_ASYNC |
				   (alg->base.cra_flags & CRYPTO_ALG_INTERNAL);
	inst->alg.base.cra_ctxsize = sizeof(struct cryptd_aead_ctx);

	inst->alg.ivsize = crypto_aead_alg_ivsize(alg);
	inst->alg.maxauthsize = crypto_aead_alg_maxauthsize(alg);

	inst->alg.init = cryptd_aead_init_tfm;
	inst->alg.exit = cryptd_aead_exit_tfm;
	inst->alg.setkey = cryptd_aead_setkey;
	inst->alg.setauthsize = cryptd_aead_setauthsize;
	inst->alg.encrypt = cryptd_aead_encrypt_enqueue;
	inst->alg.decrypt = cryptd_aead_decrypt_enqueue;

	err = aead_register_instance(tmpl, inst);
	if (err) {
out_drop_aead:
		crypto_drop_aead(&ctx->aead_spawn);
out_free_inst:
		kfree(inst);
	}
	return err;
}

static struct cryptd_queue queue;
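
/*
 * Template entry point: dispatch on the requested algorithm type and
 * build the matching cryptd instance around the named child algorithm.
 */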
static int cryptd_create(struct crypto_template *tmpl, struct rtattr **tb)
{
	struct crypto_attr_type *algt;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return PTR_ERR(algt);

	switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_BLKCIPHER:
		if ((algt->type & CRYPTO_ALG_TYPE_MASK) ==
		    CRYPTO_ALG_TYPE_BLKCIPHER)
			return cryptd_create_blkcipher(tmpl, tb, &queue);

		return cryptd_create_skcipher(tmpl, tb, &queue);
	case CRYPTO_ALG_TYPE_DIGEST:
		return cryptd_create_hash(tmpl, tb, &queue);
	case CRYPTO_ALG_TYPE_AEAD:
		return cryptd_create_aead(tmpl, tb, &queue);
	}

	return -EINVAL;
}

static void cryptd_free(struct crypto_instance *inst)
{
	struct cryptd_instance_ctx *ctx = crypto_instance_ctx(inst);
	struct hashd_instance_ctx *hctx = crypto_instance_ctx(inst);
	struct aead_instance_ctx *aead_ctx = crypto_instance_ctx(inst);

	switch (inst->alg.cra_flags & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_AHASH:
		crypto_drop_shash(&hctx->spawn);
		kfree(ahash_instance(inst));
		return;
	case CRYPTO_ALG_TYPE_AEAD:
		crypto_drop_aead(&aead_ctx->aead_spawn);
		kfree(aead_instance(inst));
		return;
	default:
		crypto_drop_spawn(&ctx->spawn);
		kfree(inst);
	}
}

static struct crypto_template cryptd_tmpl = {
	.name = "cryptd",
	.create = cryptd_create,
	.free = cryptd_free,
	.module = THIS_MODULE,
};
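
/*
 * Exported helpers for callers (such as arch-specific glue code) that
 * want a cryptd-wrapped tfm directly.  Each allocator builds the
 * "cryptd(<alg>)" name, checks that the resulting tfm really belongs to
 * this module, and sets ctx->refcnt to 1 so that cryptd_*_queued() and
 * cryptd_free_*() can track requests still sitting in the queue.
 *
 * Illustrative call, with a hypothetical internal driver name:
 *
 *	hash = cryptd_alloc_ahash("__some-internal-sha256",
 *				  CRYPTO_ALG_INTERNAL, CRYPTO_ALG_INTERNAL);
 */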
struct cryptd_ablkcipher *cryptd_alloc_ablkcipher(const char *alg_name,
						  u32 type, u32 mask)
{
	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct cryptd_blkcipher_ctx *ctx;
	struct crypto_tfm *tfm;

	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);
	type = crypto_skcipher_type(type);
	mask &= ~CRYPTO_ALG_TYPE_MASK;
	mask |= (CRYPTO_ALG_GENIV | CRYPTO_ALG_TYPE_BLKCIPHER_MASK);
	tfm = crypto_alloc_base(cryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);
	if (tfm->__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_tfm(tfm);
		return ERR_PTR(-EINVAL);
	}

	ctx = crypto_tfm_ctx(tfm);
	atomic_set(&ctx->refcnt, 1);

	return __cryptd_ablkcipher_cast(__crypto_ablkcipher_cast(tfm));
}
EXPORT_SYMBOL_GPL(cryptd_alloc_ablkcipher);

struct crypto_blkcipher *cryptd_ablkcipher_child(struct cryptd_ablkcipher *tfm)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(&tfm->base);

	return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_ablkcipher_child);

bool cryptd_ablkcipher_queued(struct cryptd_ablkcipher *tfm)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(&tfm->base);

	return atomic_read(&ctx->refcnt) - 1;
}
EXPORT_SYMBOL_GPL(cryptd_ablkcipher_queued);

void cryptd_free_ablkcipher(struct cryptd_ablkcipher *tfm)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(&tfm->base);

	if (atomic_dec_and_test(&ctx->refcnt))
		crypto_free_ablkcipher(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_ablkcipher);

struct cryptd_skcipher *cryptd_alloc_skcipher(const char *alg_name,
					      u32 type, u32 mask)
{
	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct cryptd_skcipher_ctx *ctx;
	struct crypto_skcipher *tfm;

	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);

	tfm = crypto_alloc_skcipher(cryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);

	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_skcipher(tfm);
		return ERR_PTR(-EINVAL);
	}

	ctx = crypto_skcipher_ctx(tfm);
	atomic_set(&ctx->refcnt, 1);

	return container_of(tfm, struct cryptd_skcipher, base);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_skcipher);

struct crypto_skcipher *cryptd_skcipher_child(struct cryptd_skcipher *tfm)
{
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);

	return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_skcipher_child);

bool cryptd_skcipher_queued(struct cryptd_skcipher *tfm)
{
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);

	return atomic_read(&ctx->refcnt) - 1;
}
EXPORT_SYMBOL_GPL(cryptd_skcipher_queued);

void cryptd_free_skcipher(struct cryptd_skcipher *tfm)
{
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);

	if (atomic_dec_and_test(&ctx->refcnt))
		crypto_free_skcipher(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_skcipher);

struct cryptd_ahash *cryptd_alloc_ahash(const char *alg_name,
					u32 type, u32 mask)
{
	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct cryptd_hash_ctx *ctx;
	struct crypto_ahash *tfm;

	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);
	tfm = crypto_alloc_ahash(cryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);
	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_ahash(tfm);
		return ERR_PTR(-EINVAL);
	}

	ctx = crypto_ahash_ctx(tfm);
	atomic_set(&ctx->refcnt, 1);

	return __cryptd_ahash_cast(tfm);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_ahash);

struct crypto_shash *cryptd_ahash_child(struct cryptd_ahash *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

	return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_ahash_child);

struct shash_desc *cryptd_shash_desc(struct ahash_request *req)
{
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	return &rctx->desc;
}
EXPORT_SYMBOL_GPL(cryptd_shash_desc);

bool cryptd_ahash_queued(struct cryptd_ahash *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

	return atomic_read(&ctx->refcnt) - 1;
}
EXPORT_SYMBOL_GPL(cryptd_ahash_queued);

void cryptd_free_ahash(struct cryptd_ahash *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

	if (atomic_dec_and_test(&ctx->refcnt))
		crypto_free_ahash(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_ahash);

struct cryptd_aead *cryptd_alloc_aead(const char *alg_name,
				      u32 type, u32 mask)
{
	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct cryptd_aead_ctx *ctx;
	struct crypto_aead *tfm;

	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);
	tfm = crypto_alloc_aead(cryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);
	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_aead(tfm);
		return ERR_PTR(-EINVAL);
	}

	ctx = crypto_aead_ctx(tfm);
	atomic_set(&ctx->refcnt, 1);

	return __cryptd_aead_cast(tfm);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_aead);

struct crypto_aead *cryptd_aead_child(struct cryptd_aead *tfm)
{
	struct cryptd_aead_ctx *ctx;

	ctx = crypto_aead_ctx(&tfm->base);
	return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_aead_child);

bool cryptd_aead_queued(struct cryptd_aead *tfm)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(&tfm->base);

	return atomic_read(&ctx->refcnt) - 1;
}
EXPORT_SYMBOL_GPL(cryptd_aead_queued);

void cryptd_free_aead(struct cryptd_aead *tfm)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(&tfm->base);

	if (atomic_dec_and_test(&ctx->refcnt))
		crypto_free_aead(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_aead);

static int __init cryptd_init(void)
{
	int err;

	err = cryptd_init_queue(&queue, CRYPTD_MAX_CPU_QLEN);
	if (err)
		return err;

	err = crypto_register_template(&cryptd_tmpl);
	if (err)
		cryptd_fini_queue(&queue);

	return err;
}

static void __exit cryptd_exit(void)
{
	cryptd_fini_queue(&queue);
	crypto_unregister_template(&cryptd_tmpl);
}

subsys_initcall(cryptd_init);
module_exit(cryptd_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Software async crypto daemon");
MODULE_ALIAS_CRYPTO("cryptd");