/*
 * algif_skcipher: User-space interface for skcipher algorithms
 *
 * This file provides the user-space API for symmetric key ciphers.
 *
 * Copyright (c) 2010 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */
#include <crypto/scatterwalk.h>
#include <crypto/skcipher.h>
#include <crypto/if_alg.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/net.h>
#include <net/sock.h>
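
/*
 * One node in the socket's queue of transmit buffers.  sg is a
 * variable-length scatterlist (the last slot is reserved for chaining to
 * the next node); cur counts how many entries are in use.
 */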
struct skcipher_sg_list {
        struct list_head list;

        int cur;

        struct scatterlist sg[0];
};

struct skcipher_tfm {
        struct crypto_skcipher *skcipher;
        bool has_key;
};
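
/*
 * Per-socket state: the transmit scatterlist queue (tsgl), the IV buffer,
 * how much data is queued (used), whether more data is expected (more),
 * the selected direction (enc), and the embedded skcipher_request used by
 * the synchronous path.
 */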
struct skcipher_ctx {
        struct list_head tsgl;
        struct af_alg_sgl rsgl;

        void *iv;

        struct af_alg_completion completion;

        atomic_t inflight;
        size_t used;

        unsigned int len;
        bool more;
        bool merge;
        bool enc;

        struct skcipher_request req;
};

struct skcipher_async_rsgl {
        struct af_alg_sgl sgl;
        struct list_head list;
};
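
/*
 * State for one in-flight asynchronous (AIO) request: the user's kiocb,
 * the list of receive scatterlists, the transmit scatterlist taken over
 * from the socket context, and the skcipher_request itself.  The IV copy
 * is placed after the request's context area (see skcipher_recvmsg_async).
 */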
struct skcipher_async_req {
        struct kiocb *iocb;
        struct skcipher_async_rsgl first_sgl;
        struct list_head list;
        struct scatterlist *tsg;
        atomic_t *inflight;
        struct skcipher_request req;
};

#define MAX_SGL_ENTS ((4096 - sizeof(struct skcipher_sg_list)) / \
                      sizeof(struct scatterlist) - 1)
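
/*
 * Release everything referenced by an async request: each receive sgl on
 * the list (freeing the dynamically allocated entries), and every page
 * pinned in the transmit scatterlist.
 */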
static void skcipher_free_async_sgls(struct skcipher_async_req *sreq)
{
        struct skcipher_async_rsgl *rsgl, *tmp;
        struct scatterlist *sgl;
        struct scatterlist *sg;
        int i, n;

        list_for_each_entry_safe(rsgl, tmp, &sreq->list, list) {
                af_alg_free_sg(&rsgl->sgl);
                if (rsgl != &sreq->first_sgl)
                        kfree(rsgl);
        }
        sgl = sreq->tsg;
        n = sg_nents(sgl);
        for_each_sg(sgl, sg, n, i)
                put_page(sg_page(sg));

        kfree(sreq->tsg);
}
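
/*
 * Completion callback for async requests: drop the inflight count, free
 * the request's scatterlists and the request itself, then complete the
 * user's iocb with the crypto layer's result code.
 */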
static void skcipher_async_cb(struct crypto_async_request *req, int err)
{
        struct skcipher_async_req *sreq = req->data;
        struct kiocb *iocb = sreq->iocb;

        atomic_dec(sreq->inflight);
        skcipher_free_async_sgls(sreq);
        kzfree(sreq);
        iocb->ki_complete(iocb, err, err);
}

static inline int skcipher_sndbuf(struct sock *sk)
{
        struct alg_sock *ask = alg_sk(sk);
        struct skcipher_ctx *ctx = ask->private;

        return max_t(int, max_t(int, sk->sk_sndbuf & PAGE_MASK, PAGE_SIZE) -
                          ctx->used, 0);
}

static inline bool skcipher_writable(struct sock *sk)
{
        return PAGE_SIZE <= skcipher_sndbuf(sk);
}
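
/*
 * Make sure the tail of the transmit queue has room for at least one more
 * scatterlist entry, allocating and chaining a new skcipher_sg_list node
 * when the current one is full.
 */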
static int skcipher_alloc_sgl(struct sock *sk)
{
        struct alg_sock *ask = alg_sk(sk);
        struct skcipher_ctx *ctx = ask->private;
        struct skcipher_sg_list *sgl;
        struct scatterlist *sg = NULL;

        sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list);
        if (!list_empty(&ctx->tsgl))
                sg = sgl->sg;

        if (!sg || sgl->cur >= MAX_SGL_ENTS) {
                sgl = sock_kmalloc(sk, sizeof(*sgl) +
                                       sizeof(sgl->sg[0]) * (MAX_SGL_ENTS + 1),
                                   GFP_KERNEL);
                if (!sgl)
                        return -ENOMEM;

                sg_init_table(sgl->sg, MAX_SGL_ENTS + 1);
                sgl->cur = 0;

                if (sg)
                        sg_chain(sg, MAX_SGL_ENTS + 1, sgl->sg);

                list_add_tail(&sgl->list, &ctx->tsgl);
        }

        return 0;
}
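
/*
 * Consume "used" bytes from the head of the transmit queue, dropping page
 * references for fully drained entries (when "put" is set) and freeing
 * emptied list nodes.  Returns early if it stops inside a partially
 * consumed entry.
 */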
static void skcipher_pull_sgl(struct sock *sk, size_t used, int put)
{
        struct alg_sock *ask = alg_sk(sk);
        struct skcipher_ctx *ctx = ask->private;
        struct skcipher_sg_list *sgl;
        struct scatterlist *sg;
        int i;

        while (!list_empty(&ctx->tsgl)) {
                sgl = list_first_entry(&ctx->tsgl, struct skcipher_sg_list,
                                       list);
                sg = sgl->sg;

                for (i = 0; i < sgl->cur; i++) {
                        size_t plen = min_t(size_t, used, sg[i].length);

                        if (!sg_page(sg + i))
                                continue;

                        sg[i].length -= plen;
                        sg[i].offset += plen;

                        used -= plen;
                        ctx->used -= plen;

                        if (sg[i].length)
                                return;
                        if (put)
                                put_page(sg_page(sg + i));
                        sg_assign_page(sg + i, NULL);
                }

                list_del(&sgl->list);
                sock_kfree_s(sk, sgl,
                             sizeof(*sgl) + sizeof(sgl->sg[0]) *
                                            (MAX_SGL_ENTS + 1));
        }

        if (!ctx->used)
                ctx->merge = 0;
}

static void skcipher_free_sgl(struct sock *sk)
{
        struct alg_sock *ask = alg_sk(sk);
        struct skcipher_ctx *ctx = ask->private;

        skcipher_pull_sgl(sk, ctx->used, 1);
}

static int skcipher_wait_for_wmem(struct sock *sk, unsigned flags)
{
        DEFINE_WAIT_FUNC(wait, woken_wake_function);
        int err = -ERESTARTSYS;
        long timeout;

        if (flags & MSG_DONTWAIT)
                return -EAGAIN;

        sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);

        add_wait_queue(sk_sleep(sk), &wait);
        for (;;) {
                if (signal_pending(current))
                        break;
                timeout = MAX_SCHEDULE_TIMEOUT;
                if (sk_wait_event(sk, &timeout, skcipher_writable(sk), &wait)) {
                        err = 0;
                        break;
                }
        }
        remove_wait_queue(sk_sleep(sk), &wait);

        return err;
}

static void skcipher_wmem_wakeup(struct sock *sk)
{
        struct socket_wq *wq;

        if (!skcipher_writable(sk))
                return;

        rcu_read_lock();
        wq = rcu_dereference(sk->sk_wq);
        if (skwq_has_sleeper(wq))
                wake_up_interruptible_sync_poll(&wq->wait, POLLIN |
                                                           POLLRDNORM |
                                                           POLLRDBAND);
        sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
        rcu_read_unlock();
}

static int skcipher_wait_for_data(struct sock *sk, unsigned flags)
{
        DEFINE_WAIT_FUNC(wait, woken_wake_function);
        struct alg_sock *ask = alg_sk(sk);
        struct skcipher_ctx *ctx = ask->private;
        long timeout;
        int err = -ERESTARTSYS;

        if (flags & MSG_DONTWAIT)
                return -EAGAIN;

        sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);

        add_wait_queue(sk_sleep(sk), &wait);
        for (;;) {
                if (signal_pending(current))
                        break;
                timeout = MAX_SCHEDULE_TIMEOUT;
                if (sk_wait_event(sk, &timeout, ctx->used, &wait)) {
                        err = 0;
                        break;
                }
        }
        remove_wait_queue(sk_sleep(sk), &wait);

        sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);

        return err;
}

static void skcipher_data_wakeup(struct sock *sk)
{
        struct alg_sock *ask = alg_sk(sk);
        struct skcipher_ctx *ctx = ask->private;
        struct socket_wq *wq;

        if (!ctx->used)
                return;

        rcu_read_lock();
        wq = rcu_dereference(sk->sk_wq);
        if (skwq_has_sleeper(wq))
                wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
                                                           POLLRDNORM |
                                                           POLLRDBAND);
        sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
        rcu_read_unlock();
}
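
/*
 * sendmsg() on the data socket queues plaintext/ciphertext input for a
 * later recvmsg().  A control message may select the operation (encrypt
 * or decrypt) and supply the IV; payload bytes are copied into freshly
 * allocated pages on the transmit queue, merging into the tail page when
 * it still has room.
 */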
static int skcipher_sendmsg(struct socket *sock, struct msghdr *msg,
                            size_t size)
{
        struct sock *sk = sock->sk;
        struct alg_sock *ask = alg_sk(sk);
        struct sock *psk = ask->parent;
        struct alg_sock *pask = alg_sk(psk);
        struct skcipher_ctx *ctx = ask->private;
        struct skcipher_tfm *skc = pask->private;
        struct crypto_skcipher *tfm = skc->skcipher;
        unsigned ivsize = crypto_skcipher_ivsize(tfm);
        struct skcipher_sg_list *sgl;
        struct af_alg_control con = {};
        long copied = 0;
        bool enc = 0;
        bool init = 0;
        int err;
        int i;

        if (msg->msg_controllen) {
                err = af_alg_cmsg_send(msg, &con);
                if (err)
                        return err;

                init = 1;
                switch (con.op) {
                case ALG_OP_ENCRYPT:
                        enc = 1;
                        break;
                case ALG_OP_DECRYPT:
                        enc = 0;
                        break;
                default:
                        return -EINVAL;
                }

                if (con.iv && con.iv->ivlen != ivsize)
                        return -EINVAL;
        }

        err = -EINVAL;

        lock_sock(sk);
        if (!ctx->more && ctx->used)
                goto unlock;

        if (init) {
                ctx->enc = enc;
                if (con.iv)
                        memcpy(ctx->iv, con.iv->iv, ivsize);
        }

        while (size) {
                struct scatterlist *sg;
                unsigned long len = size;
                size_t plen;

                if (ctx->merge) {
                        sgl = list_entry(ctx->tsgl.prev,
                                         struct skcipher_sg_list, list);
                        sg = sgl->sg + sgl->cur - 1;
                        len = min_t(unsigned long, len,
                                    PAGE_SIZE - sg->offset - sg->length);

                        err = memcpy_from_msg(page_address(sg_page(sg)) +
                                              sg->offset + sg->length,
                                              msg, len);
                        if (err)
                                goto unlock;

                        sg->length += len;
                        ctx->merge = (sg->offset + sg->length) &
                                     (PAGE_SIZE - 1);

                        ctx->used += len;
                        copied += len;
                        size -= len;
                        continue;
                }

                if (!skcipher_writable(sk)) {
                        err = skcipher_wait_for_wmem(sk, msg->msg_flags);
                        if (err)
                                goto unlock;
                }

                len = min_t(unsigned long, len, skcipher_sndbuf(sk));

                err = skcipher_alloc_sgl(sk);
                if (err)
                        goto unlock;

                sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list);
                sg = sgl->sg;
                if (sgl->cur)
                        sg_unmark_end(sg + sgl->cur - 1);
                do {
                        i = sgl->cur;
                        plen = min_t(size_t, len, PAGE_SIZE);

                        sg_assign_page(sg + i, alloc_page(GFP_KERNEL));
                        err = -ENOMEM;
                        if (!sg_page(sg + i))
                                goto unlock;

                        err = memcpy_from_msg(page_address(sg_page(sg + i)),
                                              msg, plen);
                        if (err) {
                                __free_page(sg_page(sg + i));
                                sg_assign_page(sg + i, NULL);
                                goto unlock;
                        }

                        sg[i].length = plen;
                        len -= plen;
                        ctx->used += plen;
                        copied += plen;
                        size -= plen;
                        sgl->cur++;
                } while (len && sgl->cur < MAX_SGL_ENTS);

                if (!size)
                        sg_mark_end(sg + sgl->cur - 1);

                ctx->merge = plen & (PAGE_SIZE - 1);
        }

        err = 0;

        ctx->more = msg->msg_flags & MSG_MORE;

unlock:
        skcipher_data_wakeup(sk);
        release_sock(sk);

        return copied ?: err;
}
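
/*
 * sendpage() on the data socket: instead of copying, take a reference on
 * the caller's page and link it directly into the transmit queue.
 */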
static ssize_t skcipher_sendpage(struct socket *sock, struct page *page,
                                 int offset, size_t size, int flags)
{
        struct sock *sk = sock->sk;
        struct alg_sock *ask = alg_sk(sk);
        struct skcipher_ctx *ctx = ask->private;
        struct skcipher_sg_list *sgl;
        int err = -EINVAL;

        if (flags & MSG_SENDPAGE_NOTLAST)
                flags |= MSG_MORE;

        lock_sock(sk);
        if (!ctx->more && ctx->used)
                goto unlock;

        if (!size)
                goto done;

        if (!skcipher_writable(sk)) {
                err = skcipher_wait_for_wmem(sk, flags);
                if (err)
                        goto unlock;
        }

        err = skcipher_alloc_sgl(sk);
        if (err)
                goto unlock;

        ctx->merge = 0;
        sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list);

        if (sgl->cur)
                sg_unmark_end(sgl->sg + sgl->cur - 1);

        sg_mark_end(sgl->sg + sgl->cur);
        get_page(page);
        sg_set_page(sgl->sg + sgl->cur, page, size, offset);
        sgl->cur++;
        ctx->used += size;

done:
        ctx->more = flags & MSG_MORE;

unlock:
        skcipher_data_wakeup(sk);
        release_sock(sk);

        return err ?: size;
}

static int skcipher_all_sg_nents(struct skcipher_ctx *ctx)
{
        struct skcipher_sg_list *sgl;
        struct scatterlist *sg;
        int nents = 0;

        list_for_each_entry(sgl, &ctx->tsgl, list) {
                sg = sgl->sg;

                while (!sg->length)
                        sg++;

                nents += sg_nents(sg);
        }
        return nents;
}
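
/*
 * AIO read path: build a private copy of the transmit scatterlist, map
 * the user's read buffers as the destination, and submit the cipher
 * request without waiting.  On -EINPROGRESS the request completes later
 * via skcipher_async_cb(); otherwise it finished (or failed)
 * synchronously and is cleaned up here.
 */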
static int skcipher_recvmsg_async(struct socket *sock, struct msghdr *msg,
                                  int flags)
{
        struct sock *sk = sock->sk;
        struct alg_sock *ask = alg_sk(sk);
        struct sock *psk = ask->parent;
        struct alg_sock *pask = alg_sk(psk);
        struct skcipher_ctx *ctx = ask->private;
        struct skcipher_tfm *skc = pask->private;
        struct crypto_skcipher *tfm = skc->skcipher;
        struct skcipher_sg_list *sgl;
        struct scatterlist *sg;
        struct skcipher_async_req *sreq;
        struct skcipher_request *req;
        struct skcipher_async_rsgl *last_rsgl = NULL;
        unsigned int txbufs = 0, len = 0, tx_nents;
        unsigned int reqsize = crypto_skcipher_reqsize(tfm);
        unsigned int ivsize = crypto_skcipher_ivsize(tfm);
        int err = -ENOMEM;
        bool mark = false;
        char *iv;

        sreq = kzalloc(sizeof(*sreq) + reqsize + ivsize, GFP_KERNEL);
        if (unlikely(!sreq))
                goto out;

        req = &sreq->req;
        iv = (char *)(req + 1) + reqsize;
        sreq->iocb = msg->msg_iocb;
        INIT_LIST_HEAD(&sreq->list);
        sreq->inflight = &ctx->inflight;

        lock_sock(sk);
        tx_nents = skcipher_all_sg_nents(ctx);
        sreq->tsg = kcalloc(tx_nents, sizeof(*sg), GFP_KERNEL);
        if (unlikely(!sreq->tsg))
                goto unlock;
        sg_init_table(sreq->tsg, tx_nents);
        memcpy(iv, ctx->iv, ivsize);
        skcipher_request_set_tfm(req, tfm);
        skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
                                      skcipher_async_cb, sreq);

        while (iov_iter_count(&msg->msg_iter)) {
                struct skcipher_async_rsgl *rsgl;
                int used;

                if (!ctx->used) {
                        err = skcipher_wait_for_data(sk, flags);
                        if (err)
                                goto free;
                }
                sgl = list_first_entry(&ctx->tsgl,
                                       struct skcipher_sg_list, list);
                sg = sgl->sg;

                while (!sg->length)
                        sg++;

                used = min_t(unsigned long, ctx->used,
                             iov_iter_count(&msg->msg_iter));
                used = min_t(unsigned long, used, sg->length);

                if (txbufs == tx_nents) {
                        struct scatterlist *tmp;
                        int x;

                        /* Ran out of tx slots in async request
                         * need to expand */
                        tmp = kcalloc(tx_nents * 2, sizeof(*tmp),
                                      GFP_KERNEL);
                        if (!tmp) {
                                err = -ENOMEM;
                                goto free;
                        }

                        sg_init_table(tmp, tx_nents * 2);
                        for (x = 0; x < tx_nents; x++)
                                sg_set_page(&tmp[x], sg_page(&sreq->tsg[x]),
                                            sreq->tsg[x].length,
                                            sreq->tsg[x].offset);
                        kfree(sreq->tsg);
                        sreq->tsg = tmp;
                        tx_nents *= 2;
                        mark = true;
                }

                /* Need to take over the tx sgl from ctx
                 * to the asynch req - these sgls will be freed later */
                sg_set_page(sreq->tsg + txbufs++, sg_page(sg), sg->length,
                            sg->offset);

                if (list_empty(&sreq->list)) {
                        rsgl = &sreq->first_sgl;
                        list_add_tail(&rsgl->list, &sreq->list);
                } else {
                        rsgl = kmalloc(sizeof(*rsgl), GFP_KERNEL);
                        if (!rsgl) {
                                err = -ENOMEM;
                                goto free;
                        }
                        list_add_tail(&rsgl->list, &sreq->list);
                }

                used = af_alg_make_sg(&rsgl->sgl, &msg->msg_iter, used);
                err = used;
                if (used < 0)
                        goto free;
                if (last_rsgl)
                        af_alg_link_sg(&last_rsgl->sgl, &rsgl->sgl);

                last_rsgl = rsgl;
                len += used;
                skcipher_pull_sgl(sk, used, 0);
                iov_iter_advance(&msg->msg_iter, used);
        }

        if (mark)
                sg_mark_end(sreq->tsg + txbufs - 1);

        skcipher_request_set_crypt(req, sreq->tsg, sreq->first_sgl.sgl.sg,
                                   len, iv);
        err = ctx->enc ? crypto_skcipher_encrypt(req) :
                         crypto_skcipher_decrypt(req);
        if (err == -EINPROGRESS) {
                atomic_inc(&ctx->inflight);
                err = -EIOCBQUEUED;
                sreq = NULL;
                goto unlock;
        }
free:
        skcipher_free_async_sgls(sreq);
unlock:
        skcipher_wmem_wakeup(sk);
        release_sock(sk);
        kzfree(sreq);
out:
        return err;
}
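
/*
 * Synchronous read path: map the user's buffers as the destination, run
 * the cipher over the head of the transmit queue and wait for completion.
 * The length is trimmed to a multiple of the block size whenever more
 * data is still expected or remains queued beyond this chunk.
 */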
static int skcipher_recvmsg_sync(struct socket *sock, struct msghdr *msg,
                                 int flags)
{
        struct sock *sk = sock->sk;
        struct alg_sock *ask = alg_sk(sk);
        struct sock *psk = ask->parent;
        struct alg_sock *pask = alg_sk(psk);
        struct skcipher_ctx *ctx = ask->private;
        struct skcipher_tfm *skc = pask->private;
        struct crypto_skcipher *tfm = skc->skcipher;
        unsigned bs = crypto_skcipher_blocksize(tfm);
        struct skcipher_sg_list *sgl;
        struct scatterlist *sg;
        int err = -EAGAIN;
        int used;
        long copied = 0;

        lock_sock(sk);
        while (msg_data_left(msg)) {
                if (!ctx->used) {
                        err = skcipher_wait_for_data(sk, flags);
                        if (err)
                                goto unlock;
                }

                used = min_t(unsigned long, ctx->used, msg_data_left(msg));

                used = af_alg_make_sg(&ctx->rsgl, &msg->msg_iter, used);
                err = used;
                if (err < 0)
                        goto unlock;

                if (ctx->more || used < ctx->used)
                        used -= used % bs;

                err = -EINVAL;
                if (!used)
                        goto free;

                sgl = list_first_entry(&ctx->tsgl,
                                       struct skcipher_sg_list, list);
                sg = sgl->sg;

                while (!sg->length)
                        sg++;

                skcipher_request_set_crypt(&ctx->req, sg, ctx->rsgl.sg, used,
                                           ctx->iv);

                err = af_alg_wait_for_completion(
                                ctx->enc ?
                                        crypto_skcipher_encrypt(&ctx->req) :
                                        crypto_skcipher_decrypt(&ctx->req),
                                &ctx->completion);

free:
                af_alg_free_sg(&ctx->rsgl);

                if (err)
                        goto unlock;

                copied += used;
                skcipher_pull_sgl(sk, used, 1);
                iov_iter_advance(&msg->msg_iter, used);
        }

        err = 0;

unlock:
        skcipher_wmem_wakeup(sk);
        release_sock(sk);

        return copied ?: err;
}

static int skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
                            size_t ignored, int flags)
{
        return (msg->msg_iocb && !is_sync_kiocb(msg->msg_iocb)) ?
                skcipher_recvmsg_async(sock, msg, flags) :
                skcipher_recvmsg_sync(sock, msg, flags);
}

static unsigned int skcipher_poll(struct file *file, struct socket *sock,
                                  poll_table *wait)
{
        struct sock *sk = sock->sk;
        struct alg_sock *ask = alg_sk(sk);
        struct skcipher_ctx *ctx = ask->private;
        unsigned int mask;

        sock_poll_wait(file, sk_sleep(sk), wait);
        mask = 0;

        if (ctx->used)
                mask |= POLLIN | POLLRDNORM;

        if (skcipher_writable(sk))
                mask |= POLLOUT | POLLWRNORM | POLLWRBAND;

        return mask;
}

static struct proto_ops algif_skcipher_ops = {
        .family = PF_ALG,

        .connect = sock_no_connect,
        .socketpair = sock_no_socketpair,
        .getname = sock_no_getname,
        .ioctl = sock_no_ioctl,
        .listen = sock_no_listen,
        .shutdown = sock_no_shutdown,
        .getsockopt = sock_no_getsockopt,
        .mmap = sock_no_mmap,
        .bind = sock_no_bind,
        .accept = sock_no_accept,
        .setsockopt = sock_no_setsockopt,

        .release = af_alg_release,
        .sendmsg = skcipher_sendmsg,
        .sendpage = skcipher_sendpage,
        .recvmsg = skcipher_recvmsg,
        .poll = skcipher_poll,
};
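
/*
 * Used by the "nokey" entry points: fail with -ENOKEY until a key has
 * been set on the parent tfm socket.  Once a key is present, the child
 * socket is marked (ask->refcnt) so later calls skip the check.
 */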
static int skcipher_check_key(struct socket *sock)
{
        int err = 0;
        struct sock *psk;
        struct alg_sock *pask;
        struct skcipher_tfm *tfm;
        struct sock *sk = sock->sk;
        struct alg_sock *ask = alg_sk(sk);

        lock_sock(sk);
        if (ask->refcnt)
                goto unlock_child;

        psk = ask->parent;
        pask = alg_sk(ask->parent);
        tfm = pask->private;

        err = -ENOKEY;
        lock_sock_nested(psk, SINGLE_DEPTH_NESTING);
        if (!tfm->has_key)
                goto unlock;

        if (!pask->refcnt++)
                sock_hold(psk);

        ask->refcnt = 1;
        sock_put(psk);

        err = 0;

unlock:
        release_sock(psk);
unlock_child:
        release_sock(sk);

        return err;
}

static int skcipher_sendmsg_nokey(struct socket *sock, struct msghdr *msg,
                                  size_t size)
{
        int err;

        err = skcipher_check_key(sock);
        if (err)
                return err;

        return skcipher_sendmsg(sock, msg, size);
}

static ssize_t skcipher_sendpage_nokey(struct socket *sock, struct page *page,
                                       int offset, size_t size, int flags)
{
        int err;

        err = skcipher_check_key(sock);
        if (err)
                return err;

        return skcipher_sendpage(sock, page, offset, size, flags);
}

static int skcipher_recvmsg_nokey(struct socket *sock, struct msghdr *msg,
                                  size_t ignored, int flags)
{
        int err;

        err = skcipher_check_key(sock);
        if (err)
                return err;

        return skcipher_recvmsg(sock, msg, ignored, flags);
}

static struct proto_ops algif_skcipher_ops_nokey = {
        .family = PF_ALG,

        .connect = sock_no_connect,
        .socketpair = sock_no_socketpair,
        .getname = sock_no_getname,
        .ioctl = sock_no_ioctl,
        .listen = sock_no_listen,
        .shutdown = sock_no_shutdown,
        .getsockopt = sock_no_getsockopt,
        .mmap = sock_no_mmap,
        .bind = sock_no_bind,
        .accept = sock_no_accept,
        .setsockopt = sock_no_setsockopt,

        .release = af_alg_release,
        .sendmsg = skcipher_sendmsg_nokey,
        .sendpage = skcipher_sendpage_nokey,
        .recvmsg = skcipher_recvmsg_nokey,
        .poll = skcipher_poll,
};

static void *skcipher_bind(const char *name, u32 type, u32 mask)
{
        struct skcipher_tfm *tfm;
        struct crypto_skcipher *skcipher;

        tfm = kzalloc(sizeof(*tfm), GFP_KERNEL);
        if (!tfm)
                return ERR_PTR(-ENOMEM);

        skcipher = crypto_alloc_skcipher(name, type, mask);
        if (IS_ERR(skcipher)) {
                kfree(tfm);
                return ERR_CAST(skcipher);
        }

        tfm->skcipher = skcipher;

        return tfm;
}

static void skcipher_release(void *private)
{
        struct skcipher_tfm *tfm = private;

        crypto_free_skcipher(tfm->skcipher);
        kfree(tfm);
}

static int skcipher_setkey(void *private, const u8 *key, unsigned int keylen)
{
        struct skcipher_tfm *tfm = private;
        int err;

        err = crypto_skcipher_setkey(tfm->skcipher, key, keylen);
        tfm->has_key = !err;

        return err;
}
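
/*
 * Wait (bounded at roughly ten seconds: 100 iterations of msleep(100))
 * for outstanding async requests to finish before the socket is torn
 * down.
 */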
static void skcipher_wait(struct sock *sk)
{
        struct alg_sock *ask = alg_sk(sk);
        struct skcipher_ctx *ctx = ask->private;
        int ctr = 0;

        while (atomic_read(&ctx->inflight) && ctr++ < 100)
                msleep(100);
}

static void skcipher_sock_destruct(struct sock *sk)
{
        struct alg_sock *ask = alg_sk(sk);
        struct skcipher_ctx *ctx = ask->private;
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(&ctx->req);

        if (atomic_read(&ctx->inflight))
                skcipher_wait(sk);

        skcipher_free_sgl(sk);
        sock_kzfree_s(sk, ctx->iv, crypto_skcipher_ivsize(tfm));
        sock_kfree_s(sk, ctx, ctx->len);
        af_alg_release_parent(sk);
}
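
/*
 * Set up the per-connection context on accept(): allocate the context
 * (with room for the embedded request) and a zeroed IV, and initialise
 * the synchronous request with its completion callback.
 */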
static int skcipher_accept_parent_nokey(void *private, struct sock *sk)
{
        struct skcipher_ctx *ctx;
        struct alg_sock *ask = alg_sk(sk);
        struct skcipher_tfm *tfm = private;
        struct crypto_skcipher *skcipher = tfm->skcipher;
        unsigned int len = sizeof(*ctx) + crypto_skcipher_reqsize(skcipher);

        ctx = sock_kmalloc(sk, len, GFP_KERNEL);
        if (!ctx)
                return -ENOMEM;

        ctx->iv = sock_kmalloc(sk, crypto_skcipher_ivsize(skcipher),
                               GFP_KERNEL);
        if (!ctx->iv) {
                sock_kfree_s(sk, ctx, len);
                return -ENOMEM;
        }

        memset(ctx->iv, 0, crypto_skcipher_ivsize(skcipher));

        INIT_LIST_HEAD(&ctx->tsgl);
        ctx->len = len;
        ctx->used = 0;
        ctx->more = 0;
        ctx->merge = 0;
        ctx->enc = 0;
        atomic_set(&ctx->inflight, 0);
        af_alg_init_completion(&ctx->completion);

        ask->private = ctx;

        skcipher_request_set_tfm(&ctx->req, skcipher);
        skcipher_request_set_callback(&ctx->req, CRYPTO_TFM_REQ_MAY_SLEEP |
                                                 CRYPTO_TFM_REQ_MAY_BACKLOG,
                                      af_alg_complete, &ctx->completion);

        sk->sk_destruct = skcipher_sock_destruct;

        return 0;
}

static int skcipher_accept_parent(void *private, struct sock *sk)
{
        struct skcipher_tfm *tfm = private;

        if (!tfm->has_key && crypto_skcipher_has_setkey(tfm->skcipher))
                return -ENOKEY;

        return skcipher_accept_parent_nokey(private, sk);
}

static const struct af_alg_type algif_type_skcipher = {
        .bind = skcipher_bind,
        .release = skcipher_release,
        .setkey = skcipher_setkey,
        .accept = skcipher_accept_parent,
        .accept_nokey = skcipher_accept_parent_nokey,
        .ops = &algif_skcipher_ops,
        .ops_nokey = &algif_skcipher_ops_nokey,
        .name = "skcipher",
        .owner = THIS_MODULE
};

static int __init algif_skcipher_init(void)
{
        return af_alg_register_type(&algif_type_skcipher);
}

static void __exit algif_skcipher_exit(void)
{
        int err = af_alg_unregister_type(&algif_type_skcipher);
        BUG_ON(err);
}

module_init(algif_skcipher_init);
module_exit(algif_skcipher_exit);
MODULE_LICENSE("GPL");