/*
 * algif_aead: User-space interface for AEAD algorithms
 *
 * Copyright (C) 2014, Stephan Mueller <smueller@chronox.de>
 *
 * This file provides the user-space API for AEAD ciphers.
 *
 * This file is derived from algif_skcipher.c.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */

#include <crypto/internal/aead.h>
#include <crypto/scatterwalk.h>
#include <crypto/if_alg.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/net.h>
#include <net/sock.h>

struct aead_sg_list {
	unsigned int cur;
	struct scatterlist sg[ALG_MAX_PAGES];
};

struct aead_async_rsgl {
	struct af_alg_sgl sgl;
	struct list_head list;
};

struct aead_async_req {
	struct scatterlist *tsgl;
	struct aead_async_rsgl first_rsgl;
	struct list_head list;
	struct kiocb *iocb;
	unsigned int tsgls;
	char iv[];
};

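/*
 * Per-socket state for one AEAD operation. TX data supplied via
 * sendmsg()/sendpage() is collected in tsgl; RX buffers handed in by
 * recvmsg() are tracked on 'list' with first_rsgl as the inline head
 * element. aead_req is the last member so that the transform's request
 * context (crypto_aead_reqsize() bytes) can follow it in the same
 * allocation.
 */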
struct aead_ctx {
	struct aead_sg_list tsgl;
	struct aead_async_rsgl first_rsgl;
	struct list_head list;

	void *iv;

	struct af_alg_completion completion;

	unsigned long used;

	unsigned int len;
	bool more;
	bool merge;
	bool enc;

	size_t aead_assoclen;
	struct aead_request aead_req;
};

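/*
 * Remaining number of bytes that sendmsg()/sendpage() may still queue on
 * this socket, bounded by sk_sndbuf rounded down to a page boundary.
 * aead_writable() requires at least one free page before further TX data
 * is accepted.
 */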
static inline int aead_sndbuf(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;

	return max_t(int, max_t(int, sk->sk_sndbuf & PAGE_MASK, PAGE_SIZE) -
			  ctx->used, 0);
}

static inline bool aead_writable(struct sock *sk)
{
	return PAGE_SIZE <= aead_sndbuf(sk);
}

static inline bool aead_sufficient_data(struct aead_ctx *ctx)
{
	unsigned as = crypto_aead_authsize(crypto_aead_reqtfm(&ctx->aead_req));

	/*
	 * The minimum amount of memory needed for an AEAD cipher is
	 * the AAD and in case of decryption the tag.
	 */
	return ctx->used >= ctx->aead_assoclen + (ctx->enc ? 0 : as);
}

static void aead_reset_ctx(struct aead_ctx *ctx)
{
	struct aead_sg_list *sgl = &ctx->tsgl;

	sg_init_table(sgl->sg, ALG_MAX_PAGES);
	sgl->cur = 0;
	ctx->used = 0;
	ctx->more = 0;
	ctx->merge = 0;
}

static void aead_put_sgl(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;
	struct aead_sg_list *sgl = &ctx->tsgl;
	struct scatterlist *sg = sgl->sg;
	unsigned int i;

	for (i = 0; i < sgl->cur; i++) {
		if (!sg_page(sg + i))
			continue;

		put_page(sg_page(sg + i));
		sg_assign_page(sg + i, NULL);
	}
	aead_reset_ctx(ctx);
}

static void aead_wmem_wakeup(struct sock *sk)
{
	struct socket_wq *wq;

	if (!aead_writable(sk))
		return;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (skwq_has_sleeper(wq))
		wake_up_interruptible_sync_poll(&wq->wait, POLLIN |
							   POLLRDNORM |
							   POLLRDBAND);
	sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
	rcu_read_unlock();
}

static int aead_wait_for_data(struct sock *sk, unsigned flags)
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;
	long timeout;
	int err = -ERESTARTSYS;

	if (flags & MSG_DONTWAIT)
		return -EAGAIN;

	sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
	add_wait_queue(sk_sleep(sk), &wait);
	for (;;) {
		if (signal_pending(current))
			break;
		timeout = MAX_SCHEDULE_TIMEOUT;
		if (sk_wait_event(sk, &timeout, !ctx->more, &wait)) {
			err = 0;
			break;
		}
	}
	remove_wait_queue(sk_sleep(sk), &wait);
	sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);

	return err;
}

static void aead_data_wakeup(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;
	struct socket_wq *wq;

	if (ctx->more)
		return;
	if (!ctx->used)
		return;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (skwq_has_sleeper(wq))
		wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
							   POLLRDNORM |
							   POLLRDBAND);
	sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
	rcu_read_unlock();
}

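/*
 * sendmsg() entry point for queueing AAD and plaintext/ciphertext. The
 * optional control messages (parsed by af_alg_cmsg_send) select the
 * operation (ALG_OP_ENCRYPT/ALG_OP_DECRYPT), supply the IV and set the
 * associated data length. Payload bytes are copied into kernel pages;
 * a partially filled page is topped up first when ctx->merge is set.
 */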
static int aead_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;
	unsigned ivsize =
		crypto_aead_ivsize(crypto_aead_reqtfm(&ctx->aead_req));
	struct aead_sg_list *sgl = &ctx->tsgl;
	struct af_alg_control con = {};
	long copied = 0;
	bool enc = 0;
	bool init = 0;
	int err = -EINVAL;

	if (msg->msg_controllen) {
		err = af_alg_cmsg_send(msg, &con);
		if (err)
			return err;

		init = 1;
		switch (con.op) {
		case ALG_OP_ENCRYPT:
			enc = 1;
			break;
		case ALG_OP_DECRYPT:
			enc = 0;
			break;
		default:
			return -EINVAL;
		}

		if (con.iv && con.iv->ivlen != ivsize)
			return -EINVAL;
	}

	lock_sock(sk);
	if (!ctx->more && ctx->used)
		goto unlock;

	if (init) {
		ctx->enc = enc;
		if (con.iv)
			memcpy(ctx->iv, con.iv->iv, ivsize);

		ctx->aead_assoclen = con.aead_assoclen;
	}

	while (size) {
		size_t len = size;
		struct scatterlist *sg = NULL;

		/* use the existing memory in an allocated page */
		if (ctx->merge) {
			sg = sgl->sg + sgl->cur - 1;
			len = min_t(unsigned long, len,
				    PAGE_SIZE - sg->offset - sg->length);
			err = memcpy_from_msg(page_address(sg_page(sg)) +
					      sg->offset + sg->length,
					      msg, len);
			if (err)
				goto unlock;

			sg->length += len;
			ctx->merge = (sg->offset + sg->length) &
				     (PAGE_SIZE - 1);

			ctx->used += len;
			copied += len;
			size -= len;
			continue;
		}

		if (!aead_writable(sk)) {
			/* user space sent too much data */
			aead_put_sgl(sk);
			err = -EMSGSIZE;
			goto unlock;
		}

		/* allocate a new page */
		len = min_t(unsigned long, size, aead_sndbuf(sk));
		while (len) {
			size_t plen = 0;

			if (sgl->cur >= ALG_MAX_PAGES) {
				aead_put_sgl(sk);
				err = -E2BIG;
				goto unlock;
			}

			sg = sgl->sg + sgl->cur;
			plen = min_t(size_t, len, PAGE_SIZE);

			sg_assign_page(sg, alloc_page(GFP_KERNEL));
			err = -ENOMEM;
			if (!sg_page(sg))
				goto unlock;

			err = memcpy_from_msg(page_address(sg_page(sg)),
					      msg, plen);
			if (err) {
				__free_page(sg_page(sg));
				sg_assign_page(sg, NULL);
				goto unlock;
			}

			sg->offset = 0;
			sg->length = plen;
			len -= plen;
			ctx->used += plen;
			copied += plen;
			sgl->cur++;
			size -= plen;
			ctx->merge = plen & (PAGE_SIZE - 1);
		}
	}

	err = 0;

	ctx->more = msg->msg_flags & MSG_MORE;
	if (!ctx->more && !aead_sufficient_data(ctx)) {
		aead_put_sgl(sk);
		err = -EMSGSIZE;
	}

unlock:
	aead_data_wakeup(sk);
	release_sock(sk);

	return err ?: copied;
}

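/*
 * Zero-copy variant of the TX path: instead of copying the payload, a
 * reference to the caller-provided page is taken and placed directly into
 * the TX scatterlist.
 */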
static ssize_t aead_sendpage(struct socket *sock, struct page *page,
			     int offset, size_t size, int flags)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;
	struct aead_sg_list *sgl = &ctx->tsgl;
	int err = -EINVAL;

	if (flags & MSG_SENDPAGE_NOTLAST)
		flags |= MSG_MORE;

	if (sgl->cur >= ALG_MAX_PAGES)
		return -E2BIG;

	lock_sock(sk);
	if (!ctx->more && ctx->used)
		goto unlock;

	if (!size)
		goto done;

	if (!aead_writable(sk)) {
		/* user space sent too much data */
		aead_put_sgl(sk);
		err = -EMSGSIZE;
		goto unlock;
	}

	ctx->merge = 0;

	get_page(page);
	sg_set_page(sgl->sg + sgl->cur, page, size, offset);
	sgl->cur++;
	ctx->used += size;

	err = 0;

done:
	ctx->more = flags & MSG_MORE;
	if (!ctx->more && !aead_sufficient_data(ctx)) {
		aead_put_sgl(sk);
		err = -EMSGSIZE;
	}

unlock:
	aead_data_wakeup(sk);
	release_sock(sk);

	return err ?: size;
}

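/*
 * Layout of the per-request allocation used by the async (AIO) receive
 * path: struct aead_request, followed by the transform's request context
 * (crypto_aead_reqsize() bytes), followed by struct aead_async_req whose
 * trailing iv[] holds a private copy of the IV.
 */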
#define GET_ASYM_REQ(req, tfm) (struct aead_async_req *) \
		((char *)req + sizeof(struct aead_request) + \
		 crypto_aead_reqsize(tfm))

#define GET_REQ_SIZE(tfm) sizeof(struct aead_async_req) + \
	crypto_aead_reqsize(tfm) + crypto_aead_ivsize(tfm) + \
	sizeof(struct aead_request)

static void aead_async_cb(struct crypto_async_request *_req, int err)
{
	struct sock *sk = _req->data;
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;
	struct crypto_aead *tfm = crypto_aead_reqtfm(&ctx->aead_req);
	struct aead_request *req = aead_request_cast(_req);
	struct aead_async_req *areq = GET_ASYM_REQ(req, tfm);
	struct scatterlist *sg = areq->tsgl;
	struct aead_async_rsgl *rsgl;
	struct kiocb *iocb = areq->iocb;
	unsigned int i, reqlen = GET_REQ_SIZE(tfm);

	list_for_each_entry(rsgl, &areq->list, list) {
		af_alg_free_sg(&rsgl->sgl);
		if (rsgl != &areq->first_rsgl)
			sock_kfree_s(sk, rsgl, sizeof(*rsgl));
	}

	for (i = 0; i < areq->tsgls; i++)
		put_page(sg_page(sg + i));

	sock_kfree_s(sk, areq->tsgl, sizeof(*areq->tsgl) * areq->tsgls);
	sock_kfree_s(sk, req, reqlen);
	__sock_put(sk);
	iocb->ki_complete(iocb, err, err);
}

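/*
 * Async (AIO) receive path: the TX scatterlist is duplicated into the
 * request, the caller's iovecs are mapped as RX scatterlists and the
 * cipher request is submitted. If the driver returns -EINPROGRESS, a
 * socket reference is held and -EIOCBQUEUED is returned; cleanup and
 * ki_complete() then happen in aead_async_cb().
 */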
static int aead_recvmsg_async(struct socket *sock, struct msghdr *msg,
			      int flags)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;
	struct crypto_aead *tfm = crypto_aead_reqtfm(&ctx->aead_req);
	struct aead_async_req *areq;
	struct aead_request *req = NULL;
	struct aead_sg_list *sgl = &ctx->tsgl;
	struct aead_async_rsgl *last_rsgl = NULL, *rsgl;
	unsigned int as = crypto_aead_authsize(tfm);
	unsigned int i, reqlen = GET_REQ_SIZE(tfm);
	int err = -ENOMEM;
	unsigned long used;
	size_t outlen = 0;
	size_t usedpages = 0;

	lock_sock(sk);
	if (ctx->more) {
		err = aead_wait_for_data(sk, flags);
		if (err)
			goto unlock;
	}

	if (!aead_sufficient_data(ctx))
		goto unlock;

	used = ctx->used;
	if (ctx->enc)
		outlen = used + as;
	else
		outlen = used - as;

	req = sock_kmalloc(sk, reqlen, GFP_KERNEL);
	if (unlikely(!req))
		goto unlock;

	areq = GET_ASYM_REQ(req, tfm);
	memset(&areq->first_rsgl, '\0', sizeof(areq->first_rsgl));
	INIT_LIST_HEAD(&areq->list);
	areq->iocb = msg->msg_iocb;
	memcpy(areq->iv, ctx->iv, crypto_aead_ivsize(tfm));
	aead_request_set_tfm(req, tfm);
	aead_request_set_ad(req, ctx->aead_assoclen);
	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  aead_async_cb, sk);
	used -= ctx->aead_assoclen;

	/* take over all tx sgls from ctx */
	areq->tsgl = sock_kmalloc(sk,
				  sizeof(*areq->tsgl) * max_t(u32, sgl->cur, 1),
				  GFP_KERNEL);
	if (unlikely(!areq->tsgl))
		goto free;

	sg_init_table(areq->tsgl, max_t(u32, sgl->cur, 1));
	for (i = 0; i < sgl->cur; i++)
		sg_set_page(&areq->tsgl[i], sg_page(&sgl->sg[i]),
			    sgl->sg[i].length, sgl->sg[i].offset);

	areq->tsgls = sgl->cur;

	/* create rx sgls */
	while (outlen > usedpages && iov_iter_count(&msg->msg_iter)) {
		size_t seglen = min_t(size_t, iov_iter_count(&msg->msg_iter),
				      (outlen - usedpages));

		if (list_empty(&areq->list)) {
			rsgl = &areq->first_rsgl;
		} else {
			rsgl = sock_kmalloc(sk, sizeof(*rsgl), GFP_KERNEL);
			if (unlikely(!rsgl)) {
				err = -ENOMEM;
				goto free;
			}
		}
		rsgl->sgl.npages = 0;
		list_add_tail(&rsgl->list, &areq->list);

		/* make one iovec available as scatterlist */
		err = af_alg_make_sg(&rsgl->sgl, &msg->msg_iter, seglen);
		if (err < 0)
			goto free;

		usedpages += err;

		/* chain the new scatterlist with previous one */
		if (last_rsgl)
			af_alg_link_sg(&last_rsgl->sgl, &rsgl->sgl);

		last_rsgl = rsgl;

		iov_iter_advance(&msg->msg_iter, err);
	}

	/* ensure output buffer is sufficiently large */
	if (usedpages < outlen) {
		err = -EINVAL;
		goto unlock;
	}

	aead_request_set_crypt(req, areq->tsgl, areq->first_rsgl.sgl.sg, used,
			       areq->iv);
	err = ctx->enc ? crypto_aead_encrypt(req) : crypto_aead_decrypt(req);
	if (err) {
		if (err == -EINPROGRESS) {
			sock_hold(sk);
			err = -EIOCBQUEUED;
			aead_reset_ctx(ctx);
			goto unlock;
		} else if (err == -EBADMSG) {
			aead_put_sgl(sk);
		}
		goto free;
	}
	aead_put_sgl(sk);

free:
	list_for_each_entry(rsgl, &areq->list, list) {
		af_alg_free_sg(&rsgl->sgl);
		if (rsgl != &areq->first_rsgl)
			sock_kfree_s(sk, rsgl, sizeof(*rsgl));
	}
	if (areq->tsgl)
		sock_kfree_s(sk, areq->tsgl, sizeof(*areq->tsgl) * areq->tsgls);
	if (req)
		sock_kfree_s(sk, req, reqlen);
unlock:
	aead_wmem_wakeup(sk);
	release_sock(sk);
	return err ? err : outlen;
}

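/*
 * Synchronous receive path: the caller's iovecs are mapped as RX
 * scatterlists and the cipher operation is awaited in place via
 * af_alg_wait_for_completion() before the TX pages are released.
 */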
static int aead_recvmsg_sync(struct socket *sock, struct msghdr *msg, int flags)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;
	unsigned as = crypto_aead_authsize(crypto_aead_reqtfm(&ctx->aead_req));
	struct aead_sg_list *sgl = &ctx->tsgl;
	struct aead_async_rsgl *last_rsgl = NULL;
	struct aead_async_rsgl *rsgl, *tmp;
	int err = -EINVAL;
	unsigned long used = 0;
	size_t outlen = 0;
	size_t usedpages = 0;

	lock_sock(sk);

	/*
	 * Please see documentation of aead_request_set_crypt for the
	 * description of the AEAD memory structure expected from the caller.
	 */

	if (ctx->more) {
		err = aead_wait_for_data(sk, flags);
		if (err)
			goto unlock;
	}

	/* data length provided by caller via sendmsg/sendpage */
	used = ctx->used;

	/*
	 * Make sure sufficient data is present -- note, the same check is
	 * also present in sendmsg/sendpage. The checks there inform the data
	 * sender that something is wrong, but they are irrelevant for
	 * maintaining kernel integrity. We need this check here too in case
	 * user space decides not to honor the error returned by
	 * sendmsg/sendpage and still calls recvmsg. This check protects the
	 * kernel integrity.
	 */
	if (!aead_sufficient_data(ctx))
		goto unlock;

	/*
	 * Calculate the minimum output buffer size holding the result of the
	 * cipher operation. When encrypting data, the receiving buffer is
	 * larger by the tag length compared to the input buffer as the
	 * encryption operation generates the tag. For decryption, the input
	 * buffer provides the tag which is consumed resulting in only the
	 * plaintext without a buffer for the tag returned to the caller.
	 */
	if (ctx->enc)
		outlen = used + as;
	else
		outlen = used - as;

	/*
	 * The cipher operation input data is reduced by the associated data
	 * length as this data is processed separately later on.
	 */
	used -= ctx->aead_assoclen;

	/* convert iovecs of output buffers into scatterlists */
	while (outlen > usedpages && iov_iter_count(&msg->msg_iter)) {
		size_t seglen = min_t(size_t, iov_iter_count(&msg->msg_iter),
				      (outlen - usedpages));

		if (list_empty(&ctx->list)) {
			rsgl = &ctx->first_rsgl;
		} else {
			rsgl = sock_kmalloc(sk, sizeof(*rsgl), GFP_KERNEL);
			if (unlikely(!rsgl)) {
				err = -ENOMEM;
				goto unlock;
			}
		}
		rsgl->sgl.npages = 0;
		list_add_tail(&rsgl->list, &ctx->list);

		/* make one iovec available as scatterlist */
		err = af_alg_make_sg(&rsgl->sgl, &msg->msg_iter, seglen);
		if (err < 0)
			goto unlock;

		usedpages += err;

		/* chain the new scatterlist with previous one */
		if (last_rsgl)
			af_alg_link_sg(&last_rsgl->sgl, &rsgl->sgl);

		last_rsgl = rsgl;

		iov_iter_advance(&msg->msg_iter, err);
	}

	/* ensure output buffer is sufficiently large */
	if (usedpages < outlen) {
		err = -EINVAL;
		goto unlock;
	}

	sg_mark_end(sgl->sg + sgl->cur - 1);
	aead_request_set_crypt(&ctx->aead_req, sgl->sg, ctx->first_rsgl.sgl.sg,
			       used, ctx->iv);
	aead_request_set_ad(&ctx->aead_req, ctx->aead_assoclen);

	err = af_alg_wait_for_completion(ctx->enc ?
					 crypto_aead_encrypt(&ctx->aead_req) :
					 crypto_aead_decrypt(&ctx->aead_req),
					 &ctx->completion);

	if (err) {
		/* EBADMSG implies a valid cipher operation took place */
		if (err == -EBADMSG)
			aead_put_sgl(sk);

		goto unlock;
	}

	aead_put_sgl(sk);
	err = 0;

unlock:
	list_for_each_entry_safe(rsgl, tmp, &ctx->list, list) {
		af_alg_free_sg(&rsgl->sgl);
		list_del(&rsgl->list);
		if (rsgl != &ctx->first_rsgl)
			sock_kfree_s(sk, rsgl, sizeof(*rsgl));
	}
	INIT_LIST_HEAD(&ctx->list);
	aead_wmem_wakeup(sk);
	release_sock(sk);

	return err ? err : outlen;
}

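/* Dispatch to the AIO or the synchronous receive path based on the kiocb. */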
static int aead_recvmsg(struct socket *sock, struct msghdr *msg, size_t ignored,
			int flags)
{
	return (msg->msg_iocb && !is_sync_kiocb(msg->msg_iocb)) ?
		aead_recvmsg_async(sock, msg, flags) :
		aead_recvmsg_sync(sock, msg, flags);
}

static unsigned int aead_poll(struct file *file, struct socket *sock,
			      poll_table *wait)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;
	unsigned int mask;

	sock_poll_wait(file, sk_sleep(sk), wait);
	mask = 0;

	if (!ctx->more)
		mask |= POLLIN | POLLRDNORM;

	if (aead_writable(sk))
		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;

	return mask;
}

static struct proto_ops algif_aead_ops = {
	.family		=	PF_ALG,

	.connect	=	sock_no_connect,
	.socketpair	=	sock_no_socketpair,
	.getname	=	sock_no_getname,
	.ioctl		=	sock_no_ioctl,
	.listen		=	sock_no_listen,
	.shutdown	=	sock_no_shutdown,
	.getsockopt	=	sock_no_getsockopt,
	.mmap		=	sock_no_mmap,
	.bind		=	sock_no_bind,
	.accept		=	sock_no_accept,
	.setsockopt	=	sock_no_setsockopt,

	.release	=	af_alg_release,
	.sendmsg	=	aead_sendmsg,
	.sendpage	=	aead_sendpage,
	.recvmsg	=	aead_recvmsg,
	.poll		=	aead_poll,
};

static void *aead_bind(const char *name, u32 type, u32 mask)
{
	return crypto_alloc_aead(name, type, mask);
}

static void aead_release(void *private)
{
	crypto_free_aead(private);
}

static int aead_setauthsize(void *private, unsigned int authsize)
{
	return crypto_aead_setauthsize(private, authsize);
}

static int aead_setkey(void *private, const u8 *key, unsigned int keylen)
{
	return crypto_aead_setkey(private, key, keylen);
}

static void aead_sock_destruct(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;
	unsigned int ivlen = crypto_aead_ivsize(
				crypto_aead_reqtfm(&ctx->aead_req));

	WARN_ON(atomic_read(&sk->sk_refcnt) != 0);
	aead_put_sgl(sk);
	sock_kzfree_s(sk, ctx->iv, ivlen);
	sock_kfree_s(sk, ctx, ctx->len);
	af_alg_release_parent(sk);
}

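/*
 * Called when a request socket is accepted on the bound transform socket:
 * allocates the aead_ctx sized for the transform's request context plus a
 * zeroed IV buffer, and wires up the default synchronous completion
 * callback.
 */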
static int aead_accept_parent(void *private, struct sock *sk)
{
	struct aead_ctx *ctx;
	struct alg_sock *ask = alg_sk(sk);
	unsigned int len = sizeof(*ctx) + crypto_aead_reqsize(private);
	unsigned int ivlen = crypto_aead_ivsize(private);

	ctx = sock_kmalloc(sk, len, GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;
	memset(ctx, 0, len);

	ctx->iv = sock_kmalloc(sk, ivlen, GFP_KERNEL);
	if (!ctx->iv) {
		sock_kfree_s(sk, ctx, len);
		return -ENOMEM;
	}
	memset(ctx->iv, 0, ivlen);

	ctx->len = len;
	ctx->used = 0;
	ctx->more = 0;
	ctx->merge = 0;
	ctx->enc = 0;
	ctx->tsgl.cur = 0;
	ctx->aead_assoclen = 0;
	af_alg_init_completion(&ctx->completion);
	sg_init_table(ctx->tsgl.sg, ALG_MAX_PAGES);
	INIT_LIST_HEAD(&ctx->list);

	ask->private = ctx;

	aead_request_set_tfm(&ctx->aead_req, private);
	aead_request_set_callback(&ctx->aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  af_alg_complete, &ctx->completion);

	sk->sk_destruct = aead_sock_destruct;

	return 0;
}

static const struct af_alg_type algif_type_aead = {
	.bind		=	aead_bind,
	.release	=	aead_release,
	.setkey		=	aead_setkey,
	.setauthsize	=	aead_setauthsize,
	.accept		=	aead_accept_parent,
	.ops		=	&algif_aead_ops,
	.name		=	"aead",
	.owner		=	THIS_MODULE
};

static int __init algif_aead_init(void)
{
	return af_alg_register_type(&algif_type_aead);
}

static void __exit algif_aead_exit(void)
{
	int err = af_alg_unregister_type(&algif_type_aead);
	BUG_ON(err);
}

module_init(algif_aead_init);
module_exit(algif_aead_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Stephan Mueller <smueller@chronox.de>");
MODULE_DESCRIPTION("AEAD kernel crypto API user space interface");

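/*
 * Illustrative user-space sketch (not part of this file): the interface
 * above is driven through an AF_ALG socket of type "aead". Assuming
 * <linux/if_alg.h> and a 16-byte key, an AES-GCM encryption would look
 * roughly like this:
 *
 *	struct sockaddr_alg sa = {
 *		.salg_family = AF_ALG,
 *		.salg_type   = "aead",
 *		.salg_name   = "gcm(aes)",
 *	};
 *	int tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
 *	bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));
 *	setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, key, 16);
 *	setsockopt(tfmfd, SOL_ALG, ALG_SET_AEAD_AUTHSIZE, NULL, 16);
 *	int opfd = accept(tfmfd, NULL, 0);	// handled by aead_accept_parent()
 *
 * A sendmsg() on opfd with ALG_SET_OP (ALG_OP_ENCRYPT), ALG_SET_IV and
 * ALG_SET_AEAD_ASSOCLEN control messages plus the AAD and plaintext as
 * payload lands in aead_sendmsg(); the subsequent read()/recvmsg() of the
 * result (a buffer sized for AAD, ciphertext and tag) is served by
 * aead_recvmsg().
 */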