algif_aead.c

/*
 * algif_aead: User-space interface for AEAD algorithms
 *
 * Copyright (C) 2014, Stephan Mueller <smueller@chronox.de>
 *
 * This file provides the user-space API for AEAD ciphers.
 *
 * This file is derived from algif_skcipher.c.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */

#include <crypto/internal/aead.h>
#include <crypto/scatterwalk.h>
#include <crypto/if_alg.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/net.h>
#include <net/sock.h>

struct aead_sg_list {
        unsigned int cur;
        struct scatterlist sg[ALG_MAX_PAGES];
};

struct aead_async_rsgl {
        struct af_alg_sgl sgl;
        struct list_head list;
};

struct aead_async_req {
        struct scatterlist *tsgl;
        struct aead_async_rsgl first_rsgl;
        struct list_head list;
        struct kiocb *iocb;
        unsigned int tsgls;
        char iv[];
};

struct aead_ctx {
        struct aead_sg_list tsgl;               /* TX data from sendmsg/sendpage */
        struct aead_async_rsgl first_rsgl;      /* first RX scatterlist, inline */
        struct list_head list;                  /* additional RX scatterlists */

        void *iv;

        struct af_alg_completion completion;

        unsigned long used;                     /* TX bytes queued so far */

        unsigned int len;                       /* size of this context allocation */
        bool more;                              /* sender announced more data */
        bool merge;                             /* last TX page has free room */
        bool enc;                               /* encrypt (true) or decrypt */

        size_t aead_assoclen;                   /* length of the associated data */
        struct aead_request aead_req;
};

static inline int aead_sndbuf(struct sock *sk)
{
        struct alg_sock *ask = alg_sk(sk);
        struct aead_ctx *ctx = ask->private;

        return max_t(int, max_t(int, sk->sk_sndbuf & PAGE_MASK, PAGE_SIZE) -
                          ctx->used, 0);
}

static inline bool aead_writable(struct sock *sk)
{
        return PAGE_SIZE <= aead_sndbuf(sk);
}

static inline bool aead_sufficient_data(struct aead_ctx *ctx)
{
        unsigned as = crypto_aead_authsize(crypto_aead_reqtfm(&ctx->aead_req));

        /*
         * The minimum amount of memory needed for an AEAD cipher is
         * the AAD and in case of decryption the tag.
         */
        return ctx->used >= ctx->aead_assoclen + (ctx->enc ? 0 : as);
}
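
/*
 * Worked example for the check above (note added for clarity): with a
 * 16-byte tag (e.g. AES-GCM) and 8 bytes of AAD, an encryption request is
 * valid once 8 bytes have been queued (AAD only, an empty plaintext is
 * allowed), while a decryption request needs at least 8 + 16 = 24 bytes,
 * since the ciphertext must carry the tag to be verified.
 */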

static void aead_reset_ctx(struct aead_ctx *ctx)
{
        struct aead_sg_list *sgl = &ctx->tsgl;

        sg_init_table(sgl->sg, ALG_MAX_PAGES);
        sgl->cur = 0;
        ctx->used = 0;
        ctx->more = 0;
        ctx->merge = 0;
}

static void aead_put_sgl(struct sock *sk)
{
        struct alg_sock *ask = alg_sk(sk);
        struct aead_ctx *ctx = ask->private;
        struct aead_sg_list *sgl = &ctx->tsgl;
        struct scatterlist *sg = sgl->sg;
        unsigned int i;

        for (i = 0; i < sgl->cur; i++) {
                if (!sg_page(sg + i))
                        continue;

                put_page(sg_page(sg + i));
                sg_assign_page(sg + i, NULL);
        }
        aead_reset_ctx(ctx);
}

static void aead_wmem_wakeup(struct sock *sk)
{
        struct socket_wq *wq;

        if (!aead_writable(sk))
                return;

        rcu_read_lock();
        wq = rcu_dereference(sk->sk_wq);
        if (skwq_has_sleeper(wq))
                wake_up_interruptible_sync_poll(&wq->wait, POLLIN |
                                                           POLLRDNORM |
                                                           POLLRDBAND);
        sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
        rcu_read_unlock();
}

static int aead_wait_for_data(struct sock *sk, unsigned flags)
{
        DEFINE_WAIT_FUNC(wait, woken_wake_function);
        struct alg_sock *ask = alg_sk(sk);
        struct aead_ctx *ctx = ask->private;
        long timeout;
        int err = -ERESTARTSYS;

        if (flags & MSG_DONTWAIT)
                return -EAGAIN;

        sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
        add_wait_queue(sk_sleep(sk), &wait);
        for (;;) {
                if (signal_pending(current))
                        break;
                timeout = MAX_SCHEDULE_TIMEOUT;
                if (sk_wait_event(sk, &timeout, !ctx->more, &wait)) {
                        err = 0;
                        break;
                }
        }
        remove_wait_queue(sk_sleep(sk), &wait);
        sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);

        return err;
}

static void aead_data_wakeup(struct sock *sk)
{
        struct alg_sock *ask = alg_sk(sk);
        struct aead_ctx *ctx = ask->private;
        struct socket_wq *wq;

        if (ctx->more)
                return;
        if (!ctx->used)
                return;

        rcu_read_lock();
        wq = rcu_dereference(sk->sk_wq);
        if (skwq_has_sleeper(wq))
                wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
                                                           POLLRDNORM |
                                                           POLLRDBAND);
        sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
        rcu_read_unlock();
}

static int aead_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
{
        struct sock *sk = sock->sk;
        struct alg_sock *ask = alg_sk(sk);
        struct aead_ctx *ctx = ask->private;
        unsigned ivsize =
                crypto_aead_ivsize(crypto_aead_reqtfm(&ctx->aead_req));
        struct aead_sg_list *sgl = &ctx->tsgl;
        struct af_alg_control con = {};
        long copied = 0;
        bool enc = 0;
        bool init = 0;
        int err = -EINVAL;

        if (msg->msg_controllen) {
                err = af_alg_cmsg_send(msg, &con);
                if (err)
                        return err;

                init = 1;
                switch (con.op) {
                case ALG_OP_ENCRYPT:
                        enc = 1;
                        break;
                case ALG_OP_DECRYPT:
                        enc = 0;
                        break;
                default:
                        return -EINVAL;
                }

                if (con.iv && con.iv->ivlen != ivsize)
                        return -EINVAL;
        }

        lock_sock(sk);
        if (!ctx->more && ctx->used)
                goto unlock;

        if (init) {
                ctx->enc = enc;
                if (con.iv)
                        memcpy(ctx->iv, con.iv->iv, ivsize);

                ctx->aead_assoclen = con.aead_assoclen;
        }

        while (size) {
                size_t len = size;
                struct scatterlist *sg = NULL;

                /* use the existing memory in an allocated page */
                if (ctx->merge) {
                        sg = sgl->sg + sgl->cur - 1;
                        len = min_t(unsigned long, len,
                                    PAGE_SIZE - sg->offset - sg->length);
                        err = memcpy_from_msg(page_address(sg_page(sg)) +
                                              sg->offset + sg->length,
                                              msg, len);
                        if (err)
                                goto unlock;

                        sg->length += len;
                        ctx->merge = (sg->offset + sg->length) &
                                     (PAGE_SIZE - 1);

                        ctx->used += len;
                        copied += len;
                        size -= len;
                        continue;
                }

                if (!aead_writable(sk)) {
                        /* user space sent too much data */
                        aead_put_sgl(sk);
                        err = -EMSGSIZE;
                        goto unlock;
                }

                /* allocate a new page */
                len = min_t(unsigned long, size, aead_sndbuf(sk));
                while (len) {
                        size_t plen = 0;

                        if (sgl->cur >= ALG_MAX_PAGES) {
                                aead_put_sgl(sk);
                                err = -E2BIG;
                                goto unlock;
                        }

                        sg = sgl->sg + sgl->cur;
                        plen = min_t(size_t, len, PAGE_SIZE);

                        sg_assign_page(sg, alloc_page(GFP_KERNEL));
                        err = -ENOMEM;
                        if (!sg_page(sg))
                                goto unlock;

                        err = memcpy_from_msg(page_address(sg_page(sg)),
                                              msg, plen);
                        if (err) {
                                __free_page(sg_page(sg));
                                sg_assign_page(sg, NULL);
                                goto unlock;
                        }

                        sg->offset = 0;
                        sg->length = plen;
                        len -= plen;
                        ctx->used += plen;
                        copied += plen;
                        sgl->cur++;
                        size -= plen;
                        ctx->merge = plen & (PAGE_SIZE - 1);
                }
        }

        err = 0;

        ctx->more = msg->msg_flags & MSG_MORE;
        if (!ctx->more && !aead_sufficient_data(ctx)) {
                aead_put_sgl(sk);
                err = -EMSGSIZE;
        }

unlock:
        aead_data_wakeup(sk);
        release_sock(sk);

        return err ?: copied;
}

static ssize_t aead_sendpage(struct socket *sock, struct page *page,
                             int offset, size_t size, int flags)
{
        struct sock *sk = sock->sk;
        struct alg_sock *ask = alg_sk(sk);
        struct aead_ctx *ctx = ask->private;
        struct aead_sg_list *sgl = &ctx->tsgl;
        int err = -EINVAL;

        if (flags & MSG_SENDPAGE_NOTLAST)
                flags |= MSG_MORE;

        if (sgl->cur >= ALG_MAX_PAGES)
                return -E2BIG;

        lock_sock(sk);
        if (!ctx->more && ctx->used)
                goto unlock;

        if (!size)
                goto done;

        if (!aead_writable(sk)) {
                /* user space sent too much data */
                aead_put_sgl(sk);
                err = -EMSGSIZE;
                goto unlock;
        }

        ctx->merge = 0;

        get_page(page);
        sg_set_page(sgl->sg + sgl->cur, page, size, offset);
        sgl->cur++;
        ctx->used += size;

        err = 0;

done:
        ctx->more = flags & MSG_MORE;
        if (!ctx->more && !aead_sufficient_data(ctx)) {
                aead_put_sgl(sk);
                err = -EMSGSIZE;
        }

unlock:
        aead_data_wakeup(sk);
        release_sock(sk);

        return err ?: size;
}

#define GET_ASYM_REQ(req, tfm) (struct aead_async_req *) \
                ((char *)req + sizeof(struct aead_request) + \
                 crypto_aead_reqsize(tfm))

#define GET_REQ_SIZE(tfm) sizeof(struct aead_async_req) + \
        crypto_aead_reqsize(tfm) + crypto_aead_ivsize(tfm) + \
        sizeof(struct aead_request)
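
/*
 * Memory layout of the buffer sized by GET_REQ_SIZE() and addressed via
 * GET_ASYM_REQ() (descriptive note added for clarity):
 *
 *   struct aead_request | tfm-private request context (crypto_aead_reqsize())
 *   | struct aead_async_req, whose trailing iv[] flexible array occupies the
 *   final crypto_aead_ivsize() bytes
 */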

static void aead_async_cb(struct crypto_async_request *_req, int err)
{
        struct sock *sk = _req->data;
        struct alg_sock *ask = alg_sk(sk);
        struct aead_ctx *ctx = ask->private;
        struct crypto_aead *tfm = crypto_aead_reqtfm(&ctx->aead_req);
        struct aead_request *req = aead_request_cast(_req);
        struct aead_async_req *areq = GET_ASYM_REQ(req, tfm);
        struct scatterlist *sg = areq->tsgl;
        struct aead_async_rsgl *rsgl;
        struct kiocb *iocb = areq->iocb;
        unsigned int i, reqlen = GET_REQ_SIZE(tfm);

        list_for_each_entry(rsgl, &areq->list, list) {
                af_alg_free_sg(&rsgl->sgl);
                if (rsgl != &areq->first_rsgl)
                        sock_kfree_s(sk, rsgl, sizeof(*rsgl));
        }

        for (i = 0; i < areq->tsgls; i++)
                put_page(sg_page(sg + i));

        sock_kfree_s(sk, areq->tsgl, sizeof(*areq->tsgl) * areq->tsgls);
        sock_kfree_s(sk, req, reqlen);
        __sock_put(sk);
        iocb->ki_complete(iocb, err, err);
}

static int aead_recvmsg_async(struct socket *sock, struct msghdr *msg,
                              int flags)
{
        struct sock *sk = sock->sk;
        struct alg_sock *ask = alg_sk(sk);
        struct aead_ctx *ctx = ask->private;
        struct crypto_aead *tfm = crypto_aead_reqtfm(&ctx->aead_req);
        struct aead_async_req *areq;
        struct aead_request *req = NULL;
        struct aead_sg_list *sgl = &ctx->tsgl;
        struct aead_async_rsgl *last_rsgl = NULL, *rsgl;
        unsigned int as = crypto_aead_authsize(tfm);
        unsigned int i, reqlen = GET_REQ_SIZE(tfm);
        int err = -ENOMEM;
        unsigned long used;
        size_t outlen = 0;
        size_t usedpages = 0;

        lock_sock(sk);
        if (ctx->more) {
                err = aead_wait_for_data(sk, flags);
                if (err)
                        goto unlock;
        }

        if (!aead_sufficient_data(ctx))
                goto unlock;

        used = ctx->used;
        if (ctx->enc)
                outlen = used + as;
        else
                outlen = used - as;

        req = sock_kmalloc(sk, reqlen, GFP_KERNEL);
        if (unlikely(!req))
                goto unlock;

        areq = GET_ASYM_REQ(req, tfm);
        memset(&areq->first_rsgl, '\0', sizeof(areq->first_rsgl));
        INIT_LIST_HEAD(&areq->list);
        areq->iocb = msg->msg_iocb;
        memcpy(areq->iv, ctx->iv, crypto_aead_ivsize(tfm));
        aead_request_set_tfm(req, tfm);
        aead_request_set_ad(req, ctx->aead_assoclen);
        aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                  aead_async_cb, sk);
        used -= ctx->aead_assoclen;

        /* take over all tx sgls from ctx */
        areq->tsgl = sock_kmalloc(sk,
                                  sizeof(*areq->tsgl) * max_t(u32, sgl->cur, 1),
                                  GFP_KERNEL);
        if (unlikely(!areq->tsgl))
                goto free;

        sg_init_table(areq->tsgl, max_t(u32, sgl->cur, 1));
        for (i = 0; i < sgl->cur; i++)
                sg_set_page(&areq->tsgl[i], sg_page(&sgl->sg[i]),
                            sgl->sg[i].length, sgl->sg[i].offset);

        areq->tsgls = sgl->cur;

        /* create rx sgls */
        while (outlen > usedpages && iov_iter_count(&msg->msg_iter)) {
                size_t seglen = min_t(size_t, iov_iter_count(&msg->msg_iter),
                                      (outlen - usedpages));

                if (list_empty(&areq->list)) {
                        rsgl = &areq->first_rsgl;
                } else {
                        rsgl = sock_kmalloc(sk, sizeof(*rsgl), GFP_KERNEL);
                        if (unlikely(!rsgl)) {
                                err = -ENOMEM;
                                goto free;
                        }
                }
                rsgl->sgl.npages = 0;
                list_add_tail(&rsgl->list, &areq->list);

                /* make one iovec available as scatterlist */
                err = af_alg_make_sg(&rsgl->sgl, &msg->msg_iter, seglen);
                if (err < 0)
                        goto free;

                usedpages += err;

                /* chain the new scatterlist with previous one */
                if (last_rsgl)
                        af_alg_link_sg(&last_rsgl->sgl, &rsgl->sgl);

                last_rsgl = rsgl;

                iov_iter_advance(&msg->msg_iter, err);
        }

        /* ensure output buffer is sufficiently large */
        if (usedpages < outlen) {
                err = -EINVAL;
                /* release the areq/tsgl/rsgl state allocated above */
                goto free;
        }

        aead_request_set_crypt(req, areq->tsgl, areq->first_rsgl.sgl.sg, used,
                               areq->iv);
        err = ctx->enc ? crypto_aead_encrypt(req) : crypto_aead_decrypt(req);
        if (err) {
                if (err == -EINPROGRESS) {
                        sock_hold(sk);
                        err = -EIOCBQUEUED;
                        aead_reset_ctx(ctx);
                        goto unlock;
                } else if (err == -EBADMSG) {
                        aead_put_sgl(sk);
                }
                goto free;
        }
        aead_put_sgl(sk);

free:
        list_for_each_entry(rsgl, &areq->list, list) {
                af_alg_free_sg(&rsgl->sgl);
                if (rsgl != &areq->first_rsgl)
                        sock_kfree_s(sk, rsgl, sizeof(*rsgl));
        }
        if (areq->tsgl)
                sock_kfree_s(sk, areq->tsgl, sizeof(*areq->tsgl) * areq->tsgls);
        if (req)
                sock_kfree_s(sk, req, reqlen);
unlock:
        aead_wmem_wakeup(sk);
        release_sock(sk);
        return err ? err : outlen;
}

static int aead_recvmsg_sync(struct socket *sock, struct msghdr *msg, int flags)
{
        struct sock *sk = sock->sk;
        struct alg_sock *ask = alg_sk(sk);
        struct aead_ctx *ctx = ask->private;
        unsigned as = crypto_aead_authsize(crypto_aead_reqtfm(&ctx->aead_req));
        struct aead_sg_list *sgl = &ctx->tsgl;
        struct aead_async_rsgl *last_rsgl = NULL;
        struct aead_async_rsgl *rsgl, *tmp;
        int err = -EINVAL;
        unsigned long used = 0;
        size_t outlen = 0;
        size_t usedpages = 0;

        lock_sock(sk);

        /*
         * Please see documentation of aead_request_set_crypt for the
         * description of the AEAD memory structure expected from the caller.
         */
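        /*
         * In brief (descriptive note added here; aead_request_set_crypt is
         * authoritative): the source scatterlist carries the associated
         * data, followed by the plaintext for encryption, or by the
         * ciphertext plus the authentication tag for decryption.
         */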
        if (ctx->more) {
                err = aead_wait_for_data(sk, flags);
                if (err)
                        goto unlock;
        }

        /* data length provided by caller via sendmsg/sendpage */
        used = ctx->used;

        /*
         * Make sure sufficient data is present -- note, the same check is
         * also present in sendmsg/sendpage. The checks in sendmsg/sendpage
         * inform the data sender that something went wrong, but they are
         * irrelevant for maintaining kernel integrity. We need the check
         * here too in case user space decides not to honor the error
         * returned by sendmsg/sendpage and still calls recvmsg. This check
         * protects the kernel integrity.
         */
        if (!aead_sufficient_data(ctx))
                goto unlock;

        /*
         * Calculate the minimum output buffer size holding the result of the
         * cipher operation. When encrypting data, the receiving buffer is
         * larger by the tag length compared to the input buffer as the
         * encryption operation generates the tag. For decryption, the input
         * buffer provides the tag which is consumed, so only the plaintext
         * is returned to the caller, without a buffer for the tag.
         */
        if (ctx->enc)
                outlen = used + as;
        else
                outlen = used - as;
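
        /*
         * Worked example (note added for clarity): with ctx->used = 32 and a
         * 16-byte tag, encryption requires outlen = 32 + 16 = 48 output
         * bytes, while decryption of the same input yields outlen =
         * 32 - 16 = 16 bytes. The associated data is still counted in "used"
         * at this point and is only subtracted below.
         */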

        /*
         * The cipher operation input data is reduced by the associated data
         * length as this data is processed separately later on.
         */
        used -= ctx->aead_assoclen;

        /* convert iovecs of output buffers into scatterlists */
        while (outlen > usedpages && iov_iter_count(&msg->msg_iter)) {
                size_t seglen = min_t(size_t, iov_iter_count(&msg->msg_iter),
                                      (outlen - usedpages));

                if (list_empty(&ctx->list)) {
                        rsgl = &ctx->first_rsgl;
                } else {
                        rsgl = sock_kmalloc(sk, sizeof(*rsgl), GFP_KERNEL);
                        if (unlikely(!rsgl)) {
                                err = -ENOMEM;
                                goto unlock;
                        }
                }
                rsgl->sgl.npages = 0;
                list_add_tail(&rsgl->list, &ctx->list);

                /* make one iovec available as scatterlist */
                err = af_alg_make_sg(&rsgl->sgl, &msg->msg_iter, seglen);
                if (err < 0)
                        goto unlock;

                usedpages += err;

                /* chain the new scatterlist with previous one */
                if (last_rsgl)
                        af_alg_link_sg(&last_rsgl->sgl, &rsgl->sgl);

                last_rsgl = rsgl;

                iov_iter_advance(&msg->msg_iter, err);
        }

        /* ensure output buffer is sufficiently large */
        if (usedpages < outlen) {
                err = -EINVAL;
                goto unlock;
        }

        sg_mark_end(sgl->sg + sgl->cur - 1);
        aead_request_set_crypt(&ctx->aead_req, sgl->sg, ctx->first_rsgl.sgl.sg,
                               used, ctx->iv);
        aead_request_set_ad(&ctx->aead_req, ctx->aead_assoclen);

        err = af_alg_wait_for_completion(ctx->enc ?
                                         crypto_aead_encrypt(&ctx->aead_req) :
                                         crypto_aead_decrypt(&ctx->aead_req),
                                         &ctx->completion);

        if (err) {
                /* EBADMSG implies a valid cipher operation took place */
                if (err == -EBADMSG)
                        aead_put_sgl(sk);

                goto unlock;
        }

        aead_put_sgl(sk);
        err = 0;

unlock:
        list_for_each_entry_safe(rsgl, tmp, &ctx->list, list) {
                af_alg_free_sg(&rsgl->sgl);
                if (rsgl != &ctx->first_rsgl)
                        sock_kfree_s(sk, rsgl, sizeof(*rsgl));
                list_del(&rsgl->list);
        }
        INIT_LIST_HEAD(&ctx->list);
        aead_wmem_wakeup(sk);
        release_sock(sk);

        return err ? err : outlen;
}

static int aead_recvmsg(struct socket *sock, struct msghdr *msg, size_t ignored,
                        int flags)
{
        return (msg->msg_iocb && !is_sync_kiocb(msg->msg_iocb)) ?
                aead_recvmsg_async(sock, msg, flags) :
                aead_recvmsg_sync(sock, msg, flags);
}

static unsigned int aead_poll(struct file *file, struct socket *sock,
                              poll_table *wait)
{
        struct sock *sk = sock->sk;
        struct alg_sock *ask = alg_sk(sk);
        struct aead_ctx *ctx = ask->private;
        unsigned int mask;

        sock_poll_wait(file, sk_sleep(sk), wait);
        mask = 0;

        if (!ctx->more)
                mask |= POLLIN | POLLRDNORM;

        if (aead_writable(sk))
                mask |= POLLOUT | POLLWRNORM | POLLWRBAND;

        return mask;
}

static struct proto_ops algif_aead_ops = {
        .family         = PF_ALG,

        .connect        = sock_no_connect,
        .socketpair     = sock_no_socketpair,
        .getname        = sock_no_getname,
        .ioctl          = sock_no_ioctl,
        .listen         = sock_no_listen,
        .shutdown       = sock_no_shutdown,
        .getsockopt     = sock_no_getsockopt,
        .mmap           = sock_no_mmap,
        .bind           = sock_no_bind,
        .accept         = sock_no_accept,
        .setsockopt     = sock_no_setsockopt,

        .release        = af_alg_release,
        .sendmsg        = aead_sendmsg,
        .sendpage       = aead_sendpage,
        .recvmsg        = aead_recvmsg,
        .poll           = aead_poll,
};

static void *aead_bind(const char *name, u32 type, u32 mask)
{
        return crypto_alloc_aead(name, type, mask);
}

static void aead_release(void *private)
{
        crypto_free_aead(private);
}

static int aead_setauthsize(void *private, unsigned int authsize)
{
        return crypto_aead_setauthsize(private, authsize);
}

static int aead_setkey(void *private, const u8 *key, unsigned int keylen)
{
        return crypto_aead_setkey(private, key, keylen);
}

static void aead_sock_destruct(struct sock *sk)
{
        struct alg_sock *ask = alg_sk(sk);
        struct aead_ctx *ctx = ask->private;
        unsigned int ivlen = crypto_aead_ivsize(
                                crypto_aead_reqtfm(&ctx->aead_req));

        WARN_ON(atomic_read(&sk->sk_refcnt) != 0);
        aead_put_sgl(sk);
        sock_kzfree_s(sk, ctx->iv, ivlen);
        sock_kfree_s(sk, ctx, ctx->len);
        af_alg_release_parent(sk);
}

static int aead_accept_parent(void *private, struct sock *sk)
{
        struct aead_ctx *ctx;
        struct alg_sock *ask = alg_sk(sk);
        unsigned int len = sizeof(*ctx) + crypto_aead_reqsize(private);
        unsigned int ivlen = crypto_aead_ivsize(private);

        ctx = sock_kmalloc(sk, len, GFP_KERNEL);
        if (!ctx)
                return -ENOMEM;
        memset(ctx, 0, len);

        ctx->iv = sock_kmalloc(sk, ivlen, GFP_KERNEL);
        if (!ctx->iv) {
                sock_kfree_s(sk, ctx, len);
                return -ENOMEM;
        }
        memset(ctx->iv, 0, ivlen);

        ctx->len = len;
        ctx->used = 0;
        ctx->more = 0;
        ctx->merge = 0;
        ctx->enc = 0;
        ctx->tsgl.cur = 0;
        ctx->aead_assoclen = 0;
        af_alg_init_completion(&ctx->completion);
        sg_init_table(ctx->tsgl.sg, ALG_MAX_PAGES);
        INIT_LIST_HEAD(&ctx->list);

        ask->private = ctx;

        aead_request_set_tfm(&ctx->aead_req, private);
        aead_request_set_callback(&ctx->aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                  af_alg_complete, &ctx->completion);

        sk->sk_destruct = aead_sock_destruct;

        return 0;
}

static const struct af_alg_type algif_type_aead = {
        .bind           = aead_bind,
        .release        = aead_release,
        .setkey         = aead_setkey,
        .setauthsize    = aead_setauthsize,
        .accept         = aead_accept_parent,
        .ops            = &algif_aead_ops,
        .name           = "aead",
        .owner          = THIS_MODULE
};

static int __init algif_aead_init(void)
{
        return af_alg_register_type(&algif_type_aead);
}

static void __exit algif_aead_exit(void)
{
        int err = af_alg_unregister_type(&algif_type_aead);
        BUG_ON(err);
}

module_init(algif_aead_init);
module_exit(algif_aead_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Stephan Mueller <smueller@chronox.de>");
MODULE_DESCRIPTION("AEAD kernel crypto API user space interface");
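
For reference, a minimal user-space sketch of how this interface is driven: bind an AF_ALG socket to the "aead" type, set the key on the transform socket, accept an operation socket, pass the operation, IV, and associated-data length as control messages with sendmsg(), then read the result. It assumes a kernel providing AF_ALG with this aead type and the gcm(aes) cipher; the zero key/IV, the 8-byte AAD, and the buffer sizes are illustrative, and error handling is omitted.

#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/if_alg.h>

#ifndef SOL_ALG
#define SOL_ALG 279
#endif

int main(void)
{
        struct sockaddr_alg sa = {
                .salg_family = AF_ALG,
                .salg_type   = "aead",
                .salg_name   = "gcm(aes)",
        };
        unsigned char key[16] = { 0 };          /* illustrative all-zero key */
        unsigned char in[8 + 16] = { 0 };       /* AAD || plaintext */
        unsigned char out[8 + 16 + 16];         /* AAD || ciphertext || tag */
        char cbuf[CMSG_SPACE(4) + CMSG_SPACE(16) + CMSG_SPACE(4)] = { 0 };
        struct msghdr msg = { 0 };
        struct cmsghdr *cmsg;
        struct af_alg_iv *iv;
        struct iovec iov;
        int tfmfd, opfd;

        tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
        bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));
        setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, key, sizeof(key));
        opfd = accept(tfmfd, NULL, 0);

        msg.msg_control = cbuf;
        msg.msg_controllen = sizeof(cbuf);

        /* select the cipher operation: encryption */
        cmsg = CMSG_FIRSTHDR(&msg);
        cmsg->cmsg_level = SOL_ALG;
        cmsg->cmsg_type = ALG_SET_OP;
        cmsg->cmsg_len = CMSG_LEN(4);
        *(__u32 *)CMSG_DATA(cmsg) = ALG_OP_ENCRYPT;

        /* 12-byte IV, must match crypto_aead_ivsize() of gcm(aes) */
        cmsg = CMSG_NXTHDR(&msg, cmsg);
        cmsg->cmsg_level = SOL_ALG;
        cmsg->cmsg_type = ALG_SET_IV;
        cmsg->cmsg_len = CMSG_LEN(sizeof(*iv) + 12);
        iv = (struct af_alg_iv *)CMSG_DATA(cmsg);
        iv->ivlen = 12;
        memset(iv->iv, 0, 12);

        /* the first 8 bytes of the payload are associated data */
        cmsg = CMSG_NXTHDR(&msg, cmsg);
        cmsg->cmsg_level = SOL_ALG;
        cmsg->cmsg_type = ALG_SET_AEAD_ASSOCLEN;
        cmsg->cmsg_len = CMSG_LEN(4);
        *(__u32 *)CMSG_DATA(cmsg) = 8;

        iov.iov_base = in;
        iov.iov_len = sizeof(in);
        msg.msg_iov = &iov;
        msg.msg_iovlen = 1;

        sendmsg(opfd, &msg, 0);                 /* queue AAD and plaintext */
        read(opfd, out, sizeof(out));           /* run encrypt, fetch result */

        close(opfd);
        close(tfmfd);
        return 0;
}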