tls_sw.c

/*
 * Copyright (c) 2016-2017, Mellanox Technologies. All rights reserved.
 * Copyright (c) 2016-2017, Dave Watson <davejwatson@fb.com>. All rights reserved.
 * Copyright (c) 2016-2017, Lance Chao <lancerchao@fb.com>. All rights reserved.
 * Copyright (c) 2016, Fridolin Pokorny <fridolin.pokorny@gmail.com>. All rights reserved.
 * Copyright (c) 2016, Nikos Mavrogiannopoulos <nmav@gnutls.org>. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/sched/signal.h>
#include <linux/module.h>
#include <crypto/aead.h>

#include <net/strparser.h>
#include <net/tls.h>

#define MAX_IV_SIZE	TLS_CIPHER_AES_GCM_128_IV_SIZE

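/* Decrypt one TLS record: run the AEAD decrypt over the given
 * scatterlists and wait synchronously for the result.
 */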
static int tls_do_decryption(struct sock *sk,
			     struct scatterlist *sgin,
			     struct scatterlist *sgout,
			     char *iv_recv,
			     size_t data_len,
			     struct sk_buff *skb,
			     gfp_t flags)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct aead_request *aead_req;
	int ret;

	aead_req = aead_request_alloc(ctx->aead_recv, flags);
	if (!aead_req)
		return -ENOMEM;

	aead_request_set_ad(aead_req, TLS_AAD_SPACE_SIZE);
	aead_request_set_crypt(aead_req, sgin, sgout,
			       data_len + tls_ctx->rx.tag_size,
			       (u8 *)iv_recv);
	aead_request_set_callback(aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  crypto_req_done, &ctx->async_wait);

	ret = crypto_wait_req(crypto_aead_decrypt(aead_req), &ctx->async_wait);

	aead_request_free(aead_req);
	return ret;
}

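/* Trim a scatterlist back to target_size bytes, dropping page references
 * and socket memory charges for everything beyond the target.
 */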
static void trim_sg(struct sock *sk, struct scatterlist *sg,
		    int *sg_num_elem, unsigned int *sg_size, int target_size)
{
	int i = *sg_num_elem - 1;
	int trim = *sg_size - target_size;

	if (trim <= 0) {
		WARN_ON(trim < 0);
		return;
	}

	*sg_size = target_size;
	while (trim >= sg[i].length) {
		trim -= sg[i].length;
		sk_mem_uncharge(sk, sg[i].length);
		put_page(sg_page(&sg[i]));
		i--;

		if (i < 0)
			goto out;
	}

	sg[i].length -= trim;
	sk_mem_uncharge(sk, trim);

out:
	*sg_num_elem = i + 1;
}

static void trim_both_sgl(struct sock *sk, int target_size)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);

	trim_sg(sk, ctx->sg_plaintext_data,
		&ctx->sg_plaintext_num_elem,
		&ctx->sg_plaintext_size,
		target_size);

	if (target_size > 0)
		target_size += tls_ctx->tx.overhead_size;

	trim_sg(sk, ctx->sg_encrypted_data,
		&ctx->sg_encrypted_num_elem,
		&ctx->sg_encrypted_size,
		target_size);
}

static int alloc_encrypted_sg(struct sock *sk, int len)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	int rc = 0;

	rc = sk_alloc_sg(sk, len,
			 ctx->sg_encrypted_data, 0,
			 &ctx->sg_encrypted_num_elem,
			 &ctx->sg_encrypted_size, 0);

	return rc;
}

static int alloc_plaintext_sg(struct sock *sk, int len)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	int rc = 0;

	rc = sk_alloc_sg(sk, len, ctx->sg_plaintext_data, 0,
			 &ctx->sg_plaintext_num_elem, &ctx->sg_plaintext_size,
			 tls_ctx->pending_open_record_frags);

	return rc;
}

static void free_sg(struct sock *sk, struct scatterlist *sg,
		    int *sg_num_elem, unsigned int *sg_size)
{
	int i, n = *sg_num_elem;

	for (i = 0; i < n; ++i) {
		sk_mem_uncharge(sk, sg[i].length);
		put_page(sg_page(&sg[i]));
	}
	*sg_num_elem = 0;
	*sg_size = 0;
}

static void tls_free_both_sg(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);

	free_sg(sk, ctx->sg_encrypted_data, &ctx->sg_encrypted_num_elem,
		&ctx->sg_encrypted_size);

	free_sg(sk, ctx->sg_plaintext_data, &ctx->sg_plaintext_num_elem,
		&ctx->sg_plaintext_size);
}

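/* Encrypt the current open record.  The first encrypted-data entry is
 * temporarily offset past the record header so the AEAD output lands
 * after the prepend that tls_push_record() already filled in.
 */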
static int tls_do_encryption(struct tls_context *tls_ctx,
			     struct tls_sw_context_tx *ctx,
			     struct aead_request *aead_req,
			     size_t data_len)
{
	int rc;

	ctx->sg_encrypted_data[0].offset += tls_ctx->tx.prepend_size;
	ctx->sg_encrypted_data[0].length -= tls_ctx->tx.prepend_size;

	aead_request_set_tfm(aead_req, ctx->aead_send);
	aead_request_set_ad(aead_req, TLS_AAD_SPACE_SIZE);
	aead_request_set_crypt(aead_req, ctx->sg_aead_in, ctx->sg_aead_out,
			       data_len, tls_ctx->tx.iv);

	aead_request_set_callback(aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  crypto_req_done, &ctx->async_wait);

	rc = crypto_wait_req(crypto_aead_encrypt(aead_req), &ctx->async_wait);

	ctx->sg_encrypted_data[0].offset -= tls_ctx->tx.prepend_size;
	ctx->sg_encrypted_data[0].length += tls_ctx->tx.prepend_size;

	return rc;
}

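/* Close the current open record: build the AAD and record header, encrypt
 * the plaintext scatterlist, push the ciphertext via tls_push_sg() and
 * advance the record sequence number.
 */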
static int tls_push_record(struct sock *sk, int flags,
			   unsigned char record_type)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct aead_request *req;
	int rc;

	req = aead_request_alloc(ctx->aead_send, sk->sk_allocation);
	if (!req)
		return -ENOMEM;

	sg_mark_end(ctx->sg_plaintext_data + ctx->sg_plaintext_num_elem - 1);
	sg_mark_end(ctx->sg_encrypted_data + ctx->sg_encrypted_num_elem - 1);

	tls_make_aad(ctx->aad_space, ctx->sg_plaintext_size,
		     tls_ctx->tx.rec_seq, tls_ctx->tx.rec_seq_size,
		     record_type);

	tls_fill_prepend(tls_ctx,
			 page_address(sg_page(&ctx->sg_encrypted_data[0])) +
			 ctx->sg_encrypted_data[0].offset,
			 ctx->sg_plaintext_size, record_type);

	tls_ctx->pending_open_record_frags = 0;
	set_bit(TLS_PENDING_CLOSED_RECORD, &tls_ctx->flags);

	rc = tls_do_encryption(tls_ctx, ctx, req, ctx->sg_plaintext_size);
	if (rc < 0) {
		/* If we are called from write_space and
		 * we fail, we need to set this SOCK_NOSPACE
		 * to trigger another write_space in the future.
		 */
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		goto out_req;
	}

	free_sg(sk, ctx->sg_plaintext_data, &ctx->sg_plaintext_num_elem,
		&ctx->sg_plaintext_size);

	ctx->sg_encrypted_num_elem = 0;
	ctx->sg_encrypted_size = 0;

	/* Only pass through MSG_DONTWAIT and MSG_NOSIGNAL flags */
	rc = tls_push_sg(sk, tls_ctx, ctx->sg_encrypted_data, 0, flags);
	if (rc < 0 && rc != -EAGAIN)
		tls_err_abort(sk, EBADMSG);

	tls_advance_record_sn(sk, &tls_ctx->tx);
out_req:
	aead_request_free(req);
	return rc;
}

static int tls_sw_push_pending_record(struct sock *sk, int flags)
{
	return tls_push_record(sk, flags, TLS_RECORD_TYPE_DATA);
}

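/* Pin the user pages backing an iov_iter and map up to 'length' bytes of
 * them directly into the destination scatterlist, avoiding a copy.  With
 * 'revert' set the iterator is rewound afterwards so the caller can still
 * fall back to copying the data.
 */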
static int zerocopy_from_iter(struct sock *sk, struct iov_iter *from,
			      int length, int *pages_used,
			      unsigned int *size_used,
			      struct scatterlist *to, int to_max_pages,
			      bool charge, bool revert)
{
	struct page *pages[MAX_SKB_FRAGS];

	size_t offset;
	ssize_t copied, use;
	int i = 0;
	unsigned int size = *size_used;
	int num_elem = *pages_used;
	int rc = 0;
	int maxpages;

	while (length > 0) {
		i = 0;
		maxpages = to_max_pages - num_elem;
		if (maxpages == 0) {
			rc = -EFAULT;
			goto out;
		}
		copied = iov_iter_get_pages(from, pages,
					    length,
					    maxpages, &offset);
		if (copied <= 0) {
			rc = -EFAULT;
			goto out;
		}

		iov_iter_advance(from, copied);

		length -= copied;
		size += copied;
		while (copied) {
			use = min_t(int, copied, PAGE_SIZE - offset);

			sg_set_page(&to[num_elem],
				    pages[i], use, offset);
			sg_unmark_end(&to[num_elem]);
			if (charge)
				sk_mem_charge(sk, use);

			offset = 0;
			copied -= use;

			++i;
			++num_elem;
		}
	}

out:
	*size_used = size;
	*pages_used = num_elem;
	if (revert)
		iov_iter_revert(from, size);
	return rc;
}

static int memcopy_from_iter(struct sock *sk, struct iov_iter *from,
			     int bytes)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct scatterlist *sg = ctx->sg_plaintext_data;
	int copy, i, rc = 0;

	for (i = tls_ctx->pending_open_record_frags;
	     i < ctx->sg_plaintext_num_elem; ++i) {
		copy = sg[i].length;
		if (copy_from_iter(
				page_address(sg_page(&sg[i])) + sg[i].offset,
				copy, from) != copy) {
			rc = -EFAULT;
			goto out;
		}
		bytes -= copy;

		++tls_ctx->pending_open_record_frags;

		if (!bytes)
			break;
	}

out:
	return rc;
}

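/* sendmsg() for a TLS_SW transmit socket.  Data is staged into the open
 * record's plaintext scatterlist (zero-copy from the user iov when a full
 * record or end-of-message allows it, otherwise copied), and the record
 * is encrypted and pushed whenever it fills up or the message ends.
 */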
int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	int ret = 0;
	int required_size;
	long timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
	bool eor = !(msg->msg_flags & MSG_MORE);
	size_t try_to_copy, copied = 0;
	unsigned char record_type = TLS_RECORD_TYPE_DATA;
	int record_room;
	bool full_record;
	int orig_size;

	if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL))
		return -ENOTSUPP;

	lock_sock(sk);

	if (tls_complete_pending_work(sk, tls_ctx, msg->msg_flags, &timeo))
		goto send_end;

	if (unlikely(msg->msg_controllen)) {
		ret = tls_proccess_cmsg(sk, msg, &record_type);
		if (ret)
			goto send_end;
	}

	while (msg_data_left(msg)) {
		if (sk->sk_err) {
			ret = -sk->sk_err;
			goto send_end;
		}

		orig_size = ctx->sg_plaintext_size;
		full_record = false;
		try_to_copy = msg_data_left(msg);
		record_room = TLS_MAX_PAYLOAD_SIZE - ctx->sg_plaintext_size;
		if (try_to_copy >= record_room) {
			try_to_copy = record_room;
			full_record = true;
		}

		required_size = ctx->sg_plaintext_size + try_to_copy +
				tls_ctx->tx.overhead_size;

		if (!sk_stream_memory_free(sk))
			goto wait_for_sndbuf;
alloc_encrypted:
		ret = alloc_encrypted_sg(sk, required_size);
		if (ret) {
			if (ret != -ENOSPC)
				goto wait_for_memory;

			/* Adjust try_to_copy according to the amount that was
			 * actually allocated. The difference is due
			 * to max sg elements limit
			 */
			try_to_copy -= required_size - ctx->sg_encrypted_size;
			full_record = true;
		}

		if (full_record || eor) {
			ret = zerocopy_from_iter(sk, &msg->msg_iter,
				try_to_copy, &ctx->sg_plaintext_num_elem,
				&ctx->sg_plaintext_size,
				ctx->sg_plaintext_data,
				ARRAY_SIZE(ctx->sg_plaintext_data),
				true, false);
			if (ret)
				goto fallback_to_reg_send;

			copied += try_to_copy;
			ret = tls_push_record(sk, msg->msg_flags, record_type);
			if (!ret)
				continue;
			if (ret < 0)
				goto send_end;

			copied -= try_to_copy;
fallback_to_reg_send:
			iov_iter_revert(&msg->msg_iter,
					ctx->sg_plaintext_size - orig_size);
			trim_sg(sk, ctx->sg_plaintext_data,
				&ctx->sg_plaintext_num_elem,
				&ctx->sg_plaintext_size,
				orig_size);
		}

		required_size = ctx->sg_plaintext_size + try_to_copy;
alloc_plaintext:
		ret = alloc_plaintext_sg(sk, required_size);
		if (ret) {
			if (ret != -ENOSPC)
				goto wait_for_memory;

			/* Adjust try_to_copy according to the amount that was
			 * actually allocated. The difference is due
			 * to max sg elements limit
			 */
			try_to_copy -= required_size - ctx->sg_plaintext_size;
			full_record = true;

			trim_sg(sk, ctx->sg_encrypted_data,
				&ctx->sg_encrypted_num_elem,
				&ctx->sg_encrypted_size,
				ctx->sg_plaintext_size +
				tls_ctx->tx.overhead_size);
		}

		ret = memcopy_from_iter(sk, &msg->msg_iter, try_to_copy);
		if (ret)
			goto trim_sgl;

		copied += try_to_copy;
		if (full_record || eor) {
push_record:
			ret = tls_push_record(sk, msg->msg_flags, record_type);
			if (ret) {
				if (ret == -ENOMEM)
					goto wait_for_memory;

				goto send_end;
			}
		}

		continue;

wait_for_sndbuf:
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
		ret = sk_stream_wait_memory(sk, &timeo);
		if (ret) {
trim_sgl:
			trim_both_sgl(sk, orig_size);
			goto send_end;
		}

		if (tls_is_pending_closed_record(tls_ctx))
			goto push_record;

		if (ctx->sg_encrypted_size < required_size)
			goto alloc_encrypted;

		goto alloc_plaintext;
	}

send_end:
	ret = sk_stream_error(sk, msg->msg_flags, ret);

	release_sock(sk);
	return copied ? copied : ret;
}

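/* sendpage() for a TLS_SW transmit socket: take a reference on the
 * caller's page directly into the open record's plaintext scatterlist and
 * push the record when it is full or no more data is expected.
 */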
int tls_sw_sendpage(struct sock *sk, struct page *page,
		    int offset, size_t size, int flags)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	int ret = 0;
	long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
	bool eor;
	size_t orig_size = size;
	unsigned char record_type = TLS_RECORD_TYPE_DATA;
	struct scatterlist *sg;
	bool full_record;
	int record_room;

	if (flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
		      MSG_SENDPAGE_NOTLAST))
		return -ENOTSUPP;

	/* No MSG_EOR from splice, only look at MSG_MORE */
	eor = !(flags & (MSG_MORE | MSG_SENDPAGE_NOTLAST));

	lock_sock(sk);

	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);

	if (tls_complete_pending_work(sk, tls_ctx, flags, &timeo))
		goto sendpage_end;

	/* Call the sk_stream functions to manage the sndbuf mem. */
	while (size > 0) {
		size_t copy, required_size;

		if (sk->sk_err) {
			ret = -sk->sk_err;
			goto sendpage_end;
		}

		full_record = false;
		record_room = TLS_MAX_PAYLOAD_SIZE - ctx->sg_plaintext_size;
		copy = size;
		if (copy >= record_room) {
			copy = record_room;
			full_record = true;
		}
		required_size = ctx->sg_plaintext_size + copy +
				tls_ctx->tx.overhead_size;

		if (!sk_stream_memory_free(sk))
			goto wait_for_sndbuf;
alloc_payload:
		ret = alloc_encrypted_sg(sk, required_size);
		if (ret) {
			if (ret != -ENOSPC)
				goto wait_for_memory;

			/* Adjust copy according to the amount that was
			 * actually allocated. The difference is due
			 * to max sg elements limit
			 */
			copy -= required_size - ctx->sg_plaintext_size;
			full_record = true;
		}

		get_page(page);
		sg = ctx->sg_plaintext_data + ctx->sg_plaintext_num_elem;
		sg_set_page(sg, page, copy, offset);
		sg_unmark_end(sg);

		ctx->sg_plaintext_num_elem++;

		sk_mem_charge(sk, copy);
		offset += copy;
		size -= copy;
		ctx->sg_plaintext_size += copy;
		tls_ctx->pending_open_record_frags = ctx->sg_plaintext_num_elem;

		if (full_record || eor ||
		    ctx->sg_plaintext_num_elem ==
		    ARRAY_SIZE(ctx->sg_plaintext_data)) {
push_record:
			ret = tls_push_record(sk, flags, record_type);
			if (ret) {
				if (ret == -ENOMEM)
					goto wait_for_memory;

				goto sendpage_end;
			}
		}
		continue;
wait_for_sndbuf:
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
		ret = sk_stream_wait_memory(sk, &timeo);
		if (ret) {
			trim_both_sgl(sk, ctx->sg_plaintext_size);
			goto sendpage_end;
		}

		if (tls_is_pending_closed_record(tls_ctx))
			goto push_record;

		goto alloc_payload;
	}

sendpage_end:
	if (orig_size > size)
		ret = orig_size - size;
	else
		ret = sk_stream_error(sk, flags, ret);

	release_sock(sk);
	return ret;
}

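/* Wait until the strparser has queued a complete TLS record, or the
 * socket reports an error, shutdown or timeout, and return it.
 */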
static struct sk_buff *tls_wait_data(struct sock *sk, int flags,
				     long timeo, int *err)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct sk_buff *skb;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	while (!(skb = ctx->recv_pkt)) {
		if (sk->sk_err) {
			*err = sock_error(sk);
			return NULL;
		}

		if (sk->sk_shutdown & RCV_SHUTDOWN)
			return NULL;

		if (sock_flag(sk, SOCK_DONE))
			return NULL;

		if ((flags & MSG_DONTWAIT) || !timeo) {
			*err = -EAGAIN;
			return NULL;
		}

		add_wait_queue(sk_sleep(sk), &wait);
		sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
		sk_wait_event(sk, &timeo, ctx->recv_pkt != skb, &wait);
		sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
		remove_wait_queue(sk_sleep(sk), &wait);

		/* Handle signals */
		if (signal_pending(current)) {
			*err = sock_intr_errno(timeo);
			return NULL;
		}
	}

	return skb;
}

static int decrypt_skb_update(struct sock *sk, struct sk_buff *skb,
			      struct scatterlist *sgout, bool *zc)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct strp_msg *rxm = strp_msg(skb);
	int err = 0;

#ifdef CONFIG_TLS_DEVICE
	err = tls_device_decrypted(sk, skb);
	if (err < 0)
		return err;
#endif
	if (!ctx->decrypted) {
		err = decrypt_skb(sk, skb, sgout);
		if (err < 0)
			return err;
	} else {
		*zc = false;
	}

	rxm->offset += tls_ctx->rx.prepend_size;
	rxm->full_len -= tls_ctx->rx.overhead_size;
	tls_advance_record_sn(sk, &tls_ctx->rx);
	ctx->decrypted = true;
	ctx->saved_data_ready(sk);

	return err;
}

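/* Decrypt a single TLS record held in an skb.  If the caller supplied no
 * destination scatterlist, decrypt in place over the skb's own pages.
 */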
int decrypt_skb(struct sock *sk, struct sk_buff *skb,
		struct scatterlist *sgout)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	char iv[TLS_CIPHER_AES_GCM_128_SALT_SIZE + MAX_IV_SIZE];
	struct scatterlist sgin_arr[MAX_SKB_FRAGS + 2];
	struct scatterlist *sgin = &sgin_arr[0];
	struct strp_msg *rxm = strp_msg(skb);
	int ret, nsg = ARRAY_SIZE(sgin_arr);
	struct sk_buff *unused;

	ret = skb_copy_bits(skb, rxm->offset + TLS_HEADER_SIZE,
			    iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
			    tls_ctx->rx.iv_size);
	if (ret < 0)
		return ret;

	memcpy(iv, tls_ctx->rx.iv, TLS_CIPHER_AES_GCM_128_SALT_SIZE);
	if (!sgout) {
		nsg = skb_cow_data(skb, 0, &unused) + 1;
		sgin = kmalloc_array(nsg, sizeof(*sgin), sk->sk_allocation);
		/* Bail out if the fallback scatterlist could not be allocated */
		if (!sgin)
			return -ENOMEM;
		sgout = sgin;
	}

	sg_init_table(sgin, nsg);
	sg_set_buf(&sgin[0], ctx->rx_aad_ciphertext, TLS_AAD_SPACE_SIZE);

	nsg = skb_to_sgvec(skb, &sgin[1],
			   rxm->offset + tls_ctx->rx.prepend_size,
			   rxm->full_len - tls_ctx->rx.prepend_size);
	if (nsg < 0) {
		ret = nsg;
		goto out;
	}

	tls_make_aad(ctx->rx_aad_ciphertext,
		     rxm->full_len - tls_ctx->rx.overhead_size,
		     tls_ctx->rx.rec_seq,
		     tls_ctx->rx.rec_seq_size,
		     ctx->control);

	ret = tls_do_decryption(sk, sgin, sgout, iv,
				rxm->full_len - tls_ctx->rx.overhead_size,
				skb, sk->sk_allocation);

out:
	if (sgin != &sgin_arr[0])
		kfree(sgin);

	return ret;
}

static bool tls_sw_advance_skb(struct sock *sk, struct sk_buff *skb,
			       unsigned int len)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct strp_msg *rxm = strp_msg(skb);

	if (len < rxm->full_len) {
		rxm->offset += len;
		rxm->full_len -= len;

		return false;
	}

	/* Finished with message */
	ctx->recv_pkt = NULL;
	kfree_skb(skb);
	__strp_unpause(&ctx->strp);

	return true;
}

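/* recvmsg() for a TLS_SW receive socket.  Each record handed over by the
 * strparser is decrypted (zero-copy into the user iov when the whole
 * record fits and MSG_PEEK is not set, otherwise in place) and its
 * payload copied out; the record type is reported through a
 * TLS_GET_RECORD_TYPE control message.
 */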
int tls_sw_recvmsg(struct sock *sk,
		   struct msghdr *msg,
		   size_t len,
		   int nonblock,
		   int flags,
		   int *addr_len)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	unsigned char control;
	struct strp_msg *rxm;
	struct sk_buff *skb;
	ssize_t copied = 0;
	bool cmsg = false;
	int target, err = 0;
	long timeo;

	flags |= nonblock;

	if (unlikely(flags & MSG_ERRQUEUE))
		return sock_recv_errqueue(sk, msg, len, SOL_IP, IP_RECVERR);

	lock_sock(sk);

	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
	do {
		bool zc = false;
		int chunk = 0;

		skb = tls_wait_data(sk, flags, timeo, &err);
		if (!skb)
			goto recv_end;

		rxm = strp_msg(skb);
		if (!cmsg) {
			int cerr;

			cerr = put_cmsg(msg, SOL_TLS, TLS_GET_RECORD_TYPE,
					sizeof(ctx->control), &ctx->control);
			cmsg = true;
			control = ctx->control;
			if (ctx->control != TLS_RECORD_TYPE_DATA) {
				if (cerr || msg->msg_flags & MSG_CTRUNC) {
					err = -EIO;
					goto recv_end;
				}
			}
		} else if (control != ctx->control) {
			goto recv_end;
		}

		if (!ctx->decrypted) {
			int page_count;
			int to_copy;

			page_count = iov_iter_npages(&msg->msg_iter,
						     MAX_SKB_FRAGS);
			to_copy = rxm->full_len - tls_ctx->rx.overhead_size;
			if (to_copy <= len && page_count < MAX_SKB_FRAGS &&
			    likely(!(flags & MSG_PEEK))) {
				struct scatterlist sgin[MAX_SKB_FRAGS + 1];
				int pages = 0;

				zc = true;
				sg_init_table(sgin, MAX_SKB_FRAGS + 1);
				sg_set_buf(&sgin[0], ctx->rx_aad_plaintext,
					   TLS_AAD_SPACE_SIZE);

				err = zerocopy_from_iter(sk, &msg->msg_iter,
							 to_copy, &pages,
							 &chunk, &sgin[1],
							 MAX_SKB_FRAGS, false, true);
				if (err < 0)
					goto fallback_to_reg_recv;

				err = decrypt_skb_update(sk, skb, sgin, &zc);
				for (; pages > 0; pages--)
					put_page(sg_page(&sgin[pages]));
				if (err < 0) {
					tls_err_abort(sk, EBADMSG);
					goto recv_end;
				}
			} else {
fallback_to_reg_recv:
				err = decrypt_skb_update(sk, skb, NULL, &zc);
				if (err < 0) {
					tls_err_abort(sk, EBADMSG);
					goto recv_end;
				}
			}
			ctx->decrypted = true;
		}

		if (!zc) {
			chunk = min_t(unsigned int, rxm->full_len, len);
			err = skb_copy_datagram_msg(skb, rxm->offset, msg,
						    chunk);
			if (err < 0)
				goto recv_end;
		}

		copied += chunk;
		len -= chunk;
		if (likely(!(flags & MSG_PEEK))) {
			u8 control = ctx->control;

			if (tls_sw_advance_skb(sk, skb, chunk)) {
				/* Return full control message to
				 * userspace before trying to parse
				 * another message type
				 */
				msg->msg_flags |= MSG_EOR;
				if (control != TLS_RECORD_TYPE_DATA)
					goto recv_end;
			}
		}

		/* If we have a new message from strparser, continue now. */
		if (copied >= target && !ctx->recv_pkt)
			break;
	} while (len);

recv_end:
	release_sock(sk);
	return copied ? : err;
}

ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
			   struct pipe_inode_info *pipe,
			   size_t len, unsigned int flags)
{
	struct tls_context *tls_ctx = tls_get_ctx(sock->sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct strp_msg *rxm = NULL;
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	ssize_t copied = 0;
	int err = 0;
	long timeo;
	int chunk;
	bool zc;

	lock_sock(sk);

	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

	skb = tls_wait_data(sk, flags, timeo, &err);
	if (!skb)
		goto splice_read_end;

	/* splice does not support reading control messages */
	if (ctx->control != TLS_RECORD_TYPE_DATA) {
		err = -ENOTSUPP;
		goto splice_read_end;
	}

	if (!ctx->decrypted) {
		err = decrypt_skb_update(sk, skb, NULL, &zc);

		if (err < 0) {
			tls_err_abort(sk, EBADMSG);
			goto splice_read_end;
		}
		ctx->decrypted = true;
	}
	rxm = strp_msg(skb);

	chunk = min_t(unsigned int, rxm->full_len, len);
	copied = skb_splice_bits(skb, sk, rxm->offset, pipe, chunk, flags);
	if (copied < 0)
		goto splice_read_end;

	if (likely(!(flags & MSG_PEEK)))
		tls_sw_advance_skb(sk, skb, copied);

splice_read_end:
	release_sock(sk);
	return copied ? : err;
}

unsigned int tls_sw_poll(struct file *file, struct socket *sock,
			 struct poll_table_struct *wait)
{
	unsigned int ret;
	struct sock *sk = sock->sk;
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);

	/* Grab POLLOUT and POLLHUP from the underlying socket */
	ret = ctx->sk_poll(file, sock, wait);

	/* Clear POLLIN bits, and set based on recv_pkt */
	ret &= ~(POLLIN | POLLRDNORM);
	if (ctx->recv_pkt)
		ret |= POLLIN | POLLRDNORM;

	return ret;
}

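/* strparser callback: parse the 5-byte TLS record header at the current
 * offset and return the full record length, 0 if more data is needed, or
 * a negative error if the header is invalid.
 */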
static int tls_read_size(struct strparser *strp, struct sk_buff *skb)
{
	struct tls_context *tls_ctx = tls_get_ctx(strp->sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	char header[TLS_HEADER_SIZE + MAX_IV_SIZE];
	struct strp_msg *rxm = strp_msg(skb);
	size_t cipher_overhead;
	size_t data_len = 0;
	int ret;

	/* Verify that we have a full TLS header, or wait for more data */
	if (rxm->offset + tls_ctx->rx.prepend_size > skb->len)
		return 0;

	/* Sanity-check size of on-stack buffer. */
	if (WARN_ON(tls_ctx->rx.prepend_size > sizeof(header))) {
		ret = -EINVAL;
		goto read_failure;
	}

	/* Linearize header to local buffer */
	ret = skb_copy_bits(skb, rxm->offset, header, tls_ctx->rx.prepend_size);

	if (ret < 0)
		goto read_failure;

	ctx->control = header[0];

	data_len = ((header[4] & 0xFF) | (header[3] << 8));

	cipher_overhead = tls_ctx->rx.tag_size + tls_ctx->rx.iv_size;

	if (data_len > TLS_MAX_PAYLOAD_SIZE + cipher_overhead) {
		ret = -EMSGSIZE;
		goto read_failure;
	}
	if (data_len < cipher_overhead) {
		ret = -EBADMSG;
		goto read_failure;
	}

	if (header[1] != TLS_VERSION_MINOR(tls_ctx->crypto_recv.version) ||
	    header[2] != TLS_VERSION_MAJOR(tls_ctx->crypto_recv.version)) {
		ret = -EINVAL;
		goto read_failure;
	}

#ifdef CONFIG_TLS_DEVICE
	handle_device_resync(strp->sk, TCP_SKB_CB(skb)->seq + rxm->offset,
			     *(u64 *)tls_ctx->rx.rec_seq);
#endif
	return data_len + TLS_HEADER_SIZE;

read_failure:
	tls_err_abort(strp->sk, ret);

	return ret;
}

static void tls_queue(struct strparser *strp, struct sk_buff *skb)
{
	struct tls_context *tls_ctx = tls_get_ctx(strp->sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);

	ctx->decrypted = false;

	ctx->recv_pkt = skb;
	strp_pause(strp);

	strp->sk->sk_state_change(strp->sk);
}

static void tls_data_ready(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);

	strp_data_ready(&ctx->strp);
}

void tls_sw_free_resources_tx(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);

	crypto_free_aead(ctx->aead_send);
	tls_free_both_sg(sk);

	kfree(ctx);
}

void tls_sw_release_resources_rx(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);

	if (ctx->aead_recv) {
		kfree_skb(ctx->recv_pkt);
		ctx->recv_pkt = NULL;
		crypto_free_aead(ctx->aead_recv);
		strp_stop(&ctx->strp);
		write_lock_bh(&sk->sk_callback_lock);
		sk->sk_data_ready = ctx->saved_data_ready;
		write_unlock_bh(&sk->sk_callback_lock);
		release_sock(sk);
		strp_done(&ctx->strp);
		lock_sock(sk);
	}
}

void tls_sw_free_resources_rx(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);

	tls_sw_release_resources_rx(sk);

	kfree(ctx);
}

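/* Set up the software TLS context for one direction (tx != 0 for the
 * transmit path): allocate the per-direction context, copy IV, salt and
 * record sequence number from the user-supplied crypto_info, instantiate
 * and key the gcm(aes) AEAD, and on the receive side attach the strparser
 * and take over sk_data_ready.
 */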
int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
{
	char keyval[TLS_CIPHER_AES_GCM_128_KEY_SIZE];
	struct tls_crypto_info *crypto_info;
	struct tls12_crypto_info_aes_gcm_128 *gcm_128_info;
	struct tls_sw_context_tx *sw_ctx_tx = NULL;
	struct tls_sw_context_rx *sw_ctx_rx = NULL;
	struct cipher_context *cctx;
	struct crypto_aead **aead;
	struct strp_callbacks cb;
	u16 nonce_size, tag_size, iv_size, rec_seq_size;
	char *iv, *rec_seq;
	int rc = 0;

	if (!ctx) {
		rc = -EINVAL;
		goto out;
	}

	if (tx) {
		if (!ctx->priv_ctx_tx) {
			sw_ctx_tx = kzalloc(sizeof(*sw_ctx_tx), GFP_KERNEL);
			if (!sw_ctx_tx) {
				rc = -ENOMEM;
				goto out;
			}
			ctx->priv_ctx_tx = sw_ctx_tx;
		} else {
			sw_ctx_tx =
				(struct tls_sw_context_tx *)ctx->priv_ctx_tx;
		}
	} else {
		if (!ctx->priv_ctx_rx) {
			sw_ctx_rx = kzalloc(sizeof(*sw_ctx_rx), GFP_KERNEL);
			if (!sw_ctx_rx) {
				rc = -ENOMEM;
				goto out;
			}
			ctx->priv_ctx_rx = sw_ctx_rx;
		} else {
			sw_ctx_rx =
				(struct tls_sw_context_rx *)ctx->priv_ctx_rx;
		}
	}

	if (tx) {
		crypto_init_wait(&sw_ctx_tx->async_wait);
		crypto_info = &ctx->crypto_send;
		cctx = &ctx->tx;
		aead = &sw_ctx_tx->aead_send;
	} else {
		crypto_init_wait(&sw_ctx_rx->async_wait);
		crypto_info = &ctx->crypto_recv;
		cctx = &ctx->rx;
		aead = &sw_ctx_rx->aead_recv;
	}

	switch (crypto_info->cipher_type) {
	case TLS_CIPHER_AES_GCM_128: {
		nonce_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
		tag_size = TLS_CIPHER_AES_GCM_128_TAG_SIZE;
		iv_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
		iv = ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->iv;
		rec_seq_size = TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE;
		rec_seq =
		 ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->rec_seq;
		gcm_128_info =
			(struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
		break;
	}
	default:
		rc = -EINVAL;
		goto free_priv;
	}

	/* Sanity-check the IV size for stack allocations. */
	if (iv_size > MAX_IV_SIZE || nonce_size > MAX_IV_SIZE) {
		rc = -EINVAL;
		goto free_priv;
	}

	cctx->prepend_size = TLS_HEADER_SIZE + nonce_size;
	cctx->tag_size = tag_size;
	cctx->overhead_size = cctx->prepend_size + cctx->tag_size;
	cctx->iv_size = iv_size;
	cctx->iv = kmalloc(iv_size + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
			   GFP_KERNEL);
	if (!cctx->iv) {
		rc = -ENOMEM;
		goto free_priv;
	}
	memcpy(cctx->iv, gcm_128_info->salt, TLS_CIPHER_AES_GCM_128_SALT_SIZE);
	memcpy(cctx->iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE, iv, iv_size);
	cctx->rec_seq_size = rec_seq_size;
	cctx->rec_seq = kmalloc(rec_seq_size, GFP_KERNEL);
	if (!cctx->rec_seq) {
		rc = -ENOMEM;
		goto free_iv;
	}
	memcpy(cctx->rec_seq, rec_seq, rec_seq_size);

	if (sw_ctx_tx) {
		sg_init_table(sw_ctx_tx->sg_encrypted_data,
			      ARRAY_SIZE(sw_ctx_tx->sg_encrypted_data));
		sg_init_table(sw_ctx_tx->sg_plaintext_data,
			      ARRAY_SIZE(sw_ctx_tx->sg_plaintext_data));

		sg_init_table(sw_ctx_tx->sg_aead_in, 2);
		sg_set_buf(&sw_ctx_tx->sg_aead_in[0], sw_ctx_tx->aad_space,
			   sizeof(sw_ctx_tx->aad_space));
		sg_unmark_end(&sw_ctx_tx->sg_aead_in[1]);
		sg_chain(sw_ctx_tx->sg_aead_in, 2,
			 sw_ctx_tx->sg_plaintext_data);
		sg_init_table(sw_ctx_tx->sg_aead_out, 2);
		sg_set_buf(&sw_ctx_tx->sg_aead_out[0], sw_ctx_tx->aad_space,
			   sizeof(sw_ctx_tx->aad_space));
		sg_unmark_end(&sw_ctx_tx->sg_aead_out[1]);
		sg_chain(sw_ctx_tx->sg_aead_out, 2,
			 sw_ctx_tx->sg_encrypted_data);
	}

	if (!*aead) {
		*aead = crypto_alloc_aead("gcm(aes)", 0, 0);
		if (IS_ERR(*aead)) {
			rc = PTR_ERR(*aead);
			*aead = NULL;
			goto free_rec_seq;
		}
	}

	ctx->push_pending_record = tls_sw_push_pending_record;

	memcpy(keyval, gcm_128_info->key, TLS_CIPHER_AES_GCM_128_KEY_SIZE);

	rc = crypto_aead_setkey(*aead, keyval,
				TLS_CIPHER_AES_GCM_128_KEY_SIZE);
	if (rc)
		goto free_aead;

	rc = crypto_aead_setauthsize(*aead, cctx->tag_size);
	if (rc)
		goto free_aead;

	if (sw_ctx_rx) {
		/* Set up strparser */
		memset(&cb, 0, sizeof(cb));
		cb.rcv_msg = tls_queue;
		cb.parse_msg = tls_read_size;

		strp_init(&sw_ctx_rx->strp, sk, &cb);

		write_lock_bh(&sk->sk_callback_lock);
		sw_ctx_rx->saved_data_ready = sk->sk_data_ready;
		sk->sk_data_ready = tls_data_ready;
		write_unlock_bh(&sk->sk_callback_lock);

		sw_ctx_rx->sk_poll = sk->sk_socket->ops->poll;

		strp_check_rcv(&sw_ctx_rx->strp);
	}

	goto out;

free_aead:
	crypto_free_aead(*aead);
	*aead = NULL;
free_rec_seq:
	kfree(cctx->rec_seq);
	cctx->rec_seq = NULL;
free_iv:
	kfree(cctx->iv);
	cctx->iv = NULL;
free_priv:
	if (tx) {
		kfree(ctx->priv_ctx_tx);
		ctx->priv_ctx_tx = NULL;
	} else {
		kfree(ctx->priv_ctx_rx);
		ctx->priv_ctx_rx = NULL;
	}
out:
	return rc;
}