tls_sw.c

/*
 * Copyright (c) 2016-2017, Mellanox Technologies. All rights reserved.
 * Copyright (c) 2016-2017, Dave Watson <davejwatson@fb.com>. All rights reserved.
 * Copyright (c) 2016-2017, Lance Chao <lancerchao@fb.com>. All rights reserved.
 * Copyright (c) 2016, Fridolin Pokorny <fridolin.pokorny@gmail.com>. All rights reserved.
 * Copyright (c) 2016, Nikos Mavrogiannopoulos <nmav@gnutls.org>. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/sched/signal.h>
#include <linux/module.h>
#include <crypto/aead.h>

#include <net/strparser.h>
#include <net/tls.h>

#define MAX_IV_SIZE     TLS_CIPHER_AES_GCM_128_IV_SIZE
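
/* Run the AEAD decrypt for a single record: sgin holds AAD + ciphertext and
 * sgout receives AAD + plaintext (the two may alias for in-place decryption).
 * The request completes synchronously here via crypto_wait_req(), even when
 * the underlying cipher implementation is asynchronous.
 */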
static int tls_do_decryption(struct sock *sk,
                struct scatterlist *sgin,
                struct scatterlist *sgout,
                char *iv_recv,
                size_t data_len,
                struct aead_request *aead_req)
{
        struct tls_context *tls_ctx = tls_get_ctx(sk);
        struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
        int ret;

        aead_request_set_tfm(aead_req, ctx->aead_recv);
        aead_request_set_ad(aead_req, TLS_AAD_SPACE_SIZE);
        aead_request_set_crypt(aead_req, sgin, sgout,
                        data_len + tls_ctx->rx.tag_size,
                        (u8 *)iv_recv);
        aead_request_set_callback(aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                        crypto_req_done, &ctx->async_wait);

        ret = crypto_wait_req(crypto_aead_decrypt(aead_req), &ctx->async_wait);
        return ret;
}
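
/* Trim a partially filled scatterlist back down to target_size bytes,
 * releasing the pages and the socket memory charge for everything past
 * that point.
 */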
static void trim_sg(struct sock *sk, struct scatterlist *sg,
                int *sg_num_elem, unsigned int *sg_size, int target_size)
{
        int i = *sg_num_elem - 1;
        int trim = *sg_size - target_size;

        if (trim <= 0) {
                WARN_ON(trim < 0);
                return;
        }

        *sg_size = target_size;
        while (trim >= sg[i].length) {
                trim -= sg[i].length;
                sk_mem_uncharge(sk, sg[i].length);
                put_page(sg_page(&sg[i]));
                i--;

                if (i < 0)
                        goto out;
        }

        sg[i].length -= trim;
        sk_mem_uncharge(sk, trim);

out:
        *sg_num_elem = i + 1;
}

static void trim_both_sgl(struct sock *sk, int target_size)
{
        struct tls_context *tls_ctx = tls_get_ctx(sk);
        struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);

        trim_sg(sk, ctx->sg_plaintext_data,
                &ctx->sg_plaintext_num_elem,
                &ctx->sg_plaintext_size,
                target_size);

        if (target_size > 0)
                target_size += tls_ctx->tx.overhead_size;

        trim_sg(sk, ctx->sg_encrypted_data,
                &ctx->sg_encrypted_num_elem,
                &ctx->sg_encrypted_size,
                target_size);
}

static int alloc_encrypted_sg(struct sock *sk, int len)
{
        struct tls_context *tls_ctx = tls_get_ctx(sk);
        struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
        int rc = 0;

        rc = sk_alloc_sg(sk, len,
                        ctx->sg_encrypted_data, 0,
                        &ctx->sg_encrypted_num_elem,
                        &ctx->sg_encrypted_size, 0);

        return rc;
}

static int alloc_plaintext_sg(struct sock *sk, int len)
{
        struct tls_context *tls_ctx = tls_get_ctx(sk);
        struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
        int rc = 0;

        rc = sk_alloc_sg(sk, len, ctx->sg_plaintext_data, 0,
                        &ctx->sg_plaintext_num_elem, &ctx->sg_plaintext_size,
                        tls_ctx->pending_open_record_frags);

        return rc;
}

static void free_sg(struct sock *sk, struct scatterlist *sg,
                int *sg_num_elem, unsigned int *sg_size)
{
        int i, n = *sg_num_elem;

        for (i = 0; i < n; ++i) {
                sk_mem_uncharge(sk, sg[i].length);
                put_page(sg_page(&sg[i]));
        }
        *sg_num_elem = 0;
        *sg_size = 0;
}

static void tls_free_both_sg(struct sock *sk)
{
        struct tls_context *tls_ctx = tls_get_ctx(sk);
        struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);

        free_sg(sk, ctx->sg_encrypted_data, &ctx->sg_encrypted_num_elem,
                &ctx->sg_encrypted_size);

        free_sg(sk, ctx->sg_plaintext_data, &ctx->sg_plaintext_num_elem,
                &ctx->sg_plaintext_size);
}
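
/* Encrypt the pending record: the first encrypted sg entry is temporarily
 * advanced past the TLS header (prepend) bytes so the AEAD output lands
 * right after the header, and the offsets are restored afterwards.
 */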
static int tls_do_encryption(struct tls_context *tls_ctx,
                struct tls_sw_context_tx *ctx,
                struct aead_request *aead_req,
                size_t data_len)
{
        int rc;

        ctx->sg_encrypted_data[0].offset += tls_ctx->tx.prepend_size;
        ctx->sg_encrypted_data[0].length -= tls_ctx->tx.prepend_size;

        aead_request_set_tfm(aead_req, ctx->aead_send);
        aead_request_set_ad(aead_req, TLS_AAD_SPACE_SIZE);
        aead_request_set_crypt(aead_req, ctx->sg_aead_in, ctx->sg_aead_out,
                        data_len, tls_ctx->tx.iv);
        aead_request_set_callback(aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                        crypto_req_done, &ctx->async_wait);

        rc = crypto_wait_req(crypto_aead_encrypt(aead_req), &ctx->async_wait);

        ctx->sg_encrypted_data[0].offset -= tls_ctx->tx.prepend_size;
        ctx->sg_encrypted_data[0].length += tls_ctx->tx.prepend_size;

        return rc;
}
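
/* Close and transmit the currently open record: build the AAD and the TLS
 * record header, run the AEAD encryption, hand the ciphertext to
 * tls_push_sg() and advance the TX record sequence number.
 *
 * For reference (TLS 1.2 with AES-GCM-128), the 13-byte AAD produced by
 * tls_make_aad() is laid out roughly as:
 *
 *      record sequence (8) || content type (1) || version (2) || length (2)
 */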
static int tls_push_record(struct sock *sk, int flags,
                unsigned char record_type)
{
        struct tls_context *tls_ctx = tls_get_ctx(sk);
        struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
        struct aead_request *req;
        int rc;

        req = aead_request_alloc(ctx->aead_send, sk->sk_allocation);
        if (!req)
                return -ENOMEM;

        sg_mark_end(ctx->sg_plaintext_data + ctx->sg_plaintext_num_elem - 1);
        sg_mark_end(ctx->sg_encrypted_data + ctx->sg_encrypted_num_elem - 1);

        tls_make_aad(ctx->aad_space, ctx->sg_plaintext_size,
                        tls_ctx->tx.rec_seq, tls_ctx->tx.rec_seq_size,
                        record_type);

        tls_fill_prepend(tls_ctx,
                        page_address(sg_page(&ctx->sg_encrypted_data[0])) +
                        ctx->sg_encrypted_data[0].offset,
                        ctx->sg_plaintext_size, record_type);

        tls_ctx->pending_open_record_frags = 0;
        set_bit(TLS_PENDING_CLOSED_RECORD, &tls_ctx->flags);

        rc = tls_do_encryption(tls_ctx, ctx, req, ctx->sg_plaintext_size);
        if (rc < 0) {
                /* If we are called from write_space and
                 * we fail, we need to set this SOCK_NOSPACE
                 * to trigger another write_space in the future.
                 */
                set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
                goto out_req;
        }

        free_sg(sk, ctx->sg_plaintext_data, &ctx->sg_plaintext_num_elem,
                &ctx->sg_plaintext_size);

        ctx->sg_encrypted_num_elem = 0;
        ctx->sg_encrypted_size = 0;

        /* Only pass through MSG_DONTWAIT and MSG_NOSIGNAL flags */
        rc = tls_push_sg(sk, tls_ctx, ctx->sg_encrypted_data, 0, flags);
        if (rc < 0 && rc != -EAGAIN)
                tls_err_abort(sk, EBADMSG);

        tls_advance_record_sn(sk, &tls_ctx->tx);
out_req:
        aead_request_free(req);
        return rc;
}

static int tls_sw_push_pending_record(struct sock *sk, int flags)
{
        return tls_push_record(sk, flags, TLS_RECORD_TYPE_DATA);
}
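
/* Pin user pages referenced by @from and map up to @length bytes of them
 * directly into the @to scatterlist, so the AEAD can operate on user memory
 * without an intermediate copy. On failure the iterator is reverted.
 */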
static int zerocopy_from_iter(struct sock *sk, struct iov_iter *from,
                int length, int *pages_used,
                unsigned int *size_used,
                struct scatterlist *to, int to_max_pages,
                bool charge)
{
        struct page *pages[MAX_SKB_FRAGS];

        size_t offset;
        ssize_t copied, use;
        int i = 0;
        unsigned int size = *size_used;
        int num_elem = *pages_used;
        int rc = 0;
        int maxpages;

        while (length > 0) {
                i = 0;
                maxpages = to_max_pages - num_elem;
                if (maxpages == 0) {
                        rc = -EFAULT;
                        goto out;
                }
                copied = iov_iter_get_pages(from, pages,
                                length,
                                maxpages, &offset);
                if (copied <= 0) {
                        rc = -EFAULT;
                        goto out;
                }
                iov_iter_advance(from, copied);

                length -= copied;
                size += copied;
                while (copied) {
                        use = min_t(int, copied, PAGE_SIZE - offset);

                        sg_set_page(&to[num_elem],
                                        pages[i], use, offset);
                        sg_unmark_end(&to[num_elem]);
                        if (charge)
                                sk_mem_charge(sk, use);

                        offset = 0;
                        copied -= use;

                        ++i;
                        ++num_elem;
                }
        }

        /* Mark the end in the last sg entry if newly added */
        if (num_elem > *pages_used)
                sg_mark_end(&to[num_elem - 1]);
out:
        if (rc)
                iov_iter_revert(from, size - *size_used);
        *size_used = size;
        *pages_used = num_elem;

        return rc;
}

static int memcopy_from_iter(struct sock *sk, struct iov_iter *from,
                int bytes)
{
        struct tls_context *tls_ctx = tls_get_ctx(sk);
        struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
        struct scatterlist *sg = ctx->sg_plaintext_data;
        int copy, i, rc = 0;

        for (i = tls_ctx->pending_open_record_frags;
             i < ctx->sg_plaintext_num_elem; ++i) {
                copy = sg[i].length;
                if (copy_from_iter(
                                page_address(sg_page(&sg[i])) + sg[i].offset,
                                copy, from) != copy) {
                        rc = -EFAULT;
                        goto out;
                }
                bytes -= copy;

                ++tls_ctx->pending_open_record_frags;

                if (!bytes)
                        break;
        }

out:
        return rc;
}
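
/* sendmsg() for a software-TLS socket. Data is packed into records of at
 * most TLS_MAX_PAYLOAD_SIZE bytes; a record is encrypted and pushed once it
 * is full or MSG_MORE is not set. For non-kvec iterators the plaintext is
 * mapped zero-copy where possible, otherwise it is copied into the
 * preallocated plaintext scatterlist.
 */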
int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
{
        struct tls_context *tls_ctx = tls_get_ctx(sk);
        struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
        int ret = 0;
        int required_size;
        long timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
        bool eor = !(msg->msg_flags & MSG_MORE);
        size_t try_to_copy, copied = 0;
        unsigned char record_type = TLS_RECORD_TYPE_DATA;
        int record_room;
        bool full_record;
        int orig_size;
        bool is_kvec = msg->msg_iter.type & ITER_KVEC;

        if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL))
                return -ENOTSUPP;

        lock_sock(sk);

        if (tls_complete_pending_work(sk, tls_ctx, msg->msg_flags, &timeo))
                goto send_end;

        if (unlikely(msg->msg_controllen)) {
                ret = tls_proccess_cmsg(sk, msg, &record_type);
                if (ret)
                        goto send_end;
        }

        while (msg_data_left(msg)) {
                if (sk->sk_err) {
                        ret = -sk->sk_err;
                        goto send_end;
                }

                orig_size = ctx->sg_plaintext_size;
                full_record = false;
                try_to_copy = msg_data_left(msg);
                record_room = TLS_MAX_PAYLOAD_SIZE - ctx->sg_plaintext_size;
                if (try_to_copy >= record_room) {
                        try_to_copy = record_room;
                        full_record = true;
                }

                required_size = ctx->sg_plaintext_size + try_to_copy +
                                tls_ctx->tx.overhead_size;

                if (!sk_stream_memory_free(sk))
                        goto wait_for_sndbuf;
alloc_encrypted:
                ret = alloc_encrypted_sg(sk, required_size);
                if (ret) {
                        if (ret != -ENOSPC)
                                goto wait_for_memory;

                        /* Adjust try_to_copy according to the amount that was
                         * actually allocated. The difference is due
                         * to max sg elements limit
                         */
                        try_to_copy -= required_size - ctx->sg_encrypted_size;
                        full_record = true;
                }

                if (!is_kvec && (full_record || eor)) {
                        ret = zerocopy_from_iter(sk, &msg->msg_iter,
                                try_to_copy, &ctx->sg_plaintext_num_elem,
                                &ctx->sg_plaintext_size,
                                ctx->sg_plaintext_data,
                                ARRAY_SIZE(ctx->sg_plaintext_data),
                                true);
                        if (ret)
                                goto fallback_to_reg_send;

                        copied += try_to_copy;
                        ret = tls_push_record(sk, msg->msg_flags, record_type);
                        if (ret)
                                goto send_end;
                        continue;

fallback_to_reg_send:
                        trim_sg(sk, ctx->sg_plaintext_data,
                                &ctx->sg_plaintext_num_elem,
                                &ctx->sg_plaintext_size,
                                orig_size);
                }

                required_size = ctx->sg_plaintext_size + try_to_copy;
alloc_plaintext:
                ret = alloc_plaintext_sg(sk, required_size);
                if (ret) {
                        if (ret != -ENOSPC)
                                goto wait_for_memory;

                        /* Adjust try_to_copy according to the amount that was
                         * actually allocated. The difference is due
                         * to max sg elements limit
                         */
                        try_to_copy -= required_size - ctx->sg_plaintext_size;
                        full_record = true;

                        trim_sg(sk, ctx->sg_encrypted_data,
                                &ctx->sg_encrypted_num_elem,
                                &ctx->sg_encrypted_size,
                                ctx->sg_plaintext_size +
                                tls_ctx->tx.overhead_size);
                }

                ret = memcopy_from_iter(sk, &msg->msg_iter, try_to_copy);
                if (ret)
                        goto trim_sgl;

                copied += try_to_copy;
                if (full_record || eor) {
push_record:
                        ret = tls_push_record(sk, msg->msg_flags, record_type);
                        if (ret) {
                                if (ret == -ENOMEM)
                                        goto wait_for_memory;

                                goto send_end;
                        }
                }

                continue;

wait_for_sndbuf:
                set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
                ret = sk_stream_wait_memory(sk, &timeo);
                if (ret) {
trim_sgl:
                        trim_both_sgl(sk, orig_size);
                        goto send_end;
                }

                if (tls_is_pending_closed_record(tls_ctx))
                        goto push_record;

                if (ctx->sg_encrypted_size < required_size)
                        goto alloc_encrypted;

                goto alloc_plaintext;
        }

send_end:
        ret = sk_stream_error(sk, msg->msg_flags, ret);

        release_sock(sk);
        return copied ? copied : ret;
}
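
/* sendpage() for a software-TLS socket: the page is referenced and appended
 * to the plaintext scatterlist directly, and the record is pushed once it is
 * full, the scatterlist runs out of slots, or no more data is expected.
 */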
int tls_sw_sendpage(struct sock *sk, struct page *page,
                int offset, size_t size, int flags)
{
        struct tls_context *tls_ctx = tls_get_ctx(sk);
        struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
        int ret = 0;
        long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
        bool eor;
        size_t orig_size = size;
        unsigned char record_type = TLS_RECORD_TYPE_DATA;
        struct scatterlist *sg;
        bool full_record;
        int record_room;

        if (flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
                      MSG_SENDPAGE_NOTLAST))
                return -ENOTSUPP;

        /* No MSG_EOR from splice, only look at MSG_MORE */
        eor = !(flags & (MSG_MORE | MSG_SENDPAGE_NOTLAST));

        lock_sock(sk);

        sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);

        if (tls_complete_pending_work(sk, tls_ctx, flags, &timeo))
                goto sendpage_end;

        /* Call the sk_stream functions to manage the sndbuf mem. */
        while (size > 0) {
                size_t copy, required_size;

                if (sk->sk_err) {
                        ret = -sk->sk_err;
                        goto sendpage_end;
                }

                full_record = false;
                record_room = TLS_MAX_PAYLOAD_SIZE - ctx->sg_plaintext_size;
                copy = size;
                if (copy >= record_room) {
                        copy = record_room;
                        full_record = true;
                }
                required_size = ctx->sg_plaintext_size + copy +
                                tls_ctx->tx.overhead_size;

                if (!sk_stream_memory_free(sk))
                        goto wait_for_sndbuf;
alloc_payload:
                ret = alloc_encrypted_sg(sk, required_size);
                if (ret) {
                        if (ret != -ENOSPC)
                                goto wait_for_memory;

                        /* Adjust copy according to the amount that was
                         * actually allocated. The difference is due
                         * to max sg elements limit
                         */
                        copy -= required_size - ctx->sg_plaintext_size;
                        full_record = true;
                }

                get_page(page);
                sg = ctx->sg_plaintext_data + ctx->sg_plaintext_num_elem;
                sg_set_page(sg, page, copy, offset);
                sg_unmark_end(sg);

                ctx->sg_plaintext_num_elem++;

                sk_mem_charge(sk, copy);
                offset += copy;
                size -= copy;
                ctx->sg_plaintext_size += copy;
                tls_ctx->pending_open_record_frags = ctx->sg_plaintext_num_elem;

                if (full_record || eor ||
                    ctx->sg_plaintext_num_elem ==
                    ARRAY_SIZE(ctx->sg_plaintext_data)) {
push_record:
                        ret = tls_push_record(sk, flags, record_type);
                        if (ret) {
                                if (ret == -ENOMEM)
                                        goto wait_for_memory;

                                goto sendpage_end;
                        }
                }
                continue;

wait_for_sndbuf:
                set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
                ret = sk_stream_wait_memory(sk, &timeo);
                if (ret) {
                        trim_both_sgl(sk, ctx->sg_plaintext_size);
                        goto sendpage_end;
                }

                if (tls_is_pending_closed_record(tls_ctx))
                        goto push_record;

                goto alloc_payload;
        }

sendpage_end:
        if (orig_size > size)
                ret = orig_size - size;
        else
                ret = sk_stream_error(sk, flags, ret);

        release_sock(sk);
        return ret;
}
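
/* Block (subject to @timeo and MSG_DONTWAIT) until the strparser has parked
 * a complete record in ctx->recv_pkt; returns NULL on error, shutdown,
 * timeout or signal, with *err set where applicable.
 */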
static struct sk_buff *tls_wait_data(struct sock *sk, int flags,
                long timeo, int *err)
{
        struct tls_context *tls_ctx = tls_get_ctx(sk);
        struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
        struct sk_buff *skb;
        DEFINE_WAIT_FUNC(wait, woken_wake_function);

        while (!(skb = ctx->recv_pkt)) {
                if (sk->sk_err) {
                        *err = sock_error(sk);
                        return NULL;
                }

                if (sk->sk_shutdown & RCV_SHUTDOWN)
                        return NULL;

                if (sock_flag(sk, SOCK_DONE))
                        return NULL;

                if ((flags & MSG_DONTWAIT) || !timeo) {
                        *err = -EAGAIN;
                        return NULL;
                }

                add_wait_queue(sk_sleep(sk), &wait);
                sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
                sk_wait_event(sk, &timeo, ctx->recv_pkt != skb, &wait);
                sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
                remove_wait_queue(sk_sleep(sk), &wait);

                /* Handle signals */
                if (signal_pending(current)) {
                        *err = sock_intr_errno(timeo);
                        return NULL;
                }
        }

        return skb;
}
/* This function decrypts the input skb into either out_iov or in out_sg
 * or in skb buffers itself. The input parameter 'zc' indicates if
 * zero-copy mode needs to be tried or not. With zero-copy mode, either
 * out_iov or out_sg must be non-NULL. In case both out_iov and out_sg are
 * NULL, then the decryption happens inside skb buffers itself, i.e.
 * zero-copy gets disabled and 'zc' is updated.
 */
static int decrypt_internal(struct sock *sk, struct sk_buff *skb,
                struct iov_iter *out_iov,
                struct scatterlist *out_sg,
                int *chunk, bool *zc)
{
        struct tls_context *tls_ctx = tls_get_ctx(sk);
        struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
        struct strp_msg *rxm = strp_msg(skb);
        int n_sgin, n_sgout, nsg, mem_size, aead_size, err, pages = 0;
        struct aead_request *aead_req;
        struct sk_buff *unused;
        u8 *aad, *iv, *mem = NULL;
        struct scatterlist *sgin = NULL;
        struct scatterlist *sgout = NULL;
        const int data_len = rxm->full_len - tls_ctx->rx.overhead_size;

        if (*zc && (out_iov || out_sg)) {
                if (out_iov)
                        n_sgout = iov_iter_npages(out_iov, INT_MAX) + 1;
                else
                        n_sgout = sg_nents(out_sg);
        } else {
                n_sgout = 0;
                *zc = false;
        }

        n_sgin = skb_cow_data(skb, 0, &unused);
        if (n_sgin < 1)
                return -EBADMSG;

        /* Increment to accommodate AAD */
        n_sgin = n_sgin + 1;

        nsg = n_sgin + n_sgout;

        aead_size = sizeof(*aead_req) + crypto_aead_reqsize(ctx->aead_recv);
        mem_size = aead_size + (nsg * sizeof(struct scatterlist));
        mem_size = mem_size + TLS_AAD_SPACE_SIZE;
        mem_size = mem_size + crypto_aead_ivsize(ctx->aead_recv);

        /* Allocate a single block of memory which contains
         * aead_req || sgin[] || sgout[] || aad || iv.
         * This order achieves correct alignment for aead_req, sgin, sgout.
         */
        mem = kmalloc(mem_size, sk->sk_allocation);
        if (!mem)
                return -ENOMEM;

        /* Segment the allocated memory */
        aead_req = (struct aead_request *)mem;
        sgin = (struct scatterlist *)(mem + aead_size);
        sgout = sgin + n_sgin;
        aad = (u8 *)(sgout + n_sgout);
        iv = aad + TLS_AAD_SPACE_SIZE;

        /* Prepare IV */
        err = skb_copy_bits(skb, rxm->offset + TLS_HEADER_SIZE,
                        iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
                        tls_ctx->rx.iv_size);
        if (err < 0) {
                kfree(mem);
                return err;
        }
        memcpy(iv, tls_ctx->rx.iv, TLS_CIPHER_AES_GCM_128_SALT_SIZE);

        /* Prepare AAD */
        tls_make_aad(aad, rxm->full_len - tls_ctx->rx.overhead_size,
                        tls_ctx->rx.rec_seq, tls_ctx->rx.rec_seq_size,
                        ctx->control);

        /* Prepare sgin */
        sg_init_table(sgin, n_sgin);
        sg_set_buf(&sgin[0], aad, TLS_AAD_SPACE_SIZE);
        err = skb_to_sgvec(skb, &sgin[1],
                        rxm->offset + tls_ctx->rx.prepend_size,
                        rxm->full_len - tls_ctx->rx.prepend_size);
        if (err < 0) {
                kfree(mem);
                return err;
        }

        if (n_sgout) {
                if (out_iov) {
                        sg_init_table(sgout, n_sgout);
                        sg_set_buf(&sgout[0], aad, TLS_AAD_SPACE_SIZE);

                        *chunk = 0;
                        err = zerocopy_from_iter(sk, out_iov, data_len, &pages,
                                        chunk, &sgout[1],
                                        (n_sgout - 1), false);
                        if (err < 0)
                                goto fallback_to_reg_recv;
                } else if (out_sg) {
                        memcpy(sgout, out_sg, n_sgout * sizeof(*sgout));
                } else {
                        goto fallback_to_reg_recv;
                }
        } else {
fallback_to_reg_recv:
                sgout = sgin;
                pages = 0;
                *chunk = 0;
                *zc = false;
        }

        /* Prepare and submit AEAD request */
        err = tls_do_decryption(sk, sgin, sgout, iv, data_len, aead_req);

        /* Release the pages in case iov was mapped to pages */
        for (; pages > 0; pages--)
                put_page(sg_page(&sgout[pages]));

        kfree(mem);
        return err;
}
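
/* Decrypt ctx->recv_pkt (unless device offload already did) and strip the
 * record header and overhead from the strparser message, advancing the RX
 * record sequence number.
 */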
static int decrypt_skb_update(struct sock *sk, struct sk_buff *skb,
                struct iov_iter *dest, int *chunk, bool *zc)
{
        struct tls_context *tls_ctx = tls_get_ctx(sk);
        struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
        struct strp_msg *rxm = strp_msg(skb);
        int err = 0;

#ifdef CONFIG_TLS_DEVICE
        err = tls_device_decrypted(sk, skb);
        if (err < 0)
                return err;
#endif
        if (!ctx->decrypted) {
                err = decrypt_internal(sk, skb, dest, NULL, chunk, zc);
                if (err < 0)
                        return err;
        } else {
                *zc = false;
        }

        rxm->offset += tls_ctx->rx.prepend_size;
        rxm->full_len -= tls_ctx->rx.overhead_size;
        tls_advance_record_sn(sk, &tls_ctx->rx);
        ctx->decrypted = true;
        ctx->saved_data_ready(sk);

        return err;
}

int decrypt_skb(struct sock *sk, struct sk_buff *skb,
                struct scatterlist *sgout)
{
        bool zc = true;
        int chunk;

        return decrypt_internal(sk, skb, NULL, sgout, &chunk, &zc);
}

static bool tls_sw_advance_skb(struct sock *sk, struct sk_buff *skb,
                unsigned int len)
{
        struct tls_context *tls_ctx = tls_get_ctx(sk);
        struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
        struct strp_msg *rxm = strp_msg(skb);

        if (len < rxm->full_len) {
                rxm->offset += len;
                rxm->full_len -= len;

                return false;
        }

        /* Finished with message */
        ctx->recv_pkt = NULL;
        kfree_skb(skb);
        __strp_unpause(&ctx->strp);

        return true;
}
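
/* recvmsg() for a software-TLS socket. Each parsed record is decrypted
 * (zero-copy straight into the user iovec when the whole record fits and
 * MSG_PEEK is not set), its content type is reported via a
 * TLS_GET_RECORD_TYPE cmsg, and reading stops at a record-type boundary.
 */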
int tls_sw_recvmsg(struct sock *sk,
                struct msghdr *msg,
                size_t len,
                int nonblock,
                int flags,
                int *addr_len)
{
        struct tls_context *tls_ctx = tls_get_ctx(sk);
        struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
        unsigned char control;
        struct strp_msg *rxm;
        struct sk_buff *skb;
        ssize_t copied = 0;
        bool cmsg = false;
        int target, err = 0;
        long timeo;
        bool is_kvec = msg->msg_iter.type & ITER_KVEC;

        flags |= nonblock;

        if (unlikely(flags & MSG_ERRQUEUE))
                return sock_recv_errqueue(sk, msg, len, SOL_IP, IP_RECVERR);

        lock_sock(sk);

        target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
        timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
        do {
                bool zc = false;
                int chunk = 0;

                skb = tls_wait_data(sk, flags, timeo, &err);
                if (!skb)
                        goto recv_end;

                rxm = strp_msg(skb);
                if (!cmsg) {
                        int cerr;

                        cerr = put_cmsg(msg, SOL_TLS, TLS_GET_RECORD_TYPE,
                                        sizeof(ctx->control), &ctx->control);
                        cmsg = true;
                        control = ctx->control;
                        if (ctx->control != TLS_RECORD_TYPE_DATA) {
                                if (cerr || msg->msg_flags & MSG_CTRUNC) {
                                        err = -EIO;
                                        goto recv_end;
                                }
                        }
                } else if (control != ctx->control) {
                        goto recv_end;
                }

                if (!ctx->decrypted) {
                        int to_copy = rxm->full_len - tls_ctx->rx.overhead_size;

                        if (!is_kvec && to_copy <= len &&
                            likely(!(flags & MSG_PEEK)))
                                zc = true;

                        err = decrypt_skb_update(sk, skb, &msg->msg_iter,
                                        &chunk, &zc);
                        if (err < 0) {
                                tls_err_abort(sk, EBADMSG);
                                goto recv_end;
                        }
                        ctx->decrypted = true;
                }

                if (!zc) {
                        chunk = min_t(unsigned int, rxm->full_len, len);
                        err = skb_copy_datagram_msg(skb, rxm->offset, msg,
                                        chunk);
                        if (err < 0)
                                goto recv_end;
                }

                copied += chunk;
                len -= chunk;
                if (likely(!(flags & MSG_PEEK))) {
                        u8 control = ctx->control;

                        if (tls_sw_advance_skb(sk, skb, chunk)) {
                                /* Return full control message to
                                 * userspace before trying to parse
                                 * another message type
                                 */
                                msg->msg_flags |= MSG_EOR;
                                if (control != TLS_RECORD_TYPE_DATA)
                                        goto recv_end;
                        }
                }

                /* If we have a new message from strparser, continue now. */
                if (copied >= target && !ctx->recv_pkt)
                        break;
        } while (len);

recv_end:
        release_sock(sk);
        return copied ? : err;
}

ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
                struct pipe_inode_info *pipe,
                size_t len, unsigned int flags)
{
        struct tls_context *tls_ctx = tls_get_ctx(sock->sk);
        struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
        struct strp_msg *rxm = NULL;
        struct sock *sk = sock->sk;
        struct sk_buff *skb;
        ssize_t copied = 0;
        int err = 0;
        long timeo;
        int chunk;
        bool zc = false;

        lock_sock(sk);

        timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

        skb = tls_wait_data(sk, flags, timeo, &err);
        if (!skb)
                goto splice_read_end;

        /* splice does not support reading control messages */
        if (ctx->control != TLS_RECORD_TYPE_DATA) {
                err = -ENOTSUPP;
                goto splice_read_end;
        }

        if (!ctx->decrypted) {
                err = decrypt_skb_update(sk, skb, NULL, &chunk, &zc);
                if (err < 0) {
                        tls_err_abort(sk, EBADMSG);
                        goto splice_read_end;
                }
                ctx->decrypted = true;
        }
        rxm = strp_msg(skb);

        chunk = min_t(unsigned int, rxm->full_len, len);
        copied = skb_splice_bits(skb, sk, rxm->offset, pipe, chunk, flags);
        if (copied < 0)
                goto splice_read_end;

        if (likely(!(flags & MSG_PEEK)))
                tls_sw_advance_skb(sk, skb, copied);

splice_read_end:
        release_sock(sk);
        return copied ? : err;
}

unsigned int tls_sw_poll(struct file *file, struct socket *sock,
                struct poll_table_struct *wait)
{
        unsigned int ret;
        struct sock *sk = sock->sk;
        struct tls_context *tls_ctx = tls_get_ctx(sk);
        struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);

        /* Grab POLLOUT and POLLHUP from the underlying socket */
        ret = ctx->sk_poll(file, sock, wait);

        /* Clear POLLIN bits, and set based on recv_pkt */
        ret &= ~(POLLIN | POLLRDNORM);
        if (ctx->recv_pkt)
                ret |= POLLIN | POLLRDNORM;

        return ret;
}
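
/* strparser parse callback: peek at the 5-byte TLS record header to learn
 * the full record length, or abort the connection on a malformed header.
 * The header layout (TLS 1.2) is roughly:
 *
 *      content type (1) || version major (1) || version minor (1) ||
 *      length (2, big endian)
 */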
static int tls_read_size(struct strparser *strp, struct sk_buff *skb)
{
        struct tls_context *tls_ctx = tls_get_ctx(strp->sk);
        struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
        char header[TLS_HEADER_SIZE + MAX_IV_SIZE];
        struct strp_msg *rxm = strp_msg(skb);
        size_t cipher_overhead;
        size_t data_len = 0;
        int ret;

        /* Verify that we have a full TLS header, or wait for more data */
        if (rxm->offset + tls_ctx->rx.prepend_size > skb->len)
                return 0;

        /* Sanity-check size of on-stack buffer. */
        if (WARN_ON(tls_ctx->rx.prepend_size > sizeof(header))) {
                ret = -EINVAL;
                goto read_failure;
        }

        /* Linearize header to local buffer */
        ret = skb_copy_bits(skb, rxm->offset, header, tls_ctx->rx.prepend_size);
        if (ret < 0)
                goto read_failure;

        ctx->control = header[0];

        data_len = ((header[4] & 0xFF) | (header[3] << 8));

        cipher_overhead = tls_ctx->rx.tag_size + tls_ctx->rx.iv_size;

        if (data_len > TLS_MAX_PAYLOAD_SIZE + cipher_overhead) {
                ret = -EMSGSIZE;
                goto read_failure;
        }
        if (data_len < cipher_overhead) {
                ret = -EBADMSG;
                goto read_failure;
        }

        if (header[1] != TLS_VERSION_MINOR(tls_ctx->crypto_recv.version) ||
            header[2] != TLS_VERSION_MAJOR(tls_ctx->crypto_recv.version)) {
                ret = -EINVAL;
                goto read_failure;
        }

#ifdef CONFIG_TLS_DEVICE
        handle_device_resync(strp->sk, TCP_SKB_CB(skb)->seq + rxm->offset,
                        *(u64 *)tls_ctx->rx.rec_seq);
#endif
        return data_len + TLS_HEADER_SIZE;

read_failure:
        tls_err_abort(strp->sk, ret);

        return ret;
}

static void tls_queue(struct strparser *strp, struct sk_buff *skb)
{
        struct tls_context *tls_ctx = tls_get_ctx(strp->sk);
        struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);

        ctx->decrypted = false;

        ctx->recv_pkt = skb;
        strp_pause(strp);

        ctx->saved_data_ready(strp->sk);
}

static void tls_data_ready(struct sock *sk)
{
        struct tls_context *tls_ctx = tls_get_ctx(sk);
        struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);

        strp_data_ready(&ctx->strp);
}

void tls_sw_free_resources_tx(struct sock *sk)
{
        struct tls_context *tls_ctx = tls_get_ctx(sk);
        struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);

        crypto_free_aead(ctx->aead_send);
        tls_free_both_sg(sk);

        kfree(ctx);
}

void tls_sw_release_resources_rx(struct sock *sk)
{
        struct tls_context *tls_ctx = tls_get_ctx(sk);
        struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);

        if (ctx->aead_recv) {
                kfree_skb(ctx->recv_pkt);
                ctx->recv_pkt = NULL;
                crypto_free_aead(ctx->aead_recv);
                strp_stop(&ctx->strp);
                write_lock_bh(&sk->sk_callback_lock);
                sk->sk_data_ready = ctx->saved_data_ready;
                write_unlock_bh(&sk->sk_callback_lock);
                release_sock(sk);
                strp_done(&ctx->strp);
                lock_sock(sk);
        }
}

void tls_sw_free_resources_rx(struct sock *sk)
{
        struct tls_context *tls_ctx = tls_get_ctx(sk);
        struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);

        tls_sw_release_resources_rx(sk);
        kfree(ctx);
}
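
/* Set up software TLS for one direction (tx != 0 for transmit): allocate the
 * per-direction context, derive IV and record-sequence state from the
 * userspace-supplied crypto_info, allocate and key the gcm(aes) AEAD, and
 * for receive attach a strparser that slices the TCP stream into records.
 */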
int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
{
        char keyval[TLS_CIPHER_AES_GCM_128_KEY_SIZE];
        struct tls_crypto_info *crypto_info;
        struct tls12_crypto_info_aes_gcm_128 *gcm_128_info;
        struct tls_sw_context_tx *sw_ctx_tx = NULL;
        struct tls_sw_context_rx *sw_ctx_rx = NULL;
        struct cipher_context *cctx;
        struct crypto_aead **aead;
        struct strp_callbacks cb;
        u16 nonce_size, tag_size, iv_size, rec_seq_size;
        char *iv, *rec_seq;
        int rc = 0;

        if (!ctx) {
                rc = -EINVAL;
                goto out;
        }

        if (tx) {
                if (!ctx->priv_ctx_tx) {
                        sw_ctx_tx = kzalloc(sizeof(*sw_ctx_tx), GFP_KERNEL);
                        if (!sw_ctx_tx) {
                                rc = -ENOMEM;
                                goto out;
                        }
                        ctx->priv_ctx_tx = sw_ctx_tx;
                } else {
                        sw_ctx_tx =
                                (struct tls_sw_context_tx *)ctx->priv_ctx_tx;
                }
        } else {
                if (!ctx->priv_ctx_rx) {
                        sw_ctx_rx = kzalloc(sizeof(*sw_ctx_rx), GFP_KERNEL);
                        if (!sw_ctx_rx) {
                                rc = -ENOMEM;
                                goto out;
                        }
                        ctx->priv_ctx_rx = sw_ctx_rx;
                } else {
                        sw_ctx_rx =
                                (struct tls_sw_context_rx *)ctx->priv_ctx_rx;
                }
        }

        if (tx) {
                crypto_init_wait(&sw_ctx_tx->async_wait);
                crypto_info = &ctx->crypto_send;
                cctx = &ctx->tx;
                aead = &sw_ctx_tx->aead_send;
        } else {
                crypto_init_wait(&sw_ctx_rx->async_wait);
                crypto_info = &ctx->crypto_recv;
                cctx = &ctx->rx;
                aead = &sw_ctx_rx->aead_recv;
        }

        switch (crypto_info->cipher_type) {
        case TLS_CIPHER_AES_GCM_128: {
                nonce_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
                tag_size = TLS_CIPHER_AES_GCM_128_TAG_SIZE;
                iv_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
                iv = ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->iv;
                rec_seq_size = TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE;
                rec_seq =
                 ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->rec_seq;
                gcm_128_info =
                        (struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
                break;
        }
        default:
                rc = -EINVAL;
                goto free_priv;
        }

        /* Sanity-check the IV size for stack allocations. */
        if (iv_size > MAX_IV_SIZE || nonce_size > MAX_IV_SIZE) {
                rc = -EINVAL;
                goto free_priv;
        }

        cctx->prepend_size = TLS_HEADER_SIZE + nonce_size;
        cctx->tag_size = tag_size;
        cctx->overhead_size = cctx->prepend_size + cctx->tag_size;
        cctx->iv_size = iv_size;
        cctx->iv = kmalloc(iv_size + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
                        GFP_KERNEL);
        if (!cctx->iv) {
                rc = -ENOMEM;
                goto free_priv;
        }
        memcpy(cctx->iv, gcm_128_info->salt, TLS_CIPHER_AES_GCM_128_SALT_SIZE);
        memcpy(cctx->iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE, iv, iv_size);
        cctx->rec_seq_size = rec_seq_size;
        cctx->rec_seq = kmemdup(rec_seq, rec_seq_size, GFP_KERNEL);
        if (!cctx->rec_seq) {
                rc = -ENOMEM;
                goto free_iv;
        }

        if (sw_ctx_tx) {
                sg_init_table(sw_ctx_tx->sg_encrypted_data,
                              ARRAY_SIZE(sw_ctx_tx->sg_encrypted_data));
                sg_init_table(sw_ctx_tx->sg_plaintext_data,
                              ARRAY_SIZE(sw_ctx_tx->sg_plaintext_data));

                sg_init_table(sw_ctx_tx->sg_aead_in, 2);
                sg_set_buf(&sw_ctx_tx->sg_aead_in[0], sw_ctx_tx->aad_space,
                           sizeof(sw_ctx_tx->aad_space));
                sg_unmark_end(&sw_ctx_tx->sg_aead_in[1]);
                sg_chain(sw_ctx_tx->sg_aead_in, 2,
                         sw_ctx_tx->sg_plaintext_data);
                sg_init_table(sw_ctx_tx->sg_aead_out, 2);
                sg_set_buf(&sw_ctx_tx->sg_aead_out[0], sw_ctx_tx->aad_space,
                           sizeof(sw_ctx_tx->aad_space));
                sg_unmark_end(&sw_ctx_tx->sg_aead_out[1]);
                sg_chain(sw_ctx_tx->sg_aead_out, 2,
                         sw_ctx_tx->sg_encrypted_data);
        }

        if (!*aead) {
                *aead = crypto_alloc_aead("gcm(aes)", 0, 0);
                if (IS_ERR(*aead)) {
                        rc = PTR_ERR(*aead);
                        *aead = NULL;
                        goto free_rec_seq;
                }
        }

        ctx->push_pending_record = tls_sw_push_pending_record;

        memcpy(keyval, gcm_128_info->key, TLS_CIPHER_AES_GCM_128_KEY_SIZE);

        rc = crypto_aead_setkey(*aead, keyval,
                        TLS_CIPHER_AES_GCM_128_KEY_SIZE);
        if (rc)
                goto free_aead;

        rc = crypto_aead_setauthsize(*aead, cctx->tag_size);
        if (rc)
                goto free_aead;

        if (sw_ctx_rx) {
                /* Set up strparser */
                memset(&cb, 0, sizeof(cb));
                cb.rcv_msg = tls_queue;
                cb.parse_msg = tls_read_size;

                strp_init(&sw_ctx_rx->strp, sk, &cb);

                write_lock_bh(&sk->sk_callback_lock);
                sw_ctx_rx->saved_data_ready = sk->sk_data_ready;
                sk->sk_data_ready = tls_data_ready;
                write_unlock_bh(&sk->sk_callback_lock);

                sw_ctx_rx->sk_poll = sk->sk_socket->ops->poll;

                strp_check_rcv(&sw_ctx_rx->strp);
        }

        goto out;

free_aead:
        crypto_free_aead(*aead);
        *aead = NULL;
free_rec_seq:
        kfree(cctx->rec_seq);
        cctx->rec_seq = NULL;
free_iv:
        kfree(cctx->iv);
        cctx->iv = NULL;
free_priv:
        if (tx) {
                kfree(ctx->priv_ctx_tx);
                ctx->priv_ctx_tx = NULL;
        } else {
                kfree(ctx->priv_ctx_rx);
                ctx->priv_ctx_rx = NULL;
        }
out:
        return rc;
}