tls_sw.c 19 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772
  1. /*
  2. * Copyright (c) 2016-2017, Mellanox Technologies. All rights reserved.
  3. * Copyright (c) 2016-2017, Dave Watson <davejwatson@fb.com>. All rights reserved.
  4. * Copyright (c) 2016-2017, Lance Chao <lancerchao@fb.com>. All rights reserved.
  5. * Copyright (c) 2016, Fridolin Pokorny <fridolin.pokorny@gmail.com>. All rights reserved.
  6. * Copyright (c) 2016, Nikos Mavrogiannopoulos <nmav@gnutls.org>. All rights reserved.
  7. *
  8. * This software is available to you under a choice of one of two
  9. * licenses. You may choose to be licensed under the terms of the GNU
  10. * General Public License (GPL) Version 2, available from the file
  11. * COPYING in the main directory of this source tree, or the
  12. * OpenIB.org BSD license below:
  13. *
  14. * Redistribution and use in source and binary forms, with or
  15. * without modification, are permitted provided that the following
  16. * conditions are met:
  17. *
  18. * - Redistributions of source code must retain the above
  19. * copyright notice, this list of conditions and the following
  20. * disclaimer.
  21. *
  22. * - Redistributions in binary form must reproduce the above
  23. * copyright notice, this list of conditions and the following
  24. * disclaimer in the documentation and/or other materials
  25. * provided with the distribution.
  26. *
  27. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  28. * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  29. * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  30. * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  31. * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  32. * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  33. * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  34. * SOFTWARE.
  35. */
#include <linux/module.h>
#include <linux/string.h>

#include <crypto/aead.h>

#include <net/tls.h>
  39. static inline void tls_make_aad(int recv,
  40. char *buf,
  41. size_t size,
  42. char *record_sequence,
  43. int record_sequence_size,
  44. unsigned char record_type)
  45. {
  46. memcpy(buf, record_sequence, record_sequence_size);
  47. buf[8] = record_type;
  48. buf[9] = TLS_1_2_VERSION_MAJOR;
  49. buf[10] = TLS_1_2_VERSION_MINOR;
  50. buf[11] = size >> 8;
  51. buf[12] = size & 0xFF;
  52. }
  53. static void trim_sg(struct sock *sk, struct scatterlist *sg,
  54. int *sg_num_elem, unsigned int *sg_size, int target_size)
  55. {
  56. int i = *sg_num_elem - 1;
  57. int trim = *sg_size - target_size;
  58. if (trim <= 0) {
  59. WARN_ON(trim < 0);
  60. return;
  61. }
  62. *sg_size = target_size;
  63. while (trim >= sg[i].length) {
  64. trim -= sg[i].length;
  65. sk_mem_uncharge(sk, sg[i].length);
  66. put_page(sg_page(&sg[i]));
  67. i--;
  68. if (i < 0)
  69. goto out;
  70. }
  71. sg[i].length -= trim;
  72. sk_mem_uncharge(sk, trim);
  73. out:
  74. *sg_num_elem = i + 1;
  75. }
  76. static void trim_both_sgl(struct sock *sk, int target_size)
  77. {
  78. struct tls_context *tls_ctx = tls_get_ctx(sk);
  79. struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx);
  80. trim_sg(sk, ctx->sg_plaintext_data,
  81. &ctx->sg_plaintext_num_elem,
  82. &ctx->sg_plaintext_size,
  83. target_size);
  84. if (target_size > 0)
  85. target_size += tls_ctx->overhead_size;
  86. trim_sg(sk, ctx->sg_encrypted_data,
  87. &ctx->sg_encrypted_num_elem,
  88. &ctx->sg_encrypted_size,
  89. target_size);
  90. }
  91. static int alloc_sg(struct sock *sk, int len, struct scatterlist *sg,
  92. int *sg_num_elem, unsigned int *sg_size,
  93. int first_coalesce)
  94. {
  95. struct page_frag *pfrag;
  96. unsigned int size = *sg_size;
  97. int num_elem = *sg_num_elem, use = 0, rc = 0;
  98. struct scatterlist *sge;
  99. unsigned int orig_offset;
  100. len -= size;
  101. pfrag = sk_page_frag(sk);
  102. while (len > 0) {
  103. if (!sk_page_frag_refill(sk, pfrag)) {
  104. rc = -ENOMEM;
  105. goto out;
  106. }
  107. use = min_t(int, len, pfrag->size - pfrag->offset);
  108. if (!sk_wmem_schedule(sk, use)) {
  109. rc = -ENOMEM;
  110. goto out;
  111. }
  112. sk_mem_charge(sk, use);
  113. size += use;
  114. orig_offset = pfrag->offset;
  115. pfrag->offset += use;
  116. sge = sg + num_elem - 1;
  117. if (num_elem > first_coalesce && sg_page(sg) == pfrag->page &&
  118. sg->offset + sg->length == orig_offset) {
  119. sg->length += use;
  120. } else {
  121. sge++;
  122. sg_unmark_end(sge);
  123. sg_set_page(sge, pfrag->page, use, orig_offset);
  124. get_page(pfrag->page);
  125. ++num_elem;
  126. if (num_elem == MAX_SKB_FRAGS) {
  127. rc = -ENOSPC;
  128. break;
  129. }
  130. }
  131. len -= use;
  132. }
  133. goto out;
  134. out:
  135. *sg_size = size;
  136. *sg_num_elem = num_elem;
  137. return rc;
  138. }
  139. static int alloc_encrypted_sg(struct sock *sk, int len)
  140. {
  141. struct tls_context *tls_ctx = tls_get_ctx(sk);
  142. struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx);
  143. int rc = 0;
  144. rc = alloc_sg(sk, len, ctx->sg_encrypted_data,
  145. &ctx->sg_encrypted_num_elem, &ctx->sg_encrypted_size, 0);
  146. return rc;
  147. }
  148. static int alloc_plaintext_sg(struct sock *sk, int len)
  149. {
  150. struct tls_context *tls_ctx = tls_get_ctx(sk);
  151. struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx);
  152. int rc = 0;
  153. rc = alloc_sg(sk, len, ctx->sg_plaintext_data,
  154. &ctx->sg_plaintext_num_elem, &ctx->sg_plaintext_size,
  155. tls_ctx->pending_open_record_frags);
  156. return rc;
  157. }
  158. static void free_sg(struct sock *sk, struct scatterlist *sg,
  159. int *sg_num_elem, unsigned int *sg_size)
  160. {
  161. int i, n = *sg_num_elem;
  162. for (i = 0; i < n; ++i) {
  163. sk_mem_uncharge(sk, sg[i].length);
  164. put_page(sg_page(&sg[i]));
  165. }
  166. *sg_num_elem = 0;
  167. *sg_size = 0;
  168. }
  169. static void tls_free_both_sg(struct sock *sk)
  170. {
  171. struct tls_context *tls_ctx = tls_get_ctx(sk);
  172. struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx);
  173. free_sg(sk, ctx->sg_encrypted_data, &ctx->sg_encrypted_num_elem,
  174. &ctx->sg_encrypted_size);
  175. free_sg(sk, ctx->sg_plaintext_data, &ctx->sg_plaintext_num_elem,
  176. &ctx->sg_plaintext_size);
  177. }
/* Run the AEAD transform over the open record: @data_len bytes of
 * plaintext from ctx->sg_aead_in are encrypted into ctx->sg_aead_out,
 * authenticated together with the AAD stored in aad_space.
 * Returns 0 or a negative crypto-layer error.
 */
static int tls_do_encryption(struct tls_context *tls_ctx,
			     struct tls_sw_context *ctx, size_t data_len,
			     gfp_t flags)
{
	/* One allocation for the request plus the tfm's private context. */
	unsigned int req_size = sizeof(struct aead_request) +
		crypto_aead_reqsize(ctx->aead_send);
	struct aead_request *aead_req;
	int rc;

	aead_req = kmalloc(req_size, flags);
	if (!aead_req)
		return -ENOMEM;

	/* Skip the record header: the ciphertext must land right after the
	 * prepend space that tls_push_record() already filled in.  The
	 * adjustment is undone below so the scatterlist again covers the
	 * whole record for transmission.
	 */
	ctx->sg_encrypted_data[0].offset += tls_ctx->prepend_size;
	ctx->sg_encrypted_data[0].length -= tls_ctx->prepend_size;

	aead_request_set_tfm(aead_req, ctx->aead_send);
	aead_request_set_ad(aead_req, TLS_AAD_SPACE_SIZE);
	aead_request_set_crypt(aead_req, ctx->sg_aead_in, ctx->sg_aead_out,
			       data_len, tls_ctx->iv);
	rc = crypto_aead_encrypt(aead_req);

	/* Restore the header bytes into the first entry. */
	ctx->sg_encrypted_data[0].offset -= tls_ctx->prepend_size;
	ctx->sg_encrypted_data[0].length += tls_ctx->prepend_size;

	kfree(aead_req);
	return rc;
}
/* Close the currently open record: build AAD and record header, encrypt
 * the plaintext, then hand the ciphertext scatterlist to the transmit
 * path.  Returns 0 on success or a negative error (the socket is marked
 * aborted on transmit errors other than -EAGAIN).
 */
static int tls_push_record(struct sock *sk, int flags,
			   unsigned char record_type)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx);
	int rc;

	/* Terminate both scatterlists at their current last element. */
	sg_mark_end(ctx->sg_plaintext_data + ctx->sg_plaintext_num_elem - 1);
	sg_mark_end(ctx->sg_encrypted_data + ctx->sg_encrypted_num_elem - 1);

	/* AAD: record sequence number, type, version, plaintext length. */
	tls_make_aad(0, ctx->aad_space, ctx->sg_plaintext_size,
		     tls_ctx->rec_seq, tls_ctx->rec_seq_size,
		     record_type);

	/* Write the TLS record header into the headroom reserved at the
	 * front of the first encrypted-data page.
	 */
	tls_fill_prepend(tls_ctx,
			 page_address(sg_page(&ctx->sg_encrypted_data[0])) +
			 ctx->sg_encrypted_data[0].offset,
			 ctx->sg_plaintext_size, record_type);

	/* From here on the record is closed, not open. */
	tls_ctx->pending_open_record_frags = 0;
	set_bit(TLS_PENDING_CLOSED_RECORD, &tls_ctx->flags);

	rc = tls_do_encryption(tls_ctx, ctx, ctx->sg_plaintext_size,
			       sk->sk_allocation);
	if (rc < 0) {
		/* If we are called from write_space and
		 * we fail, we need to set this SOCK_NOSPACE
		 * to trigger another write_space in the future.
		 */
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		return rc;
	}

	/* The plaintext pages are no longer needed once encrypted. */
	free_sg(sk, ctx->sg_plaintext_data, &ctx->sg_plaintext_num_elem,
		&ctx->sg_plaintext_size);

	/* Ownership of the encrypted pages moves to the transmit queue;
	 * forget them here rather than freeing them.
	 */
	ctx->sg_encrypted_num_elem = 0;
	ctx->sg_encrypted_size = 0;

	/* Only pass through MSG_DONTWAIT and MSG_NOSIGNAL flags */
	rc = tls_push_sg(sk, tls_ctx, ctx->sg_encrypted_data, 0, flags);
	if (rc < 0 && rc != -EAGAIN)
		tls_err_abort(sk);

	/* NOTE(review): the sequence number advances even when tls_push_sg()
	 * returned -EAGAIN — presumably the closed record is later resumed
	 * from cached state rather than rebuilt; confirm against
	 * tls_complete_pending_work()/tls_push_sg().
	 */
	tls_advance_record_sn(sk, tls_ctx);

	return rc;
}
  239. static int tls_sw_push_pending_record(struct sock *sk, int flags)
  240. {
  241. return tls_push_record(sk, flags, TLS_RECORD_TYPE_DATA);
  242. }
/* Zero-copy send path: pin @length bytes of user pages from @from and
 * reference them directly in the plaintext scatterlist, charging the
 * socket for the mapped bytes.  On failure the partial progress is
 * still recorded in ctx; the caller reverts it (see the
 * fallback_to_reg_send path in tls_sw_sendmsg()).
 */
static int zerocopy_from_iter(struct sock *sk, struct iov_iter *from,
			      int length)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx);
	struct page *pages[MAX_SKB_FRAGS];
	size_t offset;
	ssize_t copied, use;
	int i = 0;
	unsigned int size = ctx->sg_plaintext_size;
	int num_elem = ctx->sg_plaintext_num_elem;
	int rc = 0;
	int maxpages;

	while (length > 0) {
		i = 0;
		/* Room left in the plaintext scatterlist. */
		maxpages = ARRAY_SIZE(ctx->sg_plaintext_data) - num_elem;
		if (maxpages == 0) {
			rc = -EFAULT;
			goto out;
		}
		/* Pin up to maxpages user pages; @offset is the byte offset
		 * of the data within the first pinned page.
		 */
		copied = iov_iter_get_pages(from, pages,
					    length,
					    maxpages, &offset);
		if (copied <= 0) {
			rc = -EFAULT;
			goto out;
		}
		iov_iter_advance(from, copied);

		length -= copied;
		size += copied;

		/* Turn the pinned pages into scatterlist entries; only the
		 * first page can start at a non-zero offset.
		 */
		while (copied) {
			use = min_t(int, copied, PAGE_SIZE - offset);

			sg_set_page(&ctx->sg_plaintext_data[num_elem],
				    pages[i], use, offset);
			sg_unmark_end(&ctx->sg_plaintext_data[num_elem]);
			/* Account the pinned memory against the socket. */
			sk_mem_charge(sk, use);

			offset = 0;
			copied -= use;

			++i;
			++num_elem;
		}
	}

out:
	/* Publish progress (even partial) back into the context. */
	ctx->sg_plaintext_size = size;
	ctx->sg_plaintext_num_elem = num_elem;
	return rc;
}
/* Regular (copying) send path: copy @bytes from the message iterator
 * into the plaintext scatterlist pages that alloc_plaintext_sg() set up,
 * resuming after the frags already committed to the open record.
 * Returns 0 or -EFAULT on a failed user copy.
 */
static int memcopy_from_iter(struct sock *sk, struct iov_iter *from,
			     int bytes)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx);
	struct scatterlist *sg = ctx->sg_plaintext_data;
	int copy, i, rc = 0;

	for (i = tls_ctx->pending_open_record_frags;
	     i < ctx->sg_plaintext_num_elem; ++i) {
		/* NOTE(review): copies a whole sg entry at a time and only
		 * checks @bytes afterwards — assumes the caller sized the
		 * scatterlist so the remaining entries sum to exactly
		 * @bytes; confirm against alloc_plaintext_sg()/trim_sg()
		 * in tls_sw_sendmsg().
		 */
		copy = sg[i].length;
		if (copy_from_iter(
				page_address(sg_page(&sg[i])) + sg[i].offset,
				copy, from) != copy) {
			rc = -EFAULT;
			goto out;
		}

		bytes -= copy;
		/* This frag now belongs to the pending open record. */
		++tls_ctx->pending_open_record_frags;

		if (!bytes)
			break;
	}

out:
	return rc;
}
/* TLS sendmsg(): gather user data into TLS records (zero-copy when the
 * record closes immediately, otherwise via kernel page copies), encrypt
 * each full record and push it to the transport.  Returns the number of
 * bytes accepted, or a negative error when nothing was accepted.
 */
int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx);
	int ret = 0;
	int required_size;
	long timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
	bool eor = !(msg->msg_flags & MSG_MORE);
	size_t try_to_copy, copied = 0;
	unsigned char record_type = TLS_RECORD_TYPE_DATA;
	int record_room;
	bool full_record;
	int orig_size;

	if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL))
		return -ENOTSUPP;

	lock_sock(sk);

	/* Finish transmitting a previously closed record, if any. */
	if (tls_complete_pending_work(sk, tls_ctx, msg->msg_flags, &timeo))
		goto send_end;

	/* A cmsg may select a non-DATA record type. */
	if (unlikely(msg->msg_controllen)) {
		ret = tls_proccess_cmsg(sk, msg, &record_type);
		if (ret)
			goto send_end;
	}

	while (msg_data_left(msg)) {
		if (sk->sk_err) {
			ret = sk->sk_err;
			goto send_end;
		}

		orig_size = ctx->sg_plaintext_size;
		full_record = false;
		try_to_copy = msg_data_left(msg);
		/* Cap at what still fits into the open record. */
		record_room = TLS_MAX_PAYLOAD_SIZE - ctx->sg_plaintext_size;
		if (try_to_copy >= record_room) {
			try_to_copy = record_room;
			full_record = true;
		}

		/* Ciphertext needs room for header and tag as well. */
		required_size = ctx->sg_plaintext_size + try_to_copy +
				tls_ctx->overhead_size;

		if (!sk_stream_memory_free(sk))
			goto wait_for_sndbuf;
alloc_encrypted:
		ret = alloc_encrypted_sg(sk, required_size);
		if (ret) {
			if (ret != -ENOSPC)
				goto wait_for_memory;

			/* Adjust try_to_copy according to the amount that was
			 * actually allocated. The difference is due
			 * to max sg elements limit
			 */
			try_to_copy -= required_size - ctx->sg_encrypted_size;
			full_record = true;
		}

		/* A record that closes now can reference the user pages
		 * directly (zero-copy); otherwise fall through to copying.
		 */
		if (full_record || eor) {
			ret = zerocopy_from_iter(sk, &msg->msg_iter,
						 try_to_copy);
			if (ret)
				goto fallback_to_reg_send;

			copied += try_to_copy;
			ret = tls_push_record(sk, msg->msg_flags, record_type);
			if (!ret)
				continue;
			if (ret == -EAGAIN)
				goto send_end;

			copied -= try_to_copy;
fallback_to_reg_send:
			/* Undo partial zero-copy progress and retry through
			 * kernel pages instead.
			 */
			iov_iter_revert(&msg->msg_iter,
					ctx->sg_plaintext_size - orig_size);
			trim_sg(sk, ctx->sg_plaintext_data,
				&ctx->sg_plaintext_num_elem,
				&ctx->sg_plaintext_size,
				orig_size);
		}

		required_size = ctx->sg_plaintext_size + try_to_copy;
alloc_plaintext:
		ret = alloc_plaintext_sg(sk, required_size);
		if (ret) {
			if (ret != -ENOSPC)
				goto wait_for_memory;

			/* Adjust try_to_copy according to the amount that was
			 * actually allocated. The difference is due
			 * to max sg elements limit
			 */
			try_to_copy -= required_size - ctx->sg_plaintext_size;
			full_record = true;

			/* Keep the encrypted side in sync with the reduced
			 * plaintext size plus per-record overhead.
			 */
			trim_sg(sk, ctx->sg_encrypted_data,
				&ctx->sg_encrypted_num_elem,
				&ctx->sg_encrypted_size,
				ctx->sg_plaintext_size +
				tls_ctx->overhead_size);
		}

		ret = memcopy_from_iter(sk, &msg->msg_iter, try_to_copy);
		if (ret)
			goto trim_sgl;

		copied += try_to_copy;
		if (full_record || eor) {
push_record:
			ret = tls_push_record(sk, msg->msg_flags, record_type);
			if (ret) {
				if (ret == -ENOMEM)
					goto wait_for_memory;

				goto send_end;
			}
		}

		continue;

wait_for_sndbuf:
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
		ret = sk_stream_wait_memory(sk, &timeo);
		if (ret) {
trim_sgl:
			/* Give back everything staged this iteration. */
			trim_both_sgl(sk, orig_size);
			goto send_end;
		}

		/* Memory became available: resume whichever step stalled. */
		if (tls_is_pending_closed_record(tls_ctx))
			goto push_record;

		if (ctx->sg_encrypted_size < required_size)
			goto alloc_encrypted;

		goto alloc_plaintext;
	}

send_end:
	ret = sk_stream_error(sk, msg->msg_flags, ret);

	release_sock(sk);
	return copied ? copied : ret;
}
/* TLS sendpage(): append a range of an existing page to the open record
 * by reference (no data copy), closing and pushing the record when it is
 * full or when no more data is coming.  Returns the number of bytes
 * consumed, or a negative error when nothing was consumed.
 */
int tls_sw_sendpage(struct sock *sk, struct page *page,
		    int offset, size_t size, int flags)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx);
	int ret = 0;
	long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
	bool eor;
	size_t orig_size = size;
	unsigned char record_type = TLS_RECORD_TYPE_DATA;
	struct scatterlist *sg;
	bool full_record;
	int record_room;

	if (flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
		      MSG_SENDPAGE_NOTLAST))
		return -ENOTSUPP;

	/* No MSG_EOR from splice, only look at MSG_MORE */
	eor = !(flags & (MSG_MORE | MSG_SENDPAGE_NOTLAST));

	lock_sock(sk);

	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);

	/* Finish transmitting a previously closed record, if any. */
	if (tls_complete_pending_work(sk, tls_ctx, flags, &timeo))
		goto sendpage_end;

	/* Call the sk_stream functions to manage the sndbuf mem. */
	while (size > 0) {
		size_t copy, required_size;

		if (sk->sk_err) {
			ret = sk->sk_err;
			goto sendpage_end;
		}

		full_record = false;
		/* Cap at what still fits into the open record. */
		record_room = TLS_MAX_PAYLOAD_SIZE - ctx->sg_plaintext_size;
		copy = size;
		if (copy >= record_room) {
			copy = record_room;
			full_record = true;
		}
		required_size = ctx->sg_plaintext_size + copy +
			      tls_ctx->overhead_size;

		if (!sk_stream_memory_free(sk))
			goto wait_for_sndbuf;
alloc_payload:
		ret = alloc_encrypted_sg(sk, required_size);
		if (ret) {
			if (ret != -ENOSPC)
				goto wait_for_memory;

			/* Adjust copy according to the amount that was
			 * actually allocated. The difference is due
			 * to max sg elements limit
			 */
			/* NOTE(review): subtracts sg_plaintext_size although
			 * alloc_encrypted_sg() is what ran; the analogous
			 * branch in tls_sw_sendmsg() subtracts
			 * sg_encrypted_size — verify which is intended.
			 */
			copy -= required_size - ctx->sg_plaintext_size;
			full_record = true;
		}

		/* Reference the caller's page directly in the plaintext
		 * scatterlist — no copy takes place.
		 */
		get_page(page);
		sg = ctx->sg_plaintext_data + ctx->sg_plaintext_num_elem;
		sg_set_page(sg, page, copy, offset);
		ctx->sg_plaintext_num_elem++;

		sk_mem_charge(sk, copy);
		offset += copy;
		size -= copy;
		ctx->sg_plaintext_size += copy;
		tls_ctx->pending_open_record_frags = ctx->sg_plaintext_num_elem;

		if (full_record || eor ||
		    ctx->sg_plaintext_num_elem ==
		    ARRAY_SIZE(ctx->sg_plaintext_data)) {
push_record:
			ret = tls_push_record(sk, flags, record_type);
			if (ret) {
				if (ret == -ENOMEM)
					goto wait_for_memory;

				goto sendpage_end;
			}
		}
		continue;
wait_for_sndbuf:
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
		ret = sk_stream_wait_memory(sk, &timeo);
		if (ret) {
			trim_both_sgl(sk, ctx->sg_plaintext_size);
			goto sendpage_end;
		}

		if (tls_is_pending_closed_record(tls_ctx))
			goto push_record;

		goto alloc_payload;
	}

sendpage_end:
	/* Report partial progress if any bytes were consumed. */
	if (orig_size > size)
		ret = orig_size - size;
	else
		ret = sk_stream_error(sk, flags, ret);

	release_sock(sk);
	return ret;
}
/* Release all Tx state owned by the software TLS context: the AEAD
 * transform, any pages still held by the open record, and the context
 * itself.  Installed as ctx->free_resources by tls_set_sw_offload().
 */
void tls_sw_free_resources(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx);

	if (ctx->aead_send)
		crypto_free_aead(ctx->aead_send);

	tls_free_both_sg(sk);

	/* NOTE(review): tls_ctx->iv and tls_ctx->rec_seq allocated in
	 * tls_set_sw_offload() are not freed here — presumably the
	 * close path owns them; verify against the caller.
	 */
	kfree(ctx);
}
  540. int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx)
  541. {
  542. char keyval[TLS_CIPHER_AES_GCM_128_KEY_SIZE];
  543. struct tls_crypto_info *crypto_info;
  544. struct tls12_crypto_info_aes_gcm_128 *gcm_128_info;
  545. struct tls_sw_context *sw_ctx;
  546. u16 nonce_size, tag_size, iv_size, rec_seq_size;
  547. char *iv, *rec_seq;
  548. int rc = 0;
  549. if (!ctx) {
  550. rc = -EINVAL;
  551. goto out;
  552. }
  553. if (ctx->priv_ctx) {
  554. rc = -EEXIST;
  555. goto out;
  556. }
  557. sw_ctx = kzalloc(sizeof(*sw_ctx), GFP_KERNEL);
  558. if (!sw_ctx) {
  559. rc = -ENOMEM;
  560. goto out;
  561. }
  562. ctx->priv_ctx = (struct tls_offload_context *)sw_ctx;
  563. ctx->free_resources = tls_sw_free_resources;
  564. crypto_info = &ctx->crypto_send;
  565. switch (crypto_info->cipher_type) {
  566. case TLS_CIPHER_AES_GCM_128: {
  567. nonce_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
  568. tag_size = TLS_CIPHER_AES_GCM_128_TAG_SIZE;
  569. iv_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
  570. iv = ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->iv;
  571. rec_seq_size = TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE;
  572. rec_seq =
  573. ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->rec_seq;
  574. gcm_128_info =
  575. (struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
  576. break;
  577. }
  578. default:
  579. rc = -EINVAL;
  580. goto out;
  581. }
  582. ctx->prepend_size = TLS_HEADER_SIZE + nonce_size;
  583. ctx->tag_size = tag_size;
  584. ctx->overhead_size = ctx->prepend_size + ctx->tag_size;
  585. ctx->iv_size = iv_size;
  586. ctx->iv = kmalloc(iv_size + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
  587. GFP_KERNEL);
  588. if (!ctx->iv) {
  589. rc = -ENOMEM;
  590. goto out;
  591. }
  592. memcpy(ctx->iv, gcm_128_info->salt, TLS_CIPHER_AES_GCM_128_SALT_SIZE);
  593. memcpy(ctx->iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE, iv, iv_size);
  594. ctx->rec_seq_size = rec_seq_size;
  595. ctx->rec_seq = kmalloc(rec_seq_size, GFP_KERNEL);
  596. if (!ctx->rec_seq) {
  597. rc = -ENOMEM;
  598. goto free_iv;
  599. }
  600. memcpy(ctx->rec_seq, rec_seq, rec_seq_size);
  601. sg_init_table(sw_ctx->sg_encrypted_data,
  602. ARRAY_SIZE(sw_ctx->sg_encrypted_data));
  603. sg_init_table(sw_ctx->sg_plaintext_data,
  604. ARRAY_SIZE(sw_ctx->sg_plaintext_data));
  605. sg_init_table(sw_ctx->sg_aead_in, 2);
  606. sg_set_buf(&sw_ctx->sg_aead_in[0], sw_ctx->aad_space,
  607. sizeof(sw_ctx->aad_space));
  608. sg_unmark_end(&sw_ctx->sg_aead_in[1]);
  609. sg_chain(sw_ctx->sg_aead_in, 2, sw_ctx->sg_plaintext_data);
  610. sg_init_table(sw_ctx->sg_aead_out, 2);
  611. sg_set_buf(&sw_ctx->sg_aead_out[0], sw_ctx->aad_space,
  612. sizeof(sw_ctx->aad_space));
  613. sg_unmark_end(&sw_ctx->sg_aead_out[1]);
  614. sg_chain(sw_ctx->sg_aead_out, 2, sw_ctx->sg_encrypted_data);
  615. if (!sw_ctx->aead_send) {
  616. sw_ctx->aead_send = crypto_alloc_aead("gcm(aes)", 0, 0);
  617. if (IS_ERR(sw_ctx->aead_send)) {
  618. rc = PTR_ERR(sw_ctx->aead_send);
  619. sw_ctx->aead_send = NULL;
  620. goto free_rec_seq;
  621. }
  622. }
  623. ctx->push_pending_record = tls_sw_push_pending_record;
  624. memcpy(keyval, gcm_128_info->key, TLS_CIPHER_AES_GCM_128_KEY_SIZE);
  625. rc = crypto_aead_setkey(sw_ctx->aead_send, keyval,
  626. TLS_CIPHER_AES_GCM_128_KEY_SIZE);
  627. if (rc)
  628. goto free_aead;
  629. rc = crypto_aead_setauthsize(sw_ctx->aead_send, ctx->tag_size);
  630. if (!rc)
  631. goto out;
  632. free_aead:
  633. crypto_free_aead(sw_ctx->aead_send);
  634. sw_ctx->aead_send = NULL;
  635. free_rec_seq:
  636. kfree(ctx->rec_seq);
  637. ctx->rec_seq = NULL;
  638. free_iv:
  639. kfree(ctx->iv);
  640. ctx->iv = NULL;
  641. out:
  642. return rc;
  643. }