tls_sw.c

/*
 * Copyright (c) 2016-2017, Mellanox Technologies. All rights reserved.
 * Copyright (c) 2016-2017, Dave Watson <davejwatson@fb.com>. All rights reserved.
 * Copyright (c) 2016-2017, Lance Chao <lancerchao@fb.com>. All rights reserved.
 * Copyright (c) 2016, Fridolin Pokorny <fridolin.pokorny@gmail.com>. All rights reserved.
 * Copyright (c) 2016, Nikos Mavrogiannopoulos <nmav@gnutls.org>. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/sched/signal.h>
#include <linux/module.h>
#include <crypto/aead.h>

#include <net/strparser.h>
#include <net/tls.h>

#define MAX_IV_SIZE	TLS_CIPHER_AES_GCM_128_IV_SIZE
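
/* Decrypt one TLS record, reading ciphertext from @sgin and writing the
 * result to @sgout, using the AEAD transform configured for the RX
 * direction. On success the strparser message is advanced past the record
 * header and the RX record sequence number is bumped.
 */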
static int tls_do_decryption(struct sock *sk,
			     struct scatterlist *sgin,
			     struct scatterlist *sgout,
			     char *iv_recv,
			     size_t data_len,
			     struct sk_buff *skb,
			     gfp_t flags)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct strp_msg *rxm = strp_msg(skb);
	struct aead_request *aead_req;
	int ret;
	unsigned int req_size = sizeof(struct aead_request) +
		crypto_aead_reqsize(ctx->aead_recv);

	aead_req = kzalloc(req_size, flags);
	if (!aead_req)
		return -ENOMEM;

	aead_request_set_tfm(aead_req, ctx->aead_recv);
	aead_request_set_ad(aead_req, TLS_AAD_SPACE_SIZE);
	aead_request_set_crypt(aead_req, sgin, sgout,
			       data_len + tls_ctx->rx.tag_size,
			       (u8 *)iv_recv);
	aead_request_set_callback(aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  crypto_req_done, &ctx->async_wait);

	ret = crypto_wait_req(crypto_aead_decrypt(aead_req), &ctx->async_wait);
	if (ret < 0)
		goto out;

	rxm->offset += tls_ctx->rx.prepend_size;
	rxm->full_len -= tls_ctx->rx.overhead_size;
	tls_advance_record_sn(sk, &tls_ctx->rx);

	ctx->decrypted = true;

	ctx->saved_data_ready(sk);

out:
	kfree(aead_req);
	return ret;
}

static void trim_sg(struct sock *sk, struct scatterlist *sg,
		    int *sg_num_elem, unsigned int *sg_size, int target_size)
{
	int i = *sg_num_elem - 1;
	int trim = *sg_size - target_size;

	if (trim <= 0) {
		WARN_ON(trim < 0);
		return;
	}

	*sg_size = target_size;
	while (trim >= sg[i].length) {
		trim -= sg[i].length;
		sk_mem_uncharge(sk, sg[i].length);
		put_page(sg_page(&sg[i]));
		i--;

		if (i < 0)
			goto out;
	}

	sg[i].length -= trim;
	sk_mem_uncharge(sk, trim);

out:
	*sg_num_elem = i + 1;
}

static void trim_both_sgl(struct sock *sk, int target_size)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);

	trim_sg(sk, ctx->sg_plaintext_data,
		&ctx->sg_plaintext_num_elem,
		&ctx->sg_plaintext_size,
		target_size);

	if (target_size > 0)
		target_size += tls_ctx->tx.overhead_size;

	trim_sg(sk, ctx->sg_encrypted_data,
		&ctx->sg_encrypted_num_elem,
		&ctx->sg_encrypted_size,
		target_size);
}

static int alloc_encrypted_sg(struct sock *sk, int len)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	int rc = 0;

	rc = sk_alloc_sg(sk, len,
			 ctx->sg_encrypted_data, 0,
			 &ctx->sg_encrypted_num_elem,
			 &ctx->sg_encrypted_size, 0);

	return rc;
}

static int alloc_plaintext_sg(struct sock *sk, int len)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	int rc = 0;

	rc = sk_alloc_sg(sk, len, ctx->sg_plaintext_data, 0,
			 &ctx->sg_plaintext_num_elem, &ctx->sg_plaintext_size,
			 tls_ctx->pending_open_record_frags);

	return rc;
}

static void free_sg(struct sock *sk, struct scatterlist *sg,
		    int *sg_num_elem, unsigned int *sg_size)
{
	int i, n = *sg_num_elem;

	for (i = 0; i < n; ++i) {
		sk_mem_uncharge(sk, sg[i].length);
		put_page(sg_page(&sg[i]));
	}
	*sg_num_elem = 0;
	*sg_size = 0;
}

static void tls_free_both_sg(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);

	free_sg(sk, ctx->sg_encrypted_data, &ctx->sg_encrypted_num_elem,
		&ctx->sg_encrypted_size);

	free_sg(sk, ctx->sg_plaintext_data, &ctx->sg_plaintext_num_elem,
		&ctx->sg_plaintext_size);
}

static int tls_do_encryption(struct tls_context *tls_ctx,
			     struct tls_sw_context_tx *ctx,
			     struct aead_request *aead_req,
			     size_t data_len)
{
	int rc;

	ctx->sg_encrypted_data[0].offset += tls_ctx->tx.prepend_size;
	ctx->sg_encrypted_data[0].length -= tls_ctx->tx.prepend_size;

	aead_request_set_tfm(aead_req, ctx->aead_send);
	aead_request_set_ad(aead_req, TLS_AAD_SPACE_SIZE);
	aead_request_set_crypt(aead_req, ctx->sg_aead_in, ctx->sg_aead_out,
			       data_len, tls_ctx->tx.iv);
	aead_request_set_callback(aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  crypto_req_done, &ctx->async_wait);

	rc = crypto_wait_req(crypto_aead_encrypt(aead_req), &ctx->async_wait);

	ctx->sg_encrypted_data[0].offset -= tls_ctx->tx.prepend_size;
	ctx->sg_encrypted_data[0].length += tls_ctx->tx.prepend_size;

	return rc;
}
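
/* Close the currently open record: build the AAD and the TLS record header,
 * encrypt the plaintext scatterlist into the encrypted one, and hand the
 * ciphertext to tls_push_sg() for transmission.
 */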
static int tls_push_record(struct sock *sk, int flags,
			   unsigned char record_type)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct aead_request *req;
	int rc;

	req = kzalloc(sizeof(struct aead_request) +
		      crypto_aead_reqsize(ctx->aead_send), sk->sk_allocation);
	if (!req)
		return -ENOMEM;

	sg_mark_end(ctx->sg_plaintext_data + ctx->sg_plaintext_num_elem - 1);
	sg_mark_end(ctx->sg_encrypted_data + ctx->sg_encrypted_num_elem - 1);

	tls_make_aad(ctx->aad_space, ctx->sg_plaintext_size,
		     tls_ctx->tx.rec_seq, tls_ctx->tx.rec_seq_size,
		     record_type);

	tls_fill_prepend(tls_ctx,
			 page_address(sg_page(&ctx->sg_encrypted_data[0])) +
			 ctx->sg_encrypted_data[0].offset,
			 ctx->sg_plaintext_size, record_type);

	tls_ctx->pending_open_record_frags = 0;
	set_bit(TLS_PENDING_CLOSED_RECORD, &tls_ctx->flags);

	rc = tls_do_encryption(tls_ctx, ctx, req, ctx->sg_plaintext_size);
	if (rc < 0) {
		/* If we are called from write_space and
		 * we fail, we need to set this SOCK_NOSPACE
		 * to trigger another write_space in the future.
		 */
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		goto out_req;
	}

	free_sg(sk, ctx->sg_plaintext_data, &ctx->sg_plaintext_num_elem,
		&ctx->sg_plaintext_size);

	ctx->sg_encrypted_num_elem = 0;
	ctx->sg_encrypted_size = 0;

	/* Only pass through MSG_DONTWAIT and MSG_NOSIGNAL flags */
	rc = tls_push_sg(sk, tls_ctx, ctx->sg_encrypted_data, 0, flags);
	if (rc < 0 && rc != -EAGAIN)
		tls_err_abort(sk, EBADMSG);

	tls_advance_record_sn(sk, &tls_ctx->tx);
out_req:
	kfree(req);
	return rc;
}

static int tls_sw_push_pending_record(struct sock *sk, int flags)
{
	return tls_push_record(sk, flags, TLS_RECORD_TYPE_DATA);
}
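
/* Pin the user pages referenced by @from and map them directly into the
 * scatterlist @to, so the payload can be encrypted without an intermediate
 * copy. Fails with -EFAULT once @to_max_pages entries are exhausted or the
 * pages cannot be pinned.
 */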
static int zerocopy_from_iter(struct sock *sk, struct iov_iter *from,
			      int length, int *pages_used,
			      unsigned int *size_used,
			      struct scatterlist *to, int to_max_pages,
			      bool charge)
{
	struct page *pages[MAX_SKB_FRAGS];
	size_t offset;
	ssize_t copied, use;
	int i = 0;
	unsigned int size = *size_used;
	int num_elem = *pages_used;
	int rc = 0;
	int maxpages;

	while (length > 0) {
		i = 0;
		maxpages = to_max_pages - num_elem;
		if (maxpages == 0) {
			rc = -EFAULT;
			goto out;
		}
		copied = iov_iter_get_pages(from, pages,
					    length,
					    maxpages, &offset);
		if (copied <= 0) {
			rc = -EFAULT;
			goto out;
		}

		iov_iter_advance(from, copied);

		length -= copied;
		size += copied;
		while (copied) {
			use = min_t(int, copied, PAGE_SIZE - offset);

			sg_set_page(&to[num_elem],
				    pages[i], use, offset);
			sg_unmark_end(&to[num_elem]);
			if (charge)
				sk_mem_charge(sk, use);

			offset = 0;
			copied -= use;

			++i;
			++num_elem;
		}
	}

out:
	*size_used = size;
	*pages_used = num_elem;

	return rc;
}

static int memcopy_from_iter(struct sock *sk, struct iov_iter *from,
			     int bytes)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct scatterlist *sg = ctx->sg_plaintext_data;
	int copy, i, rc = 0;

	for (i = tls_ctx->pending_open_record_frags;
	     i < ctx->sg_plaintext_num_elem; ++i) {
		copy = sg[i].length;
		if (copy_from_iter(
				page_address(sg_page(&sg[i])) + sg[i].offset,
				copy, from) != copy) {
			rc = -EFAULT;
			goto out;
		}
		bytes -= copy;

		++tls_ctx->pending_open_record_frags;

		if (!bytes)
			break;
	}

out:
	return rc;
}
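
/* sendmsg() for a TLS_SW socket: batch user data into records of at most
 * TLS_MAX_PAYLOAD_SIZE bytes, preferring the zero-copy path when a record is
 * full or no more data is expected (no MSG_MORE), and falling back to
 * copying into the plaintext scatterlist otherwise.
 */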
int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	int ret = 0;
	int required_size;
	long timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
	bool eor = !(msg->msg_flags & MSG_MORE);
	size_t try_to_copy, copied = 0;
	unsigned char record_type = TLS_RECORD_TYPE_DATA;
	int record_room;
	bool full_record;
	int orig_size;

	if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL))
		return -ENOTSUPP;

	lock_sock(sk);

	if (tls_complete_pending_work(sk, tls_ctx, msg->msg_flags, &timeo))
		goto send_end;

	if (unlikely(msg->msg_controllen)) {
		ret = tls_proccess_cmsg(sk, msg, &record_type);
		if (ret)
			goto send_end;
	}

	while (msg_data_left(msg)) {
		if (sk->sk_err) {
			ret = -sk->sk_err;
			goto send_end;
		}

		orig_size = ctx->sg_plaintext_size;
		full_record = false;
		try_to_copy = msg_data_left(msg);
		record_room = TLS_MAX_PAYLOAD_SIZE - ctx->sg_plaintext_size;
		if (try_to_copy >= record_room) {
			try_to_copy = record_room;
			full_record = true;
		}

		required_size = ctx->sg_plaintext_size + try_to_copy +
				tls_ctx->tx.overhead_size;

		if (!sk_stream_memory_free(sk))
			goto wait_for_sndbuf;
alloc_encrypted:
		ret = alloc_encrypted_sg(sk, required_size);
		if (ret) {
			if (ret != -ENOSPC)
				goto wait_for_memory;

			/* Adjust try_to_copy according to the amount that was
			 * actually allocated. The difference is due
			 * to max sg elements limit
			 */
			try_to_copy -= required_size - ctx->sg_encrypted_size;
			full_record = true;
		}

		if (full_record || eor) {
			ret = zerocopy_from_iter(sk, &msg->msg_iter,
				try_to_copy, &ctx->sg_plaintext_num_elem,
				&ctx->sg_plaintext_size,
				ctx->sg_plaintext_data,
				ARRAY_SIZE(ctx->sg_plaintext_data),
				true);
			if (ret)
				goto fallback_to_reg_send;

			copied += try_to_copy;
			ret = tls_push_record(sk, msg->msg_flags, record_type);
			if (!ret)
				continue;
			if (ret == -EAGAIN)
				goto send_end;

			copied -= try_to_copy;
fallback_to_reg_send:
			iov_iter_revert(&msg->msg_iter,
					ctx->sg_plaintext_size - orig_size);
			trim_sg(sk, ctx->sg_plaintext_data,
				&ctx->sg_plaintext_num_elem,
				&ctx->sg_plaintext_size,
				orig_size);
		}

		required_size = ctx->sg_plaintext_size + try_to_copy;
alloc_plaintext:
		ret = alloc_plaintext_sg(sk, required_size);
		if (ret) {
			if (ret != -ENOSPC)
				goto wait_for_memory;

			/* Adjust try_to_copy according to the amount that was
			 * actually allocated. The difference is due
			 * to max sg elements limit
			 */
			try_to_copy -= required_size - ctx->sg_plaintext_size;
			full_record = true;

			trim_sg(sk, ctx->sg_encrypted_data,
				&ctx->sg_encrypted_num_elem,
				&ctx->sg_encrypted_size,
				ctx->sg_plaintext_size +
				tls_ctx->tx.overhead_size);
		}

		ret = memcopy_from_iter(sk, &msg->msg_iter, try_to_copy);
		if (ret)
			goto trim_sgl;

		copied += try_to_copy;
		if (full_record || eor) {
push_record:
			ret = tls_push_record(sk, msg->msg_flags, record_type);
			if (ret) {
				if (ret == -ENOMEM)
					goto wait_for_memory;

				goto send_end;
			}
		}

		continue;

wait_for_sndbuf:
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
		ret = sk_stream_wait_memory(sk, &timeo);
		if (ret) {
trim_sgl:
			trim_both_sgl(sk, orig_size);
			goto send_end;
		}

		if (tls_is_pending_closed_record(tls_ctx))
			goto push_record;

		if (ctx->sg_encrypted_size < required_size)
			goto alloc_encrypted;

		goto alloc_plaintext;
	}

send_end:
	ret = sk_stream_error(sk, msg->msg_flags, ret);

	release_sock(sk);
	return copied ? copied : ret;
}
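
/* sendpage() for a TLS_SW socket: reference the caller's page directly in the
 * plaintext scatterlist and push a record once it is full, no more data is
 * expected, or the scatterlist array is exhausted.
 */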
int tls_sw_sendpage(struct sock *sk, struct page *page,
		    int offset, size_t size, int flags)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	int ret = 0;
	long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
	bool eor;
	size_t orig_size = size;
	unsigned char record_type = TLS_RECORD_TYPE_DATA;
	struct scatterlist *sg;
	bool full_record;
	int record_room;

	if (flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
		      MSG_SENDPAGE_NOTLAST))
		return -ENOTSUPP;

	/* No MSG_EOR from splice, only look at MSG_MORE */
	eor = !(flags & (MSG_MORE | MSG_SENDPAGE_NOTLAST));

	lock_sock(sk);

	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);

	if (tls_complete_pending_work(sk, tls_ctx, flags, &timeo))
		goto sendpage_end;

	/* Call the sk_stream functions to manage the sndbuf mem. */
	while (size > 0) {
		size_t copy, required_size;

		if (sk->sk_err) {
			ret = -sk->sk_err;
			goto sendpage_end;
		}

		full_record = false;
		record_room = TLS_MAX_PAYLOAD_SIZE - ctx->sg_plaintext_size;
		copy = size;
		if (copy >= record_room) {
			copy = record_room;
			full_record = true;
		}
		required_size = ctx->sg_plaintext_size + copy +
				tls_ctx->tx.overhead_size;

		if (!sk_stream_memory_free(sk))
			goto wait_for_sndbuf;
alloc_payload:
		ret = alloc_encrypted_sg(sk, required_size);
		if (ret) {
			if (ret != -ENOSPC)
				goto wait_for_memory;

			/* Adjust copy according to the amount that was
			 * actually allocated. The difference is due
			 * to max sg elements limit
			 */
			copy -= required_size - ctx->sg_plaintext_size;
			full_record = true;
		}

		get_page(page);
		sg = ctx->sg_plaintext_data + ctx->sg_plaintext_num_elem;
		sg_set_page(sg, page, copy, offset);
		sg_unmark_end(sg);

		ctx->sg_plaintext_num_elem++;

		sk_mem_charge(sk, copy);
		offset += copy;
		size -= copy;
		ctx->sg_plaintext_size += copy;
		tls_ctx->pending_open_record_frags = ctx->sg_plaintext_num_elem;

		if (full_record || eor ||
		    ctx->sg_plaintext_num_elem ==
		    ARRAY_SIZE(ctx->sg_plaintext_data)) {
push_record:
			ret = tls_push_record(sk, flags, record_type);
			if (ret) {
				if (ret == -ENOMEM)
					goto wait_for_memory;

				goto sendpage_end;
			}
		}
		continue;
wait_for_sndbuf:
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
		ret = sk_stream_wait_memory(sk, &timeo);
		if (ret) {
			trim_both_sgl(sk, ctx->sg_plaintext_size);
			goto sendpage_end;
		}

		if (tls_is_pending_closed_record(tls_ctx))
			goto push_record;

		goto alloc_payload;
	}

sendpage_end:
	if (orig_size > size)
		ret = orig_size - size;
	else
		ret = sk_stream_error(sk, flags, ret);

	release_sock(sk);
	return ret;
}
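
/* Wait until the strparser has queued a complete record (ctx->recv_pkt),
 * honouring MSG_DONTWAIT, the receive timeout, pending signals and socket
 * errors.
 */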
static struct sk_buff *tls_wait_data(struct sock *sk, int flags,
				     long timeo, int *err)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct sk_buff *skb;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	while (!(skb = ctx->recv_pkt)) {
		if (sk->sk_err) {
			*err = sock_error(sk);
			return NULL;
		}

		if (sock_flag(sk, SOCK_DONE))
			return NULL;

		if ((flags & MSG_DONTWAIT) || !timeo) {
			*err = -EAGAIN;
			return NULL;
		}

		add_wait_queue(sk_sleep(sk), &wait);
		sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
		sk_wait_event(sk, &timeo, ctx->recv_pkt != skb, &wait);
		sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
		remove_wait_queue(sk_sleep(sk), &wait);

		/* Handle signals */
		if (signal_pending(current)) {
			*err = sock_intr_errno(timeo);
			return NULL;
		}
	}

	return skb;
}
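
/* Decrypt the record held in @skb. If @sgout is NULL the record is decrypted
 * in place within the skb; otherwise the plaintext is written to the
 * caller-supplied scatterlist (the zero-copy receive path).
 */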
static int decrypt_skb(struct sock *sk, struct sk_buff *skb,
		       struct scatterlist *sgout)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	char iv[TLS_CIPHER_AES_GCM_128_SALT_SIZE + MAX_IV_SIZE];
	struct scatterlist sgin_arr[MAX_SKB_FRAGS + 2];
	struct scatterlist *sgin = &sgin_arr[0];
	struct strp_msg *rxm = strp_msg(skb);
	int ret, nsg = ARRAY_SIZE(sgin_arr);
	struct sk_buff *unused;

	ret = skb_copy_bits(skb, rxm->offset + TLS_HEADER_SIZE,
			    iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
			    tls_ctx->rx.iv_size);
	if (ret < 0)
		return ret;

	memcpy(iv, tls_ctx->rx.iv, TLS_CIPHER_AES_GCM_128_SALT_SIZE);
	if (!sgout) {
		nsg = skb_cow_data(skb, 0, &unused) + 1;
		sgin = kmalloc_array(nsg, sizeof(*sgin), sk->sk_allocation);
		/* kmalloc_array() can fail; bail out rather than dereference
		 * a NULL scatterlist below.
		 */
		if (!sgin)
			return -ENOMEM;
		sgout = sgin;
	}

	sg_init_table(sgin, nsg);
	sg_set_buf(&sgin[0], ctx->rx_aad_ciphertext, TLS_AAD_SPACE_SIZE);

	nsg = skb_to_sgvec(skb, &sgin[1],
			   rxm->offset + tls_ctx->rx.prepend_size,
			   rxm->full_len - tls_ctx->rx.prepend_size);

	tls_make_aad(ctx->rx_aad_ciphertext,
		     rxm->full_len - tls_ctx->rx.overhead_size,
		     tls_ctx->rx.rec_seq,
		     tls_ctx->rx.rec_seq_size,
		     ctx->control);

	ret = tls_do_decryption(sk, sgin, sgout, iv,
				rxm->full_len - tls_ctx->rx.overhead_size,
				skb, sk->sk_allocation);

	if (sgin != &sgin_arr[0])
		kfree(sgin);

	return ret;
}

static bool tls_sw_advance_skb(struct sock *sk, struct sk_buff *skb,
			       unsigned int len)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct strp_msg *rxm = strp_msg(skb);

	if (len < rxm->full_len) {
		rxm->offset += len;
		rxm->full_len -= len;

		return false;
	}

	/* Finished with message */
	ctx->recv_pkt = NULL;
	kfree_skb(skb);
	__strp_unpause(&ctx->strp);

	return true;
}
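
/* recvmsg() for a TLS_SW socket: report the record type via a
 * TLS_GET_RECORD_TYPE control message, decrypt records (zero-copy into the
 * user buffer when the iovec allows it), and stop at record-type boundaries.
 */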
int tls_sw_recvmsg(struct sock *sk,
		   struct msghdr *msg,
		   size_t len,
		   int nonblock,
		   int flags,
		   int *addr_len)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	unsigned char control;
	struct strp_msg *rxm;
	struct sk_buff *skb;
	ssize_t copied = 0;
	bool cmsg = false;
	int target, err = 0;
	long timeo;

	flags |= nonblock;

	if (unlikely(flags & MSG_ERRQUEUE))
		return sock_recv_errqueue(sk, msg, len, SOL_IP, IP_RECVERR);

	lock_sock(sk);

	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
	do {
		bool zc = false;
		int chunk = 0;

		skb = tls_wait_data(sk, flags, timeo, &err);
		if (!skb)
			goto recv_end;

		rxm = strp_msg(skb);
		if (!cmsg) {
			int cerr;

			cerr = put_cmsg(msg, SOL_TLS, TLS_GET_RECORD_TYPE,
					sizeof(ctx->control), &ctx->control);
			cmsg = true;
			control = ctx->control;
			if (ctx->control != TLS_RECORD_TYPE_DATA) {
				if (cerr || msg->msg_flags & MSG_CTRUNC) {
					err = -EIO;
					goto recv_end;
				}
			}
		} else if (control != ctx->control) {
			goto recv_end;
		}

		if (!ctx->decrypted) {
			int page_count;
			int to_copy;

			page_count = iov_iter_npages(&msg->msg_iter,
						     MAX_SKB_FRAGS);
			to_copy = rxm->full_len - tls_ctx->rx.overhead_size;
			if (to_copy <= len && page_count < MAX_SKB_FRAGS &&
			    likely(!(flags & MSG_PEEK))) {
				struct scatterlist sgin[MAX_SKB_FRAGS + 1];
				int pages = 0;

				zc = true;
				sg_init_table(sgin, MAX_SKB_FRAGS + 1);
				sg_set_buf(&sgin[0], ctx->rx_aad_plaintext,
					   TLS_AAD_SPACE_SIZE);

				err = zerocopy_from_iter(sk, &msg->msg_iter,
							 to_copy, &pages,
							 &chunk, &sgin[1],
							 MAX_SKB_FRAGS, false);
				if (err < 0)
					goto fallback_to_reg_recv;

				err = decrypt_skb(sk, skb, sgin);
				for (; pages > 0; pages--)
					put_page(sg_page(&sgin[pages]));
				if (err < 0) {
					tls_err_abort(sk, EBADMSG);
					goto recv_end;
				}
			} else {
fallback_to_reg_recv:
				err = decrypt_skb(sk, skb, NULL);
				if (err < 0) {
					tls_err_abort(sk, EBADMSG);
					goto recv_end;
				}
			}
			ctx->decrypted = true;
		}

		if (!zc) {
			chunk = min_t(unsigned int, rxm->full_len, len);
			err = skb_copy_datagram_msg(skb, rxm->offset, msg,
						    chunk);
			if (err < 0)
				goto recv_end;
		}

		copied += chunk;
		len -= chunk;
		if (likely(!(flags & MSG_PEEK))) {
			u8 control = ctx->control;

			if (tls_sw_advance_skb(sk, skb, chunk)) {
				/* Return full control message to
				 * userspace before trying to parse
				 * another message type
				 */
				msg->msg_flags |= MSG_EOR;
				if (control != TLS_RECORD_TYPE_DATA)
					goto recv_end;
			}
		}

		/* If we have a new message from strparser, continue now. */
		if (copied >= target && !ctx->recv_pkt)
			break;
	} while (len);

recv_end:
	release_sock(sk);
	return copied ? : err;
}

ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
			   struct pipe_inode_info *pipe,
			   size_t len, unsigned int flags)
{
	struct tls_context *tls_ctx = tls_get_ctx(sock->sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct strp_msg *rxm = NULL;
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	ssize_t copied = 0;
	int err = 0;
	long timeo;
	int chunk;

	lock_sock(sk);

	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

	skb = tls_wait_data(sk, flags, timeo, &err);
	if (!skb)
		goto splice_read_end;

	/* splice does not support reading control messages */
	if (ctx->control != TLS_RECORD_TYPE_DATA) {
		err = -ENOTSUPP;
		goto splice_read_end;
	}

	if (!ctx->decrypted) {
		err = decrypt_skb(sk, skb, NULL);

		if (err < 0) {
			tls_err_abort(sk, EBADMSG);
			goto splice_read_end;
		}
		ctx->decrypted = true;
	}
	rxm = strp_msg(skb);

	chunk = min_t(unsigned int, rxm->full_len, len);
	copied = skb_splice_bits(skb, sk, rxm->offset, pipe, chunk, flags);
	if (copied < 0)
		goto splice_read_end;

	if (likely(!(flags & MSG_PEEK)))
		tls_sw_advance_skb(sk, skb, copied);

splice_read_end:
	release_sock(sk);
	return copied ? : err;
}

unsigned int tls_sw_poll(struct file *file, struct socket *sock,
			 struct poll_table_struct *wait)
{
	unsigned int ret;
	struct sock *sk = sock->sk;
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);

	/* Grab POLLOUT and POLLHUP from the underlying socket */
	ret = ctx->sk_poll(file, sock, wait);

	/* Clear POLLIN bits, and set based on recv_pkt */
	ret &= ~(POLLIN | POLLRDNORM);
	if (ctx->recv_pkt)
		ret |= POLLIN | POLLRDNORM;

	return ret;
}
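
/* strparser parse_msg callback: validate the 5-byte TLS record header and
 * return the full record length (payload plus header), 0 when more data is
 * needed, or a negative error which aborts the connection.
 */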
static int tls_read_size(struct strparser *strp, struct sk_buff *skb)
{
	struct tls_context *tls_ctx = tls_get_ctx(strp->sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	char header[tls_ctx->rx.prepend_size];
	struct strp_msg *rxm = strp_msg(skb);
	size_t cipher_overhead;
	size_t data_len = 0;
	int ret;

	/* Verify that we have a full TLS header, or wait for more data */
	if (rxm->offset + tls_ctx->rx.prepend_size > skb->len)
		return 0;

	/* Linearize header to local buffer */
	ret = skb_copy_bits(skb, rxm->offset, header, tls_ctx->rx.prepend_size);

	if (ret < 0)
		goto read_failure;

	ctx->control = header[0];

	data_len = ((header[4] & 0xFF) | (header[3] << 8));

	cipher_overhead = tls_ctx->rx.tag_size + tls_ctx->rx.iv_size;

	if (data_len > TLS_MAX_PAYLOAD_SIZE + cipher_overhead) {
		ret = -EMSGSIZE;
		goto read_failure;
	}
	if (data_len < cipher_overhead) {
		ret = -EBADMSG;
		goto read_failure;
	}

	if (header[1] != TLS_VERSION_MINOR(tls_ctx->crypto_recv.version) ||
	    header[2] != TLS_VERSION_MAJOR(tls_ctx->crypto_recv.version)) {
		ret = -EINVAL;
		goto read_failure;
	}

	return data_len + TLS_HEADER_SIZE;

read_failure:
	tls_err_abort(strp->sk, ret);

	return ret;
}

static void tls_queue(struct strparser *strp, struct sk_buff *skb)
{
	struct tls_context *tls_ctx = tls_get_ctx(strp->sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct strp_msg *rxm;

	rxm = strp_msg(skb);

	ctx->decrypted = false;

	ctx->recv_pkt = skb;
	strp_pause(strp);

	strp->sk->sk_state_change(strp->sk);
}

static void tls_data_ready(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);

	strp_data_ready(&ctx->strp);
}

void tls_sw_free_resources_tx(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);

	if (ctx->aead_send)
		crypto_free_aead(ctx->aead_send);

	tls_free_both_sg(sk);

	kfree(ctx);
}

void tls_sw_free_resources_rx(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);

	if (ctx->aead_recv) {
		if (ctx->recv_pkt) {
			kfree_skb(ctx->recv_pkt);
			ctx->recv_pkt = NULL;
		}
		crypto_free_aead(ctx->aead_recv);
		strp_stop(&ctx->strp);
		write_lock_bh(&sk->sk_callback_lock);
		sk->sk_data_ready = ctx->saved_data_ready;
		write_unlock_bh(&sk->sk_callback_lock);
		release_sock(sk);
		strp_done(&ctx->strp);
		lock_sock(sk);
	}

	kfree(ctx);
}
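
/* Configure software TLS for one direction of @sk: copy the AES-GCM-128 key
 * material out of @ctx, allocate and key the gcm(aes) AEAD transform, and
 * for the RX direction attach a strparser that carves the byte stream into
 * TLS records.
 */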
int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
{
	char keyval[TLS_CIPHER_AES_GCM_128_KEY_SIZE];
	struct tls_crypto_info *crypto_info;
	struct tls12_crypto_info_aes_gcm_128 *gcm_128_info;
	struct tls_sw_context_tx *sw_ctx_tx = NULL;
	struct tls_sw_context_rx *sw_ctx_rx = NULL;
	struct cipher_context *cctx;
	struct crypto_aead **aead;
	struct strp_callbacks cb;
	u16 nonce_size, tag_size, iv_size, rec_seq_size;
	char *iv, *rec_seq;
	int rc = 0;

	if (!ctx) {
		rc = -EINVAL;
		goto out;
	}

	if (tx) {
		sw_ctx_tx = kzalloc(sizeof(*sw_ctx_tx), GFP_KERNEL);
		if (!sw_ctx_tx) {
			rc = -ENOMEM;
			goto out;
		}
		crypto_init_wait(&sw_ctx_tx->async_wait);
		ctx->priv_ctx_tx = sw_ctx_tx;
	} else {
		sw_ctx_rx = kzalloc(sizeof(*sw_ctx_rx), GFP_KERNEL);
		if (!sw_ctx_rx) {
			rc = -ENOMEM;
			goto out;
		}
		crypto_init_wait(&sw_ctx_rx->async_wait);
		ctx->priv_ctx_rx = sw_ctx_rx;
	}

	if (tx) {
		crypto_info = &ctx->crypto_send;
		cctx = &ctx->tx;
		aead = &sw_ctx_tx->aead_send;
	} else {
		crypto_info = &ctx->crypto_recv;
		cctx = &ctx->rx;
		aead = &sw_ctx_rx->aead_recv;
	}

	switch (crypto_info->cipher_type) {
	case TLS_CIPHER_AES_GCM_128: {
		nonce_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
		tag_size = TLS_CIPHER_AES_GCM_128_TAG_SIZE;
		iv_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
		iv = ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->iv;
		rec_seq_size = TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE;
		rec_seq =
		 ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->rec_seq;
		gcm_128_info =
			(struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
		break;
	}
	default:
		rc = -EINVAL;
		goto free_priv;
	}

	/* Sanity-check the IV size for stack allocations. */
	if (iv_size > MAX_IV_SIZE) {
		rc = -EINVAL;
		goto free_priv;
	}

	cctx->prepend_size = TLS_HEADER_SIZE + nonce_size;
	cctx->tag_size = tag_size;
	cctx->overhead_size = cctx->prepend_size + cctx->tag_size;
	cctx->iv_size = iv_size;
	cctx->iv = kmalloc(iv_size + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
			   GFP_KERNEL);
	if (!cctx->iv) {
		rc = -ENOMEM;
		goto free_priv;
	}
	memcpy(cctx->iv, gcm_128_info->salt, TLS_CIPHER_AES_GCM_128_SALT_SIZE);
	memcpy(cctx->iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE, iv, iv_size);
	cctx->rec_seq_size = rec_seq_size;
	cctx->rec_seq = kmalloc(rec_seq_size, GFP_KERNEL);
	if (!cctx->rec_seq) {
		rc = -ENOMEM;
		goto free_iv;
	}
	memcpy(cctx->rec_seq, rec_seq, rec_seq_size);

	if (sw_ctx_tx) {
		sg_init_table(sw_ctx_tx->sg_encrypted_data,
			      ARRAY_SIZE(sw_ctx_tx->sg_encrypted_data));
		sg_init_table(sw_ctx_tx->sg_plaintext_data,
			      ARRAY_SIZE(sw_ctx_tx->sg_plaintext_data));

		sg_init_table(sw_ctx_tx->sg_aead_in, 2);
		sg_set_buf(&sw_ctx_tx->sg_aead_in[0], sw_ctx_tx->aad_space,
			   sizeof(sw_ctx_tx->aad_space));
		sg_unmark_end(&sw_ctx_tx->sg_aead_in[1]);
		sg_chain(sw_ctx_tx->sg_aead_in, 2,
			 sw_ctx_tx->sg_plaintext_data);
		sg_init_table(sw_ctx_tx->sg_aead_out, 2);
		sg_set_buf(&sw_ctx_tx->sg_aead_out[0], sw_ctx_tx->aad_space,
			   sizeof(sw_ctx_tx->aad_space));
		sg_unmark_end(&sw_ctx_tx->sg_aead_out[1]);
		sg_chain(sw_ctx_tx->sg_aead_out, 2,
			 sw_ctx_tx->sg_encrypted_data);
	}

	if (!*aead) {
		*aead = crypto_alloc_aead("gcm(aes)", 0, 0);
		if (IS_ERR(*aead)) {
			rc = PTR_ERR(*aead);
			*aead = NULL;
			goto free_rec_seq;
		}
	}

	ctx->push_pending_record = tls_sw_push_pending_record;

	memcpy(keyval, gcm_128_info->key, TLS_CIPHER_AES_GCM_128_KEY_SIZE);

	rc = crypto_aead_setkey(*aead, keyval,
				TLS_CIPHER_AES_GCM_128_KEY_SIZE);
	if (rc)
		goto free_aead;

	rc = crypto_aead_setauthsize(*aead, cctx->tag_size);
	if (rc)
		goto free_aead;

	if (sw_ctx_rx) {
		/* Set up strparser */
		memset(&cb, 0, sizeof(cb));
		cb.rcv_msg = tls_queue;
		cb.parse_msg = tls_read_size;

		strp_init(&sw_ctx_rx->strp, sk, &cb);

		write_lock_bh(&sk->sk_callback_lock);
		sw_ctx_rx->saved_data_ready = sk->sk_data_ready;
		sk->sk_data_ready = tls_data_ready;
		write_unlock_bh(&sk->sk_callback_lock);

		sw_ctx_rx->sk_poll = sk->sk_socket->ops->poll;

		strp_check_rcv(&sw_ctx_rx->strp);
	}

	goto out;

free_aead:
	crypto_free_aead(*aead);
	*aead = NULL;
free_rec_seq:
	kfree(cctx->rec_seq);
	cctx->rec_seq = NULL;
free_iv:
	kfree(cctx->iv);
	cctx->iv = NULL;
free_priv:
	if (tx) {
		kfree(ctx->priv_ctx_tx);
		ctx->priv_ctx_tx = NULL;
	} else {
		kfree(ctx->priv_ctx_rx);
		ctx->priv_ctx_rx = NULL;
	}
out:
	return rc;
}