tls_sw.c

/*
 * Copyright (c) 2016-2017, Mellanox Technologies. All rights reserved.
 * Copyright (c) 2016-2017, Dave Watson <davejwatson@fb.com>. All rights reserved.
 * Copyright (c) 2016-2017, Lance Chao <lancerchao@fb.com>. All rights reserved.
 * Copyright (c) 2016, Fridolin Pokorny <fridolin.pokorny@gmail.com>. All rights reserved.
 * Copyright (c) 2016, Nikos Mavrogiannopoulos <nmav@gnutls.org>. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/sched/signal.h>
#include <linux/module.h>
#include <crypto/aead.h>

#include <net/strparser.h>
#include <net/tls.h>

#define MAX_IV_SIZE	TLS_CIPHER_AES_GCM_128_IV_SIZE

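/* Decrypt a single TLS record: data_len bytes of payload plus the
 * authentication tag are decrypted from sgin into sgout with the
 * receive AEAD transform, waiting synchronously for completion.  On
 * success the record header is skipped, the RX record sequence number
 * is advanced and the record is marked as decrypted.
 */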
static int tls_do_decryption(struct sock *sk,
			     struct scatterlist *sgin,
			     struct scatterlist *sgout,
			     char *iv_recv,
			     size_t data_len,
			     struct sk_buff *skb,
			     gfp_t flags)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct strp_msg *rxm = strp_msg(skb);
	struct aead_request *aead_req;
	int ret;
	unsigned int req_size = sizeof(struct aead_request) +
		crypto_aead_reqsize(ctx->aead_recv);

	aead_req = kzalloc(req_size, flags);
	if (!aead_req)
		return -ENOMEM;

	aead_request_set_tfm(aead_req, ctx->aead_recv);
	aead_request_set_ad(aead_req, TLS_AAD_SPACE_SIZE);
	aead_request_set_crypt(aead_req, sgin, sgout,
			       data_len + tls_ctx->rx.tag_size,
			       (u8 *)iv_recv);
	aead_request_set_callback(aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  crypto_req_done, &ctx->async_wait);

	ret = crypto_wait_req(crypto_aead_decrypt(aead_req), &ctx->async_wait);
	if (ret < 0)
		goto out;

	rxm->offset += tls_ctx->rx.prepend_size;
	rxm->full_len -= tls_ctx->rx.overhead_size;
	tls_advance_record_sn(sk, &tls_ctx->rx);

	ctx->decrypted = true;

	ctx->saved_data_ready(sk);

out:
	kfree(aead_req);
	return ret;
}

static void trim_sg(struct sock *sk, struct scatterlist *sg,
		    int *sg_num_elem, unsigned int *sg_size, int target_size)
{
	int i = *sg_num_elem - 1;
	int trim = *sg_size - target_size;

	if (trim <= 0) {
		WARN_ON(trim < 0);
		return;
	}

	*sg_size = target_size;
	while (trim >= sg[i].length) {
		trim -= sg[i].length;
		sk_mem_uncharge(sk, sg[i].length);
		put_page(sg_page(&sg[i]));
		i--;

		if (i < 0)
			goto out;
	}

	sg[i].length -= trim;
	sk_mem_uncharge(sk, trim);

out:
	*sg_num_elem = i + 1;
}

static void trim_both_sgl(struct sock *sk, int target_size)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);

	trim_sg(sk, ctx->sg_plaintext_data,
		&ctx->sg_plaintext_num_elem,
		&ctx->sg_plaintext_size,
		target_size);

	if (target_size > 0)
		target_size += tls_ctx->tx.overhead_size;

	trim_sg(sk, ctx->sg_encrypted_data,
		&ctx->sg_encrypted_num_elem,
		&ctx->sg_encrypted_size,
		target_size);
}

static int alloc_encrypted_sg(struct sock *sk, int len)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	int rc = 0;

	rc = sk_alloc_sg(sk, len,
			 ctx->sg_encrypted_data, 0,
			 &ctx->sg_encrypted_num_elem,
			 &ctx->sg_encrypted_size, 0);

	return rc;
}

static int alloc_plaintext_sg(struct sock *sk, int len)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	int rc = 0;

	rc = sk_alloc_sg(sk, len, ctx->sg_plaintext_data, 0,
			 &ctx->sg_plaintext_num_elem, &ctx->sg_plaintext_size,
			 tls_ctx->pending_open_record_frags);

	return rc;
}

static void free_sg(struct sock *sk, struct scatterlist *sg,
		    int *sg_num_elem, unsigned int *sg_size)
{
	int i, n = *sg_num_elem;

	for (i = 0; i < n; ++i) {
		sk_mem_uncharge(sk, sg[i].length);
		put_page(sg_page(&sg[i]));
	}
	*sg_num_elem = 0;
	*sg_size = 0;
}

static void tls_free_both_sg(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);

	free_sg(sk, ctx->sg_encrypted_data, &ctx->sg_encrypted_num_elem,
		&ctx->sg_encrypted_size);

	free_sg(sk, ctx->sg_plaintext_data, &ctx->sg_plaintext_num_elem,
		&ctx->sg_plaintext_size);
}

static int tls_do_encryption(struct tls_context *tls_ctx,
			     struct tls_sw_context_tx *ctx,
			     struct aead_request *aead_req,
			     size_t data_len)
{
	int rc;

	ctx->sg_encrypted_data[0].offset += tls_ctx->tx.prepend_size;
	ctx->sg_encrypted_data[0].length -= tls_ctx->tx.prepend_size;

	aead_request_set_tfm(aead_req, ctx->aead_send);
	aead_request_set_ad(aead_req, TLS_AAD_SPACE_SIZE);
	aead_request_set_crypt(aead_req, ctx->sg_aead_in, ctx->sg_aead_out,
			       data_len, tls_ctx->tx.iv);
	aead_request_set_callback(aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  crypto_req_done, &ctx->async_wait);

	rc = crypto_wait_req(crypto_aead_encrypt(aead_req), &ctx->async_wait);

	ctx->sg_encrypted_data[0].offset -= tls_ctx->tx.prepend_size;
	ctx->sg_encrypted_data[0].length += tls_ctx->tx.prepend_size;

	return rc;
}

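/* Close the currently open record: build the AAD and the TLS record
 * header, encrypt the plaintext scatterlist into the encrypted one,
 * release the plaintext pages and queue the ciphertext for transmission
 * via tls_push_sg().  The TX record sequence number is advanced after
 * the ciphertext has been queued.
 */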
static int tls_push_record(struct sock *sk, int flags,
			   unsigned char record_type)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct aead_request *req;
	int rc;

	req = kzalloc(sizeof(struct aead_request) +
		      crypto_aead_reqsize(ctx->aead_send), sk->sk_allocation);
	if (!req)
		return -ENOMEM;

	sg_mark_end(ctx->sg_plaintext_data + ctx->sg_plaintext_num_elem - 1);
	sg_mark_end(ctx->sg_encrypted_data + ctx->sg_encrypted_num_elem - 1);

	tls_make_aad(ctx->aad_space, ctx->sg_plaintext_size,
		     tls_ctx->tx.rec_seq, tls_ctx->tx.rec_seq_size,
		     record_type);

	tls_fill_prepend(tls_ctx,
			 page_address(sg_page(&ctx->sg_encrypted_data[0])) +
			 ctx->sg_encrypted_data[0].offset,
			 ctx->sg_plaintext_size, record_type);

	tls_ctx->pending_open_record_frags = 0;
	set_bit(TLS_PENDING_CLOSED_RECORD, &tls_ctx->flags);

	rc = tls_do_encryption(tls_ctx, ctx, req, ctx->sg_plaintext_size);
	if (rc < 0) {
		/* If we are called from write_space and
		 * we fail, we need to set this SOCK_NOSPACE
		 * to trigger another write_space in the future.
		 */
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		goto out_req;
	}

	free_sg(sk, ctx->sg_plaintext_data, &ctx->sg_plaintext_num_elem,
		&ctx->sg_plaintext_size);

	ctx->sg_encrypted_num_elem = 0;
	ctx->sg_encrypted_size = 0;

	/* Only pass through MSG_DONTWAIT and MSG_NOSIGNAL flags */
	rc = tls_push_sg(sk, tls_ctx, ctx->sg_encrypted_data, 0, flags);
	if (rc < 0 && rc != -EAGAIN)
		tls_err_abort(sk, EBADMSG);

	tls_advance_record_sn(sk, &tls_ctx->tx);
out_req:
	kfree(req);
	return rc;
}

static int tls_sw_push_pending_record(struct sock *sk, int flags)
{
	return tls_push_record(sk, flags, TLS_RECORD_TYPE_DATA);
}

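/* Pin the user pages backing @from and map up to @length bytes of them
 * directly into the @to scatterlist, so the AEAD can read from (TX) or
 * write into (RX) user memory without an intermediate copy.  When
 * @charge is set the mapped bytes are charged to the socket's memory
 * accounting.
 */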
static int zerocopy_from_iter(struct sock *sk, struct iov_iter *from,
			      int length, int *pages_used,
			      unsigned int *size_used,
			      struct scatterlist *to, int to_max_pages,
			      bool charge)
{
	struct page *pages[MAX_SKB_FRAGS];
	size_t offset;
	ssize_t copied, use;
	int i = 0;
	unsigned int size = *size_used;
	int num_elem = *pages_used;
	int rc = 0;
	int maxpages;

	while (length > 0) {
		i = 0;
		maxpages = to_max_pages - num_elem;
		if (maxpages == 0) {
			rc = -EFAULT;
			goto out;
		}
		copied = iov_iter_get_pages(from, pages,
					    length,
					    maxpages, &offset);
		if (copied <= 0) {
			rc = -EFAULT;
			goto out;
		}
		iov_iter_advance(from, copied);

		length -= copied;
		size += copied;
		while (copied) {
			use = min_t(int, copied, PAGE_SIZE - offset);

			sg_set_page(&to[num_elem],
				    pages[i], use, offset);
			sg_unmark_end(&to[num_elem]);
			if (charge)
				sk_mem_charge(sk, use);

			offset = 0;
			copied -= use;

			++i;
			++num_elem;
		}
	}

out:
	*size_used = size;
	*pages_used = num_elem;

	return rc;
}

static int memcopy_from_iter(struct sock *sk, struct iov_iter *from,
			     int bytes)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct scatterlist *sg = ctx->sg_plaintext_data;
	int copy, i, rc = 0;

	for (i = tls_ctx->pending_open_record_frags;
	     i < ctx->sg_plaintext_num_elem; ++i) {
		copy = sg[i].length;
		if (copy_from_iter(
				page_address(sg_page(&sg[i])) + sg[i].offset,
				copy, from) != copy) {
			rc = -EFAULT;
			goto out;
		}
		bytes -= copy;

		++tls_ctx->pending_open_record_frags;

		if (!bytes)
			break;
	}

out:
	return rc;
}

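/* TX entry point for sendmsg().  Payload is gathered into TLS records of
 * at most TLS_MAX_PAYLOAD_SIZE bytes.  When a record can be closed
 * immediately (it is full or MSG_MORE is not set) the user pages are
 * mapped zero-copy into the plaintext scatterlist; otherwise the data is
 * copied into pre-allocated plaintext pages and the record stays open so
 * a later call can append to it.
 */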
int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	int ret = 0;
	int required_size;
	long timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
	bool eor = !(msg->msg_flags & MSG_MORE);
	size_t try_to_copy, copied = 0;
	unsigned char record_type = TLS_RECORD_TYPE_DATA;
	int record_room;
	bool full_record;
	int orig_size;

	if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL))
		return -ENOTSUPP;

	lock_sock(sk);

	if (tls_complete_pending_work(sk, tls_ctx, msg->msg_flags, &timeo))
		goto send_end;

	if (unlikely(msg->msg_controllen)) {
		ret = tls_proccess_cmsg(sk, msg, &record_type);
		if (ret)
			goto send_end;
	}

	while (msg_data_left(msg)) {
		if (sk->sk_err) {
			ret = -sk->sk_err;
			goto send_end;
		}

		orig_size = ctx->sg_plaintext_size;
		full_record = false;
		try_to_copy = msg_data_left(msg);
		record_room = TLS_MAX_PAYLOAD_SIZE - ctx->sg_plaintext_size;
		if (try_to_copy >= record_room) {
			try_to_copy = record_room;
			full_record = true;
		}

		required_size = ctx->sg_plaintext_size + try_to_copy +
				tls_ctx->tx.overhead_size;

		if (!sk_stream_memory_free(sk))
			goto wait_for_sndbuf;
alloc_encrypted:
		ret = alloc_encrypted_sg(sk, required_size);
		if (ret) {
			if (ret != -ENOSPC)
				goto wait_for_memory;

			/* Adjust try_to_copy according to the amount that was
			 * actually allocated. The difference is due
			 * to max sg elements limit
			 */
			try_to_copy -= required_size - ctx->sg_encrypted_size;
			full_record = true;
		}

		if (full_record || eor) {
			ret = zerocopy_from_iter(sk, &msg->msg_iter,
				try_to_copy, &ctx->sg_plaintext_num_elem,
				&ctx->sg_plaintext_size,
				ctx->sg_plaintext_data,
				ARRAY_SIZE(ctx->sg_plaintext_data),
				true);
			if (ret)
				goto fallback_to_reg_send;

			copied += try_to_copy;
			ret = tls_push_record(sk, msg->msg_flags, record_type);
			if (!ret)
				continue;
			if (ret == -EAGAIN)
				goto send_end;

			copied -= try_to_copy;
fallback_to_reg_send:
			iov_iter_revert(&msg->msg_iter,
					ctx->sg_plaintext_size - orig_size);
			trim_sg(sk, ctx->sg_plaintext_data,
				&ctx->sg_plaintext_num_elem,
				&ctx->sg_plaintext_size,
				orig_size);
		}

		required_size = ctx->sg_plaintext_size + try_to_copy;
alloc_plaintext:
		ret = alloc_plaintext_sg(sk, required_size);
		if (ret) {
			if (ret != -ENOSPC)
				goto wait_for_memory;

			/* Adjust try_to_copy according to the amount that was
			 * actually allocated. The difference is due
			 * to max sg elements limit
			 */
			try_to_copy -= required_size - ctx->sg_plaintext_size;
			full_record = true;

			trim_sg(sk, ctx->sg_encrypted_data,
				&ctx->sg_encrypted_num_elem,
				&ctx->sg_encrypted_size,
				ctx->sg_plaintext_size +
				tls_ctx->tx.overhead_size);
		}

		ret = memcopy_from_iter(sk, &msg->msg_iter, try_to_copy);
		if (ret)
			goto trim_sgl;

		copied += try_to_copy;
		if (full_record || eor) {
push_record:
			ret = tls_push_record(sk, msg->msg_flags, record_type);
			if (ret) {
				if (ret == -ENOMEM)
					goto wait_for_memory;

				goto send_end;
			}
		}

		continue;

wait_for_sndbuf:
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
		ret = sk_stream_wait_memory(sk, &timeo);
		if (ret) {
trim_sgl:
			trim_both_sgl(sk, orig_size);
			goto send_end;
		}

		if (tls_is_pending_closed_record(tls_ctx))
			goto push_record;

		if (ctx->sg_encrypted_size < required_size)
			goto alloc_encrypted;

		goto alloc_plaintext;
	}

send_end:
	ret = sk_stream_error(sk, msg->msg_flags, ret);

	release_sock(sk);
	return copied ? copied : ret;
}

int tls_sw_sendpage(struct sock *sk, struct page *page,
		    int offset, size_t size, int flags)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	int ret = 0;
	long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
	bool eor;
	size_t orig_size = size;
	unsigned char record_type = TLS_RECORD_TYPE_DATA;
	struct scatterlist *sg;
	bool full_record;
	int record_room;

	if (flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
		      MSG_SENDPAGE_NOTLAST))
		return -ENOTSUPP;

	/* No MSG_EOR from splice, only look at MSG_MORE */
	eor = !(flags & (MSG_MORE | MSG_SENDPAGE_NOTLAST));

	lock_sock(sk);

	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);

	if (tls_complete_pending_work(sk, tls_ctx, flags, &timeo))
		goto sendpage_end;

	/* Call the sk_stream functions to manage the sndbuf mem. */
	while (size > 0) {
		size_t copy, required_size;

		if (sk->sk_err) {
			ret = -sk->sk_err;
			goto sendpage_end;
		}

		full_record = false;
		record_room = TLS_MAX_PAYLOAD_SIZE - ctx->sg_plaintext_size;
		copy = size;
		if (copy >= record_room) {
			copy = record_room;
			full_record = true;
		}
		required_size = ctx->sg_plaintext_size + copy +
				tls_ctx->tx.overhead_size;

		if (!sk_stream_memory_free(sk))
			goto wait_for_sndbuf;
alloc_payload:
		ret = alloc_encrypted_sg(sk, required_size);
		if (ret) {
			if (ret != -ENOSPC)
				goto wait_for_memory;

			/* Adjust copy according to the amount that was
			 * actually allocated. The difference is due
			 * to max sg elements limit
			 */
			copy -= required_size - ctx->sg_plaintext_size;
			full_record = true;
		}

		get_page(page);
		sg = ctx->sg_plaintext_data + ctx->sg_plaintext_num_elem;
		sg_set_page(sg, page, copy, offset);
		sg_unmark_end(sg);

		ctx->sg_plaintext_num_elem++;

		sk_mem_charge(sk, copy);
		offset += copy;
		size -= copy;
		ctx->sg_plaintext_size += copy;
		tls_ctx->pending_open_record_frags = ctx->sg_plaintext_num_elem;

		if (full_record || eor ||
		    ctx->sg_plaintext_num_elem ==
		    ARRAY_SIZE(ctx->sg_plaintext_data)) {
push_record:
			ret = tls_push_record(sk, flags, record_type);
			if (ret) {
				if (ret == -ENOMEM)
					goto wait_for_memory;

				goto sendpage_end;
			}
		}
		continue;
wait_for_sndbuf:
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
		ret = sk_stream_wait_memory(sk, &timeo);
		if (ret) {
			trim_both_sgl(sk, ctx->sg_plaintext_size);
			goto sendpage_end;
		}

		if (tls_is_pending_closed_record(tls_ctx))
			goto push_record;

		goto alloc_payload;
	}

sendpage_end:
	if (orig_size > size)
		ret = orig_size - size;
	else
		ret = sk_stream_error(sk, flags, ret);

	release_sock(sk);
	return ret;
}

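/* Wait for the strparser to hand us a complete TLS record.  Returns the
 * queued sk_buff, or NULL with *err set on socket error, shutdown,
 * non-blocking operation with no data, or a pending signal.
 */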
static struct sk_buff *tls_wait_data(struct sock *sk, int flags,
				     long timeo, int *err)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct sk_buff *skb;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	while (!(skb = ctx->recv_pkt)) {
		if (sk->sk_err) {
			*err = sock_error(sk);
			return NULL;
		}

		if (sock_flag(sk, SOCK_DONE))
			return NULL;

		if ((flags & MSG_DONTWAIT) || !timeo) {
			*err = -EAGAIN;
			return NULL;
		}

		add_wait_queue(sk_sleep(sk), &wait);
		sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
		sk_wait_event(sk, &timeo, ctx->recv_pkt != skb, &wait);
		sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
		remove_wait_queue(sk_sleep(sk), &wait);

		/* Handle signals */
		if (signal_pending(current)) {
			*err = sock_intr_errno(timeo);
			return NULL;
		}
	}

	return skb;
}

static int decrypt_skb(struct sock *sk, struct sk_buff *skb,
		       struct scatterlist *sgout)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	char iv[TLS_CIPHER_AES_GCM_128_SALT_SIZE + MAX_IV_SIZE];
	struct scatterlist sgin_arr[MAX_SKB_FRAGS + 2];
	struct scatterlist *sgin = &sgin_arr[0];
	struct strp_msg *rxm = strp_msg(skb);
	int ret, nsg = ARRAY_SIZE(sgin_arr);
	struct sk_buff *unused;

	ret = skb_copy_bits(skb, rxm->offset + TLS_HEADER_SIZE,
			    iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
			    tls_ctx->rx.iv_size);
	if (ret < 0)
		return ret;

	memcpy(iv, tls_ctx->rx.iv, TLS_CIPHER_AES_GCM_128_SALT_SIZE);
	if (!sgout) {
		nsg = skb_cow_data(skb, 0, &unused) + 1;
		sgin = kmalloc_array(nsg, sizeof(*sgin), sk->sk_allocation);
		/* Guard against allocation failure before the sg list is used */
		if (!sgin)
			return -ENOMEM;
		sgout = sgin;
	}

	sg_init_table(sgin, nsg);
	sg_set_buf(&sgin[0], ctx->rx_aad_ciphertext, TLS_AAD_SPACE_SIZE);

	nsg = skb_to_sgvec(skb, &sgin[1],
			   rxm->offset + tls_ctx->rx.prepend_size,
			   rxm->full_len - tls_ctx->rx.prepend_size);

	tls_make_aad(ctx->rx_aad_ciphertext,
		     rxm->full_len - tls_ctx->rx.overhead_size,
		     tls_ctx->rx.rec_seq,
		     tls_ctx->rx.rec_seq_size,
		     ctx->control);

	ret = tls_do_decryption(sk, sgin, sgout, iv,
				rxm->full_len - tls_ctx->rx.overhead_size,
				skb, sk->sk_allocation);

	if (sgin != &sgin_arr[0])
		kfree(sgin);

	return ret;
}

static bool tls_sw_advance_skb(struct sock *sk, struct sk_buff *skb,
			       unsigned int len)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct strp_msg *rxm = strp_msg(skb);

	if (len < rxm->full_len) {
		rxm->offset += len;
		rxm->full_len -= len;

		return false;
	}

	/* Finished with message */
	ctx->recv_pkt = NULL;
	kfree_skb(skb);
	__strp_unpause(&ctx->strp);

	return true;
}

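/* RX entry point for recvmsg().  Each record handed over by the strparser
 * has its type reported through a TLS_GET_RECORD_TYPE control message.
 * Decryption is performed zero-copy into the user buffer when the iovec
 * provides enough pages and the caller is not peeking; otherwise the
 * record is decrypted in place and then copied out.
 */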
int tls_sw_recvmsg(struct sock *sk,
		   struct msghdr *msg,
		   size_t len,
		   int nonblock,
		   int flags,
		   int *addr_len)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	unsigned char control;
	struct strp_msg *rxm;
	struct sk_buff *skb;
	ssize_t copied = 0;
	bool cmsg = false;
	int target, err = 0;
	long timeo;

	flags |= nonblock;

	if (unlikely(flags & MSG_ERRQUEUE))
		return sock_recv_errqueue(sk, msg, len, SOL_IP, IP_RECVERR);

	lock_sock(sk);

	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
	do {
		bool zc = false;
		int chunk = 0;

		skb = tls_wait_data(sk, flags, timeo, &err);
		if (!skb)
			goto recv_end;

		rxm = strp_msg(skb);
		if (!cmsg) {
			int cerr;

			cerr = put_cmsg(msg, SOL_TLS, TLS_GET_RECORD_TYPE,
					sizeof(ctx->control), &ctx->control);
			cmsg = true;
			control = ctx->control;
			if (ctx->control != TLS_RECORD_TYPE_DATA) {
				if (cerr || msg->msg_flags & MSG_CTRUNC) {
					err = -EIO;
					goto recv_end;
				}
			}
		} else if (control != ctx->control) {
			goto recv_end;
		}

		if (!ctx->decrypted) {
			int page_count;
			int to_copy;

			page_count = iov_iter_npages(&msg->msg_iter,
						     MAX_SKB_FRAGS);
			to_copy = rxm->full_len - tls_ctx->rx.overhead_size;
			if (to_copy <= len && page_count < MAX_SKB_FRAGS &&
			    likely(!(flags & MSG_PEEK))) {
				struct scatterlist sgin[MAX_SKB_FRAGS + 1];
				int pages = 0;

				zc = true;
				sg_init_table(sgin, MAX_SKB_FRAGS + 1);
				sg_set_buf(&sgin[0], ctx->rx_aad_plaintext,
					   TLS_AAD_SPACE_SIZE);

				err = zerocopy_from_iter(sk, &msg->msg_iter,
							 to_copy, &pages,
							 &chunk, &sgin[1],
							 MAX_SKB_FRAGS, false);
				if (err < 0)
					goto fallback_to_reg_recv;

				err = decrypt_skb(sk, skb, sgin);
				for (; pages > 0; pages--)
					put_page(sg_page(&sgin[pages]));
				if (err < 0) {
					tls_err_abort(sk, EBADMSG);
					goto recv_end;
				}
			} else {
fallback_to_reg_recv:
				err = decrypt_skb(sk, skb, NULL);
				if (err < 0) {
					tls_err_abort(sk, EBADMSG);
					goto recv_end;
				}
			}
			ctx->decrypted = true;
		}

		if (!zc) {
			chunk = min_t(unsigned int, rxm->full_len, len);
			err = skb_copy_datagram_msg(skb, rxm->offset, msg,
						    chunk);
			if (err < 0)
				goto recv_end;
		}

		copied += chunk;
		len -= chunk;
		if (likely(!(flags & MSG_PEEK))) {
			u8 control = ctx->control;

			if (tls_sw_advance_skb(sk, skb, chunk)) {
				/* Return full control message to
				 * userspace before trying to parse
				 * another message type
				 */
				msg->msg_flags |= MSG_EOR;
				if (control != TLS_RECORD_TYPE_DATA)
					goto recv_end;
			}
		}

		/* If we have a new message from strparser, continue now. */
		if (copied >= target && !ctx->recv_pkt)
			break;
	} while (len);

recv_end:
	release_sock(sk);
	return copied ? : err;
}

ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
			   struct pipe_inode_info *pipe,
			   size_t len, unsigned int flags)
{
	struct tls_context *tls_ctx = tls_get_ctx(sock->sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct strp_msg *rxm = NULL;
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	ssize_t copied = 0;
	int err = 0;
	long timeo;
	int chunk;

	lock_sock(sk);

	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

	skb = tls_wait_data(sk, flags, timeo, &err);
	if (!skb)
		goto splice_read_end;

	/* splice does not support reading control messages */
	if (ctx->control != TLS_RECORD_TYPE_DATA) {
		err = -ENOTSUPP;
		goto splice_read_end;
	}

	if (!ctx->decrypted) {
		err = decrypt_skb(sk, skb, NULL);
		if (err < 0) {
			tls_err_abort(sk, EBADMSG);
			goto splice_read_end;
		}
		ctx->decrypted = true;
	}
	rxm = strp_msg(skb);

	chunk = min_t(unsigned int, rxm->full_len, len);
	copied = skb_splice_bits(skb, sk, rxm->offset, pipe, chunk, flags);
	if (copied < 0)
		goto splice_read_end;

	if (likely(!(flags & MSG_PEEK)))
		tls_sw_advance_skb(sk, skb, copied);

splice_read_end:
	release_sock(sk);
	return copied ? : err;
}

__poll_t tls_sw_poll_mask(struct socket *sock, __poll_t events)
{
	struct sock *sk = sock->sk;
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	__poll_t mask;

	/* Grab EPOLLOUT and EPOLLHUP from the underlying socket */
	mask = ctx->sk_poll_mask(sock, events);

	/* Clear EPOLLIN bits, and set based on recv_pkt */
	mask &= ~(EPOLLIN | EPOLLRDNORM);

	if (ctx->recv_pkt)
		mask |= EPOLLIN | EPOLLRDNORM;

	return mask;
}

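/* strparser ->parse_msg() callback: read the 5-byte TLS record header,
 * validate the protocol version and the payload length bounds, and
 * return the total record length (header plus payload) so the strparser
 * can carve out one full record.  Any failure aborts the connection.
 */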
static int tls_read_size(struct strparser *strp, struct sk_buff *skb)
{
	struct tls_context *tls_ctx = tls_get_ctx(strp->sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	char header[tls_ctx->rx.prepend_size];
	struct strp_msg *rxm = strp_msg(skb);
	size_t cipher_overhead;
	size_t data_len = 0;
	int ret;

	/* Verify that we have a full TLS header, or wait for more data */
	if (rxm->offset + tls_ctx->rx.prepend_size > skb->len)
		return 0;

	/* Linearize header to local buffer */
	ret = skb_copy_bits(skb, rxm->offset, header, tls_ctx->rx.prepend_size);
	if (ret < 0)
		goto read_failure;

	ctx->control = header[0];

	data_len = ((header[4] & 0xFF) | (header[3] << 8));

	cipher_overhead = tls_ctx->rx.tag_size + tls_ctx->rx.iv_size;

	if (data_len > TLS_MAX_PAYLOAD_SIZE + cipher_overhead) {
		ret = -EMSGSIZE;
		goto read_failure;
	}
	if (data_len < cipher_overhead) {
		ret = -EBADMSG;
		goto read_failure;
	}

	if (header[1] != TLS_VERSION_MINOR(tls_ctx->crypto_recv.version) ||
	    header[2] != TLS_VERSION_MAJOR(tls_ctx->crypto_recv.version)) {
		ret = -EINVAL;
		goto read_failure;
	}

	return data_len + TLS_HEADER_SIZE;

read_failure:
	tls_err_abort(strp->sk, ret);

	return ret;
}

static void tls_queue(struct strparser *strp, struct sk_buff *skb)
{
	struct tls_context *tls_ctx = tls_get_ctx(strp->sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct strp_msg *rxm;

	rxm = strp_msg(skb);

	ctx->decrypted = false;
	ctx->recv_pkt = skb;
	strp_pause(strp);

	strp->sk->sk_state_change(strp->sk);
}

static void tls_data_ready(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);

	strp_data_ready(&ctx->strp);
}

void tls_sw_free_resources_tx(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);

	if (ctx->aead_send)
		crypto_free_aead(ctx->aead_send);
	tls_free_both_sg(sk);

	kfree(ctx);
}

void tls_sw_free_resources_rx(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);

	if (ctx->aead_recv) {
		if (ctx->recv_pkt) {
			kfree_skb(ctx->recv_pkt);
			ctx->recv_pkt = NULL;
		}
		crypto_free_aead(ctx->aead_recv);
		strp_stop(&ctx->strp);
		write_lock_bh(&sk->sk_callback_lock);
		sk->sk_data_ready = ctx->saved_data_ready;
		write_unlock_bh(&sk->sk_callback_lock);
		release_sock(sk);
		strp_done(&ctx->strp);
		lock_sock(sk);
	}

	kfree(ctx);
}

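/* Initialise the software TLS state for one direction of a socket.
 * Copies the key material (salt, IV, record sequence number) provided
 * when the TLS crypto state was configured, sets up the
 * AAD/plaintext/ciphertext scatterlists for TX, allocates and keys the
 * gcm(aes) AEAD transform, and for RX attaches the strparser and takes
 * over sk_data_ready.
 */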
int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
{
	char keyval[TLS_CIPHER_AES_GCM_128_KEY_SIZE];
	struct tls_crypto_info *crypto_info;
	struct tls12_crypto_info_aes_gcm_128 *gcm_128_info;
	struct tls_sw_context_tx *sw_ctx_tx = NULL;
	struct tls_sw_context_rx *sw_ctx_rx = NULL;
	struct cipher_context *cctx;
	struct crypto_aead **aead;
	struct strp_callbacks cb;
	u16 nonce_size, tag_size, iv_size, rec_seq_size;
	char *iv, *rec_seq;
	int rc = 0;

	if (!ctx) {
		rc = -EINVAL;
		goto out;
	}

	if (tx) {
		sw_ctx_tx = kzalloc(sizeof(*sw_ctx_tx), GFP_KERNEL);
		if (!sw_ctx_tx) {
			rc = -ENOMEM;
			goto out;
		}
		crypto_init_wait(&sw_ctx_tx->async_wait);
		ctx->priv_ctx_tx = sw_ctx_tx;
	} else {
		sw_ctx_rx = kzalloc(sizeof(*sw_ctx_rx), GFP_KERNEL);
		if (!sw_ctx_rx) {
			rc = -ENOMEM;
			goto out;
		}
		crypto_init_wait(&sw_ctx_rx->async_wait);
		ctx->priv_ctx_rx = sw_ctx_rx;
	}

	if (tx) {
		crypto_info = &ctx->crypto_send;
		cctx = &ctx->tx;
		aead = &sw_ctx_tx->aead_send;
	} else {
		crypto_info = &ctx->crypto_recv;
		cctx = &ctx->rx;
		aead = &sw_ctx_rx->aead_recv;
	}

	switch (crypto_info->cipher_type) {
	case TLS_CIPHER_AES_GCM_128: {
		nonce_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
		tag_size = TLS_CIPHER_AES_GCM_128_TAG_SIZE;
		iv_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
		iv = ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->iv;
		rec_seq_size = TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE;
		rec_seq =
		 ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->rec_seq;
		gcm_128_info =
			(struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
		break;
	}
	default:
		rc = -EINVAL;
		goto free_priv;
	}

	/* Sanity-check the IV size for stack allocations. */
	if (iv_size > MAX_IV_SIZE) {
		rc = -EINVAL;
		goto free_priv;
	}

	cctx->prepend_size = TLS_HEADER_SIZE + nonce_size;
	cctx->tag_size = tag_size;
	cctx->overhead_size = cctx->prepend_size + cctx->tag_size;
	cctx->iv_size = iv_size;
	cctx->iv = kmalloc(iv_size + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
			   GFP_KERNEL);
	if (!cctx->iv) {
		rc = -ENOMEM;
		goto free_priv;
	}
	memcpy(cctx->iv, gcm_128_info->salt, TLS_CIPHER_AES_GCM_128_SALT_SIZE);
	memcpy(cctx->iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE, iv, iv_size);
	cctx->rec_seq_size = rec_seq_size;
	cctx->rec_seq = kmalloc(rec_seq_size, GFP_KERNEL);
	if (!cctx->rec_seq) {
		rc = -ENOMEM;
		goto free_iv;
	}
	memcpy(cctx->rec_seq, rec_seq, rec_seq_size);

	if (sw_ctx_tx) {
		sg_init_table(sw_ctx_tx->sg_encrypted_data,
			      ARRAY_SIZE(sw_ctx_tx->sg_encrypted_data));
		sg_init_table(sw_ctx_tx->sg_plaintext_data,
			      ARRAY_SIZE(sw_ctx_tx->sg_plaintext_data));

		sg_init_table(sw_ctx_tx->sg_aead_in, 2);
		sg_set_buf(&sw_ctx_tx->sg_aead_in[0], sw_ctx_tx->aad_space,
			   sizeof(sw_ctx_tx->aad_space));
		sg_unmark_end(&sw_ctx_tx->sg_aead_in[1]);
		sg_chain(sw_ctx_tx->sg_aead_in, 2,
			 sw_ctx_tx->sg_plaintext_data);
		sg_init_table(sw_ctx_tx->sg_aead_out, 2);
		sg_set_buf(&sw_ctx_tx->sg_aead_out[0], sw_ctx_tx->aad_space,
			   sizeof(sw_ctx_tx->aad_space));
		sg_unmark_end(&sw_ctx_tx->sg_aead_out[1]);
		sg_chain(sw_ctx_tx->sg_aead_out, 2,
			 sw_ctx_tx->sg_encrypted_data);
	}

	if (!*aead) {
		*aead = crypto_alloc_aead("gcm(aes)", 0, 0);
		if (IS_ERR(*aead)) {
			rc = PTR_ERR(*aead);
			*aead = NULL;
			goto free_rec_seq;
		}
	}

	ctx->push_pending_record = tls_sw_push_pending_record;

	memcpy(keyval, gcm_128_info->key, TLS_CIPHER_AES_GCM_128_KEY_SIZE);

	rc = crypto_aead_setkey(*aead, keyval,
				TLS_CIPHER_AES_GCM_128_KEY_SIZE);
	if (rc)
		goto free_aead;

	rc = crypto_aead_setauthsize(*aead, cctx->tag_size);
	if (rc)
		goto free_aead;

	if (sw_ctx_rx) {
		/* Set up strparser */
		memset(&cb, 0, sizeof(cb));
		cb.rcv_msg = tls_queue;
		cb.parse_msg = tls_read_size;
		strp_init(&sw_ctx_rx->strp, sk, &cb);

		write_lock_bh(&sk->sk_callback_lock);
		sw_ctx_rx->saved_data_ready = sk->sk_data_ready;
		sk->sk_data_ready = tls_data_ready;
		write_unlock_bh(&sk->sk_callback_lock);

		sw_ctx_rx->sk_poll_mask = sk->sk_socket->ops->poll_mask;

		strp_check_rcv(&sw_ctx_rx->strp);
	}

	goto out;

free_aead:
	crypto_free_aead(*aead);
	*aead = NULL;
free_rec_seq:
	kfree(cctx->rec_seq);
	cctx->rec_seq = NULL;
free_iv:
	kfree(cctx->iv);
	cctx->iv = NULL;
free_priv:
	if (tx) {
		kfree(ctx->priv_ctx_tx);
		ctx->priv_ctx_tx = NULL;
	} else {
		kfree(ctx->priv_ctx_rx);
		ctx->priv_ctx_rx = NULL;
	}
out:
	return rc;
}