tls_main.c

/*
 * Copyright (c) 2016-2017, Mellanox Technologies. All rights reserved.
 * Copyright (c) 2016-2017, Dave Watson <davejwatson@fb.com>. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>

#include <net/tcp.h>
#include <net/inet_common.h>
#include <linux/highmem.h>
#include <linux/netdevice.h>
#include <linux/sched/signal.h>
#include <linux/inetdevice.h>

#include <net/tls.h>

MODULE_AUTHOR("Mellanox Technologies");
MODULE_DESCRIPTION("Transport Layer Security Support");
MODULE_LICENSE("Dual BSD/GPL");

enum {
	TLSV4,
	TLSV6,
	TLS_NUM_PROTS,
};

enum {
	TLS_BASE,
	TLS_SW,
#ifdef CONFIG_TLS_DEVICE
	TLS_HW,
#endif
	TLS_HW_RECORD,
	TLS_NUM_CONFIG,
};

static struct proto *saved_tcpv6_prot;
static DEFINE_MUTEX(tcpv6_prot_mutex);
static LIST_HEAD(device_list);
static DEFINE_MUTEX(device_mutex);
static struct proto tls_prots[TLS_NUM_PROTS][TLS_NUM_CONFIG][TLS_NUM_CONFIG];
static struct proto_ops tls_sw_proto_ops;

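/* Point sk->sk_prot at the pre-built proto whose hooks match the
 * socket's current (tx_conf, rx_conf) configuration pair.
 */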
static void update_sk_prot(struct sock *sk, struct tls_context *ctx)
{
	int ip_ver = sk->sk_family == AF_INET6 ? TLSV6 : TLSV4;

	sk->sk_prot = &tls_prots[ip_ver][ctx->tx_conf][ctx->rx_conf];
}

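/* Block until sk->sk_write_pending drains, the send timeout expires
 * (-EAGAIN), or a signal arrives.
 */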
int wait_on_pending_writer(struct sock *sk, long *timeo)
{
	int rc = 0;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	add_wait_queue(sk_sleep(sk), &wait);
	while (1) {
		if (!*timeo) {
			rc = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			rc = sock_intr_errno(*timeo);
			break;
		}

		if (sk_wait_event(sk, timeo, !sk->sk_write_pending, &wait))
			break;
	}

	remove_wait_queue(sk_sleep(sk), &wait);
	return rc;
}

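/* Push a scatterlist of record pages through do_tcp_sendpages(). On a
 * partial send, the remaining sg and offset are saved in the context so
 * the record can be resumed later; fully sent entries have their pages
 * released and their memory uncharged from the socket.
 */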
int tls_push_sg(struct sock *sk,
		struct tls_context *ctx,
		struct scatterlist *sg,
		u16 first_offset,
		int flags)
{
	int sendpage_flags = flags | MSG_SENDPAGE_NOTLAST;
	int ret = 0;
	struct page *p;
	size_t size;
	int offset = first_offset;

	size = sg->length - offset;
	offset += sg->offset;

	ctx->in_tcp_sendpages = true;
	while (1) {
		if (sg_is_last(sg))
			sendpage_flags = flags;

		/* is sending application-limited? */
		tcp_rate_check_app_limited(sk);
		p = sg_page(sg);
retry:
		ret = do_tcp_sendpages(sk, p, offset, size, sendpage_flags);

		if (ret != size) {
			if (ret > 0) {
				offset += ret;
				size -= ret;
				goto retry;
			}

			offset -= sg->offset;
			ctx->partially_sent_offset = offset;
			ctx->partially_sent_record = (void *)sg;
			ctx->in_tcp_sendpages = false;
			return ret;
		}

		put_page(p);
		sk_mem_uncharge(sk, sg->length);
		sg = sg_next(sg);
		if (!sg)
			break;

		offset = sg->offset;
		size = sg->length;
	}

	clear_bit(TLS_PENDING_CLOSED_RECORD, &ctx->flags);
	ctx->in_tcp_sendpages = false;
	ctx->sk_write_space(sk);

	return 0;
}

static int tls_handle_open_record(struct sock *sk, int flags)
{
	struct tls_context *ctx = tls_get_ctx(sk);

	if (tls_is_pending_open_record(ctx))
		return ctx->push_pending_record(sk, flags);

	return 0;
}

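/* Parse SOL_TLS control messages. TLS_SET_RECORD_TYPE changes the type
 * of the record being built, so any open record is pushed out first.
 */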
int tls_proccess_cmsg(struct sock *sk, struct msghdr *msg,
		      unsigned char *record_type)
{
	struct cmsghdr *cmsg;
	int rc = -EINVAL;

	for_each_cmsghdr(cmsg, msg) {
		if (!CMSG_OK(msg, cmsg))
			return -EINVAL;
		if (cmsg->cmsg_level != SOL_TLS)
			continue;

		switch (cmsg->cmsg_type) {
		case TLS_SET_RECORD_TYPE:
			if (cmsg->cmsg_len < CMSG_LEN(sizeof(*record_type)))
				return -EINVAL;

			if (msg->msg_flags & MSG_MORE)
				return -EINVAL;

			rc = tls_handle_open_record(sk, msg->msg_flags);
			if (rc)
				return rc;

			*record_type = *(unsigned char *)CMSG_DATA(cmsg);
			rc = 0;
			break;
		default:
			return -EINVAL;
		}
	}

	return rc;
}

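/* Resume a partially sent record if one is pending; otherwise push the
 * closed record through the configured TX path.
 */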
int tls_push_pending_closed_record(struct sock *sk, struct tls_context *ctx,
				   int flags, long *timeo)
{
	struct scatterlist *sg;
	u16 offset;

	if (!tls_is_partially_sent_record(ctx))
		return ctx->push_pending_record(sk, flags);

	sg = ctx->partially_sent_record;
	offset = ctx->partially_sent_offset;

	ctx->partially_sent_record = NULL;
	return tls_push_sg(sk, ctx, sg, offset, flags);
}

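/* sk_write_space replacement installed for TX: flush any pending closed
 * record first (with GFP_ATOMIC, since this callback may run in a
 * context that cannot sleep), then chain to the original sk_write_space.
 */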
static void tls_write_space(struct sock *sk)
{
	struct tls_context *ctx = tls_get_ctx(sk);

	/* We are already sending pages, ignore notification */
	if (ctx->in_tcp_sendpages)
		return;

	if (!sk->sk_write_pending && tls_is_pending_closed_record(ctx)) {
		gfp_t sk_allocation = sk->sk_allocation;
		int rc;
		long timeo = 0;

		sk->sk_allocation = GFP_ATOMIC;
		rc = tls_push_pending_closed_record(sk, ctx,
						    MSG_DONTWAIT |
						    MSG_NOSIGNAL,
						    &timeo);
		sk->sk_allocation = sk_allocation;

		if (rc < 0)
			return;
	}

	ctx->sk_write_space(sk);
}

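/* close() replacement: drop partially sent pages and free per-direction
 * software crypto state before calling the original protocol close.
 */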
static void tls_sk_proto_close(struct sock *sk, long timeout)
{
	struct tls_context *ctx = tls_get_ctx(sk);
	long timeo = sock_sndtimeo(sk, 0);
	void (*sk_proto_close)(struct sock *sk, long timeout);
	bool free_ctx = false;

	lock_sock(sk);
	sk_proto_close = ctx->sk_proto_close;

	if ((ctx->tx_conf == TLS_HW_RECORD && ctx->rx_conf == TLS_HW_RECORD) ||
	    (ctx->tx_conf == TLS_BASE && ctx->rx_conf == TLS_BASE)) {
		free_ctx = true;
		goto skip_tx_cleanup;
	}

	if (!tls_complete_pending_work(sk, ctx, 0, &timeo))
		tls_handle_open_record(sk, 0);

	if (ctx->partially_sent_record) {
		struct scatterlist *sg = ctx->partially_sent_record;

		while (1) {
			put_page(sg_page(sg));
			sk_mem_uncharge(sk, sg->length);

			if (sg_is_last(sg))
				break;
			sg++;
		}
	}

	/* We need these for tls_sw_fallback handling of other packets */
	if (ctx->tx_conf == TLS_SW) {
		kfree(ctx->tx.rec_seq);
		kfree(ctx->tx.iv);
		tls_sw_free_resources_tx(sk);
	}

	if (ctx->rx_conf == TLS_SW) {
		kfree(ctx->rx.rec_seq);
		kfree(ctx->rx.iv);
		tls_sw_free_resources_rx(sk);
	}

#ifdef CONFIG_TLS_DEVICE
	if (ctx->tx_conf != TLS_HW) {
#else
	{
#endif
		kfree(ctx);
		ctx = NULL;
	}

skip_tx_cleanup:
	release_sock(sk);
	sk_proto_close(sk, timeout);
	/* free ctx for TLS_HW_RECORD, used by tcp_set_state
	 * for sk->sk_prot->unhash [tls_hw_unhash]
	 */
	if (free_ctx)
		kfree(ctx);
}

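/* Return the TX crypto parameters to user space. The full per-cipher
 * struct, including IV and record sequence number, is copied only when
 * the caller's buffer is sized for it.
 */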
static int do_tls_getsockopt_tx(struct sock *sk, char __user *optval,
				int __user *optlen)
{
	int rc = 0;
	struct tls_context *ctx = tls_get_ctx(sk);
	struct tls_crypto_info *crypto_info;
	int len;

	if (get_user(len, optlen))
		return -EFAULT;

	if (!optval || (len < sizeof(*crypto_info))) {
		rc = -EINVAL;
		goto out;
	}

	if (!ctx) {
		rc = -EBUSY;
		goto out;
	}

	/* get user crypto info */
	crypto_info = &ctx->crypto_send;

	if (!TLS_CRYPTO_INFO_READY(crypto_info)) {
		rc = -EBUSY;
		goto out;
	}

	if (len == sizeof(*crypto_info)) {
		if (copy_to_user(optval, crypto_info, sizeof(*crypto_info)))
			rc = -EFAULT;
		goto out;
	}

	switch (crypto_info->cipher_type) {
	case TLS_CIPHER_AES_GCM_128: {
		struct tls12_crypto_info_aes_gcm_128 *
		  crypto_info_aes_gcm_128 =
		  container_of(crypto_info,
			       struct tls12_crypto_info_aes_gcm_128,
			       info);

		if (len != sizeof(*crypto_info_aes_gcm_128)) {
			rc = -EINVAL;
			goto out;
		}
		lock_sock(sk);
		memcpy(crypto_info_aes_gcm_128->iv,
		       ctx->tx.iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
		       TLS_CIPHER_AES_GCM_128_IV_SIZE);
		memcpy(crypto_info_aes_gcm_128->rec_seq, ctx->tx.rec_seq,
		       TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE);
		release_sock(sk);
		if (copy_to_user(optval,
				 crypto_info_aes_gcm_128,
				 sizeof(*crypto_info_aes_gcm_128)))
			rc = -EFAULT;
		break;
	}
	default:
		rc = -EINVAL;
	}

out:
	return rc;
}

static int do_tls_getsockopt(struct sock *sk, int optname,
			     char __user *optval, int __user *optlen)
{
	int rc = 0;

	switch (optname) {
	case TLS_TX:
		rc = do_tls_getsockopt_tx(sk, optval, optlen);
		break;
	default:
		rc = -ENOPROTOOPT;
		break;
	}

	return rc;
}

static int tls_getsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, int __user *optlen)
{
	struct tls_context *ctx = tls_get_ctx(sk);

	if (level != SOL_TLS)
		return ctx->getsockopt(sk, level, optname, optval, optlen);

	return do_tls_getsockopt(sk, optname, optval, optlen);
}

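/* Configure one direction (TX or RX) from the tls_crypto_info passed via
 * setsockopt(SOL_TLS). TX tries device offload first when available and
 * falls back to the software path; RX is software-only here.
 */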
static int do_tls_setsockopt_conf(struct sock *sk, char __user *optval,
				  unsigned int optlen, int tx)
{
	struct tls_crypto_info *crypto_info;
	struct tls_context *ctx = tls_get_ctx(sk);
	int rc = 0;
	int conf;

	if (!optval || (optlen < sizeof(*crypto_info))) {
		rc = -EINVAL;
		goto out;
	}

	if (tx)
		crypto_info = &ctx->crypto_send;
	else
		crypto_info = &ctx->crypto_recv;

	/* Currently we don't support setting crypto info more than once */
	if (TLS_CRYPTO_INFO_READY(crypto_info)) {
		rc = -EBUSY;
		goto out;
	}

	rc = copy_from_user(crypto_info, optval, sizeof(*crypto_info));
	if (rc) {
		rc = -EFAULT;
		goto err_crypto_info;
	}

	/* check version */
	if (crypto_info->version != TLS_1_2_VERSION) {
		rc = -ENOTSUPP;
		goto err_crypto_info;
	}

	switch (crypto_info->cipher_type) {
	case TLS_CIPHER_AES_GCM_128: {
		if (optlen != sizeof(struct tls12_crypto_info_aes_gcm_128)) {
			rc = -EINVAL;
			goto err_crypto_info;
		}
		rc = copy_from_user(crypto_info + 1, optval + sizeof(*crypto_info),
				    optlen - sizeof(*crypto_info));
		if (rc) {
			rc = -EFAULT;
			goto err_crypto_info;
		}
		break;
	}
	default:
		rc = -EINVAL;
		goto err_crypto_info;
	}

	if (tx) {
#ifdef CONFIG_TLS_DEVICE
		rc = tls_set_device_offload(sk, ctx);
		conf = TLS_HW;
		if (rc) {
#else
		{
#endif
			rc = tls_set_sw_offload(sk, ctx, 1);
			conf = TLS_SW;
		}
	} else {
		rc = tls_set_sw_offload(sk, ctx, 0);
		conf = TLS_SW;
	}

	if (rc)
		goto err_crypto_info;

	if (tx)
		ctx->tx_conf = conf;
	else
		ctx->rx_conf = conf;
	update_sk_prot(sk, ctx);
	if (tx) {
		ctx->sk_write_space = sk->sk_write_space;
		sk->sk_write_space = tls_write_space;
	} else {
		sk->sk_socket->ops = &tls_sw_proto_ops;
	}
	goto out;

err_crypto_info:
	memset(crypto_info, 0, sizeof(*crypto_info));
out:
	return rc;
}

static int do_tls_setsockopt(struct sock *sk, int optname,
			     char __user *optval, unsigned int optlen)
{
	int rc = 0;

	switch (optname) {
	case TLS_TX:
	case TLS_RX:
		lock_sock(sk);
		rc = do_tls_setsockopt_conf(sk, optval, optlen,
					    optname == TLS_TX);
		release_sock(sk);
		break;
	default:
		rc = -ENOPROTOOPT;
		break;
	}

	return rc;
}

static int tls_setsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, unsigned int optlen)
{
	struct tls_context *ctx = tls_get_ctx(sk);

	if (level != SOL_TLS)
		return ctx->setsockopt(sk, level, optname, optval, optlen);

	return do_tls_setsockopt(sk, optname, optval, optlen);
}

static struct tls_context *create_ctx(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tls_context *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;

	icsk->icsk_ulp_data = ctx;
	return ctx;
}

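/* Give registered tls_device drivers a chance to claim the socket via
 * ->feature(); the first match switches it to the TLS_HW_RECORD protos.
 */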
static int tls_hw_prot(struct sock *sk)
{
	struct tls_context *ctx;
	struct tls_device *dev;
	int rc = 0;

	mutex_lock(&device_mutex);
	list_for_each_entry(dev, &device_list, dev_list) {
		if (dev->feature && dev->feature(dev)) {
			ctx = create_ctx(sk);
			if (!ctx)
				goto out;

			ctx->hash = sk->sk_prot->hash;
			ctx->unhash = sk->sk_prot->unhash;
			ctx->sk_proto_close = sk->sk_prot->close;
			ctx->rx_conf = TLS_HW_RECORD;
			ctx->tx_conf = TLS_HW_RECORD;
			update_sk_prot(sk, ctx);
			rc = 1;
			break;
		}
	}
out:
	mutex_unlock(&device_mutex);
	return rc;
}

static void tls_hw_unhash(struct sock *sk)
{
	struct tls_context *ctx = tls_get_ctx(sk);
	struct tls_device *dev;

	mutex_lock(&device_mutex);
	list_for_each_entry(dev, &device_list, dev_list) {
		if (dev->unhash)
			dev->unhash(dev, sk);
	}
	mutex_unlock(&device_mutex);
	ctx->unhash(sk);
}

static int tls_hw_hash(struct sock *sk)
{
	struct tls_context *ctx = tls_get_ctx(sk);
	struct tls_device *dev;
	int err;

	err = ctx->hash(sk);
	mutex_lock(&device_mutex);
	list_for_each_entry(dev, &device_list, dev_list) {
		if (dev->hash)
			err |= dev->hash(dev, sk);
	}
	mutex_unlock(&device_mutex);

	if (err)
		tls_hw_unhash(sk);
	return err;
}

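/* Populate the proto matrix indexed by [tx_conf][rx_conf]. Each entry
 * starts from the base TCP proto and overrides only the hooks its
 * configuration needs.
 */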
static void build_protos(struct proto prot[TLS_NUM_CONFIG][TLS_NUM_CONFIG],
			 struct proto *base)
{
	prot[TLS_BASE][TLS_BASE] = *base;
	prot[TLS_BASE][TLS_BASE].setsockopt	= tls_setsockopt;
	prot[TLS_BASE][TLS_BASE].getsockopt	= tls_getsockopt;
	prot[TLS_BASE][TLS_BASE].close		= tls_sk_proto_close;

	prot[TLS_SW][TLS_BASE] = prot[TLS_BASE][TLS_BASE];
	prot[TLS_SW][TLS_BASE].sendmsg		= tls_sw_sendmsg;
	prot[TLS_SW][TLS_BASE].sendpage		= tls_sw_sendpage;

	prot[TLS_BASE][TLS_SW] = prot[TLS_BASE][TLS_BASE];
	prot[TLS_BASE][TLS_SW].recvmsg		= tls_sw_recvmsg;
	prot[TLS_BASE][TLS_SW].close		= tls_sk_proto_close;

	prot[TLS_SW][TLS_SW] = prot[TLS_SW][TLS_BASE];
	prot[TLS_SW][TLS_SW].recvmsg		= tls_sw_recvmsg;
	prot[TLS_SW][TLS_SW].close		= tls_sk_proto_close;

#ifdef CONFIG_TLS_DEVICE
	prot[TLS_HW][TLS_BASE] = prot[TLS_BASE][TLS_BASE];
	prot[TLS_HW][TLS_BASE].sendmsg		= tls_device_sendmsg;
	prot[TLS_HW][TLS_BASE].sendpage		= tls_device_sendpage;

	prot[TLS_HW][TLS_SW] = prot[TLS_BASE][TLS_SW];
	prot[TLS_HW][TLS_SW].sendmsg		= tls_device_sendmsg;
	prot[TLS_HW][TLS_SW].sendpage		= tls_device_sendpage;
#endif

	prot[TLS_HW_RECORD][TLS_HW_RECORD] = *base;
	prot[TLS_HW_RECORD][TLS_HW_RECORD].hash		= tls_hw_hash;
	prot[TLS_HW_RECORD][TLS_HW_RECORD].unhash	= tls_hw_unhash;
	prot[TLS_HW_RECORD][TLS_HW_RECORD].close	= tls_sk_proto_close;
}

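/* ULP init hook, invoked via setsockopt(SOL_TCP, TCP_ULP, "tls"). */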
static int tls_init(struct sock *sk)
{
	int ip_ver = sk->sk_family == AF_INET6 ? TLSV6 : TLSV4;
	struct tls_context *ctx;
	int rc = 0;

	if (tls_hw_prot(sk))
		goto out;

	/* The TLS ulp is currently supported only for TCP sockets
	 * in ESTABLISHED state.
	 * Supporting sockets in LISTEN state will require us
	 * to modify the accept implementation to clone rather than
	 * share the ulp context.
	 */
	if (sk->sk_state != TCP_ESTABLISHED)
		return -ENOTSUPP;

	/* allocate tls context */
	ctx = create_ctx(sk);
	if (!ctx) {
		rc = -ENOMEM;
		goto out;
	}
	ctx->setsockopt = sk->sk_prot->setsockopt;
	ctx->getsockopt = sk->sk_prot->getsockopt;
	ctx->sk_proto_close = sk->sk_prot->close;

	/* Build IPv6 TLS whenever the address of tcpv6_prot changes */
	if (ip_ver == TLSV6 &&
	    unlikely(sk->sk_prot != smp_load_acquire(&saved_tcpv6_prot))) {
		mutex_lock(&tcpv6_prot_mutex);
		if (likely(sk->sk_prot != saved_tcpv6_prot)) {
			build_protos(tls_prots[TLSV6], sk->sk_prot);
			smp_store_release(&saved_tcpv6_prot, sk->sk_prot);
		}
		mutex_unlock(&tcpv6_prot_mutex);
	}

	ctx->tx_conf = TLS_BASE;
	ctx->rx_conf = TLS_BASE;
	update_sk_prot(sk, ctx);
out:
	return rc;
}

void tls_register_device(struct tls_device *device)
{
	mutex_lock(&device_mutex);
	list_add_tail(&device->dev_list, &device_list);
	mutex_unlock(&device_mutex);
}
EXPORT_SYMBOL(tls_register_device);

void tls_unregister_device(struct tls_device *device)
{
	mutex_lock(&device_mutex);
	list_del(&device->dev_list);
	mutex_unlock(&device_mutex);
}
EXPORT_SYMBOL(tls_unregister_device);

static struct tcp_ulp_ops tcp_tls_ulp_ops __read_mostly = {
	.name			= "tls",
	.uid			= TCP_ULP_TLS,
	.user_visible		= true,
	.owner			= THIS_MODULE,
	.init			= tls_init,
};

static int __init tls_register(void)
{
	build_protos(tls_prots[TLSV4], &tcp_prot);

	tls_sw_proto_ops = inet_stream_ops;
	tls_sw_proto_ops.poll_mask	= tls_sw_poll_mask;
	tls_sw_proto_ops.splice_read	= tls_sw_splice_read;

#ifdef CONFIG_TLS_DEVICE
	tls_device_init();
#endif
	tcp_register_ulp(&tcp_tls_ulp_ops);

	return 0;
}

static void __exit tls_unregister(void)
{
	tcp_unregister_ulp(&tcp_tls_ulp_ops);
#ifdef CONFIG_TLS_DEVICE
	tls_device_cleanup();
#endif
}

module_init(tls_register);
module_exit(tls_unregister);