/*
 * Copyright (c) 2016-2017, Mellanox Technologies. All rights reserved.
 * Copyright (c) 2016-2017, Dave Watson <davejwatson@fb.com>. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>

#include <net/tcp.h>
#include <net/inet_common.h>
#include <linux/highmem.h>
#include <linux/netdevice.h>
#include <linux/sched/signal.h>
#include <linux/inetdevice.h>

#include <net/tls.h>

MODULE_AUTHOR("Mellanox Technologies");
MODULE_DESCRIPTION("Transport Layer Security Support");
MODULE_LICENSE("Dual BSD/GPL");
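
/* The two enums below index the tls_prots table: the first by the
 * socket's address family, the second by the TLS configuration
 * currently installed on the socket.
 */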
enum {
	TLSV4,
	TLSV6,
	TLS_NUM_PROTS,
};

enum {
	TLS_BASE,
	TLS_SW_TX,
	TLS_SW_RX,
	TLS_SW_RXTX,
	TLS_HW_RECORD,
	TLS_NUM_CONFIG,
};

static struct proto *saved_tcpv6_prot;
static DEFINE_MUTEX(tcpv6_prot_mutex);
static LIST_HEAD(device_list);
static DEFINE_MUTEX(device_mutex);
static struct proto tls_prots[TLS_NUM_PROTS][TLS_NUM_CONFIG];
static struct proto_ops tls_sw_proto_ops;
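
/* Point sk->sk_prot at the proto variant matching the socket's address
 * family and the installed TLS configuration.
 */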
static inline void update_sk_prot(struct sock *sk, struct tls_context *ctx)
{
	int ip_ver = sk->sk_family == AF_INET6 ? TLSV6 : TLSV4;

	sk->sk_prot = &tls_prots[ip_ver][ctx->conf];
}
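
/* Sleep until no write is pending on the socket, or until the timeout
 * expires or a signal arrives.
 */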
int wait_on_pending_writer(struct sock *sk, long *timeo)
{
	int rc = 0;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	add_wait_queue(sk_sleep(sk), &wait);
	while (1) {
		if (!*timeo) {
			rc = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			rc = sock_intr_errno(*timeo);
			break;
		}

		if (sk_wait_event(sk, timeo, !sk->sk_write_pending, &wait))
			break;
	}
	remove_wait_queue(sk_sleep(sk), &wait);
	return rc;
}
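
/* Push an already-encrypted record, described by a scatterlist, into
 * the TCP stream via do_tcp_sendpages(). On a partial send, progress
 * is stashed in ctx so the push can be resumed later.
 */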
int tls_push_sg(struct sock *sk,
		struct tls_context *ctx,
		struct scatterlist *sg,
		u16 first_offset,
		int flags)
{
	int sendpage_flags = flags | MSG_SENDPAGE_NOTLAST;
	int ret = 0;
	struct page *p;
	size_t size;
	int offset = first_offset;

	size = sg->length - offset;
	offset += sg->offset;

	ctx->in_tcp_sendpages = true;
	while (1) {
		if (sg_is_last(sg))
			sendpage_flags = flags;

		/* is sending application-limited? */
		tcp_rate_check_app_limited(sk);
		p = sg_page(sg);
retry:
		ret = do_tcp_sendpages(sk, p, offset, size, sendpage_flags);

		if (ret != size) {
			if (ret > 0) {
				offset += ret;
				size -= ret;
				goto retry;
			}

			offset -= sg->offset;
			ctx->partially_sent_offset = offset;
			ctx->partially_sent_record = (void *)sg;
			ctx->in_tcp_sendpages = false;
			return ret;
		}

		put_page(p);
		sk_mem_uncharge(sk, sg->length);
		sg = sg_next(sg);
		if (!sg)
			break;

		offset = sg->offset;
		size = sg->length;
	}

	clear_bit(TLS_PENDING_CLOSED_RECORD, &ctx->flags);
	ctx->in_tcp_sendpages = false;
	ctx->sk_write_space(sk);

	return 0;
}
static int tls_handle_open_record(struct sock *sk, int flags)
{
	struct tls_context *ctx = tls_get_ctx(sk);

	if (tls_is_pending_open_record(ctx))
		return ctx->push_pending_record(sk, flags);

	return 0;
}
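
/* Parse SOL_TLS control messages attached to a sendmsg() call. Only
 * TLS_SET_RECORD_TYPE is understood; any open record is pushed out
 * before the record type changes.
 */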
int tls_proccess_cmsg(struct sock *sk, struct msghdr *msg,
		      unsigned char *record_type)
{
	struct cmsghdr *cmsg;
	int rc = -EINVAL;

	for_each_cmsghdr(cmsg, msg) {
		if (!CMSG_OK(msg, cmsg))
			return -EINVAL;
		if (cmsg->cmsg_level != SOL_TLS)
			continue;
		switch (cmsg->cmsg_type) {
		case TLS_SET_RECORD_TYPE:
			if (cmsg->cmsg_len < CMSG_LEN(sizeof(*record_type)))
				return -EINVAL;

			if (msg->msg_flags & MSG_MORE)
				return -EINVAL;

			rc = tls_handle_open_record(sk, msg->msg_flags);
			if (rc)
				return rc;

			*record_type = *(unsigned char *)CMSG_DATA(cmsg);
			rc = 0;
			break;
		default:
			return -EINVAL;
		}
	}

	return rc;
}
int tls_push_pending_closed_record(struct sock *sk, struct tls_context *ctx,
				   int flags, long *timeo)
{
	struct scatterlist *sg;
	u16 offset;

	if (!tls_is_partially_sent_record(ctx))
		return ctx->push_pending_record(sk, flags);

	sg = ctx->partially_sent_record;
	offset = ctx->partially_sent_offset;

	ctx->partially_sent_record = NULL;
	return tls_push_sg(sk, ctx, sg, offset, flags);
}
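
/* sk_write_space callback: if a closed record is still pending and no
 * writer is waiting, try to flush it (using GFP_ATOMIC allocations,
 * since this may not sleep) before chaining to the original callback.
 */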
static void tls_write_space(struct sock *sk)
{
	struct tls_context *ctx = tls_get_ctx(sk);

	/* We are already sending pages, ignore notification */
	if (ctx->in_tcp_sendpages)
		return;

	if (!sk->sk_write_pending && tls_is_pending_closed_record(ctx)) {
		gfp_t sk_allocation = sk->sk_allocation;
		int rc;
		long timeo = 0;

		sk->sk_allocation = GFP_ATOMIC;
		rc = tls_push_pending_closed_record(sk, ctx,
						    MSG_DONTWAIT |
						    MSG_NOSIGNAL,
						    &timeo);
		sk->sk_allocation = sk_allocation;

		if (rc < 0)
			return;
	}

	ctx->sk_write_space(sk);
}
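
/* close() for a TLS socket: push or drop in-flight records, free the
 * crypto state, then hand off to the original protocol close.
 */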
static void tls_sk_proto_close(struct sock *sk, long timeout)
{
	struct tls_context *ctx = tls_get_ctx(sk);
	long timeo = sock_sndtimeo(sk, 0);
	void (*sk_proto_close)(struct sock *sk, long timeout);
	bool free_ctx = false;

	lock_sock(sk);
	sk_proto_close = ctx->sk_proto_close;

	if (ctx->conf == TLS_BASE || ctx->conf == TLS_HW_RECORD) {
		free_ctx = true;
		goto skip_tx_cleanup;
	}

	if (!tls_complete_pending_work(sk, ctx, 0, &timeo))
		tls_handle_open_record(sk, 0);

	if (ctx->partially_sent_record) {
		struct scatterlist *sg = ctx->partially_sent_record;

		while (1) {
			put_page(sg_page(sg));
			sk_mem_uncharge(sk, sg->length);

			if (sg_is_last(sg))
				break;
			sg++;
		}
	}

	kfree(ctx->tx.rec_seq);
	kfree(ctx->tx.iv);
	kfree(ctx->rx.rec_seq);
	kfree(ctx->rx.iv);

	if (ctx->conf == TLS_SW_TX ||
	    ctx->conf == TLS_SW_RX ||
	    ctx->conf == TLS_SW_RXTX) {
		tls_sw_free_resources(sk);
	}

skip_tx_cleanup:
	release_sock(sk);
	sk_proto_close(sk, timeout);
	/* free ctx for TLS_HW_RECORD, used by tcp_set_state
	 * for sk->sk_prot->unhash [tls_hw_unhash]
	 */
	if (free_ctx)
		kfree(ctx);
}
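
/* Return the TX crypto parameters to userspace. A buffer of exactly
 * sizeof(tls_crypto_info) gets only the generic header; the full
 * cipher-specific struct (including IV and record sequence) requires
 * an exact size match.
 */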
static int do_tls_getsockopt_tx(struct sock *sk, char __user *optval,
				int __user *optlen)
{
	int rc = 0;
	struct tls_context *ctx = tls_get_ctx(sk);
	struct tls_crypto_info *crypto_info;
	int len;

	if (get_user(len, optlen))
		return -EFAULT;

	if (!optval || (len < sizeof(*crypto_info))) {
		rc = -EINVAL;
		goto out;
	}

	if (!ctx) {
		rc = -EBUSY;
		goto out;
	}

	/* get user crypto info */
	crypto_info = &ctx->crypto_send;

	if (!TLS_CRYPTO_INFO_READY(crypto_info)) {
		rc = -EBUSY;
		goto out;
	}

	if (len == sizeof(*crypto_info)) {
		if (copy_to_user(optval, crypto_info, sizeof(*crypto_info)))
			rc = -EFAULT;
		goto out;
	}

	switch (crypto_info->cipher_type) {
	case TLS_CIPHER_AES_GCM_128: {
		struct tls12_crypto_info_aes_gcm_128 *
		  crypto_info_aes_gcm_128 =
		  container_of(crypto_info,
			       struct tls12_crypto_info_aes_gcm_128,
			       info);

		if (len != sizeof(*crypto_info_aes_gcm_128)) {
			rc = -EINVAL;
			goto out;
		}
		lock_sock(sk);
		memcpy(crypto_info_aes_gcm_128->iv,
		       ctx->tx.iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
		       TLS_CIPHER_AES_GCM_128_IV_SIZE);
		memcpy(crypto_info_aes_gcm_128->rec_seq, ctx->tx.rec_seq,
		       TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE);
		release_sock(sk);
		if (copy_to_user(optval,
				 crypto_info_aes_gcm_128,
				 sizeof(*crypto_info_aes_gcm_128)))
			rc = -EFAULT;
		break;
	}
	default:
		rc = -EINVAL;
	}

out:
	return rc;
}
static int do_tls_getsockopt(struct sock *sk, int optname,
			     char __user *optval, int __user *optlen)
{
	int rc = 0;

	switch (optname) {
	case TLS_TX:
		rc = do_tls_getsockopt_tx(sk, optval, optlen);
		break;
	default:
		rc = -ENOPROTOOPT;
		break;
	}
	return rc;
}

static int tls_getsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, int __user *optlen)
{
	struct tls_context *ctx = tls_get_ctx(sk);

	if (level != SOL_TLS)
		return ctx->getsockopt(sk, level, optname, optval, optlen);

	return do_tls_getsockopt(sk, optname, optval, optlen);
}
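
/* Validate the crypto parameters passed in via setsockopt(), install
 * the software TLS path for the requested direction, and switch the
 * socket over to the matching proto variant.
 */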
static int do_tls_setsockopt_conf(struct sock *sk, char __user *optval,
				  unsigned int optlen, int tx)
{
	struct tls_crypto_info *crypto_info;
	struct tls_context *ctx = tls_get_ctx(sk);
	int rc = 0;
	int conf;

	if (!optval || (optlen < sizeof(*crypto_info))) {
		rc = -EINVAL;
		goto out;
	}

	if (tx)
		crypto_info = &ctx->crypto_send;
	else
		crypto_info = &ctx->crypto_recv;

	/* Currently we don't support setting crypto info more than once */
	if (TLS_CRYPTO_INFO_READY(crypto_info)) {
		rc = -EBUSY;
		goto out;
	}

	rc = copy_from_user(crypto_info, optval, sizeof(*crypto_info));
	if (rc) {
		rc = -EFAULT;
		goto err_crypto_info;
	}

	/* check version */
	if (crypto_info->version != TLS_1_2_VERSION) {
		rc = -ENOTSUPP;
		goto err_crypto_info;
	}

	switch (crypto_info->cipher_type) {
	case TLS_CIPHER_AES_GCM_128: {
		if (optlen != sizeof(struct tls12_crypto_info_aes_gcm_128)) {
			rc = -EINVAL;
			goto err_crypto_info;
		}
		rc = copy_from_user(crypto_info + 1, optval + sizeof(*crypto_info),
				    optlen - sizeof(*crypto_info));
		if (rc) {
			rc = -EFAULT;
			goto err_crypto_info;
		}
		break;
	}
	default:
		rc = -EINVAL;
		goto err_crypto_info;
	}

	/* currently SW is default, we will have ethtool in future */
	if (tx) {
		rc = tls_set_sw_offload(sk, ctx, 1);
		if (ctx->conf == TLS_SW_RX)
			conf = TLS_SW_RXTX;
		else
			conf = TLS_SW_TX;
	} else {
		rc = tls_set_sw_offload(sk, ctx, 0);
		if (ctx->conf == TLS_SW_TX)
			conf = TLS_SW_RXTX;
		else
			conf = TLS_SW_RX;
	}

	if (rc)
		goto err_crypto_info;

	ctx->conf = conf;
	update_sk_prot(sk, ctx);
	if (tx) {
		ctx->sk_write_space = sk->sk_write_space;
		sk->sk_write_space = tls_write_space;
	} else {
		sk->sk_socket->ops = &tls_sw_proto_ops;
	}
	goto out;

err_crypto_info:
	memset(crypto_info, 0, sizeof(*crypto_info));
out:
	return rc;
}
static int do_tls_setsockopt(struct sock *sk, int optname,
			     char __user *optval, unsigned int optlen)
{
	int rc = 0;

	switch (optname) {
	case TLS_TX:
	case TLS_RX:
		lock_sock(sk);
		rc = do_tls_setsockopt_conf(sk, optval, optlen,
					    optname == TLS_TX);
		release_sock(sk);
		break;
	default:
		rc = -ENOPROTOOPT;
		break;
	}
	return rc;
}

static int tls_setsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, unsigned int optlen)
{
	struct tls_context *ctx = tls_get_ctx(sk);

	if (level != SOL_TLS)
		return ctx->setsockopt(sk, level, optname, optval, optlen);

	return do_tls_setsockopt(sk, optname, optval, optlen);
}
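
/* For reference, a userspace caller typically enables the software TX
 * path roughly as follows (an illustrative sketch only; key/IV values
 * come from the TLS handshake and error handling is omitted):
 *
 *	struct tls12_crypto_info_aes_gcm_128 ci = {
 *		.info.version = TLS_1_2_VERSION,
 *		.info.cipher_type = TLS_CIPHER_AES_GCM_128,
 *	};
 *	// fill ci.key, ci.iv, ci.salt and ci.rec_seq from the handshake
 *	setsockopt(fd, SOL_TCP, TCP_ULP, "tls", sizeof("tls"));
 *	setsockopt(fd, SOL_TLS, TLS_TX, &ci, sizeof(ci));
 *
 * after which ordinary send() data is encrypted by tls_sw_sendmsg(),
 * installed in the proto table below.
 */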
static struct tls_context *create_ctx(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tls_context *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;

	icsk->icsk_ulp_data = ctx;
	return ctx;
}
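
/* Give registered devices a chance to take over the socket for HW
 * record offload. Returns 1 if a device claimed it, 0 otherwise.
 */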
static int tls_hw_prot(struct sock *sk)
{
	struct tls_context *ctx;
	struct tls_device *dev;
	int rc = 0;

	mutex_lock(&device_mutex);
	list_for_each_entry(dev, &device_list, dev_list) {
		if (dev->feature && dev->feature(dev)) {
			ctx = create_ctx(sk);
			if (!ctx)
				goto out;

			ctx->hash = sk->sk_prot->hash;
			ctx->unhash = sk->sk_prot->unhash;
			ctx->sk_proto_close = sk->sk_prot->close;
			ctx->conf = TLS_HW_RECORD;
			update_sk_prot(sk, ctx);
			rc = 1;
			break;
		}
	}
out:
	mutex_unlock(&device_mutex);
	return rc;
}
static void tls_hw_unhash(struct sock *sk)
{
	struct tls_context *ctx = tls_get_ctx(sk);
	struct tls_device *dev;

	mutex_lock(&device_mutex);
	list_for_each_entry(dev, &device_list, dev_list) {
		if (dev->unhash)
			dev->unhash(dev, sk);
	}
	mutex_unlock(&device_mutex);
	ctx->unhash(sk);
}

static int tls_hw_hash(struct sock *sk)
{
	struct tls_context *ctx = tls_get_ctx(sk);
	struct tls_device *dev;
	int err;

	err = ctx->hash(sk);
	mutex_lock(&device_mutex);
	list_for_each_entry(dev, &device_list, dev_list) {
		if (dev->hash)
			err |= dev->hash(dev, sk);
	}
	mutex_unlock(&device_mutex);

	if (err)
		tls_hw_unhash(sk);
	return err;
}
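
/* Populate one row of the proto table: start from the base TCP proto
 * and override only the operations each TLS configuration needs.
 */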
static void build_protos(struct proto *prot, struct proto *base)
{
	prot[TLS_BASE] = *base;
	prot[TLS_BASE].setsockopt	= tls_setsockopt;
	prot[TLS_BASE].getsockopt	= tls_getsockopt;
	prot[TLS_BASE].close		= tls_sk_proto_close;

	prot[TLS_SW_TX] = prot[TLS_BASE];
	prot[TLS_SW_TX].sendmsg		= tls_sw_sendmsg;
	prot[TLS_SW_TX].sendpage	= tls_sw_sendpage;

	prot[TLS_SW_RX] = prot[TLS_BASE];
	prot[TLS_SW_RX].recvmsg		= tls_sw_recvmsg;
	prot[TLS_SW_RX].close		= tls_sk_proto_close;

	prot[TLS_SW_RXTX] = prot[TLS_SW_TX];
	prot[TLS_SW_RXTX].recvmsg	= tls_sw_recvmsg;
	prot[TLS_SW_RXTX].close		= tls_sk_proto_close;

	prot[TLS_HW_RECORD] = *base;
	prot[TLS_HW_RECORD].hash	= tls_hw_hash;
	prot[TLS_HW_RECORD].unhash	= tls_hw_unhash;
	prot[TLS_HW_RECORD].close	= tls_sk_proto_close;
}
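
/* ULP init hook, invoked via setsockopt(SOL_TCP, TCP_ULP, "tls"). */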
static int tls_init(struct sock *sk)
{
	int ip_ver = sk->sk_family == AF_INET6 ? TLSV6 : TLSV4;
	struct tls_context *ctx;
	int rc = 0;

	if (tls_hw_prot(sk))
		goto out;

	/* The TLS ulp is currently supported only for TCP sockets
	 * in ESTABLISHED state.
	 * Supporting sockets in LISTEN state will require us
	 * to modify the accept implementation to clone rather than
	 * share the ulp context.
	 */
	if (sk->sk_state != TCP_ESTABLISHED)
		return -ENOTSUPP;

	/* allocate tls context */
	ctx = create_ctx(sk);
	if (!ctx) {
		rc = -ENOMEM;
		goto out;
	}
	ctx->setsockopt = sk->sk_prot->setsockopt;
	ctx->getsockopt = sk->sk_prot->getsockopt;
	ctx->sk_proto_close = sk->sk_prot->close;

	/* Build IPv6 TLS whenever the address of tcpv6_prot changes */
	if (ip_ver == TLSV6 &&
	    unlikely(sk->sk_prot != smp_load_acquire(&saved_tcpv6_prot))) {
		mutex_lock(&tcpv6_prot_mutex);
		if (likely(sk->sk_prot != saved_tcpv6_prot)) {
			build_protos(tls_prots[TLSV6], sk->sk_prot);
			smp_store_release(&saved_tcpv6_prot, sk->sk_prot);
		}
		mutex_unlock(&tcpv6_prot_mutex);
	}

	ctx->conf = TLS_BASE;
	update_sk_prot(sk, ctx);
out:
	return rc;
}
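
/* Drivers capable of TLS record offload register themselves here; the
 * list is consulted by tls_hw_prot() each time the ULP is attached.
 */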
void tls_register_device(struct tls_device *device)
{
	mutex_lock(&device_mutex);
	list_add_tail(&device->dev_list, &device_list);
	mutex_unlock(&device_mutex);
}
EXPORT_SYMBOL(tls_register_device);

void tls_unregister_device(struct tls_device *device)
{
	mutex_lock(&device_mutex);
	list_del(&device->dev_list);
	mutex_unlock(&device_mutex);
}
EXPORT_SYMBOL(tls_unregister_device);
static struct tcp_ulp_ops tcp_tls_ulp_ops __read_mostly = {
	.name			= "tls",
	.uid			= TCP_ULP_TLS,
	.user_visible		= true,
	.owner			= THIS_MODULE,
	.init			= tls_init,
};

static int __init tls_register(void)
{
	build_protos(tls_prots[TLSV4], &tcp_prot);

	tls_sw_proto_ops = inet_stream_ops;
	tls_sw_proto_ops.poll = tls_sw_poll;
	tls_sw_proto_ops.splice_read = tls_sw_splice_read;

	tcp_register_ulp(&tcp_tls_ulp_ops);

	return 0;
}

static void __exit tls_unregister(void)
{
	tcp_unregister_ulp(&tcp_tls_ulp_ops);
}

module_init(tls_register);
module_exit(tls_unregister);