tls_device.c

/* Copyright (c) 2018, Mellanox Technologies All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <crypto/aead.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <net/dst.h>
#include <net/inet_connection_sock.h>
#include <net/tcp.h>
#include <net/tls.h>

/* device_offload_lock is used to synchronize tls_dev_add
 * against NETDEV_DOWN notifications.
 */
static DECLARE_RWSEM(device_offload_lock);

static void tls_device_gc_task(struct work_struct *work);

static DECLARE_WORK(tls_device_gc_work, tls_device_gc_task);
static LIST_HEAD(tls_device_gc_list);
static LIST_HEAD(tls_device_list);
static DEFINE_SPINLOCK(tls_device_lock);

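/* Note: tls_device_lock protects both tls_device_list and
 * tls_device_gc_list. A context queued for destruction is moved from the
 * live list to the gc list under this lock and later freed by
 * tls_device_gc_task() below.
 */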
static void tls_device_free_ctx(struct tls_context *ctx)
{
        struct tls_offload_context *offload_ctx = tls_offload_ctx(ctx);

        kfree(offload_ctx);
        kfree(ctx);
}

static void tls_device_gc_task(struct work_struct *work)
{
        struct tls_context *ctx, *tmp;
        unsigned long flags;
        LIST_HEAD(gc_list);

        spin_lock_irqsave(&tls_device_lock, flags);
        list_splice_init(&tls_device_gc_list, &gc_list);
        spin_unlock_irqrestore(&tls_device_lock, flags);

        list_for_each_entry_safe(ctx, tmp, &gc_list, list) {
                struct net_device *netdev = ctx->netdev;

                if (netdev) {
                        netdev->tlsdev_ops->tls_dev_del(netdev, ctx,
                                                        TLS_OFFLOAD_CTX_DIR_TX);
                        dev_put(netdev);
                }

                list_del(&ctx->list);
                tls_device_free_ctx(ctx);
        }
}

static void tls_device_queue_ctx_destruction(struct tls_context *ctx)
{
        unsigned long flags;

        spin_lock_irqsave(&tls_device_lock, flags);
        list_move_tail(&ctx->list, &tls_device_gc_list);

        /* schedule_work inside the spinlock
         * to make sure tls_device_down waits for that work.
         */
        schedule_work(&tls_device_gc_work);

        spin_unlock_irqrestore(&tls_device_lock, flags);
}

/* We assume that the socket is already connected */
static struct net_device *get_netdev_for_sock(struct sock *sk)
{
        struct dst_entry *dst = sk_dst_get(sk);
        struct net_device *netdev = NULL;

        if (likely(dst)) {
                netdev = dst->dev;
                dev_hold(netdev);
        }

        dst_release(dst);

        return netdev;
}

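/* destroy_record() drops the page references taken by tls_append_frag()
 * and tls_create_new_record() and frees the record itself;
 * delete_all_records() does the same for every record still on the list.
 */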
static void destroy_record(struct tls_record_info *record)
{
        int nr_frags = record->num_frags;
        skb_frag_t *frag;

        while (nr_frags-- > 0) {
                frag = &record->frags[nr_frags];
                __skb_frag_unref(frag);
        }
        kfree(record);
}

static void delete_all_records(struct tls_offload_context *offload_ctx)
{
        struct tls_record_info *info, *temp;

        list_for_each_entry_safe(info, temp, &offload_ctx->records_list, list) {
                list_del(&info->list);
                destroy_record(info);
        }

        offload_ctx->retransmit_hint = NULL;
}

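/* Callback registered via clean_acked_data_enable(): as TCP ACKs advance,
 * free every record whose end_seq has been fully acknowledged and advance
 * unacked_record_sn so it keeps tracking the first record still queued.
 */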
static void tls_icsk_clean_acked(struct sock *sk, u32 acked_seq)
{
        struct tls_context *tls_ctx = tls_get_ctx(sk);
        struct tls_record_info *info, *temp;
        struct tls_offload_context *ctx;
        u64 deleted_records = 0;
        unsigned long flags;

        if (!tls_ctx)
                return;

        ctx = tls_offload_ctx(tls_ctx);

        spin_lock_irqsave(&ctx->lock, flags);
        info = ctx->retransmit_hint;
        if (info && !before(acked_seq, info->end_seq)) {
                ctx->retransmit_hint = NULL;
                list_del(&info->list);
                destroy_record(info);
                deleted_records++;
        }

        list_for_each_entry_safe(info, temp, &ctx->records_list, list) {
                if (before(acked_seq, info->end_seq))
                        break;
                list_del(&info->list);

                destroy_record(info);
                deleted_records++;
        }

        ctx->unacked_record_sn += deleted_records;
        spin_unlock_irqrestore(&ctx->lock, flags);
}

/* At this point, there should be no references on this
 * socket and no in-flight SKBs associated with this
 * socket, so it is safe to free all the resources.
 */
void tls_device_sk_destruct(struct sock *sk)
{
        struct tls_context *tls_ctx = tls_get_ctx(sk);
        struct tls_offload_context *ctx = tls_offload_ctx(tls_ctx);

        if (ctx->open_record)
                destroy_record(ctx->open_record);

        delete_all_records(ctx);
        crypto_free_aead(ctx->aead_send);
        ctx->sk_destruct(sk);
        clean_acked_data_disable(inet_csk(sk));

        if (refcount_dec_and_test(&tls_ctx->refcount))
                tls_device_queue_ctx_destruction(tls_ctx);
}
EXPORT_SYMBOL(tls_device_sk_destruct);

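/* Append @size bytes from @pfrag to the open record: extend the last frag
 * when it is contiguous with the page frag, otherwise start a new frag and
 * take an extra reference on the page.
 */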
static void tls_append_frag(struct tls_record_info *record,
                            struct page_frag *pfrag,
                            int size)
{
        skb_frag_t *frag;

        frag = &record->frags[record->num_frags - 1];
        if (frag->page.p == pfrag->page &&
            frag->page_offset + frag->size == pfrag->offset) {
                frag->size += size;
        } else {
                ++frag;
                frag->page.p = pfrag->page;
                frag->page_offset = pfrag->offset;
                frag->size = size;
                ++record->num_frags;
                get_page(pfrag->page);
        }

        pfrag->offset += size;
        record->len += size;
}

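/* Close the open record: write the TLS header into frag 0, append a dummy
 * frag for the authentication tag (the device fills in the tag itself),
 * queue the record on records_list for later lookups, then build the
 * scatterlist and hand the data to TCP via tls_push_sg().
 */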
static int tls_push_record(struct sock *sk,
                           struct tls_context *ctx,
                           struct tls_offload_context *offload_ctx,
                           struct tls_record_info *record,
                           struct page_frag *pfrag,
                           int flags,
                           unsigned char record_type)
{
        struct tcp_sock *tp = tcp_sk(sk);
        struct page_frag dummy_tag_frag;
        skb_frag_t *frag;
        int i;

        /* fill prepend */
        frag = &record->frags[0];
        tls_fill_prepend(ctx,
                         skb_frag_address(frag),
                         record->len - ctx->tx.prepend_size,
                         record_type);

        /* HW doesn't care about the data in the tag, because it fills it. */
        dummy_tag_frag.page = skb_frag_page(frag);
        dummy_tag_frag.offset = 0;

        tls_append_frag(record, &dummy_tag_frag, ctx->tx.tag_size);
        record->end_seq = tp->write_seq + record->len;
        spin_lock_irq(&offload_ctx->lock);
        list_add_tail(&record->list, &offload_ctx->records_list);
        spin_unlock_irq(&offload_ctx->lock);
        offload_ctx->open_record = NULL;
        set_bit(TLS_PENDING_CLOSED_RECORD, &ctx->flags);
        tls_advance_record_sn(sk, &ctx->tx);

        for (i = 0; i < record->num_frags; i++) {
                frag = &record->frags[i];
                sg_unmark_end(&offload_ctx->sg_tx_data[i]);
                sg_set_page(&offload_ctx->sg_tx_data[i], skb_frag_page(frag),
                            frag->size, frag->page_offset);
                sk_mem_charge(sk, frag->size);
                get_page(skb_frag_page(frag));
        }
        sg_mark_end(&offload_ctx->sg_tx_data[record->num_frags - 1]);

        /* all ready, send */
        return tls_push_sg(sk, ctx, offload_ctx->sg_tx_data, 0, flags);
}

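/* Start a new open record whose first frag is reserved for the TLS record
 * header (prepend_size bytes); the header contents are written later by
 * tls_push_record().
 */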
static int tls_create_new_record(struct tls_offload_context *offload_ctx,
                                 struct page_frag *pfrag,
                                 size_t prepend_size)
{
        struct tls_record_info *record;
        skb_frag_t *frag;

        record = kmalloc(sizeof(*record), GFP_KERNEL);
        if (!record)
                return -ENOMEM;

        frag = &record->frags[0];
        __skb_frag_set_page(frag, pfrag->page);
        frag->page_offset = pfrag->offset;
        skb_frag_size_set(frag, prepend_size);

        get_page(pfrag->page);
        pfrag->offset += prepend_size;

        record->num_frags = 1;
        record->len = prepend_size;
        offload_ctx->open_record = record;
        return 0;
}

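/* Make sure there is an open record and that the socket's page frag has
 * room for more payload; enter memory pressure when even the prepend-sized
 * allocation for a new record cannot be satisfied.
 */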
static int tls_do_allocation(struct sock *sk,
                             struct tls_offload_context *offload_ctx,
                             struct page_frag *pfrag,
                             size_t prepend_size)
{
        int ret;

        if (!offload_ctx->open_record) {
                if (unlikely(!skb_page_frag_refill(prepend_size, pfrag,
                                                   sk->sk_allocation))) {
                        sk->sk_prot->enter_memory_pressure(sk);
                        sk_stream_moderate_sndbuf(sk);
                        return -ENOMEM;
                }

                ret = tls_create_new_record(offload_ctx, pfrag, prepend_size);
                if (ret)
                        return ret;

                if (pfrag->size > pfrag->offset)
                        return 0;
        }

        if (!sk_page_frag_refill(sk, pfrag))
                return -ENOMEM;

        return 0;
}

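/* Main TX path: copy the payload from @msg_iter into the socket's page
 * frags, appending to the open record, and push the record out once it
 * fills up (max_open_record_len or MAX_SKB_FRAGS - 1 frags) or once the
 * last byte of this request has been consumed, unless MSG_MORE /
 * MSG_SENDPAGE_NOTLAST indicates that more data will follow.
 */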
static int tls_push_data(struct sock *sk,
                         struct iov_iter *msg_iter,
                         size_t size, int flags,
                         unsigned char record_type)
{
        struct tls_context *tls_ctx = tls_get_ctx(sk);
        struct tls_offload_context *ctx = tls_offload_ctx(tls_ctx);
        int tls_push_record_flags = flags | MSG_SENDPAGE_NOTLAST;
        int more = flags & (MSG_SENDPAGE_NOTLAST | MSG_MORE);
        struct tls_record_info *record = ctx->open_record;
        struct page_frag *pfrag;
        size_t orig_size = size;
        u32 max_open_record_len;
        int copy, rc = 0;
        bool done = false;
        long timeo;

        if (flags &
            ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL | MSG_SENDPAGE_NOTLAST))
                return -ENOTSUPP;

        if (sk->sk_err)
                return -sk->sk_err;

        timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
        rc = tls_complete_pending_work(sk, tls_ctx, flags, &timeo);
        if (rc < 0)
                return rc;

        pfrag = sk_page_frag(sk);

        /* TLS_HEADER_SIZE is not counted as part of the TLS record, and
         * we need to leave room for an authentication tag.
         */
        max_open_record_len = TLS_MAX_PAYLOAD_SIZE +
                              tls_ctx->tx.prepend_size;
        do {
                rc = tls_do_allocation(sk, ctx, pfrag,
                                       tls_ctx->tx.prepend_size);
                if (rc) {
                        rc = sk_stream_wait_memory(sk, &timeo);
                        if (!rc)
                                continue;

                        record = ctx->open_record;
                        if (!record)
                                break;
handle_error:
                        if (record_type != TLS_RECORD_TYPE_DATA) {
                                /* avoid sending partial
                                 * record with type !=
                                 * application_data
                                 */
                                size = orig_size;
                                destroy_record(record);
                                ctx->open_record = NULL;
                        } else if (record->len > tls_ctx->tx.prepend_size) {
                                goto last_record;
                        }

                        break;
                }

                record = ctx->open_record;
                copy = min_t(size_t, size, (pfrag->size - pfrag->offset));
                copy = min_t(size_t, copy, (max_open_record_len - record->len));

                if (copy_from_iter_nocache(page_address(pfrag->page) +
                                               pfrag->offset,
                                           copy, msg_iter) != copy) {
                        rc = -EFAULT;
                        goto handle_error;
                }
                tls_append_frag(record, pfrag, copy);

                size -= copy;
                if (!size) {
last_record:
                        tls_push_record_flags = flags;
                        if (more) {
                                tls_ctx->pending_open_record_frags =
                                                record->num_frags;
                                break;
                        }

                        done = true;
                }

                if (done || record->len >= max_open_record_len ||
                    (record->num_frags >= MAX_SKB_FRAGS - 1)) {
                        rc = tls_push_record(sk,
                                             tls_ctx,
                                             ctx,
                                             record,
                                             pfrag,
                                             tls_push_record_flags,
                                             record_type);
                        if (rc < 0)
                                break;
                }
        } while (!done);

        if (orig_size - size > 0)
                rc = orig_size - size;

        return rc;
}

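/* sendmsg()/sendpage() entry points for a device-offloaded TLS socket;
 * both funnel the user data into tls_push_data().
 */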
int tls_device_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
{
        unsigned char record_type = TLS_RECORD_TYPE_DATA;
        int rc;

        lock_sock(sk);

        if (unlikely(msg->msg_controllen)) {
                rc = tls_proccess_cmsg(sk, msg, &record_type);
                if (rc)
                        goto out;
        }

        rc = tls_push_data(sk, &msg->msg_iter, size,
                           msg->msg_flags, record_type);

out:
        release_sock(sk);
        return rc;
}

int tls_device_sendpage(struct sock *sk, struct page *page,
                        int offset, size_t size, int flags)
{
        struct iov_iter msg_iter;
        char *kaddr = kmap(page);
        struct kvec iov;
        int rc;

        if (flags & MSG_SENDPAGE_NOTLAST)
                flags |= MSG_MORE;

        lock_sock(sk);

        if (flags & MSG_OOB) {
                rc = -ENOTSUPP;
                goto out;
        }

        iov.iov_base = kaddr + offset;
        iov.iov_len = size;
        iov_iter_kvec(&msg_iter, WRITE | ITER_KVEC, &iov, 1, size);
        rc = tls_push_data(sk, &msg_iter, size,
                           flags, TLS_RECORD_TYPE_DATA);
        kunmap(page);

out:
        release_sock(sk);
        return rc;
}

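/* Exported helper for retransmission handling: map TCP sequence number
 * @seq to the TLS record that contains it and report that record's
 * sequence number through @p_record_sn, caching the result in
 * retransmit_hint to speed up subsequent lookups.
 */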
struct tls_record_info *tls_get_record(struct tls_offload_context *context,
                                       u32 seq, u64 *p_record_sn)
{
        u64 record_sn = context->hint_record_sn;
        struct tls_record_info *info;

        info = context->retransmit_hint;
        if (!info ||
            before(seq, info->end_seq - info->len)) {
                /* if retransmit_hint is irrelevant start
                 * from the beginning of the list
                 */
                info = list_first_entry(&context->records_list,
                                        struct tls_record_info, list);
                record_sn = context->unacked_record_sn;
        }

        list_for_each_entry_from(info, &context->records_list, list) {
                if (before(seq, info->end_seq)) {
                        if (!context->retransmit_hint ||
                            after(info->end_seq,
                                  context->retransmit_hint->end_seq)) {
                                context->hint_record_sn = record_sn;
                                context->retransmit_hint = info;
                        }

                        *p_record_sn = record_sn;
                        return info;
                }
                record_sn++;
        }

        return NULL;
}
EXPORT_SYMBOL(tls_get_record);

static int tls_device_push_pending_record(struct sock *sk, int flags)
{
        struct iov_iter msg_iter;

        iov_iter_kvec(&msg_iter, WRITE | ITER_KVEC, NULL, 0, 0);
        return tls_push_data(sk, &msg_iter, 0, flags, TLS_RECORD_TYPE_DATA);
}

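/* Set up TX device offload for @sk: validate the cipher parameters, build
 * the offload context (IV, record sequence, start-marker record, SW
 * fallback), then, under device_offload_lock, look up the egress netdev,
 * check NETIF_F_HW_TLS_TX and IFF_UP, and install the connection state in
 * the driver via tls_dev_add().
 */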
int tls_set_device_offload(struct sock *sk, struct tls_context *ctx)
{
        u16 nonce_size, tag_size, iv_size, rec_seq_size;
        struct tls_record_info *start_marker_record;
        struct tls_offload_context *offload_ctx;
        struct tls_crypto_info *crypto_info;
        struct net_device *netdev;
        char *iv, *rec_seq;
        struct sk_buff *skb;
        int rc = -EINVAL;
        __be64 rcd_sn;

        if (!ctx)
                goto out;

        if (ctx->priv_ctx_tx) {
                rc = -EEXIST;
                goto out;
        }

        start_marker_record = kmalloc(sizeof(*start_marker_record), GFP_KERNEL);
        if (!start_marker_record) {
                rc = -ENOMEM;
                goto out;
        }

        offload_ctx = kzalloc(TLS_OFFLOAD_CONTEXT_SIZE, GFP_KERNEL);
        if (!offload_ctx) {
                rc = -ENOMEM;
                goto free_marker_record;
        }

        crypto_info = &ctx->crypto_send;
        switch (crypto_info->cipher_type) {
        case TLS_CIPHER_AES_GCM_128:
                nonce_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
                tag_size = TLS_CIPHER_AES_GCM_128_TAG_SIZE;
                iv_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
                iv = ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->iv;
                rec_seq_size = TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE;
                rec_seq =
                 ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->rec_seq;
                break;
        default:
                rc = -EINVAL;
                goto free_offload_ctx;
        }

        ctx->tx.prepend_size = TLS_HEADER_SIZE + nonce_size;
        ctx->tx.tag_size = tag_size;
        ctx->tx.overhead_size = ctx->tx.prepend_size + ctx->tx.tag_size;
        ctx->tx.iv_size = iv_size;
        ctx->tx.iv = kmalloc(iv_size + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
                             GFP_KERNEL);
        if (!ctx->tx.iv) {
                rc = -ENOMEM;
                goto free_offload_ctx;
        }

        memcpy(ctx->tx.iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE, iv, iv_size);

        ctx->tx.rec_seq_size = rec_seq_size;
        ctx->tx.rec_seq = kmalloc(rec_seq_size, GFP_KERNEL);
        if (!ctx->tx.rec_seq) {
                rc = -ENOMEM;
                goto free_iv;
        }
        memcpy(ctx->tx.rec_seq, rec_seq, rec_seq_size);

        rc = tls_sw_fallback_init(sk, offload_ctx, crypto_info);
        if (rc)
                goto free_rec_seq;

        /* start at rec_seq - 1 to account for the start marker record */
        memcpy(&rcd_sn, ctx->tx.rec_seq, sizeof(rcd_sn));
        offload_ctx->unacked_record_sn = be64_to_cpu(rcd_sn) - 1;

        start_marker_record->end_seq = tcp_sk(sk)->write_seq;
        start_marker_record->len = 0;
        start_marker_record->num_frags = 0;

        INIT_LIST_HEAD(&offload_ctx->records_list);
        list_add_tail(&start_marker_record->list, &offload_ctx->records_list);
        spin_lock_init(&offload_ctx->lock);
        sg_init_table(offload_ctx->sg_tx_data,
                      ARRAY_SIZE(offload_ctx->sg_tx_data));

        clean_acked_data_enable(inet_csk(sk), &tls_icsk_clean_acked);
        ctx->push_pending_record = tls_device_push_pending_record;
        offload_ctx->sk_destruct = sk->sk_destruct;

        /* TLS offload is greatly simplified if we don't send
         * SKBs where only part of the payload needs to be encrypted.
         * So mark the last skb in the write queue as end of record.
         */
        skb = tcp_write_queue_tail(sk);
        if (skb)
                TCP_SKB_CB(skb)->eor = 1;

        refcount_set(&ctx->refcount, 1);

        /* We support starting offload on multiple sockets
         * concurrently, so we only need a read lock here.
         * This lock must precede get_netdev_for_sock to prevent races between
         * NETDEV_DOWN and setsockopt.
         */
        down_read(&device_offload_lock);
        netdev = get_netdev_for_sock(sk);
        if (!netdev) {
                pr_err_ratelimited("%s: netdev not found\n", __func__);
                rc = -EINVAL;
                goto release_lock;
        }

        if (!(netdev->features & NETIF_F_HW_TLS_TX)) {
                rc = -ENOTSUPP;
                goto release_netdev;
        }

        /* Avoid offloading if the device is down
         * We don't want to offload new flows after
         * the NETDEV_DOWN event
         */
        if (!(netdev->flags & IFF_UP)) {
                rc = -EINVAL;
                goto release_netdev;
        }

        ctx->priv_ctx_tx = offload_ctx;
        rc = netdev->tlsdev_ops->tls_dev_add(netdev, sk, TLS_OFFLOAD_CTX_DIR_TX,
                                             &ctx->crypto_send,
                                             tcp_sk(sk)->write_seq);
        if (rc)
                goto release_netdev;

        ctx->netdev = netdev;

        spin_lock_irq(&tls_device_lock);
        list_add_tail(&ctx->list, &tls_device_list);
        spin_unlock_irq(&tls_device_lock);

        sk->sk_validate_xmit_skb = tls_validate_xmit_skb;
        /* following this assignment tls_is_sk_tx_device_offloaded
         * will return true and the context might be accessed
         * by the netdev's xmit function.
         */
        smp_store_release(&sk->sk_destruct,
                          &tls_device_sk_destruct);
        up_read(&device_offload_lock);
        goto out;

release_netdev:
        dev_put(netdev);
release_lock:
        up_read(&device_offload_lock);
        clean_acked_data_disable(inet_csk(sk));
        crypto_free_aead(offload_ctx->aead_send);
free_rec_seq:
        kfree(ctx->tx.rec_seq);
free_iv:
        kfree(ctx->tx.iv);
free_offload_ctx:
        kfree(offload_ctx);
        ctx->priv_ctx_tx = NULL;
free_marker_record:
        kfree(start_marker_record);
out:
        return rc;
}

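/* NETDEV_DOWN handler: under the write side of device_offload_lock (which
 * blocks new offload attempts), detach every context bound to @netdev,
 * tell the driver to drop its state via tls_dev_del(), and free any
 * context whose last reference is gone.
 */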
static int tls_device_down(struct net_device *netdev)
{
        struct tls_context *ctx, *tmp;
        unsigned long flags;
        LIST_HEAD(list);

        /* Request a write lock to block new offload attempts */
        down_write(&device_offload_lock);

        spin_lock_irqsave(&tls_device_lock, flags);
        list_for_each_entry_safe(ctx, tmp, &tls_device_list, list) {
                if (ctx->netdev != netdev ||
                    !refcount_inc_not_zero(&ctx->refcount))
                        continue;

                list_move(&ctx->list, &list);
        }
        spin_unlock_irqrestore(&tls_device_lock, flags);

        list_for_each_entry_safe(ctx, tmp, &list, list) {
                netdev->tlsdev_ops->tls_dev_del(netdev, ctx,
                                                TLS_OFFLOAD_CTX_DIR_TX);
                ctx->netdev = NULL;
                dev_put(netdev);
                list_del_init(&ctx->list);

                if (refcount_dec_and_test(&ctx->refcount))
                        tls_device_free_ctx(ctx);
        }

        up_write(&device_offload_lock);

        flush_work(&tls_device_gc_work);

        return NOTIFY_DONE;
}

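/* Netdevice notifier: a device advertising NETIF_F_HW_TLS_TX must provide
 * both tls_dev_add and tls_dev_del in its tlsdev_ops (checked on REGISTER
 * and FEAT_CHANGE); NETDEV_DOWN triggers teardown of all offloaded
 * contexts on that device.
 */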
static int tls_dev_event(struct notifier_block *this, unsigned long event,
                         void *ptr)
{
        struct net_device *dev = netdev_notifier_info_to_dev(ptr);

        if (!(dev->features & NETIF_F_HW_TLS_TX))
                return NOTIFY_DONE;

        switch (event) {
        case NETDEV_REGISTER:
        case NETDEV_FEAT_CHANGE:
                if (dev->tlsdev_ops &&
                    dev->tlsdev_ops->tls_dev_add &&
                    dev->tlsdev_ops->tls_dev_del)
                        return NOTIFY_DONE;
                else
                        return NOTIFY_BAD;
        case NETDEV_DOWN:
                return tls_device_down(dev);
        }
        return NOTIFY_DONE;
}

static struct notifier_block tls_dev_notifier = {
        .notifier_call = tls_dev_event,
};

void __init tls_device_init(void)
{
        register_netdevice_notifier(&tls_dev_notifier);
}

void __exit tls_device_cleanup(void)
{
        unregister_netdevice_notifier(&tls_dev_notifier);
        flush_work(&tls_device_gc_work);
}