/* net/ipv4/tcp_ulp.c */
  1. /*
  2. * Pluggable TCP upper layer protocol support.
  3. *
  4. * Copyright (c) 2016-2017, Mellanox Technologies. All rights reserved.
  5. * Copyright (c) 2016-2017, Dave Watson <davejwatson@fb.com>. All rights reserved.
  6. *
  7. */
  8. #include <linux/module.h>
  9. #include <linux/mm.h>
  10. #include <linux/types.h>
  11. #include <linux/list.h>
  12. #include <linux/gfp.h>
  13. #include <net/tcp.h>
  14. static DEFINE_SPINLOCK(tcp_ulp_list_lock);
  15. static LIST_HEAD(tcp_ulp_list);
/* Simple linear search, don't expect many entries! */
static struct tcp_ulp_ops *tcp_ulp_find(const char *name)
{
	struct tcp_ulp_ops *e;

	/* Called both under tcp_ulp_list_lock (register path) and under
	 * rcu_read_lock() (lookup path), hence the RCU-safe iterator.
	 */
	list_for_each_entry_rcu(e, &tcp_ulp_list, list) {
		if (strcmp(e->name, name) == 0)
			return e;
	}

	return NULL;
}
/* Look up a ULP by name, autoloading its "tcp-ulp-<name>" module if needed,
 * and pin its owner module.  Returns NULL if the ULP is unavailable or its
 * module is being unloaded; on success the caller holds one module reference
 * and must drop it with module_put() when done.
 */
static const struct tcp_ulp_ops *__tcp_ulp_find_autoload(const char *name)
{
	const struct tcp_ulp_ops *ulp = NULL;

	rcu_read_lock();
	ulp = tcp_ulp_find(name);
#ifdef CONFIG_MODULES
	/* Only privileged callers may trigger module autoloading. */
	if (!ulp && capable(CAP_NET_ADMIN)) {
		/* request_module() may sleep; drop the RCU read lock first,
		 * then retry the lookup after the module had a chance to
		 * register itself.
		 */
		rcu_read_unlock();
		request_module("tcp-ulp-%s", name);
		rcu_read_lock();
		ulp = tcp_ulp_find(name);
	}
#endif
	/* Taking the module reference inside the RCU section ensures the
	 * entry cannot be freed (tcp_unregister_ulp() waits in
	 * synchronize_rcu()) before we have pinned its owner.
	 */
	if (!ulp || !try_module_get(ulp->owner))
		ulp = NULL;

	rcu_read_unlock();
	return ulp;
}
  44. /* Attach new upper layer protocol to the list
  45. * of available protocols.
  46. */
  47. int tcp_register_ulp(struct tcp_ulp_ops *ulp)
  48. {
  49. int ret = 0;
  50. spin_lock(&tcp_ulp_list_lock);
  51. if (tcp_ulp_find(ulp->name))
  52. ret = -EEXIST;
  53. else
  54. list_add_tail_rcu(&ulp->list, &tcp_ulp_list);
  55. spin_unlock(&tcp_ulp_list_lock);
  56. return ret;
  57. }
  58. EXPORT_SYMBOL_GPL(tcp_register_ulp);
/* Remove a ULP from the registry.  The synchronize_rcu() guarantees that
 * no RCU reader (tcp_ulp_find() / tcp_get_available_ulp()) can still be
 * traversing the entry once this returns, so the caller may free it.
 */
void tcp_unregister_ulp(struct tcp_ulp_ops *ulp)
{
	spin_lock(&tcp_ulp_list_lock);
	list_del_rcu(&ulp->list);
	spin_unlock(&tcp_ulp_list_lock);

	synchronize_rcu();
}
EXPORT_SYMBOL_GPL(tcp_unregister_ulp);
  67. /* Build string with list of available upper layer protocl values */
  68. void tcp_get_available_ulp(char *buf, size_t maxlen)
  69. {
  70. struct tcp_ulp_ops *ulp_ops;
  71. size_t offs = 0;
  72. *buf = '\0';
  73. rcu_read_lock();
  74. list_for_each_entry_rcu(ulp_ops, &tcp_ulp_list, list) {
  75. offs += snprintf(buf + offs, maxlen - offs,
  76. "%s%s",
  77. offs == 0 ? "" : " ", ulp_ops->name);
  78. }
  79. rcu_read_unlock();
  80. }
  81. void tcp_cleanup_ulp(struct sock *sk)
  82. {
  83. struct inet_connection_sock *icsk = inet_csk(sk);
  84. /* No sock_owned_by_me() check here as at the time the
  85. * stack calls this function, the socket is dead and
  86. * about to be destroyed.
  87. */
  88. if (!icsk->icsk_ulp_ops)
  89. return;
  90. if (icsk->icsk_ulp_ops->release)
  91. icsk->icsk_ulp_ops->release(sk);
  92. module_put(icsk->icsk_ulp_ops->owner);
  93. icsk->icsk_ulp_ops = NULL;
  94. }
  95. static int __tcp_set_ulp(struct sock *sk, const struct tcp_ulp_ops *ulp_ops)
  96. {
  97. struct inet_connection_sock *icsk = inet_csk(sk);
  98. int err;
  99. err = -EEXIST;
  100. if (icsk->icsk_ulp_ops)
  101. goto out_err;
  102. err = ulp_ops->init(sk);
  103. if (err)
  104. goto out_err;
  105. icsk->icsk_ulp_ops = ulp_ops;
  106. return 0;
  107. out_err:
  108. module_put(ulp_ops->owner);
  109. return err;
  110. }
  111. int tcp_set_ulp(struct sock *sk, const char *name)
  112. {
  113. const struct tcp_ulp_ops *ulp_ops;
  114. sock_owned_by_me(sk);
  115. ulp_ops = __tcp_ulp_find_autoload(name);
  116. if (!ulp_ops)
  117. return -ENOENT;
  118. return __tcp_set_ulp(sk, ulp_ops);
  119. }