sock_diag.c

/* License: GPL */
#include <linux/mutex.h>
#include <linux/socket.h>
#include <linux/skbuff.h>
#include <net/netlink.h>
#include <net/net_namespace.h>
#include <linux/module.h>
#include <net/sock.h>
#include <linux/kernel.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>

#include <linux/inet_diag.h>
#include <linux/sock_diag.h>

static const struct sock_diag_handler *sock_diag_handlers[AF_MAX];
static int (*inet_rcv_compat)(struct sk_buff *skb, struct nlmsghdr *nlh);
static DEFINE_MUTEX(sock_diag_table_mutex);
static struct workqueue_struct *broadcast_wq;
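
/* Return the socket's 64-bit cookie, generating one on first use: take
 * the next value from the per-netns counter and install it with
 * cmpxchg() so that concurrent callers all observe the same cookie.
 */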
u64 sock_gen_cookie(struct sock *sk)
{
        while (1) {
                u64 res = atomic64_read(&sk->sk_cookie);

                if (res)
                        return res;
                res = atomic64_inc_return(&sock_net(sk)->cookie_gen);
                atomic64_cmpxchg(&sk->sk_cookie, 0, res);
        }
}
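
/* Match a user-supplied two-word cookie against the socket's cookie;
 * INET_DIAG_NOCOOKIE in both words matches any socket.
 */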
int sock_diag_check_cookie(struct sock *sk, const __u32 *cookie)
{
        u64 res;

        if (cookie[0] == INET_DIAG_NOCOOKIE && cookie[1] == INET_DIAG_NOCOOKIE)
                return 0;

        res = sock_gen_cookie(sk);
        if ((u32)res != cookie[0] || (u32)(res >> 32) != cookie[1])
                return -ESTALE;

        return 0;
}
EXPORT_SYMBOL_GPL(sock_diag_check_cookie);
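
/* Split the 64-bit socket cookie into the two 32-bit words used on the wire. */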
void sock_diag_save_cookie(struct sock *sk, __u32 *cookie)
{
        u64 res = sock_gen_cookie(sk);

        cookie[0] = (u32)res;
        cookie[1] = (u32)(res >> 32);
}
EXPORT_SYMBOL_GPL(sock_diag_save_cookie);
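
/* Dump the socket's memory accounting counters as a single netlink attribute. */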
int sock_diag_put_meminfo(struct sock *sk, struct sk_buff *skb, int attrtype)
{
        u32 mem[SK_MEMINFO_VARS];

        sk_get_meminfo(sk, mem);

        return nla_put(skb, attrtype, sizeof(mem), &mem);
}
EXPORT_SYMBOL_GPL(sock_diag_put_meminfo);
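
/* Report the socket's attached classic BPF filter program, read under
 * RCU.  Callers that may not see the filter get an empty attribute
 * instead.
 */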
int sock_diag_put_filterinfo(bool may_report_filterinfo, struct sock *sk,
                             struct sk_buff *skb, int attrtype)
{
        struct sock_fprog_kern *fprog;
        struct sk_filter *filter;
        struct nlattr *attr;
        unsigned int flen;
        int err = 0;

        if (!may_report_filterinfo) {
                nla_reserve(skb, attrtype, 0);
                return 0;
        }

        rcu_read_lock();
        filter = rcu_dereference(sk->sk_filter);
        if (!filter)
                goto out;

        fprog = filter->prog->orig_prog;
        if (!fprog)
                goto out;

        flen = bpf_classic_proglen(fprog);

        attr = nla_reserve(skb, attrtype, flen);
        if (attr == NULL) {
                err = -EMSGSIZE;
                goto out;
        }

        memcpy(nla_data(attr), fprog->filter, flen);
out:
        rcu_read_unlock();
        return err;
}
EXPORT_SYMBOL(sock_diag_put_filterinfo);
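
/* Destroy notifications are broadcast from a workqueue so that the
 * netlink multicast, which may sleep, runs in process context.
 */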
struct broadcast_sk {
        struct sock *sk;
        struct work_struct work;
};

static size_t sock_diag_nlmsg_size(void)
{
        return NLMSG_ALIGN(sizeof(struct inet_diag_msg)
               + nla_total_size(sizeof(u8)) /* INET_DIAG_PROTOCOL */
               + nla_total_size_64bit(sizeof(struct tcp_info))); /* INET_DIAG_INFO */
}
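
/* Fill in a per-family diag message for the dying socket and multicast
 * it to the matching destroy group, then finish destruction.
 */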
static void sock_diag_broadcast_destroy_work(struct work_struct *work)
{
        struct broadcast_sk *bsk =
                container_of(work, struct broadcast_sk, work);
        struct sock *sk = bsk->sk;
        const struct sock_diag_handler *hndl;
        struct sk_buff *skb;
        const enum sknetlink_groups group = sock_diag_destroy_group(sk);
        int err = -1;

        WARN_ON(group == SKNLGRP_NONE);

        skb = nlmsg_new(sock_diag_nlmsg_size(), GFP_KERNEL);
        if (!skb)
                goto out;

        mutex_lock(&sock_diag_table_mutex);
        hndl = sock_diag_handlers[sk->sk_family];
        if (hndl && hndl->get_info)
                err = hndl->get_info(skb, sk);
        mutex_unlock(&sock_diag_table_mutex);

        if (!err)
                nlmsg_multicast(sock_net(sk)->diag_nlsk, skb, 0, group,
                                GFP_KERNEL);
        else
                kfree_skb(skb);
out:
        sk_destruct(sk);
        kfree(bsk);
}
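
/* Defer the destroy notification to the workqueue; if the allocation
 * fails, just destruct the socket without notifying listeners.
 */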
void sock_diag_broadcast_destroy(struct sock *sk)
{
        /* Note, this function is often called from an interrupt context. */
        struct broadcast_sk *bsk =
                kmalloc(sizeof(struct broadcast_sk), GFP_ATOMIC);

        if (!bsk)
                return sk_destruct(sk);
        bsk->sk = sk;
        INIT_WORK(&bsk->work, sock_diag_broadcast_destroy_work);
        queue_work(broadcast_wq, &bsk->work);
}
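
/* Hooks for the legacy TCPDIAG_GETSOCK/DCCPDIAG_GETSOCK interface,
 * provided by the inet_diag module.
 */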
void sock_diag_register_inet_compat(int (*fn)(struct sk_buff *skb, struct nlmsghdr *nlh))
{
        mutex_lock(&sock_diag_table_mutex);
        inet_rcv_compat = fn;
        mutex_unlock(&sock_diag_table_mutex);
}
EXPORT_SYMBOL_GPL(sock_diag_register_inet_compat);

void sock_diag_unregister_inet_compat(int (*fn)(struct sk_buff *skb, struct nlmsghdr *nlh))
{
        mutex_lock(&sock_diag_table_mutex);
        inet_rcv_compat = NULL;
        mutex_unlock(&sock_diag_table_mutex);
}
EXPORT_SYMBOL_GPL(sock_diag_unregister_inet_compat);
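
/* Register/unregister the diag handler for one address family; only a
 * single handler may be registered per family at a time.
 */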
int sock_diag_register(const struct sock_diag_handler *hndl)
{
        int err = 0;

        if (hndl->family >= AF_MAX)
                return -EINVAL;

        mutex_lock(&sock_diag_table_mutex);
        if (sock_diag_handlers[hndl->family])
                err = -EBUSY;
        else
                sock_diag_handlers[hndl->family] = hndl;
        mutex_unlock(&sock_diag_table_mutex);

        return err;
}
EXPORT_SYMBOL_GPL(sock_diag_register);

void sock_diag_unregister(const struct sock_diag_handler *hnld)
{
        int family = hnld->family;

        if (family >= AF_MAX)
                return;

        mutex_lock(&sock_diag_table_mutex);
        BUG_ON(sock_diag_handlers[family] != hnld);
        sock_diag_handlers[family] = NULL;
        mutex_unlock(&sock_diag_table_mutex);
}
EXPORT_SYMBOL_GPL(sock_diag_unregister);
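
/* Dispatch a SOCK_DIAG_BY_FAMILY or SOCK_DESTROY request to the
 * handler registered for the requested address family, loading the
 * matching diag module on demand.
 */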
static int __sock_diag_cmd(struct sk_buff *skb, struct nlmsghdr *nlh)
{
        int err;
        struct sock_diag_req *req = nlmsg_data(nlh);
        const struct sock_diag_handler *hndl;

        if (nlmsg_len(nlh) < sizeof(*req))
                return -EINVAL;

        if (req->sdiag_family >= AF_MAX)
                return -EINVAL;

        if (sock_diag_handlers[req->sdiag_family] == NULL)
                sock_load_diag_module(req->sdiag_family, 0);

        mutex_lock(&sock_diag_table_mutex);
        hndl = sock_diag_handlers[req->sdiag_family];
        if (hndl == NULL)
                err = -ENOENT;
        else if (nlh->nlmsg_type == SOCK_DIAG_BY_FAMILY)
                err = hndl->dump(skb, nlh);
        else if (nlh->nlmsg_type == SOCK_DESTROY && hndl->destroy)
                err = hndl->destroy(skb, nlh);
        else
                err = -EOPNOTSUPP;
        mutex_unlock(&sock_diag_table_mutex);

        return err;
}
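
/* Entry point for a single netlink message: route legacy inet_diag
 * requests through the compat hook and everything else through
 * __sock_diag_cmd().
 */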
static int sock_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
                             struct netlink_ext_ack *extack)
{
        int ret;

        switch (nlh->nlmsg_type) {
        case TCPDIAG_GETSOCK:
        case DCCPDIAG_GETSOCK:
                if (inet_rcv_compat == NULL)
                        sock_load_diag_module(AF_INET, 0);

                mutex_lock(&sock_diag_table_mutex);
                if (inet_rcv_compat != NULL)
                        ret = inet_rcv_compat(skb, nlh);
                else
                        ret = -EOPNOTSUPP;
                mutex_unlock(&sock_diag_table_mutex);

                return ret;
        case SOCK_DIAG_BY_FAMILY:
        case SOCK_DESTROY:
                return __sock_diag_cmd(skb, nlh);
        default:
                return -EINVAL;
        }
}
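
/* Serialize all requests arriving on the NETLINK_SOCK_DIAG socket. */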
static DEFINE_MUTEX(sock_diag_mutex);

static void sock_diag_rcv(struct sk_buff *skb)
{
        mutex_lock(&sock_diag_mutex);
        netlink_rcv_skb(skb, &sock_diag_rcv_msg);
        mutex_unlock(&sock_diag_mutex);
}
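
/* On subscription to one of the destroy multicast groups, make sure
 * the diag module for that family is loaded so that notifications can
 * actually be generated.
 */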
static int sock_diag_bind(struct net *net, int group)
{
        switch (group) {
        case SKNLGRP_INET_TCP_DESTROY:
        case SKNLGRP_INET_UDP_DESTROY:
                if (!sock_diag_handlers[AF_INET])
                        sock_load_diag_module(AF_INET, 0);
                break;
        case SKNLGRP_INET6_TCP_DESTROY:
        case SKNLGRP_INET6_UDP_DESTROY:
                if (!sock_diag_handlers[AF_INET6])
                        sock_load_diag_module(AF_INET6, 0);
                break;
        }
        return 0;
}
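
/* Forcibly close a socket on behalf of a privileged SOCK_DESTROY
 * request; requires CAP_NET_ADMIN in the socket's user namespace.
 */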
int sock_diag_destroy(struct sock *sk, int err)
{
        if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
                return -EPERM;

        if (!sk->sk_prot->diag_destroy)
                return -EOPNOTSUPP;

        return sk->sk_prot->diag_destroy(sk, err);
}
EXPORT_SYMBOL_GPL(sock_diag_destroy);
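
/* Create/destroy the per-network-namespace NETLINK_SOCK_DIAG socket. */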
static int __net_init diag_net_init(struct net *net)
{
        struct netlink_kernel_cfg cfg = {
                .groups = SKNLGRP_MAX,
                .input  = sock_diag_rcv,
                .bind   = sock_diag_bind,
                .flags  = NL_CFG_F_NONROOT_RECV,
        };

        net->diag_nlsk = netlink_kernel_create(net, NETLINK_SOCK_DIAG, &cfg);
        return net->diag_nlsk == NULL ? -ENOMEM : 0;
}

static void __net_exit diag_net_exit(struct net *net)
{
        netlink_kernel_release(net->diag_nlsk);
        net->diag_nlsk = NULL;
}

static struct pernet_operations diag_net_ops = {
        .init = diag_net_init,
        .exit = diag_net_exit,
};
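
/* Boot-time init: allocate the broadcast workqueue and register the
 * per-netns netlink socket.
 */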
static int __init sock_diag_init(void)
{
        broadcast_wq = alloc_workqueue("sock_diag_events", 0, 0);
        BUG_ON(!broadcast_wq);
        return register_pernet_subsys(&diag_net_ops);
}
device_initcall(sock_diag_init);