/*
 * diag.c — netlink socket diagnostics: sock_diag handler for AF_NETLINK
 * sockets, answering NETLINK_SOCK_DIAG dump requests from userspace.
 */
  1. #include <linux/module.h>
  2. #include <net/sock.h>
  3. #include <linux/netlink.h>
  4. #include <linux/sock_diag.h>
  5. #include <linux/netlink_diag.h>
  6. #include <linux/rhashtable.h>
  7. #include "af_netlink.h"
  8. #ifdef CONFIG_NETLINK_MMAP
  9. static int sk_diag_put_ring(struct netlink_ring *ring, int nl_type,
  10. struct sk_buff *nlskb)
  11. {
  12. struct netlink_diag_ring ndr;
  13. ndr.ndr_block_size = ring->pg_vec_pages << PAGE_SHIFT;
  14. ndr.ndr_block_nr = ring->pg_vec_len;
  15. ndr.ndr_frame_size = ring->frame_size;
  16. ndr.ndr_frame_nr = ring->frame_max + 1;
  17. return nla_put(nlskb, nl_type, sizeof(ndr), &ndr);
  18. }
  19. static int sk_diag_put_rings_cfg(struct sock *sk, struct sk_buff *nlskb)
  20. {
  21. struct netlink_sock *nlk = nlk_sk(sk);
  22. int ret;
  23. mutex_lock(&nlk->pg_vec_lock);
  24. ret = sk_diag_put_ring(&nlk->rx_ring, NETLINK_DIAG_RX_RING, nlskb);
  25. if (!ret)
  26. ret = sk_diag_put_ring(&nlk->tx_ring, NETLINK_DIAG_TX_RING,
  27. nlskb);
  28. mutex_unlock(&nlk->pg_vec_lock);
  29. return ret;
  30. }
  31. #else
  32. static int sk_diag_put_rings_cfg(struct sock *sk, struct sk_buff *nlskb)
  33. {
  34. return 0;
  35. }
  36. #endif
  37. static int sk_diag_dump_groups(struct sock *sk, struct sk_buff *nlskb)
  38. {
  39. struct netlink_sock *nlk = nlk_sk(sk);
  40. if (nlk->groups == NULL)
  41. return 0;
  42. return nla_put(nlskb, NETLINK_DIAG_GROUPS, NLGRPSZ(nlk->ngroups),
  43. nlk->groups);
  44. }
  45. static int sk_diag_fill(struct sock *sk, struct sk_buff *skb,
  46. struct netlink_diag_req *req,
  47. u32 portid, u32 seq, u32 flags, int sk_ino)
  48. {
  49. struct nlmsghdr *nlh;
  50. struct netlink_diag_msg *rep;
  51. struct netlink_sock *nlk = nlk_sk(sk);
  52. nlh = nlmsg_put(skb, portid, seq, SOCK_DIAG_BY_FAMILY, sizeof(*rep),
  53. flags);
  54. if (!nlh)
  55. return -EMSGSIZE;
  56. rep = nlmsg_data(nlh);
  57. rep->ndiag_family = AF_NETLINK;
  58. rep->ndiag_type = sk->sk_type;
  59. rep->ndiag_protocol = sk->sk_protocol;
  60. rep->ndiag_state = sk->sk_state;
  61. rep->ndiag_ino = sk_ino;
  62. rep->ndiag_portid = nlk->portid;
  63. rep->ndiag_dst_portid = nlk->dst_portid;
  64. rep->ndiag_dst_group = nlk->dst_group;
  65. sock_diag_save_cookie(sk, rep->ndiag_cookie);
  66. if ((req->ndiag_show & NDIAG_SHOW_GROUPS) &&
  67. sk_diag_dump_groups(sk, skb))
  68. goto out_nlmsg_trim;
  69. if ((req->ndiag_show & NDIAG_SHOW_MEMINFO) &&
  70. sock_diag_put_meminfo(sk, skb, NETLINK_DIAG_MEMINFO))
  71. goto out_nlmsg_trim;
  72. if ((req->ndiag_show & NDIAG_SHOW_RING_CFG) &&
  73. sk_diag_put_rings_cfg(sk, skb))
  74. goto out_nlmsg_trim;
  75. return nlmsg_end(skb, nlh);
  76. out_nlmsg_trim:
  77. nlmsg_cancel(skb, nlh);
  78. return -EMSGSIZE;
  79. }
/*
 * Dump the sockets of one netlink @protocol into @skb, resuming at
 * candidate number @s_num.
 *
 * Two populations are walked in a fixed order: first the per-protocol
 * rhashtable of port-bound sockets, then tbl->mc_list for sockets bound
 * only to multicast groups (the sk_hashed() test skips sockets already
 * covered by the hash walk).  @num counts every candidate in walk order,
 * including skipped ones, so cb->args[0] is a stable resume cursor for a
 * later dump round.
 *
 * Returns 1 if @skb filled up mid-walk (sk_diag_fill() < 0) and the
 * caller should stop here, 0 once this protocol is exhausted.
 *
 * NOTE(review): rht_dereference() asserts that the hash table pointer is
 * stable; that appears to rely on the caller holding nl_sk_hash_lock for
 * the whole dump — confirm against netlink_diag_dump().
 */
static int __netlink_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
			       int protocol, int s_num)
{
	struct netlink_table *tbl = &nl_table[protocol];
	struct rhashtable *ht = &tbl->hash;
	const struct bucket_table *htbl = rht_dereference(ht->tbl, ht);
	struct net *net = sock_net(skb->sk);
	struct netlink_diag_req *req;
	struct netlink_sock *nlsk;
	struct sock *sk;
	int ret = 0, num = 0, i;

	req = nlmsg_data(cb->nlh);

	/* Pass 1: every bucket of the port-hashed socket table. */
	for (i = 0; i < htbl->size; i++) {
		rht_for_each_entry(nlsk, htbl->buckets[i], ht, node) {
			sk = (struct sock *)nlsk;

			/* only report sockets from the requester's netns */
			if (!net_eq(sock_net(sk), net))
				continue;
			/* already emitted in a previous round: count and skip */
			if (num < s_num) {
				num++;
				continue;
			}

			if (sk_diag_fill(sk, skb, req,
					 NETLINK_CB(cb->skb).portid,
					 cb->nlh->nlmsg_seq,
					 NLM_F_MULTI,
					 sock_i_ino(sk)) < 0) {
				ret = 1;
				goto done;
			}

			num++;
		}
	}

	/* Pass 2: multicast-only sockets that never got a port hash entry. */
	sk_for_each_bound(sk, &tbl->mc_list) {
		if (sk_hashed(sk))
			continue;
		if (!net_eq(sock_net(sk), net))
			continue;
		if (num < s_num) {
			num++;
			continue;
		}

		if (sk_diag_fill(sk, skb, req,
				 NETLINK_CB(cb->skb).portid,
				 cb->nlh->nlmsg_seq,
				 NLM_F_MULTI,
				 sock_i_ino(sk)) < 0) {
			ret = 1;
			goto done;
		}
		num++;
	}

done:
	/* Persist the resume cursor and protocol for the next dump call. */
	cb->args[0] = num;
	cb->args[1] = protocol;

	return ret;
}
  136. static int netlink_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
  137. {
  138. struct netlink_diag_req *req;
  139. int s_num = cb->args[0];
  140. req = nlmsg_data(cb->nlh);
  141. mutex_lock(&nl_sk_hash_lock);
  142. read_lock(&nl_table_lock);
  143. if (req->sdiag_protocol == NDIAG_PROTO_ALL) {
  144. int i;
  145. for (i = cb->args[1]; i < MAX_LINKS; i++) {
  146. if (__netlink_diag_dump(skb, cb, i, s_num))
  147. break;
  148. s_num = 0;
  149. }
  150. } else {
  151. if (req->sdiag_protocol >= MAX_LINKS) {
  152. read_unlock(&nl_table_lock);
  153. mutex_unlock(&nl_sk_hash_lock);
  154. return -ENOENT;
  155. }
  156. __netlink_diag_dump(skb, cb, req->sdiag_protocol, s_num);
  157. }
  158. read_unlock(&nl_table_lock);
  159. mutex_unlock(&nl_sk_hash_lock);
  160. return skb->len;
  161. }
  162. static int netlink_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
  163. {
  164. int hdrlen = sizeof(struct netlink_diag_req);
  165. struct net *net = sock_net(skb->sk);
  166. if (nlmsg_len(h) < hdrlen)
  167. return -EINVAL;
  168. if (h->nlmsg_flags & NLM_F_DUMP) {
  169. struct netlink_dump_control c = {
  170. .dump = netlink_diag_dump,
  171. };
  172. return netlink_dump_start(net->diag_nlsk, skb, h, &c);
  173. } else
  174. return -EOPNOTSUPP;
  175. }
/* sock_diag dispatch entry: routes AF_NETLINK diag requests to this module. */
static const struct sock_diag_handler netlink_diag_handler = {
	.family = AF_NETLINK,
	.dump = netlink_diag_handler_dump,
};
/* Register the AF_NETLINK handler with the sock_diag core on module load. */
static int __init netlink_diag_init(void)
{
	return sock_diag_register(&netlink_diag_handler);
}

/* Unregister the handler on module unload. */
static void __exit netlink_diag_exit(void)
{
	sock_diag_unregister(&netlink_diag_handler);
}

module_init(netlink_diag_init);
module_exit(netlink_diag_exit);
MODULE_LICENSE("GPL");
/* Module autoload alias for NETLINK_SOCK_DIAG requests; the literal 16 is
 * AF_NETLINK per the inline comment. */
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 16 /* AF_NETLINK */);