/* diag.c */

#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/sock_diag.h>
#include <linux/unix_diag.h>
#include <linux/skbuff.h>
#include <linux/module.h>
#include <net/netlink.h>
#include <net/af_unix.h>
#include <net/tcp_states.h>

#define UNIX_DIAG_PUT(skb, attrtype, attrlen) \
        RTA_DATA(__RTA_PUT(skb, attrtype, attrlen))

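/* Dump the socket's bound name (sun_path, without the address family field)
 * as a UNIX_DIAG_NAME attribute, if the socket is bound. */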
static int sk_diag_dump_name(struct sock *sk, struct sk_buff *nlskb)
{
        struct unix_address *addr = unix_sk(sk)->addr;
        char *s;

        if (addr) {
                s = UNIX_DIAG_PUT(nlskb, UNIX_DIAG_NAME, addr->len - sizeof(short));
                memcpy(s, addr->name->sun_path, addr->len - sizeof(short));
        }

        return 0;

rtattr_failure:
        return -EMSGSIZE;
}

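/* Dump the inode number and device of the on-disk socket file
 * (UNIX_DIAG_VFS) for filesystem-bound sockets. */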
static int sk_diag_dump_vfs(struct sock *sk, struct sk_buff *nlskb)
{
        struct dentry *dentry = unix_sk(sk)->dentry;
        struct unix_diag_vfs *uv;

        if (dentry) {
                uv = UNIX_DIAG_PUT(nlskb, UNIX_DIAG_VFS, sizeof(*uv));
                uv->udiag_vfs_ino = dentry->d_inode->i_ino;
                uv->udiag_vfs_dev = dentry->d_sb->s_dev;
        }

        return 0;

rtattr_failure:
        return -EMSGSIZE;
}

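/* Dump the peer socket's inode number (UNIX_DIAG_PEER), sampled under the
 * peer's state lock, for connected sockets. */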
static int sk_diag_dump_peer(struct sock *sk, struct sk_buff *nlskb)
{
        struct sock *peer;
        int ino;

        peer = unix_peer_get(sk);
        if (peer) {
                unix_state_lock(peer);
                ino = sock_i_ino(peer);
                unix_state_unlock(peer);
                sock_put(peer);

                RTA_PUT_U32(nlskb, UNIX_DIAG_PEER, ino);
        }

        return 0;

rtattr_failure:
        return -EMSGSIZE;
}

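/* For listening sockets, dump the inode numbers of the peers of all pending
 * (not yet accepted) connections as a UNIX_DIAG_ICONS array of u32s. */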
static int sk_diag_dump_icons(struct sock *sk, struct sk_buff *nlskb)
{
        struct sk_buff *skb;
        u32 *buf;
        int i;

        if (sk->sk_state == TCP_LISTEN) {
                spin_lock(&sk->sk_receive_queue.lock);
                buf = UNIX_DIAG_PUT(nlskb, UNIX_DIAG_ICONS,
                                sk->sk_receive_queue.qlen * sizeof(u32));
                i = 0;
                skb_queue_walk(&sk->sk_receive_queue, skb) {
                        struct sock *req, *peer;

                        req = skb->sk;
                        /*
                         * The state lock is outer for the same sk's
                         * queue lock. With the other's queue locked it's
                         * OK to lock the state.
                         */
                        unix_state_lock_nested(req);
                        peer = unix_sk(req)->peer;
                        if (peer)
                                buf[i++] = sock_i_ino(peer);
                        unix_state_unlock(req);
                }
                spin_unlock(&sk->sk_receive_queue.lock);
        }

        return 0;

rtattr_failure:
        spin_unlock(&sk->sk_receive_queue.lock);
        return -EMSGSIZE;
}

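/* Dump the current receive queue length (UNIX_DIAG_RQLEN). */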
static int sk_diag_show_rqlen(struct sock *sk, struct sk_buff *nlskb)
{
        RTA_PUT_U32(nlskb, UNIX_DIAG_RQLEN, sk->sk_receive_queue.qlen);
        return 0;

rtattr_failure:
        return -EMSGSIZE;
}

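/*
 * Build one SOCK_DIAG_BY_FAMILY netlink message for @sk, attaching the
 * optional attributes requested via req->udiag_show. If the reply skb
 * overflows, the partial message is trimmed and -EMSGSIZE is returned.
 */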
static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, struct unix_diag_req *req,
                u32 pid, u32 seq, u32 flags, int sk_ino)
{
        unsigned char *b = skb_tail_pointer(skb);
        struct nlmsghdr *nlh;
        struct unix_diag_msg *rep;

        nlh = NLMSG_PUT(skb, pid, seq, SOCK_DIAG_BY_FAMILY, sizeof(*rep));
        nlh->nlmsg_flags = flags;

        rep = NLMSG_DATA(nlh);

        rep->udiag_family = AF_UNIX;
        rep->udiag_type = sk->sk_type;
        rep->udiag_state = sk->sk_state;
        rep->udiag_ino = sk_ino;
        sock_diag_save_cookie(sk, rep->udiag_cookie);

        if ((req->udiag_show & UDIAG_SHOW_NAME) &&
            sk_diag_dump_name(sk, skb))
                goto nlmsg_failure;

        if ((req->udiag_show & UDIAG_SHOW_VFS) &&
            sk_diag_dump_vfs(sk, skb))
                goto nlmsg_failure;

        if ((req->udiag_show & UDIAG_SHOW_PEER) &&
            sk_diag_dump_peer(sk, skb))
                goto nlmsg_failure;

        if ((req->udiag_show & UDIAG_SHOW_ICONS) &&
            sk_diag_dump_icons(sk, skb))
                goto nlmsg_failure;

        if ((req->udiag_show & UDIAG_SHOW_RQLEN) &&
            sk_diag_show_rqlen(sk, skb))
                goto nlmsg_failure;

        nlh->nlmsg_len = skb_tail_pointer(skb) - b;
        return skb->len;

nlmsg_failure:
        nlmsg_trim(skb, b);
        return -EMSGSIZE;
}

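/* Sample the socket's inode under its state lock; sockets that report no
 * inode are skipped, otherwise one diag record is emitted. */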
static int sk_diag_dump(struct sock *sk, struct sk_buff *skb, struct unix_diag_req *req,
                u32 pid, u32 seq, u32 flags)
{
        int sk_ino;

        unix_state_lock(sk);
        sk_ino = sock_i_ino(sk);
        unix_state_unlock(sk);

        if (!sk_ino)
                return 0;

        return sk_diag_fill(sk, skb, req, pid, seq, flags, sk_ino);
}

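/*
 * Netlink dump callback: walk the unix socket hash table and emit one record
 * per socket whose state matches req->udiag_states, resuming from the
 * slot/position saved in cb->args[] on a previous pass.
 */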
static int unix_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
        struct unix_diag_req *req;
        int num, s_num, slot, s_slot;

        req = NLMSG_DATA(cb->nlh);

        s_slot = cb->args[0];
        num = s_num = cb->args[1];

        spin_lock(&unix_table_lock);
        for (slot = s_slot; slot <= UNIX_HASH_SIZE; s_num = 0, slot++) {
                struct sock *sk;
                struct hlist_node *node;

                num = 0;
                sk_for_each(sk, node, &unix_socket_table[slot]) {
                        if (num < s_num)
                                goto next;
                        if (!(req->udiag_states & (1 << sk->sk_state)))
                                goto next;
                        if (sk_diag_dump(sk, skb, req,
                                         NETLINK_CB(cb->skb).pid,
                                         cb->nlh->nlmsg_seq,
                                         NLM_F_MULTI) < 0)
                                goto done;
next:
                        num++;
                }
        }
done:
        spin_unlock(&unix_table_lock);
        cb->args[0] = slot;
        cb->args[1] = num;

        return skb->len;
}

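/* Linear scan of the unix socket hash table for a socket with the given
 * inode number; returns it with an extra reference held, or NULL. */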
static struct sock *unix_lookup_by_ino(int ino)
{
        int i;
        struct sock *sk;

        spin_lock(&unix_table_lock);
        for (i = 0; i <= UNIX_HASH_SIZE; i++) {
                struct hlist_node *node;

                sk_for_each(sk, node, &unix_socket_table[i])
                        if (ino == sock_i_ino(sk)) {
                                sock_hold(sk);
                                spin_unlock(&unix_table_lock);

                                return sk;
                        }
        }

        spin_unlock(&unix_table_lock);
        return NULL;
}

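/*
 * Answer a non-dump request for a single socket identified by inode (and
 * checked against the supplied cookie), growing the reply skb in 256-byte
 * steps until the record fits or PAGE_SIZE is reached.
 */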
static int unix_diag_get_exact(struct sk_buff *in_skb,
                               const struct nlmsghdr *nlh,
                               struct unix_diag_req *req)
{
        int err = -EINVAL;
        struct sock *sk;
        struct sk_buff *rep;
        unsigned int extra_len;

        if (req->udiag_ino == 0)
                goto out_nosk;

        sk = unix_lookup_by_ino(req->udiag_ino);
        err = -ENOENT;
        if (sk == NULL)
                goto out_nosk;

        err = sock_diag_check_cookie(sk, req->udiag_cookie);
        if (err)
                goto out;

        extra_len = 256;
again:
        err = -ENOMEM;
        rep = alloc_skb(NLMSG_SPACE((sizeof(struct unix_diag_msg) + extra_len)),
                        GFP_KERNEL);
        if (!rep)
                goto out;

        err = sk_diag_fill(sk, rep, req, NETLINK_CB(in_skb).pid,
                           nlh->nlmsg_seq, 0, req->udiag_ino);
        if (err < 0) {
                kfree_skb(rep);
                extra_len += 256;
                if (extra_len >= PAGE_SIZE)
                        goto out;

                goto again;
        }
        err = netlink_unicast(sock_diag_nlsk, rep, NETLINK_CB(in_skb).pid,
                              MSG_DONTWAIT);
        if (err > 0)
                err = 0;
out:
        if (sk)
                sock_put(sk);
out_nosk:
        return err;
}

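/* sock_diag entry point for AF_UNIX: start a netlink dump for NLM_F_DUMP
 * requests, otherwise answer a single-socket query. */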
static int unix_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
{
        int hdrlen = sizeof(struct unix_diag_req);

        if (nlmsg_len(h) < hdrlen)
                return -EINVAL;

        if (h->nlmsg_flags & NLM_F_DUMP)
                return netlink_dump_start(sock_diag_nlsk, skb, h,
                                          unix_diag_dump, NULL, 0);
        else
                return unix_diag_get_exact(skb, h, (struct unix_diag_req *)NLMSG_DATA(h));
}

static struct sock_diag_handler unix_diag_handler = {
        .family = AF_UNIX,
        .dump = unix_diag_handler_dump,
};

static int __init unix_diag_init(void)
{
        return sock_diag_register(&unix_diag_handler);
}

static void __exit unix_diag_exit(void)
{
        sock_diag_unregister(&unix_diag_handler);
}

module_init(unix_diag_init);
module_exit(unix_diag_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 1 /* AF_LOCAL */);