/* net/smc/smc_diag.c */
  1. /*
  2. * Shared Memory Communications over RDMA (SMC-R) and RoCE
  3. *
  4. * Monitoring SMC transport protocol sockets
  5. *
  6. * Copyright IBM Corp. 2016
  7. *
  8. * Author(s): Ursula Braun <ubraun@linux.vnet.ibm.com>
  9. */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/sock_diag.h>
#include <linux/inet_diag.h>
#include <linux/smc_diag.h>
#include <net/netlink.h>
#include <net/smc.h>

#include "smc.h"
#include "smc_core.h"
  21. static void smc_gid_be16_convert(__u8 *buf, u8 *gid_raw)
  22. {
  23. sprintf(buf, "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x",
  24. be16_to_cpu(((__be16 *)gid_raw)[0]),
  25. be16_to_cpu(((__be16 *)gid_raw)[1]),
  26. be16_to_cpu(((__be16 *)gid_raw)[2]),
  27. be16_to_cpu(((__be16 *)gid_raw)[3]),
  28. be16_to_cpu(((__be16 *)gid_raw)[4]),
  29. be16_to_cpu(((__be16 *)gid_raw)[5]),
  30. be16_to_cpu(((__be16 *)gid_raw)[6]),
  31. be16_to_cpu(((__be16 *)gid_raw)[7]));
  32. }
  33. static void smc_diag_msg_common_fill(struct smc_diag_msg *r, struct sock *sk)
  34. {
  35. struct smc_sock *smc = smc_sk(sk);
  36. if (!smc->clcsock)
  37. return;
  38. r->id.idiag_sport = htons(smc->clcsock->sk->sk_num);
  39. r->id.idiag_dport = smc->clcsock->sk->sk_dport;
  40. r->id.idiag_if = smc->clcsock->sk->sk_bound_dev_if;
  41. sock_diag_save_cookie(sk, r->id.idiag_cookie);
  42. if (sk->sk_protocol == SMCPROTO_SMC) {
  43. r->diag_family = PF_INET;
  44. memset(&r->id.idiag_src, 0, sizeof(r->id.idiag_src));
  45. memset(&r->id.idiag_dst, 0, sizeof(r->id.idiag_dst));
  46. r->id.idiag_src[0] = smc->clcsock->sk->sk_rcv_saddr;
  47. r->id.idiag_dst[0] = smc->clcsock->sk->sk_daddr;
  48. #if IS_ENABLED(CONFIG_IPV6)
  49. } else if (sk->sk_protocol == SMCPROTO_SMC6) {
  50. r->diag_family = PF_INET6;
  51. memcpy(&r->id.idiag_src, &smc->clcsock->sk->sk_v6_rcv_saddr,
  52. sizeof(smc->clcsock->sk->sk_v6_rcv_saddr));
  53. memcpy(&r->id.idiag_dst, &smc->clcsock->sk->sk_v6_daddr,
  54. sizeof(smc->clcsock->sk->sk_v6_daddr));
  55. #endif
  56. }
  57. }
  58. static int smc_diag_msg_attrs_fill(struct sock *sk, struct sk_buff *skb,
  59. struct smc_diag_msg *r,
  60. struct user_namespace *user_ns)
  61. {
  62. if (nla_put_u8(skb, SMC_DIAG_SHUTDOWN, sk->sk_shutdown))
  63. return 1;
  64. r->diag_uid = from_kuid_munged(user_ns, sock_i_uid(sk));
  65. r->diag_inode = sock_i_ino(sk);
  66. return 0;
  67. }
/* Emit one complete diag record (netlink message) for a single SMC socket.
 *
 * Builds the fixed smc_diag_msg payload, then appends optional attributes
 * (fallback info, connection info, link-group info, DMB info) depending on
 * the requested extensions in req->diag_ext and on the socket's mode.
 *
 * Returns 0 on success or -EMSGSIZE when the skb is full; in the latter
 * case the partially built message is cancelled so the dump can resume in
 * a fresh skb.
 */
static int __smc_diag_dump(struct sock *sk, struct sk_buff *skb,
			   struct netlink_callback *cb,
			   const struct smc_diag_req *req,
			   struct nlattr *bc)
{
	struct smc_sock *smc = smc_sk(sk);
	struct smc_diag_fallback fallback;
	struct user_namespace *user_ns;
	struct smc_diag_msg *r;
	struct nlmsghdr *nlh;

	/* Reserve the message header + fixed payload; NLM_F_MULTI marks it
	 * as part of a multi-message dump.
	 */
	nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			cb->nlh->nlmsg_type, sizeof(*r), NLM_F_MULTI);
	if (!nlh)
		return -EMSGSIZE;
	r = nlmsg_data(nlh);
	smc_diag_msg_common_fill(r, sk);
	r->diag_state = sk->sk_state;
	/* Mode: TCP fallback beats everything; otherwise SMC-D vs. SMC-R is
	 * decided by the link group type.
	 */
	if (smc->use_fallback)
		r->diag_mode = SMC_DIAG_MODE_FALLBACK_TCP;
	else if (smc->conn.lgr && smc->conn.lgr->is_smcd)
		r->diag_mode = SMC_DIAG_MODE_SMCD;
	else
		r->diag_mode = SMC_DIAG_MODE_SMCR;
	user_ns = sk_user_ns(NETLINK_CB(cb->skb).sk);
	if (smc_diag_msg_attrs_fill(sk, skb, r, user_ns))
		goto errout;
	/* Fallback reason/diagnosis attribute is emitted unconditionally. */
	fallback.reason = smc->fallback_rsn;
	fallback.peer_diagnosis = smc->peer_diagnosis;
	if (nla_put(skb, SMC_DIAG_FALLBACK, sizeof(fallback), &fallback) < 0)
		goto errout;
	/* Connection-level cursors/buffers, only for established SMC
	 * connections (alert_token_local != 0) and when requested.
	 */
	if ((req->diag_ext & (1 << (SMC_DIAG_CONNINFO - 1))) &&
	    smc->conn.alert_token_local) {
		struct smc_connection *conn = &smc->conn;
		struct smc_diag_conninfo cinfo = {
			.token = conn->alert_token_local,
			.sndbuf_size = conn->sndbuf_desc ?
				conn->sndbuf_desc->len : 0,
			.rmbe_size = conn->rmb_desc ? conn->rmb_desc->len : 0,
			.peer_rmbe_size = conn->peer_rmbe_size,

			.rx_prod.wrap = conn->local_rx_ctrl.prod.wrap,
			.rx_prod.count = conn->local_rx_ctrl.prod.count,
			.rx_cons.wrap = conn->local_rx_ctrl.cons.wrap,
			.rx_cons.count = conn->local_rx_ctrl.cons.count,

			.tx_prod.wrap = conn->local_tx_ctrl.prod.wrap,
			.tx_prod.count = conn->local_tx_ctrl.prod.count,
			.tx_cons.wrap = conn->local_tx_ctrl.cons.wrap,
			.tx_cons.count = conn->local_tx_ctrl.cons.count,

			/* flag bitfields are exported as raw bytes */
			.tx_prod_flags =
				*(u8 *)&conn->local_tx_ctrl.prod_flags,
			.tx_conn_state_flags =
				*(u8 *)&conn->local_tx_ctrl.conn_state_flags,
			.rx_prod_flags = *(u8 *)&conn->local_rx_ctrl.prod_flags,
			.rx_conn_state_flags =
				*(u8 *)&conn->local_rx_ctrl.conn_state_flags,

			.tx_prep.wrap = conn->tx_curs_prep.wrap,
			.tx_prep.count = conn->tx_curs_prep.count,
			.tx_sent.wrap = conn->tx_curs_sent.wrap,
			.tx_sent.count = conn->tx_curs_sent.count,
			.tx_fin.wrap = conn->tx_curs_fin.wrap,
			.tx_fin.count = conn->tx_curs_fin.count,
		};

		if (nla_put(skb, SMC_DIAG_CONNINFO, sizeof(cinfo), &cinfo) < 0)
			goto errout;
	}
	/* SMC-R link group info: only when the group still lives on a list
	 * (not being torn down) and when requested.
	 */
	if (smc->conn.lgr && !smc->conn.lgr->is_smcd &&
	    (req->diag_ext & (1 << (SMC_DIAG_LGRINFO - 1))) &&
	    !list_empty(&smc->conn.lgr->list)) {
		struct smc_diag_lgrinfo linfo = {
			.role = smc->conn.lgr->role,
			.lnk[0].ibport = smc->conn.lgr->lnk[0].ibport,
			.lnk[0].link_id = smc->conn.lgr->lnk[0].link_id,
		};

		memcpy(linfo.lnk[0].ibname,
		       smc->conn.lgr->lnk[0].smcibdev->ibdev->name,
		       sizeof(smc->conn.lgr->lnk[0].smcibdev->ibdev->name));
		smc_gid_be16_convert(linfo.lnk[0].gid,
				     smc->conn.lgr->lnk[0].gid);
		smc_gid_be16_convert(linfo.lnk[0].peer_gid,
				     smc->conn.lgr->lnk[0].peer_gid);

		if (nla_put(skb, SMC_DIAG_LGRINFO, sizeof(linfo), &linfo) < 0)
			goto errout;
	}
	/* SMC-D DMB info, analogous gating for the SMC-D case. */
	if (smc->conn.lgr && smc->conn.lgr->is_smcd &&
	    (req->diag_ext & (1 << (SMC_DIAG_DMBINFO - 1))) &&
	    !list_empty(&smc->conn.lgr->list)) {
		struct smc_connection *conn = &smc->conn;
		struct smcd_diag_dmbinfo dinfo = {
			.linkid = *((u32 *)conn->lgr->id),
			.peer_gid = conn->lgr->peer_gid,
			.my_gid = conn->lgr->smcd->local_gid,
			.token = conn->rmb_desc->token,
			.peer_token = conn->peer_token
		};

		if (nla_put(skb, SMC_DIAG_DMBINFO, sizeof(dinfo), &dinfo) < 0)
			goto errout;
	}

	nlmsg_end(skb, nlh);
	return 0;

errout:
	/* Roll back the partial message so the dump can retry cleanly. */
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
  170. static int smc_diag_dump_proto(struct proto *prot, struct sk_buff *skb,
  171. struct netlink_callback *cb)
  172. {
  173. struct net *net = sock_net(skb->sk);
  174. struct nlattr *bc = NULL;
  175. struct hlist_head *head;
  176. struct sock *sk;
  177. int rc = 0;
  178. read_lock(&prot->h.smc_hash->lock);
  179. head = &prot->h.smc_hash->ht;
  180. if (hlist_empty(head))
  181. goto out;
  182. sk_for_each(sk, head) {
  183. if (!net_eq(sock_net(sk), net))
  184. continue;
  185. rc = __smc_diag_dump(sk, skb, cb, nlmsg_data(cb->nlh), bc);
  186. if (rc)
  187. break;
  188. }
  189. out:
  190. read_unlock(&prot->h.smc_hash->lock);
  191. return rc;
  192. }
  193. static int smc_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
  194. {
  195. int rc = 0;
  196. rc = smc_diag_dump_proto(&smc_proto, skb, cb);
  197. if (!rc)
  198. rc = smc_diag_dump_proto(&smc_proto6, skb, cb);
  199. return rc;
  200. }
  201. static int smc_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
  202. {
  203. struct net *net = sock_net(skb->sk);
  204. if (h->nlmsg_type == SOCK_DIAG_BY_FAMILY &&
  205. h->nlmsg_flags & NLM_F_DUMP) {
  206. {
  207. struct netlink_dump_control c = {
  208. .dump = smc_diag_dump,
  209. .min_dump_alloc = SKB_WITH_OVERHEAD(32768),
  210. };
  211. return netlink_dump_start(net->diag_nlsk, skb, h, &c);
  212. }
  213. }
  214. return 0;
  215. }
/* sock_diag registration record tying AF_SMC requests to our handler */
static const struct sock_diag_handler smc_diag_handler = {
	.family = AF_SMC,
	.dump = smc_diag_handler_dump,
};
/* Module init: register the AF_SMC handler with the sock_diag core. */
static int __init smc_diag_init(void)
{
	return sock_diag_register(&smc_diag_handler);
}
/* Module exit: unregister the AF_SMC sock_diag handler. */
static void __exit smc_diag_exit(void)
{
	sock_diag_unregister(&smc_diag_handler);
}
module_init(smc_diag_init);
module_exit(smc_diag_exit);
MODULE_LICENSE("GPL");
/* auto-load this module when a NETLINK_SOCK_DIAG request for AF_SMC (43)
 * arrives; the numeric literal avoids a header dependency here
 */
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 43 /* AF_SMC */);