addr.c

/*
 * Copyright (c) 2005 Voltaire Inc. All rights reserved.
 * Copyright (c) 2002-2005, Network Appliance, Inc. All rights reserved.
 * Copyright (c) 1999-2005, Mellanox Technologies, Inc. All rights reserved.
 * Copyright (c) 2005 Intel Corporation. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/mutex.h>
#include <linux/inetdevice.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/module.h>
#include <net/arp.h>
#include <net/neighbour.h>
#include <net/route.h>
#include <net/netevent.h>
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include <rdma/ib_addr.h>
#include <rdma/ib.h>
#include <rdma/rdma_netlink.h>
#include <net/netlink.h>

#include "core_priv.h"
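
/*
 * A pending address resolution request. Requests sit on req_list ordered
 * by timeout and are retried from the addr_wq workqueue until they
 * complete, time out, or are cancelled.
 */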
struct addr_req {
        struct list_head list;
        struct sockaddr_storage src_addr;
        struct sockaddr_storage dst_addr;
        struct rdma_dev_addr *addr;
        struct rdma_addr_client *client;
        void *context;
        void (*callback)(int status, struct sockaddr *src_addr,
                         struct rdma_dev_addr *addr, void *context);
        unsigned long timeout;
        struct delayed_work work;
        int status;
        u32 seq;
};

static atomic_t ib_nl_addr_request_seq = ATOMIC_INIT(0);

static void process_req(struct work_struct *work);

static DEFINE_MUTEX(lock);
static LIST_HEAD(req_list);
static DECLARE_DELAYED_WORK(work, process_req);
static struct workqueue_struct *addr_wq;

static const struct nla_policy ib_nl_addr_policy[LS_NLA_TYPE_MAX] = {
        [LS_NLA_TYPE_DGID] = {.type = NLA_BINARY,
                              .len = sizeof(struct rdma_nla_ls_gid)},
};
static inline bool ib_nl_is_good_ip_resp(const struct nlmsghdr *nlh)
{
        struct nlattr *tb[LS_NLA_TYPE_MAX] = {};
        int ret;

        if (nlh->nlmsg_flags & RDMA_NL_LS_F_ERR)
                return false;

        ret = nla_parse(tb, LS_NLA_TYPE_MAX - 1, nlmsg_data(nlh),
                        nlmsg_len(nlh), ib_nl_addr_policy, NULL);
        if (ret)
                return false;

        return true;
}
static void ib_nl_process_good_ip_rsep(const struct nlmsghdr *nlh)
{
        const struct nlattr *head, *curr;
        union ib_gid gid;
        struct addr_req *req;
        int len, rem;
        int found = 0;

        head = (const struct nlattr *)nlmsg_data(nlh);
        len = nlmsg_len(nlh);

        nla_for_each_attr(curr, head, len, rem) {
                if (curr->nla_type == LS_NLA_TYPE_DGID)
                        memcpy(&gid, nla_data(curr), nla_len(curr));
        }

        mutex_lock(&lock);
        list_for_each_entry(req, &req_list, list) {
                if (nlh->nlmsg_seq != req->seq)
                        continue;
                /* We set the DGID part, the rest was set earlier */
                rdma_addr_set_dgid(req->addr, &gid);
                req->status = 0;
                found = 1;
                break;
        }
        mutex_unlock(&lock);

        if (!found)
                pr_info("Couldn't find request waiting for DGID: %pI6\n",
                        &gid);
}
int ib_nl_handle_ip_res_resp(struct sk_buff *skb,
                             struct nlmsghdr *nlh,
                             struct netlink_ext_ack *extack)
{
        if ((nlh->nlmsg_flags & NLM_F_REQUEST) ||
            !(NETLINK_CB(skb).sk))
                return -EPERM;

        if (ib_nl_is_good_ip_resp(nlh))
                ib_nl_process_good_ip_rsep(nlh);

        return skb->len;
}
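
/*
 * Build an RDMA_NL_LS_OP_IP_RESOLVE request for the given destination
 * address and multicast it to the RDMA_NL_GROUP_LS netlink group. The
 * caller always gets -ENODATA back so that the request stays queued and
 * is completed later by ib_nl_handle_ip_res_resp() when a response
 * arrives.
 */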
static int ib_nl_ip_send_msg(struct rdma_dev_addr *dev_addr,
                             const void *daddr,
                             u32 seq, u16 family)
{
        struct sk_buff *skb = NULL;
        struct nlmsghdr *nlh;
        struct rdma_ls_ip_resolve_header *header;
        void *data;
        size_t size;
        int attrtype;
        int len;

        if (family == AF_INET) {
                size = sizeof(struct in_addr);
                attrtype = RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_IPV4;
        } else {
                size = sizeof(struct in6_addr);
                attrtype = RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_IPV6;
        }

        len = nla_total_size(sizeof(size));
        len += NLMSG_ALIGN(sizeof(*header));

        skb = nlmsg_new(len, GFP_KERNEL);
        if (!skb)
                return -ENOMEM;

        data = ibnl_put_msg(skb, &nlh, seq, 0, RDMA_NL_LS,
                            RDMA_NL_LS_OP_IP_RESOLVE, NLM_F_REQUEST);
        if (!data) {
                nlmsg_free(skb);
                return -ENODATA;
        }

        /* Construct the family header first */
        header = skb_put(skb, NLMSG_ALIGN(sizeof(*header)));
        header->ifindex = dev_addr->bound_dev_if;
        nla_put(skb, attrtype, size, daddr);

        /* Repair the nlmsg header length */
        nlmsg_end(skb, nlh);
        rdma_nl_multicast(skb, RDMA_NL_GROUP_LS, GFP_KERNEL);

        /* Make the request retry, so when we get the response from userspace
         * we will have something.
         */
        return -ENODATA;
}
int rdma_addr_size(struct sockaddr *addr)
{
        switch (addr->sa_family) {
        case AF_INET:
                return sizeof(struct sockaddr_in);
        case AF_INET6:
                return sizeof(struct sockaddr_in6);
        case AF_IB:
                return sizeof(struct sockaddr_ib);
        default:
                return 0;
        }
}
EXPORT_SYMBOL(rdma_addr_size);
static struct rdma_addr_client self;

void rdma_addr_register_client(struct rdma_addr_client *client)
{
        atomic_set(&client->refcount, 1);
        init_completion(&client->comp);
}
EXPORT_SYMBOL(rdma_addr_register_client);

static inline void put_client(struct rdma_addr_client *client)
{
        if (atomic_dec_and_test(&client->refcount))
                complete(&client->comp);
}

void rdma_addr_unregister_client(struct rdma_addr_client *client)
{
        put_client(client);
        wait_for_completion(&client->comp);
}
EXPORT_SYMBOL(rdma_addr_unregister_client);
void rdma_copy_addr(struct rdma_dev_addr *dev_addr,
                    const struct net_device *dev,
                    const unsigned char *dst_dev_addr)
{
        dev_addr->dev_type = dev->type;
        memcpy(dev_addr->src_dev_addr, dev->dev_addr, MAX_ADDR_LEN);
        memcpy(dev_addr->broadcast, dev->broadcast, MAX_ADDR_LEN);
        if (dst_dev_addr)
                memcpy(dev_addr->dst_dev_addr, dst_dev_addr, MAX_ADDR_LEN);
        dev_addr->bound_dev_if = dev->ifindex;
}
EXPORT_SYMBOL(rdma_copy_addr);
int rdma_translate_ip(const struct sockaddr *addr,
                      struct rdma_dev_addr *dev_addr)
{
        struct net_device *dev;

        if (dev_addr->bound_dev_if) {
                dev = dev_get_by_index(dev_addr->net, dev_addr->bound_dev_if);
                if (!dev)
                        return -ENODEV;
                rdma_copy_addr(dev_addr, dev, NULL);
                dev_put(dev);
                return 0;
        }

        switch (addr->sa_family) {
        case AF_INET:
                dev = ip_dev_find(dev_addr->net,
                        ((const struct sockaddr_in *)addr)->sin_addr.s_addr);
                if (!dev)
                        return -EADDRNOTAVAIL;
                rdma_copy_addr(dev_addr, dev, NULL);
                dev_put(dev);
                break;
#if IS_ENABLED(CONFIG_IPV6)
        case AF_INET6:
                rcu_read_lock();
                for_each_netdev_rcu(dev_addr->net, dev) {
                        if (ipv6_chk_addr(dev_addr->net,
                                          &((const struct sockaddr_in6 *)addr)->sin6_addr,
                                          dev, 1)) {
                                rdma_copy_addr(dev_addr, dev, NULL);
                                break;
                        }
                }
                rcu_read_unlock();
                break;
#endif
        }
        return 0;
}
EXPORT_SYMBOL(rdma_translate_ip);
static void set_timeout(struct delayed_work *delayed_work, unsigned long time)
{
        unsigned long delay;

        delay = time - jiffies;
        if ((long)delay < 0)
                delay = 0;

        mod_delayed_work(addr_wq, delayed_work, delay);
}

static void queue_req(struct addr_req *req)
{
        struct addr_req *temp_req;

        mutex_lock(&lock);
        list_for_each_entry_reverse(temp_req, &req_list, list) {
                if (time_after_eq(req->timeout, temp_req->timeout))
                        break;
        }

        list_add(&req->list, &temp_req->list);
        set_timeout(&req->work, req->timeout);
        mutex_unlock(&lock);
}
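
/*
 * Hardware address lookup: when the route goes through an IB router
 * (gateway + ARPHRD_INFINIBAND), the DGID must be supplied by a netlink
 * listener on RDMA_NL_GROUP_LS; otherwise the neighbour table supplies
 * the destination hardware address directly.
 */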
static int ib_nl_fetch_ha(struct dst_entry *dst, struct rdma_dev_addr *dev_addr,
                          const void *daddr, u32 seq, u16 family)
{
        if (rdma_nl_chk_listeners(RDMA_NL_GROUP_LS))
                return -EADDRNOTAVAIL;

        /* We fill in what we can, the response will fill the rest */
        rdma_copy_addr(dev_addr, dst->dev, NULL);
        return ib_nl_ip_send_msg(dev_addr, daddr, seq, family);
}

static int dst_fetch_ha(struct dst_entry *dst, struct rdma_dev_addr *dev_addr,
                        const void *daddr)
{
        struct neighbour *n;
        int ret = 0;

        n = dst_neigh_lookup(dst, daddr);

        rcu_read_lock();
        if (!n || !(n->nud_state & NUD_VALID)) {
                if (n)
                        neigh_event_send(n, NULL);
                ret = -ENODATA;
        } else {
                rdma_copy_addr(dev_addr, dst->dev, n->ha);
        }
        rcu_read_unlock();

        if (n)
                neigh_release(n);

        return ret;
}

static bool has_gateway(struct dst_entry *dst, sa_family_t family)
{
        struct rtable *rt;
        struct rt6_info *rt6;

        if (family == AF_INET) {
                rt = container_of(dst, struct rtable, dst);
                return rt->rt_uses_gateway;
        }

        rt6 = container_of(dst, struct rt6_info, dst);
        return rt6->rt6i_flags & RTF_GATEWAY;
}

static int fetch_ha(struct dst_entry *dst, struct rdma_dev_addr *dev_addr,
                    const struct sockaddr *dst_in, u32 seq)
{
        const struct sockaddr_in *dst_in4 =
                (const struct sockaddr_in *)dst_in;
        const struct sockaddr_in6 *dst_in6 =
                (const struct sockaddr_in6 *)dst_in;
        const void *daddr = (dst_in->sa_family == AF_INET) ?
                (const void *)&dst_in4->sin_addr.s_addr :
                (const void *)&dst_in6->sin6_addr;
        sa_family_t family = dst_in->sa_family;

        /* Gateway + ARPHRD_INFINIBAND -> IB router */
        if (has_gateway(dst, family) && dst->dev->type == ARPHRD_INFINIBAND)
                return ib_nl_fetch_ha(dst, dev_addr, daddr, seq, family);
        else
                return dst_fetch_ha(dst, dev_addr, daddr);
}
static int addr4_resolve(struct sockaddr_in *src_in,
                         const struct sockaddr_in *dst_in,
                         struct rdma_dev_addr *addr,
                         struct rtable **prt)
{
        __be32 src_ip = src_in->sin_addr.s_addr;
        __be32 dst_ip = dst_in->sin_addr.s_addr;
        struct rtable *rt;
        struct flowi4 fl4;
        int ret;

        memset(&fl4, 0, sizeof(fl4));
        fl4.daddr = dst_ip;
        fl4.saddr = src_ip;
        fl4.flowi4_oif = addr->bound_dev_if;
        rt = ip_route_output_key(addr->net, &fl4);
        ret = PTR_ERR_OR_ZERO(rt);
        if (ret)
                return ret;

        src_in->sin_family = AF_INET;
        src_in->sin_addr.s_addr = fl4.saddr;

        /* If there's a gateway and the device type is not ARPHRD_INFINIBAND,
         * we're definitely in RoCE v2 (as RoCE v1 isn't routable); set the
         * network type accordingly.
         */
        if (rt->rt_uses_gateway && rt->dst.dev->type != ARPHRD_INFINIBAND)
                addr->network = RDMA_NETWORK_IPV4;

        addr->hoplimit = ip4_dst_hoplimit(&rt->dst);

        *prt = rt;
        return 0;
}

#if IS_ENABLED(CONFIG_IPV6)
static int addr6_resolve(struct sockaddr_in6 *src_in,
                         const struct sockaddr_in6 *dst_in,
                         struct rdma_dev_addr *addr,
                         struct dst_entry **pdst)
{
        struct flowi6 fl6;
        struct dst_entry *dst;
        struct rt6_info *rt;
        int ret;

        memset(&fl6, 0, sizeof fl6);
        fl6.daddr = dst_in->sin6_addr;
        fl6.saddr = src_in->sin6_addr;
        fl6.flowi6_oif = addr->bound_dev_if;

        ret = ipv6_stub->ipv6_dst_lookup(addr->net, NULL, &dst, &fl6);
        if (ret < 0)
                return ret;

        rt = (struct rt6_info *)dst;
        if (ipv6_addr_any(&src_in->sin6_addr)) {
                src_in->sin6_family = AF_INET6;
                src_in->sin6_addr = fl6.saddr;
        }

        /* If there's a gateway and the device type is not ARPHRD_INFINIBAND,
         * we're definitely in RoCE v2 (as RoCE v1 isn't routable); set the
         * network type accordingly.
         */
        if (rt->rt6i_flags & RTF_GATEWAY &&
            ip6_dst_idev(dst)->dev->type != ARPHRD_INFINIBAND)
                addr->network = RDMA_NETWORK_IPV6;

        addr->hoplimit = ip6_dst_hoplimit(dst);

        *pdst = dst;
        return 0;
}
#else
static int addr6_resolve(struct sockaddr_in6 *src_in,
                         const struct sockaddr_in6 *dst_in,
                         struct rdma_dev_addr *addr,
                         struct dst_entry **pdst)
{
        return -EADDRNOTAVAIL;
}
#endif
static int addr_resolve_neigh(struct dst_entry *dst,
                              const struct sockaddr *dst_in,
                              struct rdma_dev_addr *addr,
                              u32 seq)
{
        if (dst->dev->flags & IFF_LOOPBACK) {
                int ret;

                ret = rdma_translate_ip(dst_in, addr);
                if (!ret)
                        memcpy(addr->dst_dev_addr, addr->src_dev_addr,
                               MAX_ADDR_LEN);

                return ret;
        }

        /* If the device doesn't do ARP internally */
        if (!(dst->dev->flags & IFF_NOARP))
                return fetch_ha(dst, addr, dst_in, seq);

        rdma_copy_addr(addr, dst->dev, NULL);

        return 0;
}
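
/*
 * Core resolution step: look up the route for dst_in, fill in the source
 * address and device information in @addr, and (when resolve_neigh is set)
 * resolve the destination hardware address as well. If the route lands on
 * the loopback device, the address is translated back to the device that
 * owns dst_in.
 */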
static int addr_resolve(struct sockaddr *src_in,
                        const struct sockaddr *dst_in,
                        struct rdma_dev_addr *addr,
                        bool resolve_neigh,
                        u32 seq)
{
        struct net_device *ndev;
        struct dst_entry *dst;
        int ret;

        if (!addr->net) {
                pr_warn_ratelimited("%s: missing namespace\n", __func__);
                return -EINVAL;
        }

        if (src_in->sa_family == AF_INET) {
                struct rtable *rt = NULL;
                const struct sockaddr_in *dst_in4 =
                        (const struct sockaddr_in *)dst_in;

                ret = addr4_resolve((struct sockaddr_in *)src_in,
                                    dst_in4, addr, &rt);
                if (ret)
                        return ret;

                if (resolve_neigh)
                        ret = addr_resolve_neigh(&rt->dst, dst_in, addr, seq);

                if (addr->bound_dev_if) {
                        ndev = dev_get_by_index(addr->net, addr->bound_dev_if);
                } else {
                        ndev = rt->dst.dev;
                        dev_hold(ndev);
                }

                ip_rt_put(rt);
        } else {
                const struct sockaddr_in6 *dst_in6 =
                        (const struct sockaddr_in6 *)dst_in;

                ret = addr6_resolve((struct sockaddr_in6 *)src_in,
                                    dst_in6, addr,
                                    &dst);
                if (ret)
                        return ret;

                if (resolve_neigh)
                        ret = addr_resolve_neigh(dst, dst_in, addr, seq);

                if (addr->bound_dev_if) {
                        ndev = dev_get_by_index(addr->net, addr->bound_dev_if);
                } else {
                        ndev = dst->dev;
                        dev_hold(ndev);
                }

                dst_release(dst);
        }

        if (ndev->flags & IFF_LOOPBACK) {
                ret = rdma_translate_ip(dst_in, addr);
                /*
                 * Put the loopback device and get the translated
                 * device instead.
                 */
                dev_put(ndev);
                ndev = dev_get_by_index(addr->net, addr->bound_dev_if);
        } else {
                addr->bound_dev_if = ndev->ifindex;
        }
        dev_put(ndev);

        return ret;
}
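
/*
 * Per-request work item: retry resolution for a single queued request and
 * invoke its callback once it succeeds, times out, or is cancelled.
 */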
static void process_one_req(struct work_struct *_work)
{
        struct addr_req *req;
        struct sockaddr *src_in, *dst_in;

        mutex_lock(&lock);
        req = container_of(_work, struct addr_req, work.work);

        if (req->status == -ENODATA) {
                src_in = (struct sockaddr *)&req->src_addr;
                dst_in = (struct sockaddr *)&req->dst_addr;
                req->status = addr_resolve(src_in, dst_in, req->addr,
                                           true, req->seq);
                if (req->status && time_after_eq(jiffies, req->timeout)) {
                        req->status = -ETIMEDOUT;
                } else if (req->status == -ENODATA) {
                        /* requeue the work for retrying again */
                        set_timeout(&req->work, req->timeout);
                        mutex_unlock(&lock);
                        return;
                }
        }
        list_del(&req->list);
        mutex_unlock(&lock);

        req->callback(req->status, (struct sockaddr *)&req->src_addr,
                      req->addr, req->context);
        put_client(req->client);
        kfree(req);
}
static void process_req(struct work_struct *work)
{
        struct addr_req *req, *temp_req;
        struct sockaddr *src_in, *dst_in;
        struct list_head done_list;

        INIT_LIST_HEAD(&done_list);

        mutex_lock(&lock);
        list_for_each_entry_safe(req, temp_req, &req_list, list) {
                if (req->status == -ENODATA) {
                        src_in = (struct sockaddr *) &req->src_addr;
                        dst_in = (struct sockaddr *) &req->dst_addr;
                        req->status = addr_resolve(src_in, dst_in, req->addr,
                                                   true, req->seq);
                        if (req->status && time_after_eq(jiffies, req->timeout))
                                req->status = -ETIMEDOUT;
                        else if (req->status == -ENODATA) {
                                set_timeout(&req->work, req->timeout);
                                continue;
                        }
                }
                list_move_tail(&req->list, &done_list);
        }

        mutex_unlock(&lock);

        list_for_each_entry_safe(req, temp_req, &done_list, list) {
                list_del(&req->list);
                /* It is safe to cancel other work items from this work item
                 * because only one work item can run at a time on this
                 * single-threaded workqueue.
                 */
                cancel_delayed_work(&req->work);
                req->callback(req->status, (struct sockaddr *) &req->src_addr,
                              req->addr, req->context);
                put_client(req->client);
                kfree(req);
        }
}
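
/*
 * rdma_resolve_ip - resolve dst_addr to the local device and destination
 * hardware address. Resolution runs asynchronously on the ib_addr
 * workqueue; if this call returns 0, @callback is later invoked with the
 * final status. Errors other than -ENODATA detected here are returned
 * immediately and the callback is not invoked.
 */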
int rdma_resolve_ip(struct rdma_addr_client *client,
                    struct sockaddr *src_addr, struct sockaddr *dst_addr,
                    struct rdma_dev_addr *addr, int timeout_ms,
                    void (*callback)(int status, struct sockaddr *src_addr,
                                     struct rdma_dev_addr *addr, void *context),
                    void *context)
{
        struct sockaddr *src_in, *dst_in;
        struct addr_req *req;
        int ret = 0;

        req = kzalloc(sizeof *req, GFP_KERNEL);
        if (!req)
                return -ENOMEM;

        src_in = (struct sockaddr *) &req->src_addr;
        dst_in = (struct sockaddr *) &req->dst_addr;

        if (src_addr) {
                if (src_addr->sa_family != dst_addr->sa_family) {
                        ret = -EINVAL;
                        goto err;
                }

                memcpy(src_in, src_addr, rdma_addr_size(src_addr));
        } else {
                src_in->sa_family = dst_addr->sa_family;
        }

        memcpy(dst_in, dst_addr, rdma_addr_size(dst_addr));
        req->addr = addr;
        req->callback = callback;
        req->context = context;
        req->client = client;
        atomic_inc(&client->refcount);
        INIT_DELAYED_WORK(&req->work, process_one_req);
        req->seq = (u32)atomic_inc_return(&ib_nl_addr_request_seq);

        req->status = addr_resolve(src_in, dst_in, addr, true, req->seq);
        switch (req->status) {
        case 0:
                req->timeout = jiffies;
                queue_req(req);
                break;
        case -ENODATA:
                req->timeout = msecs_to_jiffies(timeout_ms) + jiffies;
                queue_req(req);
                break;
        default:
                ret = req->status;
                atomic_dec(&client->refcount);
                goto err;
        }
        return ret;
err:
        kfree(req);
        return ret;
}
EXPORT_SYMBOL(rdma_resolve_ip);
int rdma_resolve_ip_route(struct sockaddr *src_addr,
                          const struct sockaddr *dst_addr,
                          struct rdma_dev_addr *addr)
{
        struct sockaddr_storage ssrc_addr = {};
        struct sockaddr *src_in = (struct sockaddr *)&ssrc_addr;

        if (src_addr) {
                if (src_addr->sa_family != dst_addr->sa_family)
                        return -EINVAL;

                memcpy(src_in, src_addr, rdma_addr_size(src_addr));
        } else {
                src_in->sa_family = dst_addr->sa_family;
        }

        return addr_resolve(src_in, dst_addr, addr, false, 0);
}
EXPORT_SYMBOL(rdma_resolve_ip_route);
void rdma_addr_cancel(struct rdma_dev_addr *addr)
{
        struct addr_req *req, *temp_req;

        mutex_lock(&lock);
        list_for_each_entry_safe(req, temp_req, &req_list, list) {
                if (req->addr == addr) {
                        req->status = -ECANCELED;
                        req->timeout = jiffies;
                        list_move(&req->list, &req_list);
                        set_timeout(&req->work, req->timeout);
                        break;
                }
        }
        mutex_unlock(&lock);
}
EXPORT_SYMBOL(rdma_addr_cancel);
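
/*
 * The completion context and callback below implement the synchronous
 * rdma_addr_find_l2_eth_by_grh() helper: the source and destination GIDs
 * are converted to IP addresses, resolved with rdma_resolve_ip(), and the
 * caller waits on a completion for the callback to report the destination
 * MAC and hop limit.
 */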
struct resolve_cb_context {
        struct completion comp;
        int status;
};

static void resolve_cb(int status, struct sockaddr *src_addr,
                       struct rdma_dev_addr *addr, void *context)
{
        ((struct resolve_cb_context *)context)->status = status;
        complete(&((struct resolve_cb_context *)context)->comp);
}

int rdma_addr_find_l2_eth_by_grh(const union ib_gid *sgid,
                                 const union ib_gid *dgid,
                                 u8 *dmac, const struct net_device *ndev,
                                 int *hoplimit)
{
        struct rdma_dev_addr dev_addr;
        struct resolve_cb_context ctx;
        union {
                struct sockaddr     _sockaddr;
                struct sockaddr_in  _sockaddr_in;
                struct sockaddr_in6 _sockaddr_in6;
        } sgid_addr, dgid_addr;
        int ret;

        rdma_gid2ip(&sgid_addr._sockaddr, sgid);
        rdma_gid2ip(&dgid_addr._sockaddr, dgid);

        memset(&dev_addr, 0, sizeof(dev_addr));
        dev_addr.bound_dev_if = ndev->ifindex;
        dev_addr.net = &init_net;

        init_completion(&ctx.comp);
        ret = rdma_resolve_ip(&self, &sgid_addr._sockaddr, &dgid_addr._sockaddr,
                              &dev_addr, 1000, resolve_cb, &ctx);
        if (ret)
                return ret;

        wait_for_completion(&ctx.comp);

        ret = ctx.status;
        if (ret)
                return ret;

        memcpy(dmac, dev_addr.dst_dev_addr, ETH_ALEN);
        *hoplimit = dev_addr.hoplimit;
        return 0;
}
static int netevent_callback(struct notifier_block *self, unsigned long event,
                             void *ctx)
{
        if (event == NETEVENT_NEIGH_UPDATE) {
                struct neighbour *neigh = ctx;

                if (neigh->nud_state & NUD_VALID)
                        set_timeout(&work, jiffies);
        }
        return 0;
}

static struct notifier_block nb = {
        .notifier_call = netevent_callback
};

int addr_init(void)
{
        addr_wq = alloc_ordered_workqueue("ib_addr", 0);
        if (!addr_wq)
                return -ENOMEM;

        register_netevent_notifier(&nb);
        rdma_addr_register_client(&self);

        return 0;
}

void addr_cleanup(void)
{
        rdma_addr_unregister_client(&self);
        unregister_netevent_notifier(&nb);
        destroy_workqueue(addr_wq);
}