/*
 * net/tipc/name_distr.c: TIPC name distribution code
 *
 * Copyright (c) 2000-2006, 2014, Ericsson AB
 * Copyright (c) 2005, 2010-2011, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include "core.h"
#include "link.h"
#include "name_distr.h"
/**
 * struct publ_list - list of publications made by this node
 * @list: circular list of publications
 * @size: number of entries in list
 */
struct publ_list {
	struct list_head list;
	u32 size;
};
/* Per-scope lists of all publications made by this node */
static struct publ_list publ_zone = {
	.list = LIST_HEAD_INIT(publ_zone.list),
	.size = 0,
};

static struct publ_list publ_cluster = {
	.list = LIST_HEAD_INIT(publ_cluster.list),
	.size = 0,
};

static struct publ_list publ_node = {
	.list = LIST_HEAD_INIT(publ_node.list),
	.size = 0,
};

/* Indexed directly by TIPC scope value; slot 0 is unused */
static struct publ_list *publ_lists[] = {
	NULL,
	&publ_zone,	/* publ_lists[TIPC_ZONE_SCOPE]		*/
	&publ_cluster,	/* publ_lists[TIPC_CLUSTER_SCOPE]	*/
	&publ_node	/* publ_lists[TIPC_NODE_SCOPE]		*/
};
/* Max time (ms) a deferred name table update may linger in the backlog
 * before being dropped; tunable via sysctl.
 */
int sysctl_tipc_named_timeout __read_mostly = 2000;

/**
 * struct tipc_dist_queue - queue holding deferred name table updates
 */
static struct list_head tipc_dist_queue = LIST_HEAD_INIT(tipc_dist_queue);

/* One deferred name table update awaiting retry */
struct distr_queue_item {
	struct distr_item i;	/* update payload (fields in network byte order) */
	u32 dtype;		/* PUBLICATION or WITHDRAWAL */
	u32 node;		/* originating node address */
	unsigned long expires;	/* jiffies deadline after which update is dropped */
	struct list_head next;	/* link in tipc_dist_queue */
};
  78. /**
  79. * publ_to_item - add publication info to a publication message
  80. */
  81. static void publ_to_item(struct distr_item *i, struct publication *p)
  82. {
  83. i->type = htonl(p->type);
  84. i->lower = htonl(p->lower);
  85. i->upper = htonl(p->upper);
  86. i->ref = htonl(p->ref);
  87. i->key = htonl(p->key);
  88. }
  89. /**
  90. * named_prepare_buf - allocate & initialize a publication message
  91. */
  92. static struct sk_buff *named_prepare_buf(u32 type, u32 size, u32 dest)
  93. {
  94. struct sk_buff *buf = tipc_buf_acquire(INT_H_SIZE + size);
  95. struct tipc_msg *msg;
  96. if (buf != NULL) {
  97. msg = buf_msg(buf);
  98. tipc_msg_init(msg, NAME_DISTRIBUTOR, type, INT_H_SIZE, dest);
  99. msg_set_size(msg, INT_H_SIZE + size);
  100. }
  101. return buf;
  102. }
  103. void named_cluster_distribute(struct sk_buff *buf)
  104. {
  105. struct sk_buff *obuf;
  106. struct tipc_node *node;
  107. u32 dnode;
  108. rcu_read_lock();
  109. list_for_each_entry_rcu(node, &tipc_node_list, list) {
  110. dnode = node->addr;
  111. if (in_own_node(dnode))
  112. continue;
  113. if (!tipc_node_active_links(node))
  114. continue;
  115. obuf = skb_copy(buf, GFP_ATOMIC);
  116. if (!obuf)
  117. break;
  118. msg_set_destnode(buf_msg(obuf), dnode);
  119. tipc_link_xmit(obuf, dnode, dnode);
  120. }
  121. rcu_read_unlock();
  122. kfree_skb(buf);
  123. }
  124. /**
  125. * tipc_named_publish - tell other nodes about a new publication by this node
  126. */
  127. struct sk_buff *tipc_named_publish(struct publication *publ)
  128. {
  129. struct sk_buff *buf;
  130. struct distr_item *item;
  131. list_add_tail(&publ->local_list, &publ_lists[publ->scope]->list);
  132. publ_lists[publ->scope]->size++;
  133. if (publ->scope == TIPC_NODE_SCOPE)
  134. return NULL;
  135. buf = named_prepare_buf(PUBLICATION, ITEM_SIZE, 0);
  136. if (!buf) {
  137. pr_warn("Publication distribution failure\n");
  138. return NULL;
  139. }
  140. item = (struct distr_item *)msg_data(buf_msg(buf));
  141. publ_to_item(item, publ);
  142. return buf;
  143. }
  144. /**
  145. * tipc_named_withdraw - tell other nodes about a withdrawn publication by this node
  146. */
  147. struct sk_buff *tipc_named_withdraw(struct publication *publ)
  148. {
  149. struct sk_buff *buf;
  150. struct distr_item *item;
  151. list_del(&publ->local_list);
  152. publ_lists[publ->scope]->size--;
  153. if (publ->scope == TIPC_NODE_SCOPE)
  154. return NULL;
  155. buf = named_prepare_buf(WITHDRAWAL, ITEM_SIZE, 0);
  156. if (!buf) {
  157. pr_warn("Withdrawal distribution failure\n");
  158. return NULL;
  159. }
  160. item = (struct distr_item *)msg_data(buf_msg(buf));
  161. publ_to_item(item, publ);
  162. return buf;
  163. }
/**
 * named_distribute - prepare name info for bulk distribution to another node
 * @msg_list: list of messages (buffers) to be returned from this function
 * @dnode: node to be updated
 * @pls: linked list of publication items to be packed into buffer chain
 *
 * Packs every publication on @pls into a chain of PUBLICATION messages,
 * each carrying as many ITEM_SIZE records as fit within one link MTU.
 * Completed buffers are appended to @msg_list; on allocation failure the
 * function returns early, so buffers already queued yield a partial
 * bulk update.
 */
static void named_distribute(struct list_head *msg_list, u32 dnode,
			     struct publ_list *pls)
{
	struct publication *publ;
	struct sk_buff *buf = NULL;
	struct distr_item *item = NULL;
	uint dsz = pls->size * ITEM_SIZE;
	/* Largest multiple of ITEM_SIZE that fits in one message payload */
	uint msg_dsz = (tipc_node_get_mtu(dnode, 0) / ITEM_SIZE) * ITEM_SIZE;
	uint rem = dsz;
	uint msg_rem = 0;

	list_for_each_entry(publ, &pls->list, local_list) {
		/* Prepare next buffer: */
		if (!buf) {
			/* Last buffer may be partially filled */
			msg_rem = min_t(uint, rem, msg_dsz);
			rem -= msg_rem;
			buf = named_prepare_buf(PUBLICATION, msg_rem, dnode);
			if (!buf) {
				pr_warn("Bulk publication failure\n");
				return;
			}
			item = (struct distr_item *)msg_data(buf_msg(buf));
		}

		/* Pack publication into message: */
		publ_to_item(item, publ);
		item++;
		msg_rem -= ITEM_SIZE;

		/* Append full buffer to list: */
		if (!msg_rem) {
			/* NOTE(review): relies on sk_buff's next/prev fields
			 * overlaying struct list_head so the skb can be
			 * chained via list primitives — confirm layout.
			 */
			list_add_tail((struct list_head *)buf, msg_list);
			buf = NULL;
		}
	}
}
  203. /**
  204. * tipc_named_node_up - tell specified node about all publications by this node
  205. */
  206. void tipc_named_node_up(u32 dnode)
  207. {
  208. LIST_HEAD(msg_list);
  209. struct sk_buff *buf_chain;
  210. read_lock_bh(&tipc_nametbl_lock);
  211. named_distribute(&msg_list, dnode, &publ_cluster);
  212. named_distribute(&msg_list, dnode, &publ_zone);
  213. read_unlock_bh(&tipc_nametbl_lock);
  214. /* Convert circular list to linear list and send: */
  215. buf_chain = (struct sk_buff *)msg_list.next;
  216. ((struct sk_buff *)msg_list.prev)->next = NULL;
  217. tipc_link_xmit(buf_chain, dnode, dnode);
  218. }
  219. /**
  220. * named_purge_publ - remove publication associated with a failed node
  221. *
  222. * Invoked for each publication issued by a newly failed node.
  223. * Removes publication structure from name table & deletes it.
  224. */
  225. static void named_purge_publ(struct publication *publ)
  226. {
  227. struct publication *p;
  228. write_lock_bh(&tipc_nametbl_lock);
  229. p = tipc_nametbl_remove_publ(publ->type, publ->lower,
  230. publ->node, publ->ref, publ->key);
  231. if (p)
  232. tipc_nodesub_unsubscribe(&p->subscr);
  233. write_unlock_bh(&tipc_nametbl_lock);
  234. if (p != publ) {
  235. pr_err("Unable to remove publication from failed node\n"
  236. " (type=%u, lower=%u, node=0x%x, ref=%u, key=%u)\n",
  237. publ->type, publ->lower, publ->node, publ->ref,
  238. publ->key);
  239. }
  240. kfree(p);
  241. }
  242. /**
  243. * tipc_update_nametbl - try to process a nametable update and notify
  244. * subscribers
  245. *
  246. * tipc_nametbl_lock must be held.
  247. * Returns the publication item if successful, otherwise NULL.
  248. */
  249. static bool tipc_update_nametbl(struct distr_item *i, u32 node, u32 dtype)
  250. {
  251. struct publication *publ = NULL;
  252. if (dtype == PUBLICATION) {
  253. publ = tipc_nametbl_insert_publ(ntohl(i->type), ntohl(i->lower),
  254. ntohl(i->upper),
  255. TIPC_CLUSTER_SCOPE, node,
  256. ntohl(i->ref), ntohl(i->key));
  257. if (publ) {
  258. tipc_nodesub_subscribe(&publ->subscr, node, publ,
  259. (net_ev_handler)
  260. named_purge_publ);
  261. return true;
  262. }
  263. } else if (dtype == WITHDRAWAL) {
  264. publ = tipc_nametbl_remove_publ(ntohl(i->type), ntohl(i->lower),
  265. node, ntohl(i->ref),
  266. ntohl(i->key));
  267. if (publ) {
  268. tipc_nodesub_unsubscribe(&publ->subscr);
  269. kfree(publ);
  270. return true;
  271. }
  272. } else {
  273. pr_warn("Unrecognized name table message received\n");
  274. }
  275. return false;
  276. }
  277. /**
  278. * tipc_named_add_backlog - add a failed name table update to the backlog
  279. *
  280. */
  281. static void tipc_named_add_backlog(struct distr_item *i, u32 type, u32 node)
  282. {
  283. struct distr_queue_item *e;
  284. unsigned long now = get_jiffies_64();
  285. e = kzalloc(sizeof(*e), GFP_ATOMIC);
  286. if (!e)
  287. return;
  288. e->dtype = type;
  289. e->node = node;
  290. e->expires = now + msecs_to_jiffies(sysctl_tipc_named_timeout);
  291. memcpy(e, i, sizeof(*i));
  292. list_add_tail(&e->next, &tipc_dist_queue);
  293. }
/**
 * tipc_named_process_backlog - try to process any pending name table updates
 * from the network.
 *
 * Each queued update is retried; an update that still fails before its
 * deadline stays queued, while one that has expired is dropped with a
 * rate-limited warning. Caller must hold tipc_nametbl_lock (this is
 * invoked from tipc_named_rcv() under the write lock).
 */
void tipc_named_process_backlog(void)
{
	struct distr_queue_item *e, *tmp;
	char addr[16];
	/* NOTE(review): get_jiffies_64() truncated to unsigned long on
	 * 32-bit; matches how e->expires is computed.
	 */
	unsigned long now = get_jiffies_64();

	list_for_each_entry_safe(e, tmp, &tipc_dist_queue, next) {
		if (time_after(e->expires, now)) {
			/* Not yet expired: retry, keep queued on failure */
			if (!tipc_update_nametbl(&e->i, e->node, e->dtype))
				continue;
		} else {
			/* Deadline passed: drop the update with a warning */
			tipc_addr_string_fill(addr, e->node);
			pr_warn_ratelimited("Dropping name table update (%d) of {%u, %u, %u} from %s key=%u\n",
					    e->dtype, ntohl(e->i.type),
					    ntohl(e->i.lower),
					    ntohl(e->i.upper),
					    addr, ntohl(e->i.key));
		}
		/* Applied successfully or expired: remove from queue */
		list_del(&e->next);
		kfree(e);
	}
}
  319. /**
  320. * tipc_named_rcv - process name table update message sent by another node
  321. */
  322. void tipc_named_rcv(struct sk_buff *buf)
  323. {
  324. struct tipc_msg *msg = buf_msg(buf);
  325. struct distr_item *item = (struct distr_item *)msg_data(msg);
  326. u32 count = msg_data_sz(msg) / ITEM_SIZE;
  327. u32 node = msg_orignode(msg);
  328. write_lock_bh(&tipc_nametbl_lock);
  329. while (count--) {
  330. if (!tipc_update_nametbl(item, node, msg_type(msg)))
  331. tipc_named_add_backlog(item, msg_type(msg), node);
  332. item++;
  333. }
  334. tipc_named_process_backlog();
  335. write_unlock_bh(&tipc_nametbl_lock);
  336. kfree_skb(buf);
  337. }
  338. /**
  339. * tipc_named_reinit - re-initialize local publications
  340. *
  341. * This routine is called whenever TIPC networking is enabled.
  342. * All name table entries published by this node are updated to reflect
  343. * the node's new network address.
  344. */
  345. void tipc_named_reinit(void)
  346. {
  347. struct publication *publ;
  348. int scope;
  349. write_lock_bh(&tipc_nametbl_lock);
  350. for (scope = TIPC_ZONE_SCOPE; scope <= TIPC_NODE_SCOPE; scope++)
  351. list_for_each_entry(publ, &publ_lists[scope]->list, local_list)
  352. publ->node = tipc_own_addr;
  353. write_unlock_bh(&tipc_nametbl_lock);
  354. }