/*
 * net/tipc/node.c: TIPC node management routines
 *
 * Copyright (c) 2000-2006, 2012-2016, Ericsson AB
 * Copyright (c) 2005-2006, 2010-2014, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "core.h"
#include "link.h"
#include "node.h"
#include "name_distr.h"
#include "socket.h"
#include "bcast.h"
#include "monitor.h"
#include "discover.h"
#include "netlink.h"
#define INVALID_NODE_SIG	0x10000
#define NODE_CLEANUP_AFTER	300000

/* Flags used to take different actions according to flag type
 * TIPC_NOTIFY_NODE_DOWN: notify node is down
 * TIPC_NOTIFY_NODE_UP: notify node is up
 * TIPC_NOTIFY_LINK_UP: notify link is up
 * TIPC_NOTIFY_LINK_DOWN: notify link is down
 */
enum {
	TIPC_NOTIFY_NODE_DOWN	= (1 << 3),
	TIPC_NOTIFY_NODE_UP	= (1 << 4),
	TIPC_NOTIFY_LINK_UP	= (1 << 6),
	TIPC_NOTIFY_LINK_DOWN	= (1 << 7)
};
struct tipc_link_entry {
	struct tipc_link *link;
	spinlock_t lock;	/* per link */
	u32 mtu;
	struct sk_buff_head inputq;
	struct tipc_media_addr maddr;
};
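
/* Queues used by the broadcast receive link (see tipc_node_mcast_rcv()
 * below): broadcast messages land in inputq1, are staged through arrvq,
 * and are finally delivered to sockets via inputq2; namedq carries name
 * table distribution messages.
 */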
struct tipc_bclink_entry {
	struct tipc_link *link;
	struct sk_buff_head inputq1;
	struct sk_buff_head arrvq;
	struct sk_buff_head inputq2;
	struct sk_buff_head namedq;
};
/**
 * struct tipc_node - TIPC node structure
 * @addr: network address of node
 * @kref: reference counter to node object
 * @lock: rwlock governing access to structure
 * @net: the applicable net namespace
 * @hash: links to adjacent nodes in unsorted hash chain
 * @inputq: pointer to input queue containing messages for msg event
 * @namedq: pointer to name table input queue with name table messages
 * @active_links: bearer ids of active links, used as index into links[] array
 * @links: array containing references to all links to node
 * @action_flags: bit mask of different types of node actions
 * @state: connectivity state vs peer node
 * @sync_point: sequence number where synch/failover is finished
 * @list: links to adjacent nodes in sorted list of cluster's nodes
 * @working_links: number of working links to node (both active and standby)
 * @link_cnt: number of links to node
 * @capabilities: bitmap, indicating peer node's functional capabilities
 * @signature: node instance identifier
 * @link_id: local and remote bearer ids of changing link, if any
 * @publ_list: list of publications
 * @rcu: rcu struct for tipc_node
 * @delete_at: indicates the time for deleting a down node
 */
struct tipc_node {
	u32 addr;
	struct kref kref;
	rwlock_t lock;
	struct net *net;
	struct hlist_node hash;
	int active_links[2];
	struct tipc_link_entry links[MAX_BEARERS];
	struct tipc_bclink_entry bc_entry;
	int action_flags;
	struct list_head list;
	int state;
	bool failover_sent;
	u16 sync_point;
	int link_cnt;
	u16 working_links;
	u16 capabilities;
	u32 signature;
	u32 link_id;
	u8 peer_id[16];
	struct list_head publ_list;
	struct list_head conn_sks;
	unsigned long keepalive_intv;
	struct timer_list timer;
	struct rcu_head rcu;
	unsigned long delete_at;
};
/* Node FSM states and events:
 */
enum {
	SELF_DOWN_PEER_DOWN    = 0xdd,
	SELF_UP_PEER_UP        = 0xaa,
	SELF_DOWN_PEER_LEAVING = 0xd1,
	SELF_UP_PEER_COMING    = 0xac,
	SELF_COMING_PEER_UP    = 0xca,
	SELF_LEAVING_PEER_DOWN = 0x1d,
	NODE_FAILINGOVER       = 0xf0,
	NODE_SYNCHING          = 0xcc
};

enum {
	SELF_ESTABL_CONTACT_EVT = 0xece,
	SELF_LOST_CONTACT_EVT   = 0x1ce,
	PEER_ESTABL_CONTACT_EVT = 0x9ece,
	PEER_LOST_CONTACT_EVT   = 0x91ce,
	NODE_FAILOVER_BEGIN_EVT = 0xfbe,
	NODE_FAILOVER_END_EVT   = 0xfee,
	NODE_SYNCH_BEGIN_EVT    = 0xcbe,
	NODE_SYNCH_END_EVT      = 0xcee
};
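
/* The state values above appear to be mnemonic hex codes rather than
 * bit masks: read nibble-wise, with the self endpoint first and the
 * peer second, 0xd = "down", 0xa = "up", 0xc = "coming" and 0x1 =
 * "leaving", so e.g. SELF_UP_PEER_COMING is 0xac. They are only ever
 * compared for equality, never combined.
 */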
static void __tipc_node_link_down(struct tipc_node *n, int *bearer_id,
				  struct sk_buff_head *xmitq,
				  struct tipc_media_addr **maddr);
static void tipc_node_link_down(struct tipc_node *n, int bearer_id,
				bool delete);
static void node_lost_contact(struct tipc_node *n, struct sk_buff_head *inputq);
static void tipc_node_delete(struct tipc_node *node);
static void tipc_node_timeout(struct timer_list *t);
static void tipc_node_fsm_evt(struct tipc_node *n, int evt);
static struct tipc_node *tipc_node_find(struct net *net, u32 addr);
static struct tipc_node *tipc_node_find_by_id(struct net *net, u8 *id);
static void tipc_node_put(struct tipc_node *node);
static bool node_is_up(struct tipc_node *n);
static void tipc_node_delete_from_list(struct tipc_node *node);

struct tipc_sock_conn {
	u32 port;
	u32 peer_port;
	u32 peer_node;
	struct list_head list;
};
static struct tipc_link *node_active_link(struct tipc_node *n, int sel)
{
	int bearer_id = n->active_links[sel & 1];

	if (unlikely(bearer_id == INVALID_BEARER_ID))
		return NULL;

	return n->links[bearer_id].link;
}

int tipc_node_get_mtu(struct net *net, u32 addr, u32 sel)
{
	struct tipc_node *n;
	int bearer_id;
	unsigned int mtu = MAX_MSG_SIZE;

	n = tipc_node_find(net, addr);
	if (unlikely(!n))
		return mtu;

	bearer_id = n->active_links[sel & 1];
	if (likely(bearer_id != INVALID_BEARER_ID))
		mtu = n->links[bearer_id].mtu;
	tipc_node_put(n);
	return mtu;
}

bool tipc_node_get_id(struct net *net, u32 addr, u8 *id)
{
	u8 *own_id = tipc_own_id(net);
	struct tipc_node *n;

	if (!own_id)
		return true;

	if (addr == tipc_own_addr(net)) {
		memcpy(id, own_id, TIPC_NODEID_LEN);
		return true;
	}
	n = tipc_node_find(net, addr);
	if (!n)
		return false;

	memcpy(id, &n->peer_id, TIPC_NODEID_LEN);
	tipc_node_put(n);
	return true;
}

u16 tipc_node_get_capabilities(struct net *net, u32 addr)
{
	struct tipc_node *n;
	u16 caps;

	n = tipc_node_find(net, addr);
	if (unlikely(!n))
		return TIPC_NODE_CAPABILITIES;
	caps = n->capabilities;
	tipc_node_put(n);
	return caps;
}

static void tipc_node_kref_release(struct kref *kref)
{
	struct tipc_node *n = container_of(kref, struct tipc_node, kref);

	kfree(n->bc_entry.link);
	kfree_rcu(n, rcu);
}

static void tipc_node_put(struct tipc_node *node)
{
	kref_put(&node->kref, tipc_node_kref_release);
}

static void tipc_node_get(struct tipc_node *node)
{
	kref_get(&node->kref);
}
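
/* Reference counting sketch: tipc_node_find() and tipc_node_find_by_id()
 * below return the node with an extra reference held (via
 * kref_get_unless_zero()), so every successful lookup must be paired
 * with a tipc_node_put():
 *
 *	n = tipc_node_find(net, addr);
 *	if (n) {
 *		... use n ...
 *		tipc_node_put(n);
 *	}
 */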
/*
 * tipc_node_find - locate specified node object, if it exists
 */
static struct tipc_node *tipc_node_find(struct net *net, u32 addr)
{
	struct tipc_net *tn = tipc_net(net);
	struct tipc_node *node;
	unsigned int thash = tipc_hashfn(addr);

	rcu_read_lock();
	hlist_for_each_entry_rcu(node, &tn->node_htable[thash], hash) {
		if (node->addr != addr)
			continue;
		if (!kref_get_unless_zero(&node->kref))
			node = NULL;
		break;
	}
	rcu_read_unlock();
	return node;
}
/* tipc_node_find_by_id - locate specified node object by its 128-bit id
 * Note: this function is called only when a discovery request failed
 * to find the node by its 32-bit id, and is not time critical
 */
static struct tipc_node *tipc_node_find_by_id(struct net *net, u8 *id)
{
	struct tipc_net *tn = tipc_net(net);
	struct tipc_node *n;
	bool found = false;

	rcu_read_lock();
	list_for_each_entry_rcu(n, &tn->node_list, list) {
		read_lock_bh(&n->lock);
		if (!memcmp(id, n->peer_id, 16) &&
		    kref_get_unless_zero(&n->kref))
			found = true;
		read_unlock_bh(&n->lock);
		if (found)
			break;
	}
	rcu_read_unlock();
	return found ? n : NULL;
}

static void tipc_node_read_lock(struct tipc_node *n)
{
	read_lock_bh(&n->lock);
}

static void tipc_node_read_unlock(struct tipc_node *n)
{
	read_unlock_bh(&n->lock);
}

static void tipc_node_write_lock(struct tipc_node *n)
{
	write_lock_bh(&n->lock);
}

static void tipc_node_write_unlock_fast(struct tipc_node *n)
{
	write_unlock_bh(&n->lock);
}
static void tipc_node_write_unlock(struct tipc_node *n)
{
	struct net *net = n->net;
	u32 addr = 0;
	u32 flags = n->action_flags;
	u32 link_id = 0;
	u32 bearer_id;
	struct list_head *publ_list;

	if (likely(!flags)) {
		write_unlock_bh(&n->lock);
		return;
	}

	addr = n->addr;
	link_id = n->link_id;
	bearer_id = link_id & 0xffff;
	publ_list = &n->publ_list;

	n->action_flags &= ~(TIPC_NOTIFY_NODE_DOWN | TIPC_NOTIFY_NODE_UP |
			     TIPC_NOTIFY_LINK_DOWN | TIPC_NOTIFY_LINK_UP);

	write_unlock_bh(&n->lock);

	if (flags & TIPC_NOTIFY_NODE_DOWN)
		tipc_publ_notify(net, publ_list, addr);

	if (flags & TIPC_NOTIFY_NODE_UP)
		tipc_named_node_up(net, addr);

	if (flags & TIPC_NOTIFY_LINK_UP) {
		tipc_mon_peer_up(net, addr, bearer_id);
		tipc_nametbl_publish(net, TIPC_LINK_STATE, addr, addr,
				     TIPC_NODE_SCOPE, link_id, link_id);
	}
	if (flags & TIPC_NOTIFY_LINK_DOWN) {
		tipc_mon_peer_down(net, addr, bearer_id);
		tipc_nametbl_withdraw(net, TIPC_LINK_STATE, addr,
				      addr, link_id);
	}
}
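
/* tipc_node_write_unlock() above implements a deferred-notification
 * pattern: the action flags are sampled and cleared while the write
 * lock is still held, but the resulting notifications (name table
 * publish/withdraw, monitor peer up/down) run only after the lock is
 * released, so those subsystems are never entered with the node lock
 * held.
 */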
static struct tipc_node *tipc_node_create(struct net *net, u32 addr,
					  u8 *peer_id, u16 capabilities)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_node *n, *temp_node;
	struct tipc_link *l;
	int bearer_id;
	int i;

	spin_lock_bh(&tn->node_list_lock);
	n = tipc_node_find(net, addr);
	if (n) {
		if (n->capabilities == capabilities)
			goto exit;
		/* Same node may come back with new capabilities */
		write_lock_bh(&n->lock);
		n->capabilities = capabilities;
		for (bearer_id = 0; bearer_id < MAX_BEARERS; bearer_id++) {
			l = n->links[bearer_id].link;
			if (l)
				tipc_link_update_caps(l, capabilities);
		}
		write_unlock_bh(&n->lock);
		goto exit;
	}
	n = kzalloc(sizeof(*n), GFP_ATOMIC);
	if (!n) {
		pr_warn("Node creation failed, no memory\n");
		goto exit;
	}
	n->addr = addr;
	memcpy(&n->peer_id, peer_id, 16);
	n->net = net;
	n->capabilities = capabilities;
	kref_init(&n->kref);
	rwlock_init(&n->lock);
	INIT_HLIST_NODE(&n->hash);
	INIT_LIST_HEAD(&n->list);
	INIT_LIST_HEAD(&n->publ_list);
	INIT_LIST_HEAD(&n->conn_sks);
	skb_queue_head_init(&n->bc_entry.namedq);
	skb_queue_head_init(&n->bc_entry.inputq1);
	__skb_queue_head_init(&n->bc_entry.arrvq);
	skb_queue_head_init(&n->bc_entry.inputq2);
	for (i = 0; i < MAX_BEARERS; i++)
		spin_lock_init(&n->links[i].lock);
	n->state = SELF_DOWN_PEER_LEAVING;
	n->delete_at = jiffies + msecs_to_jiffies(NODE_CLEANUP_AFTER);
	n->signature = INVALID_NODE_SIG;
	n->active_links[0] = INVALID_BEARER_ID;
	n->active_links[1] = INVALID_BEARER_ID;
	if (!tipc_link_bc_create(net, tipc_own_addr(net),
				 addr, U16_MAX,
				 tipc_link_window(tipc_bc_sndlink(net)),
				 n->capabilities,
				 &n->bc_entry.inputq1,
				 &n->bc_entry.namedq,
				 tipc_bc_sndlink(net),
				 &n->bc_entry.link)) {
		pr_warn("Broadcast rcv link creation failed, no memory\n");
		kfree(n);
		n = NULL;
		goto exit;
	}
	tipc_node_get(n);
	timer_setup(&n->timer, tipc_node_timeout, 0);
	n->keepalive_intv = U32_MAX;
	hlist_add_head_rcu(&n->hash, &tn->node_htable[tipc_hashfn(addr)]);
	list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
		if (n->addr < temp_node->addr)
			break;
	}
	list_add_tail_rcu(&n->list, &temp_node->list);
exit:
	spin_unlock_bh(&tn->node_list_lock);
	return n;
}
static void tipc_node_calculate_timer(struct tipc_node *n, struct tipc_link *l)
{
	unsigned long tol = tipc_link_tolerance(l);
	unsigned long intv = ((tol / 4) > 500) ? 500 : tol / 4;

	/* Link with lowest tolerance determines timer interval */
	if (intv < n->keepalive_intv)
		n->keepalive_intv = intv;

	/* Ensure link's abort limit corresponds to current tolerance */
	tipc_link_set_abort_limit(l, tol / n->keepalive_intv);
}
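
/* Worked example, assuming a link tolerance of 1500 ms:
 * intv = min(1500 / 4, 500) = 375 ms, so the node timer fires at least
 * every 375 ms, and the link's abort limit becomes
 * tol / keepalive_intv = 1500 / 375 = 4 unanswered probes.
 */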
static void tipc_node_delete_from_list(struct tipc_node *node)
{
	list_del_rcu(&node->list);
	hlist_del_rcu(&node->hash);
	tipc_node_put(node);
}

static void tipc_node_delete(struct tipc_node *node)
{
	tipc_node_delete_from_list(node);

	del_timer_sync(&node->timer);
	tipc_node_put(node);
}
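
/* Note the reference accounting in the two functions above:
 * tipc_node_delete_from_list() drops the reference held by the node
 * list, while tipc_node_delete() additionally stops the timer with
 * del_timer_sync() and drops the reference that was taken on the
 * timer's behalf when it was armed (see tipc_node_create() and
 * tipc_node_check_dest()).
 */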
void tipc_node_stop(struct net *net)
{
	struct tipc_net *tn = tipc_net(net);
	struct tipc_node *node, *t_node;

	spin_lock_bh(&tn->node_list_lock);
	list_for_each_entry_safe(node, t_node, &tn->node_list, list)
		tipc_node_delete(node);
	spin_unlock_bh(&tn->node_list_lock);
}

void tipc_node_subscribe(struct net *net, struct list_head *subscr, u32 addr)
{
	struct tipc_node *n;

	if (in_own_node(net, addr))
		return;

	n = tipc_node_find(net, addr);
	if (!n) {
		pr_warn("Node subscribe rejected, unknown node 0x%x\n", addr);
		return;
	}
	tipc_node_write_lock(n);
	list_add_tail(subscr, &n->publ_list);
	tipc_node_write_unlock_fast(n);
	tipc_node_put(n);
}

void tipc_node_unsubscribe(struct net *net, struct list_head *subscr, u32 addr)
{
	struct tipc_node *n;

	if (in_own_node(net, addr))
		return;

	n = tipc_node_find(net, addr);
	if (!n) {
		pr_warn("Node unsubscribe rejected, unknown node 0x%x\n", addr);
		return;
	}
	tipc_node_write_lock(n);
	list_del_init(subscr);
	tipc_node_write_unlock_fast(n);
	tipc_node_put(n);
}

int tipc_node_add_conn(struct net *net, u32 dnode, u32 port, u32 peer_port)
{
	struct tipc_node *node;
	struct tipc_sock_conn *conn;
	int err = 0;

	if (in_own_node(net, dnode))
		return 0;

	node = tipc_node_find(net, dnode);
	if (!node) {
		pr_warn("Connecting sock to node 0x%x failed\n", dnode);
		return -EHOSTUNREACH;
	}
	conn = kmalloc(sizeof(*conn), GFP_ATOMIC);
	if (!conn) {
		err = -EHOSTUNREACH;
		goto exit;
	}
	conn->peer_node = dnode;
	conn->port = port;
	conn->peer_port = peer_port;

	tipc_node_write_lock(node);
	list_add_tail(&conn->list, &node->conn_sks);
	tipc_node_write_unlock(node);
exit:
	tipc_node_put(node);
	return err;
}

void tipc_node_remove_conn(struct net *net, u32 dnode, u32 port)
{
	struct tipc_node *node;
	struct tipc_sock_conn *conn, *safe;

	if (in_own_node(net, dnode))
		return;

	node = tipc_node_find(net, dnode);
	if (!node)
		return;

	tipc_node_write_lock(node);
	list_for_each_entry_safe(conn, safe, &node->conn_sks, list) {
		if (port != conn->port)
			continue;
		list_del(&conn->list);
		kfree(conn);
	}
	tipc_node_write_unlock(node);
	tipc_node_put(node);
}

static void tipc_node_clear_links(struct tipc_node *node)
{
	int i;

	for (i = 0; i < MAX_BEARERS; i++) {
		struct tipc_link_entry *le = &node->links[i];

		if (le->link) {
			kfree(le->link);
			le->link = NULL;
			node->link_cnt--;
		}
	}
}
/* tipc_node_cleanup - delete nodes that do not
 * have active links for NODE_CLEANUP_AFTER time
 */
static int tipc_node_cleanup(struct tipc_node *peer)
{
	struct tipc_net *tn = tipc_net(peer->net);
	bool deleted = false;

	spin_lock_bh(&tn->node_list_lock);
	tipc_node_write_lock(peer);

	if (!node_is_up(peer) && time_after(jiffies, peer->delete_at)) {
		tipc_node_clear_links(peer);
		tipc_node_delete_from_list(peer);
		deleted = true;
	}
	tipc_node_write_unlock(peer);
	spin_unlock_bh(&tn->node_list_lock);
	return deleted;
}
/* tipc_node_timeout - handle expiration of node timer
 */
static void tipc_node_timeout(struct timer_list *t)
{
	struct tipc_node *n = from_timer(n, t, timer);
	struct tipc_link_entry *le;
	struct sk_buff_head xmitq;
	int remains = n->link_cnt;
	int bearer_id;
	int rc = 0;

	if (!node_is_up(n) && tipc_node_cleanup(n)) {
		/* Drop the reference held on behalf of the timer */
		tipc_node_put(n);
		return;
	}

	__skb_queue_head_init(&xmitq);

	for (bearer_id = 0; remains && (bearer_id < MAX_BEARERS); bearer_id++) {
		tipc_node_read_lock(n);
		le = &n->links[bearer_id];
		if (le->link) {
			spin_lock_bh(&le->lock);
			/* Link tolerance may change asynchronously: */
			tipc_node_calculate_timer(n, le->link);
			rc = tipc_link_timeout(le->link, &xmitq);
			spin_unlock_bh(&le->lock);
			remains--;
		}
		tipc_node_read_unlock(n);
		tipc_bearer_xmit(n->net, bearer_id, &xmitq, &le->maddr);
		if (rc & TIPC_LINK_DOWN_EVT)
			tipc_node_link_down(n, bearer_id, false);
	}
	mod_timer(&n->timer, jiffies + msecs_to_jiffies(n->keepalive_intv));
}
/**
 * __tipc_node_link_up - handle addition of link
 * Node lock must be held by caller
 * Link becomes active (alone or shared) or standby, depending on its priority.
 */
static void __tipc_node_link_up(struct tipc_node *n, int bearer_id,
				struct sk_buff_head *xmitq)
{
	int *slot0 = &n->active_links[0];
	int *slot1 = &n->active_links[1];
	struct tipc_link *ol = node_active_link(n, 0);
	struct tipc_link *nl = n->links[bearer_id].link;

	if (!nl || tipc_link_is_up(nl))
		return;

	tipc_link_fsm_evt(nl, LINK_ESTABLISH_EVT);
	if (!tipc_link_is_up(nl))
		return;

	n->working_links++;
	n->action_flags |= TIPC_NOTIFY_LINK_UP;
	n->link_id = tipc_link_id(nl);

	/* Leave room for tunnel header when returning 'mtu' to users: */
	n->links[bearer_id].mtu = tipc_link_mtu(nl) - INT_H_SIZE;

	tipc_bearer_add_dest(n->net, bearer_id, n->addr);
	tipc_bcast_inc_bearer_dst_cnt(n->net, bearer_id);

	pr_debug("Established link <%s> on network plane %c\n",
		 tipc_link_name(nl), tipc_link_plane(nl));

	/* Ensure that a STATE message goes first */
	tipc_link_build_state_msg(nl, xmitq);

	/* First link? => give it both slots */
	if (!ol) {
		*slot0 = bearer_id;
		*slot1 = bearer_id;
		tipc_node_fsm_evt(n, SELF_ESTABL_CONTACT_EVT);
		n->failover_sent = false;
		n->action_flags |= TIPC_NOTIFY_NODE_UP;
		tipc_link_set_active(nl, true);
		tipc_bcast_add_peer(n->net, nl, xmitq);
		return;
	}

	/* Second link => redistribute slots */
	if (tipc_link_prio(nl) > tipc_link_prio(ol)) {
		pr_debug("Old link <%s> becomes standby\n", tipc_link_name(ol));
		*slot0 = bearer_id;
		*slot1 = bearer_id;
		tipc_link_set_active(nl, true);
		tipc_link_set_active(ol, false);
	} else if (tipc_link_prio(nl) == tipc_link_prio(ol)) {
		tipc_link_set_active(nl, true);
		*slot1 = bearer_id;
	} else {
		pr_debug("New link <%s> is standby\n", tipc_link_name(nl));
	}

	/* Prepare synchronization with first link */
	tipc_link_tnl_prepare(ol, nl, SYNCH_MSG, xmitq);
}
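
/* Active link bookkeeping sketch: active_links[0] and active_links[1]
 * both point at the same link while only one link is active. When two
 * links of equal priority are up, each slot holds one of them, and
 * callers select a slot with "selector & 1" (see node_active_link()),
 * giving a deterministic per-flow spread over the two links.
 */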
/**
 * tipc_node_link_up - handle addition of link
 *
 * Link becomes active (alone or shared) or standby, depending on its priority.
 */
static void tipc_node_link_up(struct tipc_node *n, int bearer_id,
			      struct sk_buff_head *xmitq)
{
	struct tipc_media_addr *maddr;

	tipc_node_write_lock(n);
	__tipc_node_link_up(n, bearer_id, xmitq);
	maddr = &n->links[bearer_id].maddr;
	tipc_bearer_xmit(n->net, bearer_id, xmitq, maddr);
	tipc_node_write_unlock(n);
}
/**
 * __tipc_node_link_down - handle loss of link
 */
static void __tipc_node_link_down(struct tipc_node *n, int *bearer_id,
				  struct sk_buff_head *xmitq,
				  struct tipc_media_addr **maddr)
{
	struct tipc_link_entry *le = &n->links[*bearer_id];
	int *slot0 = &n->active_links[0];
	int *slot1 = &n->active_links[1];
	int i, highest = 0, prio;
	struct tipc_link *l, *_l, *tnl;

	l = n->links[*bearer_id].link;
	if (!l || tipc_link_is_reset(l))
		return;

	n->working_links--;
	n->action_flags |= TIPC_NOTIFY_LINK_DOWN;
	n->link_id = tipc_link_id(l);

	tipc_bearer_remove_dest(n->net, *bearer_id, n->addr);

	pr_debug("Lost link <%s> on network plane %c\n",
		 tipc_link_name(l), tipc_link_plane(l));

	/* Select new active link if any available */
	*slot0 = INVALID_BEARER_ID;
	*slot1 = INVALID_BEARER_ID;
	for (i = 0; i < MAX_BEARERS; i++) {
		_l = n->links[i].link;
		if (!_l || !tipc_link_is_up(_l))
			continue;
		if (_l == l)
			continue;
		prio = tipc_link_prio(_l);
		if (prio < highest)
			continue;
		if (prio > highest) {
			highest = prio;
			*slot0 = i;
			*slot1 = i;
			continue;
		}
		*slot1 = i;
	}

	if (!node_is_up(n)) {
		if (tipc_link_peer_is_down(l))
			tipc_node_fsm_evt(n, PEER_LOST_CONTACT_EVT);
		tipc_node_fsm_evt(n, SELF_LOST_CONTACT_EVT);
		tipc_link_fsm_evt(l, LINK_RESET_EVT);
		tipc_link_reset(l);
		tipc_link_build_reset_msg(l, xmitq);
		*maddr = &n->links[*bearer_id].maddr;
		node_lost_contact(n, &le->inputq);
		tipc_bcast_dec_bearer_dst_cnt(n->net, *bearer_id);
		return;
	}
	tipc_bcast_dec_bearer_dst_cnt(n->net, *bearer_id);

	/* There is still a working link => initiate failover */
	*bearer_id = n->active_links[0];
	tnl = n->links[*bearer_id].link;
	tipc_link_fsm_evt(tnl, LINK_SYNCH_END_EVT);
	tipc_node_fsm_evt(n, NODE_SYNCH_END_EVT);
	n->sync_point = tipc_link_rcv_nxt(tnl) + (U16_MAX / 2 - 1);
	tipc_link_tnl_prepare(l, tnl, FAILOVER_MSG, xmitq);
	tipc_link_reset(l);
	tipc_link_fsm_evt(l, LINK_RESET_EVT);
	tipc_link_fsm_evt(l, LINK_FAILOVER_BEGIN_EVT);
	tipc_node_fsm_evt(n, NODE_FAILOVER_BEGIN_EVT);
	*maddr = &n->links[*bearer_id].maddr;
}
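
/* On failover initiation above, sync_point is primed to
 * tipc_link_rcv_nxt(tnl) + (U16_MAX / 2 - 1), i.e. roughly half the
 * 16-bit sequence space ahead of the tunnel link's receive position.
 * This appears to act as an upper bound: tipc_node_check_state() later
 * lowers it to the real synch point carried by the first FAILOVER_MSG
 * ("use lowest calculated syncpt").
 */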
static void tipc_node_link_down(struct tipc_node *n, int bearer_id, bool delete)
{
	struct tipc_link_entry *le = &n->links[bearer_id];
	struct tipc_link *l = le->link;
	struct tipc_media_addr *maddr;
	struct sk_buff_head xmitq;
	int old_bearer_id = bearer_id;

	if (!l)
		return;

	__skb_queue_head_init(&xmitq);

	tipc_node_write_lock(n);
	if (!tipc_link_is_establishing(l)) {
		__tipc_node_link_down(n, &bearer_id, &xmitq, &maddr);
		if (delete) {
			kfree(l);
			le->link = NULL;
			n->link_cnt--;
		}
	} else {
		/* Defuse pending tipc_node_link_up() */
		tipc_link_fsm_evt(l, LINK_RESET_EVT);
	}
	tipc_node_write_unlock(n);
	if (delete)
		tipc_mon_remove_peer(n->net, n->addr, old_bearer_id);
	tipc_bearer_xmit(n->net, bearer_id, &xmitq, maddr);
	tipc_sk_rcv(n->net, &le->inputq);
}

static bool node_is_up(struct tipc_node *n)
{
	return n->active_links[0] != INVALID_BEARER_ID;
}

bool tipc_node_is_up(struct net *net, u32 addr)
{
	struct tipc_node *n;
	bool retval = false;

	if (in_own_node(net, addr))
		return true;

	n = tipc_node_find(net, addr);
	if (!n)
		return false;
	retval = node_is_up(n);
	tipc_node_put(n);
	return retval;
}
static u32 tipc_node_suggest_addr(struct net *net, u32 addr)
{
	struct tipc_node *n;

	addr ^= tipc_net(net)->random;
	while ((n = tipc_node_find(net, addr))) {
		tipc_node_put(n);
		addr++;
	}
	return addr;
}
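
/* tipc_node_suggest_addr() sketch: the candidate address is first
 * perturbed with this node's random value, then linearly probed
 * upwards until a 32-bit address is found that no known peer is
 * currently using.
 */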
/* tipc_node_try_addr(): Check if addr can be used by peer, suggest other if not
 * Returns suggested address if any, otherwise 0
 */
u32 tipc_node_try_addr(struct net *net, u8 *id, u32 addr)
{
	struct tipc_net *tn = tipc_net(net);
	struct tipc_node *n;

	/* Suggest new address if some other peer is using this one */
	n = tipc_node_find(net, addr);
	if (n) {
		if (!memcmp(n->peer_id, id, NODE_ID_LEN))
			addr = 0;
		tipc_node_put(n);
		if (!addr)
			return 0;
		return tipc_node_suggest_addr(net, addr);
	}

	/* Suggest previously used address if peer is known */
	n = tipc_node_find_by_id(net, id);
	if (n) {
		addr = n->addr;
		tipc_node_put(n);
		return addr;
	}

	/* Even this node may be in conflict */
	if (tn->trial_addr == addr)
		return tipc_node_suggest_addr(net, addr);

	return 0;
}
void tipc_node_check_dest(struct net *net, u32 addr,
			  u8 *peer_id, struct tipc_bearer *b,
			  u16 capabilities, u32 signature,
			  struct tipc_media_addr *maddr,
			  bool *respond, bool *dupl_addr)
{
	struct tipc_node *n;
	struct tipc_link *l;
	struct tipc_link_entry *le;
	bool addr_match = false;
	bool sign_match = false;
	bool link_up = false;
	bool accept_addr = false;
	bool reset = true;
	char *if_name;
	unsigned long intv;

	*dupl_addr = false;
	*respond = false;

	n = tipc_node_create(net, addr, peer_id, capabilities);
	if (!n)
		return;

	tipc_node_write_lock(n);

	le = &n->links[b->identity];

	/* Prepare to validate requesting node's signature and media address */
	l = le->link;
	link_up = l && tipc_link_is_up(l);
	addr_match = l && !memcmp(&le->maddr, maddr, sizeof(*maddr));
	sign_match = (signature == n->signature);
	/* These three flags give us eight permutations: */

	if (sign_match && addr_match && link_up) {
		/* All is fine. Do nothing. */
		reset = false;
	} else if (sign_match && addr_match && !link_up) {
		/* Respond. The link will come up in due time */
		*respond = true;
	} else if (sign_match && !addr_match && link_up) {
		/* Peer has changed i/f address without rebooting.
		 * If so, the link will reset soon, and the next
		 * discovery will be accepted. So we can ignore it.
		 * It may also be a cloned or malicious peer having
		 * chosen the same node address and signature as an
		 * existing one.
		 * Ignore requests until the link goes down, if ever.
		 */
		*dupl_addr = true;
	} else if (sign_match && !addr_match && !link_up) {
		/* Peer link has changed i/f address without rebooting.
		 * It may also be a cloned or malicious peer; we can't
		 * distinguish between the two.
		 * The signature is correct, so we must accept.
		 */
		accept_addr = true;
		*respond = true;
	} else if (!sign_match && addr_match && link_up) {
		/* Peer node rebooted. Two possibilities:
		 *  - Delayed re-discovery; this link endpoint has already
		 *    reset and re-established contact with the peer, before
		 *    receiving a discovery message from that node.
		 *    (The peer happened to receive one from this node first).
		 *  - The peer came back so fast that our side has not
		 *    discovered it yet. Probing from this side will soon
		 *    reset the link, since there can be no working link
		 *    endpoint at the peer end, and the link will re-establish.
		 *  Accept the signature, since it comes from a known peer.
		 */
		n->signature = signature;
	} else if (!sign_match && addr_match && !link_up) {
		/* The peer node has rebooted.
		 * Accept signature, since it is a known peer.
		 */
		n->signature = signature;
		*respond = true;
	} else if (!sign_match && !addr_match && link_up) {
		/* Peer rebooted with new address, or a new/duplicate peer.
		 * Ignore until the link goes down, if ever.
		 */
		*dupl_addr = true;
	} else if (!sign_match && !addr_match && !link_up) {
		/* Peer rebooted with new address, or it is a new peer.
		 * Accept signature and address.
		 */
		n->signature = signature;
		accept_addr = true;
		*respond = true;
	}
	if (!accept_addr)
		goto exit;

	/* Now create new link if not already existing */
	if (!l) {
		if (n->link_cnt == 2)
			goto exit;

		if_name = strchr(b->name, ':') + 1;
		if (!tipc_link_create(net, if_name, b->identity, b->tolerance,
				      b->net_plane, b->mtu, b->priority,
				      b->window, mod(tipc_net(net)->random),
				      tipc_own_addr(net), addr, peer_id,
				      n->capabilities,
				      tipc_bc_sndlink(n->net), n->bc_entry.link,
				      &le->inputq,
				      &n->bc_entry.namedq, &l)) {
			*respond = false;
			goto exit;
		}
		tipc_link_reset(l);
		tipc_link_fsm_evt(l, LINK_RESET_EVT);
		if (n->state == NODE_FAILINGOVER)
			tipc_link_fsm_evt(l, LINK_FAILOVER_BEGIN_EVT);
		le->link = l;
		n->link_cnt++;
		tipc_node_calculate_timer(n, l);
		if (n->link_cnt == 1) {
			intv = jiffies + msecs_to_jiffies(n->keepalive_intv);
			if (!mod_timer(&n->timer, intv))
				tipc_node_get(n);
		}
	}
	memcpy(&le->maddr, maddr, sizeof(*maddr));
exit:
	tipc_node_write_unlock(n);
	if (reset && l && !tipc_link_is_reset(l))
		tipc_node_link_down(n, b->identity, false);
	tipc_node_put(n);
}
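
/* Summary of the eight permutations handled above:
 *
 *	sign	addr	link	action
 *	match	match	up	none (all is fine)
 *	match	match	down	respond
 *	match	diff	up	ignore, flag duplicate address
 *	match	diff	down	accept address, respond
 *	diff	match	up	accept new signature
 *	diff	match	down	accept new signature, respond
 *	diff	diff	up	ignore, flag duplicate address
 *	diff	diff	down	accept signature and address, respond
 */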
void tipc_node_delete_links(struct net *net, int bearer_id)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_node *n;

	rcu_read_lock();
	list_for_each_entry_rcu(n, &tn->node_list, list) {
		tipc_node_link_down(n, bearer_id, true);
	}
	rcu_read_unlock();
}

static void tipc_node_reset_links(struct tipc_node *n)
{
	int i;

	pr_warn("Resetting all links to %x\n", n->addr);

	for (i = 0; i < MAX_BEARERS; i++) {
		tipc_node_link_down(n, i, false);
	}
}
/* tipc_node_fsm_evt - node finite state machine
 * Determines when contact is allowed with peer node
 */
static void tipc_node_fsm_evt(struct tipc_node *n, int evt)
{
	int state = n->state;

	switch (state) {
	case SELF_DOWN_PEER_DOWN:
		switch (evt) {
		case SELF_ESTABL_CONTACT_EVT:
			state = SELF_UP_PEER_COMING;
			break;
		case PEER_ESTABL_CONTACT_EVT:
			state = SELF_COMING_PEER_UP;
			break;
		case SELF_LOST_CONTACT_EVT:
		case PEER_LOST_CONTACT_EVT:
			break;
		case NODE_SYNCH_END_EVT:
		case NODE_SYNCH_BEGIN_EVT:
		case NODE_FAILOVER_BEGIN_EVT:
		case NODE_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case SELF_UP_PEER_UP:
		switch (evt) {
		case SELF_LOST_CONTACT_EVT:
			state = SELF_DOWN_PEER_LEAVING;
			break;
		case PEER_LOST_CONTACT_EVT:
			state = SELF_LEAVING_PEER_DOWN;
			break;
		case NODE_SYNCH_BEGIN_EVT:
			state = NODE_SYNCHING;
			break;
		case NODE_FAILOVER_BEGIN_EVT:
			state = NODE_FAILINGOVER;
			break;
		case SELF_ESTABL_CONTACT_EVT:
		case PEER_ESTABL_CONTACT_EVT:
		case NODE_SYNCH_END_EVT:
		case NODE_FAILOVER_END_EVT:
			break;
		default:
			goto illegal_evt;
		}
		break;
	case SELF_DOWN_PEER_LEAVING:
		switch (evt) {
		case PEER_LOST_CONTACT_EVT:
			state = SELF_DOWN_PEER_DOWN;
			break;
		case SELF_ESTABL_CONTACT_EVT:
		case PEER_ESTABL_CONTACT_EVT:
		case SELF_LOST_CONTACT_EVT:
			break;
		case NODE_SYNCH_END_EVT:
		case NODE_SYNCH_BEGIN_EVT:
		case NODE_FAILOVER_BEGIN_EVT:
		case NODE_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case SELF_UP_PEER_COMING:
		switch (evt) {
		case PEER_ESTABL_CONTACT_EVT:
			state = SELF_UP_PEER_UP;
			break;
		case SELF_LOST_CONTACT_EVT:
			state = SELF_DOWN_PEER_DOWN;
			break;
		case SELF_ESTABL_CONTACT_EVT:
		case PEER_LOST_CONTACT_EVT:
		case NODE_SYNCH_END_EVT:
		case NODE_FAILOVER_BEGIN_EVT:
			break;
		case NODE_SYNCH_BEGIN_EVT:
		case NODE_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case SELF_COMING_PEER_UP:
		switch (evt) {
		case SELF_ESTABL_CONTACT_EVT:
			state = SELF_UP_PEER_UP;
			break;
		case PEER_LOST_CONTACT_EVT:
			state = SELF_DOWN_PEER_DOWN;
			break;
		case SELF_LOST_CONTACT_EVT:
		case PEER_ESTABL_CONTACT_EVT:
			break;
		case NODE_SYNCH_END_EVT:
		case NODE_SYNCH_BEGIN_EVT:
		case NODE_FAILOVER_BEGIN_EVT:
		case NODE_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case SELF_LEAVING_PEER_DOWN:
		switch (evt) {
		case SELF_LOST_CONTACT_EVT:
			state = SELF_DOWN_PEER_DOWN;
			break;
		case SELF_ESTABL_CONTACT_EVT:
		case PEER_ESTABL_CONTACT_EVT:
		case PEER_LOST_CONTACT_EVT:
			break;
		case NODE_SYNCH_END_EVT:
		case NODE_SYNCH_BEGIN_EVT:
		case NODE_FAILOVER_BEGIN_EVT:
		case NODE_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case NODE_FAILINGOVER:
		switch (evt) {
		case SELF_LOST_CONTACT_EVT:
			state = SELF_DOWN_PEER_LEAVING;
			break;
		case PEER_LOST_CONTACT_EVT:
			state = SELF_LEAVING_PEER_DOWN;
			break;
		case NODE_FAILOVER_END_EVT:
			state = SELF_UP_PEER_UP;
			break;
		case NODE_FAILOVER_BEGIN_EVT:
		case SELF_ESTABL_CONTACT_EVT:
		case PEER_ESTABL_CONTACT_EVT:
			break;
		case NODE_SYNCH_BEGIN_EVT:
		case NODE_SYNCH_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case NODE_SYNCHING:
		switch (evt) {
		case SELF_LOST_CONTACT_EVT:
			state = SELF_DOWN_PEER_LEAVING;
			break;
		case PEER_LOST_CONTACT_EVT:
			state = SELF_LEAVING_PEER_DOWN;
			break;
		case NODE_SYNCH_END_EVT:
			state = SELF_UP_PEER_UP;
			break;
		case NODE_FAILOVER_BEGIN_EVT:
			state = NODE_FAILINGOVER;
			break;
		case NODE_SYNCH_BEGIN_EVT:
		case SELF_ESTABL_CONTACT_EVT:
		case PEER_ESTABL_CONTACT_EVT:
			break;
		case NODE_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	default:
		pr_err("Unknown node fsm state %x\n", state);
		break;
	}
	n->state = state;
	return;

illegal_evt:
	pr_err("Illegal node fsm evt %x in state %x\n", evt, state);
}
static void node_lost_contact(struct tipc_node *n,
			      struct sk_buff_head *inputq)
{
	struct tipc_sock_conn *conn, *safe;
	struct tipc_link *l;
	struct list_head *conns = &n->conn_sks;
	struct sk_buff *skb;
	uint i;

	pr_debug("Lost contact with %x\n", n->addr);
	n->delete_at = jiffies + msecs_to_jiffies(NODE_CLEANUP_AFTER);

	/* Clean up broadcast state */
	tipc_bcast_remove_peer(n->net, n->bc_entry.link);

	/* Abort any ongoing link failover */
	for (i = 0; i < MAX_BEARERS; i++) {
		l = n->links[i].link;
		if (l)
			tipc_link_fsm_evt(l, LINK_FAILOVER_END_EVT);
	}

	/* Notify publications from this node */
	n->action_flags |= TIPC_NOTIFY_NODE_DOWN;

	/* Notify sockets connected to node */
	list_for_each_entry_safe(conn, safe, conns, list) {
		skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, TIPC_CONN_MSG,
				      SHORT_H_SIZE, 0, tipc_own_addr(n->net),
				      conn->peer_node, conn->port,
				      conn->peer_port, TIPC_ERR_NO_NODE);
		if (likely(skb))
			skb_queue_tail(inputq, skb);
		list_del(&conn->list);
		kfree(conn);
	}
}
/**
 * tipc_node_get_linkname - get the name of a link
 *
 * @net: the applicable net namespace
 * @bearer_id: id of the bearer
 * @addr: peer node address
 * @linkname: link name output buffer
 * @len: size of the output buffer
 *
 * Returns 0 on success
 */
int tipc_node_get_linkname(struct net *net, u32 bearer_id, u32 addr,
			   char *linkname, size_t len)
{
	struct tipc_link *link;
	int err = -EINVAL;
	struct tipc_node *node = tipc_node_find(net, addr);

	if (!node)
		return err;

	if (bearer_id >= MAX_BEARERS)
		goto exit;

	tipc_node_read_lock(node);
	link = node->links[bearer_id].link;
	if (link) {
		strncpy(linkname, tipc_link_name(link), len);
		err = 0;
	}
	tipc_node_read_unlock(node);
exit:
	tipc_node_put(node);
	return err;
}
/* Caller should hold node lock for the passed node */
static int __tipc_nl_add_node(struct tipc_nl_msg *msg, struct tipc_node *node)
{
	void *hdr;
	struct nlattr *attrs;

	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
			  NLM_F_MULTI, TIPC_NL_NODE_GET);
	if (!hdr)
		return -EMSGSIZE;

	attrs = nla_nest_start(msg->skb, TIPC_NLA_NODE);
	if (!attrs)
		goto msg_full;

	if (nla_put_u32(msg->skb, TIPC_NLA_NODE_ADDR, node->addr))
		goto attr_msg_full;
	if (node_is_up(node))
		if (nla_put_flag(msg->skb, TIPC_NLA_NODE_UP))
			goto attr_msg_full;

	nla_nest_end(msg->skb, attrs);
	genlmsg_end(msg->skb, hdr);

	return 0;

attr_msg_full:
	nla_nest_cancel(msg->skb, attrs);
msg_full:
	genlmsg_cancel(msg->skb, hdr);

	return -EMSGSIZE;
}
/**
 * tipc_node_xmit() is the general link level function for message sending
 * @net: the applicable net namespace
 * @list: chain of buffers containing message
 * @dnode: address of destination node
 * @selector: a number used for deterministic link selection
 * Consumes the buffer chain.
 * Returns 0 if success, otherwise: -ELINKCONG, -EHOSTUNREACH, -EMSGSIZE, -ENOBUFS
 */
int tipc_node_xmit(struct net *net, struct sk_buff_head *list,
		   u32 dnode, int selector)
{
	struct tipc_link_entry *le = NULL;
	struct tipc_node *n;
	struct sk_buff_head xmitq;
	int bearer_id;
	int rc;

	if (in_own_node(net, dnode)) {
		tipc_sk_rcv(net, list);
		return 0;
	}

	n = tipc_node_find(net, dnode);
	if (unlikely(!n)) {
		skb_queue_purge(list);
		return -EHOSTUNREACH;
	}

	tipc_node_read_lock(n);
	bearer_id = n->active_links[selector & 1];
	if (unlikely(bearer_id == INVALID_BEARER_ID)) {
		tipc_node_read_unlock(n);
		tipc_node_put(n);
		skb_queue_purge(list);
		return -EHOSTUNREACH;
	}

	__skb_queue_head_init(&xmitq);
	le = &n->links[bearer_id];
	spin_lock_bh(&le->lock);
	rc = tipc_link_xmit(le->link, list, &xmitq);
	spin_unlock_bh(&le->lock);
	tipc_node_read_unlock(n);

	if (unlikely(rc == -ENOBUFS))
		tipc_node_link_down(n, bearer_id, false);
	else
		tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr);

	tipc_node_put(n);

	return rc;
}
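
/* Usage sketch for a hypothetical caller: build a chain of message
 * buffers and hand it off; the chain is consumed whether transmission
 * succeeds or not:
 *
 *	struct sk_buff_head pkts;
 *
 *	__skb_queue_head_init(&pkts);
 *	... fill pkts, e.g. via tipc_msg_build() ...
 *	rc = tipc_node_xmit(net, &pkts, dnode, selector);
 *	if (rc == -EHOSTUNREACH)
 *		... destination node unknown or no link up ...
 */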
/* tipc_node_xmit_skb(): send single buffer to destination
 * Buffers sent via this function are generally TIPC_SYSTEM_IMPORTANCE
 * messages, which will not be rejected
 * The only exception is datagram messages rerouted after secondary
 * lookup, which are rare and safe to dispose of anyway.
 */
int tipc_node_xmit_skb(struct net *net, struct sk_buff *skb, u32 dnode,
		       u32 selector)
{
	struct sk_buff_head head;

	skb_queue_head_init(&head);
	__skb_queue_tail(&head, skb);
	tipc_node_xmit(net, &head, dnode, selector);
	return 0;
}
/* tipc_node_distr_xmit(): send single buffer msgs to individual destinations
 * Note: this is only for SYSTEM_IMPORTANCE messages, which cannot be rejected
 */
int tipc_node_distr_xmit(struct net *net, struct sk_buff_head *xmitq)
{
	struct sk_buff *skb;
	u32 selector, dnode;

	while ((skb = __skb_dequeue(xmitq))) {
		selector = msg_origport(buf_msg(skb));
		dnode = msg_destnode(buf_msg(skb));
		tipc_node_xmit_skb(net, skb, dnode, selector);
	}
	return 0;
}

void tipc_node_broadcast(struct net *net, struct sk_buff *skb)
{
	struct sk_buff *txskb;
	struct tipc_node *n;
	u32 dst;

	rcu_read_lock();
	list_for_each_entry_rcu(n, tipc_nodes(net), list) {
		dst = n->addr;
		if (in_own_node(net, dst))
			continue;
		if (!node_is_up(n))
			continue;
		txskb = pskb_copy(skb, GFP_ATOMIC);
		if (!txskb)
			break;
		msg_set_destnode(buf_msg(txskb), dst);
		tipc_node_xmit_skb(net, txskb, dst, 0);
	}
	rcu_read_unlock();

	kfree_skb(skb);
}
static void tipc_node_mcast_rcv(struct tipc_node *n)
{
	struct tipc_bclink_entry *be = &n->bc_entry;

	/* 'arrvq' is under inputq2's lock protection */
	spin_lock_bh(&be->inputq2.lock);
	spin_lock_bh(&be->inputq1.lock);
	skb_queue_splice_tail_init(&be->inputq1, &be->arrvq);
	spin_unlock_bh(&be->inputq1.lock);
	spin_unlock_bh(&be->inputq2.lock);
	tipc_sk_mcast_rcv(n->net, &be->arrvq, &be->inputq2);
}
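
/* Note the lock ordering in tipc_node_mcast_rcv() above: inputq2's
 * lock is taken before inputq1's because 'arrvq' is protected by
 * inputq2's lock, so the splice from inputq1 into arrvq must nest
 * inside it.
 */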
static void tipc_node_bc_sync_rcv(struct tipc_node *n, struct tipc_msg *hdr,
				  int bearer_id, struct sk_buff_head *xmitq)
{
	struct tipc_link *ucl;
	int rc;

	rc = tipc_bcast_sync_rcv(n->net, n->bc_entry.link, hdr);

	if (rc & TIPC_LINK_DOWN_EVT) {
		tipc_node_reset_links(n);
		return;
	}

	if (!(rc & TIPC_LINK_SND_STATE))
		return;

	/* If probe message, a STATE response will be sent anyway */
	if (msg_probe(hdr))
		return;

	/* Produce a STATE message carrying broadcast NACK */
	tipc_node_read_lock(n);
	ucl = n->links[bearer_id].link;
	if (ucl)
		tipc_link_build_state_msg(ucl, xmitq);
	tipc_node_read_unlock(n);
}
/**
 * tipc_node_bc_rcv - process TIPC broadcast packet arriving from off-node
 * @net: the applicable net namespace
 * @skb: TIPC packet
 * @bearer_id: id of bearer message arrived on
 *
 * Invoked with no locks held.
 */
static void tipc_node_bc_rcv(struct net *net, struct sk_buff *skb, int bearer_id)
{
	int rc;
	struct sk_buff_head xmitq;
	struct tipc_bclink_entry *be;
	struct tipc_link_entry *le;
	struct tipc_msg *hdr = buf_msg(skb);
	int usr = msg_user(hdr);
	u32 dnode = msg_destnode(hdr);
	struct tipc_node *n;

	__skb_queue_head_init(&xmitq);

	/* If NACK for other node, let rcv link for that node peek into it */
	if ((usr == BCAST_PROTOCOL) && (dnode != tipc_own_addr(net)))
		n = tipc_node_find(net, dnode);
	else
		n = tipc_node_find(net, msg_prevnode(hdr));
	if (!n) {
		kfree_skb(skb);
		return;
	}

	be = &n->bc_entry;
	le = &n->links[bearer_id];

	rc = tipc_bcast_rcv(net, be->link, skb);

	/* Broadcast ACKs are sent on a unicast link */
	if (rc & TIPC_LINK_SND_STATE) {
		tipc_node_read_lock(n);
		tipc_link_build_state_msg(le->link, &xmitq);
		tipc_node_read_unlock(n);
	}

	if (!skb_queue_empty(&xmitq))
		tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr);

	if (!skb_queue_empty(&be->inputq1))
		tipc_node_mcast_rcv(n);

	/* If reassembly or retransmission failure => reset all links to peer */
	if (rc & TIPC_LINK_DOWN_EVT)
		tipc_node_reset_links(n);

	tipc_node_put(n);
}
/**
 * tipc_node_check_state - check and if necessary update node state
 * @n: node to be checked
 * @skb: TIPC packet
 * @bearer_id: identity of bearer delivering the packet
 * @xmitq: queue for messages to be transmitted
 * Returns true if state and msg are ok, otherwise false
 */
static bool tipc_node_check_state(struct tipc_node *n, struct sk_buff *skb,
				  int bearer_id, struct sk_buff_head *xmitq)
{
	struct tipc_msg *hdr = buf_msg(skb);
	int usr = msg_user(hdr);
	int mtyp = msg_type(hdr);
	u16 oseqno = msg_seqno(hdr);
	u16 iseqno = msg_seqno(msg_get_wrapped(hdr));
	u16 exp_pkts = msg_msgcnt(hdr);
	u16 rcv_nxt, syncpt, dlv_nxt, inputq_len;
	int state = n->state;
	struct tipc_link *l, *tnl, *pl = NULL;
	struct tipc_media_addr *maddr;
	int pb_id;

	l = n->links[bearer_id].link;
	if (!l)
		return false;
	rcv_nxt = tipc_link_rcv_nxt(l);

	if (likely((state == SELF_UP_PEER_UP) && (usr != TUNNEL_PROTOCOL)))
		return true;

	/* Find parallel link, if any */
	for (pb_id = 0; pb_id < MAX_BEARERS; pb_id++) {
		if ((pb_id != bearer_id) && n->links[pb_id].link) {
			pl = n->links[pb_id].link;
			break;
		}
	}

	if (!tipc_link_validate_msg(l, hdr))
		return false;
  1401. /* Check and update node accesibility if applicable */
  1402. if (state == SELF_UP_PEER_COMING) {
  1403. if (!tipc_link_is_up(l))
  1404. return true;
  1405. if (!msg_peer_link_is_up(hdr))
  1406. return true;
  1407. tipc_node_fsm_evt(n, PEER_ESTABL_CONTACT_EVT);
  1408. }
  1409. if (state == SELF_DOWN_PEER_LEAVING) {
  1410. if (msg_peer_node_is_up(hdr))
  1411. return false;
  1412. tipc_node_fsm_evt(n, PEER_LOST_CONTACT_EVT);
  1413. return true;
  1414. }
  1415. if (state == SELF_LEAVING_PEER_DOWN)
  1416. return false;
  1417. /* Ignore duplicate packets */
  1418. if ((usr != LINK_PROTOCOL) && less(oseqno, rcv_nxt))
  1419. return true;
  1420. /* Initiate or update failover mode if applicable */
  1421. if ((usr == TUNNEL_PROTOCOL) && (mtyp == FAILOVER_MSG)) {
  1422. syncpt = oseqno + exp_pkts - 1;
  1423. if (pl && tipc_link_is_up(pl)) {
  1424. __tipc_node_link_down(n, &pb_id, xmitq, &maddr);
  1425. tipc_skb_queue_splice_tail_init(tipc_link_inputq(pl),
  1426. tipc_link_inputq(l));
  1427. }
  1428. /* If parallel link was already down, and this happened before
  1429. * the tunnel link came up, FAILOVER was never sent. Ensure that
  1430. * FAILOVER is sent to get peer out of NODE_FAILINGOVER state.
  1431. */
  1432. if (n->state != NODE_FAILINGOVER && !n->failover_sent) {
  1433. tipc_link_create_dummy_tnl_msg(l, xmitq);
  1434. n->failover_sent = true;
  1435. }
  1436. /* If pkts arrive out of order, use lowest calculated syncpt */
  1437. if (less(syncpt, n->sync_point))
  1438. n->sync_point = syncpt;
  1439. }
  1440. /* Open parallel link when tunnel link reaches synch point */
  1441. if ((n->state == NODE_FAILINGOVER) && tipc_link_is_up(l)) {
  1442. if (!more(rcv_nxt, n->sync_point))
  1443. return true;
  1444. tipc_node_fsm_evt(n, NODE_FAILOVER_END_EVT);
  1445. if (pl)
  1446. tipc_link_fsm_evt(pl, LINK_FAILOVER_END_EVT);
  1447. return true;
  1448. }
  1449. /* No synching needed if only one link */
  1450. if (!pl || !tipc_link_is_up(pl))
  1451. return true;
  1452. /* Initiate synch mode if applicable */
  1453. if ((usr == TUNNEL_PROTOCOL) && (mtyp == SYNCH_MSG) && (oseqno == 1)) {
  1454. syncpt = iseqno + exp_pkts - 1;
  1455. if (!tipc_link_is_up(l))
  1456. __tipc_node_link_up(n, bearer_id, xmitq);
  1457. if (n->state == SELF_UP_PEER_UP) {
  1458. n->sync_point = syncpt;
  1459. tipc_link_fsm_evt(l, LINK_SYNCH_BEGIN_EVT);
  1460. tipc_node_fsm_evt(n, NODE_SYNCH_BEGIN_EVT);
  1461. }
  1462. }
  1463. /* Open tunnel link when parallel link reaches synch point */
  1464. if (n->state == NODE_SYNCHING) {
  1465. if (tipc_link_is_synching(l)) {
  1466. tnl = l;
  1467. } else {
  1468. tnl = pl;
  1469. pl = l;
  1470. }
  1471. inputq_len = skb_queue_len(tipc_link_inputq(pl));
  1472. dlv_nxt = tipc_link_rcv_nxt(pl) - inputq_len;
  1473. if (more(dlv_nxt, n->sync_point)) {
  1474. tipc_link_fsm_evt(tnl, LINK_SYNCH_END_EVT);
  1475. tipc_node_fsm_evt(n, NODE_SYNCH_END_EVT);
  1476. return true;
  1477. }
  1478. if (l == pl)
  1479. return true;
  1480. if ((usr == TUNNEL_PROTOCOL) && (mtyp == SYNCH_MSG))
  1481. return true;
  1482. if (usr == LINK_PROTOCOL)
  1483. return true;
  1484. return false;
  1485. }
  1486. return true;
  1487. }

/**
 * tipc_rcv - process TIPC packets/messages arriving from off-node
 * @net: the applicable net namespace
 * @skb: TIPC packet
 * @b: pointer to bearer message arrived on
 *
 * Invoked with no locks held. Bearer pointer must point to a valid bearer
 * structure (i.e. cannot be NULL), but bearer can be inactive.
 */
void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b)
{
        struct sk_buff_head xmitq;
        struct tipc_node *n;
        struct tipc_msg *hdr;
        int bearer_id = b->identity;
        struct tipc_link_entry *le;
        u32 self = tipc_own_addr(net);
        int usr, rc = 0;
        u16 bc_ack;

        __skb_queue_head_init(&xmitq);

        /* Ensure message is well-formed before touching the header */
        if (unlikely(!tipc_msg_validate(&skb)))
                goto discard;
        hdr = buf_msg(skb);
        usr = msg_user(hdr);
        bc_ack = msg_bcast_ack(hdr);

        /* Handle arrival of discovery or broadcast packet */
        if (unlikely(msg_non_seq(hdr))) {
                if (unlikely(usr == LINK_CONFIG))
                        return tipc_disc_rcv(net, skb, b);
                else
                        return tipc_node_bc_rcv(net, skb, bearer_id);
        }

        /* Discard unicast link messages destined for another node */
        if (unlikely(!msg_short(hdr) && (msg_destnode(hdr) != self)))
                goto discard;

        /* Locate neighboring node that sent packet */
        n = tipc_node_find(net, msg_prevnode(hdr));
        if (unlikely(!n))
                goto discard;
        le = &n->links[bearer_id];

        /* Ensure broadcast reception is in synch with peer's send state */
        if (unlikely(usr == LINK_PROTOCOL))
                tipc_node_bc_sync_rcv(n, hdr, bearer_id, &xmitq);
        else if (unlikely(tipc_link_acked(n->bc_entry.link) != bc_ack))
                tipc_bcast_ack_rcv(net, n->bc_entry.link, hdr);

        /* Receive packet directly if conditions permit */
        tipc_node_read_lock(n);
        if (likely((n->state == SELF_UP_PEER_UP) && (usr != TUNNEL_PROTOCOL))) {
                spin_lock_bh(&le->lock);
                if (le->link) {
                        rc = tipc_link_rcv(le->link, skb, &xmitq);
                        skb = NULL;
                }
                spin_unlock_bh(&le->lock);
        }
        tipc_node_read_unlock(n);

        /* Check/update node state before receiving */
        if (unlikely(skb)) {
                /* Release the node reference also on the error path */
                if (unlikely(skb_linearize(skb)))
                        goto out_node_put;
                tipc_node_write_lock(n);
                if (tipc_node_check_state(n, skb, bearer_id, &xmitq)) {
                        if (le->link) {
                                rc = tipc_link_rcv(le->link, skb, &xmitq);
                                skb = NULL;
                        }
                }
                tipc_node_write_unlock(n);
        }

        if (unlikely(rc & TIPC_LINK_UP_EVT))
                tipc_node_link_up(n, bearer_id, &xmitq);

        if (unlikely(rc & TIPC_LINK_DOWN_EVT))
                tipc_node_link_down(n, bearer_id, false);

        if (unlikely(!skb_queue_empty(&n->bc_entry.namedq)))
                tipc_named_rcv(net, &n->bc_entry.namedq);

        if (unlikely(!skb_queue_empty(&n->bc_entry.inputq1)))
                tipc_node_mcast_rcv(n);

        if (!skb_queue_empty(&le->inputq))
                tipc_sk_rcv(net, &le->inputq);

        if (!skb_queue_empty(&xmitq))
                tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr);

out_node_put:
        tipc_node_put(n);
discard:
        kfree_skb(skb);
}
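
/* tipc_node_apply_property - propagate an updated bearer property
 * (tolerance or MTU) to the corresponding link endpoint on every
 * known peer node
 */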
void tipc_node_apply_property(struct net *net, struct tipc_bearer *b,
                              int prop)
{
        struct tipc_net *tn = tipc_net(net);
        int bearer_id = b->identity;
        struct sk_buff_head xmitq;
        struct tipc_link_entry *e;
        struct tipc_node *n;

        __skb_queue_head_init(&xmitq);

        rcu_read_lock();
        list_for_each_entry_rcu(n, &tn->node_list, list) {
                tipc_node_write_lock(n);
                e = &n->links[bearer_id];
                if (e->link) {
                        if (prop == TIPC_NLA_PROP_TOL)
                                tipc_link_set_tolerance(e->link, b->tolerance,
                                                        &xmitq);
                        else if (prop == TIPC_NLA_PROP_MTU)
                                tipc_link_set_mtu(e->link, b->mtu);
                }
                tipc_node_write_unlock(n);
                tipc_bearer_xmit(net, bearer_id, &xmitq, &e->maddr);
        }
        rcu_read_unlock();
}
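
/* tipc_nl_peer_rm - netlink handler removing an off-line peer node;
 * fails with -EBUSY unless all links to the peer are already down
 */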
int tipc_nl_peer_rm(struct sk_buff *skb, struct genl_info *info)
{
        struct net *net = sock_net(skb->sk);
        struct tipc_net *tn = net_generic(net, tipc_net_id);
        struct nlattr *attrs[TIPC_NLA_NET_MAX + 1];
        struct tipc_node *peer;
        u32 addr;
        int err;

        /* We identify the peer by its net */
        if (!info->attrs[TIPC_NLA_NET])
                return -EINVAL;

        err = nla_parse_nested(attrs, TIPC_NLA_NET_MAX,
                               info->attrs[TIPC_NLA_NET], tipc_nl_net_policy,
                               info->extack);
        if (err)
                return err;

        if (!attrs[TIPC_NLA_NET_ADDR])
                return -EINVAL;

        addr = nla_get_u32(attrs[TIPC_NLA_NET_ADDR]);

        if (in_own_node(net, addr))
                return -ENOTSUPP;

        spin_lock_bh(&tn->node_list_lock);
        peer = tipc_node_find(net, addr);
        if (!peer) {
                spin_unlock_bh(&tn->node_list_lock);
                return -ENXIO;
        }

        tipc_node_write_lock(peer);
        if (peer->state != SELF_DOWN_PEER_DOWN &&
            peer->state != SELF_DOWN_PEER_LEAVING) {
                tipc_node_write_unlock(peer);
                err = -EBUSY;
                goto err_out;
        }

        tipc_node_clear_links(peer);
        tipc_node_write_unlock(peer);
        tipc_node_delete(peer);

        err = 0;
err_out:
        tipc_node_put(peer);
        spin_unlock_bh(&tn->node_list_lock);

        return err;
}
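
/* tipc_nl_node_dump - netlink dump handler walking the node list;
 * resumable via cb->args[] so large clusters span several dump calls
 */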
int tipc_nl_node_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
        int err;
        struct net *net = sock_net(skb->sk);
        struct tipc_net *tn = net_generic(net, tipc_net_id);
        int done = cb->args[0];
        int last_addr = cb->args[1];
        struct tipc_node *node;
        struct tipc_nl_msg msg;

        if (done)
                return 0;

        msg.skb = skb;
        msg.portid = NETLINK_CB(cb->skb).portid;
        msg.seq = cb->nlh->nlmsg_seq;

        rcu_read_lock();
        if (last_addr) {
                node = tipc_node_find(net, last_addr);
                if (!node) {
                        rcu_read_unlock();
                        /* We never set seq or call nl_dump_check_consistent().
                         * This means that setting prev_seq here will cause the
                         * consistency check to fail in the netlink callback
                         * handler, resulting in the NLMSG_DONE message having
                         * the NLM_F_DUMP_INTR flag set if the node state
                         * changed while we released the lock.
                         */
                        cb->prev_seq = 1;
                        return -EPIPE;
                }
                tipc_node_put(node);
        }

        list_for_each_entry_rcu(node, &tn->node_list, list) {
                if (last_addr) {
                        if (node->addr == last_addr)
                                last_addr = 0;
                        else
                                continue;
                }

                tipc_node_read_lock(node);
                err = __tipc_nl_add_node(&msg, node);
                if (err) {
                        last_addr = node->addr;
                        tipc_node_read_unlock(node);
                        goto out;
                }
                tipc_node_read_unlock(node);
        }
        done = 1;
out:
        cb->args[0] = done;
        cb->args[1] = last_addr;
        rcu_read_unlock();

        return skb->len;
}

/* tipc_node_find_by_name - locate owner node of link by link's name
 * @net: the applicable net namespace
 * @link_name: pointer to link name string
 * @bearer_id: pointer to index in 'node->links' array where the link was found.
 *
 * Returns pointer to node owning the link, or NULL if no matching link is found.
 */
static struct tipc_node *tipc_node_find_by_name(struct net *net,
                                                const char *link_name,
                                                unsigned int *bearer_id)
{
        struct tipc_net *tn = net_generic(net, tipc_net_id);
        struct tipc_link *l;
        struct tipc_node *n;
        struct tipc_node *found_node = NULL;
        int i;

        *bearer_id = 0;
        rcu_read_lock();
        list_for_each_entry_rcu(n, &tn->node_list, list) {
                tipc_node_read_lock(n);
                for (i = 0; i < MAX_BEARERS; i++) {
                        l = n->links[i].link;
                        if (l && !strcmp(tipc_link_name(l), link_name)) {
                                *bearer_id = i;
                                found_node = n;
                                break;
                        }
                }
                tipc_node_read_unlock(n);
                if (found_node)
                        break;
        }
        rcu_read_unlock();

        return found_node;
}
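
/* tipc_nl_node_set_link - netlink handler updating link properties
 * (tolerance, priority, window); the broadcast link is delegated to
 * tipc_nl_bc_link_set()
 */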
int tipc_nl_node_set_link(struct sk_buff *skb, struct genl_info *info)
{
        int err;
        int res = 0;
        int bearer_id;
        char *name;
        struct tipc_link *link;
        struct tipc_node *node;
        struct sk_buff_head xmitq;
        struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
        struct net *net = sock_net(skb->sk);

        __skb_queue_head_init(&xmitq);

        if (!info->attrs[TIPC_NLA_LINK])
                return -EINVAL;

        err = nla_parse_nested(attrs, TIPC_NLA_LINK_MAX,
                               info->attrs[TIPC_NLA_LINK],
                               tipc_nl_link_policy, info->extack);
        if (err)
                return err;

        if (!attrs[TIPC_NLA_LINK_NAME])
                return -EINVAL;

        name = nla_data(attrs[TIPC_NLA_LINK_NAME]);

        if (strcmp(name, tipc_bclink_name) == 0)
                return tipc_nl_bc_link_set(net, attrs);

        node = tipc_node_find_by_name(net, name, &bearer_id);
        if (!node)
                return -EINVAL;

        tipc_node_read_lock(node);

        link = node->links[bearer_id].link;
        if (!link) {
                res = -EINVAL;
                goto out;
        }

        if (attrs[TIPC_NLA_LINK_PROP]) {
                struct nlattr *props[TIPC_NLA_PROP_MAX + 1];

                err = tipc_nl_parse_link_prop(attrs[TIPC_NLA_LINK_PROP],
                                              props);
                if (err) {
                        res = err;
                        goto out;
                }

                if (props[TIPC_NLA_PROP_TOL]) {
                        u32 tol;

                        tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
                        tipc_link_set_tolerance(link, tol, &xmitq);
                }
                if (props[TIPC_NLA_PROP_PRIO]) {
                        u32 prio;

                        prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
                        tipc_link_set_prio(link, prio, &xmitq);
                }
                if (props[TIPC_NLA_PROP_WIN]) {
                        u32 win;

                        win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
                        tipc_link_set_queue_limits(link, win);
                }
        }

out:
        tipc_node_read_unlock(node);
        tipc_bearer_xmit(net, bearer_id, &xmitq, &node->links[bearer_id].maddr);
        return res;
}
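
/* tipc_nl_node_get_link - netlink handler replying with the attributes
 * of a single link, identified by name; also handles the broadcast link
 */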
int tipc_nl_node_get_link(struct sk_buff *skb, struct genl_info *info)
{
        struct net *net = genl_info_net(info);
        struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
        struct tipc_nl_msg msg;
        char *name;
        int err;

        msg.portid = info->snd_portid;
        msg.seq = info->snd_seq;

        if (!info->attrs[TIPC_NLA_LINK])
                return -EINVAL;

        err = nla_parse_nested(attrs, TIPC_NLA_LINK_MAX,
                               info->attrs[TIPC_NLA_LINK],
                               tipc_nl_link_policy, info->extack);
        if (err)
                return err;

        if (!attrs[TIPC_NLA_LINK_NAME])
                return -EINVAL;

        name = nla_data(attrs[TIPC_NLA_LINK_NAME]);

        msg.skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
        if (!msg.skb)
                return -ENOMEM;

        if (strcmp(name, tipc_bclink_name) == 0) {
                err = tipc_nl_add_bc_link(net, &msg);
                if (err)
                        goto err_free;
        } else {
                int bearer_id;
                struct tipc_node *node;
                struct tipc_link *link;

                node = tipc_node_find_by_name(net, name, &bearer_id);
                if (!node) {
                        err = -EINVAL;
                        goto err_free;
                }

                tipc_node_read_lock(node);
                link = node->links[bearer_id].link;
                if (!link) {
                        tipc_node_read_unlock(node);
                        err = -EINVAL;
                        goto err_free;
                }

                err = __tipc_nl_add_link(net, &msg, link, 0);
                tipc_node_read_unlock(node);
                if (err)
                        goto err_free;
        }

        return genlmsg_reply(msg.skb, info);

err_free:
        nlmsg_free(msg.skb);
        return err;
}
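
/* tipc_nl_node_reset_link_stats - netlink handler zeroing the statistics
 * of the named link; the per-entry spinlock serializes the reset with
 * link teardown
 */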
int tipc_nl_node_reset_link_stats(struct sk_buff *skb, struct genl_info *info)
{
        int err;
        char *link_name;
        unsigned int bearer_id;
        struct tipc_link *link;
        struct tipc_node *node;
        struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
        struct net *net = sock_net(skb->sk);
        struct tipc_link_entry *le;

        if (!info->attrs[TIPC_NLA_LINK])
                return -EINVAL;

        err = nla_parse_nested(attrs, TIPC_NLA_LINK_MAX,
                               info->attrs[TIPC_NLA_LINK],
                               tipc_nl_link_policy, info->extack);
        if (err)
                return err;

        if (!attrs[TIPC_NLA_LINK_NAME])
                return -EINVAL;

        link_name = nla_data(attrs[TIPC_NLA_LINK_NAME]);

        if (strcmp(link_name, tipc_bclink_name) == 0) {
                err = tipc_bclink_reset_stats(net);
                if (err)
                        return err;
                return 0;
        }

        node = tipc_node_find_by_name(net, link_name, &bearer_id);
        if (!node)
                return -EINVAL;

        le = &node->links[bearer_id];
        tipc_node_read_lock(node);
        spin_lock_bh(&le->lock);
        link = node->links[bearer_id].link;
        if (!link) {
                spin_unlock_bh(&le->lock);
                tipc_node_read_unlock(node);
                return -EINVAL;
        }
        tipc_link_reset_stats(link);
        spin_unlock_bh(&le->lock);
        tipc_node_read_unlock(node);
        return 0;
}

/* Caller should hold node lock */
static int __tipc_nl_add_node_links(struct net *net, struct tipc_nl_msg *msg,
                                    struct tipc_node *node, u32 *prev_link)
{
        u32 i;
        int err;

        for (i = *prev_link; i < MAX_BEARERS; i++) {
                *prev_link = i;

                if (!node->links[i].link)
                        continue;

                err = __tipc_nl_add_link(net, msg,
                                         node->links[i].link, NLM_F_MULTI);
                if (err)
                        return err;
        }
        *prev_link = 0;

        return 0;
}
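
/* tipc_nl_node_dump_link - netlink dump handler emitting one message per
 * link in the cluster; cb->args[] records the node and link to resume from
 */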
int tipc_nl_node_dump_link(struct sk_buff *skb, struct netlink_callback *cb)
{
        struct net *net = sock_net(skb->sk);
        struct tipc_net *tn = net_generic(net, tipc_net_id);
        struct tipc_node *node;
        struct tipc_nl_msg msg;
        u32 prev_node = cb->args[0];
        u32 prev_link = cb->args[1];
        int done = cb->args[2];
        int err;

        if (done)
                return 0;

        msg.skb = skb;
        msg.portid = NETLINK_CB(cb->skb).portid;
        msg.seq = cb->nlh->nlmsg_seq;

        rcu_read_lock();
        if (prev_node) {
                node = tipc_node_find(net, prev_node);
                if (!node) {
                        /* We never set seq or call nl_dump_check_consistent().
                         * This means that setting prev_seq here will cause the
                         * consistency check to fail in the netlink callback
                         * handler, resulting in the last NLMSG_DONE message
                         * having the NLM_F_DUMP_INTR flag set.
                         */
                        cb->prev_seq = 1;
                        goto out;
                }
                tipc_node_put(node);

                list_for_each_entry_continue_rcu(node, &tn->node_list,
                                                 list) {
                        tipc_node_read_lock(node);
                        err = __tipc_nl_add_node_links(net, &msg, node,
                                                       &prev_link);
                        tipc_node_read_unlock(node);
                        if (err)
                                goto out;

                        prev_node = node->addr;
                }
        } else {
                err = tipc_nl_add_bc_link(net, &msg);
                if (err)
                        goto out;

                list_for_each_entry_rcu(node, &tn->node_list, list) {
                        tipc_node_read_lock(node);
                        err = __tipc_nl_add_node_links(net, &msg, node,
                                                       &prev_link);
                        tipc_node_read_unlock(node);
                        if (err)
                                goto out;

                        prev_node = node->addr;
                }
        }
        done = 1;
out:
        rcu_read_unlock();

        cb->args[0] = prev_node;
        cb->args[1] = prev_link;
        cb->args[2] = done;

        return skb->len;
}
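
/* tipc_nl_node_set_monitor - netlink handler setting the activation
 * threshold of the link monitor
 */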
int tipc_nl_node_set_monitor(struct sk_buff *skb, struct genl_info *info)
{
        struct nlattr *attrs[TIPC_NLA_MON_MAX + 1];
        struct net *net = sock_net(skb->sk);
        int err;

        if (!info->attrs[TIPC_NLA_MON])
                return -EINVAL;

        err = nla_parse_nested(attrs, TIPC_NLA_MON_MAX,
                               info->attrs[TIPC_NLA_MON],
                               tipc_nl_monitor_policy, info->extack);
        if (err)
                return err;

        if (attrs[TIPC_NLA_MON_ACTIVATION_THRESHOLD]) {
                u32 val;

                val = nla_get_u32(attrs[TIPC_NLA_MON_ACTIVATION_THRESHOLD]);
                err = tipc_nl_monitor_set_threshold(net, val);
                if (err)
                        return err;
        }

        return 0;
}
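
/* __tipc_nl_add_monitor_prop - append the current monitor activation
 * threshold to a netlink message, unwinding cleanly on -EMSGSIZE
 */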
static int __tipc_nl_add_monitor_prop(struct net *net, struct tipc_nl_msg *msg)
{
        struct nlattr *attrs;
        void *hdr;
        u32 val;

        hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
                          0, TIPC_NL_MON_GET);
        if (!hdr)
                return -EMSGSIZE;

        attrs = nla_nest_start(msg->skb, TIPC_NLA_MON);
        if (!attrs)
                goto msg_full;

        val = tipc_nl_monitor_get_threshold(net);

        if (nla_put_u32(msg->skb, TIPC_NLA_MON_ACTIVATION_THRESHOLD, val))
                goto attr_msg_full;

        nla_nest_end(msg->skb, attrs);
        genlmsg_end(msg->skb, hdr);

        return 0;

attr_msg_full:
        nla_nest_cancel(msg->skb, attrs);
msg_full:
        genlmsg_cancel(msg->skb, hdr);

        return -EMSGSIZE;
}
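
/* tipc_nl_node_get_monitor - netlink handler replying with the monitor
 * activation threshold
 */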
int tipc_nl_node_get_monitor(struct sk_buff *skb, struct genl_info *info)
{
        struct net *net = sock_net(skb->sk);
        struct tipc_nl_msg msg;
        int err;

        msg.skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
        if (!msg.skb)
                return -ENOMEM;
        msg.portid = info->snd_portid;
        msg.seq = info->snd_seq;

        err = __tipc_nl_add_monitor_prop(net, &msg);
        if (err) {
                nlmsg_free(msg.skb);
                return err;
        }

        return genlmsg_reply(msg.skb, info);
}
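
/* tipc_nl_node_dump_monitor - netlink dump handler emitting monitor state
 * per bearer; cb->args[0] records the bearer to resume from
 */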
int tipc_nl_node_dump_monitor(struct sk_buff *skb, struct netlink_callback *cb)
{
        struct net *net = sock_net(skb->sk);
        u32 prev_bearer = cb->args[0];
        struct tipc_nl_msg msg;
        int bearer_id;
        int err;

        if (prev_bearer == MAX_BEARERS)
                return 0;

        msg.skb = skb;
        msg.portid = NETLINK_CB(cb->skb).portid;
        msg.seq = cb->nlh->nlmsg_seq;

        rtnl_lock();
        for (bearer_id = prev_bearer; bearer_id < MAX_BEARERS; bearer_id++) {
                err = __tipc_nl_add_monitor(net, &msg, bearer_id);
                if (err)
                        break;
        }
        rtnl_unlock();
        cb->args[0] = bearer_id;

        return skb->len;
}
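
/* tipc_nl_node_dump_monitor_peer - netlink dump handler emitting the
 * monitor view of the peers on one bearer; the bearer reference is parsed
 * from the request on the first call and carried in cb->args[] thereafter
 */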
int tipc_nl_node_dump_monitor_peer(struct sk_buff *skb,
                                   struct netlink_callback *cb)
{
        struct net *net = sock_net(skb->sk);
        u32 prev_node = cb->args[1];
        u32 bearer_id = cb->args[2];
        int done = cb->args[0];
        struct tipc_nl_msg msg;
        int err;

        if (!prev_node) {
                struct nlattr **attrs;
                struct nlattr *mon[TIPC_NLA_MON_MAX + 1];

                err = tipc_nlmsg_parse(cb->nlh, &attrs);
                if (err)
                        return err;

                if (!attrs[TIPC_NLA_MON])
                        return -EINVAL;

                err = nla_parse_nested(mon, TIPC_NLA_MON_MAX,
                                       attrs[TIPC_NLA_MON],
                                       tipc_nl_monitor_policy, NULL);
                if (err)
                        return err;

                if (!mon[TIPC_NLA_MON_REF])
                        return -EINVAL;

                bearer_id = nla_get_u32(mon[TIPC_NLA_MON_REF]);

                if (bearer_id >= MAX_BEARERS)
                        return -EINVAL;
        }

        if (done)
                return 0;

        msg.skb = skb;
        msg.portid = NETLINK_CB(cb->skb).portid;
        msg.seq = cb->nlh->nlmsg_seq;

        rtnl_lock();
        err = tipc_nl_add_monitor_peer(net, &msg, bearer_id, &prev_node);
        if (!err)
                done = 1;

        rtnl_unlock();
        cb->args[0] = done;
        cb->args[1] = prev_node;
        cb->args[2] = bearer_id;

        return skb->len;
}