/*
 * net/tipc/name_table.c: TIPC name table code
 *
 * Copyright (c) 2000-2006, 2014-2015, Ericsson AB
 * Copyright (c) 2004-2008, 2010-2014, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <net/sock.h>
#include "core.h"
#include "netlink.h"
#include "name_table.h"
#include "name_distr.h"
#include "subscr.h"
#include "bcast.h"
#include "addr.h"
#include <net/genetlink.h>

#define TIPC_NAMETBL_SIZE 1024	/* must be a power of 2 */

static const struct nla_policy
tipc_nl_name_table_policy[TIPC_NLA_NAME_TABLE_MAX + 1] = {
	[TIPC_NLA_NAME_TABLE_UNSPEC]	= { .type = NLA_UNSPEC },
	[TIPC_NLA_NAME_TABLE_PUBL]	= { .type = NLA_NESTED }
};

/**
 * struct name_info - name sequence publication info
 * @node_list: circular list of publications made by own node
 * @cluster_list: circular list of publications made by own cluster
 * @zone_list: circular list of publications made by own zone
 * @node_list_size: number of entries in "node_list"
 * @cluster_list_size: number of entries in "cluster_list"
 * @zone_list_size: number of entries in "zone_list"
 *
 * Note: The zone list always contains at least one entry, since all
 *       publications of the associated name sequence belong to it.
 *       (The cluster and node lists may be empty.)
 */
struct name_info {
	struct list_head node_list;
	struct list_head cluster_list;
	struct list_head zone_list;
	u32 node_list_size;
	u32 cluster_list_size;
	u32 zone_list_size;
};

/**
 * struct sub_seq - container for all published instances of a name sequence
 * @lower: name sequence lower bound
 * @upper: name sequence upper bound
 * @info: pointer to name sequence publication info
 */
struct sub_seq {
	u32 lower;
	u32 upper;
	struct name_info *info;
};
/**
 * struct name_seq - container for all published instances of a name type
 * @type: 32 bit 'type' value for name sequence
 * @sseqs: pointer to dynamically-sized array of sub-sequences of this 'type';
 *         sub-sequences are sorted in ascending order
 * @alloc: number of sub-sequences currently in array
 * @first_free: array index of first unused sub-sequence entry
 * @ns_list: links to adjacent name sequences in hash chain
 * @subscriptions: list of subscriptions for this 'type'
 * @lock: spinlock controlling access to publication lists of all sub-sequences
 * @rcu: RCU callback head used for deferred freeing
 */
struct name_seq {
	u32 type;
	struct sub_seq *sseqs;
	u32 alloc;
	u32 first_free;
	struct hlist_node ns_list;
	struct list_head subscriptions;
	spinlock_t lock;
	struct rcu_head rcu;
};
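
/*
 * hash - map a name 'type' value to a bucket in the name table hash array
 *
 * The bitwise AND works as a cheap modulo only because TIPC_NAMETBL_SIZE is
 * a power of 2 (e.g. type 1025 maps to bucket 1 with a table size of 1024).
 */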
static int hash(int x)
{
	return x & (TIPC_NAMETBL_SIZE - 1);
}

/**
 * publ_create - create a publication structure
 */
static struct publication *publ_create(u32 type, u32 lower, u32 upper,
				       u32 scope, u32 node, u32 port_ref,
				       u32 key)
{
	struct publication *publ = kzalloc(sizeof(*publ), GFP_ATOMIC);

	if (publ == NULL) {
		pr_warn("Publication creation failure, no memory\n");
		return NULL;
	}

	publ->type = type;
	publ->lower = lower;
	publ->upper = upper;
	publ->scope = scope;
	publ->node = node;
	publ->ref = port_ref;
	publ->key = key;
	INIT_LIST_HEAD(&publ->pport_list);
	return publ;
}

/**
 * tipc_subseq_alloc - allocate a specified number of sub-sequence structures
 */
static struct sub_seq *tipc_subseq_alloc(u32 cnt)
{
	return kcalloc(cnt, sizeof(struct sub_seq), GFP_ATOMIC);
}

/**
 * tipc_nameseq_create - create a name sequence structure for the specified 'type'
 *
 * Allocates a single sub-sequence structure and sets it to all 0's.
 */
static struct name_seq *tipc_nameseq_create(u32 type, struct hlist_head *seq_head)
{
	struct name_seq *nseq = kzalloc(sizeof(*nseq), GFP_ATOMIC);
	struct sub_seq *sseq = tipc_subseq_alloc(1);

	if (!nseq || !sseq) {
		pr_warn("Name sequence creation failed, no memory\n");
		kfree(nseq);
		kfree(sseq);
		return NULL;
	}

	spin_lock_init(&nseq->lock);
	nseq->type = type;
	nseq->sseqs = sseq;
	nseq->alloc = 1;
	INIT_HLIST_NODE(&nseq->ns_list);
	INIT_LIST_HEAD(&nseq->subscriptions);
	hlist_add_head_rcu(&nseq->ns_list, seq_head);
	return nseq;
}

/**
 * nameseq_find_subseq - find sub-sequence (if any) matching a name instance
 *
 * Very time-critical, so binary searches through sub-sequence array.
 */
static struct sub_seq *nameseq_find_subseq(struct name_seq *nseq,
					   u32 instance)
{
	struct sub_seq *sseqs = nseq->sseqs;
	int low = 0;
	int high = nseq->first_free - 1;
	int mid;

	while (low <= high) {
		mid = (low + high) / 2;
		if (instance < sseqs[mid].lower)
			high = mid - 1;
		else if (instance > sseqs[mid].upper)
			low = mid + 1;
		else
			return &sseqs[mid];
	}
	return NULL;
}

/**
 * nameseq_locate_subseq - determine position of name instance in sub-sequence
 *
 * Returns index in sub-sequence array of the entry that contains the specified
 * instance value; if no entry contains that value, returns the position
 * where a new entry for it would be inserted in the array.
 *
 * Note: Similar to binary search code for locating a sub-sequence.
 */
static u32 nameseq_locate_subseq(struct name_seq *nseq, u32 instance)
{
	struct sub_seq *sseqs = nseq->sseqs;
	int low = 0;
	int high = nseq->first_free - 1;
	int mid;
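
	/* For example, with sub-sequences {10-19} and {30-39}, instance 25
	 * yields index 1, i.e. the slot where a new {20-29} sub-sequence
	 * would have to be inserted.
	 */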
	while (low <= high) {
		mid = (low + high) / 2;
		if (instance < sseqs[mid].lower)
			high = mid - 1;
		else if (instance > sseqs[mid].upper)
			low = mid + 1;
		else
			return mid;
	}
	return low;
}

/**
 * tipc_nameseq_insert_publ
 */
static struct publication *tipc_nameseq_insert_publ(struct net *net,
						    struct name_seq *nseq,
						    u32 type, u32 lower,
						    u32 upper, u32 scope,
						    u32 node, u32 port, u32 key)
{
	struct tipc_subscription *s;
	struct tipc_subscription *st;
	struct publication *publ;
	struct sub_seq *sseq;
	struct name_info *info;
	int created_subseq = 0;

	sseq = nameseq_find_subseq(nseq, lower);
	if (sseq) {
		/* Lower end overlaps existing entry => need an exact match */
		if ((sseq->lower != lower) || (sseq->upper != upper)) {
			return NULL;
		}

		info = sseq->info;

		/* Check if an identical publication already exists */
		list_for_each_entry(publ, &info->zone_list, zone_list) {
			if ((publ->ref == port) && (publ->key == key) &&
			    (!publ->node || (publ->node == node)))
				return NULL;
		}
	} else {
		u32 inspos;
		struct sub_seq *freesseq;

		/* Find where lower end should be inserted */
		inspos = nameseq_locate_subseq(nseq, lower);

		/* Fail if upper end overlaps into an existing entry */
		if ((inspos < nseq->first_free) &&
		    (upper >= nseq->sseqs[inspos].lower)) {
			return NULL;
		}

		/* Ensure there is space for new sub-sequence */
		if (nseq->first_free == nseq->alloc) {
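			/* The array is grown by doubling, which keeps the
			 * copying cost amortized O(1) per inserted
			 * sub-sequence.
			 */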
			struct sub_seq *sseqs = tipc_subseq_alloc(nseq->alloc * 2);

			if (!sseqs) {
				pr_warn("Cannot publish {%u,%u,%u}, no memory\n",
					type, lower, upper);
				return NULL;
			}
			memcpy(sseqs, nseq->sseqs,
			       nseq->alloc * sizeof(struct sub_seq));
			kfree(nseq->sseqs);
			nseq->sseqs = sseqs;
			nseq->alloc *= 2;
		}

		info = kzalloc(sizeof(*info), GFP_ATOMIC);
		if (!info) {
			pr_warn("Cannot publish {%u,%u,%u}, no memory\n",
				type, lower, upper);
			return NULL;
		}

		INIT_LIST_HEAD(&info->node_list);
		INIT_LIST_HEAD(&info->cluster_list);
		INIT_LIST_HEAD(&info->zone_list);

		/* Insert new sub-sequence */
		sseq = &nseq->sseqs[inspos];
		freesseq = &nseq->sseqs[nseq->first_free];
		memmove(sseq + 1, sseq, (freesseq - sseq) * sizeof(*sseq));
		memset(sseq, 0, sizeof(*sseq));
		nseq->first_free++;
		sseq->lower = lower;
		sseq->upper = upper;
		sseq->info = info;
		created_subseq = 1;
	}

	/* Insert a publication */
	publ = publ_create(type, lower, upper, scope, node, port, key);
	if (!publ)
		return NULL;

	list_add(&publ->zone_list, &info->zone_list);
	info->zone_list_size++;

	if (in_own_cluster(net, node)) {
		list_add(&publ->cluster_list, &info->cluster_list);
		info->cluster_list_size++;
	}

	if (in_own_node(net, node)) {
		list_add(&publ->node_list, &info->node_list);
		info->node_list_size++;
	}

	/* Any subscriptions waiting for notification? */
	list_for_each_entry_safe(s, st, &nseq->subscriptions, nameseq_list) {
		tipc_subscr_report_overlap(s,
					   publ->lower,
					   publ->upper,
					   TIPC_PUBLISHED,
					   publ->ref,
					   publ->node,
					   created_subseq);
	}
	return publ;
}

/**
 * tipc_nameseq_remove_publ
 *
 * NOTE: There may be cases where TIPC is asked to remove a publication
 * that is not in the name table. For example, if another node issues a
 * publication for a name sequence that overlaps an existing name sequence
 * the publication will not be recorded, which means the publication won't
 * be found when the name sequence is later withdrawn by that node.
 * A failed withdraw request simply returns a failure indication and lets the
 * caller issue any error or warning messages associated with such a problem.
 */
static struct publication *tipc_nameseq_remove_publ(struct net *net,
						    struct name_seq *nseq,
						    u32 inst, u32 node,
						    u32 ref, u32 key)
{
	struct publication *publ;
	struct sub_seq *sseq = nameseq_find_subseq(nseq, inst);
	struct name_info *info;
	struct sub_seq *free;
	struct tipc_subscription *s, *st;
	int removed_subseq = 0;

	if (!sseq)
		return NULL;

	info = sseq->info;

	/* Locate publication, if it exists */
	list_for_each_entry(publ, &info->zone_list, zone_list) {
		if ((publ->key == key) && (publ->ref == ref) &&
		    (!publ->node || (publ->node == node)))
			goto found;
	}
	return NULL;

found:
	/* Remove publication from zone scope list */
	list_del(&publ->zone_list);
	info->zone_list_size--;

	/* Remove publication from cluster scope list, if present */
	if (in_own_cluster(net, node)) {
		list_del(&publ->cluster_list);
		info->cluster_list_size--;
	}

	/* Remove publication from node scope list, if present */
	if (in_own_node(net, node)) {
		list_del(&publ->node_list);
		info->node_list_size--;
	}

	/* Contract subseq list if no more publications for that subseq */
	if (list_empty(&info->zone_list)) {
		kfree(info);
		free = &nseq->sseqs[nseq->first_free--];
		memmove(sseq, sseq + 1, (free - (sseq + 1)) * sizeof(*sseq));
		removed_subseq = 1;
	}

	/* Notify any waiting subscriptions */
	list_for_each_entry_safe(s, st, &nseq->subscriptions, nameseq_list) {
		tipc_subscr_report_overlap(s,
					   publ->lower,
					   publ->upper,
					   TIPC_WITHDRAWN,
					   publ->ref,
					   publ->node,
					   removed_subseq);
	}
	return publ;
}

/**
 * tipc_nameseq_subscribe - attach a subscription, and issue
 * the prescribed number of events if there is any sub-
 * sequence overlapping with the requested sequence
 */
static void tipc_nameseq_subscribe(struct name_seq *nseq,
				   struct tipc_subscription *s)
{
	struct sub_seq *sseq = nseq->sseqs;

	list_add(&s->nameseq_list, &nseq->subscriptions);

	if (!sseq)
		return;

	while (sseq != &nseq->sseqs[nseq->first_free]) {
		if (tipc_subscr_overlap(s, sseq->lower, sseq->upper)) {
			struct publication *crs;
			struct name_info *info = sseq->info;
			int must_report = 1;

			list_for_each_entry(crs, &info->zone_list, zone_list) {
				tipc_subscr_report_overlap(s,
							   sseq->lower,
							   sseq->upper,
							   TIPC_PUBLISHED,
							   crs->ref,
							   crs->node,
							   must_report);
				must_report = 0;
			}
		}
		sseq++;
	}
}

static struct name_seq *nametbl_find_seq(struct net *net, u32 type)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct hlist_head *seq_head;
	struct name_seq *ns;

	seq_head = &tn->nametbl->seq_hlist[hash(type)];
	hlist_for_each_entry_rcu(ns, seq_head, ns_list) {
		if (ns->type == type)
			return ns;
	}

	return NULL;
}

struct publication *tipc_nametbl_insert_publ(struct net *net, u32 type,
					     u32 lower, u32 upper, u32 scope,
					     u32 node, u32 port, u32 key)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct publication *publ;
	struct name_seq *seq = nametbl_find_seq(net, type);
	int index = hash(type);

	if ((scope < TIPC_ZONE_SCOPE) || (scope > TIPC_NODE_SCOPE) ||
	    (lower > upper)) {
		pr_debug("Failed to publish illegal {%u,%u,%u} with scope %u\n",
			 type, lower, upper, scope);
		return NULL;
	}

	if (!seq)
		seq = tipc_nameseq_create(type, &tn->nametbl->seq_hlist[index]);
	if (!seq)
		return NULL;

	spin_lock_bh(&seq->lock);
	publ = tipc_nameseq_insert_publ(net, seq, type, lower, upper,
					scope, node, port, key);
	spin_unlock_bh(&seq->lock);
	return publ;
}

struct publication *tipc_nametbl_remove_publ(struct net *net, u32 type,
					     u32 lower, u32 node, u32 ref,
					     u32 key)
{
	struct publication *publ;
	struct name_seq *seq = nametbl_find_seq(net, type);

	if (!seq)
		return NULL;

	spin_lock_bh(&seq->lock);
	publ = tipc_nameseq_remove_publ(net, seq, lower, node, ref, key);
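
	/* Drop the whole name sequence once it has no sub-sequences left and
	 * no subscribers; RCU readers may still see it, so the structure
	 * itself is freed via kfree_rcu().
	 */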
	if (!seq->first_free && list_empty(&seq->subscriptions)) {
		hlist_del_init_rcu(&seq->ns_list);
		kfree(seq->sseqs);
		spin_unlock_bh(&seq->lock);
		kfree_rcu(seq, rcu);
		return publ;
	}
	spin_unlock_bh(&seq->lock);
	return publ;
}

/**
 * tipc_nametbl_translate - perform name translation
 *
 * On entry, 'destnode' is the search domain used during translation.
 *
 * On exit:
 * - if name translation is deferred to another node/cluster/zone,
 *   leaves 'destnode' unchanged (will be non-zero) and returns 0
 * - if name translation is attempted and succeeds, sets 'destnode'
 *   to publishing node and returns port reference (will be non-zero)
 * - if name translation is attempted and fails, sets 'destnode' to 0
 *   and returns 0
 */
u32 tipc_nametbl_translate(struct net *net, u32 type, u32 instance,
			   u32 *destnode)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct sub_seq *sseq;
	struct name_info *info;
	struct publication *publ;
	struct name_seq *seq;
	u32 ref = 0;
	u32 node = 0;

	if (!tipc_in_scope(*destnode, tn->own_addr))
		return 0;

	rcu_read_lock();
	seq = nametbl_find_seq(net, type);
	if (unlikely(!seq))
		goto not_found;
	spin_lock_bh(&seq->lock);
	sseq = nameseq_find_subseq(seq, instance);
	if (unlikely(!sseq))
		goto no_match;
	info = sseq->info;
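
	/* When no destination node is given, the "closest" publication wins:
	 * own node first, then own cluster, then zone. In every branch the
	 * chosen entry is moved to the tail of its list, so repeated lookups
	 * round-robin over equivalent publications.
	 */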
	/* Closest-First Algorithm */
	if (likely(!*destnode)) {
		if (!list_empty(&info->node_list)) {
			publ = list_first_entry(&info->node_list,
						struct publication,
						node_list);
			list_move_tail(&publ->node_list,
				       &info->node_list);
		} else if (!list_empty(&info->cluster_list)) {
			publ = list_first_entry(&info->cluster_list,
						struct publication,
						cluster_list);
			list_move_tail(&publ->cluster_list,
				       &info->cluster_list);
		} else {
			publ = list_first_entry(&info->zone_list,
						struct publication,
						zone_list);
			list_move_tail(&publ->zone_list,
				       &info->zone_list);
		}
	}

	/* Round-Robin Algorithm */
	else if (*destnode == tn->own_addr) {
		if (list_empty(&info->node_list))
			goto no_match;
		publ = list_first_entry(&info->node_list, struct publication,
					node_list);
		list_move_tail(&publ->node_list, &info->node_list);
	} else if (in_own_cluster_exact(net, *destnode)) {
		if (list_empty(&info->cluster_list))
			goto no_match;
		publ = list_first_entry(&info->cluster_list, struct publication,
					cluster_list);
		list_move_tail(&publ->cluster_list, &info->cluster_list);
	} else {
		publ = list_first_entry(&info->zone_list, struct publication,
					zone_list);
		list_move_tail(&publ->zone_list, &info->zone_list);
	}

	ref = publ->ref;
	node = publ->node;
no_match:
	spin_unlock_bh(&seq->lock);
not_found:
	rcu_read_unlock();
	*destnode = node;
	return ref;
}
/**
 * tipc_nametbl_mc_translate - find multicast destinations
 *
 * Creates list of all local ports that overlap the given multicast address;
 * also determines if any off-node ports overlap.
 *
 * Note: Publications with a scope narrower than 'limit' are ignored.
 * (i.e. local node-scope publications mustn't receive messages arriving
 * from another node, even if the multicast link brought them here)
 *
 * Returns non-zero if any off-node ports overlap
 */
int tipc_nametbl_mc_translate(struct net *net, u32 type, u32 lower, u32 upper,
			      u32 limit, struct tipc_plist *dports)
{
	struct name_seq *seq;
	struct sub_seq *sseq;
	struct sub_seq *sseq_stop;
	struct name_info *info;
	int res = 0;

	rcu_read_lock();
	seq = nametbl_find_seq(net, type);
	if (!seq)
		goto exit;

	spin_lock_bh(&seq->lock);
	sseq = seq->sseqs + nameseq_locate_subseq(seq, lower);
	sseq_stop = seq->sseqs + seq->first_free;
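	/* Sub-sequences are sorted by range, so the scan can stop as soon as
	 * one starts above the requested upper bound.
	 */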
	for (; sseq != sseq_stop; sseq++) {
		struct publication *publ;

		if (sseq->lower > upper)
			break;

		info = sseq->info;
		list_for_each_entry(publ, &info->node_list, node_list) {
			if (publ->scope <= limit)
				tipc_plist_push(dports, publ->ref);
		}

		if (info->cluster_list_size != info->node_list_size)
			res = 1;
	}
	spin_unlock_bh(&seq->lock);
exit:
	rcu_read_unlock();
	return res;
}

/*
 * tipc_nametbl_publish - add name publication to network name tables
 */
struct publication *tipc_nametbl_publish(struct net *net, u32 type, u32 lower,
					 u32 upper, u32 scope, u32 port_ref,
					 u32 key)
{
	struct publication *publ;
	struct sk_buff *buf = NULL;
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	spin_lock_bh(&tn->nametbl_lock);
	if (tn->nametbl->local_publ_count >= TIPC_MAX_PUBLICATIONS) {
		pr_warn("Publication failed, local publication limit reached (%u)\n",
			TIPC_MAX_PUBLICATIONS);
		spin_unlock_bh(&tn->nametbl_lock);
		return NULL;
	}

	publ = tipc_nametbl_insert_publ(net, type, lower, upper, scope,
					tn->own_addr, port_ref, key);
	if (likely(publ)) {
		tn->nametbl->local_publ_count++;
		buf = tipc_named_publish(net, publ);
		/* Any pending external events? */
		tipc_named_process_backlog(net);
	}
	spin_unlock_bh(&tn->nametbl_lock);
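
	/* Distribute the new publication to the rest of the cluster only
	 * after the name table lock has been released.
	 */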
	if (buf)
		named_cluster_distribute(net, buf);
	return publ;
}

/**
 * tipc_nametbl_withdraw - withdraw name publication from network name tables
 */
int tipc_nametbl_withdraw(struct net *net, u32 type, u32 lower, u32 ref,
			  u32 key)
{
	struct publication *publ;
	struct sk_buff *skb = NULL;
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	spin_lock_bh(&tn->nametbl_lock);
	publ = tipc_nametbl_remove_publ(net, type, lower, tn->own_addr,
					ref, key);
	if (likely(publ)) {
		tn->nametbl->local_publ_count--;
		skb = tipc_named_withdraw(net, publ);
		/* Any pending external events? */
		tipc_named_process_backlog(net);
		list_del_init(&publ->pport_list);
		kfree_rcu(publ, rcu);
	} else {
		pr_err("Unable to remove local publication\n"
		       "(type=%u, lower=%u, ref=%u, key=%u)\n",
		       type, lower, ref, key);
	}
	spin_unlock_bh(&tn->nametbl_lock);

	if (skb) {
		named_cluster_distribute(net, skb);
		return 1;
	}
	return 0;
}

/**
 * tipc_nametbl_subscribe - add a subscription object to the name table
 */
void tipc_nametbl_subscribe(struct tipc_subscription *s)
{
	struct tipc_net *tn = net_generic(s->net, tipc_net_id);
	u32 type = s->seq.type;
	int index = hash(type);
	struct name_seq *seq;

	spin_lock_bh(&tn->nametbl_lock);
	seq = nametbl_find_seq(s->net, type);
	if (!seq)
		seq = tipc_nameseq_create(type, &tn->nametbl->seq_hlist[index]);
	if (seq) {
		spin_lock_bh(&seq->lock);
		tipc_nameseq_subscribe(seq, s);
		spin_unlock_bh(&seq->lock);
	} else {
		pr_warn("Failed to create subscription for {%u,%u,%u}\n",
			s->seq.type, s->seq.lower, s->seq.upper);
	}
	spin_unlock_bh(&tn->nametbl_lock);
}

/**
 * tipc_nametbl_unsubscribe - remove a subscription object from name table
 */
void tipc_nametbl_unsubscribe(struct tipc_subscription *s)
{
	struct tipc_net *tn = net_generic(s->net, tipc_net_id);
	struct name_seq *seq;

	spin_lock_bh(&tn->nametbl_lock);
	seq = nametbl_find_seq(s->net, s->seq.type);
	if (seq != NULL) {
		spin_lock_bh(&seq->lock);
		list_del_init(&s->nameseq_list);
		if (!seq->first_free && list_empty(&seq->subscriptions)) {
			hlist_del_init_rcu(&seq->ns_list);
			kfree(seq->sseqs);
			spin_unlock_bh(&seq->lock);
			kfree_rcu(seq, rcu);
		} else {
			spin_unlock_bh(&seq->lock);
		}
	}
	spin_unlock_bh(&tn->nametbl_lock);
}

int tipc_nametbl_init(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct name_table *tipc_nametbl;
	int i;

	tipc_nametbl = kzalloc(sizeof(*tipc_nametbl), GFP_ATOMIC);
	if (!tipc_nametbl)
		return -ENOMEM;

	for (i = 0; i < TIPC_NAMETBL_SIZE; i++)
		INIT_HLIST_HEAD(&tipc_nametbl->seq_hlist[i]);

	INIT_LIST_HEAD(&tipc_nametbl->publ_list[TIPC_ZONE_SCOPE]);
	INIT_LIST_HEAD(&tipc_nametbl->publ_list[TIPC_CLUSTER_SCOPE]);
	INIT_LIST_HEAD(&tipc_nametbl->publ_list[TIPC_NODE_SCOPE]);
	tn->nametbl = tipc_nametbl;
	spin_lock_init(&tn->nametbl_lock);
	return 0;
}

/**
 * tipc_purge_publications - remove all publications for a given type
 *
 * tipc_nametbl_lock must be held when calling this function
 */
static void tipc_purge_publications(struct net *net, struct name_seq *seq)
{
	struct publication *publ, *safe;
	struct sub_seq *sseq;
	struct name_info *info;

	spin_lock_bh(&seq->lock);
	sseq = seq->sseqs;
	info = sseq->info;
	list_for_each_entry_safe(publ, safe, &info->zone_list, zone_list) {
		tipc_nametbl_remove_publ(net, publ->type, publ->lower,
					 publ->node, publ->ref, publ->key);
		kfree_rcu(publ, rcu);
	}
	hlist_del_init_rcu(&seq->ns_list);
	kfree(seq->sseqs);
	spin_unlock_bh(&seq->lock);
	kfree_rcu(seq, rcu);
}

void tipc_nametbl_stop(struct net *net)
{
	u32 i;
	struct name_seq *seq;
	struct hlist_head *seq_head;
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct name_table *tipc_nametbl = tn->nametbl;

	/* Verify name table is empty and purge any lingering
	 * publications, then release the name table
	 */
	spin_lock_bh(&tn->nametbl_lock);
	for (i = 0; i < TIPC_NAMETBL_SIZE; i++) {
		if (hlist_empty(&tipc_nametbl->seq_hlist[i]))
			continue;
		seq_head = &tipc_nametbl->seq_hlist[i];
		hlist_for_each_entry_rcu(seq, seq_head, ns_list) {
			tipc_purge_publications(net, seq);
		}
	}
	spin_unlock_bh(&tn->nametbl_lock);
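
	/* Wait for any outstanding RCU readers before the table is freed */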
	synchronize_net();
	kfree(tipc_nametbl);
}

static int __tipc_nl_add_nametable_publ(struct tipc_nl_msg *msg,
					struct name_seq *seq,
					struct sub_seq *sseq, u32 *last_publ)
{
	void *hdr;
	struct nlattr *attrs;
	struct nlattr *publ;
	struct publication *p;

	if (*last_publ) {
		list_for_each_entry(p, &sseq->info->zone_list, zone_list)
			if (p->key == *last_publ)
				break;
		if (p->key != *last_publ)
			return -EPIPE;
	} else {
		p = list_first_entry(&sseq->info->zone_list, struct publication,
				     zone_list);
	}

	list_for_each_entry_from(p, &sseq->info->zone_list, zone_list) {
		*last_publ = p->key;

		hdr = genlmsg_put(msg->skb, msg->portid, msg->seq,
				  &tipc_genl_family, NLM_F_MULTI,
				  TIPC_NL_NAME_TABLE_GET);
		if (!hdr)
			return -EMSGSIZE;

		attrs = nla_nest_start(msg->skb, TIPC_NLA_NAME_TABLE);
		if (!attrs)
			goto msg_full;

		publ = nla_nest_start(msg->skb, TIPC_NLA_NAME_TABLE_PUBL);
		if (!publ)
			goto attr_msg_full;

		if (nla_put_u32(msg->skb, TIPC_NLA_PUBL_TYPE, seq->type))
			goto publ_msg_full;
		if (nla_put_u32(msg->skb, TIPC_NLA_PUBL_LOWER, sseq->lower))
			goto publ_msg_full;
		if (nla_put_u32(msg->skb, TIPC_NLA_PUBL_UPPER, sseq->upper))
			goto publ_msg_full;
		if (nla_put_u32(msg->skb, TIPC_NLA_PUBL_SCOPE, p->scope))
			goto publ_msg_full;
		if (nla_put_u32(msg->skb, TIPC_NLA_PUBL_NODE, p->node))
			goto publ_msg_full;
		if (nla_put_u32(msg->skb, TIPC_NLA_PUBL_REF, p->ref))
			goto publ_msg_full;
		if (nla_put_u32(msg->skb, TIPC_NLA_PUBL_KEY, p->key))
			goto publ_msg_full;

		nla_nest_end(msg->skb, publ);
		nla_nest_end(msg->skb, attrs);
		genlmsg_end(msg->skb, hdr);
	}
	*last_publ = 0;

	return 0;

publ_msg_full:
	nla_nest_cancel(msg->skb, publ);
attr_msg_full:
	nla_nest_cancel(msg->skb, attrs);
msg_full:
	genlmsg_cancel(msg->skb, hdr);

	return -EMSGSIZE;
}

static int __tipc_nl_subseq_list(struct tipc_nl_msg *msg, struct name_seq *seq,
				 u32 *last_lower, u32 *last_publ)
{
	struct sub_seq *sseq;
	struct sub_seq *sseq_start;
	int err;

	if (*last_lower) {
		sseq_start = nameseq_find_subseq(seq, *last_lower);
		if (!sseq_start)
			return -EPIPE;
	} else {
		sseq_start = seq->sseqs;
	}

	for (sseq = sseq_start; sseq != &seq->sseqs[seq->first_free]; sseq++) {
		err = __tipc_nl_add_nametable_publ(msg, seq, sseq, last_publ);
		if (err) {
			*last_lower = sseq->lower;
			return err;
		}
	}
	*last_lower = 0;

	return 0;
}

static int tipc_nl_seq_list(struct net *net, struct tipc_nl_msg *msg,
			    u32 *last_type, u32 *last_lower, u32 *last_publ)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct hlist_head *seq_head;
	struct name_seq *seq = NULL;
	int err;
	int i;

	if (*last_type)
		i = hash(*last_type);
	else
		i = 0;

	for (; i < TIPC_NAMETBL_SIZE; i++) {
		seq_head = &tn->nametbl->seq_hlist[i];

		if (*last_type) {
			seq = nametbl_find_seq(net, *last_type);
			if (!seq)
				return -EPIPE;
		} else {
			hlist_for_each_entry_rcu(seq, seq_head, ns_list)
				break;
			if (!seq)
				continue;
		}

		hlist_for_each_entry_from_rcu(seq, ns_list) {
			spin_lock_bh(&seq->lock);
			err = __tipc_nl_subseq_list(msg, seq, last_lower,
						    last_publ);
			if (err) {
				*last_type = seq->type;
				spin_unlock_bh(&seq->lock);
				return err;
			}
			spin_unlock_bh(&seq->lock);
		}
		*last_type = 0;
	}
	return 0;
}

int tipc_nl_name_table_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	int err;
	int done = cb->args[3];
	u32 last_type = cb->args[0];
	u32 last_lower = cb->args[1];
	u32 last_publ = cb->args[2];
	struct net *net = sock_net(skb->sk);
	struct tipc_nl_msg msg;
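
	/* last_type/last_lower/last_publ form a cursor, saved in cb->args[],
	 * so an interrupted dump resumes where the previous pass stopped.
	 */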
	if (done)
		return 0;

	msg.skb = skb;
	msg.portid = NETLINK_CB(cb->skb).portid;
	msg.seq = cb->nlh->nlmsg_seq;

	rcu_read_lock();
	err = tipc_nl_seq_list(net, &msg, &last_type, &last_lower, &last_publ);
	if (!err) {
		done = 1;
	} else if (err != -EMSGSIZE) {
		/* We never set seq or call nl_dump_check_consistent(), so
		 * setting prev_seq here will make the consistency check fail
		 * in the netlink callback handler.  As a result, the
		 * NLMSG_DONE message will carry the NLM_F_DUMP_INTR flag if
		 * we got an error.
		 */
		cb->prev_seq = 1;
	}
	rcu_read_unlock();

	cb->args[0] = last_type;
	cb->args[1] = last_lower;
	cb->args[2] = last_publ;
	cb->args[3] = done;

	return skb->len;
}
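
/* tipc_plist_push - add a port to a destination port list unless it is
 * already present.  The first port is stored inline in the list head itself;
 * additional ports get their own heap-allocated nodes.
 */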
void tipc_plist_push(struct tipc_plist *pl, u32 port)
{
	struct tipc_plist *nl;

	if (likely(!pl->port)) {
		pl->port = port;
		return;
	}
	if (pl->port == port)
		return;
	list_for_each_entry(nl, &pl->list, list) {
		if (nl->port == port)
			return;
	}
	nl = kmalloc(sizeof(*nl), GFP_ATOMIC);
	if (nl) {
		nl->port = port;
		list_add(&nl->list, &pl->list);
	}
}

u32 tipc_plist_pop(struct tipc_plist *pl)
{
	struct tipc_plist *nl;
	u32 port = 0;

	if (likely(list_empty(&pl->list))) {
		port = pl->port;
		pl->port = 0;
		return port;
	}
	nl = list_first_entry(&pl->list, typeof(*nl), list);
	port = nl->port;
	list_del(&nl->list);
	kfree(nl);
	return port;
}