originator.c

/* Copyright (C) 2009-2015 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "originator.h"
#include "main.h"

#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/fs.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/netdevice.h>
#include <linux/rculist.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

#include "distributed-arp-table.h"
#include "fragmentation.h"
#include "gateway_client.h"
#include "hard-interface.h"
#include "hash.h"
#include "multicast.h"
#include "network-coding.h"
#include "routing.h"
#include "translation-table.h"

/* hash class keys */
static struct lock_class_key batadv_orig_hash_lock_class_key;

static void batadv_purge_orig(struct work_struct *work);

/* returns 1 if they are the same originator */
int batadv_compare_orig(const struct hlist_node *node, const void *data2)
{
	const void *data1 = container_of(node, struct batadv_orig_node,
					 hash_entry);

	return batadv_compare_eth(data1, data2);
}

/**
 * batadv_orig_node_vlan_get - get an orig_node_vlan object
 * @orig_node: the originator serving the VLAN
 * @vid: the VLAN identifier
 *
 * Returns the vlan object identified by vid and belonging to orig_node or NULL
 * if it does not exist.
 */
struct batadv_orig_node_vlan *
batadv_orig_node_vlan_get(struct batadv_orig_node *orig_node,
			  unsigned short vid)
{
	struct batadv_orig_node_vlan *vlan = NULL, *tmp;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp, &orig_node->vlan_list, list) {
		if (tmp->vid != vid)
			continue;

		if (!atomic_inc_not_zero(&tmp->refcount))
			continue;

		vlan = tmp;
		break;
	}
	rcu_read_unlock();

	return vlan;
}

/**
 * batadv_orig_node_vlan_new - search and possibly create an orig_node_vlan
 *  object
 * @orig_node: the originator serving the VLAN
 * @vid: the VLAN identifier
 *
 * Returns NULL in case of failure or the vlan object identified by vid and
 * belonging to orig_node otherwise. The object is created and added to the
 * list if it does not exist.
 *
 * The object is returned with refcounter increased by 1.
 */
struct batadv_orig_node_vlan *
batadv_orig_node_vlan_new(struct batadv_orig_node *orig_node,
			  unsigned short vid)
{
	struct batadv_orig_node_vlan *vlan;

	spin_lock_bh(&orig_node->vlan_list_lock);

	/* first look if an object for this vid already exists */
	vlan = batadv_orig_node_vlan_get(orig_node, vid);
	if (vlan)
		goto out;

	vlan = kzalloc(sizeof(*vlan), GFP_ATOMIC);
	if (!vlan)
		goto out;

	atomic_set(&vlan->refcount, 2);
	vlan->vid = vid;
	hlist_add_head_rcu(&vlan->list, &orig_node->vlan_list);

out:
	spin_unlock_bh(&orig_node->vlan_list_lock);

	return vlan;
}

/**
 * batadv_orig_node_vlan_free_ref - decrement the refcounter and possibly free
 *  the originator-vlan object
 * @orig_vlan: the originator-vlan object to release
 */
void batadv_orig_node_vlan_free_ref(struct batadv_orig_node_vlan *orig_vlan)
{
	if (atomic_dec_and_test(&orig_vlan->refcount))
		kfree_rcu(orig_vlan, rcu);
}
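
/* Illustrative usage sketch (not part of the original file, kept out of the
 * build with "#if 0"): both batadv_orig_node_vlan_get() and
 * batadv_orig_node_vlan_new() return the vlan object with its refcounter
 * already increased, so the caller drops that reference with
 * batadv_orig_node_vlan_free_ref() once done. The function name below is
 * hypothetical.
 */
#if 0
static bool batadv_example_vlan_is_known(struct batadv_orig_node *orig_node,
					 unsigned short vid)
{
	struct batadv_orig_node_vlan *vlan;

	vlan = batadv_orig_node_vlan_get(orig_node, vid);
	if (!vlan)
		return false;

	/* ... inspect the vlan object while holding the reference ... */

	batadv_orig_node_vlan_free_ref(vlan);
	return true;
}
#endif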

int batadv_originator_init(struct batadv_priv *bat_priv)
{
	if (bat_priv->orig_hash)
		return 0;

	bat_priv->orig_hash = batadv_hash_new(1024);

	if (!bat_priv->orig_hash)
		goto err;

	batadv_hash_set_lock_class(bat_priv->orig_hash,
				   &batadv_orig_hash_lock_class_key);

	INIT_DELAYED_WORK(&bat_priv->orig_work, batadv_purge_orig);
	queue_delayed_work(batadv_event_workqueue,
			   &bat_priv->orig_work,
			   msecs_to_jiffies(BATADV_ORIG_WORK_PERIOD));

	return 0;

err:
	return -ENOMEM;
}

/**
 * batadv_neigh_ifinfo_free_rcu - free the neigh_ifinfo object
 * @rcu: rcu pointer of the neigh_ifinfo object
 */
static void batadv_neigh_ifinfo_free_rcu(struct rcu_head *rcu)
{
	struct batadv_neigh_ifinfo *neigh_ifinfo;

	neigh_ifinfo = container_of(rcu, struct batadv_neigh_ifinfo, rcu);

	if (neigh_ifinfo->if_outgoing != BATADV_IF_DEFAULT)
		batadv_hardif_free_ref_now(neigh_ifinfo->if_outgoing);

	kfree(neigh_ifinfo);
}

/**
 * batadv_neigh_ifinfo_free_ref_now - decrement the refcounter and possibly
 *  free the neigh_ifinfo (without rcu callback)
 * @neigh_ifinfo: the neigh_ifinfo object to release
 */
static void
batadv_neigh_ifinfo_free_ref_now(struct batadv_neigh_ifinfo *neigh_ifinfo)
{
	if (atomic_dec_and_test(&neigh_ifinfo->refcount))
		batadv_neigh_ifinfo_free_rcu(&neigh_ifinfo->rcu);
}

/**
 * batadv_neigh_ifinfo_free_ref - decrement the refcounter and possibly free
 *  the neigh_ifinfo
 * @neigh_ifinfo: the neigh_ifinfo object to release
 */
void batadv_neigh_ifinfo_free_ref(struct batadv_neigh_ifinfo *neigh_ifinfo)
{
	if (atomic_dec_and_test(&neigh_ifinfo->refcount))
		call_rcu(&neigh_ifinfo->rcu, batadv_neigh_ifinfo_free_rcu);
}

/**
 * batadv_neigh_node_free_rcu - free the neigh_node
 * @rcu: rcu pointer of the neigh_node
 */
static void batadv_neigh_node_free_rcu(struct rcu_head *rcu)
{
	struct hlist_node *node_tmp;
	struct batadv_neigh_node *neigh_node;
	struct batadv_neigh_ifinfo *neigh_ifinfo;
	struct batadv_algo_ops *bao;

	neigh_node = container_of(rcu, struct batadv_neigh_node, rcu);
	bao = neigh_node->orig_node->bat_priv->bat_algo_ops;

	hlist_for_each_entry_safe(neigh_ifinfo, node_tmp,
				  &neigh_node->ifinfo_list, list) {
		batadv_neigh_ifinfo_free_ref_now(neigh_ifinfo);
	}

	if (bao->bat_neigh_free)
		bao->bat_neigh_free(neigh_node);

	batadv_hardif_free_ref_now(neigh_node->if_incoming);

	kfree(neigh_node);
}

/**
 * batadv_neigh_node_free_ref_now - decrement the neighbor's refcounter
 *  and possibly free it (without rcu callback)
 * @neigh_node: the neighbor to free
 */
static void
batadv_neigh_node_free_ref_now(struct batadv_neigh_node *neigh_node)
{
	if (atomic_dec_and_test(&neigh_node->refcount))
		batadv_neigh_node_free_rcu(&neigh_node->rcu);
}

/**
 * batadv_neigh_node_free_ref - decrement the neighbor's refcounter
 *  and possibly free it
 * @neigh_node: the neighbor to free
 */
void batadv_neigh_node_free_ref(struct batadv_neigh_node *neigh_node)
{
	if (atomic_dec_and_test(&neigh_node->refcount))
		call_rcu(&neigh_node->rcu, batadv_neigh_node_free_rcu);
}

/**
 * batadv_orig_router_get - get the router towards an originator for a given
 *  outgoing interface
 * @orig_node: the orig node for the router
 * @if_outgoing: the interface where the payload packet has been received or
 *  the OGM should be sent to
 *
 * Returns the neighbor which should be the router for this orig_node/iface.
 *
 * The object is returned with refcounter increased by 1.
 */
struct batadv_neigh_node *
batadv_orig_router_get(struct batadv_orig_node *orig_node,
		       const struct batadv_hard_iface *if_outgoing)
{
	struct batadv_orig_ifinfo *orig_ifinfo;
	struct batadv_neigh_node *router = NULL;

	rcu_read_lock();
	hlist_for_each_entry_rcu(orig_ifinfo, &orig_node->ifinfo_list, list) {
		if (orig_ifinfo->if_outgoing != if_outgoing)
			continue;

		router = rcu_dereference(orig_ifinfo->router);
		break;
	}

	if (router && !atomic_inc_not_zero(&router->refcount))
		router = NULL;

	rcu_read_unlock();
	return router;
}
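
/* Illustrative usage sketch (not part of the original file, kept out of the
 * build with "#if 0"): querying the current router towards an originator on a
 * given outgoing interface and releasing the acquired reference with
 * batadv_neigh_node_free_ref() afterwards. The function name below is
 * hypothetical.
 */
#if 0
static bool batadv_example_has_router(struct batadv_orig_node *orig_node,
				      struct batadv_hard_iface *if_outgoing)
{
	struct batadv_neigh_node *router;

	router = batadv_orig_router_get(orig_node, if_outgoing);
	if (!router)
		return false;

	/* ... use the router (e.g. its address) while holding the ref ... */

	batadv_neigh_node_free_ref(router);
	return true;
}
#endif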

/**
 * batadv_orig_ifinfo_get - find the ifinfo from an orig_node
 * @orig_node: the orig node to be queried
 * @if_outgoing: the interface for which the ifinfo should be acquired
 *
 * Returns the requested orig_ifinfo or NULL if not found.
 *
 * The object is returned with refcounter increased by 1.
 */
struct batadv_orig_ifinfo *
batadv_orig_ifinfo_get(struct batadv_orig_node *orig_node,
		       struct batadv_hard_iface *if_outgoing)
{
	struct batadv_orig_ifinfo *tmp, *orig_ifinfo = NULL;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp, &orig_node->ifinfo_list,
				 list) {
		if (tmp->if_outgoing != if_outgoing)
			continue;

		if (!atomic_inc_not_zero(&tmp->refcount))
			continue;

		orig_ifinfo = tmp;
		break;
	}
	rcu_read_unlock();

	return orig_ifinfo;
}

/**
 * batadv_orig_ifinfo_new - search and possibly create an orig_ifinfo object
 * @orig_node: the orig node to be queried
 * @if_outgoing: the interface for which the ifinfo should be acquired
 *
 * Returns NULL in case of failure or the orig_ifinfo object for the
 * if_outgoing interface otherwise. The object is created and added to the
 * list if it does not exist.
 *
 * The object is returned with refcounter increased by 1.
 */
struct batadv_orig_ifinfo *
batadv_orig_ifinfo_new(struct batadv_orig_node *orig_node,
		       struct batadv_hard_iface *if_outgoing)
{
	struct batadv_orig_ifinfo *orig_ifinfo = NULL;
	unsigned long reset_time;

	spin_lock_bh(&orig_node->neigh_list_lock);

	orig_ifinfo = batadv_orig_ifinfo_get(orig_node, if_outgoing);
	if (orig_ifinfo)
		goto out;

	orig_ifinfo = kzalloc(sizeof(*orig_ifinfo), GFP_ATOMIC);
	if (!orig_ifinfo)
		goto out;

	if (if_outgoing != BATADV_IF_DEFAULT &&
	    !atomic_inc_not_zero(&if_outgoing->refcount)) {
		kfree(orig_ifinfo);
		orig_ifinfo = NULL;
		goto out;
	}

	reset_time = jiffies - 1;
	reset_time -= msecs_to_jiffies(BATADV_RESET_PROTECTION_MS);
	orig_ifinfo->batman_seqno_reset = reset_time;
	orig_ifinfo->if_outgoing = if_outgoing;
	INIT_HLIST_NODE(&orig_ifinfo->list);
	atomic_set(&orig_ifinfo->refcount, 2);
	hlist_add_head_rcu(&orig_ifinfo->list,
			   &orig_node->ifinfo_list);
out:
	spin_unlock_bh(&orig_node->neigh_list_lock);
	return orig_ifinfo;
}

/**
 * batadv_neigh_ifinfo_get - find the ifinfo from a neigh_node
 * @neigh: the neigh node to be queried
 * @if_outgoing: the interface for which the ifinfo should be acquired
 *
 * The object is returned with refcounter increased by 1.
 *
 * Returns the requested neigh_ifinfo or NULL if not found
 */
struct batadv_neigh_ifinfo *
batadv_neigh_ifinfo_get(struct batadv_neigh_node *neigh,
			struct batadv_hard_iface *if_outgoing)
{
	struct batadv_neigh_ifinfo *neigh_ifinfo = NULL,
				   *tmp_neigh_ifinfo;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp_neigh_ifinfo, &neigh->ifinfo_list,
				 list) {
		if (tmp_neigh_ifinfo->if_outgoing != if_outgoing)
			continue;

		if (!atomic_inc_not_zero(&tmp_neigh_ifinfo->refcount))
			continue;

		neigh_ifinfo = tmp_neigh_ifinfo;
		break;
	}
	rcu_read_unlock();

	return neigh_ifinfo;
}

/**
 * batadv_neigh_ifinfo_new - search and possibly create a neigh_ifinfo object
 * @neigh: the neigh node to be queried
 * @if_outgoing: the interface for which the ifinfo should be acquired
 *
 * Returns NULL in case of failure or the neigh_ifinfo object for the
 * if_outgoing interface otherwise. The object is created and added to the
 * list if it does not exist.
 *
 * The object is returned with refcounter increased by 1.
 */
struct batadv_neigh_ifinfo *
batadv_neigh_ifinfo_new(struct batadv_neigh_node *neigh,
			struct batadv_hard_iface *if_outgoing)
{
	struct batadv_neigh_ifinfo *neigh_ifinfo;

	spin_lock_bh(&neigh->ifinfo_lock);

	neigh_ifinfo = batadv_neigh_ifinfo_get(neigh, if_outgoing);
	if (neigh_ifinfo)
		goto out;

	neigh_ifinfo = kzalloc(sizeof(*neigh_ifinfo), GFP_ATOMIC);
	if (!neigh_ifinfo)
		goto out;

	if (if_outgoing && !atomic_inc_not_zero(&if_outgoing->refcount)) {
		kfree(neigh_ifinfo);
		neigh_ifinfo = NULL;
		goto out;
	}

	INIT_HLIST_NODE(&neigh_ifinfo->list);
	atomic_set(&neigh_ifinfo->refcount, 2);
	neigh_ifinfo->if_outgoing = if_outgoing;

	hlist_add_head_rcu(&neigh_ifinfo->list, &neigh->ifinfo_list);

out:
	spin_unlock_bh(&neigh->ifinfo_lock);

	return neigh_ifinfo;
}

/**
 * batadv_neigh_node_get - retrieve a neighbour from the list
 * @orig_node: originator which the neighbour belongs to
 * @hard_iface: the interface where this neighbour is connected to
 * @addr: the address of the neighbour
 *
 * Looks for and possibly returns a neighbour belonging to this originator list
 * which is connected through the provided hard interface.
 * Returns NULL if the neighbour is not found.
 */
static struct batadv_neigh_node *
batadv_neigh_node_get(const struct batadv_orig_node *orig_node,
		      const struct batadv_hard_iface *hard_iface,
		      const u8 *addr)
{
	struct batadv_neigh_node *tmp_neigh_node, *res = NULL;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp_neigh_node, &orig_node->neigh_list, list) {
		if (!batadv_compare_eth(tmp_neigh_node->addr, addr))
			continue;

		if (tmp_neigh_node->if_incoming != hard_iface)
			continue;

		if (!atomic_inc_not_zero(&tmp_neigh_node->refcount))
			continue;

		res = tmp_neigh_node;
		break;
	}
	rcu_read_unlock();

	return res;
}

/**
 * batadv_neigh_node_new - create and init a new neigh_node object
 * @orig_node: originator object representing the neighbour
 * @hard_iface: the interface where the neighbour is connected to
 * @neigh_addr: the mac address of the neighbour interface
 *
 * Allocates a new neigh_node object and initialises all the generic fields.
 * Returns the new object or NULL on failure.
 */
struct batadv_neigh_node *
batadv_neigh_node_new(struct batadv_orig_node *orig_node,
		      struct batadv_hard_iface *hard_iface,
		      const u8 *neigh_addr)
{
	struct batadv_neigh_node *neigh_node;

	neigh_node = batadv_neigh_node_get(orig_node, hard_iface, neigh_addr);
	if (neigh_node)
		goto out;

	neigh_node = kzalloc(sizeof(*neigh_node), GFP_ATOMIC);
	if (!neigh_node)
		goto out;

	if (!atomic_inc_not_zero(&hard_iface->refcount)) {
		kfree(neigh_node);
		neigh_node = NULL;
		goto out;
	}

	INIT_HLIST_NODE(&neigh_node->list);
	INIT_HLIST_HEAD(&neigh_node->ifinfo_list);
	spin_lock_init(&neigh_node->ifinfo_lock);

	ether_addr_copy(neigh_node->addr, neigh_addr);
	neigh_node->if_incoming = hard_iface;
	neigh_node->orig_node = orig_node;

	/* extra reference for return */
	atomic_set(&neigh_node->refcount, 2);

	spin_lock_bh(&orig_node->neigh_list_lock);
	hlist_add_head_rcu(&neigh_node->list, &orig_node->neigh_list);
	spin_unlock_bh(&orig_node->neigh_list_lock);

	batadv_dbg(BATADV_DBG_BATMAN, orig_node->bat_priv,
		   "Creating new neighbor %pM for orig_node %pM on interface %s\n",
		   neigh_addr, orig_node->orig, hard_iface->net_dev->name);

out:
	return neigh_node;
}
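
/* Illustrative usage sketch (not part of the original file, kept out of the
 * build with "#if 0"): batadv_neigh_node_new() either returns an existing
 * neighbour (via batadv_neigh_node_get()) or creates a new one with an extra
 * reference for the return value; in both cases the caller owns one reference
 * and releases it with batadv_neigh_node_free_ref(). The function name below
 * is hypothetical.
 */
#if 0
static void batadv_example_touch_neigh(struct batadv_orig_node *orig_node,
				       struct batadv_hard_iface *hard_iface,
				       const u8 *neigh_addr)
{
	struct batadv_neigh_node *neigh_node;

	neigh_node = batadv_neigh_node_new(orig_node, hard_iface, neigh_addr);
	if (!neigh_node)
		return;

	/* ... update the neighbour (e.g. its last_seen timestamp) ... */

	batadv_neigh_node_free_ref(neigh_node);
}
#endif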

/**
 * batadv_orig_ifinfo_free_rcu - free the orig_ifinfo object
 * @rcu: rcu pointer of the orig_ifinfo object
 */
static void batadv_orig_ifinfo_free_rcu(struct rcu_head *rcu)
{
	struct batadv_orig_ifinfo *orig_ifinfo;
	struct batadv_neigh_node *router;

	orig_ifinfo = container_of(rcu, struct batadv_orig_ifinfo, rcu);

	if (orig_ifinfo->if_outgoing != BATADV_IF_DEFAULT)
		batadv_hardif_free_ref_now(orig_ifinfo->if_outgoing);

	/* this is the last reference to this object */
	router = rcu_dereference_protected(orig_ifinfo->router, true);
	if (router)
		batadv_neigh_node_free_ref_now(router);
	kfree(orig_ifinfo);
}

/**
 * batadv_orig_ifinfo_free_ref_now - decrement the refcounter and possibly
 *  free the orig_ifinfo (without rcu callback)
 * @orig_ifinfo: the orig_ifinfo object to release
 */
static void
batadv_orig_ifinfo_free_ref_now(struct batadv_orig_ifinfo *orig_ifinfo)
{
	if (atomic_dec_and_test(&orig_ifinfo->refcount))
		batadv_orig_ifinfo_free_rcu(&orig_ifinfo->rcu);
}

/**
 * batadv_orig_ifinfo_free_ref - decrement the refcounter and possibly free
 *  the orig_ifinfo
 * @orig_ifinfo: the orig_ifinfo object to release
 */
void batadv_orig_ifinfo_free_ref(struct batadv_orig_ifinfo *orig_ifinfo)
{
	if (atomic_dec_and_test(&orig_ifinfo->refcount))
		call_rcu(&orig_ifinfo->rcu, batadv_orig_ifinfo_free_rcu);
}

static void batadv_orig_node_free_rcu(struct rcu_head *rcu)
{
	struct hlist_node *node_tmp;
	struct batadv_neigh_node *neigh_node;
	struct batadv_orig_node *orig_node;
	struct batadv_orig_ifinfo *orig_ifinfo;

	orig_node = container_of(rcu, struct batadv_orig_node, rcu);

	spin_lock_bh(&orig_node->neigh_list_lock);

	/* for all neighbors towards this originator ... */
	hlist_for_each_entry_safe(neigh_node, node_tmp,
				  &orig_node->neigh_list, list) {
		hlist_del_rcu(&neigh_node->list);
		batadv_neigh_node_free_ref_now(neigh_node);
	}

	hlist_for_each_entry_safe(orig_ifinfo, node_tmp,
				  &orig_node->ifinfo_list, list) {
		hlist_del_rcu(&orig_ifinfo->list);
		batadv_orig_ifinfo_free_ref_now(orig_ifinfo);
	}
	spin_unlock_bh(&orig_node->neigh_list_lock);

	batadv_mcast_purge_orig(orig_node);

	/* Free nc_nodes */
	batadv_nc_purge_orig(orig_node->bat_priv, orig_node, NULL);

	batadv_frag_purge_orig(orig_node, NULL);

	if (orig_node->bat_priv->bat_algo_ops->bat_orig_free)
		orig_node->bat_priv->bat_algo_ops->bat_orig_free(orig_node);

	kfree(orig_node->tt_buff);
	kfree(orig_node);
}

/**
 * batadv_orig_node_free_ref - decrement the orig node refcounter and possibly
 *  schedule an rcu callback for freeing it
 * @orig_node: the orig node to free
 */
void batadv_orig_node_free_ref(struct batadv_orig_node *orig_node)
{
	if (atomic_dec_and_test(&orig_node->refcount))
		call_rcu(&orig_node->rcu, batadv_orig_node_free_rcu);
}

/**
 * batadv_orig_node_free_ref_now - decrement the orig node refcounter and
 *  possibly free it (without rcu callback)
 * @orig_node: the orig node to free
 */
void batadv_orig_node_free_ref_now(struct batadv_orig_node *orig_node)
{
	if (atomic_dec_and_test(&orig_node->refcount))
		batadv_orig_node_free_rcu(&orig_node->rcu);
}

void batadv_originator_free(struct batadv_priv *bat_priv)
{
	struct batadv_hashtable *hash = bat_priv->orig_hash;
	struct hlist_node *node_tmp;
	struct hlist_head *head;
	spinlock_t *list_lock; /* spinlock to protect write access */
	struct batadv_orig_node *orig_node;
	u32 i;

	if (!hash)
		return;

	cancel_delayed_work_sync(&bat_priv->orig_work);

	bat_priv->orig_hash = NULL;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(orig_node, node_tmp,
					  head, hash_entry) {
			hlist_del_rcu(&orig_node->hash_entry);
			batadv_orig_node_free_ref(orig_node);
		}
		spin_unlock_bh(list_lock);
	}

	batadv_hash_destroy(hash);
}

/**
 * batadv_orig_node_new - creates a new orig_node
 * @bat_priv: the bat priv with all the soft interface information
 * @addr: the mac address of the originator
 *
 * Creates a new originator object and initialises all the generic fields.
 * The new object is not added to the originator list.
 * Returns the newly created object or NULL on failure.
 */
struct batadv_orig_node *batadv_orig_node_new(struct batadv_priv *bat_priv,
					      const u8 *addr)
{
	struct batadv_orig_node *orig_node;
	struct batadv_orig_node_vlan *vlan;
	unsigned long reset_time;
	int i;

	batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
		   "Creating new originator: %pM\n", addr);

	orig_node = kzalloc(sizeof(*orig_node), GFP_ATOMIC);
	if (!orig_node)
		return NULL;

	INIT_HLIST_HEAD(&orig_node->neigh_list);
	INIT_HLIST_HEAD(&orig_node->vlan_list);
	INIT_HLIST_HEAD(&orig_node->ifinfo_list);
	spin_lock_init(&orig_node->bcast_seqno_lock);
	spin_lock_init(&orig_node->neigh_list_lock);
	spin_lock_init(&orig_node->tt_buff_lock);
	spin_lock_init(&orig_node->tt_lock);
	spin_lock_init(&orig_node->vlan_list_lock);

	batadv_nc_init_orig(orig_node);

	/* extra reference for return */
	atomic_set(&orig_node->refcount, 2);

	orig_node->bat_priv = bat_priv;
	ether_addr_copy(orig_node->orig, addr);
	batadv_dat_init_orig_node_addr(orig_node);
	atomic_set(&orig_node->last_ttvn, 0);
	orig_node->tt_buff = NULL;
	orig_node->tt_buff_len = 0;
	orig_node->last_seen = jiffies;
	reset_time = jiffies - 1 - msecs_to_jiffies(BATADV_RESET_PROTECTION_MS);
	orig_node->bcast_seqno_reset = reset_time;

#ifdef CONFIG_BATMAN_ADV_MCAST
	orig_node->mcast_flags = BATADV_NO_FLAGS;
	INIT_HLIST_NODE(&orig_node->mcast_want_all_unsnoopables_node);
	INIT_HLIST_NODE(&orig_node->mcast_want_all_ipv4_node);
	INIT_HLIST_NODE(&orig_node->mcast_want_all_ipv6_node);
	spin_lock_init(&orig_node->mcast_handler_lock);
#endif

	/* create a vlan object for the "untagged" LAN */
	vlan = batadv_orig_node_vlan_new(orig_node, BATADV_NO_FLAGS);
	if (!vlan)
		goto free_orig_node;
	/* batadv_orig_node_vlan_new() increases the refcounter.
	 * Immediately release vlan since it is not needed anymore in this
	 * context
	 */
	batadv_orig_node_vlan_free_ref(vlan);

	for (i = 0; i < BATADV_FRAG_BUFFER_COUNT; i++) {
		INIT_HLIST_HEAD(&orig_node->fragments[i].head);
		spin_lock_init(&orig_node->fragments[i].lock);
		orig_node->fragments[i].size = 0;
	}

	return orig_node;

free_orig_node:
	kfree(orig_node);
	return NULL;
}
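
/* Illustrative usage sketch (not part of the original file, kept out of the
 * build with "#if 0"): the orig_node returned by batadv_orig_node_new()
 * carries an extra reference for the caller ("extra reference for return"
 * above); once the caller no longer needs its pointer it drops that reference
 * via batadv_orig_node_free_ref(). The function name below is hypothetical.
 */
#if 0
static void batadv_example_make_orig(struct batadv_priv *bat_priv,
				     const u8 *addr)
{
	struct batadv_orig_node *orig_node;

	orig_node = batadv_orig_node_new(bat_priv, addr);
	if (!orig_node)
		return;

	/* ... hand orig_node over to the routing algorithm / orig hash ... */

	batadv_orig_node_free_ref(orig_node);
}
#endif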

/**
 * batadv_purge_neigh_ifinfo - purge obsolete ifinfo entries from a neighbor
 * @bat_priv: the bat priv with all the soft interface information
 * @neigh: the neigh node which is to be checked
 */
static void
batadv_purge_neigh_ifinfo(struct batadv_priv *bat_priv,
			  struct batadv_neigh_node *neigh)
{
	struct batadv_neigh_ifinfo *neigh_ifinfo;
	struct batadv_hard_iface *if_outgoing;
	struct hlist_node *node_tmp;

	spin_lock_bh(&neigh->ifinfo_lock);

	/* for all ifinfo objects for this neighbour */
	hlist_for_each_entry_safe(neigh_ifinfo, node_tmp,
				  &neigh->ifinfo_list, list) {
		if_outgoing = neigh_ifinfo->if_outgoing;

		/* always keep the default interface */
		if (if_outgoing == BATADV_IF_DEFAULT)
			continue;

		/* don't purge if the interface is not (going) down */
		if ((if_outgoing->if_status != BATADV_IF_INACTIVE) &&
		    (if_outgoing->if_status != BATADV_IF_NOT_IN_USE) &&
		    (if_outgoing->if_status != BATADV_IF_TO_BE_REMOVED))
			continue;

		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "neighbor/ifinfo purge: neighbor %pM, iface: %s\n",
			   neigh->addr, if_outgoing->net_dev->name);

		hlist_del_rcu(&neigh_ifinfo->list);
		batadv_neigh_ifinfo_free_ref(neigh_ifinfo);
	}

	spin_unlock_bh(&neigh->ifinfo_lock);
}

/**
 * batadv_purge_orig_ifinfo - purge obsolete ifinfo entries from an originator
 * @bat_priv: the bat priv with all the soft interface information
 * @orig_node: orig node which is to be checked
 *
 * Returns true if any ifinfo entry was purged, false otherwise.
 */
static bool
batadv_purge_orig_ifinfo(struct batadv_priv *bat_priv,
			 struct batadv_orig_node *orig_node)
{
	struct batadv_orig_ifinfo *orig_ifinfo;
	struct batadv_hard_iface *if_outgoing;
	struct hlist_node *node_tmp;
	bool ifinfo_purged = false;

	spin_lock_bh(&orig_node->neigh_list_lock);

	/* for all ifinfo objects for this originator */
	hlist_for_each_entry_safe(orig_ifinfo, node_tmp,
				  &orig_node->ifinfo_list, list) {
		if_outgoing = orig_ifinfo->if_outgoing;

		/* always keep the default interface */
		if (if_outgoing == BATADV_IF_DEFAULT)
			continue;

		/* don't purge if the interface is not (going) down */
		if ((if_outgoing->if_status != BATADV_IF_INACTIVE) &&
		    (if_outgoing->if_status != BATADV_IF_NOT_IN_USE) &&
		    (if_outgoing->if_status != BATADV_IF_TO_BE_REMOVED))
			continue;

		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "router/ifinfo purge: originator %pM, iface: %s\n",
			   orig_node->orig, if_outgoing->net_dev->name);

		ifinfo_purged = true;

		hlist_del_rcu(&orig_ifinfo->list);
		batadv_orig_ifinfo_free_ref(orig_ifinfo);
		if (orig_node->last_bonding_candidate == orig_ifinfo) {
			orig_node->last_bonding_candidate = NULL;
			batadv_orig_ifinfo_free_ref(orig_ifinfo);
		}
	}

	spin_unlock_bh(&orig_node->neigh_list_lock);

	return ifinfo_purged;
}

/**
 * batadv_purge_orig_neighbors - purges neighbors from an originator
 * @bat_priv: the bat priv with all the soft interface information
 * @orig_node: orig node which is to be checked
 *
 * Returns true if any neighbor was purged, false otherwise
 */
static bool
batadv_purge_orig_neighbors(struct batadv_priv *bat_priv,
			    struct batadv_orig_node *orig_node)
{
	struct hlist_node *node_tmp;
	struct batadv_neigh_node *neigh_node;
	bool neigh_purged = false;
	unsigned long last_seen;
	struct batadv_hard_iface *if_incoming;

	spin_lock_bh(&orig_node->neigh_list_lock);

	/* for all neighbors towards this originator ... */
	hlist_for_each_entry_safe(neigh_node, node_tmp,
				  &orig_node->neigh_list, list) {
		last_seen = neigh_node->last_seen;
		if_incoming = neigh_node->if_incoming;

		if ((batadv_has_timed_out(last_seen, BATADV_PURGE_TIMEOUT)) ||
		    (if_incoming->if_status == BATADV_IF_INACTIVE) ||
		    (if_incoming->if_status == BATADV_IF_NOT_IN_USE) ||
		    (if_incoming->if_status == BATADV_IF_TO_BE_REMOVED)) {
			if ((if_incoming->if_status == BATADV_IF_INACTIVE) ||
			    (if_incoming->if_status == BATADV_IF_NOT_IN_USE) ||
			    (if_incoming->if_status == BATADV_IF_TO_BE_REMOVED))
				batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
					   "neighbor purge: originator %pM, neighbor: %pM, iface: %s\n",
					   orig_node->orig, neigh_node->addr,
					   if_incoming->net_dev->name);
			else
				batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
					   "neighbor timeout: originator %pM, neighbor: %pM, last_seen: %u\n",
					   orig_node->orig, neigh_node->addr,
					   jiffies_to_msecs(last_seen));

			neigh_purged = true;

			hlist_del_rcu(&neigh_node->list);
			batadv_neigh_node_free_ref(neigh_node);
		} else {
			/* only necessary if the whole neighbor is not to be
			 * deleted, but some interface has been removed.
			 */
			batadv_purge_neigh_ifinfo(bat_priv, neigh_node);
		}
	}

	spin_unlock_bh(&orig_node->neigh_list_lock);
	return neigh_purged;
}

/**
 * batadv_find_best_neighbor - finds the best neighbor after purging
 * @bat_priv: the bat priv with all the soft interface information
 * @orig_node: orig node which is to be checked
 * @if_outgoing: the interface for which the metric should be compared
 *
 * Returns the current best neighbor, with refcount increased.
 */
static struct batadv_neigh_node *
batadv_find_best_neighbor(struct batadv_priv *bat_priv,
			  struct batadv_orig_node *orig_node,
			  struct batadv_hard_iface *if_outgoing)
{
	struct batadv_neigh_node *best = NULL, *neigh;
	struct batadv_algo_ops *bao = bat_priv->bat_algo_ops;

	rcu_read_lock();
	hlist_for_each_entry_rcu(neigh, &orig_node->neigh_list, list) {
		if (best && (bao->bat_neigh_cmp(neigh, if_outgoing,
						best, if_outgoing) <= 0))
			continue;

		if (!atomic_inc_not_zero(&neigh->refcount))
			continue;

		if (best)
			batadv_neigh_node_free_ref(best);

		best = neigh;
	}
	rcu_read_unlock();

	return best;
}

/**
 * batadv_purge_orig_node - purges obsolete information from an orig_node
 * @bat_priv: the bat priv with all the soft interface information
 * @orig_node: orig node which is to be checked
 *
 * This function checks if the orig_node or substructures of it have become
 * obsolete, and purges this information if that's the case.
 *
 * Returns true if the orig_node is to be removed, false otherwise.
 */
static bool batadv_purge_orig_node(struct batadv_priv *bat_priv,
				   struct batadv_orig_node *orig_node)
{
	struct batadv_neigh_node *best_neigh_node;
	struct batadv_hard_iface *hard_iface;
	bool changed_ifinfo, changed_neigh;

	if (batadv_has_timed_out(orig_node->last_seen,
				 2 * BATADV_PURGE_TIMEOUT)) {
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "Originator timeout: originator %pM, last_seen %u\n",
			   orig_node->orig,
			   jiffies_to_msecs(orig_node->last_seen));
		return true;
	}
	changed_ifinfo = batadv_purge_orig_ifinfo(bat_priv, orig_node);
	changed_neigh = batadv_purge_orig_neighbors(bat_priv, orig_node);

	if (!changed_ifinfo && !changed_neigh)
		return false;

	/* first for the default interface (BATADV_IF_DEFAULT, i.e. NULL) ... */
	best_neigh_node = batadv_find_best_neighbor(bat_priv, orig_node,
						    BATADV_IF_DEFAULT);
	batadv_update_route(bat_priv, orig_node, BATADV_IF_DEFAULT,
			    best_neigh_node);
	if (best_neigh_node)
		batadv_neigh_node_free_ref(best_neigh_node);

	/* ... then for all other interfaces. */
	rcu_read_lock();
	list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
		if (hard_iface->if_status != BATADV_IF_ACTIVE)
			continue;

		if (hard_iface->soft_iface != bat_priv->soft_iface)
			continue;

		best_neigh_node = batadv_find_best_neighbor(bat_priv,
							    orig_node,
							    hard_iface);
		batadv_update_route(bat_priv, orig_node, hard_iface,
				    best_neigh_node);
		if (best_neigh_node)
			batadv_neigh_node_free_ref(best_neigh_node);
	}
	rcu_read_unlock();

	return false;
}

static void _batadv_purge_orig(struct batadv_priv *bat_priv)
{
	struct batadv_hashtable *hash = bat_priv->orig_hash;
	struct hlist_node *node_tmp;
	struct hlist_head *head;
	spinlock_t *list_lock; /* spinlock to protect write access */
	struct batadv_orig_node *orig_node;
	u32 i;

	if (!hash)
		return;

	/* for all origins... */
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(orig_node, node_tmp,
					  head, hash_entry) {
			if (batadv_purge_orig_node(bat_priv, orig_node)) {
				batadv_gw_node_delete(bat_priv, orig_node);
				hlist_del_rcu(&orig_node->hash_entry);
				batadv_tt_global_del_orig(orig_node->bat_priv,
							  orig_node, -1,
							  "originator timed out");
				batadv_orig_node_free_ref(orig_node);
				continue;
			}

			batadv_frag_purge_orig(orig_node,
					       batadv_frag_check_entry);
		}
		spin_unlock_bh(list_lock);
	}

	batadv_gw_election(bat_priv);
}

static void batadv_purge_orig(struct work_struct *work)
{
	struct delayed_work *delayed_work;
	struct batadv_priv *bat_priv;

	delayed_work = container_of(work, struct delayed_work, work);
	bat_priv = container_of(delayed_work, struct batadv_priv, orig_work);
	_batadv_purge_orig(bat_priv);
	queue_delayed_work(batadv_event_workqueue,
			   &bat_priv->orig_work,
			   msecs_to_jiffies(BATADV_ORIG_WORK_PERIOD));
}

void batadv_purge_orig_ref(struct batadv_priv *bat_priv)
{
	_batadv_purge_orig(bat_priv);
}

int batadv_orig_seq_print_text(struct seq_file *seq, void *offset)
{
	struct net_device *net_dev = (struct net_device *)seq->private;
	struct batadv_priv *bat_priv = netdev_priv(net_dev);
	struct batadv_hard_iface *primary_if;

	primary_if = batadv_seq_print_text_primary_if_get(seq);
	if (!primary_if)
		return 0;

	seq_printf(seq, "[B.A.T.M.A.N. adv %s, MainIF/MAC: %s/%pM (%s %s)]\n",
		   BATADV_SOURCE_VERSION, primary_if->net_dev->name,
		   primary_if->net_dev->dev_addr, net_dev->name,
		   bat_priv->bat_algo_ops->name);

	batadv_hardif_free_ref(primary_if);

	if (!bat_priv->bat_algo_ops->bat_orig_print) {
		seq_puts(seq,
			 "No printing function for this routing protocol\n");
		return 0;
	}

	bat_priv->bat_algo_ops->bat_orig_print(bat_priv, seq,
					       BATADV_IF_DEFAULT);

	return 0;
}

/**
 * batadv_orig_hardif_seq_print_text - writes originator infos for a specific
 *  outgoing interface
 * @seq: debugfs table seq_file struct
 * @offset: not used
 *
 * Returns 0
 */
int batadv_orig_hardif_seq_print_text(struct seq_file *seq, void *offset)
{
	struct net_device *net_dev = (struct net_device *)seq->private;
	struct batadv_hard_iface *hard_iface;
	struct batadv_priv *bat_priv;

	hard_iface = batadv_hardif_get_by_netdev(net_dev);

	if (!hard_iface || !hard_iface->soft_iface) {
		seq_puts(seq, "Interface not known to B.A.T.M.A.N.\n");
		goto out;
	}

	bat_priv = netdev_priv(hard_iface->soft_iface);
	if (!bat_priv->bat_algo_ops->bat_orig_print) {
		seq_puts(seq,
			 "No printing function for this routing protocol\n");
		goto out;
	}

	if (hard_iface->if_status != BATADV_IF_ACTIVE) {
		seq_puts(seq, "Interface not active\n");
		goto out;
	}

	seq_printf(seq, "[B.A.T.M.A.N. adv %s, IF/MAC: %s/%pM (%s %s)]\n",
		   BATADV_SOURCE_VERSION, hard_iface->net_dev->name,
		   hard_iface->net_dev->dev_addr,
		   hard_iface->soft_iface->name, bat_priv->bat_algo_ops->name);

	bat_priv->bat_algo_ops->bat_orig_print(bat_priv, seq, hard_iface);

out:
	if (hard_iface)
		batadv_hardif_free_ref(hard_iface);
	return 0;
}

int batadv_orig_hash_add_if(struct batadv_hard_iface *hard_iface,
			    int max_if_num)
{
	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
	struct batadv_algo_ops *bao = bat_priv->bat_algo_ops;
	struct batadv_hashtable *hash = bat_priv->orig_hash;
	struct hlist_head *head;
	struct batadv_orig_node *orig_node;
	u32 i;
	int ret;

	/* resize all orig nodes because orig_node->bcast_own(_sum) depend on
	 * if_num
	 */
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
			ret = 0;
			if (bao->bat_orig_add_if)
				ret = bao->bat_orig_add_if(orig_node,
							   max_if_num);
			if (ret == -ENOMEM)
				goto err;
		}
		rcu_read_unlock();
	}

	return 0;

err:
	rcu_read_unlock();
	return -ENOMEM;
}

int batadv_orig_hash_del_if(struct batadv_hard_iface *hard_iface,
			    int max_if_num)
{
	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
	struct batadv_hashtable *hash = bat_priv->orig_hash;
	struct hlist_head *head;
	struct batadv_hard_iface *hard_iface_tmp;
	struct batadv_orig_node *orig_node;
	struct batadv_algo_ops *bao = bat_priv->bat_algo_ops;
	u32 i;
	int ret;

	/* resize all orig nodes because orig_node->bcast_own(_sum) depend on
	 * if_num
	 */
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
			ret = 0;
			if (bao->bat_orig_del_if)
				ret = bao->bat_orig_del_if(orig_node,
							   max_if_num,
							   hard_iface->if_num);
			if (ret == -ENOMEM)
				goto err;
		}
		rcu_read_unlock();
	}

	/* renumber remaining batman interfaces _inside_ of orig_hash_lock */
	rcu_read_lock();
	list_for_each_entry_rcu(hard_iface_tmp, &batadv_hardif_list, list) {
		if (hard_iface_tmp->if_status == BATADV_IF_NOT_IN_USE)
			continue;

		if (hard_iface == hard_iface_tmp)
			continue;

		if (hard_iface->soft_iface != hard_iface_tmp->soft_iface)
			continue;

		if (hard_iface_tmp->if_num > hard_iface->if_num)
			hard_iface_tmp->if_num--;
	}
	rcu_read_unlock();

	hard_iface->if_num = -1;
	return 0;

err:
	rcu_read_unlock();
	return -ENOMEM;
}