mesh_pathtbl.c

/*
 * Copyright (c) 2008, 2009 open80211s Ltd.
 * Author: Luis Carlos Cobo <luisca@cozybit.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/etherdevice.h>
#include <linux/list.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <net/mac80211.h>
#include "wme.h"
#include "ieee80211_i.h"
#include "mesh.h"

/* There will be initially 2^INIT_PATHS_SIZE_ORDER buckets */
#define INIT_PATHS_SIZE_ORDER	2

/* Keep the mean chain length below this constant */
#define MEAN_CHAIN_LEN		2

static inline bool mpath_expired(struct mesh_path *mpath)
{
	return (mpath->flags & MESH_PATH_ACTIVE) &&
	       time_after(jiffies, mpath->exp_time) &&
	       !(mpath->flags & MESH_PATH_FIXED);
}

struct mpath_node {
	struct hlist_node list;
	struct rcu_head rcu;
	/* This indirection allows two different tables to point to the same
	 * mesh_path structure, useful when resizing
	 */
	struct mesh_path *mpath;
};

static struct mesh_table __rcu *mesh_paths;
static struct mesh_table __rcu *mpp_paths; /* Store paths for MPP&MAP */

int mesh_paths_generation;

/* The grow table function takes this lock as a writer, and the add / delete
 * node functions take it as readers. RCU provides sufficient protection only
 * when reading the table (i.e. doing lookups). Adding or removing nodes
 * requires that we take the read lock or we risk operating on an old table.
 * The write lock is only needed when modifying the number of buckets in a
 * table.
 */
static DEFINE_RWLOCK(pathtbl_resize_lock);
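
/*
 * Illustrative sketch (not part of the original file) of the locking pattern
 * the functions below follow: add / delete paths nest the per-bucket
 * hashwlock inside the read side of pathtbl_resize_lock, while the grow
 * functions take the write side before swapping in a new table.
 *
 *	read_lock_bh(&pathtbl_resize_lock);
 *	tbl = resize_dereference_mesh_paths();
 *	spin_lock(&tbl->hashwlock[hash_idx]);
 *	... add or remove a node in the bucket ...
 *	spin_unlock(&tbl->hashwlock[hash_idx]);
 *	read_unlock_bh(&pathtbl_resize_lock);
 */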

static inline struct mesh_table *resize_dereference_mesh_paths(void)
{
	return rcu_dereference_protected(mesh_paths,
		lockdep_is_held(&pathtbl_resize_lock));
}

static inline struct mesh_table *resize_dereference_mpp_paths(void)
{
	return rcu_dereference_protected(mpp_paths,
		lockdep_is_held(&pathtbl_resize_lock));
}

/*
 * CAREFUL -- "tbl" must not be an expression,
 * in particular not an rcu_dereference(), since
 * it's used twice. So it is illegal to do
 * for_each_mesh_entry(rcu_dereference(...), ...)
 */
#define for_each_mesh_entry(tbl, node, i) \
	for (i = 0; i <= tbl->hash_mask; i++) \
		hlist_for_each_entry_rcu(node, &tbl->hash_buckets[i], list)
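
/*
 * Correct usage (illustrative sketch, mirroring mesh_path_lookup_by_idx()
 * below): dereference the table once into a local variable and pass that,
 * so the macro never evaluates an rcu_dereference() twice.
 *
 *	struct mesh_table *tbl = rcu_dereference(mesh_paths);
 *
 *	for_each_mesh_entry(tbl, node, i) {
 *		... use node->mpath ...
 *	}
 */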

static struct mesh_table *mesh_table_alloc(int size_order)
{
	int i;
	struct mesh_table *newtbl;

	newtbl = kmalloc(sizeof(struct mesh_table), GFP_ATOMIC);
	if (!newtbl)
		return NULL;

	newtbl->hash_buckets = kzalloc(sizeof(struct hlist_head) *
			(1 << size_order), GFP_ATOMIC);
	if (!newtbl->hash_buckets) {
		kfree(newtbl);
		return NULL;
	}

	newtbl->hashwlock = kmalloc(sizeof(spinlock_t) *
			(1 << size_order), GFP_ATOMIC);
	if (!newtbl->hashwlock) {
		kfree(newtbl->hash_buckets);
		kfree(newtbl);
		return NULL;
	}

	newtbl->size_order = size_order;
	newtbl->hash_mask = (1 << size_order) - 1;
	atomic_set(&newtbl->entries, 0);
	get_random_bytes(&newtbl->hash_rnd,
			 sizeof(newtbl->hash_rnd));
	for (i = 0; i <= newtbl->hash_mask; i++)
		spin_lock_init(&newtbl->hashwlock[i]);
	spin_lock_init(&newtbl->gates_lock);

	return newtbl;
}

static void __mesh_table_free(struct mesh_table *tbl)
{
	kfree(tbl->hash_buckets);
	kfree(tbl->hashwlock);
	kfree(tbl);
}

static void mesh_table_free(struct mesh_table *tbl, bool free_leafs)
{
	struct hlist_head *mesh_hash;
	struct hlist_node *p, *q;
	struct mpath_node *gate;
	int i;

	mesh_hash = tbl->hash_buckets;
	for (i = 0; i <= tbl->hash_mask; i++) {
		spin_lock_bh(&tbl->hashwlock[i]);
		hlist_for_each_safe(p, q, &mesh_hash[i]) {
			tbl->free_node(p, free_leafs);
			atomic_dec(&tbl->entries);
		}
		spin_unlock_bh(&tbl->hashwlock[i]);
	}
	if (free_leafs) {
		spin_lock_bh(&tbl->gates_lock);
		hlist_for_each_entry_safe(gate, q,
					  tbl->known_gates, list) {
			hlist_del(&gate->list);
			kfree(gate);
		}
		kfree(tbl->known_gates);
		spin_unlock_bh(&tbl->gates_lock);
	}

	__mesh_table_free(tbl);
}

static int mesh_table_grow(struct mesh_table *oldtbl,
			   struct mesh_table *newtbl)
{
	struct hlist_head *oldhash;
	struct hlist_node *p, *q;
	int i;

	if (atomic_read(&oldtbl->entries)
			< oldtbl->mean_chain_len * (oldtbl->hash_mask + 1))
		return -EAGAIN;

	newtbl->free_node = oldtbl->free_node;
	newtbl->mean_chain_len = oldtbl->mean_chain_len;
	newtbl->copy_node = oldtbl->copy_node;
	newtbl->known_gates = oldtbl->known_gates;
	atomic_set(&newtbl->entries, atomic_read(&oldtbl->entries));

	oldhash = oldtbl->hash_buckets;
	for (i = 0; i <= oldtbl->hash_mask; i++)
		hlist_for_each(p, &oldhash[i])
			if (oldtbl->copy_node(p, newtbl) < 0)
				goto errcopy;

	return 0;

errcopy:
	for (i = 0; i <= newtbl->hash_mask; i++) {
		hlist_for_each_safe(p, q, &newtbl->hash_buckets[i])
			oldtbl->free_node(p, 0);
	}
	return -ENOMEM;
}

static u32 mesh_table_hash(const u8 *addr, struct ieee80211_sub_if_data *sdata,
			   struct mesh_table *tbl)
{
	/* Use last four bytes of hw addr and interface index as hash index */
	return jhash_2words(*(u32 *)(addr+2), sdata->dev->ifindex,
			    tbl->hash_rnd) & tbl->hash_mask;
}

/**
 * mesh_path_assign_nexthop - update mesh path next hop
 *
 * @mpath: mesh path to update
 * @sta: next hop to assign
 *
 * Locking: mpath->state_lock must be held when calling this function
 */
void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta)
{
	struct sk_buff *skb;
	struct ieee80211_hdr *hdr;
	unsigned long flags;

	rcu_assign_pointer(mpath->next_hop, sta);

	spin_lock_irqsave(&mpath->frame_queue.lock, flags);
	skb_queue_walk(&mpath->frame_queue, skb) {
		hdr = (struct ieee80211_hdr *) skb->data;
		memcpy(hdr->addr1, sta->sta.addr, ETH_ALEN);
		memcpy(hdr->addr2, mpath->sdata->vif.addr, ETH_ALEN);
		ieee80211_mps_set_frame_flags(sta->sdata, sta, hdr);
	}
	spin_unlock_irqrestore(&mpath->frame_queue.lock, flags);
}

static void prepare_for_gate(struct sk_buff *skb, char *dst_addr,
			     struct mesh_path *gate_mpath)
{
	struct ieee80211_hdr *hdr;
	struct ieee80211s_hdr *mshdr;
	int mesh_hdrlen, hdrlen;
	char *next_hop;

	hdr = (struct ieee80211_hdr *) skb->data;
	hdrlen = ieee80211_hdrlen(hdr->frame_control);
	mshdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);

	if (!(mshdr->flags & MESH_FLAGS_AE)) {
		/* size of the fixed part of the mesh header */
		mesh_hdrlen = 6;

		/* make room for the two extended addresses */
		skb_push(skb, 2 * ETH_ALEN);
		memmove(skb->data, hdr, hdrlen + mesh_hdrlen);

		hdr = (struct ieee80211_hdr *) skb->data;

		/* we preserve the previous mesh header and only add
		 * the new addresses */
		mshdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
		mshdr->flags = MESH_FLAGS_AE_A5_A6;
		memcpy(mshdr->eaddr1, hdr->addr3, ETH_ALEN);
		memcpy(mshdr->eaddr2, hdr->addr4, ETH_ALEN);
	}

	/* update next hop */
	hdr = (struct ieee80211_hdr *) skb->data;
	rcu_read_lock();
	next_hop = rcu_dereference(gate_mpath->next_hop)->sta.addr;
	memcpy(hdr->addr1, next_hop, ETH_ALEN);
	rcu_read_unlock();
	memcpy(hdr->addr2, gate_mpath->sdata->vif.addr, ETH_ALEN);
	memcpy(hdr->addr3, dst_addr, ETH_ALEN);
}

/**
 * mesh_path_move_to_queue - Move or copy frames from one mpath queue to another
 *
 * This function is used to transfer or copy frames from an unresolved mpath to
 * a gate mpath. The function also adds the Address Extension field and
 * updates the next hop.
 *
 * If a frame already has an Address Extension field, only the next hop and
 * destination addresses are updated.
 *
 * The gate mpath must be an active mpath with a valid mpath->next_hop.
 *
 * @gate_mpath: An active mpath the frames will be sent to (i.e. the gate)
 * @from_mpath: The failed mpath
 * @copy: When true, copy all the frames to the new mpath queue. When false,
 * move them.
 */
static void mesh_path_move_to_queue(struct mesh_path *gate_mpath,
				    struct mesh_path *from_mpath,
				    bool copy)
{
	struct sk_buff *skb, *fskb, *tmp;
	struct sk_buff_head failq;
	unsigned long flags;

	if (WARN_ON(gate_mpath == from_mpath))
		return;
	if (WARN_ON(!gate_mpath->next_hop))
		return;

	__skb_queue_head_init(&failq);

	spin_lock_irqsave(&from_mpath->frame_queue.lock, flags);
	skb_queue_splice_init(&from_mpath->frame_queue, &failq);
	spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags);

	skb_queue_walk_safe(&failq, fskb, tmp) {
		if (skb_queue_len(&gate_mpath->frame_queue) >=
				  MESH_FRAME_QUEUE_LEN) {
			mpath_dbg(gate_mpath->sdata, "mpath queue full!\n");
			break;
		}

		skb = skb_copy(fskb, GFP_ATOMIC);
		if (WARN_ON(!skb))
			break;

		prepare_for_gate(skb, gate_mpath->dst, gate_mpath);
		skb_queue_tail(&gate_mpath->frame_queue, skb);

		if (copy)
			continue;

		__skb_unlink(fskb, &failq);
		kfree_skb(fskb);
	}

	mpath_dbg(gate_mpath->sdata, "Mpath queue for gate %pM has %d frames\n",
		  gate_mpath->dst, skb_queue_len(&gate_mpath->frame_queue));

	if (!copy)
		return;

	spin_lock_irqsave(&from_mpath->frame_queue.lock, flags);
	skb_queue_splice(&failq, &from_mpath->frame_queue);
	spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags);
}

static struct mesh_path *mpath_lookup(struct mesh_table *tbl, const u8 *dst,
				      struct ieee80211_sub_if_data *sdata)
{
	struct mesh_path *mpath;
	struct hlist_head *bucket;
	struct mpath_node *node;

	bucket = &tbl->hash_buckets[mesh_table_hash(dst, sdata, tbl)];
	hlist_for_each_entry_rcu(node, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata &&
		    ether_addr_equal(dst, mpath->dst)) {
			if (mpath_expired(mpath)) {
				spin_lock_bh(&mpath->state_lock);
				mpath->flags &= ~MESH_PATH_ACTIVE;
				spin_unlock_bh(&mpath->state_lock);
			}
			return mpath;
		}
	}
	return NULL;
}

/**
 * mesh_path_lookup - look up a path in the mesh path table
 * @sdata: local subif
 * @dst: hardware address (ETH_ALEN length) of destination
 *
 * Returns: pointer to the mesh path structure, or NULL if not found
 *
 * Locking: must be called within a read rcu section.
 */
struct mesh_path *
mesh_path_lookup(struct ieee80211_sub_if_data *sdata, const u8 *dst)
{
	return mpath_lookup(rcu_dereference(mesh_paths), dst, sdata);
}
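
/*
 * Illustrative usage sketch (not part of the original file): callers must
 * hold the RCU read lock across the lookup and across any use of the
 * returned path, e.g.:
 *
 *	rcu_read_lock();
 *	mpath = mesh_path_lookup(sdata, dst);
 *	if (mpath && (mpath->flags & MESH_PATH_ACTIVE))
 *		... use mpath while still under rcu_read_lock() ...
 *	rcu_read_unlock();
 */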

struct mesh_path *
mpp_path_lookup(struct ieee80211_sub_if_data *sdata, const u8 *dst)
{
	return mpath_lookup(rcu_dereference(mpp_paths), dst, sdata);
}

/**
 * mesh_path_lookup_by_idx - look up a path in the mesh path table by its index
 * @sdata: local subif, or NULL for all entries
 * @idx: index
 *
 * Returns: pointer to the mesh path structure, or NULL if not found.
 *
 * Locking: must be called within a read rcu section.
 */
struct mesh_path *
mesh_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx)
{
	struct mesh_table *tbl = rcu_dereference(mesh_paths);
	struct mpath_node *node;
	int i;
	int j = 0;

	for_each_mesh_entry(tbl, node, i) {
		if (sdata && node->mpath->sdata != sdata)
			continue;
		if (j++ == idx) {
			if (mpath_expired(node->mpath)) {
				spin_lock_bh(&node->mpath->state_lock);
				node->mpath->flags &= ~MESH_PATH_ACTIVE;
				spin_unlock_bh(&node->mpath->state_lock);
			}
			return node->mpath;
		}
	}

	return NULL;
}

/**
 * mesh_path_add_gate - mark the given mpath as a mesh gate and record it in
 * the path table's list of known gates
 * @mpath: gate path to add to table
 */
int mesh_path_add_gate(struct mesh_path *mpath)
{
	struct mesh_table *tbl;
	struct mpath_node *gate, *new_gate;
	int err;

	rcu_read_lock();
	tbl = rcu_dereference(mesh_paths);

	hlist_for_each_entry_rcu(gate, tbl->known_gates, list)
		if (gate->mpath == mpath) {
			err = -EEXIST;
			goto err_rcu;
		}

	new_gate = kzalloc(sizeof(struct mpath_node), GFP_ATOMIC);
	if (!new_gate) {
		err = -ENOMEM;
		goto err_rcu;
	}

	mpath->is_gate = true;
	mpath->sdata->u.mesh.num_gates++;
	new_gate->mpath = mpath;
	spin_lock_bh(&tbl->gates_lock);
	hlist_add_head_rcu(&new_gate->list, tbl->known_gates);
	spin_unlock_bh(&tbl->gates_lock);
	mpath_dbg(mpath->sdata,
		  "Mesh path: Recorded new gate: %pM. %d known gates\n",
		  mpath->dst, mpath->sdata->u.mesh.num_gates);
	err = 0;
err_rcu:
	rcu_read_unlock();
	return err;
}

/**
 * mesh_gate_del - remove a mesh gate from the list of known gates
 * @tbl: table which holds our list of known gates
 * @mpath: gate mpath
 *
 * Locking: must be called inside rcu_read_lock() section
 */
static void mesh_gate_del(struct mesh_table *tbl, struct mesh_path *mpath)
{
	struct mpath_node *gate;
	struct hlist_node *q;

	hlist_for_each_entry_safe(gate, q, tbl->known_gates, list) {
		if (gate->mpath != mpath)
			continue;
		spin_lock_bh(&tbl->gates_lock);
		hlist_del_rcu(&gate->list);
		kfree_rcu(gate, rcu);
		spin_unlock_bh(&tbl->gates_lock);
		mpath->sdata->u.mesh.num_gates--;
		mpath->is_gate = false;
		mpath_dbg(mpath->sdata,
			  "Mesh path: Deleted gate: %pM. %d known gates\n",
			  mpath->dst, mpath->sdata->u.mesh.num_gates);
		break;
	}
}

/**
 * mesh_gate_num - number of gates known to this interface
 * @sdata: subif data
 */
int mesh_gate_num(struct ieee80211_sub_if_data *sdata)
{
	return sdata->u.mesh.num_gates;
}

/**
 * mesh_path_add - allocate and add a new path to the mesh path table
 * @sdata: local subif
 * @dst: destination address of the path (ETH_ALEN length)
 *
 * Returns: the mesh path structure on success (which may be an existing
 * entry for @dst), or an ERR_PTR() on failure
 *
 * State: the initial state of the new path is set to 0
 */
struct mesh_path *mesh_path_add(struct ieee80211_sub_if_data *sdata,
				const u8 *dst)
{
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
	struct ieee80211_local *local = sdata->local;
	struct mesh_table *tbl;
	struct mesh_path *mpath, *new_mpath;
	struct mpath_node *node, *new_node;
	struct hlist_head *bucket;
	int grow = 0;
	int err;
	u32 hash_idx;

	if (ether_addr_equal(dst, sdata->vif.addr))
		/* never add ourselves as neighbours */
		return ERR_PTR(-ENOTSUPP);

	if (is_multicast_ether_addr(dst))
		return ERR_PTR(-ENOTSUPP);

	if (atomic_add_unless(&sdata->u.mesh.mpaths, 1, MESH_MAX_MPATHS) == 0)
		return ERR_PTR(-ENOSPC);

	read_lock_bh(&pathtbl_resize_lock);
	tbl = resize_dereference_mesh_paths();

	hash_idx = mesh_table_hash(dst, sdata, tbl);
	bucket = &tbl->hash_buckets[hash_idx];

	spin_lock(&tbl->hashwlock[hash_idx]);

	hlist_for_each_entry(node, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata &&
		    ether_addr_equal(dst, mpath->dst))
			goto found;
	}

	err = -ENOMEM;
	new_mpath = kzalloc(sizeof(struct mesh_path), GFP_ATOMIC);
	if (!new_mpath)
		goto err_path_alloc;

	new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
	if (!new_node)
		goto err_node_alloc;

	memcpy(new_mpath->dst, dst, ETH_ALEN);
	eth_broadcast_addr(new_mpath->rann_snd_addr);
	new_mpath->is_root = false;
	new_mpath->sdata = sdata;
	new_mpath->flags = 0;
	skb_queue_head_init(&new_mpath->frame_queue);
	new_node->mpath = new_mpath;
	new_mpath->timer.data = (unsigned long) new_mpath;
	new_mpath->timer.function = mesh_path_timer;
	new_mpath->exp_time = jiffies;
	spin_lock_init(&new_mpath->state_lock);
	init_timer(&new_mpath->timer);

	hlist_add_head_rcu(&new_node->list, bucket);
	if (atomic_inc_return(&tbl->entries) >=
	    tbl->mean_chain_len * (tbl->hash_mask + 1))
		grow = 1;

	mesh_paths_generation++;

	if (grow) {
		set_bit(MESH_WORK_GROW_MPATH_TABLE, &ifmsh->wrkq_flags);
		ieee80211_queue_work(&local->hw, &sdata->work);
	}
	mpath = new_mpath;
found:
	spin_unlock(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	return mpath;

err_node_alloc:
	kfree(new_mpath);
err_path_alloc:
	atomic_dec(&sdata->u.mesh.mpaths);
	spin_unlock(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	return ERR_PTR(err);
}
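
/*
 * Illustrative caller sketch (not part of the original file): because
 * mesh_path_add() returns either a mesh path or an ERR_PTR(), callers are
 * expected to check the result with IS_ERR() before using it, e.g.:
 *
 *	mpath = mesh_path_add(sdata, dst);
 *	if (IS_ERR(mpath))
 *		return PTR_ERR(mpath);
 *	... queue the pending frame on mpath->frame_queue and resolve ...
 */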

static void mesh_table_free_rcu(struct rcu_head *rcu)
{
	struct mesh_table *tbl = container_of(rcu, struct mesh_table, rcu_head);

	mesh_table_free(tbl, false);
}

void mesh_mpath_table_grow(void)
{
	struct mesh_table *oldtbl, *newtbl;

	write_lock_bh(&pathtbl_resize_lock);
	oldtbl = resize_dereference_mesh_paths();
	newtbl = mesh_table_alloc(oldtbl->size_order + 1);
	if (!newtbl)
		goto out;
	if (mesh_table_grow(oldtbl, newtbl) < 0) {
		__mesh_table_free(newtbl);
		goto out;
	}
	rcu_assign_pointer(mesh_paths, newtbl);

	call_rcu(&oldtbl->rcu_head, mesh_table_free_rcu);

out:
	write_unlock_bh(&pathtbl_resize_lock);
}

void mesh_mpp_table_grow(void)
{
	struct mesh_table *oldtbl, *newtbl;

	write_lock_bh(&pathtbl_resize_lock);
	oldtbl = resize_dereference_mpp_paths();
	newtbl = mesh_table_alloc(oldtbl->size_order + 1);
	if (!newtbl)
		goto out;
	if (mesh_table_grow(oldtbl, newtbl) < 0) {
		__mesh_table_free(newtbl);
		goto out;
	}
	rcu_assign_pointer(mpp_paths, newtbl);
	call_rcu(&oldtbl->rcu_head, mesh_table_free_rcu);

out:
	write_unlock_bh(&pathtbl_resize_lock);
}

int mpp_path_add(struct ieee80211_sub_if_data *sdata,
		 const u8 *dst, const u8 *mpp)
{
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
	struct ieee80211_local *local = sdata->local;
	struct mesh_table *tbl;
	struct mesh_path *mpath, *new_mpath;
	struct mpath_node *node, *new_node;
	struct hlist_head *bucket;
	int grow = 0;
	int err = 0;
	u32 hash_idx;

	if (ether_addr_equal(dst, sdata->vif.addr))
		/* never add ourselves as neighbours */
		return -ENOTSUPP;

	if (is_multicast_ether_addr(dst))
		return -ENOTSUPP;

	err = -ENOMEM;
	new_mpath = kzalloc(sizeof(struct mesh_path), GFP_ATOMIC);
	if (!new_mpath)
		goto err_path_alloc;

	new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
	if (!new_node)
		goto err_node_alloc;

	read_lock_bh(&pathtbl_resize_lock);
	memcpy(new_mpath->dst, dst, ETH_ALEN);
	memcpy(new_mpath->mpp, mpp, ETH_ALEN);
	new_mpath->sdata = sdata;
	new_mpath->flags = 0;
	skb_queue_head_init(&new_mpath->frame_queue);
	new_node->mpath = new_mpath;
	init_timer(&new_mpath->timer);
	new_mpath->exp_time = jiffies;
	spin_lock_init(&new_mpath->state_lock);

	tbl = resize_dereference_mpp_paths();

	hash_idx = mesh_table_hash(dst, sdata, tbl);
	bucket = &tbl->hash_buckets[hash_idx];

	spin_lock(&tbl->hashwlock[hash_idx]);

	err = -EEXIST;
	hlist_for_each_entry(node, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata &&
		    ether_addr_equal(dst, mpath->dst))
			goto err_exists;
	}

	hlist_add_head_rcu(&new_node->list, bucket);
	if (atomic_inc_return(&tbl->entries) >=
	    tbl->mean_chain_len * (tbl->hash_mask + 1))
		grow = 1;

	spin_unlock(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	if (grow) {
		set_bit(MESH_WORK_GROW_MPP_TABLE, &ifmsh->wrkq_flags);
		ieee80211_queue_work(&local->hw, &sdata->work);
	}
	return 0;

err_exists:
	spin_unlock(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	kfree(new_node);
err_node_alloc:
	kfree(new_mpath);
err_path_alloc:
	return err;
}

/**
 * mesh_plink_broken - deactivates paths and sends perr when a link breaks
 *
 * @sta: broken peer link
 *
 * This function must be called from the rate control algorithm if enough
 * delivery errors suggest that a peer link is no longer usable.
 */
void mesh_plink_broken(struct sta_info *sta)
{
	struct mesh_table *tbl;
	static const u8 bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	int i;

	rcu_read_lock();
	tbl = rcu_dereference(mesh_paths);
	for_each_mesh_entry(tbl, node, i) {
		mpath = node->mpath;
		if (rcu_dereference(mpath->next_hop) == sta &&
		    mpath->flags & MESH_PATH_ACTIVE &&
		    !(mpath->flags & MESH_PATH_FIXED)) {
			spin_lock_bh(&mpath->state_lock);
			mpath->flags &= ~MESH_PATH_ACTIVE;
			++mpath->sn;
			spin_unlock_bh(&mpath->state_lock);
			mesh_path_error_tx(sdata,
					   sdata->u.mesh.mshcfg.element_ttl,
					   mpath->dst, mpath->sn,
					   WLAN_REASON_MESH_PATH_DEST_UNREACHABLE,
					   bcast);
		}
	}
	rcu_read_unlock();
}

static void mesh_path_node_reclaim(struct rcu_head *rp)
{
	struct mpath_node *node = container_of(rp, struct mpath_node, rcu);
	struct ieee80211_sub_if_data *sdata = node->mpath->sdata;

	del_timer_sync(&node->mpath->timer);
	atomic_dec(&sdata->u.mesh.mpaths);
	kfree(node->mpath);
	kfree(node);
}

/* needs to be called with the corresponding hashwlock taken */
static void __mesh_path_del(struct mesh_table *tbl, struct mpath_node *node)
{
	struct mesh_path *mpath;

	mpath = node->mpath;
	spin_lock(&mpath->state_lock);
	mpath->flags |= MESH_PATH_RESOLVING;
	if (mpath->is_gate)
		mesh_gate_del(tbl, mpath);
	hlist_del_rcu(&node->list);
	call_rcu(&node->rcu, mesh_path_node_reclaim);
	spin_unlock(&mpath->state_lock);
	atomic_dec(&tbl->entries);
}

/**
 * mesh_path_flush_by_nexthop - Deletes mesh paths if their next hop matches
 *
 * @sta: mesh peer to match
 *
 * RCU notes: this function is called when a mesh plink transitions from
 * PLINK_ESTAB to any other state, since PLINK_ESTAB state is the only one that
 * allows path creation. This will happen before the sta can be freed (because
 * sta_info_destroy() calls this) so any reader in a rcu read block will be
 * protected against the plink disappearing.
 */
void mesh_path_flush_by_nexthop(struct sta_info *sta)
{
	struct mesh_table *tbl;
	struct mesh_path *mpath;
	struct mpath_node *node;
	int i;

	rcu_read_lock();
	read_lock_bh(&pathtbl_resize_lock);
	tbl = resize_dereference_mesh_paths();
	for_each_mesh_entry(tbl, node, i) {
		mpath = node->mpath;
		if (rcu_dereference(mpath->next_hop) == sta) {
			spin_lock(&tbl->hashwlock[i]);
			__mesh_path_del(tbl, node);
			spin_unlock(&tbl->hashwlock[i]);
		}
	}
	read_unlock_bh(&pathtbl_resize_lock);
	rcu_read_unlock();
}

static void table_flush_by_iface(struct mesh_table *tbl,
				 struct ieee80211_sub_if_data *sdata)
{
	struct mesh_path *mpath;
	struct mpath_node *node;
	int i;

	WARN_ON(!rcu_read_lock_held());
	for_each_mesh_entry(tbl, node, i) {
		mpath = node->mpath;
		if (mpath->sdata != sdata)
			continue;
		spin_lock_bh(&tbl->hashwlock[i]);
		__mesh_path_del(tbl, node);
		spin_unlock_bh(&tbl->hashwlock[i]);
	}
}

/**
 * mesh_path_flush_by_iface - Deletes all mesh paths associated with a given iface
 *
 * This function deletes both mesh paths and mesh portal paths.
 *
 * @sdata: interface data to match
 */
void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata)
{
	struct mesh_table *tbl;

	rcu_read_lock();
	read_lock_bh(&pathtbl_resize_lock);
	tbl = resize_dereference_mesh_paths();
	table_flush_by_iface(tbl, sdata);
	tbl = resize_dereference_mpp_paths();
	table_flush_by_iface(tbl, sdata);
	read_unlock_bh(&pathtbl_resize_lock);
	rcu_read_unlock();
}

/**
 * mesh_path_del - delete a mesh path from the table
 *
 * @sdata: local subif
 * @addr: dst address (ETH_ALEN length)
 *
 * Returns: 0 if successful
 */
int mesh_path_del(struct ieee80211_sub_if_data *sdata, const u8 *addr)
{
	struct mesh_table *tbl;
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_head *bucket;
	int hash_idx;
	int err = 0;

	read_lock_bh(&pathtbl_resize_lock);
	tbl = resize_dereference_mesh_paths();
	hash_idx = mesh_table_hash(addr, sdata, tbl);
	bucket = &tbl->hash_buckets[hash_idx];

	spin_lock(&tbl->hashwlock[hash_idx]);
	hlist_for_each_entry(node, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata &&
		    ether_addr_equal(addr, mpath->dst)) {
			__mesh_path_del(tbl, node);
			goto enddel;
		}
	}

	err = -ENXIO;
enddel:
	mesh_paths_generation++;
	spin_unlock(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	return err;
}

/**
 * mesh_path_tx_pending - sends pending frames in a mesh path queue
 *
 * @mpath: mesh path to activate
 *
 * Locking: the state_lock of the mpath structure must NOT be held when calling
 * this function.
 */
void mesh_path_tx_pending(struct mesh_path *mpath)
{
	if (mpath->flags & MESH_PATH_ACTIVE)
		ieee80211_add_pending_skbs(mpath->sdata->local,
					   &mpath->frame_queue);
}

/**
 * mesh_path_send_to_gates - sends pending frames to all known mesh gates
 *
 * @mpath: mesh path whose queue will be emptied
 *
 * If there is only one gate, the frames are transferred from the failed mpath
 * queue to that gate's queue. If there is more than one gate, the frames
 * are copied from each gate to the next. After frames are copied, the
 * mpath queues are emptied onto the transmission queue.
 */
int mesh_path_send_to_gates(struct mesh_path *mpath)
{
	struct ieee80211_sub_if_data *sdata = mpath->sdata;
	struct mesh_table *tbl;
	struct mesh_path *from_mpath = mpath;
	struct mpath_node *gate = NULL;
	bool copy = false;
	struct hlist_head *known_gates;

	rcu_read_lock();
	tbl = rcu_dereference(mesh_paths);
	known_gates = tbl->known_gates;
	rcu_read_unlock();

	if (!known_gates)
		return -EHOSTUNREACH;

	hlist_for_each_entry_rcu(gate, known_gates, list) {
		if (gate->mpath->sdata != sdata)
			continue;

		if (gate->mpath->flags & MESH_PATH_ACTIVE) {
			mpath_dbg(sdata, "Forwarding to %pM\n", gate->mpath->dst);
			mesh_path_move_to_queue(gate->mpath, from_mpath, copy);
			from_mpath = gate->mpath;
			copy = true;
		} else {
			mpath_dbg(sdata,
				  "Not forwarding %p (flags %#x)\n",
				  gate->mpath, gate->mpath->flags);
		}
	}

	hlist_for_each_entry_rcu(gate, known_gates, list)
		if (gate->mpath->sdata == sdata) {
			mpath_dbg(sdata, "Sending to %pM\n", gate->mpath->dst);
			mesh_path_tx_pending(gate->mpath);
		}

	return (from_mpath == mpath) ? -EHOSTUNREACH : 0;
}

/**
 * mesh_path_discard_frame - discard a frame whose path could not be resolved
 *
 * @sdata: network subif the frame was to be sent through
 * @skb: frame to discard
 *
 * Locking: the function must be called within an rcu_read_lock region
 */
void mesh_path_discard_frame(struct ieee80211_sub_if_data *sdata,
			     struct sk_buff *skb)
{
	kfree_skb(skb);
	sdata->u.mesh.mshstats.dropped_frames_no_route++;
}

/**
 * mesh_path_flush_pending - free the pending queue of a mesh path
 *
 * @mpath: mesh path whose queue has to be freed
 *
 * Locking: the function must be called within an rcu_read_lock region
 */
void mesh_path_flush_pending(struct mesh_path *mpath)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&mpath->frame_queue)) != NULL)
		mesh_path_discard_frame(mpath->sdata, skb);
}

/**
 * mesh_path_fix_nexthop - force a specific next hop for a mesh path
 *
 * @mpath: the mesh path to modify
 * @next_hop: the next hop to force
 *
 * Locking: this function must be called holding mpath->state_lock
 */
void mesh_path_fix_nexthop(struct mesh_path *mpath, struct sta_info *next_hop)
{
	spin_lock_bh(&mpath->state_lock);
	mesh_path_assign_nexthop(mpath, next_hop);
	mpath->sn = 0xffff;
	mpath->metric = 0;
	mpath->hop_count = 0;
	mpath->exp_time = 0;
	mpath->flags |= MESH_PATH_FIXED;
	mesh_path_activate(mpath);
	spin_unlock_bh(&mpath->state_lock);
	mesh_path_tx_pending(mpath);
}

static void mesh_path_node_free(struct hlist_node *p, bool free_leafs)
{
	struct mesh_path *mpath;
	struct mpath_node *node = hlist_entry(p, struct mpath_node, list);

	mpath = node->mpath;
	hlist_del_rcu(p);
	if (free_leafs) {
		del_timer_sync(&mpath->timer);
		kfree(mpath);
	}
	kfree(node);
}

static int mesh_path_node_copy(struct hlist_node *p, struct mesh_table *newtbl)
{
	struct mesh_path *mpath;
	struct mpath_node *node, *new_node;
	u32 hash_idx;

	new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
	if (new_node == NULL)
		return -ENOMEM;

	node = hlist_entry(p, struct mpath_node, list);
	mpath = node->mpath;
	new_node->mpath = mpath;
	hash_idx = mesh_table_hash(mpath->dst, mpath->sdata, newtbl);
	hlist_add_head(&new_node->list,
		       &newtbl->hash_buckets[hash_idx]);
	return 0;
}

int mesh_pathtbl_init(void)
{
	struct mesh_table *tbl_path, *tbl_mpp;
	int ret;

	tbl_path = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
	if (!tbl_path)
		return -ENOMEM;
	tbl_path->free_node = &mesh_path_node_free;
	tbl_path->copy_node = &mesh_path_node_copy;
	tbl_path->mean_chain_len = MEAN_CHAIN_LEN;
	tbl_path->known_gates = kzalloc(sizeof(struct hlist_head), GFP_ATOMIC);
	if (!tbl_path->known_gates) {
		ret = -ENOMEM;
		goto free_path;
	}
	INIT_HLIST_HEAD(tbl_path->known_gates);

	tbl_mpp = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
	if (!tbl_mpp) {
		ret = -ENOMEM;
		goto free_path;
	}
	tbl_mpp->free_node = &mesh_path_node_free;
	tbl_mpp->copy_node = &mesh_path_node_copy;
	tbl_mpp->mean_chain_len = MEAN_CHAIN_LEN;
	tbl_mpp->known_gates = kzalloc(sizeof(struct hlist_head), GFP_ATOMIC);
	if (!tbl_mpp->known_gates) {
		ret = -ENOMEM;
		goto free_mpp;
	}
	INIT_HLIST_HEAD(tbl_mpp->known_gates);

	/* Need no locking since this is during init */
	RCU_INIT_POINTER(mesh_paths, tbl_path);
	RCU_INIT_POINTER(mpp_paths, tbl_mpp);

	return 0;

free_mpp:
	mesh_table_free(tbl_mpp, true);
free_path:
	mesh_table_free(tbl_path, true);
	return ret;
}

void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
{
	struct mesh_table *tbl;
	struct mesh_path *mpath;
	struct mpath_node *node;
	int i;

	rcu_read_lock();
	tbl = rcu_dereference(mesh_paths);
	for_each_mesh_entry(tbl, node, i) {
		if (node->mpath->sdata != sdata)
			continue;
		mpath = node->mpath;
		if ((!(mpath->flags & MESH_PATH_RESOLVING)) &&
		    (!(mpath->flags & MESH_PATH_FIXED)) &&
		     time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE))
			mesh_path_del(mpath->sdata, mpath->dst);
	}
	rcu_read_unlock();
}

void mesh_pathtbl_unregister(void)
{
	/* no need for locking during exit path */
	mesh_table_free(rcu_dereference_protected(mesh_paths, 1), true);
	mesh_table_free(rcu_dereference_protected(mpp_paths, 1), true);
}