send.c

/* Copyright (C) 2007-2016 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "send.h"
#include "main.h"

#include <linux/atomic.h>
#include <linux/bug.h>
#include <linux/byteorder/generic.h>
#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/fs.h>
#include <linux/if.h>
#include <linux/if_ether.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/printk.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stddef.h>
#include <linux/workqueue.h>

#include "distributed-arp-table.h"
#include "fragmentation.h"
#include "gateway_client.h"
#include "hard-interface.h"
#include "log.h"
#include "network-coding.h"
#include "originator.h"
#include "routing.h"
#include "soft-interface.h"
#include "translation-table.h"

static void batadv_send_outstanding_bcast_packet(struct work_struct *work);

/**
 * batadv_send_skb_packet - send an already prepared packet
 * @skb: the packet to send
 * @hard_iface: the interface to use to send the packet
 * @dst_addr: the payload destination
 *
 * Send out an already prepared packet to the given destination address
 * using the specified interface. dst_addr may be a neighbor's address
 * or the broadcast address.
 *
 * Regardless of the return value, the skb is consumed.
 *
 * Return: A negative errno code is returned on a failure. A success does not
 * guarantee the frame will be transmitted as it may be dropped due
 * to congestion or traffic shaping.
 */
int batadv_send_skb_packet(struct sk_buff *skb,
			   struct batadv_hard_iface *hard_iface,
			   const u8 *dst_addr)
{
	struct batadv_priv *bat_priv;
	struct ethhdr *ethhdr;

	bat_priv = netdev_priv(hard_iface->soft_iface);

	if (hard_iface->if_status != BATADV_IF_ACTIVE)
		goto send_skb_err;

	if (unlikely(!hard_iface->net_dev))
		goto send_skb_err;

	if (!(hard_iface->net_dev->flags & IFF_UP)) {
		pr_warn("Interface %s is not up - can't send packet via that interface!\n",
			hard_iface->net_dev->name);
		goto send_skb_err;
	}

	/* push to the ethernet header */
	if (batadv_skb_head_push(skb, ETH_HLEN) < 0)
		goto send_skb_err;

	skb_reset_mac_header(skb);

	ethhdr = eth_hdr(skb);
	ether_addr_copy(ethhdr->h_source, hard_iface->net_dev->dev_addr);
	ether_addr_copy(ethhdr->h_dest, dst_addr);
	ethhdr->h_proto = htons(ETH_P_BATMAN);

	skb_set_network_header(skb, ETH_HLEN);
	skb->protocol = htons(ETH_P_BATMAN);

	skb->dev = hard_iface->net_dev;

	/* Save a clone of the skb to use when decoding coded packets */
	batadv_nc_skb_store_for_decoding(bat_priv, skb);

	/* dev_queue_xmit() returns a negative result on error. However on
	 * congestion and traffic shaping, it drops and returns NET_XMIT_DROP
	 * (which is > 0). This will not be treated as an error.
	 */
	return dev_queue_xmit(skb);
send_skb_err:
	kfree_skb(skb);
	return NET_XMIT_DROP;
}

/**
 * batadv_send_broadcast_skb - send a prepared packet as broadcast
 * @skb: the packet to send
 * @hard_iface: the outgoing interface
 *
 * Return: NET_XMIT_DROP on a local failure, otherwise the result of the
 * lower layer's transmit routine.
 */
int batadv_send_broadcast_skb(struct sk_buff *skb,
			      struct batadv_hard_iface *hard_iface)
{
	return batadv_send_skb_packet(skb, hard_iface, batadv_broadcast_addr);
}

/**
 * batadv_send_unicast_skb - send a prepared packet as unicast to a neighbor
 * @skb: the packet to send
 * @neigh: the neighbor node to send the packet to
 *
 * Return: NET_XMIT_DROP on a local failure, otherwise the result of the
 * lower layer's transmit routine.
 */
int batadv_send_unicast_skb(struct sk_buff *skb,
			    struct batadv_neigh_node *neigh)
{
#ifdef CONFIG_BATMAN_ADV_BATMAN_V
	struct batadv_hardif_neigh_node *hardif_neigh;
#endif
	int ret;

	ret = batadv_send_skb_packet(skb, neigh->if_incoming, neigh->addr);

#ifdef CONFIG_BATMAN_ADV_BATMAN_V
	hardif_neigh = batadv_hardif_neigh_get(neigh->if_incoming, neigh->addr);

	if (hardif_neigh && ret != NET_XMIT_DROP)
		hardif_neigh->bat_v.last_unicast_tx = jiffies;

	if (hardif_neigh)
		batadv_hardif_neigh_put(hardif_neigh);
#endif

	return ret;
}

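/* Illustrative sketch (not part of the original file): how a caller holding
 * a prepared batman-adv frame might pick between the two send helpers above.
 * The function name and parameters are hypothetical; both callees consume
 * the skb regardless of the outcome.
 */
static int example_send_prepared_skb(struct sk_buff *skb,
				     struct batadv_hard_iface *hard_iface,
				     struct batadv_neigh_node *neigh)
{
	/* with a known next hop, send as unicast via its incoming interface */
	if (neigh)
		return batadv_send_unicast_skb(skb, neigh);

	/* otherwise flood the frame on the given interface */
	return batadv_send_broadcast_skb(skb, hard_iface);
}
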
/**
 * batadv_send_skb_to_orig - Lookup next-hop and transmit skb.
 * @skb: Packet to be transmitted.
 * @orig_node: Final destination of the packet.
 * @recv_if: Interface used when receiving the packet (can be NULL).
 *
 * Looks up the best next-hop towards the passed originator and passes the
 * skb on for preparation of the MAC header. If the packet originated from
 * this host, NULL can be passed as recv_if and no interface alternating is
 * attempted.
 *
 * Return: negative errno code on a failure, -EINPROGRESS if the skb is
 * buffered for later transmit or the NET_XMIT status returned by the
 * lower routine if the packet has been passed down.
 */
int batadv_send_skb_to_orig(struct sk_buff *skb,
			    struct batadv_orig_node *orig_node,
			    struct batadv_hard_iface *recv_if)
{
	struct batadv_priv *bat_priv = orig_node->bat_priv;
	struct batadv_neigh_node *neigh_node;
	int ret;

	/* batadv_find_router() increases neigh_node's refcount if found. */
	neigh_node = batadv_find_router(bat_priv, orig_node, recv_if);
	if (!neigh_node) {
		ret = -EINVAL;
		goto free_skb;
	}

	/* Check if the skb is too large to send in one piece and fragment
	 * it if needed.
	 */
	if (atomic_read(&bat_priv->fragmentation) &&
	    skb->len > neigh_node->if_incoming->net_dev->mtu) {
		/* Fragment and send packet. */
		ret = batadv_frag_send_packet(skb, orig_node, neigh_node);
		/* skb was consumed */
		skb = NULL;

		goto put_neigh_node;
	}

	/* try to network code the packet, if it is received on an interface
	 * (i.e. being forwarded). If the packet originates from this node or if
	 * network coding fails, then send the packet as usual.
	 */
	if (recv_if && batadv_nc_skb_forward(skb, neigh_node))
		ret = -EINPROGRESS;
	else
		ret = batadv_send_unicast_skb(skb, neigh_node);

	/* skb was consumed */
	skb = NULL;

put_neigh_node:
	batadv_neigh_node_put(neigh_node);
free_skb:
	kfree_skb(skb);

	return ret;
}

/**
 * batadv_send_skb_push_fill_unicast - extend the buffer and initialize the
 *  common fields for unicast packets
 * @skb: the skb carrying the unicast header to initialize
 * @hdr_size: amount of bytes to push at the beginning of the skb
 * @orig_node: the destination node
 *
 * Return: false if the buffer extension was not possible or true otherwise.
 */
static bool
batadv_send_skb_push_fill_unicast(struct sk_buff *skb, int hdr_size,
				  struct batadv_orig_node *orig_node)
{
	struct batadv_unicast_packet *unicast_packet;
	u8 ttvn = (u8)atomic_read(&orig_node->last_ttvn);

	if (batadv_skb_head_push(skb, hdr_size) < 0)
		return false;

	unicast_packet = (struct batadv_unicast_packet *)skb->data;
	unicast_packet->version = BATADV_COMPAT_VERSION;
	/* batman packet type: unicast */
	unicast_packet->packet_type = BATADV_UNICAST;
	/* set unicast ttl */
	unicast_packet->ttl = BATADV_TTL;
	/* copy the destination for faster routing */
	ether_addr_copy(unicast_packet->dest, orig_node->orig);
	/* set the destination tt version number */
	unicast_packet->ttvn = ttvn;

	return true;
}

/**
 * batadv_send_skb_prepare_unicast - encapsulate an skb with a unicast header
 * @skb: the skb containing the payload to encapsulate
 * @orig_node: the destination node
 *
 * Return: false if the payload could not be encapsulated or true otherwise.
 */
static bool batadv_send_skb_prepare_unicast(struct sk_buff *skb,
					    struct batadv_orig_node *orig_node)
{
	size_t uni_size = sizeof(struct batadv_unicast_packet);

	return batadv_send_skb_push_fill_unicast(skb, uni_size, orig_node);
}

/**
 * batadv_send_skb_prepare_unicast_4addr - encapsulate an skb with a
 *  unicast 4addr header
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the skb containing the payload to encapsulate
 * @orig: the destination node
 * @packet_subtype: the unicast 4addr packet subtype to use
 *
 * Return: false if the payload could not be encapsulated or true otherwise.
 */
bool batadv_send_skb_prepare_unicast_4addr(struct batadv_priv *bat_priv,
					   struct sk_buff *skb,
					   struct batadv_orig_node *orig,
					   int packet_subtype)
{
	struct batadv_hard_iface *primary_if;
	struct batadv_unicast_4addr_packet *uc_4addr_packet;
	bool ret = false;

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;

	/* Pull the header space and fill the unicast_packet substructure.
	 * We can do that because the first member of the uc_4addr_packet
	 * is of type struct unicast_packet
	 */
	if (!batadv_send_skb_push_fill_unicast(skb, sizeof(*uc_4addr_packet),
					       orig))
		goto out;

	uc_4addr_packet = (struct batadv_unicast_4addr_packet *)skb->data;
	uc_4addr_packet->u.packet_type = BATADV_UNICAST_4ADDR;
	ether_addr_copy(uc_4addr_packet->src, primary_if->net_dev->dev_addr);
	uc_4addr_packet->subtype = packet_subtype;
	uc_4addr_packet->reserved = 0;

	ret = true;
out:
	if (primary_if)
		batadv_hardif_put(primary_if);

	return ret;
}

/**
 * batadv_send_skb_unicast - encapsulate and send an skb via unicast
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: payload to send
 * @packet_type: the batman unicast packet type to use
 * @packet_subtype: the unicast 4addr packet subtype (only relevant for unicast
 *  4addr packets)
 * @orig_node: the originator to send the packet to
 * @vid: the vid to be used to search the translation table
 *
 * Wrap the given skb into a batman-adv unicast or unicast-4addr header
 * depending on whether BATADV_UNICAST or BATADV_UNICAST_4ADDR was supplied
 * as packet_type. Then send this frame to the given orig_node.
 *
 * Return: NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
 */
int batadv_send_skb_unicast(struct batadv_priv *bat_priv,
			    struct sk_buff *skb, int packet_type,
			    int packet_subtype,
			    struct batadv_orig_node *orig_node,
			    unsigned short vid)
{
	struct batadv_unicast_packet *unicast_packet;
	struct ethhdr *ethhdr;
	int ret = NET_XMIT_DROP;

	if (!orig_node)
		goto out;

	switch (packet_type) {
	case BATADV_UNICAST:
		if (!batadv_send_skb_prepare_unicast(skb, orig_node))
			goto out;
		break;
	case BATADV_UNICAST_4ADDR:
		if (!batadv_send_skb_prepare_unicast_4addr(bat_priv, skb,
							   orig_node,
							   packet_subtype))
			goto out;
		break;
	default:
		/* this function supports UNICAST and UNICAST_4ADDR only. It
		 * should never be invoked with any other packet type
		 */
		goto out;
	}

	/* skb->data might have been reallocated by
	 * batadv_send_skb_prepare_unicast{,_4addr}()
	 */
	ethhdr = eth_hdr(skb);
	unicast_packet = (struct batadv_unicast_packet *)skb->data;

	/* inform the destination node that we are still missing a correct route
	 * for this client. The destination will receive this packet and will
	 * try to reroute it because the ttvn contained in the header is less
	 * than the current one
	 */
	if (batadv_tt_global_client_is_roaming(bat_priv, ethhdr->h_dest, vid))
		unicast_packet->ttvn = unicast_packet->ttvn - 1;

	ret = batadv_send_skb_to_orig(skb, orig_node, NULL);
	 /* skb was consumed */
	skb = NULL;

out:
	kfree_skb(skb);

	return ret;
}

/**
 * batadv_send_skb_via_tt_generic - send an skb via TT lookup
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: payload to send
 * @packet_type: the batman unicast packet type to use
 * @packet_subtype: the unicast 4addr packet subtype (only relevant for unicast
 *  4addr packets)
 * @dst_hint: can be used to override the destination contained in the skb
 * @vid: the vid to be used to search the translation table
 *
 * Look up the recipient node for the destination address in the ethernet
 * header via the translation table. Wrap the given skb into a batman-adv
 * unicast or unicast-4addr header depending on whether BATADV_UNICAST or
 * BATADV_UNICAST_4ADDR was supplied as packet_type. Then send this frame
 * to the corresponding destination node.
 *
 * Return: NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
 */
int batadv_send_skb_via_tt_generic(struct batadv_priv *bat_priv,
				   struct sk_buff *skb, int packet_type,
				   int packet_subtype, u8 *dst_hint,
				   unsigned short vid)
{
	struct ethhdr *ethhdr = (struct ethhdr *)skb->data;
	struct batadv_orig_node *orig_node;
	u8 *src, *dst;
	int ret;

	src = ethhdr->h_source;
	dst = ethhdr->h_dest;

	/* if we got a hint, send the packet to this client (if any) */
	if (dst_hint) {
		src = NULL;
		dst = dst_hint;
	}

	orig_node = batadv_transtable_search(bat_priv, src, dst, vid);
	ret = batadv_send_skb_unicast(bat_priv, skb, packet_type,
				      packet_subtype, orig_node, vid);

	if (orig_node)
		batadv_orig_node_put(orig_node);

	return ret;
}

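/* Illustrative sketch (not part of the original file): sending a payload
 * whose recipient is resolved from its ethernet header via the translation
 * table. The wrapper name is hypothetical; dst_hint is left NULL so the
 * lookup uses the addresses in the frame, and subtype 0 is ignored for
 * plain BATADV_UNICAST.
 */
static int example_send_via_tt(struct batadv_priv *bat_priv,
			       struct sk_buff *skb, unsigned short vid)
{
	return batadv_send_skb_via_tt_generic(bat_priv, skb, BATADV_UNICAST,
					      0, NULL, vid);
}
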
/**
 * batadv_send_skb_via_gw - send an skb via gateway lookup
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: payload to send
 * @vid: the vid to be used to search the translation table
 *
 * Look up the currently selected gateway. Wrap the given skb into a batman-adv
 * unicast header and send this frame to this gateway node.
 *
 * Return: NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
 */
int batadv_send_skb_via_gw(struct batadv_priv *bat_priv, struct sk_buff *skb,
			   unsigned short vid)
{
	struct batadv_orig_node *orig_node;
	int ret;

	orig_node = batadv_gw_get_selected_orig(bat_priv);
	ret = batadv_send_skb_unicast(bat_priv, skb, BATADV_UNICAST_4ADDR,
				      BATADV_P_DATA, orig_node, vid);

	if (orig_node)
		batadv_orig_node_put(orig_node);

	return ret;
}

/**
 * batadv_forw_packet_free - free a forwarding packet
 * @forw_packet: The packet to free
 * @dropped: whether the packet is freed because it is dropped
 *
 * This frees a forwarding packet and releases any resources it might
 * have claimed.
 */
void batadv_forw_packet_free(struct batadv_forw_packet *forw_packet,
			     bool dropped)
{
	if (dropped)
		kfree_skb(forw_packet->skb);
	else
		consume_skb(forw_packet->skb);

	if (forw_packet->if_incoming)
		batadv_hardif_put(forw_packet->if_incoming);

	if (forw_packet->if_outgoing)
		batadv_hardif_put(forw_packet->if_outgoing);

	if (forw_packet->queue_left)
		atomic_inc(forw_packet->queue_left);

	kfree(forw_packet);
}

/**
 * batadv_forw_packet_alloc - allocate a forwarding packet
 * @if_incoming: The (optional) if_incoming to be grabbed
 * @if_outgoing: The (optional) if_outgoing to be grabbed
 * @queue_left: The (optional) queue counter to decrease
 * @bat_priv: The bat_priv for the mesh of this forw_packet
 *
 * Allocates a forwarding packet and tries to get a reference to the
 * (optional) if_incoming, if_outgoing and queue_left. If queue_left
 * is NULL then bat_priv is optional, too.
 *
 * Return: An allocated forwarding packet on success, NULL otherwise.
 */
struct batadv_forw_packet *
batadv_forw_packet_alloc(struct batadv_hard_iface *if_incoming,
			 struct batadv_hard_iface *if_outgoing,
			 atomic_t *queue_left,
			 struct batadv_priv *bat_priv)
{
	struct batadv_forw_packet *forw_packet;
	const char *qname;

	if (queue_left && !batadv_atomic_dec_not_zero(queue_left)) {
		qname = "unknown";

		if (queue_left == &bat_priv->bcast_queue_left)
			qname = "bcast";

		if (queue_left == &bat_priv->batman_queue_left)
			qname = "batman";

		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "%s queue is full\n", qname);

		return NULL;
	}

	forw_packet = kmalloc(sizeof(*forw_packet), GFP_ATOMIC);
	if (!forw_packet)
		goto err;

	if (if_incoming)
		kref_get(&if_incoming->refcount);

	if (if_outgoing)
		kref_get(&if_outgoing->refcount);

	INIT_HLIST_NODE(&forw_packet->list);
	INIT_HLIST_NODE(&forw_packet->cleanup_list);
	forw_packet->skb = NULL;
	forw_packet->queue_left = queue_left;
	forw_packet->if_incoming = if_incoming;
	forw_packet->if_outgoing = if_outgoing;
	forw_packet->num_packets = 0;

	return forw_packet;

err:
	if (queue_left)
		atomic_inc(queue_left);

	return NULL;
}

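/* Illustrative sketch (not part of the original file): the typical
 * forw_packet lifecycle. Allocation charges the given queue counter and
 * grabs interface references; batadv_forw_packet_free() releases both
 * again. The function name is hypothetical.
 */
static void example_forw_packet_lifecycle(struct batadv_priv *bat_priv,
					  struct batadv_hard_iface *primary_if,
					  struct sk_buff *skb)
{
	struct batadv_forw_packet *forw_packet;

	forw_packet = batadv_forw_packet_alloc(primary_if, NULL,
					       &bat_priv->bcast_queue_left,
					       bat_priv);
	if (!forw_packet)
		return;

	/* the caller attaches the skb itself; the allocator leaves it NULL */
	forw_packet->skb = skb;

	/* ... either queue it, or drop it again as done here */
	batadv_forw_packet_free(forw_packet, true);
}
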
/**
 * batadv_forw_packet_was_stolen - check whether someone stole this packet
 * @forw_packet: the forwarding packet to check
 *
 * This function checks whether the given forwarding packet was claimed by
 * someone else for free().
 *
 * Return: True if someone stole it, false otherwise.
 */
static bool
batadv_forw_packet_was_stolen(struct batadv_forw_packet *forw_packet)
{
	return !hlist_unhashed(&forw_packet->cleanup_list);
}

/**
 * batadv_forw_packet_steal - claim a forw_packet for free()
 * @forw_packet: the forwarding packet to steal
 * @lock: a key to the store to steal from (e.g. forw_{bat,bcast}_list_lock)
 *
 * This function tries to steal a specific forw_packet from global
 * visibility for the purpose of getting it for free(). That means
 * the caller is *not* allowed to requeue it afterwards.
 *
 * Return: True if stealing was successful. False if someone else stole it
 * before us.
 */
bool batadv_forw_packet_steal(struct batadv_forw_packet *forw_packet,
			      spinlock_t *lock)
{
	/* did purging routine steal it earlier? */
	spin_lock_bh(lock);
	if (batadv_forw_packet_was_stolen(forw_packet)) {
		spin_unlock_bh(lock);
		return false;
	}

	hlist_del_init(&forw_packet->list);

	/* Just to spot misuse of this function */
	hlist_add_fake(&forw_packet->cleanup_list);

	spin_unlock_bh(lock);
	return true;
}

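/* Illustrative sketch (not part of the original file): the steal-then-free
 * pattern used at the end of a packet worker, as in
 * batadv_send_outstanding_bcast_packet() below. Only the one who
 * successfully stole the forw_packet may free it; a concurrent purge might
 * have claimed it first. The function name is hypothetical.
 */
static void example_worker_cleanup(struct batadv_priv *bat_priv,
				   struct batadv_forw_packet *forw_packet)
{
	if (batadv_forw_packet_steal(forw_packet,
				     &bat_priv->forw_bcast_list_lock))
		batadv_forw_packet_free(forw_packet, false);
}
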
/**
 * batadv_forw_packet_list_steal - claim a list of forward packets for free()
 * @forw_list: the to be stolen forward packets
 * @cleanup_list: a backup pointer, to be able to dispose the packets later
 * @hard_iface: the interface to steal forward packets from
 *
 * This function claims responsibility to free any forw_packet queued on the
 * given hard_iface. If hard_iface is NULL forwarding packets on all hard
 * interfaces will be claimed.
 *
 * The packets are moved from the forw_list to the cleanup_list, which
 * allows already running threads to notice the claim.
 */
static void
batadv_forw_packet_list_steal(struct hlist_head *forw_list,
			      struct hlist_head *cleanup_list,
			      const struct batadv_hard_iface *hard_iface)
{
	struct batadv_forw_packet *forw_packet;
	struct hlist_node *safe_tmp_node;

	hlist_for_each_entry_safe(forw_packet, safe_tmp_node,
				  forw_list, list) {
		/* if purge_outstanding_packets() was called with an argument
		 * we delete only packets belonging to the given interface
		 */
		if (hard_iface &&
		    (forw_packet->if_incoming != hard_iface) &&
		    (forw_packet->if_outgoing != hard_iface))
			continue;

		hlist_del(&forw_packet->list);
		hlist_add_head(&forw_packet->cleanup_list, cleanup_list);
	}
}

/**
 * batadv_forw_packet_list_free - free a list of forward packets
 * @head: a list of to be freed forw_packets
 *
 * This function cancels the scheduling of any packet in the provided list,
 * waits for any possibly running packet forwarding thread to finish and
 * finally, safely frees these forward packets.
 *
 * This function might sleep.
 */
static void batadv_forw_packet_list_free(struct hlist_head *head)
{
	struct batadv_forw_packet *forw_packet;
	struct hlist_node *safe_tmp_node;

	hlist_for_each_entry_safe(forw_packet, safe_tmp_node, head,
				  cleanup_list) {
		cancel_delayed_work_sync(&forw_packet->delayed_work);

		hlist_del(&forw_packet->cleanup_list);
		batadv_forw_packet_free(forw_packet, true);
	}
}

/**
 * batadv_forw_packet_queue - try to queue a forwarding packet
 * @forw_packet: the forwarding packet to queue
 * @lock: a key to the store (e.g. forw_{bat,bcast}_list_lock)
 * @head: the shelf to queue it on (e.g. forw_{bat,bcast}_list)
 * @send_time: timestamp (jiffies) when the packet is to be sent
 *
 * This function tries to (re)queue a forwarding packet. Requeuing
 * is prevented if the according interface is shutting down
 * (e.g. if batadv_forw_packet_list_steal() was called for this
 * packet earlier).
 *
 * Calling batadv_forw_packet_queue() after a call to
 * batadv_forw_packet_steal() is forbidden!
 *
 * Caller needs to ensure that forw_packet->delayed_work was initialized.
 */
static void batadv_forw_packet_queue(struct batadv_forw_packet *forw_packet,
				     spinlock_t *lock, struct hlist_head *head,
				     unsigned long send_time)
{
	spin_lock_bh(lock);

	/* did purging routine steal it from us? */
	if (batadv_forw_packet_was_stolen(forw_packet)) {
		/* If you got it for free() without trouble, then
		 * don't get back into the queue after stealing...
		 */
		WARN_ONCE(hlist_fake(&forw_packet->cleanup_list),
			  "Requeuing after batadv_forw_packet_steal() not allowed!\n");

		spin_unlock_bh(lock);
		return;
	}

	hlist_del_init(&forw_packet->list);
	hlist_add_head(&forw_packet->list, head);

	queue_delayed_work(batadv_event_workqueue,
			   &forw_packet->delayed_work,
			   send_time - jiffies);
	spin_unlock_bh(lock);
}

/**
 * batadv_forw_packet_bcast_queue - try to queue a broadcast packet
 * @bat_priv: the bat priv with all the soft interface information
 * @forw_packet: the forwarding packet to queue
 * @send_time: timestamp (jiffies) when the packet is to be sent
 *
 * This function tries to (re)queue a broadcast packet.
 *
 * Caller needs to ensure that forw_packet->delayed_work was initialized.
 */
static void
batadv_forw_packet_bcast_queue(struct batadv_priv *bat_priv,
			       struct batadv_forw_packet *forw_packet,
			       unsigned long send_time)
{
	batadv_forw_packet_queue(forw_packet, &bat_priv->forw_bcast_list_lock,
				 &bat_priv->forw_bcast_list, send_time);
}

/**
 * batadv_forw_packet_ogmv1_queue - try to queue an OGMv1 packet
 * @bat_priv: the bat priv with all the soft interface information
 * @forw_packet: the forwarding packet to queue
 * @send_time: timestamp (jiffies) when the packet is to be sent
 *
 * This function tries to (re)queue an OGMv1 packet.
 *
 * Caller needs to ensure that forw_packet->delayed_work was initialized.
 */
void batadv_forw_packet_ogmv1_queue(struct batadv_priv *bat_priv,
				    struct batadv_forw_packet *forw_packet,
				    unsigned long send_time)
{
	batadv_forw_packet_queue(forw_packet, &bat_priv->forw_bat_list_lock,
				 &bat_priv->forw_bat_list, send_time);
}

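/* Illustrative sketch (not part of the original file): queueing an OGMv1
 * forw_packet, mirroring what batadv_add_bcast_packet_to_list() below does
 * for broadcasts. Both names here are hypothetical; a real work function
 * would transmit the OGM and possibly requeue or steal-and-free itself.
 */
static void my_ogm_send_work(struct work_struct *work)
{
	/* hypothetical OGM transmit handler, stubbed out for the sketch */
}

static void example_queue_ogmv1(struct batadv_priv *bat_priv,
				struct batadv_forw_packet *forw_packet,
				unsigned long send_time)
{
	/* the delayed_work must be initialized before queueing */
	INIT_DELAYED_WORK(&forw_packet->delayed_work, my_ogm_send_work);

	batadv_forw_packet_ogmv1_queue(bat_priv, forw_packet, send_time);
}
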
/**
 * batadv_add_bcast_packet_to_list - queue broadcast packet for multiple sends
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: broadcast packet to add
 * @delay: number of jiffies to wait before sending
 * @own_packet: true if it is a self-generated broadcast packet
 *
 * Add a broadcast packet to the queue and set up timers. Broadcast packets
 * are sent multiple times to increase the probability of being received.
 *
 * The skb is not consumed, so the caller should make sure that the
 * skb is freed.
 *
 * Return: NETDEV_TX_OK on success and NETDEV_TX_BUSY on errors.
 */
int batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
				    const struct sk_buff *skb,
				    unsigned long delay,
				    bool own_packet)
{
	struct batadv_hard_iface *primary_if;
	struct batadv_forw_packet *forw_packet;
	struct batadv_bcast_packet *bcast_packet;
	struct sk_buff *newskb;

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto err;

	forw_packet = batadv_forw_packet_alloc(primary_if, NULL,
					       &bat_priv->bcast_queue_left,
					       bat_priv);
	batadv_hardif_put(primary_if);
	if (!forw_packet)
		goto err;

	newskb = skb_copy(skb, GFP_ATOMIC);
	if (!newskb)
		goto err_packet_free;

	/* as we have a copy now, it is safe to decrease the TTL */
	bcast_packet = (struct batadv_bcast_packet *)newskb->data;
	bcast_packet->ttl--;

	forw_packet->skb = newskb;
	forw_packet->own = own_packet;

	INIT_DELAYED_WORK(&forw_packet->delayed_work,
			  batadv_send_outstanding_bcast_packet);

	batadv_forw_packet_bcast_queue(bat_priv, forw_packet, jiffies + delay);
	return NETDEV_TX_OK;

err_packet_free:
	batadv_forw_packet_free(forw_packet, true);
err:
	return NETDEV_TX_BUSY;
}

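/* Illustrative sketch (not part of the original file): how a transmit path
 * might hand a self-generated broadcast frame to the queue. Since the skb
 * is copied rather than consumed, the caller still disposes of its own
 * reference. The function name and the example delay are hypothetical.
 */
static void example_tx_broadcast(struct batadv_priv *bat_priv,
				 struct sk_buff *skb)
{
	if (batadv_add_bcast_packet_to_list(bat_priv, skb,
					    msecs_to_jiffies(5),
					    true) == NETDEV_TX_OK)
		consume_skb(skb);
	else
		kfree_skb(skb);
}
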
static void batadv_send_outstanding_bcast_packet(struct work_struct *work)
{
	struct batadv_hard_iface *hard_iface;
	struct batadv_hardif_neigh_node *neigh_node;
	struct delayed_work *delayed_work;
	struct batadv_forw_packet *forw_packet;
	struct batadv_bcast_packet *bcast_packet;
	struct sk_buff *skb1;
	struct net_device *soft_iface;
	struct batadv_priv *bat_priv;
	unsigned long send_time = jiffies + msecs_to_jiffies(5);
	bool dropped = false;
	u8 *neigh_addr;
	u8 *orig_neigh;
	int ret = 0;

	delayed_work = to_delayed_work(work);
	forw_packet = container_of(delayed_work, struct batadv_forw_packet,
				   delayed_work);
	soft_iface = forw_packet->if_incoming->soft_iface;
	bat_priv = netdev_priv(soft_iface);

	if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_DEACTIVATING) {
		dropped = true;
		goto out;
	}

	if (batadv_dat_drop_broadcast_packet(bat_priv, forw_packet)) {
		dropped = true;
		goto out;
	}

	bcast_packet = (struct batadv_bcast_packet *)forw_packet->skb->data;

	/* rebroadcast packet */
	rcu_read_lock();
	list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
		if (hard_iface->soft_iface != soft_iface)
			continue;

		if (forw_packet->num_packets >= hard_iface->num_bcasts)
			continue;

		if (forw_packet->own) {
			neigh_node = NULL;
		} else {
			neigh_addr = eth_hdr(forw_packet->skb)->h_source;
			neigh_node = batadv_hardif_neigh_get(hard_iface,
							     neigh_addr);
		}

		orig_neigh = neigh_node ? neigh_node->orig : NULL;

		ret = batadv_hardif_no_broadcast(hard_iface, bcast_packet->orig,
						 orig_neigh);
		if (ret) {
			char *type;

			switch (ret) {
			case BATADV_HARDIF_BCAST_NORECIPIENT:
				type = "no neighbor";
				break;
			case BATADV_HARDIF_BCAST_DUPFWD:
				type = "single neighbor is source";
				break;
			case BATADV_HARDIF_BCAST_DUPORIG:
				type = "single neighbor is originator";
				break;
			default:
				type = "unknown";
			}

			batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "BCAST packet from orig %pM on %s suppressed: %s\n",
				   bcast_packet->orig,
				   hard_iface->net_dev->name, type);

			if (neigh_node)
				batadv_hardif_neigh_put(neigh_node);

			continue;
		}

		if (neigh_node)
			batadv_hardif_neigh_put(neigh_node);

		if (!kref_get_unless_zero(&hard_iface->refcount))
			continue;

		/* send a copy of the saved skb */
		skb1 = skb_clone(forw_packet->skb, GFP_ATOMIC);
		if (skb1)
			batadv_send_broadcast_skb(skb1, hard_iface);

		batadv_hardif_put(hard_iface);
	}
	rcu_read_unlock();

	forw_packet->num_packets++;

	/* if we still have some more bcasts to send */
	if (forw_packet->num_packets < BATADV_NUM_BCASTS_MAX) {
		batadv_forw_packet_bcast_queue(bat_priv, forw_packet,
					       send_time);
		return;
	}

out:
	/* do we get something for free()? */
	if (batadv_forw_packet_steal(forw_packet,
				     &bat_priv->forw_bcast_list_lock))
		batadv_forw_packet_free(forw_packet, dropped);
}

/**
 * batadv_purge_outstanding_packets - stop/purge scheduled bcast/OGMv1 packets
 * @bat_priv: the bat priv with all the soft interface information
 * @hard_iface: the hard interface to cancel and purge bcast/ogm packets on
 *
 * This method cancels and purges any broadcast and OGMv1 packet on the given
 * hard_iface. If hard_iface is NULL, broadcast and OGMv1 packets on all hard
 * interfaces will be canceled and purged.
 *
 * This function might sleep.
 */
void
batadv_purge_outstanding_packets(struct batadv_priv *bat_priv,
				 const struct batadv_hard_iface *hard_iface)
{
	struct hlist_head head = HLIST_HEAD_INIT;

	if (hard_iface)
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "purge_outstanding_packets(): %s\n",
			   hard_iface->net_dev->name);
	else
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "purge_outstanding_packets()\n");

	/* claim bcast list for free() */
	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	batadv_forw_packet_list_steal(&bat_priv->forw_bcast_list, &head,
				      hard_iface);
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	/* claim batman packet list for free() */
	spin_lock_bh(&bat_priv->forw_bat_list_lock);
	batadv_forw_packet_list_steal(&bat_priv->forw_bat_list, &head,
				      hard_iface);
	spin_unlock_bh(&bat_priv->forw_bat_list_lock);

	/* then cancel or wait for packet workers to finish and free */
	batadv_forw_packet_list_free(&head);
}
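
/* Illustrative sketch (not part of the original file): a shutdown path would
 * purge the queued packets of one interface before releasing it, and can
 * purge everything when the whole mesh goes down. The function name is
 * hypothetical.
 */
static void example_shutdown(struct batadv_priv *bat_priv,
			     struct batadv_hard_iface *hard_iface)
{
	/* cancel and free packets scheduled on this interface only */
	batadv_purge_outstanding_packets(bat_priv, hard_iface);

	/* ... or pass NULL to purge all interfaces at once */
	batadv_purge_outstanding_packets(bat_priv, NULL);
}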