main.c 36 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004100510061007100810091010101110121013101410151016101710181019102010211022102310241025102610271028102910301031103210331034103510361037103810391040104110421043104410451046104710481049105010511052105310541055105610571058105910601061106210631064106510661067106810691070107110721073107410751076107710781079108010811082108310841085108610871088108910901091109210931094109510961097109810991100110111021103110411051106110711081109111011111112111311141115111611171118111911201121112211231124112511261127112811291130113111321133113411351136113711381139114011411142114311441145114611471148114911501151115211531154115511561157115811591160116111621163116411651166116711681169117011711172117311741175117611771178117911801181118211831184118511861187118811891190119111921193119411951196119711981199120012011202120312041205120612071208120912101211121212131214121512161217121812191220122112221223122412251226122712281229123012311232123312341235123612371238123912401241124212431244124512461247124812491250125112521253125412551256125712581259126012611262126312641265
  1. /* Copyright (C) 2007-2014 B.A.T.M.A.N. contributors:
  2. *
  3. * Marek Lindner, Simon Wunderlich
  4. *
  5. * This program is free software; you can redistribute it and/or
  6. * modify it under the terms of version 2 of the GNU General Public
  7. * License as published by the Free Software Foundation.
  8. *
  9. * This program is distributed in the hope that it will be useful, but
  10. * WITHOUT ANY WARRANTY; without even the implied warranty of
  11. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  12. * General Public License for more details.
  13. *
  14. * You should have received a copy of the GNU General Public License
  15. * along with this program; if not, see <http://www.gnu.org/licenses/>.
  16. */
  17. #include <linux/crc32c.h>
  18. #include <linux/highmem.h>
  19. #include <linux/if_vlan.h>
  20. #include <net/ip.h>
  21. #include <net/ipv6.h>
  22. #include <net/dsfield.h>
  23. #include "main.h"
  24. #include "sysfs.h"
  25. #include "debugfs.h"
  26. #include "routing.h"
  27. #include "send.h"
  28. #include "originator.h"
  29. #include "soft-interface.h"
  30. #include "icmp_socket.h"
  31. #include "translation-table.h"
  32. #include "hard-interface.h"
  33. #include "gateway_client.h"
  34. #include "bridge_loop_avoidance.h"
  35. #include "distributed-arp-table.h"
  36. #include "multicast.h"
  37. #include "gateway_common.h"
  38. #include "hash.h"
  39. #include "bat_algo.h"
  40. #include "network-coding.h"
  41. #include "fragmentation.h"
  42. /* List manipulations on hardif_list have to be rtnl_lock()'ed,
  43. * list traversals just rcu-locked
  44. */
  45. struct list_head batadv_hardif_list;
  46. static int (*batadv_rx_handler[256])(struct sk_buff *,
  47. struct batadv_hard_iface *);
  48. char batadv_routing_algo[20] = "BATMAN_IV";
  49. static struct hlist_head batadv_algo_list;
  50. unsigned char batadv_broadcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
  51. struct workqueue_struct *batadv_event_workqueue;
  52. static void batadv_recv_handler_init(void);
/* module entry point: set up the state shared by all soft interfaces and
 * register the batman-adv link type and netdev notifier
 */
static int __init batadv_init(void)
{
	INIT_LIST_HEAD(&batadv_hardif_list);
	INIT_HLIST_HEAD(&batadv_algo_list);

	/* install the per-packet-type receive handler table */
	batadv_recv_handler_init();

	batadv_iv_init();
	batadv_nc_init();

	/* single-threaded workqueue: events are serialized */
	batadv_event_workqueue = create_singlethread_workqueue("bat_events");
	if (!batadv_event_workqueue)
		return -ENOMEM;

	batadv_socket_init();
	batadv_debugfs_init();

	register_netdevice_notifier(&batadv_hard_if_notifier);
	rtnl_link_register(&batadv_link_ops);

	pr_info("B.A.T.M.A.N. advanced %s (compatibility version %i) loaded\n",
		BATADV_SOURCE_VERSION, BATADV_COMPAT_VERSION);

	return 0;
}
/* module exit point: unregister everything set up in batadv_init() and
 * release the module-global resources
 */
static void __exit batadv_exit(void)
{
	batadv_debugfs_destroy();
	rtnl_link_unregister(&batadv_link_ops);
	unregister_netdevice_notifier(&batadv_hard_if_notifier);
	batadv_hardif_remove_interfaces();

	/* run any still-queued events before destroying the workqueue */
	flush_workqueue(batadv_event_workqueue);
	destroy_workqueue(batadv_event_workqueue);
	batadv_event_workqueue = NULL;

	/* wait for all outstanding RCU callbacks to finish */
	rcu_barrier();
}
/**
 * batadv_mesh_init - initialize the per-mesh state of a soft interface
 * @soft_iface: netdev struct of the soft interface
 *
 * Initializes all locks and lists in the private data and then brings up
 * the depending components (originator table, translation table, bridge
 * loop avoidance, DAT, network coding, gateway, multicast).
 *
 * Returns 0 on success or a negative error code otherwise. On failure the
 * partially initialized state is torn down again via batadv_mesh_free().
 */
int batadv_mesh_init(struct net_device *soft_iface)
{
	struct batadv_priv *bat_priv = netdev_priv(soft_iface);
	int ret;

	/* locks protecting the lists initialized below */
	spin_lock_init(&bat_priv->forw_bat_list_lock);
	spin_lock_init(&bat_priv->forw_bcast_list_lock);
	spin_lock_init(&bat_priv->tt.changes_list_lock);
	spin_lock_init(&bat_priv->tt.req_list_lock);
	spin_lock_init(&bat_priv->tt.roam_list_lock);
	spin_lock_init(&bat_priv->tt.last_changeset_lock);
	spin_lock_init(&bat_priv->tt.commit_lock);
	spin_lock_init(&bat_priv->gw.list_lock);
#ifdef CONFIG_BATMAN_ADV_MCAST
	spin_lock_init(&bat_priv->mcast.want_lists_lock);
#endif
	spin_lock_init(&bat_priv->tvlv.container_list_lock);
	spin_lock_init(&bat_priv->tvlv.handler_list_lock);
	spin_lock_init(&bat_priv->softif_vlan_list_lock);

	INIT_HLIST_HEAD(&bat_priv->forw_bat_list);
	INIT_HLIST_HEAD(&bat_priv->forw_bcast_list);
	INIT_HLIST_HEAD(&bat_priv->gw.list);
#ifdef CONFIG_BATMAN_ADV_MCAST
	INIT_HLIST_HEAD(&bat_priv->mcast.want_all_unsnoopables_list);
	INIT_HLIST_HEAD(&bat_priv->mcast.want_all_ipv4_list);
	INIT_HLIST_HEAD(&bat_priv->mcast.want_all_ipv6_list);
#endif
	INIT_LIST_HEAD(&bat_priv->tt.changes_list);
	INIT_LIST_HEAD(&bat_priv->tt.req_list);
	INIT_LIST_HEAD(&bat_priv->tt.roam_list);
#ifdef CONFIG_BATMAN_ADV_MCAST
	INIT_HLIST_HEAD(&bat_priv->mcast.mla_list);
#endif
	INIT_HLIST_HEAD(&bat_priv->tvlv.container_list);
	INIT_HLIST_HEAD(&bat_priv->tvlv.handler_list);
	INIT_HLIST_HEAD(&bat_priv->softif_vlan_list);

	/* bring up the depending components; bail out on the first error */
	ret = batadv_originator_init(bat_priv);
	if (ret < 0)
		goto err;

	ret = batadv_tt_init(bat_priv);
	if (ret < 0)
		goto err;

	ret = batadv_bla_init(bat_priv);
	if (ret < 0)
		goto err;

	ret = batadv_dat_init(bat_priv);
	if (ret < 0)
		goto err;

	ret = batadv_nc_mesh_init(bat_priv);
	if (ret < 0)
		goto err;

	batadv_gw_init(bat_priv);
	batadv_mcast_init(bat_priv);

	atomic_set(&bat_priv->gw.reselect, 0);
	/* only mark the mesh active once everything is set up */
	atomic_set(&bat_priv->mesh_state, BATADV_MESH_ACTIVE);

	return 0;

err:
	batadv_mesh_free(soft_iface);
	return ret;
}
/**
 * batadv_mesh_free - tear down all per-mesh state of a soft interface
 * @soft_iface: netdev struct of the soft interface
 *
 * Counterpart of batadv_mesh_init(); the teardown order below is
 * significant (see the inline comments).
 */
void batadv_mesh_free(struct net_device *soft_iface)
{
	struct batadv_priv *bat_priv = netdev_priv(soft_iface);

	/* signal shutdown so no new work is started for this mesh */
	atomic_set(&bat_priv->mesh_state, BATADV_MESH_DEACTIVATING);

	batadv_purge_outstanding_packets(bat_priv, NULL);

	batadv_gw_node_purge(bat_priv);
	batadv_nc_mesh_free(bat_priv);
	batadv_dat_free(bat_priv);
	batadv_bla_free(bat_priv);

	batadv_mcast_free(bat_priv);

	/* Free the TT and the originator tables only after having terminated
	 * all the other depending components which may use these structures for
	 * their purposes.
	 */
	batadv_tt_free(bat_priv);

	/* Since the originator table clean up routine is accessing the TT
	 * tables as well, it has to be invoked after the TT tables have been
	 * freed and marked as empty. This ensures that no cleanup RCU callbacks
	 * accessing the TT data are scheduled for later execution.
	 */
	batadv_originator_free(bat_priv);

	batadv_gw_free(bat_priv);

	free_percpu(bat_priv->bat_counters);
	bat_priv->bat_counters = NULL;

	atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE);
}
  167. /**
  168. * batadv_is_my_mac - check if the given mac address belongs to any of the real
  169. * interfaces in the current mesh
  170. * @bat_priv: the bat priv with all the soft interface information
  171. * @addr: the address to check
  172. */
  173. int batadv_is_my_mac(struct batadv_priv *bat_priv, const uint8_t *addr)
  174. {
  175. const struct batadv_hard_iface *hard_iface;
  176. rcu_read_lock();
  177. list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
  178. if (hard_iface->if_status != BATADV_IF_ACTIVE)
  179. continue;
  180. if (hard_iface->soft_iface != bat_priv->soft_iface)
  181. continue;
  182. if (batadv_compare_eth(hard_iface->net_dev->dev_addr, addr)) {
  183. rcu_read_unlock();
  184. return 1;
  185. }
  186. }
  187. rcu_read_unlock();
  188. return 0;
  189. }
  190. /**
  191. * batadv_seq_print_text_primary_if_get - called from debugfs table printing
  192. * function that requires the primary interface
  193. * @seq: debugfs table seq_file struct
  194. *
  195. * Returns primary interface if found or NULL otherwise.
  196. */
  197. struct batadv_hard_iface *
  198. batadv_seq_print_text_primary_if_get(struct seq_file *seq)
  199. {
  200. struct net_device *net_dev = (struct net_device *)seq->private;
  201. struct batadv_priv *bat_priv = netdev_priv(net_dev);
  202. struct batadv_hard_iface *primary_if;
  203. primary_if = batadv_primary_if_get_selected(bat_priv);
  204. if (!primary_if) {
  205. seq_printf(seq,
  206. "BATMAN mesh %s disabled - please specify interfaces to enable it\n",
  207. net_dev->name);
  208. goto out;
  209. }
  210. if (primary_if->if_status == BATADV_IF_ACTIVE)
  211. goto out;
  212. seq_printf(seq,
  213. "BATMAN mesh %s disabled - primary interface not active\n",
  214. net_dev->name);
  215. batadv_hardif_free_ref(primary_if);
  216. primary_if = NULL;
  217. out:
  218. return primary_if;
  219. }
  220. /**
  221. * batadv_max_header_len - calculate maximum encapsulation overhead for a
  222. * payload packet
  223. *
  224. * Return the maximum encapsulation overhead in bytes.
  225. */
  226. int batadv_max_header_len(void)
  227. {
  228. int header_len = 0;
  229. header_len = max_t(int, header_len,
  230. sizeof(struct batadv_unicast_packet));
  231. header_len = max_t(int, header_len,
  232. sizeof(struct batadv_unicast_4addr_packet));
  233. header_len = max_t(int, header_len,
  234. sizeof(struct batadv_bcast_packet));
  235. #ifdef CONFIG_BATMAN_ADV_NC
  236. header_len = max_t(int, header_len,
  237. sizeof(struct batadv_coded_packet));
  238. #endif
  239. return header_len + ETH_HLEN;
  240. }
/**
 * batadv_skb_set_priority - sets skb priority according to packet content
 * @skb: the packet to be sent
 * @offset: offset to the packet content
 *
 * This function sets a value between 256 and 263 (802.1d priority), which
 * can be interpreted by the cfg80211 or other drivers.
 */
void batadv_skb_set_priority(struct sk_buff *skb, int offset)
{
	struct iphdr ip_hdr_tmp, *ip_hdr;
	struct ipv6hdr ip6_hdr_tmp, *ip6_hdr;
	struct ethhdr ethhdr_tmp, *ethhdr;
	struct vlan_ethhdr *vhdr, vhdr_tmp;
	u32 prio;

	/* already set, do nothing */
	if (skb->priority >= 256 && skb->priority <= 263)
		return;

	/* inner ethernet header lives at 'offset' inside the skb */
	ethhdr = skb_header_pointer(skb, offset, sizeof(*ethhdr), &ethhdr_tmp);
	if (!ethhdr)
		return;

	switch (ethhdr->h_proto) {
	case htons(ETH_P_8021Q):
		/* NOTE(review): the VLAN header is read at
		 * offset + sizeof(*vhdr) although the ethernet header above
		 * was read at 'offset' itself - confirm this extra offset is
		 * intended and not an off-by-one-header bug
		 */
		vhdr = skb_header_pointer(skb, offset + sizeof(*vhdr),
					  sizeof(*vhdr), &vhdr_tmp);
		if (!vhdr)
			return;
		/* extract the 802.1q PCP bits from the TCI field */
		prio = ntohs(vhdr->h_vlan_TCI) & VLAN_PRIO_MASK;
		prio = prio >> VLAN_PRIO_SHIFT;
		break;
	case htons(ETH_P_IP):
		ip_hdr = skb_header_pointer(skb, offset + sizeof(*ethhdr),
					    sizeof(*ip_hdr), &ip_hdr_tmp);
		if (!ip_hdr)
			return;
		/* map the DSCP bits onto a 0-7 priority */
		prio = (ipv4_get_dsfield(ip_hdr) & 0xfc) >> 5;
		break;
	case htons(ETH_P_IPV6):
		ip6_hdr = skb_header_pointer(skb, offset + sizeof(*ethhdr),
					     sizeof(*ip6_hdr), &ip6_hdr_tmp);
		if (!ip6_hdr)
			return;
		/* map the DSCP bits onto a 0-7 priority */
		prio = (ipv6_get_dsfield(ip6_hdr) & 0xfc) >> 5;
		break;
	default:
		return;
	}

	skb->priority = prio + 256;
}
/* fallback receive handler: packet types without a registered handler end
 * up here; the packet is reported as dropped (both parameters are unused)
 */
static int batadv_recv_unhandled_packet(struct sk_buff *skb,
					struct batadv_hard_iface *recv_if)
{
	return NET_RX_DROP;
}
/* incoming packets with the batman ethertype received on any active hard
 * interface
 */
int batadv_batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
			   struct packet_type *ptype,
			   struct net_device *orig_dev)
{
	struct batadv_priv *bat_priv;
	struct batadv_ogm_packet *batadv_ogm_packet;
	struct batadv_hard_iface *hard_iface;
	uint8_t idx;
	int ret;

	/* the packet_type is embedded in the hard interface struct */
	hard_iface = container_of(ptype, struct batadv_hard_iface,
				  batman_adv_ptype);
	skb = skb_share_check(skb, GFP_ATOMIC);

	/* skb was released by skb_share_check() */
	if (!skb)
		goto err_out;

	/* packet should hold at least type and version */
	if (unlikely(!pskb_may_pull(skb, 2)))
		goto err_free;

	/* expect a valid ethernet header here. */
	if (unlikely(skb->mac_len != ETH_HLEN || !skb_mac_header(skb)))
		goto err_free;

	if (!hard_iface->soft_iface)
		goto err_free;

	bat_priv = netdev_priv(hard_iface->soft_iface);

	if (atomic_read(&bat_priv->mesh_state) != BATADV_MESH_ACTIVE)
		goto err_free;

	/* discard frames on not active interfaces */
	if (hard_iface->if_status != BATADV_IF_ACTIVE)
		goto err_free;

	batadv_ogm_packet = (struct batadv_ogm_packet *)skb->data;

	if (batadv_ogm_packet->version != BATADV_COMPAT_VERSION) {
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "Drop packet: incompatible batman version (%i)\n",
			   batadv_ogm_packet->version);
		goto err_free;
	}

	/* reset control block to avoid left overs from previous users */
	memset(skb->cb, 0, sizeof(struct batadv_skb_cb));

	/* all receive handlers return whether they received or reused
	 * the supplied skb. if not, we have to free the skb.
	 */
	idx = batadv_ogm_packet->packet_type;
	ret = (*batadv_rx_handler[idx])(skb, hard_iface);

	if (ret == NET_RX_DROP)
		kfree_skb(skb);

	/* return NET_RX_SUCCESS in any case as we
	 * most probably dropped the packet for
	 * routing-logical reasons.
	 */
	return NET_RX_SUCCESS;

err_free:
	kfree_skb(skb);
err_out:
	return NET_RX_DROP;
}
/* populate the packet-type dispatch table used by batadv_batman_skb_recv() */
static void batadv_recv_handler_init(void)
{
	int i;

	/* default every entry to the drop handler first ... */
	for (i = 0; i < ARRAY_SIZE(batadv_rx_handler); i++)
		batadv_rx_handler[i] = batadv_recv_unhandled_packet;

	/* ... and the unicast range to the unicast drop handler */
	for (i = BATADV_UNICAST_MIN; i <= BATADV_UNICAST_MAX; i++)
		batadv_rx_handler[i] = batadv_recv_unhandled_unicast_packet;

	/* compile time checks for sizes */
	BUILD_BUG_ON(sizeof(struct batadv_bla_claim_dst) != 6);
	BUILD_BUG_ON(sizeof(struct batadv_ogm_packet) != 24);
	BUILD_BUG_ON(sizeof(struct batadv_icmp_header) != 20);
	BUILD_BUG_ON(sizeof(struct batadv_icmp_packet) != 20);
	BUILD_BUG_ON(sizeof(struct batadv_icmp_packet_rr) != 116);
	BUILD_BUG_ON(sizeof(struct batadv_unicast_packet) != 10);
	BUILD_BUG_ON(sizeof(struct batadv_unicast_4addr_packet) != 18);
	BUILD_BUG_ON(sizeof(struct batadv_frag_packet) != 20);
	BUILD_BUG_ON(sizeof(struct batadv_bcast_packet) != 14);
	BUILD_BUG_ON(sizeof(struct batadv_coded_packet) != 46);
	BUILD_BUG_ON(sizeof(struct batadv_unicast_tvlv_packet) != 20);
	BUILD_BUG_ON(sizeof(struct batadv_tvlv_hdr) != 4);
	BUILD_BUG_ON(sizeof(struct batadv_tvlv_gateway_data) != 8);
	BUILD_BUG_ON(sizeof(struct batadv_tvlv_tt_vlan_data) != 8);
	BUILD_BUG_ON(sizeof(struct batadv_tvlv_tt_change) != 12);
	BUILD_BUG_ON(sizeof(struct batadv_tvlv_roam_adv) != 8);

	/* broadcast packet */
	batadv_rx_handler[BATADV_BCAST] = batadv_recv_bcast_packet;

	/* unicast packets ... */
	/* unicast with 4 addresses packet */
	batadv_rx_handler[BATADV_UNICAST_4ADDR] = batadv_recv_unicast_packet;
	/* unicast packet */
	batadv_rx_handler[BATADV_UNICAST] = batadv_recv_unicast_packet;
	/* unicast tvlv packet */
	batadv_rx_handler[BATADV_UNICAST_TVLV] = batadv_recv_unicast_tvlv;
	/* batman icmp packet */
	batadv_rx_handler[BATADV_ICMP] = batadv_recv_icmp_packet;
	/* Fragmented packets */
	batadv_rx_handler[BATADV_UNICAST_FRAG] = batadv_recv_frag_packet;
}
  391. int
  392. batadv_recv_handler_register(uint8_t packet_type,
  393. int (*recv_handler)(struct sk_buff *,
  394. struct batadv_hard_iface *))
  395. {
  396. int (*curr)(struct sk_buff *,
  397. struct batadv_hard_iface *);
  398. curr = batadv_rx_handler[packet_type];
  399. if ((curr != batadv_recv_unhandled_packet) &&
  400. (curr != batadv_recv_unhandled_unicast_packet))
  401. return -EBUSY;
  402. batadv_rx_handler[packet_type] = recv_handler;
  403. return 0;
  404. }
/**
 * batadv_recv_handler_unregister - reset a packet type to the drop handler
 * @packet_type: batadv packet type to unregister
 */
void batadv_recv_handler_unregister(uint8_t packet_type)
{
	batadv_rx_handler[packet_type] = batadv_recv_unhandled_packet;
}
  409. static struct batadv_algo_ops *batadv_algo_get(char *name)
  410. {
  411. struct batadv_algo_ops *bat_algo_ops = NULL, *bat_algo_ops_tmp;
  412. hlist_for_each_entry(bat_algo_ops_tmp, &batadv_algo_list, list) {
  413. if (strcmp(bat_algo_ops_tmp->name, name) != 0)
  414. continue;
  415. bat_algo_ops = bat_algo_ops_tmp;
  416. break;
  417. }
  418. return bat_algo_ops;
  419. }
  420. int batadv_algo_register(struct batadv_algo_ops *bat_algo_ops)
  421. {
  422. struct batadv_algo_ops *bat_algo_ops_tmp;
  423. int ret;
  424. bat_algo_ops_tmp = batadv_algo_get(bat_algo_ops->name);
  425. if (bat_algo_ops_tmp) {
  426. pr_info("Trying to register already registered routing algorithm: %s\n",
  427. bat_algo_ops->name);
  428. ret = -EEXIST;
  429. goto out;
  430. }
  431. /* all algorithms must implement all ops (for now) */
  432. if (!bat_algo_ops->bat_iface_enable ||
  433. !bat_algo_ops->bat_iface_disable ||
  434. !bat_algo_ops->bat_iface_update_mac ||
  435. !bat_algo_ops->bat_primary_iface_set ||
  436. !bat_algo_ops->bat_ogm_schedule ||
  437. !bat_algo_ops->bat_ogm_emit ||
  438. !bat_algo_ops->bat_neigh_cmp ||
  439. !bat_algo_ops->bat_neigh_is_equiv_or_better) {
  440. pr_info("Routing algo '%s' does not implement required ops\n",
  441. bat_algo_ops->name);
  442. ret = -EINVAL;
  443. goto out;
  444. }
  445. INIT_HLIST_NODE(&bat_algo_ops->list);
  446. hlist_add_head(&bat_algo_ops->list, &batadv_algo_list);
  447. ret = 0;
  448. out:
  449. return ret;
  450. }
  451. int batadv_algo_select(struct batadv_priv *bat_priv, char *name)
  452. {
  453. struct batadv_algo_ops *bat_algo_ops;
  454. int ret = -EINVAL;
  455. bat_algo_ops = batadv_algo_get(name);
  456. if (!bat_algo_ops)
  457. goto out;
  458. bat_priv->bat_algo_ops = bat_algo_ops;
  459. ret = 0;
  460. out:
  461. return ret;
  462. }
  463. int batadv_algo_seq_print_text(struct seq_file *seq, void *offset)
  464. {
  465. struct batadv_algo_ops *bat_algo_ops;
  466. seq_puts(seq, "Available routing algorithms:\n");
  467. hlist_for_each_entry(bat_algo_ops, &batadv_algo_list, list) {
  468. seq_printf(seq, "%s\n", bat_algo_ops->name);
  469. }
  470. return 0;
  471. }
/**
 * batadv_skb_crc32 - calculate CRC32 of the whole packet and skip bytes in
 * the header
 * @skb: skb pointing to fragmented socket buffers
 * @payload_ptr: Pointer to position inside the head buffer of the skb
 * marking the start of the data to be CRC'ed
 *
 * payload_ptr must always point to an address in the skb head buffer and not to
 * a fragment.
 *
 * Returns the crc32c checksum in network byte order.
 */
__be32 batadv_skb_crc32(struct sk_buff *skb, u8 *payload_ptr)
{
	u32 crc = 0;
	unsigned int from;
	unsigned int to = skb->len;
	struct skb_seq_state st;
	const u8 *data;
	unsigned int len;
	unsigned int consumed = 0;

	/* byte offset of payload_ptr from the start of the head buffer */
	from = (unsigned int)(payload_ptr - skb->data);

	/* walk every linear chunk and fragment between 'from' and the skb
	 * end, feeding each chunk into the running crc32c
	 */
	skb_prepare_seq_read(skb, from, to, &st);
	while ((len = skb_seq_read(consumed, &data, &st)) != 0) {
		crc = crc32c(crc, data, len);
		consumed += len;
	}

	return htonl(crc);
}
/**
 * batadv_tvlv_handler_free_ref - decrement the tvlv handler refcounter and
 * possibly free it
 * @tvlv_handler: the tvlv handler to free
 */
static void
batadv_tvlv_handler_free_ref(struct batadv_tvlv_handler *tvlv_handler)
{
	/* free via RCU so concurrent readers of the handler list are not
	 * left with a dangling pointer
	 */
	if (atomic_dec_and_test(&tvlv_handler->refcount))
		kfree_rcu(tvlv_handler, rcu);
}
  510. /**
  511. * batadv_tvlv_handler_get - retrieve tvlv handler from the tvlv handler list
  512. * based on the provided type and version (both need to match)
  513. * @bat_priv: the bat priv with all the soft interface information
  514. * @type: tvlv handler type to look for
  515. * @version: tvlv handler version to look for
  516. *
  517. * Returns tvlv handler if found or NULL otherwise.
  518. */
  519. static struct batadv_tvlv_handler
  520. *batadv_tvlv_handler_get(struct batadv_priv *bat_priv,
  521. uint8_t type, uint8_t version)
  522. {
  523. struct batadv_tvlv_handler *tvlv_handler_tmp, *tvlv_handler = NULL;
  524. rcu_read_lock();
  525. hlist_for_each_entry_rcu(tvlv_handler_tmp,
  526. &bat_priv->tvlv.handler_list, list) {
  527. if (tvlv_handler_tmp->type != type)
  528. continue;
  529. if (tvlv_handler_tmp->version != version)
  530. continue;
  531. if (!atomic_inc_not_zero(&tvlv_handler_tmp->refcount))
  532. continue;
  533. tvlv_handler = tvlv_handler_tmp;
  534. break;
  535. }
  536. rcu_read_unlock();
  537. return tvlv_handler;
  538. }
/**
 * batadv_tvlv_container_free_ref - decrement the tvlv container refcounter and
 * possibly free it
 * @tvlv: the tvlv container to free
 */
static void batadv_tvlv_container_free_ref(struct batadv_tvlv_container *tvlv)
{
	/* last reference gone: release the container allocation */
	if (atomic_dec_and_test(&tvlv->refcount))
		kfree(tvlv);
}
  549. /**
  550. * batadv_tvlv_container_get - retrieve tvlv container from the tvlv container
  551. * list based on the provided type and version (both need to match)
  552. * @bat_priv: the bat priv with all the soft interface information
  553. * @type: tvlv container type to look for
  554. * @version: tvlv container version to look for
  555. *
  556. * Has to be called with the appropriate locks being acquired
  557. * (tvlv.container_list_lock).
  558. *
  559. * Returns tvlv container if found or NULL otherwise.
  560. */
  561. static struct batadv_tvlv_container
  562. *batadv_tvlv_container_get(struct batadv_priv *bat_priv,
  563. uint8_t type, uint8_t version)
  564. {
  565. struct batadv_tvlv_container *tvlv_tmp, *tvlv = NULL;
  566. hlist_for_each_entry(tvlv_tmp, &bat_priv->tvlv.container_list, list) {
  567. if (tvlv_tmp->tvlv_hdr.type != type)
  568. continue;
  569. if (tvlv_tmp->tvlv_hdr.version != version)
  570. continue;
  571. if (!atomic_inc_not_zero(&tvlv_tmp->refcount))
  572. continue;
  573. tvlv = tvlv_tmp;
  574. break;
  575. }
  576. return tvlv;
  577. }
  578. /**
  579. * batadv_tvlv_container_list_size - calculate the size of the tvlv container
  580. * list entries
  581. * @bat_priv: the bat priv with all the soft interface information
  582. *
  583. * Has to be called with the appropriate locks being acquired
  584. * (tvlv.container_list_lock).
  585. *
  586. * Returns size of all currently registered tvlv containers in bytes.
  587. */
  588. static uint16_t batadv_tvlv_container_list_size(struct batadv_priv *bat_priv)
  589. {
  590. struct batadv_tvlv_container *tvlv;
  591. uint16_t tvlv_len = 0;
  592. hlist_for_each_entry(tvlv, &bat_priv->tvlv.container_list, list) {
  593. tvlv_len += sizeof(struct batadv_tvlv_hdr);
  594. tvlv_len += ntohs(tvlv->tvlv_hdr.len);
  595. }
  596. return tvlv_len;
  597. }
/**
 * batadv_tvlv_container_remove - remove tvlv container from the tvlv container
 * list
 * @tvlv: the to be removed tvlv container (may be NULL, then nothing happens)
 *
 * Has to be called with the appropriate locks being acquired
 * (tvlv.container_list_lock).
 */
static void batadv_tvlv_container_remove(struct batadv_tvlv_container *tvlv)
{
	if (!tvlv)
		return;

	hlist_del(&tvlv->list);

	/* first call to decrement the counter, second call to free */
	batadv_tvlv_container_free_ref(tvlv);
	batadv_tvlv_container_free_ref(tvlv);
}
/**
 * batadv_tvlv_container_unregister - unregister tvlv container based on the
 * provided type and version (both need to match)
 * @bat_priv: the bat priv with all the soft interface information
 * @type: tvlv container type to unregister
 * @version: tvlv container version to unregister
 */
void batadv_tvlv_container_unregister(struct batadv_priv *bat_priv,
				      uint8_t type, uint8_t version)
{
	struct batadv_tvlv_container *tvlv;

	spin_lock_bh(&bat_priv->tvlv.container_list_lock);
	/* _get() takes an extra reference which _remove() drops again
	 * together with the list reference; _remove() ignores NULL
	 */
	tvlv = batadv_tvlv_container_get(bat_priv, type, version);
	batadv_tvlv_container_remove(tvlv);
	spin_unlock_bh(&bat_priv->tvlv.container_list_lock);
}
  631. /**
  632. * batadv_tvlv_container_register - register tvlv type, version and content
  633. * to be propagated with each (primary interface) OGM
  634. * @bat_priv: the bat priv with all the soft interface information
  635. * @type: tvlv container type
  636. * @version: tvlv container version
  637. * @tvlv_value: tvlv container content
  638. * @tvlv_value_len: tvlv container content length
  639. *
  640. * If a container of the same type and version was already registered the new
  641. * content is going to replace the old one.
  642. */
  643. void batadv_tvlv_container_register(struct batadv_priv *bat_priv,
  644. uint8_t type, uint8_t version,
  645. void *tvlv_value, uint16_t tvlv_value_len)
  646. {
  647. struct batadv_tvlv_container *tvlv_old, *tvlv_new;
  648. if (!tvlv_value)
  649. tvlv_value_len = 0;
  650. tvlv_new = kzalloc(sizeof(*tvlv_new) + tvlv_value_len, GFP_ATOMIC);
  651. if (!tvlv_new)
  652. return;
  653. tvlv_new->tvlv_hdr.version = version;
  654. tvlv_new->tvlv_hdr.type = type;
  655. tvlv_new->tvlv_hdr.len = htons(tvlv_value_len);
  656. memcpy(tvlv_new + 1, tvlv_value, ntohs(tvlv_new->tvlv_hdr.len));
  657. INIT_HLIST_NODE(&tvlv_new->list);
  658. atomic_set(&tvlv_new->refcount, 1);
  659. spin_lock_bh(&bat_priv->tvlv.container_list_lock);
  660. tvlv_old = batadv_tvlv_container_get(bat_priv, type, version);
  661. batadv_tvlv_container_remove(tvlv_old);
  662. hlist_add_head(&tvlv_new->list, &bat_priv->tvlv.container_list);
  663. spin_unlock_bh(&bat_priv->tvlv.container_list_lock);
  664. }
  665. /**
  666. * batadv_tvlv_realloc_packet_buff - reallocate packet buffer to accommodate
  667. * requested packet size
  668. * @packet_buff: packet buffer
  669. * @packet_buff_len: packet buffer size
  670. * @min_packet_len: requested packet minimum size
  671. * @additional_packet_len: requested additional packet size on top of minimum
  672. * size
  673. *
  674. * Returns true of the packet buffer could be changed to the requested size,
  675. * false otherwise.
  676. */
  677. static bool batadv_tvlv_realloc_packet_buff(unsigned char **packet_buff,
  678. int *packet_buff_len,
  679. int min_packet_len,
  680. int additional_packet_len)
  681. {
  682. unsigned char *new_buff;
  683. new_buff = kmalloc(min_packet_len + additional_packet_len, GFP_ATOMIC);
  684. /* keep old buffer if kmalloc should fail */
  685. if (new_buff) {
  686. memcpy(new_buff, *packet_buff, min_packet_len);
  687. kfree(*packet_buff);
  688. *packet_buff = new_buff;
  689. *packet_buff_len = min_packet_len + additional_packet_len;
  690. return true;
  691. }
  692. return false;
  693. }
/**
 * batadv_tvlv_container_ogm_append - append tvlv container content to given
 * OGM packet buffer
 * @bat_priv: the bat priv with all the soft interface information
 * @packet_buff: ogm packet buffer
 * @packet_buff_len: ogm packet buffer size including ogm header and tvlv
 * content
 * @packet_min_len: ogm header size to be preserved for the OGM itself
 *
 * The ogm packet might be enlarged or shrunk depending on the current size
 * and the size of the to-be-appended tvlv containers.
 *
 * Returns size of all appended tvlv containers in bytes.
 */
uint16_t batadv_tvlv_container_ogm_append(struct batadv_priv *bat_priv,
					  unsigned char **packet_buff,
					  int *packet_buff_len,
					  int packet_min_len)
{
	struct batadv_tvlv_container *tvlv;
	struct batadv_tvlv_hdr *tvlv_hdr;
	uint16_t tvlv_value_len;
	void *tvlv_value;
	bool ret;

	spin_lock_bh(&bat_priv->tvlv.container_list_lock);
	tvlv_value_len = batadv_tvlv_container_list_size(bat_priv);

	/* resize the buffer to hold the ogm header plus all containers */
	ret = batadv_tvlv_realloc_packet_buff(packet_buff, packet_buff_len,
					      packet_min_len, tvlv_value_len);

	if (!ret)
		goto end;

	if (!tvlv_value_len)
		goto end;

	/* serialize each container (header + payload) right after the
	 * preserved ogm header, advancing the write cursor as we go
	 */
	tvlv_value = (*packet_buff) + packet_min_len;

	hlist_for_each_entry(tvlv, &bat_priv->tvlv.container_list, list) {
		tvlv_hdr = tvlv_value;
		tvlv_hdr->type = tvlv->tvlv_hdr.type;
		tvlv_hdr->version = tvlv->tvlv_hdr.version;
		tvlv_hdr->len = tvlv->tvlv_hdr.len;
		tvlv_value = tvlv_hdr + 1;
		/* the payload is stored directly behind the container struct */
		memcpy(tvlv_value, tvlv + 1, ntohs(tvlv->tvlv_hdr.len));
		tvlv_value = (uint8_t *)tvlv_value + ntohs(tvlv->tvlv_hdr.len);
	}

end:
	spin_unlock_bh(&bat_priv->tvlv.container_list_lock);
	return tvlv_value_len;
}
  740. /**
  741. * batadv_tvlv_call_handler - parse the given tvlv buffer to call the
  742. * appropriate handlers
  743. * @bat_priv: the bat priv with all the soft interface information
  744. * @tvlv_handler: tvlv callback function handling the tvlv content
  745. * @ogm_source: flag indicating wether the tvlv is an ogm or a unicast packet
  746. * @orig_node: orig node emitting the ogm packet
  747. * @src: source mac address of the unicast packet
  748. * @dst: destination mac address of the unicast packet
  749. * @tvlv_value: tvlv content
  750. * @tvlv_value_len: tvlv content length
  751. *
  752. * Returns success if handler was not found or the return value of the handler
  753. * callback.
  754. */
  755. static int batadv_tvlv_call_handler(struct batadv_priv *bat_priv,
  756. struct batadv_tvlv_handler *tvlv_handler,
  757. bool ogm_source,
  758. struct batadv_orig_node *orig_node,
  759. uint8_t *src, uint8_t *dst,
  760. void *tvlv_value, uint16_t tvlv_value_len)
  761. {
  762. if (!tvlv_handler)
  763. return NET_RX_SUCCESS;
  764. if (ogm_source) {
  765. if (!tvlv_handler->ogm_handler)
  766. return NET_RX_SUCCESS;
  767. if (!orig_node)
  768. return NET_RX_SUCCESS;
  769. tvlv_handler->ogm_handler(bat_priv, orig_node,
  770. BATADV_NO_FLAGS,
  771. tvlv_value, tvlv_value_len);
  772. tvlv_handler->flags |= BATADV_TVLV_HANDLER_OGM_CALLED;
  773. } else {
  774. if (!src)
  775. return NET_RX_SUCCESS;
  776. if (!dst)
  777. return NET_RX_SUCCESS;
  778. if (!tvlv_handler->unicast_handler)
  779. return NET_RX_SUCCESS;
  780. return tvlv_handler->unicast_handler(bat_priv, src,
  781. dst, tvlv_value,
  782. tvlv_value_len);
  783. }
  784. return NET_RX_SUCCESS;
  785. }
/**
 * batadv_tvlv_containers_process - parse the given tvlv buffer to call the
 *  appropriate handlers
 * @bat_priv: the bat priv with all the soft interface information
 * @ogm_source: flag indicating whether the tvlv is an ogm or a unicast packet
 * @orig_node: orig node emitting the ogm packet
 * @src: source mac address of the unicast packet
 * @dst: destination mac address of the unicast packet
 * @tvlv_value: tvlv content
 * @tvlv_value_len: tvlv content length
 *
 * Returns success when processing an OGM or the return value of all called
 * handler callbacks.
 */
int batadv_tvlv_containers_process(struct batadv_priv *bat_priv,
				   bool ogm_source,
				   struct batadv_orig_node *orig_node,
				   uint8_t *src, uint8_t *dst,
				   void *tvlv_value, uint16_t tvlv_value_len)
{
	struct batadv_tvlv_handler *tvlv_handler;
	struct batadv_tvlv_hdr *tvlv_hdr;
	uint16_t tvlv_value_cont_len;
	uint8_t cifnotfound = BATADV_TVLV_HANDLER_OGM_CIFNOTFND;
	int ret = NET_RX_SUCCESS;

	/* walk the buffer as a sequence of (tvlv header, payload) records */
	while (tvlv_value_len >= sizeof(*tvlv_hdr)) {
		tvlv_hdr = tvlv_value;
		tvlv_value_cont_len = ntohs(tvlv_hdr->len);
		tvlv_value = tvlv_hdr + 1;
		tvlv_value_len -= sizeof(*tvlv_hdr);

		/* stop on a truncated container: its payload claims more
		 * bytes than remain in the buffer
		 */
		if (tvlv_value_cont_len > tvlv_value_len)
			break;

		tvlv_handler = batadv_tvlv_handler_get(bat_priv,
						       tvlv_hdr->type,
						       tvlv_hdr->version);

		/* OR the per-container results so any non-success bit
		 * survives into the aggregate return value
		 */
		ret |= batadv_tvlv_call_handler(bat_priv, tvlv_handler,
						ogm_source, orig_node,
						src, dst, tvlv_value,
						tvlv_value_cont_len);
		if (tvlv_handler)
			batadv_tvlv_handler_free_ref(tvlv_handler);

		/* advance past this container's payload */
		tvlv_value = (uint8_t *)tvlv_value + tvlv_value_cont_len;
		tvlv_value_len -= tvlv_value_cont_len;
	}

	if (!ogm_source)
		return ret;

	/* second pass (ogm only): notify CIFNOTFND handlers whose tvlv was
	 * absent from this ogm, then clear the per-ogm "called" marker
	 */
	rcu_read_lock();
	hlist_for_each_entry_rcu(tvlv_handler,
				 &bat_priv->tvlv.handler_list, list) {
		if ((tvlv_handler->flags & BATADV_TVLV_HANDLER_OGM_CIFNOTFND) &&
		    !(tvlv_handler->flags & BATADV_TVLV_HANDLER_OGM_CALLED))
			tvlv_handler->ogm_handler(bat_priv, orig_node,
						  cifnotfound, NULL, 0);

		tvlv_handler->flags &= ~BATADV_TVLV_HANDLER_OGM_CALLED;
	}
	rcu_read_unlock();

	return NET_RX_SUCCESS;
}
  844. /**
  845. * batadv_tvlv_ogm_receive - process an incoming ogm and call the appropriate
  846. * handlers
  847. * @bat_priv: the bat priv with all the soft interface information
  848. * @batadv_ogm_packet: ogm packet containing the tvlv containers
  849. * @orig_node: orig node emitting the ogm packet
  850. */
  851. void batadv_tvlv_ogm_receive(struct batadv_priv *bat_priv,
  852. struct batadv_ogm_packet *batadv_ogm_packet,
  853. struct batadv_orig_node *orig_node)
  854. {
  855. void *tvlv_value;
  856. uint16_t tvlv_value_len;
  857. if (!batadv_ogm_packet)
  858. return;
  859. tvlv_value_len = ntohs(batadv_ogm_packet->tvlv_len);
  860. if (!tvlv_value_len)
  861. return;
  862. tvlv_value = batadv_ogm_packet + 1;
  863. batadv_tvlv_containers_process(bat_priv, true, orig_node, NULL, NULL,
  864. tvlv_value, tvlv_value_len);
  865. }
  866. /**
  867. * batadv_tvlv_handler_register - register tvlv handler based on the provided
  868. * type and version (both need to match) for ogm tvlv payload and/or unicast
  869. * payload
  870. * @bat_priv: the bat priv with all the soft interface information
  871. * @optr: ogm tvlv handler callback function. This function receives the orig
  872. * node, flags and the tvlv content as argument to process.
  873. * @uptr: unicast tvlv handler callback function. This function receives the
  874. * source & destination of the unicast packet as well as the tvlv content
  875. * to process.
  876. * @type: tvlv handler type to be registered
  877. * @version: tvlv handler version to be registered
  878. * @flags: flags to enable or disable TVLV API behavior
  879. */
  880. void batadv_tvlv_handler_register(struct batadv_priv *bat_priv,
  881. void (*optr)(struct batadv_priv *bat_priv,
  882. struct batadv_orig_node *orig,
  883. uint8_t flags,
  884. void *tvlv_value,
  885. uint16_t tvlv_value_len),
  886. int (*uptr)(struct batadv_priv *bat_priv,
  887. uint8_t *src, uint8_t *dst,
  888. void *tvlv_value,
  889. uint16_t tvlv_value_len),
  890. uint8_t type, uint8_t version, uint8_t flags)
  891. {
  892. struct batadv_tvlv_handler *tvlv_handler;
  893. tvlv_handler = batadv_tvlv_handler_get(bat_priv, type, version);
  894. if (tvlv_handler) {
  895. batadv_tvlv_handler_free_ref(tvlv_handler);
  896. return;
  897. }
  898. tvlv_handler = kzalloc(sizeof(*tvlv_handler), GFP_ATOMIC);
  899. if (!tvlv_handler)
  900. return;
  901. tvlv_handler->ogm_handler = optr;
  902. tvlv_handler->unicast_handler = uptr;
  903. tvlv_handler->type = type;
  904. tvlv_handler->version = version;
  905. tvlv_handler->flags = flags;
  906. atomic_set(&tvlv_handler->refcount, 1);
  907. INIT_HLIST_NODE(&tvlv_handler->list);
  908. spin_lock_bh(&bat_priv->tvlv.handler_list_lock);
  909. hlist_add_head_rcu(&tvlv_handler->list, &bat_priv->tvlv.handler_list);
  910. spin_unlock_bh(&bat_priv->tvlv.handler_list_lock);
  911. }
/**
 * batadv_tvlv_handler_unregister - unregister tvlv handler based on the
 *  provided type and version (both need to match)
 * @bat_priv: the bat priv with all the soft interface information
 * @type: tvlv handler type to be unregistered
 * @version: tvlv handler version to be unregistered
 */
void batadv_tvlv_handler_unregister(struct batadv_priv *bat_priv,
				    uint8_t type, uint8_t version)
{
	struct batadv_tvlv_handler *tvlv_handler;

	tvlv_handler = batadv_tvlv_handler_get(bat_priv, type, version);
	if (!tvlv_handler)
		return;

	/* drop the reference taken by the lookup above */
	batadv_tvlv_handler_free_ref(tvlv_handler);

	spin_lock_bh(&bat_priv->tvlv.handler_list_lock);
	hlist_del_rcu(&tvlv_handler->list);
	spin_unlock_bh(&bat_priv->tvlv.handler_list_lock);

	/* release the reference held by the handler list */
	batadv_tvlv_handler_free_ref(tvlv_handler);
}
/**
 * batadv_tvlv_unicast_send - send a unicast packet with tvlv payload to the
 *  specified host
 * @bat_priv: the bat priv with all the soft interface information
 * @src: source mac address of the unicast packet
 * @dst: destination mac address of the unicast packet
 * @type: tvlv type
 * @version: tvlv version
 * @tvlv_value: tvlv content
 * @tvlv_value_len: tvlv content length
 */
void batadv_tvlv_unicast_send(struct batadv_priv *bat_priv, uint8_t *src,
			      uint8_t *dst, uint8_t type, uint8_t version,
			      void *tvlv_value, uint16_t tvlv_value_len)
{
	struct batadv_unicast_tvlv_packet *unicast_tvlv_packet;
	struct batadv_tvlv_hdr *tvlv_hdr;
	struct batadv_orig_node *orig_node;
	struct sk_buff *skb = NULL;
	unsigned char *tvlv_buff;
	unsigned int tvlv_len;
	ssize_t hdr_len = sizeof(*unicast_tvlv_packet);
	bool ret = false;

	orig_node = batadv_orig_hash_find(bat_priv, dst);
	if (!orig_node)
		goto out;

	/* one tvlv header plus the caller-provided payload */
	tvlv_len = sizeof(*tvlv_hdr) + tvlv_value_len;

	skb = netdev_alloc_skb_ip_align(NULL, ETH_HLEN + hdr_len + tvlv_len);
	if (!skb)
		goto out;

	skb->priority = TC_PRIO_CONTROL;
	/* leave headroom for the ethernet header added at transmit time */
	skb_reserve(skb, ETH_HLEN);
	tvlv_buff = skb_put(skb, sizeof(*unicast_tvlv_packet) + tvlv_len);

	/* fill in the batman-adv unicast tvlv packet header */
	unicast_tvlv_packet = (struct batadv_unicast_tvlv_packet *)tvlv_buff;
	unicast_tvlv_packet->packet_type = BATADV_UNICAST_TVLV;
	unicast_tvlv_packet->version = BATADV_COMPAT_VERSION;
	unicast_tvlv_packet->ttl = BATADV_TTL;
	unicast_tvlv_packet->reserved = 0;
	unicast_tvlv_packet->tvlv_len = htons(tvlv_len);
	unicast_tvlv_packet->align = 0;
	ether_addr_copy(unicast_tvlv_packet->src, src);
	ether_addr_copy(unicast_tvlv_packet->dst, dst);

	/* the tvlv header and payload directly follow the packet header */
	tvlv_buff = (unsigned char *)(unicast_tvlv_packet + 1);
	tvlv_hdr = (struct batadv_tvlv_hdr *)tvlv_buff;
	tvlv_hdr->version = version;
	tvlv_hdr->type = type;
	tvlv_hdr->len = htons(tvlv_value_len);
	tvlv_buff += sizeof(*tvlv_hdr);
	memcpy(tvlv_buff, tvlv_value, tvlv_value_len);

	if (batadv_send_skb_to_orig(skb, orig_node, NULL) != NET_XMIT_DROP)
		ret = true;

out:
	/* the skb was not handed off on failure and must be freed here */
	if (skb && !ret)
		kfree_skb(skb);
	if (orig_node)
		batadv_orig_node_free_ref(orig_node);
}
  989. /**
  990. * batadv_get_vid - extract the VLAN identifier from skb if any
  991. * @skb: the buffer containing the packet
  992. * @header_len: length of the batman header preceding the ethernet header
  993. *
  994. * If the packet embedded in the skb is vlan tagged this function returns the
  995. * VID with the BATADV_VLAN_HAS_TAG flag. Otherwise BATADV_NO_FLAGS is returned.
  996. */
  997. unsigned short batadv_get_vid(struct sk_buff *skb, size_t header_len)
  998. {
  999. struct ethhdr *ethhdr = (struct ethhdr *)(skb->data + header_len);
  1000. struct vlan_ethhdr *vhdr;
  1001. unsigned short vid;
  1002. if (ethhdr->h_proto != htons(ETH_P_8021Q))
  1003. return BATADV_NO_FLAGS;
  1004. if (!pskb_may_pull(skb, header_len + VLAN_ETH_HLEN))
  1005. return BATADV_NO_FLAGS;
  1006. vhdr = (struct vlan_ethhdr *)(skb->data + header_len);
  1007. vid = ntohs(vhdr->h_vlan_TCI) & VLAN_VID_MASK;
  1008. vid |= BATADV_VLAN_HAS_TAG;
  1009. return vid;
  1010. }
  1011. /**
  1012. * batadv_vlan_ap_isola_get - return the AP isolation status for the given vlan
  1013. * @bat_priv: the bat priv with all the soft interface information
  1014. * @vid: the VLAN identifier for which the AP isolation attributed as to be
  1015. * looked up
  1016. *
  1017. * Returns true if AP isolation is on for the VLAN idenfied by vid, false
  1018. * otherwise
  1019. */
  1020. bool batadv_vlan_ap_isola_get(struct batadv_priv *bat_priv, unsigned short vid)
  1021. {
  1022. bool ap_isolation_enabled = false;
  1023. struct batadv_softif_vlan *vlan;
  1024. /* if the AP isolation is requested on a VLAN, then check for its
  1025. * setting in the proper VLAN private data structure
  1026. */
  1027. vlan = batadv_softif_vlan_get(bat_priv, vid);
  1028. if (vlan) {
  1029. ap_isolation_enabled = atomic_read(&vlan->ap_isolation);
  1030. batadv_softif_vlan_free_ref(vlan);
  1031. }
  1032. return ap_isolation_enabled;
  1033. }
  1034. static int batadv_param_set_ra(const char *val, const struct kernel_param *kp)
  1035. {
  1036. struct batadv_algo_ops *bat_algo_ops;
  1037. char *algo_name = (char *)val;
  1038. size_t name_len = strlen(algo_name);
  1039. if (name_len > 0 && algo_name[name_len - 1] == '\n')
  1040. algo_name[name_len - 1] = '\0';
  1041. bat_algo_ops = batadv_algo_get(algo_name);
  1042. if (!bat_algo_ops) {
  1043. pr_err("Routing algorithm '%s' is not supported\n", algo_name);
  1044. return -EINVAL;
  1045. }
  1046. return param_set_copystring(algo_name, kp);
  1047. }
/* "routing_algo" parameter ops: validated setter, plain string getter */
static const struct kernel_param_ops batadv_param_ops_ra = {
	.set = batadv_param_set_ra,
	.get = param_get_string,
};

/* backing storage for the routing_algo parameter string */
static struct kparam_string batadv_param_string_ra = {
	.maxlen = sizeof(batadv_routing_algo),
	.string = batadv_routing_algo,
};

/* world-readable, root-writable (0644) module parameter */
module_param_cb(routing_algo, &batadv_param_ops_ra, &batadv_param_string_ra,
		0644);
/* module entry/exit points and metadata */
module_init(batadv_init);
module_exit(batadv_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR(BATADV_DRIVER_AUTHOR);
MODULE_DESCRIPTION(BATADV_DRIVER_DESC);
MODULE_SUPPORTED_DEVICE(BATADV_DRIVER_DEVICE);
MODULE_VERSION(BATADV_SOURCE_VERSION);