/*
 *	IP multicast routing support for mrouted 3.6/3.8
 *
 *		(c) 1995 Alan Cox, <alan@lxorguk.ukuu.org.uk>
 *		Linux Consultancy and Custom Driver Development
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Fixes:
 *	Michael Chastain	:	Incorrect size of copying.
 *	Alan Cox		:	Added the cache manager code
 *	Alan Cox		:	Fixed the clone/copy bug and device race.
 *	Mike McLagan		:	Routing by source
 *	Malcolm Beattie		:	Buffer handling fixes.
 *	Alexey Kuznetsov	:	Double buffer free and other fixes.
 *	SVR Anand		:	Fixed several multicast bugs and problems.
 *	Alexey Kuznetsov	:	Status, optimisations and more.
 *	Brad Parker		:	Better behaviour on mrouted upcall
 *					overflow.
 *	Carlos Picoto		:	PIMv1 Support
 *	Pavlin Ivanov Radoslavov:	PIMv2 Registers must checksum only PIM header
 *					Relax this requirement to work with older peers.
 */
#include <linux/uaccess.h>
#include <linux/types.h>
#include <linux/cache.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/mroute.h>
#include <linux/init.h>
#include <linux/if_ether.h>
#include <linux/slab.h>
#include <net/net_namespace.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/route.h>
#include <net/icmp.h>
#include <net/udp.h>
#include <net/raw.h>
#include <linux/notifier.h>
#include <linux/if_arp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/compat.h>
#include <linux/export.h>
#include <linux/rhashtable.h>
#include <net/ip_tunnels.h>
#include <net/checksum.h>
#include <net/netlink.h>
#include <net/fib_rules.h>
#include <linux/netconf.h>
#include <net/nexthop.h>
#include <net/switchdev.h>
struct ipmr_rule {
	struct fib_rule		common;
};

struct ipmr_result {
	struct mr_table		*mrt;
};

/* Big lock, protecting vif table, mrt cache and mroute socket state.
 * Note that the changes are semaphored via rtnl_lock.
 */
static DEFINE_RWLOCK(mrt_lock);

/* Multicast router control variables */

/* Special spinlock for queue of unresolved entries */
static DEFINE_SPINLOCK(mfc_unres_lock);

/* We return to original Alan's scheme. Hash table of resolved
 * entries is changed only in process context and protected
 * with weak lock mrt_lock. Queue of unresolved entries is protected
 * with strong spinlock mfc_unres_lock.
 *
 * In this case data path is free of exclusive locks at all.
 */
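/* Illustrative pairing of the two locks described above (a sketch, not
 * code that is called anywhere):
 *
 *	read_lock(&mrt_lock);		data path: vif table and resolved
 *	... lookups ...			cache reads run concurrently
 *	read_unlock(&mrt_lock);
 *
 *	spin_lock_bh(&mfc_unres_lock);	unresolved queue is touched from
 *	... queue/reap entries ...	both process and BH context
 *	spin_unlock_bh(&mfc_unres_lock);
 */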
static struct kmem_cache *mrt_cachep __ro_after_init;

static struct mr_table *ipmr_new_table(struct net *net, u32 id);
static void ipmr_free_table(struct mr_table *mrt);

static void ip_mr_forward(struct net *net, struct mr_table *mrt,
			  struct net_device *dev, struct sk_buff *skb,
			  struct mfc_cache *cache, int local);
static int ipmr_cache_report(struct mr_table *mrt,
			     struct sk_buff *pkt, vifi_t vifi, int assert);
static void mroute_netlink_event(struct mr_table *mrt, struct mfc_cache *mfc,
				 int cmd);
static void igmpmsg_netlink_event(struct mr_table *mrt, struct sk_buff *pkt);
static void mroute_clean_tables(struct mr_table *mrt, bool all);
static void ipmr_expire_process(struct timer_list *t);
#ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
#define ipmr_for_each_table(mrt, net) \
	list_for_each_entry_rcu(mrt, &net->ipv4.mr_tables, list)

static struct mr_table *ipmr_mr_table_iter(struct net *net,
					   struct mr_table *mrt)
{
	struct mr_table *ret;

	if (!mrt)
		ret = list_entry_rcu(net->ipv4.mr_tables.next,
				     struct mr_table, list);
	else
		ret = list_entry_rcu(mrt->list.next,
				     struct mr_table, list);

	if (&ret->list == &net->ipv4.mr_tables)
		return NULL;
	return ret;
}

static struct mr_table *ipmr_get_table(struct net *net, u32 id)
{
	struct mr_table *mrt;

	ipmr_for_each_table(mrt, net) {
		if (mrt->id == id)
			return mrt;
	}
	return NULL;
}

static int ipmr_fib_lookup(struct net *net, struct flowi4 *flp4,
			   struct mr_table **mrt)
{
	int err;
	struct ipmr_result res;
	struct fib_lookup_arg arg = {
		.result = &res,
		.flags = FIB_LOOKUP_NOREF,
	};

	/* update flow if oif or iif point to device enslaved to l3mdev */
	l3mdev_update_flow(net, flowi4_to_flowi(flp4));

	err = fib_rules_lookup(net->ipv4.mr_rules_ops,
			       flowi4_to_flowi(flp4), 0, &arg);
	if (err < 0)
		return err;
	*mrt = res.mrt;
	return 0;
}

static int ipmr_rule_action(struct fib_rule *rule, struct flowi *flp,
			    int flags, struct fib_lookup_arg *arg)
{
	struct ipmr_result *res = arg->result;
	struct mr_table *mrt;

	switch (rule->action) {
	case FR_ACT_TO_TBL:
		break;
	case FR_ACT_UNREACHABLE:
		return -ENETUNREACH;
	case FR_ACT_PROHIBIT:
		return -EACCES;
	case FR_ACT_BLACKHOLE:
	default:
		return -EINVAL;
	}

	arg->table = fib_rule_get_table(rule, arg);

	mrt = ipmr_get_table(rule->fr_net, arg->table);
	if (!mrt)
		return -EAGAIN;
	res->mrt = mrt;
	return 0;
}

static int ipmr_rule_match(struct fib_rule *rule, struct flowi *fl, int flags)
{
	return 1;
}

static const struct nla_policy ipmr_rule_policy[FRA_MAX + 1] = {
	FRA_GENERIC_POLICY,
};

static int ipmr_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
			       struct fib_rule_hdr *frh, struct nlattr **tb,
			       struct netlink_ext_ack *extack)
{
	return 0;
}

static int ipmr_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh,
			     struct nlattr **tb)
{
	return 1;
}

static int ipmr_rule_fill(struct fib_rule *rule, struct sk_buff *skb,
			  struct fib_rule_hdr *frh)
{
	frh->dst_len = 0;
	frh->src_len = 0;
	frh->tos = 0;
	return 0;
}

static const struct fib_rules_ops __net_initconst ipmr_rules_ops_template = {
	.family		= RTNL_FAMILY_IPMR,
	.rule_size	= sizeof(struct ipmr_rule),
	.addr_size	= sizeof(u32),
	.action		= ipmr_rule_action,
	.match		= ipmr_rule_match,
	.configure	= ipmr_rule_configure,
	.compare	= ipmr_rule_compare,
	.fill		= ipmr_rule_fill,
	.nlgroup	= RTNLGRP_IPV4_RULE,
	.policy		= ipmr_rule_policy,
	.owner		= THIS_MODULE,
};

static int __net_init ipmr_rules_init(struct net *net)
{
	struct fib_rules_ops *ops;
	struct mr_table *mrt;
	int err;

	ops = fib_rules_register(&ipmr_rules_ops_template, net);
	if (IS_ERR(ops))
		return PTR_ERR(ops);

	INIT_LIST_HEAD(&net->ipv4.mr_tables);

	mrt = ipmr_new_table(net, RT_TABLE_DEFAULT);
	if (IS_ERR(mrt)) {
		err = PTR_ERR(mrt);
		goto err1;
	}

	err = fib_default_rule_add(ops, 0x7fff, RT_TABLE_DEFAULT, 0);
	if (err < 0)
		goto err2;

	net->ipv4.mr_rules_ops = ops;
	return 0;

err2:
	ipmr_free_table(mrt);
err1:
	fib_rules_unregister(ops);
	return err;
}

static void __net_exit ipmr_rules_exit(struct net *net)
{
	struct mr_table *mrt, *next;

	rtnl_lock();
	list_for_each_entry_safe(mrt, next, &net->ipv4.mr_tables, list) {
		list_del(&mrt->list);
		ipmr_free_table(mrt);
	}
	fib_rules_unregister(net->ipv4.mr_rules_ops);
	rtnl_unlock();
}

static int ipmr_rules_dump(struct net *net, struct notifier_block *nb)
{
	return fib_rules_dump(net, nb, RTNL_FAMILY_IPMR);
}

static unsigned int ipmr_rules_seq_read(struct net *net)
{
	return fib_rules_seq_read(net, RTNL_FAMILY_IPMR);
}

bool ipmr_rule_default(const struct fib_rule *rule)
{
	return fib_rule_matchall(rule) && rule->table == RT_TABLE_DEFAULT;
}
EXPORT_SYMBOL(ipmr_rule_default);
#else
#define ipmr_for_each_table(mrt, net) \
	for (mrt = net->ipv4.mrt; mrt; mrt = NULL)

static struct mr_table *ipmr_mr_table_iter(struct net *net,
					   struct mr_table *mrt)
{
	if (!mrt)
		return net->ipv4.mrt;
	return NULL;
}

static struct mr_table *ipmr_get_table(struct net *net, u32 id)
{
	return net->ipv4.mrt;
}

static int ipmr_fib_lookup(struct net *net, struct flowi4 *flp4,
			   struct mr_table **mrt)
{
	*mrt = net->ipv4.mrt;
	return 0;
}

static int __net_init ipmr_rules_init(struct net *net)
{
	struct mr_table *mrt;

	mrt = ipmr_new_table(net, RT_TABLE_DEFAULT);
	if (IS_ERR(mrt))
		return PTR_ERR(mrt);
	net->ipv4.mrt = mrt;
	return 0;
}

static void __net_exit ipmr_rules_exit(struct net *net)
{
	rtnl_lock();
	ipmr_free_table(net->ipv4.mrt);
	net->ipv4.mrt = NULL;
	rtnl_unlock();
}

static int ipmr_rules_dump(struct net *net, struct notifier_block *nb)
{
	return 0;
}

static unsigned int ipmr_rules_seq_read(struct net *net)
{
	return 0;
}

bool ipmr_rule_default(const struct fib_rule *rule)
{
	return true;
}
EXPORT_SYMBOL(ipmr_rule_default);
#endif
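/* Summary of the two builds above: with CONFIG_IP_MROUTE_MULTIPLE_TABLES
 * the mr_table is selected per flow through fib rules (ipmr_fib_lookup);
 * without it, net->ipv4.mrt is the single default table and every lookup
 * trivially returns it.
 */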
static inline int ipmr_hash_cmp(struct rhashtable_compare_arg *arg,
				const void *ptr)
{
	const struct mfc_cache_cmp_arg *cmparg = arg->key;
	struct mfc_cache *c = (struct mfc_cache *)ptr;

	return cmparg->mfc_mcastgrp != c->mfc_mcastgrp ||
	       cmparg->mfc_origin != c->mfc_origin;
}

static const struct rhashtable_params ipmr_rht_params = {
	.head_offset = offsetof(struct mr_mfc, mnode),
	.key_offset = offsetof(struct mfc_cache, cmparg),
	.key_len = sizeof(struct mfc_cache_cmp_arg),
	.nelem_hint = 3,
	.locks_mul = 1,
	.obj_cmpfn = ipmr_hash_cmp,
	.automatic_shrinking = true,
};
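/* MFC entries are thus keyed on the (group, origin) pair. A lookup for a
 * given (S,G) builds the same key and probes mfc_hash; this is a sketch
 * of what ipmr_cache_find() further below actually does:
 *
 *	struct mfc_cache_cmp_arg arg = {
 *		.mfc_mcastgrp = mcastgrp,
 *		.mfc_origin   = origin,
 *	};
 *	c = mr_mfc_find(mrt, &arg);
 */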
static void ipmr_new_table_set(struct mr_table *mrt,
			       struct net *net)
{
#ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
	list_add_tail_rcu(&mrt->list, &net->ipv4.mr_tables);
#endif
}

static struct mfc_cache_cmp_arg ipmr_mr_table_ops_cmparg_any = {
	.mfc_mcastgrp = htonl(INADDR_ANY),
	.mfc_origin = htonl(INADDR_ANY),
};

static struct mr_table_ops ipmr_mr_table_ops = {
	.rht_params = &ipmr_rht_params,
	.cmparg_any = &ipmr_mr_table_ops_cmparg_any,
};

static struct mr_table *ipmr_new_table(struct net *net, u32 id)
{
	struct mr_table *mrt;

	/* "pimreg%u" should not exceed 16 bytes (IFNAMSIZ) */
	if (id != RT_TABLE_DEFAULT && id >= 1000000000)
		return ERR_PTR(-EINVAL);

	mrt = ipmr_get_table(net, id);
	if (mrt)
		return mrt;

	return mr_table_alloc(net, id, &ipmr_mr_table_ops,
			      ipmr_expire_process, ipmr_new_table_set);
}
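/* Worked example for the IFNAMSIZ check above: "pimreg" is 6 characters,
 * and an id below 1000000000 has at most 9 digits, so "pimreg%u" needs at
 * most 6 + 9 + 1 (NUL) = 16 bytes, exactly IFNAMSIZ.
 */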
static void ipmr_free_table(struct mr_table *mrt)
{
	del_timer_sync(&mrt->ipmr_expire_timer);
	mroute_clean_tables(mrt, true);
	rhltable_destroy(&mrt->mfc_hash);
	kfree(mrt);
}

/* Service routines creating virtual interfaces: DVMRP tunnels and PIMREG */

static void ipmr_del_tunnel(struct net_device *dev, struct vifctl *v)
{
	struct net *net = dev_net(dev);

	dev_close(dev);

	dev = __dev_get_by_name(net, "tunl0");
	if (dev) {
		const struct net_device_ops *ops = dev->netdev_ops;
		struct ifreq ifr;
		struct ip_tunnel_parm p;

		memset(&p, 0, sizeof(p));
		p.iph.daddr = v->vifc_rmt_addr.s_addr;
		p.iph.saddr = v->vifc_lcl_addr.s_addr;
		p.iph.version = 4;
		p.iph.ihl = 5;
		p.iph.protocol = IPPROTO_IPIP;
		sprintf(p.name, "dvmrp%d", v->vifc_vifi);
		ifr.ifr_ifru.ifru_data = (__force void __user *)&p;

		if (ops->ndo_do_ioctl) {
			mm_segment_t oldfs = get_fs();

			set_fs(KERNEL_DS);
			ops->ndo_do_ioctl(dev, &ifr, SIOCDELTUNNEL);
			set_fs(oldfs);
		}
	}
}

/* Initialize ipmr pimreg/tunnel in_device */
static bool ipmr_init_vif_indev(const struct net_device *dev)
{
	struct in_device *in_dev;

	ASSERT_RTNL();

	in_dev = __in_dev_get_rtnl(dev);
	if (!in_dev)
		return false;
	ipv4_devconf_setall(in_dev);
	neigh_parms_data_state_setall(in_dev->arp_parms);
	IPV4_DEVCONF(in_dev->cnf, RP_FILTER) = 0;

	return true;
}

static struct net_device *ipmr_new_tunnel(struct net *net, struct vifctl *v)
{
	struct net_device *dev;

	dev = __dev_get_by_name(net, "tunl0");

	if (dev) {
		const struct net_device_ops *ops = dev->netdev_ops;
		int err;
		struct ifreq ifr;
		struct ip_tunnel_parm p;

		memset(&p, 0, sizeof(p));
		p.iph.daddr = v->vifc_rmt_addr.s_addr;
		p.iph.saddr = v->vifc_lcl_addr.s_addr;
		p.iph.version = 4;
		p.iph.ihl = 5;
		p.iph.protocol = IPPROTO_IPIP;
		sprintf(p.name, "dvmrp%d", v->vifc_vifi);
		ifr.ifr_ifru.ifru_data = (__force void __user *)&p;

		if (ops->ndo_do_ioctl) {
			mm_segment_t oldfs = get_fs();

			set_fs(KERNEL_DS);
			err = ops->ndo_do_ioctl(dev, &ifr, SIOCADDTUNNEL);
			set_fs(oldfs);
		} else {
			err = -EOPNOTSUPP;
		}
		dev = NULL;

		if (err == 0 &&
		    (dev = __dev_get_by_name(net, p.name)) != NULL) {
			dev->flags |= IFF_MULTICAST;
			if (!ipmr_init_vif_indev(dev))
				goto failure;
			if (dev_open(dev))
				goto failure;
			dev_hold(dev);
		}
	}
	return dev;

failure:
	unregister_netdevice(dev);
	return NULL;
}
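/* The two tunnel helpers above drive the same SIOCADDTUNNEL/SIOCDELTUNNEL
 * interface that userspace would use, roughly (illustrative sketch):
 *
 *	struct ip_tunnel_parm p = { .iph.protocol = IPPROTO_IPIP, ... };
 *	struct ifreq ifr = { .ifr_ifru.ifru_data = &p };
 *	ioctl(fd, SIOCADDTUNNEL, &ifr);
 *
 * The set_fs(KERNEL_DS) dance is what lets the kernel pass a kernel
 * pointer through this __user-facing ioctl path.
 */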
#if defined(CONFIG_IP_PIMSM_V1) || defined(CONFIG_IP_PIMSM_V2)
static netdev_tx_t reg_vif_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct net *net = dev_net(dev);
	struct mr_table *mrt;
	struct flowi4 fl4 = {
		.flowi4_oif	= dev->ifindex,
		.flowi4_iif	= skb->skb_iif ? : LOOPBACK_IFINDEX,
		.flowi4_mark	= skb->mark,
	};
	int err;

	err = ipmr_fib_lookup(net, &fl4, &mrt);
	if (err < 0) {
		kfree_skb(skb);
		return err;
	}

	read_lock(&mrt_lock);
	dev->stats.tx_bytes += skb->len;
	dev->stats.tx_packets++;
	ipmr_cache_report(mrt, skb, mrt->mroute_reg_vif_num, IGMPMSG_WHOLEPKT);
	read_unlock(&mrt_lock);
	kfree_skb(skb);
	return NETDEV_TX_OK;
}

static int reg_vif_get_iflink(const struct net_device *dev)
{
	return 0;
}

static const struct net_device_ops reg_vif_netdev_ops = {
	.ndo_start_xmit	= reg_vif_xmit,
	.ndo_get_iflink = reg_vif_get_iflink,
};

static void reg_vif_setup(struct net_device *dev)
{
	dev->type		= ARPHRD_PIMREG;
	dev->mtu		= ETH_DATA_LEN - sizeof(struct iphdr) - 8;
	dev->flags		= IFF_NOARP;
	dev->netdev_ops		= &reg_vif_netdev_ops;
	dev->needs_free_netdev	= true;
	dev->features		|= NETIF_F_NETNS_LOCAL;
}

static struct net_device *ipmr_reg_vif(struct net *net, struct mr_table *mrt)
{
	struct net_device *dev;
	char name[IFNAMSIZ];

	if (mrt->id == RT_TABLE_DEFAULT)
		sprintf(name, "pimreg");
	else
		sprintf(name, "pimreg%u", mrt->id);

	dev = alloc_netdev(0, name, NET_NAME_UNKNOWN, reg_vif_setup);
	if (!dev)
		return NULL;

	dev_net_set(dev, net);

	if (register_netdevice(dev)) {
		free_netdev(dev);
		return NULL;
	}

	if (!ipmr_init_vif_indev(dev))
		goto failure;
	if (dev_open(dev))
		goto failure;

	dev_hold(dev);

	return dev;

failure:
	unregister_netdevice(dev);
	return NULL;
}

/* called with rcu_read_lock() */
static int __pim_rcv(struct mr_table *mrt, struct sk_buff *skb,
		     unsigned int pimlen)
{
	struct net_device *reg_dev = NULL;
	struct iphdr *encap;

	encap = (struct iphdr *)(skb_transport_header(skb) + pimlen);
	/* Check that:
	 * a. packet is really sent to a multicast group
	 * b. packet is not a NULL-REGISTER
	 * c. packet is not truncated
	 */
	if (!ipv4_is_multicast(encap->daddr) ||
	    encap->tot_len == 0 ||
	    ntohs(encap->tot_len) + pimlen > skb->len)
		return 1;

	read_lock(&mrt_lock);
	if (mrt->mroute_reg_vif_num >= 0)
		reg_dev = mrt->vif_table[mrt->mroute_reg_vif_num].dev;
	read_unlock(&mrt_lock);

	if (!reg_dev)
		return 1;

	skb->mac_header = skb->network_header;
	skb_pull(skb, (u8 *)encap - skb->data);
	skb_reset_network_header(skb);
	skb->protocol = htons(ETH_P_IP);
	skb->ip_summed = CHECKSUM_NONE;

	skb_tunnel_rx(skb, reg_dev, dev_net(reg_dev));

	netif_rx(skb);

	return NET_RX_SUCCESS;
}
#else
static struct net_device *ipmr_reg_vif(struct net *net, struct mr_table *mrt)
{
	return NULL;
}
#endif
static int call_ipmr_vif_entry_notifiers(struct net *net,
					 enum fib_event_type event_type,
					 struct vif_device *vif,
					 vifi_t vif_index, u32 tb_id)
{
	return mr_call_vif_notifiers(net, RTNL_FAMILY_IPMR, event_type,
				     vif, vif_index, tb_id,
				     &net->ipv4.ipmr_seq);
}

static int call_ipmr_mfc_entry_notifiers(struct net *net,
					 enum fib_event_type event_type,
					 struct mfc_cache *mfc, u32 tb_id)
{
	return mr_call_mfc_notifiers(net, RTNL_FAMILY_IPMR, event_type,
				     &mfc->_c, tb_id, &net->ipv4.ipmr_seq);
}

/**
 * vif_delete - Delete a VIF entry
 * @notify: Set to 1, if the caller is a notifier_call
 */
static int vif_delete(struct mr_table *mrt, int vifi, int notify,
		      struct list_head *head)
{
	struct net *net = read_pnet(&mrt->net);
	struct vif_device *v;
	struct net_device *dev;
	struct in_device *in_dev;

	if (vifi < 0 || vifi >= mrt->maxvif)
		return -EADDRNOTAVAIL;

	v = &mrt->vif_table[vifi];

	if (VIF_EXISTS(mrt, vifi))
		call_ipmr_vif_entry_notifiers(net, FIB_EVENT_VIF_DEL, v, vifi,
					      mrt->id);

	write_lock_bh(&mrt_lock);
	dev = v->dev;
	v->dev = NULL;

	if (!dev) {
		write_unlock_bh(&mrt_lock);
		return -EADDRNOTAVAIL;
	}

	if (vifi == mrt->mroute_reg_vif_num)
		mrt->mroute_reg_vif_num = -1;

	if (vifi + 1 == mrt->maxvif) {
		int tmp;

		for (tmp = vifi - 1; tmp >= 0; tmp--) {
			if (VIF_EXISTS(mrt, tmp))
				break;
		}
		mrt->maxvif = tmp + 1;
	}

	write_unlock_bh(&mrt_lock);

	dev_set_allmulti(dev, -1);

	in_dev = __in_dev_get_rtnl(dev);
	if (in_dev) {
		IPV4_DEVCONF(in_dev->cnf, MC_FORWARDING)--;
		inet_netconf_notify_devconf(dev_net(dev), RTM_NEWNETCONF,
					    NETCONFA_MC_FORWARDING,
					    dev->ifindex, &in_dev->cnf);
		ip_rt_multicast_event(in_dev);
	}

	if (v->flags & (VIFF_TUNNEL | VIFF_REGISTER) && !notify)
		unregister_netdevice_queue(dev, head);

	dev_put(dev);
	return 0;
}
static void ipmr_cache_free_rcu(struct rcu_head *head)
{
	struct mr_mfc *c = container_of(head, struct mr_mfc, rcu);

	kmem_cache_free(mrt_cachep, (struct mfc_cache *)c);
}

static void ipmr_cache_free(struct mfc_cache *c)
{
	call_rcu(&c->_c.rcu, ipmr_cache_free_rcu);
}

/* Destroy an unresolved cache entry, killing queued skbs
 * and reporting error to netlink readers.
 */
static void ipmr_destroy_unres(struct mr_table *mrt, struct mfc_cache *c)
{
	struct net *net = read_pnet(&mrt->net);
	struct sk_buff *skb;
	struct nlmsgerr *e;

	atomic_dec(&mrt->cache_resolve_queue_len);

	while ((skb = skb_dequeue(&c->_c.mfc_un.unres.unresolved))) {
		if (ip_hdr(skb)->version == 0) {
			struct nlmsghdr *nlh = skb_pull(skb,
							sizeof(struct iphdr));
			nlh->nlmsg_type = NLMSG_ERROR;
			nlh->nlmsg_len = nlmsg_msg_size(sizeof(struct nlmsgerr));
			skb_trim(skb, nlh->nlmsg_len);
			e = nlmsg_data(nlh);
			e->error = -ETIMEDOUT;
			memset(&e->msg, 0, sizeof(e->msg));

			rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
		} else {
			kfree_skb(skb);
		}
	}

	ipmr_cache_free(c);
}

/* Timer process for the unresolved queue. */
static void ipmr_expire_process(struct timer_list *t)
{
	struct mr_table *mrt = from_timer(mrt, t, ipmr_expire_timer);
	struct mr_mfc *c, *next;
	unsigned long expires;
	unsigned long now;

	if (!spin_trylock(&mfc_unres_lock)) {
		mod_timer(&mrt->ipmr_expire_timer, jiffies + HZ / 10);
		return;
	}

	if (list_empty(&mrt->mfc_unres_queue))
		goto out;

	now = jiffies;
	expires = 10 * HZ;

	list_for_each_entry_safe(c, next, &mrt->mfc_unres_queue, list) {
		if (time_after(c->mfc_un.unres.expires, now)) {
			unsigned long interval = c->mfc_un.unres.expires - now;

			if (interval < expires)
				expires = interval;
			continue;
		}

		list_del(&c->list);
		mroute_netlink_event(mrt, (struct mfc_cache *)c, RTM_DELROUTE);
		ipmr_destroy_unres(mrt, (struct mfc_cache *)c);
	}

	if (!list_empty(&mrt->mfc_unres_queue))
		mod_timer(&mrt->ipmr_expire_timer, jiffies + expires);

out:
	spin_unlock(&mfc_unres_lock);
}

/* Fill oifs list. It is called under write locked mrt_lock. */
static void ipmr_update_thresholds(struct mr_table *mrt, struct mr_mfc *cache,
				   unsigned char *ttls)
{
	int vifi;

	cache->mfc_un.res.minvif = MAXVIFS;
	cache->mfc_un.res.maxvif = 0;
	memset(cache->mfc_un.res.ttls, 255, MAXVIFS);

	for (vifi = 0; vifi < mrt->maxvif; vifi++) {
		if (VIF_EXISTS(mrt, vifi) &&
		    ttls[vifi] && ttls[vifi] < 255) {
			cache->mfc_un.res.ttls[vifi] = ttls[vifi];
			if (cache->mfc_un.res.minvif > vifi)
				cache->mfc_un.res.minvif = vifi;
			if (cache->mfc_un.res.maxvif <= vifi)
				cache->mfc_un.res.maxvif = vifi + 1;
		}
	}
	cache->mfc_un.res.lastuse = jiffies;
}
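/* Worked example: with four existing vifs and ttls = {1, 0, 255, 4},
 * only vifs 0 and 3 qualify (a ttl of 0 or 255 means "do not forward"),
 * so res.ttls becomes {1, 255, 255, 4}, minvif = 0 and maxvif = 4.
 */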
static int vif_add(struct net *net, struct mr_table *mrt,
		   struct vifctl *vifc, int mrtsock)
{
	int vifi = vifc->vifc_vifi;
	struct switchdev_attr attr = {
		.id = SWITCHDEV_ATTR_ID_PORT_PARENT_ID,
	};
	struct vif_device *v = &mrt->vif_table[vifi];
	struct net_device *dev;
	struct in_device *in_dev;
	int err;

	/* Is vif busy ? */
	if (VIF_EXISTS(mrt, vifi))
		return -EADDRINUSE;

	switch (vifc->vifc_flags) {
	case VIFF_REGISTER:
		if (!ipmr_pimsm_enabled())
			return -EINVAL;
		/* Special Purpose VIF in PIM
		 * All the packets will be sent to the daemon
		 */
		if (mrt->mroute_reg_vif_num >= 0)
			return -EADDRINUSE;
		dev = ipmr_reg_vif(net, mrt);
		if (!dev)
			return -ENOBUFS;
		err = dev_set_allmulti(dev, 1);
		if (err) {
			unregister_netdevice(dev);
			dev_put(dev);
			return err;
		}
		break;
	case VIFF_TUNNEL:
		dev = ipmr_new_tunnel(net, vifc);
		if (!dev)
			return -ENOBUFS;
		err = dev_set_allmulti(dev, 1);
		if (err) {
			ipmr_del_tunnel(dev, vifc);
			dev_put(dev);
			return err;
		}
		break;
	case VIFF_USE_IFINDEX:
	case 0:
		if (vifc->vifc_flags == VIFF_USE_IFINDEX) {
			dev = dev_get_by_index(net, vifc->vifc_lcl_ifindex);
			if (dev && !__in_dev_get_rtnl(dev)) {
				dev_put(dev);
				return -EADDRNOTAVAIL;
			}
		} else {
			dev = ip_dev_find(net, vifc->vifc_lcl_addr.s_addr);
		}
		if (!dev)
			return -EADDRNOTAVAIL;
		err = dev_set_allmulti(dev, 1);
		if (err) {
			dev_put(dev);
			return err;
		}
		break;
	default:
		return -EINVAL;
	}

	in_dev = __in_dev_get_rtnl(dev);
	if (!in_dev) {
		dev_put(dev);
		return -EADDRNOTAVAIL;
	}
	IPV4_DEVCONF(in_dev->cnf, MC_FORWARDING)++;
	inet_netconf_notify_devconf(net, RTM_NEWNETCONF, NETCONFA_MC_FORWARDING,
				    dev->ifindex, &in_dev->cnf);
	ip_rt_multicast_event(in_dev);

	/* Fill in the VIF structures */
	vif_device_init(v, dev, vifc->vifc_rate_limit,
			vifc->vifc_threshold,
			vifc->vifc_flags | (!mrtsock ? VIFF_STATIC : 0),
			(VIFF_TUNNEL | VIFF_REGISTER));

	attr.orig_dev = dev;
	if (!switchdev_port_attr_get(dev, &attr)) {
		memcpy(v->dev_parent_id.id, attr.u.ppid.id, attr.u.ppid.id_len);
		v->dev_parent_id.id_len = attr.u.ppid.id_len;
	} else {
		v->dev_parent_id.id_len = 0;
	}

	v->local = vifc->vifc_lcl_addr.s_addr;
	v->remote = vifc->vifc_rmt_addr.s_addr;

	/* And finish update writing critical data */
	write_lock_bh(&mrt_lock);
	v->dev = dev;
	if (v->flags & VIFF_REGISTER)
		mrt->mroute_reg_vif_num = vifi;
	if (vifi + 1 > mrt->maxvif)
		mrt->maxvif = vifi + 1;
	write_unlock_bh(&mrt_lock);
	call_ipmr_vif_entry_notifiers(net, FIB_EVENT_VIF_ADD, v, vifi, mrt->id);
	return 0;
}
/* called with rcu_read_lock() */
static struct mfc_cache *ipmr_cache_find(struct mr_table *mrt,
					 __be32 origin,
					 __be32 mcastgrp)
{
	struct mfc_cache_cmp_arg arg = {
		.mfc_mcastgrp = mcastgrp,
		.mfc_origin = origin
	};

	return mr_mfc_find(mrt, &arg);
}

/* Look for a (*,G) entry */
static struct mfc_cache *ipmr_cache_find_any(struct mr_table *mrt,
					     __be32 mcastgrp, int vifi)
{
	struct mfc_cache_cmp_arg arg = {
		.mfc_mcastgrp = mcastgrp,
		.mfc_origin = htonl(INADDR_ANY)
	};

	if (mcastgrp == htonl(INADDR_ANY))
		return mr_mfc_find_any_parent(mrt, vifi);
	return mr_mfc_find_any(mrt, vifi, &arg);
}

/* Look for a (S,G,iif) entry if parent != -1 */
static struct mfc_cache *ipmr_cache_find_parent(struct mr_table *mrt,
						__be32 origin, __be32 mcastgrp,
						int parent)
{
	struct mfc_cache_cmp_arg arg = {
		.mfc_mcastgrp = mcastgrp,
		.mfc_origin = origin,
	};

	return mr_mfc_find_parent(mrt, &arg, parent);
}

/* Allocate a multicast cache entry */
static struct mfc_cache *ipmr_cache_alloc(void)
{
	struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);

	if (c) {
		c->_c.mfc_un.res.last_assert = jiffies - MFC_ASSERT_THRESH - 1;
		c->_c.mfc_un.res.minvif = MAXVIFS;
		c->_c.free = ipmr_cache_free_rcu;
		refcount_set(&c->_c.mfc_un.res.refcount, 1);
	}
	return c;
}

static struct mfc_cache *ipmr_cache_alloc_unres(void)
{
	struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC);

	if (c) {
		skb_queue_head_init(&c->_c.mfc_un.unres.unresolved);
		c->_c.mfc_un.unres.expires = jiffies + 10 * HZ;
	}
	return c;
}
/* A cache entry has gone into a resolved state from queued */
static void ipmr_cache_resolve(struct net *net, struct mr_table *mrt,
			       struct mfc_cache *uc, struct mfc_cache *c)
{
	struct sk_buff *skb;
	struct nlmsgerr *e;

	/* Play the pending entries through our router */
	while ((skb = __skb_dequeue(&uc->_c.mfc_un.unres.unresolved))) {
		if (ip_hdr(skb)->version == 0) {
			struct nlmsghdr *nlh = skb_pull(skb,
							sizeof(struct iphdr));

			if (mr_fill_mroute(mrt, skb, &c->_c,
					   nlmsg_data(nlh)) > 0) {
				nlh->nlmsg_len = skb_tail_pointer(skb) -
						 (u8 *)nlh;
			} else {
				nlh->nlmsg_type = NLMSG_ERROR;
				nlh->nlmsg_len = nlmsg_msg_size(sizeof(struct nlmsgerr));
				skb_trim(skb, nlh->nlmsg_len);
				e = nlmsg_data(nlh);
				e->error = -EMSGSIZE;
				memset(&e->msg, 0, sizeof(e->msg));
			}

			rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
		} else {
			ip_mr_forward(net, mrt, skb->dev, skb, c, 0);
		}
	}
}

/* Bounce a cache query up to mrouted and netlink.
 *
 * Called under mrt_lock.
 */
static int ipmr_cache_report(struct mr_table *mrt,
			     struct sk_buff *pkt, vifi_t vifi, int assert)
{
	const int ihl = ip_hdrlen(pkt);
	struct sock *mroute_sk;
	struct igmphdr *igmp;
	struct igmpmsg *msg;
	struct sk_buff *skb;
	int ret;

	if (assert == IGMPMSG_WHOLEPKT || assert == IGMPMSG_WRVIFWHOLE)
		skb = skb_realloc_headroom(pkt, sizeof(struct iphdr));
	else
		skb = alloc_skb(128, GFP_ATOMIC);

	if (!skb)
		return -ENOBUFS;

	if (assert == IGMPMSG_WHOLEPKT || assert == IGMPMSG_WRVIFWHOLE) {
		/* Ugly, but we have no choice with this interface.
		 * Duplicate old header, fix ihl, length etc.
		 * And all this only to mangle msg->im_msgtype and
		 * to set msg->im_mbz to "mbz" :-)
		 */
		skb_push(skb, sizeof(struct iphdr));
		skb_reset_network_header(skb);
		skb_reset_transport_header(skb);
		msg = (struct igmpmsg *)skb_network_header(skb);
		memcpy(msg, skb_network_header(pkt), sizeof(struct iphdr));
		msg->im_msgtype = assert;
		msg->im_mbz = 0;
		if (assert == IGMPMSG_WRVIFWHOLE)
			msg->im_vif = vifi;
		else
			msg->im_vif = mrt->mroute_reg_vif_num;
		ip_hdr(skb)->ihl = sizeof(struct iphdr) >> 2;
		ip_hdr(skb)->tot_len = htons(ntohs(ip_hdr(pkt)->tot_len) +
					     sizeof(struct iphdr));
	} else {
		/* Copy the IP header */
		skb_set_network_header(skb, skb->len);
		skb_put(skb, ihl);
		skb_copy_to_linear_data(skb, pkt->data, ihl);
		/* Flag to the kernel this is a route add */
		ip_hdr(skb)->protocol = 0;
		msg = (struct igmpmsg *)skb_network_header(skb);
		msg->im_vif = vifi;
		skb_dst_set(skb, dst_clone(skb_dst(pkt)));
		/* Add our header */
		igmp = skb_put(skb, sizeof(struct igmphdr));
		igmp->type = assert;
		msg->im_msgtype = assert;
		igmp->code = 0;
		ip_hdr(skb)->tot_len = htons(skb->len);	/* Fix the length */
		skb->transport_header = skb->network_header;
	}

	rcu_read_lock();
	mroute_sk = rcu_dereference(mrt->mroute_sk);
	if (!mroute_sk) {
		rcu_read_unlock();
		kfree_skb(skb);
		return -EINVAL;
	}

	igmpmsg_netlink_event(mrt, skb);

	/* Deliver to mrouted */
	ret = sock_queue_rcv_skb(mroute_sk, skb);
	rcu_read_unlock();
	if (ret < 0) {
		net_warn_ratelimited("mroute: pending queue full, dropping entries\n");
		kfree_skb(skb);
	}

	return ret;
}
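/* Userspace side of the upcall above (illustrative sketch): mrouted reads
 * these messages from the raw IGMP socket it passed to MRT_INIT and
 * dispatches on im_msgtype, e.g.:
 *
 *	n = recv(mrt_fd, buf, sizeof(buf), 0);
 *	struct igmpmsg *m = (struct igmpmsg *)buf;
 *	if (m->im_msgtype == IGMPMSG_NOCACHE)
 *		... resolve (m->im_src, m->im_dst), install an MFC entry ...
 */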
/* Queue a packet for resolution. It gets locked cache entry! */
static int ipmr_cache_unresolved(struct mr_table *mrt, vifi_t vifi,
				 struct sk_buff *skb, struct net_device *dev)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct mfc_cache *c;
	bool found = false;
	int err;

	spin_lock_bh(&mfc_unres_lock);
	list_for_each_entry(c, &mrt->mfc_unres_queue, _c.list) {
		if (c->mfc_mcastgrp == iph->daddr &&
		    c->mfc_origin == iph->saddr) {
			found = true;
			break;
		}
	}

	if (!found) {
		/* Create a new entry if allowable */
		if (atomic_read(&mrt->cache_resolve_queue_len) >= 10 ||
		    (c = ipmr_cache_alloc_unres()) == NULL) {
			spin_unlock_bh(&mfc_unres_lock);

			kfree_skb(skb);
			return -ENOBUFS;
		}

		/* Fill in the new cache entry */
		c->_c.mfc_parent = -1;
		c->mfc_origin = iph->saddr;
		c->mfc_mcastgrp = iph->daddr;

		/* Reflect first query at mrouted. */
		err = ipmr_cache_report(mrt, skb, vifi, IGMPMSG_NOCACHE);
		if (err < 0) {
			/* If the report failed throw the cache entry
			 * out - Brad Parker
			 */
			spin_unlock_bh(&mfc_unres_lock);

			ipmr_cache_free(c);
			kfree_skb(skb);
			return err;
		}

		atomic_inc(&mrt->cache_resolve_queue_len);
		list_add(&c->_c.list, &mrt->mfc_unres_queue);
		mroute_netlink_event(mrt, c, RTM_NEWROUTE);

		if (atomic_read(&mrt->cache_resolve_queue_len) == 1)
			mod_timer(&mrt->ipmr_expire_timer,
				  c->_c.mfc_un.unres.expires);
	}

	/* See if we can append the packet */
	if (c->_c.mfc_un.unres.unresolved.qlen > 3) {
		kfree_skb(skb);
		err = -ENOBUFS;
	} else {
		if (dev) {
			skb->dev = dev;
			skb->skb_iif = dev->ifindex;
		}
		skb_queue_tail(&c->_c.mfc_un.unres.unresolved, skb);
		err = 0;
	}

	spin_unlock_bh(&mfc_unres_lock);
	return err;
}
/* MFC cache manipulation by user space mroute daemon */
static int ipmr_mfc_delete(struct mr_table *mrt, struct mfcctl *mfc, int parent)
{
	struct net *net = read_pnet(&mrt->net);
	struct mfc_cache *c;

	/* The entries are added/deleted only under RTNL */
	rcu_read_lock();
	c = ipmr_cache_find_parent(mrt, mfc->mfcc_origin.s_addr,
				   mfc->mfcc_mcastgrp.s_addr, parent);
	rcu_read_unlock();
	if (!c)
		return -ENOENT;
	rhltable_remove(&mrt->mfc_hash, &c->_c.mnode, ipmr_rht_params);
	list_del_rcu(&c->_c.list);
	call_ipmr_mfc_entry_notifiers(net, FIB_EVENT_ENTRY_DEL, c, mrt->id);
	mroute_netlink_event(mrt, c, RTM_DELROUTE);
	mr_cache_put(&c->_c);

	return 0;
}

static int ipmr_mfc_add(struct net *net, struct mr_table *mrt,
			struct mfcctl *mfc, int mrtsock, int parent)
{
	struct mfc_cache *uc, *c;
	struct mr_mfc *_uc;
	bool found;
	int ret;

	if (mfc->mfcc_parent >= MAXVIFS)
		return -ENFILE;

	/* The entries are added/deleted only under RTNL */
	rcu_read_lock();
	c = ipmr_cache_find_parent(mrt, mfc->mfcc_origin.s_addr,
				   mfc->mfcc_mcastgrp.s_addr, parent);
	rcu_read_unlock();
	if (c) {
		write_lock_bh(&mrt_lock);
		c->_c.mfc_parent = mfc->mfcc_parent;
		ipmr_update_thresholds(mrt, &c->_c, mfc->mfcc_ttls);
		if (!mrtsock)
			c->_c.mfc_flags |= MFC_STATIC;
		write_unlock_bh(&mrt_lock);
		call_ipmr_mfc_entry_notifiers(net, FIB_EVENT_ENTRY_REPLACE, c,
					      mrt->id);
		mroute_netlink_event(mrt, c, RTM_NEWROUTE);
		return 0;
	}

	if (mfc->mfcc_mcastgrp.s_addr != htonl(INADDR_ANY) &&
	    !ipv4_is_multicast(mfc->mfcc_mcastgrp.s_addr))
		return -EINVAL;

	c = ipmr_cache_alloc();
	if (!c)
		return -ENOMEM;

	c->mfc_origin = mfc->mfcc_origin.s_addr;
	c->mfc_mcastgrp = mfc->mfcc_mcastgrp.s_addr;
	c->_c.mfc_parent = mfc->mfcc_parent;
	ipmr_update_thresholds(mrt, &c->_c, mfc->mfcc_ttls);
	if (!mrtsock)
		c->_c.mfc_flags |= MFC_STATIC;

	ret = rhltable_insert_key(&mrt->mfc_hash, &c->cmparg, &c->_c.mnode,
				  ipmr_rht_params);
	if (ret) {
		pr_err("ipmr: rhtable insert error %d\n", ret);
		ipmr_cache_free(c);
		return ret;
	}
	list_add_tail_rcu(&c->_c.list, &mrt->mfc_cache_list);

	/* Check to see if we resolved a queued list. If so we
	 * need to send on the frames and tidy up.
	 */
	found = false;
	spin_lock_bh(&mfc_unres_lock);
	list_for_each_entry(_uc, &mrt->mfc_unres_queue, list) {
		uc = (struct mfc_cache *)_uc;
		if (uc->mfc_origin == c->mfc_origin &&
		    uc->mfc_mcastgrp == c->mfc_mcastgrp) {
			list_del(&_uc->list);
			atomic_dec(&mrt->cache_resolve_queue_len);
			found = true;
			break;
		}
	}
	if (list_empty(&mrt->mfc_unres_queue))
		del_timer(&mrt->ipmr_expire_timer);
	spin_unlock_bh(&mfc_unres_lock);

	if (found) {
		ipmr_cache_resolve(net, mrt, uc, c);
		ipmr_cache_free(uc);
	}
	call_ipmr_mfc_entry_notifiers(net, FIB_EVENT_ENTRY_ADD, c, mrt->id);
	mroute_netlink_event(mrt, c, RTM_NEWROUTE);
	return 0;
}
/* Close the multicast socket, and clear the vif tables etc */
static void mroute_clean_tables(struct mr_table *mrt, bool all)
{
	struct net *net = read_pnet(&mrt->net);
	struct mr_mfc *c, *tmp;
	struct mfc_cache *cache;
	LIST_HEAD(list);
	int i;

	/* Shut down all active vif entries */
	for (i = 0; i < mrt->maxvif; i++) {
		if (!all && (mrt->vif_table[i].flags & VIFF_STATIC))
			continue;
		vif_delete(mrt, i, 0, &list);
	}
	unregister_netdevice_many(&list);

	/* Wipe the cache */
	list_for_each_entry_safe(c, tmp, &mrt->mfc_cache_list, list) {
		if (!all && (c->mfc_flags & MFC_STATIC))
			continue;
		rhltable_remove(&mrt->mfc_hash, &c->mnode, ipmr_rht_params);
		list_del_rcu(&c->list);
		cache = (struct mfc_cache *)c;
		call_ipmr_mfc_entry_notifiers(net, FIB_EVENT_ENTRY_DEL, cache,
					      mrt->id);
		mroute_netlink_event(mrt, cache, RTM_DELROUTE);
		mr_cache_put(c);
	}

	if (atomic_read(&mrt->cache_resolve_queue_len) != 0) {
		spin_lock_bh(&mfc_unres_lock);
		list_for_each_entry_safe(c, tmp, &mrt->mfc_unres_queue, list) {
			list_del(&c->list);
			cache = (struct mfc_cache *)c;
			mroute_netlink_event(mrt, cache, RTM_DELROUTE);
			ipmr_destroy_unres(mrt, cache);
		}
		spin_unlock_bh(&mfc_unres_lock);
	}
}

/* called from ip_ra_control(), before an RCU grace period,
 * we don't need to call synchronize_rcu() here
 */
static void mrtsock_destruct(struct sock *sk)
{
	struct net *net = sock_net(sk);
	struct mr_table *mrt;

	rtnl_lock();
	ipmr_for_each_table(mrt, net) {
		if (sk == rtnl_dereference(mrt->mroute_sk)) {
			IPV4_DEVCONF_ALL(net, MC_FORWARDING)--;
			inet_netconf_notify_devconf(net, RTM_NEWNETCONF,
						    NETCONFA_MC_FORWARDING,
						    NETCONFA_IFINDEX_ALL,
						    net->ipv4.devconf_all);
			RCU_INIT_POINTER(mrt->mroute_sk, NULL);
			mroute_clean_tables(mrt, false);
		}
	}
	rtnl_unlock();
}

/* Socket options and virtual interface manipulation. The whole
 * virtual interface system is a complete heap, but unfortunately
 * that's how BSD mrouted happens to think. Maybe one day with a proper
 * MOSPF/PIM router set up we can clean this up.
 */
int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval,
			 unsigned int optlen)
{
	struct net *net = sock_net(sk);
	int val, ret = 0, parent = 0;
	struct mr_table *mrt;
	struct vifctl vif;
	struct mfcctl mfc;
	bool do_wrvifwhole;
	u32 uval;

	/* There's one exception to the lock - MRT_DONE which needs to unlock */
	rtnl_lock();
	if (sk->sk_type != SOCK_RAW ||
	    inet_sk(sk)->inet_num != IPPROTO_IGMP) {
		ret = -EOPNOTSUPP;
		goto out_unlock;
	}

	mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
	if (!mrt) {
		ret = -ENOENT;
		goto out_unlock;
	}
	if (optname != MRT_INIT) {
		if (sk != rcu_access_pointer(mrt->mroute_sk) &&
		    !ns_capable(net->user_ns, CAP_NET_ADMIN)) {
			ret = -EACCES;
			goto out_unlock;
		}
	}

	switch (optname) {
	case MRT_INIT:
		if (optlen != sizeof(int)) {
			ret = -EINVAL;
			break;
		}
		if (rtnl_dereference(mrt->mroute_sk)) {
			ret = -EADDRINUSE;
			break;
		}

		ret = ip_ra_control(sk, 1, mrtsock_destruct);
		if (ret == 0) {
			rcu_assign_pointer(mrt->mroute_sk, sk);
			IPV4_DEVCONF_ALL(net, MC_FORWARDING)++;
			inet_netconf_notify_devconf(net, RTM_NEWNETCONF,
						    NETCONFA_MC_FORWARDING,
						    NETCONFA_IFINDEX_ALL,
						    net->ipv4.devconf_all);
		}
		break;
	case MRT_DONE:
		if (sk != rcu_access_pointer(mrt->mroute_sk)) {
			ret = -EACCES;
		} else {
			/* We need to unlock here because mrtsock_destruct takes
			 * care of rtnl itself and we can't change that due to
			 * the IP_ROUTER_ALERT setsockopt which runs without it.
			 */
			rtnl_unlock();
			ret = ip_ra_control(sk, 0, NULL);
			goto out;
		}
		break;
	case MRT_ADD_VIF:
	case MRT_DEL_VIF:
		if (optlen != sizeof(vif)) {
			ret = -EINVAL;
			break;
		}
		if (copy_from_user(&vif, optval, sizeof(vif))) {
			ret = -EFAULT;
			break;
		}
		if (vif.vifc_vifi >= MAXVIFS) {
			ret = -ENFILE;
			break;
		}
		if (optname == MRT_ADD_VIF) {
			ret = vif_add(net, mrt, &vif,
				      sk == rtnl_dereference(mrt->mroute_sk));
		} else {
			ret = vif_delete(mrt, vif.vifc_vifi, 0, NULL);
		}
		break;
	/* Manipulate the forwarding caches. These live
	 * in a sort of kernel/user symbiosis.
	 */
	case MRT_ADD_MFC:
	case MRT_DEL_MFC:
		parent = -1;
		/* fall through */
	case MRT_ADD_MFC_PROXY:
	case MRT_DEL_MFC_PROXY:
		if (optlen != sizeof(mfc)) {
			ret = -EINVAL;
			break;
		}
		if (copy_from_user(&mfc, optval, sizeof(mfc))) {
			ret = -EFAULT;
			break;
		}
		if (parent == 0)
			parent = mfc.mfcc_parent;
		if (optname == MRT_DEL_MFC || optname == MRT_DEL_MFC_PROXY)
			ret = ipmr_mfc_delete(mrt, &mfc, parent);
		else
			ret = ipmr_mfc_add(net, mrt, &mfc,
					   sk == rtnl_dereference(mrt->mroute_sk),
					   parent);
		break;
	/* Control PIM assert. */
	case MRT_ASSERT:
		if (optlen != sizeof(val)) {
			ret = -EINVAL;
			break;
		}
		if (get_user(val, (int __user *)optval)) {
			ret = -EFAULT;
			break;
		}
		mrt->mroute_do_assert = val;
		break;
	case MRT_PIM:
		if (!ipmr_pimsm_enabled()) {
			ret = -ENOPROTOOPT;
			break;
		}
		if (optlen != sizeof(val)) {
			ret = -EINVAL;
			break;
		}
		if (get_user(val, (int __user *)optval)) {
			ret = -EFAULT;
			break;
		}

		do_wrvifwhole = (val == IGMPMSG_WRVIFWHOLE);
		val = !!val;
		if (val != mrt->mroute_do_pim) {
			mrt->mroute_do_pim = val;
			mrt->mroute_do_assert = val;
			mrt->mroute_do_wrvifwhole = do_wrvifwhole;
		}
		break;
	case MRT_TABLE:
		if (!IS_BUILTIN(CONFIG_IP_MROUTE_MULTIPLE_TABLES)) {
			ret = -ENOPROTOOPT;
			break;
		}
		if (optlen != sizeof(uval)) {
			ret = -EINVAL;
			break;
		}
		if (get_user(uval, (u32 __user *)optval)) {
			ret = -EFAULT;
			break;
		}

		if (sk == rtnl_dereference(mrt->mroute_sk)) {
			ret = -EBUSY;
		} else {
			mrt = ipmr_new_table(net, uval);
			if (IS_ERR(mrt))
				ret = PTR_ERR(mrt);
			else
				raw_sk(sk)->ipmr_table = uval;
		}
		break;
	/* Spurious command, or MRT_VERSION which you cannot set. */
	default:
		ret = -ENOPROTOOPT;
	}
out_unlock:
	rtnl_unlock();
out:
	return ret;
}
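/* Typical daemon-side use of the options above (illustrative userspace
 * sketch, not part of this file):
 *
 *	int fd = socket(AF_INET, SOCK_RAW, IPPROTO_IGMP);
 *	int one = 1;
 *	setsockopt(fd, IPPROTO_IP, MRT_INIT, &one, sizeof(one));
 *
 *	struct vifctl vc = { .vifc_vifi = 0, .vifc_threshold = 1 };
 *	vc.vifc_lcl_addr.s_addr = inet_addr("192.0.2.1");
 *	setsockopt(fd, IPPROTO_IP, MRT_ADD_VIF, &vc, sizeof(vc));
 *
 *	struct mfcctl mc = { .mfcc_parent = 0 };
 *	mc.mfcc_origin.s_addr   = inet_addr("198.51.100.7");
 *	mc.mfcc_mcastgrp.s_addr = inet_addr("239.1.2.3");
 *	mc.mfcc_ttls[1] = 1;	(forward out vif 1)
 *	setsockopt(fd, IPPROTO_IP, MRT_ADD_MFC, &mc, sizeof(mc));
 */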
/* Getsockopt support for the multicast routing system. */
int ip_mroute_getsockopt(struct sock *sk, int optname, char __user *optval,
			 int __user *optlen)
{
	int olr;
	int val;
	struct net *net = sock_net(sk);
	struct mr_table *mrt;

	if (sk->sk_type != SOCK_RAW ||
	    inet_sk(sk)->inet_num != IPPROTO_IGMP)
		return -EOPNOTSUPP;

	mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
	if (!mrt)
		return -ENOENT;

	switch (optname) {
	case MRT_VERSION:
		val = 0x0305;
		break;
	case MRT_PIM:
		if (!ipmr_pimsm_enabled())
			return -ENOPROTOOPT;
		val = mrt->mroute_do_pim;
		break;
	case MRT_ASSERT:
		val = mrt->mroute_do_assert;
		break;
	default:
		return -ENOPROTOOPT;
	}

	if (get_user(olr, optlen))
		return -EFAULT;
	olr = min_t(unsigned int, olr, sizeof(int));
	if (olr < 0)
		return -EINVAL;
	if (put_user(olr, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, olr))
		return -EFAULT;
	return 0;
}

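/* Illustrative userspace sketch (not part of the kernel build): reading
 * MRT_VERSION back through the getsockopt() path above; s is assumed to
 * be the IGMP raw socket from the previous sketch.
 *
 *	int ver = 0;
 *	socklen_t len = sizeof(ver);
 *
 *	if (getsockopt(s, IPPROTO_IP, MRT_VERSION, &ver, &len) == 0)
 *		printf("mroute version %#x\n", ver);	// 0x305 here
 */
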
/* The IP multicast ioctl support routines. */
int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg)
{
	struct sioc_sg_req sr;
	struct sioc_vif_req vr;
	struct vif_device *vif;
	struct mfc_cache *c;
	struct net *net = sock_net(sk);
	struct mr_table *mrt;

	mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
	if (!mrt)
		return -ENOENT;

	switch (cmd) {
	case SIOCGETVIFCNT:
		if (copy_from_user(&vr, arg, sizeof(vr)))
			return -EFAULT;
		if (vr.vifi >= mrt->maxvif)
			return -EINVAL;
		read_lock(&mrt_lock);
		vif = &mrt->vif_table[vr.vifi];
		if (VIF_EXISTS(mrt, vr.vifi)) {
			vr.icount = vif->pkt_in;
			vr.ocount = vif->pkt_out;
			vr.ibytes = vif->bytes_in;
			vr.obytes = vif->bytes_out;
			read_unlock(&mrt_lock);

			if (copy_to_user(arg, &vr, sizeof(vr)))
				return -EFAULT;
			return 0;
		}
		read_unlock(&mrt_lock);
		return -EADDRNOTAVAIL;
	case SIOCGETSGCNT:
		if (copy_from_user(&sr, arg, sizeof(sr)))
			return -EFAULT;

		rcu_read_lock();
		c = ipmr_cache_find(mrt, sr.src.s_addr, sr.grp.s_addr);
		if (c) {
			sr.pktcnt = c->_c.mfc_un.res.pkt;
			sr.bytecnt = c->_c.mfc_un.res.bytes;
			sr.wrong_if = c->_c.mfc_un.res.wrong_if;
			rcu_read_unlock();

			if (copy_to_user(arg, &sr, sizeof(sr)))
				return -EFAULT;
			return 0;
		}
		rcu_read_unlock();
		return -EADDRNOTAVAIL;
	default:
		return -ENOIOCTLCMD;
	}
}

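/* Illustrative userspace sketch (not part of the kernel build): querying
 * the per-(S,G) counters maintained by this table via SIOCGETSGCNT; the
 * addresses are hypothetical documentation values and s is the IGMP raw
 * socket from the earlier sketch.
 *
 *	struct sioc_sg_req sr = {};
 *
 *	sr.src.s_addr = inet_addr("192.0.2.1");
 *	sr.grp.s_addr = inet_addr("233.252.0.1");
 *	if (ioctl(s, SIOCGETSGCNT, &sr) == 0)
 *		printf("pkts=%lu bytes=%lu wrong_if=%lu\n",
 *		       sr.pktcnt, sr.bytecnt, sr.wrong_if);
 */
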
#ifdef CONFIG_COMPAT
struct compat_sioc_sg_req {
	struct in_addr src;
	struct in_addr grp;
	compat_ulong_t pktcnt;
	compat_ulong_t bytecnt;
	compat_ulong_t wrong_if;
};

struct compat_sioc_vif_req {
	vifi_t	vifi;		/* Which iface */
	compat_ulong_t icount;
	compat_ulong_t ocount;
	compat_ulong_t ibytes;
	compat_ulong_t obytes;
};

int ipmr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
{
	struct compat_sioc_sg_req sr;
	struct compat_sioc_vif_req vr;
	struct vif_device *vif;
	struct mfc_cache *c;
	struct net *net = sock_net(sk);
	struct mr_table *mrt;

	mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
	if (!mrt)
		return -ENOENT;

	switch (cmd) {
	case SIOCGETVIFCNT:
		if (copy_from_user(&vr, arg, sizeof(vr)))
			return -EFAULT;
		if (vr.vifi >= mrt->maxvif)
			return -EINVAL;
		read_lock(&mrt_lock);
		vif = &mrt->vif_table[vr.vifi];
		if (VIF_EXISTS(mrt, vr.vifi)) {
			vr.icount = vif->pkt_in;
			vr.ocount = vif->pkt_out;
			vr.ibytes = vif->bytes_in;
			vr.obytes = vif->bytes_out;
			read_unlock(&mrt_lock);

			if (copy_to_user(arg, &vr, sizeof(vr)))
				return -EFAULT;
			return 0;
		}
		read_unlock(&mrt_lock);
		return -EADDRNOTAVAIL;
	case SIOCGETSGCNT:
		if (copy_from_user(&sr, arg, sizeof(sr)))
			return -EFAULT;

		rcu_read_lock();
		c = ipmr_cache_find(mrt, sr.src.s_addr, sr.grp.s_addr);
		if (c) {
			sr.pktcnt = c->_c.mfc_un.res.pkt;
			sr.bytecnt = c->_c.mfc_un.res.bytes;
			sr.wrong_if = c->_c.mfc_un.res.wrong_if;
			rcu_read_unlock();

			if (copy_to_user(arg, &sr, sizeof(sr)))
				return -EFAULT;
			return 0;
		}
		rcu_read_unlock();
		return -EADDRNOTAVAIL;
	default:
		return -ENOIOCTLCMD;
	}
}
#endif

static int ipmr_device_event(struct notifier_block *this, unsigned long event,
			     void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct net *net = dev_net(dev);
	struct mr_table *mrt;
	struct vif_device *v;
	int ct;

	if (event != NETDEV_UNREGISTER)
		return NOTIFY_DONE;

	ipmr_for_each_table(mrt, net) {
		v = &mrt->vif_table[0];
		for (ct = 0; ct < mrt->maxvif; ct++, v++) {
			if (v->dev == dev)
				vif_delete(mrt, ct, 1, NULL);
		}
	}
	return NOTIFY_DONE;
}

static struct notifier_block ip_mr_notifier = {
	.notifier_call = ipmr_device_event,
};

/* Encapsulate a packet by attaching a valid IPIP header to it.
 * This avoids tunnel drivers and other mess and gives us the speed so
 * important for multicast video.
 */
static void ip_encap(struct net *net, struct sk_buff *skb,
		     __be32 saddr, __be32 daddr)
{
	struct iphdr *iph;
	const struct iphdr *old_iph = ip_hdr(skb);

	skb_push(skb, sizeof(struct iphdr));
	skb->transport_header = skb->network_header;
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);

	iph->version	= 4;
	iph->tos	= old_iph->tos;
	iph->ttl	= old_iph->ttl;
	iph->frag_off	= 0;
	iph->daddr	= daddr;
	iph->saddr	= saddr;
	iph->protocol	= IPPROTO_IPIP;
	iph->ihl	= 5;
	iph->tot_len	= htons(skb->len);
	ip_select_ident(net, skb, NULL);
	ip_send_check(iph);

	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
	nf_reset(skb);
}

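/* For reference, the resulting frame layout after ip_encap() is plain
 * RFC 2003 IP-in-IP:
 *
 *	+----------------+----------------+------------------+
 *	| outer IPv4 hdr | inner IPv4 hdr | original payload |
 *	|  proto = IPIP  |   (untouched)  |                  |
 *	+----------------+----------------+------------------+
 *
 * The outer header copies tos/ttl from the inner one and carries the
 * vif's local/remote tunnel endpoints as saddr/daddr.
 */
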
static inline int ipmr_forward_finish(struct net *net, struct sock *sk,
				      struct sk_buff *skb)
{
	struct ip_options *opt = &(IPCB(skb)->opt);

	IP_INC_STATS(net, IPSTATS_MIB_OUTFORWDATAGRAMS);
	IP_ADD_STATS(net, IPSTATS_MIB_OUTOCTETS, skb->len);

	if (unlikely(opt->optlen))
		ip_forward_options(skb);

	return dst_output(net, sk, skb);
}

#ifdef CONFIG_NET_SWITCHDEV
static bool ipmr_forward_offloaded(struct sk_buff *skb, struct mr_table *mrt,
				   int in_vifi, int out_vifi)
{
	struct vif_device *out_vif = &mrt->vif_table[out_vifi];
	struct vif_device *in_vif = &mrt->vif_table[in_vifi];

	if (!skb->offload_mr_fwd_mark)
		return false;
	if (!out_vif->dev_parent_id.id_len || !in_vif->dev_parent_id.id_len)
		return false;
	return netdev_phys_item_id_same(&out_vif->dev_parent_id,
					&in_vif->dev_parent_id);
}
#else
static bool ipmr_forward_offloaded(struct sk_buff *skb, struct mr_table *mrt,
				   int in_vifi, int out_vifi)
{
	return false;
}
#endif

/* Processing handlers for ipmr_forward */
static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
			    int in_vifi, struct sk_buff *skb,
			    struct mfc_cache *c, int vifi)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct vif_device *vif = &mrt->vif_table[vifi];
	struct net_device *dev;
	struct rtable *rt;
	struct flowi4 fl4;
	int encap = 0;

	if (!vif->dev)
		goto out_free;

	if (vif->flags & VIFF_REGISTER) {
		vif->pkt_out++;
		vif->bytes_out += skb->len;
		vif->dev->stats.tx_bytes += skb->len;
		vif->dev->stats.tx_packets++;
		ipmr_cache_report(mrt, skb, vifi, IGMPMSG_WHOLEPKT);
		goto out_free;
	}

	if (ipmr_forward_offloaded(skb, mrt, in_vifi, vifi))
		goto out_free;

	if (vif->flags & VIFF_TUNNEL) {
		rt = ip_route_output_ports(net, &fl4, NULL,
					   vif->remote, vif->local,
					   0, 0,
					   IPPROTO_IPIP,
					   RT_TOS(iph->tos), vif->link);
		if (IS_ERR(rt))
			goto out_free;
		encap = sizeof(struct iphdr);
	} else {
		rt = ip_route_output_ports(net, &fl4, NULL, iph->daddr, 0,
					   0, 0,
					   IPPROTO_IPIP,
					   RT_TOS(iph->tos), vif->link);
		if (IS_ERR(rt))
			goto out_free;
	}

	dev = rt->dst.dev;

	if (skb->len + encap > dst_mtu(&rt->dst) && (ntohs(iph->frag_off) & IP_DF)) {
		/* Do not fragment multicasts. Alas, IPv4 does not
		 * allow us to send ICMP here, so the packets will
		 * simply disappear into a black hole.
		 */
		IP_INC_STATS(net, IPSTATS_MIB_FRAGFAILS);
		ip_rt_put(rt);
		goto out_free;
	}

	encap += LL_RESERVED_SPACE(dev) + rt->dst.header_len;

	if (skb_cow(skb, encap)) {
		ip_rt_put(rt);
		goto out_free;
	}

	vif->pkt_out++;
	vif->bytes_out += skb->len;

	skb_dst_drop(skb);
	skb_dst_set(skb, &rt->dst);
	ip_decrease_ttl(ip_hdr(skb));

	/* FIXME: forward and output firewalls used to be called here.
	 * What do we do with netfilter? -- RR
	 */
	if (vif->flags & VIFF_TUNNEL) {
		ip_encap(net, skb, vif->local, vif->remote);
		/* FIXME: extra output firewall step used to be here. --RR */
		vif->dev->stats.tx_packets++;
		vif->dev->stats.tx_bytes += skb->len;
	}

	IPCB(skb)->flags |= IPSKB_FORWARDED;

	/* RFC1584 teaches that a DVMRP/PIM router must deliver packets locally
	 * not only before forwarding, but also after forwarding on all output
	 * interfaces. Clearly, if an mrouter runs a multicasting program, the
	 * program should receive packets regardless of which interface it has
	 * joined on.
	 * If we did not do this, the program would have to join on all
	 * interfaces. On the other hand, a multihomed host (or router, but
	 * not an mrouter) cannot join on more than one interface - it would
	 * result in receiving multiple packets.
	 */
	NF_HOOK(NFPROTO_IPV4, NF_INET_FORWARD,
		net, NULL, skb, skb->dev, dev,
		ipmr_forward_finish);
	return;

out_free:
	kfree_skb(skb);
}

static int ipmr_find_vif(struct mr_table *mrt, struct net_device *dev)
{
	int ct;

	for (ct = mrt->maxvif - 1; ct >= 0; ct--) {
		if (mrt->vif_table[ct].dev == dev)
			break;
	}
	return ct;
}

  1674. /* "local" means that we should preserve one skb (for local delivery) */
  1675. static void ip_mr_forward(struct net *net, struct mr_table *mrt,
  1676. struct net_device *dev, struct sk_buff *skb,
  1677. struct mfc_cache *c, int local)
  1678. {
  1679. int true_vifi = ipmr_find_vif(mrt, dev);
  1680. int psend = -1;
  1681. int vif, ct;
  1682. vif = c->_c.mfc_parent;
  1683. c->_c.mfc_un.res.pkt++;
  1684. c->_c.mfc_un.res.bytes += skb->len;
  1685. c->_c.mfc_un.res.lastuse = jiffies;
  1686. if (c->mfc_origin == htonl(INADDR_ANY) && true_vifi >= 0) {
  1687. struct mfc_cache *cache_proxy;
  1688. /* For an (*,G) entry, we only check that the incomming
  1689. * interface is part of the static tree.
  1690. */
  1691. cache_proxy = mr_mfc_find_any_parent(mrt, vif);
  1692. if (cache_proxy &&
  1693. cache_proxy->_c.mfc_un.res.ttls[true_vifi] < 255)
  1694. goto forward;
  1695. }
  1696. /* Wrong interface: drop packet and (maybe) send PIM assert. */
  1697. if (mrt->vif_table[vif].dev != dev) {
  1698. if (rt_is_output_route(skb_rtable(skb))) {
  1699. /* It is our own packet, looped back.
  1700. * Very complicated situation...
  1701. *
  1702. * The best workaround until routing daemons will be
  1703. * fixed is not to redistribute packet, if it was
  1704. * send through wrong interface. It means, that
  1705. * multicast applications WILL NOT work for
  1706. * (S,G), which have default multicast route pointing
  1707. * to wrong oif. In any case, it is not a good
  1708. * idea to use multicasting applications on router.
  1709. */
  1710. goto dont_forward;
  1711. }
  1712. c->_c.mfc_un.res.wrong_if++;
  1713. if (true_vifi >= 0 && mrt->mroute_do_assert &&
  1714. /* pimsm uses asserts, when switching from RPT to SPT,
  1715. * so that we cannot check that packet arrived on an oif.
  1716. * It is bad, but otherwise we would need to move pretty
  1717. * large chunk of pimd to kernel. Ough... --ANK
  1718. */
  1719. (mrt->mroute_do_pim ||
  1720. c->_c.mfc_un.res.ttls[true_vifi] < 255) &&
  1721. time_after(jiffies,
  1722. c->_c.mfc_un.res.last_assert +
  1723. MFC_ASSERT_THRESH)) {
  1724. c->_c.mfc_un.res.last_assert = jiffies;
  1725. ipmr_cache_report(mrt, skb, true_vifi, IGMPMSG_WRONGVIF);
  1726. if (mrt->mroute_do_wrvifwhole)
  1727. ipmr_cache_report(mrt, skb, true_vifi,
  1728. IGMPMSG_WRVIFWHOLE);
  1729. }
  1730. goto dont_forward;
  1731. }
  1732. forward:
  1733. mrt->vif_table[vif].pkt_in++;
  1734. mrt->vif_table[vif].bytes_in += skb->len;
  1735. /* Forward the frame */
  1736. if (c->mfc_origin == htonl(INADDR_ANY) &&
  1737. c->mfc_mcastgrp == htonl(INADDR_ANY)) {
  1738. if (true_vifi >= 0 &&
  1739. true_vifi != c->_c.mfc_parent &&
  1740. ip_hdr(skb)->ttl >
  1741. c->_c.mfc_un.res.ttls[c->_c.mfc_parent]) {
  1742. /* It's an (*,*) entry and the packet is not coming from
  1743. * the upstream: forward the packet to the upstream
  1744. * only.
  1745. */
  1746. psend = c->_c.mfc_parent;
  1747. goto last_forward;
  1748. }
  1749. goto dont_forward;
  1750. }
  1751. for (ct = c->_c.mfc_un.res.maxvif - 1;
  1752. ct >= c->_c.mfc_un.res.minvif; ct--) {
  1753. /* For (*,G) entry, don't forward to the incoming interface */
  1754. if ((c->mfc_origin != htonl(INADDR_ANY) ||
  1755. ct != true_vifi) &&
  1756. ip_hdr(skb)->ttl > c->_c.mfc_un.res.ttls[ct]) {
  1757. if (psend != -1) {
  1758. struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
  1759. if (skb2)
  1760. ipmr_queue_xmit(net, mrt, true_vifi,
  1761. skb2, c, psend);
  1762. }
  1763. psend = ct;
  1764. }
  1765. }
  1766. last_forward:
  1767. if (psend != -1) {
  1768. if (local) {
  1769. struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
  1770. if (skb2)
  1771. ipmr_queue_xmit(net, mrt, true_vifi, skb2,
  1772. c, psend);
  1773. } else {
  1774. ipmr_queue_xmit(net, mrt, true_vifi, skb, c, psend);
  1775. return;
  1776. }
  1777. }
  1778. dont_forward:
  1779. if (!local)
  1780. kfree_skb(skb);
  1781. }
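/* A note on the TTL-threshold test above: ttls[ct] is the per-vif
 * threshold configured via MRT_ADD_MFC (255 means "never forward").
 * For example, with a hypothetical entry
 *
 *	ttls[] = { [0] = 1, [1] = 64, [2] = 255 }
 *
 * a packet arriving with ip_hdr(skb)->ttl == 32 is queued for vif 0
 * (32 > 1), skipped for vif 1 (32 <= 64) and never sent to vif 2.
 */
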
static struct mr_table *ipmr_rt_fib_lookup(struct net *net, struct sk_buff *skb)
{
	struct rtable *rt = skb_rtable(skb);
	struct iphdr *iph = ip_hdr(skb);
	struct flowi4 fl4 = {
		.daddr = iph->daddr,
		.saddr = iph->saddr,
		.flowi4_tos = RT_TOS(iph->tos),
		.flowi4_oif = (rt_is_output_route(rt) ?
			       skb->dev->ifindex : 0),
		.flowi4_iif = (rt_is_output_route(rt) ?
			       LOOPBACK_IFINDEX :
			       skb->dev->ifindex),
		.flowi4_mark = skb->mark,
	};
	struct mr_table *mrt;
	int err;

	err = ipmr_fib_lookup(net, &fl4, &mrt);
	if (err)
		return ERR_PTR(err);
	return mrt;
}

/* Multicast packets for forwarding arrive here
 * Called with rcu_read_lock();
 */
int ip_mr_input(struct sk_buff *skb)
{
	struct mfc_cache *cache;
	struct net *net = dev_net(skb->dev);
	int local = skb_rtable(skb)->rt_flags & RTCF_LOCAL;
	struct mr_table *mrt;
	struct net_device *dev;

	/* skb->dev passed in is the loX master dev for vrfs.
	 * As there are no vifs associated with loopback devices,
	 * get the proper interface that does have a vif associated with it.
	 */
	dev = skb->dev;
	if (netif_is_l3_master(skb->dev)) {
		dev = dev_get_by_index_rcu(net, IPCB(skb)->iif);
		if (!dev) {
			kfree_skb(skb);
			return -ENODEV;
		}
	}

	/* Packet is looped back after forwarding; it should not be
	 * forwarded a second time, but it can still be delivered locally.
	 */
	if (IPCB(skb)->flags & IPSKB_FORWARDED)
		goto dont_forward;

	mrt = ipmr_rt_fib_lookup(net, skb);
	if (IS_ERR(mrt)) {
		kfree_skb(skb);
		return PTR_ERR(mrt);
	}
	if (!local) {
		if (IPCB(skb)->opt.router_alert) {
			if (ip_call_ra_chain(skb))
				return 0;
		} else if (ip_hdr(skb)->protocol == IPPROTO_IGMP) {
			/* IGMPv1 (and broken IGMPv2 implementations, such as
			 * Cisco IOS <= 11.2(8)) do not put the router alert
			 * option into IGMP packets destined to routable
			 * groups. This is very bad, because it means that we
			 * can forward NO IGMP messages.
			 */
			struct sock *mroute_sk;

			mroute_sk = rcu_dereference(mrt->mroute_sk);
			if (mroute_sk) {
				nf_reset(skb);
				raw_rcv(mroute_sk, skb);
				return 0;
			}
		}
	}

	/* already under rcu_read_lock() */
	cache = ipmr_cache_find(mrt, ip_hdr(skb)->saddr, ip_hdr(skb)->daddr);
	if (!cache) {
		int vif = ipmr_find_vif(mrt, dev);

		if (vif >= 0)
			cache = ipmr_cache_find_any(mrt, ip_hdr(skb)->daddr,
						    vif);
	}

	/* No usable cache entry */
	if (!cache) {
		int vif;

		if (local) {
			struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);

			ip_local_deliver(skb);
			if (!skb2)
				return -ENOBUFS;
			skb = skb2;
		}

		read_lock(&mrt_lock);
		vif = ipmr_find_vif(mrt, dev);
		if (vif >= 0) {
			int err2 = ipmr_cache_unresolved(mrt, vif, skb, dev);

			read_unlock(&mrt_lock);
			return err2;
		}
		read_unlock(&mrt_lock);
		kfree_skb(skb);
		return -ENODEV;
	}

	read_lock(&mrt_lock);
	ip_mr_forward(net, mrt, dev, skb, cache, local);
	read_unlock(&mrt_lock);

	if (local)
		return ip_local_deliver(skb);

	return 0;

dont_forward:
	if (local)
		return ip_local_deliver(skb);
	kfree_skb(skb);
	return 0;
}

#ifdef CONFIG_IP_PIMSM_V1
/* Handle IGMP messages of PIMv1 */
int pim_rcv_v1(struct sk_buff *skb)
{
	struct igmphdr *pim;
	struct net *net = dev_net(skb->dev);
	struct mr_table *mrt;

	if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(struct iphdr)))
		goto drop;

	pim = igmp_hdr(skb);

	mrt = ipmr_rt_fib_lookup(net, skb);
	if (IS_ERR(mrt))
		goto drop;
	if (!mrt->mroute_do_pim ||
	    pim->group != PIM_V1_VERSION || pim->code != PIM_V1_REGISTER)
		goto drop;

	if (__pim_rcv(mrt, skb, sizeof(*pim))) {
drop:
		kfree_skb(skb);
	}
	return 0;
}
#endif

#ifdef CONFIG_IP_PIMSM_V2
static int pim_rcv(struct sk_buff *skb)
{
	struct pimreghdr *pim;
	struct net *net = dev_net(skb->dev);
	struct mr_table *mrt;

	if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(struct iphdr)))
		goto drop;

	pim = (struct pimreghdr *)skb_transport_header(skb);
	if (pim->type != ((PIM_VERSION << 4) | (PIM_TYPE_REGISTER)) ||
	    (pim->flags & PIM_NULL_REGISTER) ||
	    (ip_compute_csum((void *)pim, sizeof(*pim)) != 0 &&
	     csum_fold(skb_checksum(skb, 0, skb->len, 0))))
		goto drop;

	mrt = ipmr_rt_fib_lookup(net, skb);
	if (IS_ERR(mrt))
		goto drop;
	if (__pim_rcv(mrt, skb, sizeof(*pim))) {
drop:
		kfree_skb(skb);
	}
	return 0;
}
#endif

int ipmr_get_route(struct net *net, struct sk_buff *skb,
		   __be32 saddr, __be32 daddr,
		   struct rtmsg *rtm, u32 portid)
{
	struct mfc_cache *cache;
	struct mr_table *mrt;
	int err;

	mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
	if (!mrt)
		return -ENOENT;

	rcu_read_lock();
	cache = ipmr_cache_find(mrt, saddr, daddr);
	if (!cache && skb->dev) {
		int vif = ipmr_find_vif(mrt, skb->dev);

		if (vif >= 0)
			cache = ipmr_cache_find_any(mrt, daddr, vif);
	}
	if (!cache) {
		struct sk_buff *skb2;
		struct iphdr *iph;
		struct net_device *dev;
		int vif = -1;

		dev = skb->dev;
		read_lock(&mrt_lock);
		if (dev)
			vif = ipmr_find_vif(mrt, dev);
		if (vif < 0) {
			read_unlock(&mrt_lock);
			rcu_read_unlock();
			return -ENODEV;
		}
		skb2 = skb_clone(skb, GFP_ATOMIC);
		if (!skb2) {
			read_unlock(&mrt_lock);
			rcu_read_unlock();
			return -ENOMEM;
		}

		NETLINK_CB(skb2).portid = portid;
		skb_push(skb2, sizeof(struct iphdr));
		skb_reset_network_header(skb2);
		iph = ip_hdr(skb2);
		iph->ihl = sizeof(struct iphdr) >> 2;
		iph->saddr = saddr;
		iph->daddr = daddr;
		iph->version = 0;
		err = ipmr_cache_unresolved(mrt, vif, skb2, dev);
		read_unlock(&mrt_lock);
		rcu_read_unlock();
		return err;
	}

	read_lock(&mrt_lock);
	err = mr_fill_mroute(mrt, skb, &cache->_c, rtm);
	read_unlock(&mrt_lock);
	rcu_read_unlock();
	return err;
}

static int ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
			    u32 portid, u32 seq, struct mfc_cache *c, int cmd,
			    int flags)
{
	struct nlmsghdr *nlh;
	struct rtmsg *rtm;
	int err;

	nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rtm), flags);
	if (!nlh)
		return -EMSGSIZE;

	rtm = nlmsg_data(nlh);
	rtm->rtm_family   = RTNL_FAMILY_IPMR;
	rtm->rtm_dst_len  = 32;
	rtm->rtm_src_len  = 32;
	rtm->rtm_tos      = 0;
	rtm->rtm_table    = mrt->id;
	if (nla_put_u32(skb, RTA_TABLE, mrt->id))
		goto nla_put_failure;
	rtm->rtm_type     = RTN_MULTICAST;
	rtm->rtm_scope    = RT_SCOPE_UNIVERSE;
	if (c->_c.mfc_flags & MFC_STATIC)
		rtm->rtm_protocol = RTPROT_STATIC;
	else
		rtm->rtm_protocol = RTPROT_MROUTED;
	rtm->rtm_flags    = 0;

	if (nla_put_in_addr(skb, RTA_SRC, c->mfc_origin) ||
	    nla_put_in_addr(skb, RTA_DST, c->mfc_mcastgrp))
		goto nla_put_failure;
	err = mr_fill_mroute(mrt, skb, &c->_c, rtm);
	/* do not break the dump if cache is unresolved */
	if (err < 0 && err != -ENOENT)
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static int _ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
			     u32 portid, u32 seq, struct mr_mfc *c, int cmd,
			     int flags)
{
	return ipmr_fill_mroute(mrt, skb, portid, seq, (struct mfc_cache *)c,
				cmd, flags);
}

static size_t mroute_msgsize(bool unresolved, int maxvif)
{
	size_t len =
		NLMSG_ALIGN(sizeof(struct rtmsg))
		+ nla_total_size(4)	/* RTA_TABLE */
		+ nla_total_size(4)	/* RTA_SRC */
		+ nla_total_size(4)	/* RTA_DST */
		;

	if (!unresolved)
		len = len
		      + nla_total_size(4)	/* RTA_IIF */
		      + nla_total_size(0)	/* RTA_MULTIPATH */
		      + maxvif * NLA_ALIGN(sizeof(struct rtnexthop))
						/* RTA_MFC_STATS */
		      + nla_total_size_64bit(sizeof(struct rta_mfc_stats))
		;

	return len;
}

static void mroute_netlink_event(struct mr_table *mrt, struct mfc_cache *mfc,
				 int cmd)
{
	struct net *net = read_pnet(&mrt->net);
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = nlmsg_new(mroute_msgsize(mfc->_c.mfc_parent >= MAXVIFS,
				       mrt->maxvif),
			GFP_ATOMIC);
	if (!skb)
		goto errout;

	err = ipmr_fill_mroute(mrt, skb, 0, 0, mfc, cmd, 0);
	if (err < 0)
		goto errout;

	rtnl_notify(skb, net, 0, RTNLGRP_IPV4_MROUTE, NULL, GFP_ATOMIC);
	return;

errout:
	kfree_skb(skb);
	if (err < 0)
		rtnl_set_sk_err(net, RTNLGRP_IPV4_MROUTE, err);
}

static size_t igmpmsg_netlink_msgsize(size_t payloadlen)
{
	size_t len =
		NLMSG_ALIGN(sizeof(struct rtgenmsg))
		+ nla_total_size(1)	/* IPMRA_CREPORT_MSGTYPE */
		+ nla_total_size(4)	/* IPMRA_CREPORT_VIF_ID */
		+ nla_total_size(4)	/* IPMRA_CREPORT_SRC_ADDR */
		+ nla_total_size(4)	/* IPMRA_CREPORT_DST_ADDR */
					/* IPMRA_CREPORT_PKT */
		+ nla_total_size(payloadlen)
		;

	return len;
}

static void igmpmsg_netlink_event(struct mr_table *mrt, struct sk_buff *pkt)
{
	struct net *net = read_pnet(&mrt->net);
	struct nlmsghdr *nlh;
	struct rtgenmsg *rtgenm;
	struct igmpmsg *msg;
	struct sk_buff *skb;
	struct nlattr *nla;
	int payloadlen;

	payloadlen = pkt->len - sizeof(struct igmpmsg);
	msg = (struct igmpmsg *)skb_network_header(pkt);

	skb = nlmsg_new(igmpmsg_netlink_msgsize(payloadlen), GFP_ATOMIC);
	if (!skb)
		goto errout;

	nlh = nlmsg_put(skb, 0, 0, RTM_NEWCACHEREPORT,
			sizeof(struct rtgenmsg), 0);
	if (!nlh)
		goto errout;
	rtgenm = nlmsg_data(nlh);
	rtgenm->rtgen_family = RTNL_FAMILY_IPMR;
	if (nla_put_u8(skb, IPMRA_CREPORT_MSGTYPE, msg->im_msgtype) ||
	    nla_put_u32(skb, IPMRA_CREPORT_VIF_ID, msg->im_vif) ||
	    nla_put_in_addr(skb, IPMRA_CREPORT_SRC_ADDR,
			    msg->im_src.s_addr) ||
	    nla_put_in_addr(skb, IPMRA_CREPORT_DST_ADDR,
			    msg->im_dst.s_addr))
		goto nla_put_failure;

	nla = nla_reserve(skb, IPMRA_CREPORT_PKT, payloadlen);
	if (!nla || skb_copy_bits(pkt, sizeof(struct igmpmsg),
				  nla_data(nla), payloadlen))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);

	rtnl_notify(skb, net, 0, RTNLGRP_IPV4_MROUTE_R, NULL, GFP_ATOMIC);
	return;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
errout:
	kfree_skb(skb);
	rtnl_set_sk_err(net, RTNLGRP_IPV4_MROUTE_R, -ENOBUFS);
}

static int ipmr_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
			     struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(in_skb->sk);
	struct nlattr *tb[RTA_MAX + 1];
	struct sk_buff *skb = NULL;
	struct mfc_cache *cache;
	struct mr_table *mrt;
	struct rtmsg *rtm;
	__be32 src, grp;
	u32 tableid;
	int err;

	err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX,
			  rtm_ipv4_policy, extack);
	if (err < 0)
		goto errout;

	rtm = nlmsg_data(nlh);

	src = tb[RTA_SRC] ? nla_get_in_addr(tb[RTA_SRC]) : 0;
	grp = tb[RTA_DST] ? nla_get_in_addr(tb[RTA_DST]) : 0;
	tableid = tb[RTA_TABLE] ? nla_get_u32(tb[RTA_TABLE]) : 0;

	mrt = ipmr_get_table(net, tableid ? tableid : RT_TABLE_DEFAULT);
	if (!mrt) {
		err = -ENOENT;
		goto errout_free;
	}

	/* entries are added/deleted only under RTNL */
	rcu_read_lock();
	cache = ipmr_cache_find(mrt, src, grp);
	rcu_read_unlock();
	if (!cache) {
		err = -ENOENT;
		goto errout_free;
	}

	skb = nlmsg_new(mroute_msgsize(false, mrt->maxvif), GFP_KERNEL);
	if (!skb) {
		err = -ENOBUFS;
		goto errout_free;
	}

	err = ipmr_fill_mroute(mrt, skb, NETLINK_CB(in_skb).portid,
			       nlh->nlmsg_seq, cache,
			       RTM_NEWROUTE, 0);
	if (err < 0)
		goto errout_free;

	err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);

errout:
	return err;

errout_free:
	kfree_skb(skb);
	goto errout;
}

static int ipmr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct fib_dump_filter filter = {};
	int err;

	if (cb->strict_check) {
		err = ip_valid_fib_dump_req(sock_net(skb->sk), cb->nlh,
					    &filter, cb);
		if (err < 0)
			return err;
	}

	if (filter.table_id) {
		struct mr_table *mrt;

		mrt = ipmr_get_table(sock_net(skb->sk), filter.table_id);
		if (!mrt) {
			if (filter.dump_all_families)
				return skb->len;

			NL_SET_ERR_MSG(cb->extack, "ipv4: MR table does not exist");
			return -ENOENT;
		}
		err = mr_table_dump(mrt, skb, cb, _ipmr_fill_mroute,
				    &mfc_unres_lock, &filter);
		return skb->len ? : err;
	}

	return mr_rtm_dumproute(skb, cb, ipmr_mr_table_iter,
				_ipmr_fill_mroute, &mfc_unres_lock, &filter);
}

static const struct nla_policy rtm_ipmr_policy[RTA_MAX + 1] = {
	[RTA_SRC]	= { .type = NLA_U32 },
	[RTA_DST]	= { .type = NLA_U32 },
	[RTA_IIF]	= { .type = NLA_U32 },
	[RTA_TABLE]	= { .type = NLA_U32 },
	[RTA_MULTIPATH]	= { .len = sizeof(struct rtnexthop) },
};

static bool ipmr_rtm_validate_proto(unsigned char rtm_protocol)
{
	switch (rtm_protocol) {
	case RTPROT_STATIC:
	case RTPROT_MROUTED:
		return true;
	}
	return false;
}

static int ipmr_nla_get_ttls(const struct nlattr *nla, struct mfcctl *mfcc)
{
	struct rtnexthop *rtnh = nla_data(nla);
	int remaining = nla_len(nla), vifi = 0;

	while (rtnh_ok(rtnh, remaining)) {
		mfcc->mfcc_ttls[vifi] = rtnh->rtnh_hops;
		if (++vifi == MAXVIFS)
			break;
		rtnh = rtnh_next(rtnh, &remaining);
	}

	return remaining > 0 ? -EINVAL : vifi;
}

/* returns < 0 on error, 0 for ADD_MFC and 1 for ADD_MFC_PROXY */
static int rtm_to_ipmr_mfcc(struct net *net, struct nlmsghdr *nlh,
			    struct mfcctl *mfcc, int *mrtsock,
			    struct mr_table **mrtret,
			    struct netlink_ext_ack *extack)
{
	struct net_device *dev = NULL;
	u32 tblid = RT_TABLE_DEFAULT;
	struct mr_table *mrt;
	struct nlattr *attr;
	struct rtmsg *rtm;
	int ret, rem;

	ret = nlmsg_validate(nlh, sizeof(*rtm), RTA_MAX, rtm_ipmr_policy,
			     extack);
	if (ret < 0)
		goto out;
	rtm = nlmsg_data(nlh);

	ret = -EINVAL;
	if (rtm->rtm_family != RTNL_FAMILY_IPMR || rtm->rtm_dst_len != 32 ||
	    rtm->rtm_type != RTN_MULTICAST ||
	    rtm->rtm_scope != RT_SCOPE_UNIVERSE ||
	    !ipmr_rtm_validate_proto(rtm->rtm_protocol))
		goto out;

	memset(mfcc, 0, sizeof(*mfcc));
	mfcc->mfcc_parent = -1;
	ret = 0;
	nlmsg_for_each_attr(attr, nlh, sizeof(struct rtmsg), rem) {
		switch (nla_type(attr)) {
		case RTA_SRC:
			mfcc->mfcc_origin.s_addr = nla_get_be32(attr);
			break;
		case RTA_DST:
			mfcc->mfcc_mcastgrp.s_addr = nla_get_be32(attr);
			break;
		case RTA_IIF:
			dev = __dev_get_by_index(net, nla_get_u32(attr));
			if (!dev) {
				ret = -ENODEV;
				goto out;
			}
			break;
		case RTA_MULTIPATH:
			if (ipmr_nla_get_ttls(attr, mfcc) < 0) {
				ret = -EINVAL;
				goto out;
			}
			break;
		case RTA_PREFSRC:
			ret = 1;
			break;
		case RTA_TABLE:
			tblid = nla_get_u32(attr);
			break;
		}
	}
	mrt = ipmr_get_table(net, tblid);
	if (!mrt) {
		ret = -ENOENT;
		goto out;
	}
	*mrtret = mrt;
	*mrtsock = rtm->rtm_protocol == RTPROT_MROUTED ? 1 : 0;
	if (dev)
		mfcc->mfcc_parent = ipmr_find_vif(mrt, dev);

out:
	return ret;
}

/* takes care of both newroute and delroute */
static int ipmr_rtm_route(struct sk_buff *skb, struct nlmsghdr *nlh,
			  struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	int ret, mrtsock, parent;
	struct mr_table *tbl;
	struct mfcctl mfcc;

	mrtsock = 0;
	tbl = NULL;
	ret = rtm_to_ipmr_mfcc(net, nlh, &mfcc, &mrtsock, &tbl, extack);
	if (ret < 0)
		return ret;

	parent = ret ? mfcc.mfcc_parent : -1;
	if (nlh->nlmsg_type == RTM_NEWROUTE)
		return ipmr_mfc_add(net, tbl, &mfcc, mrtsock, parent);
	else
		return ipmr_mfc_delete(tbl, &mfcc, parent);
}

static bool ipmr_fill_table(struct mr_table *mrt, struct sk_buff *skb)
{
	u32 queue_len = atomic_read(&mrt->cache_resolve_queue_len);

	if (nla_put_u32(skb, IPMRA_TABLE_ID, mrt->id) ||
	    nla_put_u32(skb, IPMRA_TABLE_CACHE_RES_QUEUE_LEN, queue_len) ||
	    nla_put_s32(skb, IPMRA_TABLE_MROUTE_REG_VIF_NUM,
			mrt->mroute_reg_vif_num) ||
	    nla_put_u8(skb, IPMRA_TABLE_MROUTE_DO_ASSERT,
		       mrt->mroute_do_assert) ||
	    nla_put_u8(skb, IPMRA_TABLE_MROUTE_DO_PIM, mrt->mroute_do_pim) ||
	    nla_put_u8(skb, IPMRA_TABLE_MROUTE_DO_WRVIFWHOLE,
		       mrt->mroute_do_wrvifwhole))
		return false;

	return true;
}

static bool ipmr_fill_vif(struct mr_table *mrt, u32 vifid, struct sk_buff *skb)
{
	struct nlattr *vif_nest;
	struct vif_device *vif;

	/* if the VIF doesn't exist just continue */
	if (!VIF_EXISTS(mrt, vifid))
		return true;

	vif = &mrt->vif_table[vifid];
	vif_nest = nla_nest_start(skb, IPMRA_VIF);
	if (!vif_nest)
		return false;
	if (nla_put_u32(skb, IPMRA_VIFA_IFINDEX, vif->dev->ifindex) ||
	    nla_put_u32(skb, IPMRA_VIFA_VIF_ID, vifid) ||
	    nla_put_u16(skb, IPMRA_VIFA_FLAGS, vif->flags) ||
	    nla_put_u64_64bit(skb, IPMRA_VIFA_BYTES_IN, vif->bytes_in,
			      IPMRA_VIFA_PAD) ||
	    nla_put_u64_64bit(skb, IPMRA_VIFA_BYTES_OUT, vif->bytes_out,
			      IPMRA_VIFA_PAD) ||
	    nla_put_u64_64bit(skb, IPMRA_VIFA_PACKETS_IN, vif->pkt_in,
			      IPMRA_VIFA_PAD) ||
	    nla_put_u64_64bit(skb, IPMRA_VIFA_PACKETS_OUT, vif->pkt_out,
			      IPMRA_VIFA_PAD) ||
	    nla_put_be32(skb, IPMRA_VIFA_LOCAL_ADDR, vif->local) ||
	    nla_put_be32(skb, IPMRA_VIFA_REMOTE_ADDR, vif->remote)) {
		nla_nest_cancel(skb, vif_nest);
		return false;
	}
	nla_nest_end(skb, vif_nest);
	return true;
}

static int ipmr_valid_dumplink(const struct nlmsghdr *nlh,
			       struct netlink_ext_ack *extack)
{
	struct ifinfomsg *ifm;

	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) {
		NL_SET_ERR_MSG(extack, "ipv4: Invalid header for ipmr link dump");
		return -EINVAL;
	}

	if (nlmsg_attrlen(nlh, sizeof(*ifm))) {
		NL_SET_ERR_MSG(extack, "Invalid data after header in ipmr link dump");
		return -EINVAL;
	}

	ifm = nlmsg_data(nlh);
	if (ifm->__ifi_pad || ifm->ifi_type || ifm->ifi_flags ||
	    ifm->ifi_change || ifm->ifi_index) {
		NL_SET_ERR_MSG(extack, "Invalid values in header for ipmr link dump request");
		return -EINVAL;
	}

	return 0;
}

static int ipmr_rtm_dumplink(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct nlmsghdr *nlh = NULL;
	unsigned int t = 0, s_t;
	unsigned int e = 0, s_e;
	struct mr_table *mrt;

	if (cb->strict_check) {
		int err = ipmr_valid_dumplink(cb->nlh, cb->extack);

		if (err < 0)
			return err;
	}

	s_t = cb->args[0];
	s_e = cb->args[1];

	ipmr_for_each_table(mrt, net) {
		struct nlattr *vifs, *af;
		struct ifinfomsg *hdr;
		u32 i;

		if (t < s_t)
			goto skip_table;
		nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid,
				cb->nlh->nlmsg_seq, RTM_NEWLINK,
				sizeof(*hdr), NLM_F_MULTI);
		if (!nlh)
			break;

		hdr = nlmsg_data(nlh);
		memset(hdr, 0, sizeof(*hdr));
		hdr->ifi_family = RTNL_FAMILY_IPMR;

		af = nla_nest_start(skb, IFLA_AF_SPEC);
		if (!af) {
			nlmsg_cancel(skb, nlh);
			goto out;
		}

		if (!ipmr_fill_table(mrt, skb)) {
			nlmsg_cancel(skb, nlh);
			goto out;
		}

		vifs = nla_nest_start(skb, IPMRA_TABLE_VIFS);
		if (!vifs) {
			nla_nest_end(skb, af);
			nlmsg_end(skb, nlh);
			goto out;
		}
		for (i = 0; i < mrt->maxvif; i++) {
			if (e < s_e)
				goto skip_entry;
			if (!ipmr_fill_vif(mrt, i, skb)) {
				nla_nest_end(skb, vifs);
				nla_nest_end(skb, af);
				nlmsg_end(skb, nlh);
				goto out;
			}
skip_entry:
			e++;
		}
		s_e = 0;
		e = 0;
		nla_nest_end(skb, vifs);
		nla_nest_end(skb, af);
		nlmsg_end(skb, nlh);
skip_table:
		t++;
	}

out:
	cb->args[1] = e;
	cb->args[0] = t;

	return skb->len;
}

#ifdef CONFIG_PROC_FS
/* The /proc interfaces to multicast routing:
 * /proc/net/ip_mr_cache and /proc/net/ip_mr_vif
 */
static void *ipmr_vif_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(mrt_lock)
{
	struct mr_vif_iter *iter = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr_table *mrt;

	mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
	if (!mrt)
		return ERR_PTR(-ENOENT);

	iter->mrt = mrt;

	read_lock(&mrt_lock);
	return mr_vif_seq_start(seq, pos);
}

static void ipmr_vif_seq_stop(struct seq_file *seq, void *v)
	__releases(mrt_lock)
{
	read_unlock(&mrt_lock);
}

static int ipmr_vif_seq_show(struct seq_file *seq, void *v)
{
	struct mr_vif_iter *iter = seq->private;
	struct mr_table *mrt = iter->mrt;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "Interface      BytesIn  PktsIn  BytesOut PktsOut Flags Local    Remote\n");
	} else {
		const struct vif_device *vif = v;
		const char *name = vif->dev ?
				   vif->dev->name : "none";

		seq_printf(seq,
			   "%2td %-10s %8ld %7ld  %8ld %7ld %05X %08X %08X\n",
			   vif - mrt->vif_table,
			   name, vif->bytes_in, vif->pkt_in,
			   vif->bytes_out, vif->pkt_out,
			   vif->flags, vif->local, vif->remote);
	}
	return 0;
}

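/* Sample /proc/net/ip_mr_vif output rendered by the show function above
 * (values and the device name are illustrative only):
 *
 *	Interface      BytesIn  PktsIn  BytesOut PktsOut Flags Local    Remote
 *	 0 eth0          150000     100    150000     100 00000 0102000A 00000000
 */
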
static const struct seq_operations ipmr_vif_seq_ops = {
	.start = ipmr_vif_seq_start,
	.next  = mr_vif_seq_next,
	.stop  = ipmr_vif_seq_stop,
	.show  = ipmr_vif_seq_show,
};

static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct net *net = seq_file_net(seq);
	struct mr_table *mrt;

	mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
	if (!mrt)
		return ERR_PTR(-ENOENT);

	return mr_mfc_seq_start(seq, pos, mrt, &mfc_unres_lock);
}

static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
{
	int n;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "Group    Origin   Iif     Pkts    Bytes    Wrong Oifs\n");
	} else {
		const struct mfc_cache *mfc = v;
		const struct mr_mfc_iter *it = seq->private;
		const struct mr_table *mrt = it->mrt;

		seq_printf(seq, "%08X %08X %-3hd",
			   (__force u32) mfc->mfc_mcastgrp,
			   (__force u32) mfc->mfc_origin,
			   mfc->_c.mfc_parent);

		if (it->cache != &mrt->mfc_unres_queue) {
			seq_printf(seq, " %8lu %8lu %8lu",
				   mfc->_c.mfc_un.res.pkt,
				   mfc->_c.mfc_un.res.bytes,
				   mfc->_c.mfc_un.res.wrong_if);
			for (n = mfc->_c.mfc_un.res.minvif;
			     n < mfc->_c.mfc_un.res.maxvif; n++) {
				if (VIF_EXISTS(mrt, n) &&
				    mfc->_c.mfc_un.res.ttls[n] < 255)
					seq_printf(seq,
						   " %2d:%-3d",
						   n, mfc->_c.mfc_un.res.ttls[n]);
			}
		} else {
			/* unresolved mfc_caches don't contain
			 * pkt, bytes and wrong_if values
			 */
			seq_printf(seq, " %8lu %8lu %8lu", 0ul, 0ul, 0ul);
		}
		seq_putc(seq, '\n');
	}
	return 0;
}

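/* Sample /proc/net/ip_mr_cache line for a resolved entry (illustrative
 * values only; each "Oifs" pair is vif:ttl-threshold as printed above):
 *
 *	Group    Origin   Iif     Pkts    Bytes    Wrong Oifs
 *	010000E0 0102000A 0        100   150000        0  1:1   2:64
 */
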
static const struct seq_operations ipmr_mfc_seq_ops = {
	.start = ipmr_mfc_seq_start,
	.next  = mr_mfc_seq_next,
	.stop  = mr_mfc_seq_stop,
	.show  = ipmr_mfc_seq_show,
};
#endif

#ifdef CONFIG_IP_PIMSM_V2
static const struct net_protocol pim_protocol = {
	.handler = pim_rcv,
	.netns_ok = 1,
};
#endif

static unsigned int ipmr_seq_read(struct net *net)
{
	ASSERT_RTNL();

	return net->ipv4.ipmr_seq + ipmr_rules_seq_read(net);
}

static int ipmr_dump(struct net *net, struct notifier_block *nb)
{
	return mr_dump(net, nb, RTNL_FAMILY_IPMR, ipmr_rules_dump,
		       ipmr_mr_table_iter, &mrt_lock);
}

static const struct fib_notifier_ops ipmr_notifier_ops_template = {
	.family		= RTNL_FAMILY_IPMR,
	.fib_seq_read	= ipmr_seq_read,
	.fib_dump	= ipmr_dump,
	.owner		= THIS_MODULE,
};

static int __net_init ipmr_notifier_init(struct net *net)
{
	struct fib_notifier_ops *ops;

	net->ipv4.ipmr_seq = 0;

	ops = fib_notifier_ops_register(&ipmr_notifier_ops_template, net);
	if (IS_ERR(ops))
		return PTR_ERR(ops);
	net->ipv4.ipmr_notifier_ops = ops;

	return 0;
}

static void __net_exit ipmr_notifier_exit(struct net *net)
{
	fib_notifier_ops_unregister(net->ipv4.ipmr_notifier_ops);
	net->ipv4.ipmr_notifier_ops = NULL;
}

/* Setup for IP multicast routing */
static int __net_init ipmr_net_init(struct net *net)
{
	int err;

	err = ipmr_notifier_init(net);
	if (err)
		goto ipmr_notifier_fail;

	err = ipmr_rules_init(net);
	if (err < 0)
		goto ipmr_rules_fail;

#ifdef CONFIG_PROC_FS
	err = -ENOMEM;
	if (!proc_create_net("ip_mr_vif", 0, net->proc_net, &ipmr_vif_seq_ops,
			     sizeof(struct mr_vif_iter)))
		goto proc_vif_fail;
	if (!proc_create_net("ip_mr_cache", 0, net->proc_net, &ipmr_mfc_seq_ops,
			     sizeof(struct mr_mfc_iter)))
		goto proc_cache_fail;
#endif
	return 0;

#ifdef CONFIG_PROC_FS
proc_cache_fail:
	remove_proc_entry("ip_mr_vif", net->proc_net);
proc_vif_fail:
	ipmr_rules_exit(net);
#endif
ipmr_rules_fail:
	ipmr_notifier_exit(net);
ipmr_notifier_fail:
	return err;
}

static void __net_exit ipmr_net_exit(struct net *net)
{
#ifdef CONFIG_PROC_FS
	remove_proc_entry("ip_mr_cache", net->proc_net);
	remove_proc_entry("ip_mr_vif", net->proc_net);
#endif
	ipmr_notifier_exit(net);
	ipmr_rules_exit(net);
}

static struct pernet_operations ipmr_net_ops = {
	.init = ipmr_net_init,
	.exit = ipmr_net_exit,
};

int __init ip_mr_init(void)
{
	int err;

	mrt_cachep = kmem_cache_create("ip_mrt_cache",
				       sizeof(struct mfc_cache),
				       0, SLAB_HWCACHE_ALIGN | SLAB_PANIC,
				       NULL);

	err = register_pernet_subsys(&ipmr_net_ops);
	if (err)
		goto reg_pernet_fail;

	err = register_netdevice_notifier(&ip_mr_notifier);
	if (err)
		goto reg_notif_fail;
#ifdef CONFIG_IP_PIMSM_V2
	if (inet_add_protocol(&pim_protocol, IPPROTO_PIM) < 0) {
		pr_err("%s: can't add PIM protocol\n", __func__);
		err = -EAGAIN;
		goto add_proto_fail;
	}
#endif
	rtnl_register(RTNL_FAMILY_IPMR, RTM_GETROUTE,
		      ipmr_rtm_getroute, ipmr_rtm_dumproute, 0);
	rtnl_register(RTNL_FAMILY_IPMR, RTM_NEWROUTE,
		      ipmr_rtm_route, NULL, 0);
	rtnl_register(RTNL_FAMILY_IPMR, RTM_DELROUTE,
		      ipmr_rtm_route, NULL, 0);
	rtnl_register(RTNL_FAMILY_IPMR, RTM_GETLINK,
		      NULL, ipmr_rtm_dumplink, 0);
	return 0;

#ifdef CONFIG_IP_PIMSM_V2
add_proto_fail:
	unregister_netdevice_notifier(&ip_mr_notifier);
#endif
reg_notif_fail:
	unregister_pernet_subsys(&ipmr_net_ops);
reg_pernet_fail:
	kmem_cache_destroy(mrt_cachep);
	return err;
}