route.c 67 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
7
  1. /*
  2. * INET An implementation of the TCP/IP protocol suite for the LINUX
  3. * operating system. INET is implemented using the BSD Socket
  4. * interface as the means of communication with the user level.
  5. *
  6. * ROUTE - implementation of the IP router.
  7. *
  8. * Authors: Ross Biro
  9. * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
  10. * Alan Cox, <gw4pts@gw4pts.ampr.org>
  11. * Linus Torvalds, <Linus.Torvalds@helsinki.fi>
  12. * Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
  13. *
  14. * Fixes:
  15. * Alan Cox : Verify area fixes.
  16. * Alan Cox : cli() protects routing changes
  17. * Rui Oliveira : ICMP routing table updates
  18. * (rco@di.uminho.pt) Routing table insertion and update
  19. * Linus Torvalds : Rewrote bits to be sensible
  20. * Alan Cox : Added BSD route gw semantics
  21. * Alan Cox : Super /proc >4K
  22. * Alan Cox : MTU in route table
  23. * Alan Cox : MSS actually. Also added the window
  24. * clamper.
  25. * Sam Lantinga : Fixed route matching in rt_del()
  26. * Alan Cox : Routing cache support.
  27. * Alan Cox : Removed compatibility cruft.
  28. * Alan Cox : RTF_REJECT support.
  29. * Alan Cox : TCP irtt support.
  30. * Jonathan Naylor : Added Metric support.
  31. * Miquel van Smoorenburg : BSD API fixes.
  32. * Miquel van Smoorenburg : Metrics.
  33. * Alan Cox : Use __u32 properly
  34. * Alan Cox : Aligned routing errors more closely with BSD
  35. * our system is still very different.
  36. * Alan Cox : Faster /proc handling
  37. * Alexey Kuznetsov : Massive rework to support tree based routing,
  38. * routing caches and better behaviour.
  39. *
  40. * Olaf Erb : irtt wasn't being copied right.
  41. * Bjorn Ekwall : Kerneld route support.
  42. * Alan Cox : Multicast fixed (I hope)
  43. * Pavel Krauz : Limited broadcast fixed
  44. * Mike McLagan : Routing by source
  45. * Alexey Kuznetsov : End of old history. Split to fib.c and
  46. * route.c and rewritten from scratch.
  47. * Andi Kleen : Load-limit warning messages.
  48. * Vitaly E. Lavrov : Transparent proxy revived after year coma.
  49. * Vitaly E. Lavrov : Race condition in ip_route_input_slow.
  50. * Tobias Ringstrom : Uninitialized res.type in ip_route_output_slow.
  51. * Vladimir V. Ivanov : IP rule info (flowid) is really useful.
  52. * Marc Boucher : routing by fwmark
  53. * Robert Olsson : Added rt_cache statistics
  54. * Arnaldo C. Melo : Convert proc stuff to seq_file
  55. * Eric Dumazet : hashed spinlocks and rt_check_expire() fixes.
  56. * Ilia Sotnikov : Ignore TOS on PMTUD and Redirect
  57. * Ilia Sotnikov : Removed TOS from hash calculations
  58. *
  59. * This program is free software; you can redistribute it and/or
  60. * modify it under the terms of the GNU General Public License
  61. * as published by the Free Software Foundation; either version
  62. * 2 of the License, or (at your option) any later version.
  63. */
  64. #define pr_fmt(fmt) "IPv4: " fmt
  65. #include <linux/module.h>
  66. #include <asm/uaccess.h>
  67. #include <linux/bitops.h>
  68. #include <linux/types.h>
  69. #include <linux/kernel.h>
  70. #include <linux/mm.h>
  71. #include <linux/string.h>
  72. #include <linux/socket.h>
  73. #include <linux/sockios.h>
  74. #include <linux/errno.h>
  75. #include <linux/in.h>
  76. #include <linux/inet.h>
  77. #include <linux/netdevice.h>
  78. #include <linux/proc_fs.h>
  79. #include <linux/init.h>
  80. #include <linux/skbuff.h>
  81. #include <linux/inetdevice.h>
  82. #include <linux/igmp.h>
  83. #include <linux/pkt_sched.h>
  84. #include <linux/mroute.h>
  85. #include <linux/netfilter_ipv4.h>
  86. #include <linux/random.h>
  87. #include <linux/rcupdate.h>
  88. #include <linux/times.h>
  89. #include <linux/slab.h>
  90. #include <linux/jhash.h>
  91. #include <net/dst.h>
  92. #include <net/net_namespace.h>
  93. #include <net/protocol.h>
  94. #include <net/ip.h>
  95. #include <net/route.h>
  96. #include <net/inetpeer.h>
  97. #include <net/sock.h>
  98. #include <net/ip_fib.h>
  99. #include <net/arp.h>
  100. #include <net/tcp.h>
  101. #include <net/icmp.h>
  102. #include <net/xfrm.h>
  103. #include <net/netevent.h>
  104. #include <net/rtnetlink.h>
  105. #ifdef CONFIG_SYSCTL
  106. #include <linux/sysctl.h>
  107. #include <linux/kmemleak.h>
  108. #endif
  109. #include <net/secure_seq.h>
/* Extract the routing-relevant TOS bits (plus the legacy RTO_ONLINK flag)
 * from a flow description.
 */
#define RT_FL_TOS(oldflp4) \
	((oldflp4)->flowi4_tos & (IPTOS_RT_MASK | RTO_ONLINK))

#define RT_GC_TIMEOUT (300*HZ)

/* Routing behaviour tunables; presumably exported via sysctl elsewhere in
 * this file (CONFIG_SYSCTL is included above) — verify before relying on it.
 */
static int ip_rt_max_size;
static int ip_rt_redirect_number __read_mostly = 9;
static int ip_rt_redirect_load __read_mostly = HZ / 50;
static int ip_rt_redirect_silence __read_mostly = ((HZ / 50) << (9 + 1));
static int ip_rt_error_cost __read_mostly = HZ;
static int ip_rt_error_burst __read_mostly = 5 * HZ;
static int ip_rt_mtu_expires __read_mostly = 10 * 60 * HZ;
static int ip_rt_min_pmtu __read_mostly = 512 + 20 + 20;
static int ip_rt_min_advmss __read_mostly = 256;
/*
 * Interface to generic destination cache.
 */
static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie);
static unsigned int ipv4_default_advmss(const struct dst_entry *dst);
static unsigned int ipv4_mtu(const struct dst_entry *dst);
static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst);
static void ipv4_link_failure(struct sk_buff *skb);
static void ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
			      struct sk_buff *skb, u32 mtu);
static void ip_do_redirect(struct dst_entry *dst, struct sock *sk,
			   struct sk_buff *skb);
static void ipv4_dst_destroy(struct dst_entry *dst);

/* This backend never hands out a writable metrics array: any caller asking
 * for a COW copy is a bug, so warn loudly and return NULL.
 */
static u32 *ipv4_cow_metrics(struct dst_entry *dst, unsigned long old)
{
	WARN_ON(1);
	return NULL;
}

static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
					   struct sk_buff *skb,
					   const void *daddr);
/* Hooks wiring IPv4 routes into the protocol-independent dst cache. */
static struct dst_ops ipv4_dst_ops = {
	.family			= AF_INET,
	.protocol		= cpu_to_be16(ETH_P_IP),
	.check			= ipv4_dst_check,
	.default_advmss		= ipv4_default_advmss,
	.mtu			= ipv4_mtu,
	.cow_metrics		= ipv4_cow_metrics,	/* always WARNs; see above */
	.destroy		= ipv4_dst_destroy,
	.negative_advice	= ipv4_negative_advice,
	.link_failure		= ipv4_link_failure,
	.update_pmtu		= ip_rt_update_pmtu,
	.redirect		= ip_do_redirect,
	.local_out		= __ip_local_out,
	.neigh_lookup		= ipv4_neigh_lookup,
};
/* ECN_OR_COST(x) expands to the same TC_PRIO_x constant, so adjacent table
 * entries deliberately share a band.
 */
#define ECN_OR_COST(class) TC_PRIO_##class

/* 16-entry TOS-to-scheduler-priority table; indexing scheme is defined by
 * the callers (not visible here) — presumably the IPv4 TOS field.
 */
const __u8 ip_tos2prio[16] = {
	TC_PRIO_BESTEFFORT,
	ECN_OR_COST(BESTEFFORT),
	TC_PRIO_BESTEFFORT,
	ECN_OR_COST(BESTEFFORT),
	TC_PRIO_BULK,
	ECN_OR_COST(BULK),
	TC_PRIO_BULK,
	ECN_OR_COST(BULK),
	TC_PRIO_INTERACTIVE,
	ECN_OR_COST(INTERACTIVE),
	TC_PRIO_INTERACTIVE,
	ECN_OR_COST(INTERACTIVE),
	TC_PRIO_INTERACTIVE_BULK,
	ECN_OR_COST(INTERACTIVE_BULK),
	TC_PRIO_INTERACTIVE_BULK,
	ECN_OR_COST(INTERACTIVE_BULK)
};
EXPORT_SYMBOL(ip_tos2prio);
/* Per-CPU routing statistics, dumped via /proc/net/stat/rt_cache below. */
static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat);
#define RT_CACHE_STAT_INC(field) raw_cpu_inc(rt_cache_stat.field)
  180. #ifdef CONFIG_PROC_FS
  181. static void *rt_cache_seq_start(struct seq_file *seq, loff_t *pos)
  182. {
  183. if (*pos)
  184. return NULL;
  185. return SEQ_START_TOKEN;
  186. }
  187. static void *rt_cache_seq_next(struct seq_file *seq, void *v, loff_t *pos)
  188. {
  189. ++*pos;
  190. return NULL;
  191. }
/* No per-iteration state to release. */
static void rt_cache_seq_stop(struct seq_file *seq, void *v)
{
}
  195. static int rt_cache_seq_show(struct seq_file *seq, void *v)
  196. {
  197. if (v == SEQ_START_TOKEN)
  198. seq_printf(seq, "%-127s\n",
  199. "Iface\tDestination\tGateway \tFlags\t\tRefCnt\tUse\t"
  200. "Metric\tSource\t\tMTU\tWindow\tIRTT\tTOS\tHHRef\t"
  201. "HHUptod\tSpecDst");
  202. return 0;
  203. }
static const struct seq_operations rt_cache_seq_ops = {
	.start  = rt_cache_seq_start,
	.next   = rt_cache_seq_next,
	.stop   = rt_cache_seq_stop,
	.show   = rt_cache_seq_show,
};

/* Open /proc/net/rt_cache as a plain seq_file (no private state). */
static int rt_cache_seq_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &rt_cache_seq_ops);
}

static const struct file_operations rt_cache_seq_fops = {
	.owner	 = THIS_MODULE,
	.open	 = rt_cache_seq_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release,
};
  221. static void *rt_cpu_seq_start(struct seq_file *seq, loff_t *pos)
  222. {
  223. int cpu;
  224. if (*pos == 0)
  225. return SEQ_START_TOKEN;
  226. for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
  227. if (!cpu_possible(cpu))
  228. continue;
  229. *pos = cpu+1;
  230. return &per_cpu(rt_cache_stat, cpu);
  231. }
  232. return NULL;
  233. }
  234. static void *rt_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
  235. {
  236. int cpu;
  237. for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
  238. if (!cpu_possible(cpu))
  239. continue;
  240. *pos = cpu+1;
  241. return &per_cpu(rt_cache_stat, cpu);
  242. }
  243. return NULL;
  244. }
/* No per-iteration state to release. */
static void rt_cpu_seq_stop(struct seq_file *seq, void *v)
{
}
/* Emit one /proc/net/stat/rt_cache line: the column header for the start
 * token, otherwise one CPU's counters.  Several historical counters no
 * longer exist and are printed as literal zeros so the column layout
 * (parsed by tools such as lnstat) stays stable.
 */
static int rt_cpu_seq_show(struct seq_file *seq, void *v)
{
	struct rt_cache_stat *st = v;

	if (v == SEQ_START_TOKEN) {
		seq_printf(seq, "entries in_hit in_slow_tot in_slow_mc in_no_route in_brd in_martian_dst in_martian_src out_hit out_slow_tot out_slow_mc gc_total gc_ignored gc_goal_miss gc_dst_overflow in_hlist_search out_hlist_search\n");
		return 0;
	}

	/* "entries" is the global dst count, not a per-CPU figure. */
	seq_printf(seq,"%08x %08x %08x %08x %08x %08x %08x %08x "
		   " %08x %08x %08x %08x %08x %08x %08x %08x %08x \n",
		   dst_entries_get_slow(&ipv4_dst_ops),
		   0, /* st->in_hit */
		   st->in_slow_tot,
		   st->in_slow_mc,
		   st->in_no_route,
		   st->in_brd,
		   st->in_martian_dst,
		   st->in_martian_src,
		   0, /* st->out_hit */
		   st->out_slow_tot,
		   st->out_slow_mc,
		   0, /* st->gc_total */
		   0, /* st->gc_ignored */
		   0, /* st->gc_goal_miss */
		   0, /* st->gc_dst_overflow */
		   0, /* st->in_hlist_search */
		   0  /* st->out_hlist_search */
		);
	return 0;
}
static const struct seq_operations rt_cpu_seq_ops = {
	.start  = rt_cpu_seq_start,
	.next   = rt_cpu_seq_next,
	.stop   = rt_cpu_seq_stop,
	.show   = rt_cpu_seq_show,
};

/* Open /proc/net/stat/rt_cache as a plain seq_file. */
static int rt_cpu_seq_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &rt_cpu_seq_ops);
}

static const struct file_operations rt_cpu_seq_fops = {
	.owner	 = THIS_MODULE,
	.open	 = rt_cpu_seq_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release,
};
  294. #ifdef CONFIG_IP_ROUTE_CLASSID
  295. static int rt_acct_proc_show(struct seq_file *m, void *v)
  296. {
  297. struct ip_rt_acct *dst, *src;
  298. unsigned int i, j;
  299. dst = kcalloc(256, sizeof(struct ip_rt_acct), GFP_KERNEL);
  300. if (!dst)
  301. return -ENOMEM;
  302. for_each_possible_cpu(i) {
  303. src = (struct ip_rt_acct *)per_cpu_ptr(ip_rt_acct, i);
  304. for (j = 0; j < 256; j++) {
  305. dst[j].o_bytes += src[j].o_bytes;
  306. dst[j].o_packets += src[j].o_packets;
  307. dst[j].i_bytes += src[j].i_bytes;
  308. dst[j].i_packets += src[j].i_packets;
  309. }
  310. }
  311. seq_write(m, dst, 256 * sizeof(struct ip_rt_acct));
  312. kfree(dst);
  313. return 0;
  314. }
/* Open /proc/net/rt_acct as a single-shot seq_file. */
static int rt_acct_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, rt_acct_proc_show, NULL);
}

static const struct file_operations rt_acct_proc_fops = {
	.owner	 = THIS_MODULE,
	.open	 = rt_acct_proc_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = single_release,
};
  326. #endif
/* Per-namespace proc setup.  Creates:
 *   /proc/net/rt_cache       - legacy cache dump (header only)
 *   /proc/net/stat/rt_cache  - per-CPU statistics
 *   /proc/net/rt_acct        - classid accounting (CONFIG_IP_ROUTE_CLASSID)
 * On any failure, unwinds the entries already registered and returns
 * -ENOMEM.
 */
static int __net_init ip_rt_do_proc_init(struct net *net)
{
	struct proc_dir_entry *pde;

	pde = proc_create("rt_cache", S_IRUGO, net->proc_net,
			  &rt_cache_seq_fops);
	if (!pde)
		goto err1;

	/* Same name, different directory (net->proc_net_stat). */
	pde = proc_create("rt_cache", S_IRUGO,
			  net->proc_net_stat, &rt_cpu_seq_fops);
	if (!pde)
		goto err2;

#ifdef CONFIG_IP_ROUTE_CLASSID
	pde = proc_create("rt_acct", 0, net->proc_net, &rt_acct_proc_fops);
	if (!pde)
		goto err3;
#endif
	return 0;

#ifdef CONFIG_IP_ROUTE_CLASSID
err3:
	remove_proc_entry("rt_cache", net->proc_net_stat);
#endif
err2:
	remove_proc_entry("rt_cache", net->proc_net);
err1:
	return -ENOMEM;
}
/* Per-namespace proc teardown: mirror image of ip_rt_do_proc_init(). */
static void __net_exit ip_rt_do_proc_exit(struct net *net)
{
	remove_proc_entry("rt_cache", net->proc_net_stat);
	remove_proc_entry("rt_cache", net->proc_net);
#ifdef CONFIG_IP_ROUTE_CLASSID
	remove_proc_entry("rt_acct", net->proc_net);
#endif
}
static struct pernet_operations ip_rt_proc_ops __net_initdata = {
	.init = ip_rt_do_proc_init,
	.exit = ip_rt_do_proc_exit,
};

/* Register the per-namespace proc setup/teardown hooks at boot. */
static int __init ip_rt_proc_init(void)
{
	return register_pernet_subsys(&ip_rt_proc_ops);
}
  369. #else
/* !CONFIG_PROC_FS stub: nothing to create, always succeeds. */
static inline int ip_rt_proc_init(void)
{
	return 0;
}
  374. #endif /* CONFIG_PROC_FS */
/* A cached route is stale once its generation id no longer matches the
 * owning namespace's current IPv4 routing generation (see rt_cache_flush).
 */
static inline bool rt_is_expired(const struct rtable *rth)
{
	return rth->rt_genid != rt_genid_ipv4(dev_net(rth->dst.dev));
}
/* Invalidate all cached routes in @net by bumping the generation id;
 * stale entries are then detected lazily by rt_is_expired().
 */
void rt_cache_flush(struct net *net)
{
	rt_genid_bump_ipv4(net);
}
  383. static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
  384. struct sk_buff *skb,
  385. const void *daddr)
  386. {
  387. struct net_device *dev = dst->dev;
  388. const __be32 *pkey = daddr;
  389. const struct rtable *rt;
  390. struct neighbour *n;
  391. rt = (const struct rtable *) dst;
  392. if (rt->rt_gateway)
  393. pkey = (const __be32 *) &rt->rt_gateway;
  394. else if (skb)
  395. pkey = &ip_hdr(skb)->daddr;
  396. n = __ipv4_neigh_lookup(dev, *(__force u32 *)pkey);
  397. if (n)
  398. return n;
  399. return neigh_create(&arp_tbl, pkey, dev);
  400. }
#define IP_IDENTS_SZ 2048u

/* One IP-ID generator: a rolling counter plus the (32-bit) jiffies value
 * of its last use, used below to perturb idle generators.
 */
struct ip_ident_bucket {
	atomic_t	id;
	u32		stamp32;
};

static struct ip_ident_bucket *ip_idents __read_mostly;
/* In order to protect privacy, we add a perturbation to identifiers
 * if one generator is seldom used. This makes hard for an attacker
 * to infer how many packets were sent between two points in time.
 */
u32 ip_idents_reserve(u32 hash, int segs)
{
	struct ip_ident_bucket *bucket = ip_idents + hash % IP_IDENTS_SZ;
	u32 old = ACCESS_ONCE(bucket->stamp32);
	u32 now = (u32)jiffies;
	u32 delta = 0;

	/* Only the cmpxchg winner applies the random jump, so at most one
	 * perturbation is added per jiffy tick per bucket.
	 */
	if (old != now && cmpxchg(&bucket->stamp32, old, now) == old)
		delta = prandom_u32_max(now - old);

	/* Reserve @segs consecutive ids; return the first of the range. */
	return atomic_add_return(segs + delta, &bucket->id) - segs;
}
EXPORT_SYMBOL(ip_idents_reserve);
  421. EXPORT_SYMBOL(ip_idents_reserve);
  422. void __ip_select_ident(struct iphdr *iph, int segs)
  423. {
  424. static u32 ip_idents_hashrnd __read_mostly;
  425. u32 hash, id;
  426. net_get_random_once(&ip_idents_hashrnd, sizeof(ip_idents_hashrnd));
  427. hash = jhash_3words((__force u32)iph->daddr,
  428. (__force u32)iph->saddr,
  429. iph->protocol,
  430. ip_idents_hashrnd);
  431. id = ip_idents_reserve(hash, segs);
  432. iph->id = htons(id);
  433. }
  434. EXPORT_SYMBOL(__ip_select_ident);
/* Initialize @fl4 for an output lookup from packet-derived parameters,
 * letting @sk (when non-NULL) override interface, mark, TOS and protocol
 * with its connected values.
 */
static void __build_flow_key(struct flowi4 *fl4, const struct sock *sk,
			     const struct iphdr *iph,
			     int oif, u8 tos,
			     u8 prot, u32 mark, int flow_flags)
{
	if (sk) {
		const struct inet_sock *inet = inet_sk(sk);

		oif = sk->sk_bound_dev_if;
		mark = sk->sk_mark;
		tos = RT_CONN_FLAGS(sk);
		/* Raw sockets with IP_HDRINCL route as IPPROTO_RAW. */
		prot = inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol;
	}
	flowi4_init_output(fl4, oif, mark, tos,
			   RT_SCOPE_UNIVERSE, prot,
			   flow_flags,
			   iph->daddr, iph->saddr, 0, 0);
}
  452. static void build_skb_flow_key(struct flowi4 *fl4, const struct sk_buff *skb,
  453. const struct sock *sk)
  454. {
  455. const struct iphdr *iph = ip_hdr(skb);
  456. int oif = skb->dev->ifindex;
  457. u8 tos = RT_TOS(iph->tos);
  458. u8 prot = iph->protocol;
  459. u32 mark = skb->mark;
  460. __build_flow_key(fl4, sk, iph, oif, tos, prot, mark, 0);
  461. }
  462. static void build_sk_flow_key(struct flowi4 *fl4, const struct sock *sk)
  463. {
  464. const struct inet_sock *inet = inet_sk(sk);
  465. const struct ip_options_rcu *inet_opt;
  466. __be32 daddr = inet->inet_daddr;
  467. rcu_read_lock();
  468. inet_opt = rcu_dereference(inet->inet_opt);
  469. if (inet_opt && inet_opt->opt.srr)
  470. daddr = inet_opt->opt.faddr;
  471. flowi4_init_output(fl4, sk->sk_bound_dev_if, sk->sk_mark,
  472. RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
  473. inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol,
  474. inet_sk_flowi_flags(sk),
  475. daddr, inet->inet_saddr, 0, 0);
  476. rcu_read_unlock();
  477. }
/* Build a flow key from the packet when one is available, otherwise from
 * the socket's connected addresses.
 */
static void ip_rt_build_flow_key(struct flowi4 *fl4, const struct sock *sk,
				 const struct sk_buff *skb)
{
	if (skb)
		build_skb_flow_key(fl4, skb, sk);
	else
		build_sk_flow_key(fl4, sk);
}
/* Free a cached route once all current RCU readers are done with it. */
static inline void rt_free(struct rtable *rt)
{
	call_rcu(&rt->dst.rcu_head, dst_rcu_free);
}

/* Serializes writers of the per-nexthop exception (fnhe) tables. */
static DEFINE_SPINLOCK(fnhe_lock);
/* Detach and RCU-free the input and output routes cached on an exception
 * entry.  The caller in this file holds fnhe_lock, which is what makes the
 * read-then-clear sequence safe against concurrent writers.
 */
static void fnhe_flush_routes(struct fib_nh_exception *fnhe)
{
	struct rtable *rt;

	rt = rcu_dereference(fnhe->fnhe_rth_input);
	if (rt) {
		RCU_INIT_POINTER(fnhe->fnhe_rth_input, NULL);
		rt_free(rt);
	}
	rt = rcu_dereference(fnhe->fnhe_rth_output);
	if (rt) {
		RCU_INIT_POINTER(fnhe->fnhe_rth_output, NULL);
		rt_free(rt);
	}
}
  505. static struct fib_nh_exception *fnhe_oldest(struct fnhe_hash_bucket *hash)
  506. {
  507. struct fib_nh_exception *fnhe, *oldest;
  508. oldest = rcu_dereference(hash->chain);
  509. for (fnhe = rcu_dereference(oldest->fnhe_next); fnhe;
  510. fnhe = rcu_dereference(fnhe->fnhe_next)) {
  511. if (time_before(fnhe->fnhe_stamp, oldest->fnhe_stamp))
  512. oldest = fnhe;
  513. }
  514. fnhe_flush_routes(oldest);
  515. return oldest;
  516. }
  517. static inline u32 fnhe_hashfun(__be32 daddr)
  518. {
  519. u32 hval;
  520. hval = (__force u32) daddr;
  521. hval ^= (hval >> 11) ^ (hval >> 22);
  522. return hval & (FNHE_HASH_SIZE - 1);
  523. }
  524. static void fill_route_from_fnhe(struct rtable *rt, struct fib_nh_exception *fnhe)
  525. {
  526. rt->rt_pmtu = fnhe->fnhe_pmtu;
  527. rt->dst.expires = fnhe->fnhe_expires;
  528. if (fnhe->fnhe_gw) {
  529. rt->rt_flags |= RTCF_REDIRECTED;
  530. rt->rt_gateway = fnhe->fnhe_gw;
  531. rt->rt_uses_gateway = 1;
  532. }
  533. }
/* Record or refresh a per-destination routing exception on nexthop @nh.
 *
 * @daddr:   destination the exception applies to
 * @gw:      redirect gateway, or 0 when only updating PMTU
 * @pmtu:    learned path MTU, or 0 when only updating the gateway
 * @expires: expiry time for the PMTU information
 *
 * Runs under fnhe_lock; readers traverse the table under RCU.
 */
static void update_or_create_fnhe(struct fib_nh *nh, __be32 daddr, __be32 gw,
				  u32 pmtu, unsigned long expires)
{
	struct fnhe_hash_bucket *hash;
	struct fib_nh_exception *fnhe;
	struct rtable *rt;
	unsigned int i;
	int depth;
	u32 hval = fnhe_hashfun(daddr);

	spin_lock_bh(&fnhe_lock);

	/* First exception on this nexthop: allocate the hash table lazily. */
	hash = nh->nh_exceptions;
	if (!hash) {
		hash = kzalloc(FNHE_HASH_SIZE * sizeof(*hash), GFP_ATOMIC);
		if (!hash)
			goto out_unlock;
		nh->nh_exceptions = hash;
	}

	hash += hval;

	/* Look for an existing entry for @daddr, measuring chain depth. */
	depth = 0;
	for (fnhe = rcu_dereference(hash->chain); fnhe;
	     fnhe = rcu_dereference(fnhe->fnhe_next)) {
		if (fnhe->fnhe_daddr == daddr)
			break;
		depth++;
	}

	if (fnhe) {
		/* Update only the fields the caller actually supplied. */
		if (gw)
			fnhe->fnhe_gw = gw;
		if (pmtu) {
			fnhe->fnhe_pmtu = pmtu;
			/* Avoid a 0 expiry, which would read as "no expiry". */
			fnhe->fnhe_expires = max(1UL, expires);
		}
		/* Update all cached dsts too */
		rt = rcu_dereference(fnhe->fnhe_rth_input);
		if (rt)
			fill_route_from_fnhe(rt, fnhe);
		rt = rcu_dereference(fnhe->fnhe_rth_output);
		if (rt)
			fill_route_from_fnhe(rt, fnhe);
	} else {
		/* Recycle the oldest entry rather than growing the chain
		 * without bound.
		 */
		if (depth > FNHE_RECLAIM_DEPTH)
			fnhe = fnhe_oldest(hash);
		else {
			fnhe = kzalloc(sizeof(*fnhe), GFP_ATOMIC);
			if (!fnhe)
				goto out_unlock;

			fnhe->fnhe_next = hash->chain;
			/* Publish only after the entry is fully linked. */
			rcu_assign_pointer(hash->chain, fnhe);
		}
		fnhe->fnhe_genid = fnhe_genid(dev_net(nh->nh_dev));
		fnhe->fnhe_daddr = daddr;
		fnhe->fnhe_gw = gw;
		fnhe->fnhe_pmtu = pmtu;
		fnhe->fnhe_expires = expires;

		/* Exception created; mark the cached routes for the nexthop
		 * stale, so anyone caching it rechecks if this exception
		 * applies to them.
		 */
		rt = rcu_dereference(nh->nh_rth_input);
		if (rt)
			rt->dst.obsolete = DST_OBSOLETE_KILL;

		for_each_possible_cpu(i) {
			struct rtable __rcu **prt;

			prt = per_cpu_ptr(nh->nh_pcpu_rth_output, i);
			rt = rcu_dereference(*prt);
			if (rt)
				rt->dst.obsolete = DST_OBSOLETE_KILL;
		}
	}

	fnhe->fnhe_stamp = jiffies;

out_unlock:
	spin_unlock_bh(&fnhe_lock);
}
/* Process an ICMP redirect for route @rt: validate the advertised gateway
 * and, when acceptable, record it as a nexthop exception so later lookups
 * use the new gateway.  @kill_route additionally marks the current cached
 * route obsolete so its users re-validate it.
 */
static void __ip_do_redirect(struct rtable *rt, struct sk_buff *skb, struct flowi4 *fl4,
			     bool kill_route)
{
	__be32 new_gw = icmp_hdr(skb)->un.gateway;
	__be32 old_gw = ip_hdr(skb)->saddr;
	struct net_device *dev = skb->dev;
	struct in_device *in_dev;
	struct fib_result res;
	struct neighbour *n;
	struct net *net;

	/* Only the four defined redirect codes are honoured. */
	switch (icmp_hdr(skb)->code & 7) {
	case ICMP_REDIR_NET:
	case ICMP_REDIR_NETTOS:
	case ICMP_REDIR_HOST:
	case ICMP_REDIR_HOSTTOS:
		break;

	default:
		return;
	}

	/* A redirect is only credible if it comes from the route's current
	 * gateway.
	 */
	if (rt->rt_gateway != old_gw)
		return;

	in_dev = __in_dev_get_rcu(dev);
	if (!in_dev)
		return;

	net = dev_net(dev);
	/* Reject no-op or obviously bogus gateways, and redirects that are
	 * administratively disabled on this device.
	 */
	if (new_gw == old_gw || !IN_DEV_RX_REDIRECTS(in_dev) ||
	    ipv4_is_multicast(new_gw) || ipv4_is_lbcast(new_gw) ||
	    ipv4_is_zeronet(new_gw))
		goto reject_redirect;

	if (!IN_DEV_SHARED_MEDIA(in_dev)) {
		/* The new gateway must be directly reachable on this link. */
		if (!inet_addr_onlink(in_dev, new_gw, old_gw))
			goto reject_redirect;
		if (IN_DEV_SEC_REDIRECTS(in_dev) && ip_fib_check_default(new_gw, dev))
			goto reject_redirect;
	} else {
		if (inet_addr_type(net, new_gw) != RTN_UNICAST)
			goto reject_redirect;
	}

	n = ipv4_neigh_lookup(&rt->dst, NULL, &new_gw);
	if (n) {
		if (!(n->nud_state & NUD_VALID)) {
			/* Gateway not resolved yet: kick off resolution and
			 * act on the redirect later (if it is repeated).
			 */
			neigh_event_send(n, NULL);
		} else {
			if (fib_lookup(net, fl4, &res) == 0) {
				struct fib_nh *nh = &FIB_RES_NH(res);

				/* Gateway-only exception: pmtu/expires = 0. */
				update_or_create_fnhe(nh, fl4->daddr, new_gw,
						      0, 0);
			}
			if (kill_route)
				rt->dst.obsolete = DST_OBSOLETE_KILL;
			call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, n);
		}
		neigh_release(n);
	}
	return;

reject_redirect:
#ifdef CONFIG_IP_ROUTE_VERBOSE
	if (IN_DEV_LOG_MARTIANS(in_dev)) {
		const struct iphdr *iph = (const struct iphdr *) skb->data;
		__be32 daddr = iph->daddr;
		__be32 saddr = iph->saddr;

		net_info_ratelimited("Redirect from %pI4 on %s about %pI4 ignored\n"
				     " Advised path = %pI4 -> %pI4\n",
				     &old_gw, dev->name, &new_gw,
				     &saddr, &daddr);
	}
#endif
	;
}
  676. static void ip_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb)
  677. {
  678. struct rtable *rt;
  679. struct flowi4 fl4;
  680. const struct iphdr *iph = (const struct iphdr *) skb->data;
  681. int oif = skb->dev->ifindex;
  682. u8 tos = RT_TOS(iph->tos);
  683. u8 prot = iph->protocol;
  684. u32 mark = skb->mark;
  685. rt = (struct rtable *) dst;
  686. __build_flow_key(&fl4, sk, iph, oif, tos, prot, mark, 0);
  687. __ip_do_redirect(rt, skb, &fl4, true);
  688. }
  689. static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
  690. {
  691. struct rtable *rt = (struct rtable *)dst;
  692. struct dst_entry *ret = dst;
  693. if (rt) {
  694. if (dst->obsolete > 0) {
  695. ip_rt_put(rt);
  696. ret = NULL;
  697. } else if ((rt->rt_flags & RTCF_REDIRECTED) ||
  698. rt->dst.expires) {
  699. ip_rt_put(rt);
  700. ret = NULL;
  701. }
  702. }
  703. return ret;
  704. }
  705. /*
  706. * Algorithm:
  707. * 1. The first ip_rt_redirect_number redirects are sent
  708. * with exponential backoff, then we stop sending them at all,
  709. * assuming that the host ignores our redirects.
  710. * 2. If we did not see packets requiring redirects
  711. * during ip_rt_redirect_silence, we assume that the host
  712. * forgot redirected route and start to send redirects again.
  713. *
  714. * This algorithm is much cheaper and more intelligent than dumb load limiting
  715. * in icmp.c.
  716. *
  717. * NOTE. Do not forget to inhibit load limiting for redirects (redundant)
  718. * and "frag. need" (breaks PMTU discovery) in icmp.c.
  719. */
/* Send an ICMP redirect back to the sender of @skb, applying the
 * exponential-backoff rate limiting described in the comment above.
 * Per-source state lives in the inet_peer entry.
 */
void ip_rt_send_redirect(struct sk_buff *skb)
{
	struct rtable *rt = skb_rtable(skb);
	struct in_device *in_dev;
	struct inet_peer *peer;
	struct net *net;
	int log_martians;

	rcu_read_lock();
	in_dev = __in_dev_get_rcu(rt->dst.dev);
	if (!in_dev || !IN_DEV_TX_REDIRECTS(in_dev)) {
		rcu_read_unlock();
		return;
	}
	/* Latch the sysctl while still under RCU; in_dev is not used again. */
	log_martians = IN_DEV_LOG_MARTIANS(in_dev);
	rcu_read_unlock();

	net = dev_net(rt->dst.dev);
	peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr, 1);
	if (!peer) {
		/* No peer state available: send unthrottled rather than
		 * not at all.
		 */
		icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST,
			  rt_nexthop(rt, ip_hdr(skb)->daddr));
		return;
	}

	/* No redirected packets during ip_rt_redirect_silence;
	 * reset the algorithm.
	 */
	if (time_after(jiffies, peer->rate_last + ip_rt_redirect_silence))
		peer->rate_tokens = 0;

	/* Too many ignored redirects; do not send anything
	 * set dst.rate_last to the last seen redirected packet.
	 */
	if (peer->rate_tokens >= ip_rt_redirect_number) {
		peer->rate_last = jiffies;
		goto out_put_peer;
	}

	/* Check for load limit; set rate_last to the latest sent
	 * redirect.  The backoff doubles with each redirect already sent.
	 */
	if (peer->rate_tokens == 0 ||
	    time_after(jiffies,
		       (peer->rate_last +
			(ip_rt_redirect_load << peer->rate_tokens)))) {
		__be32 gw = rt_nexthop(rt, ip_hdr(skb)->daddr);

		icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, gw);
		peer->rate_last = jiffies;
		++peer->rate_tokens;
#ifdef CONFIG_IP_ROUTE_VERBOSE
		/* Log exactly once, when the host hits the give-up limit. */
		if (log_martians &&
		    peer->rate_tokens == ip_rt_redirect_number)
			net_warn_ratelimited("host %pI4/if%d ignores redirects for %pI4 to %pI4\n",
					     &ip_hdr(skb)->saddr, inet_iif(skb),
					     &ip_hdr(skb)->daddr, &gw);
#endif
	}
out_put_peer:
	inet_putpeer(peer);
}
  776. static int ip_error(struct sk_buff *skb)
  777. {
  778. struct in_device *in_dev = __in_dev_get_rcu(skb->dev);
  779. struct rtable *rt = skb_rtable(skb);
  780. struct inet_peer *peer;
  781. unsigned long now;
  782. struct net *net;
  783. bool send;
  784. int code;
  785. net = dev_net(rt->dst.dev);
  786. if (!IN_DEV_FORWARD(in_dev)) {
  787. switch (rt->dst.error) {
  788. case EHOSTUNREACH:
  789. IP_INC_STATS_BH(net, IPSTATS_MIB_INADDRERRORS);
  790. break;
  791. case ENETUNREACH:
  792. IP_INC_STATS_BH(net, IPSTATS_MIB_INNOROUTES);
  793. break;
  794. }
  795. goto out;
  796. }
  797. switch (rt->dst.error) {
  798. case EINVAL:
  799. default:
  800. goto out;
  801. case EHOSTUNREACH:
  802. code = ICMP_HOST_UNREACH;
  803. break;
  804. case ENETUNREACH:
  805. code = ICMP_NET_UNREACH;
  806. IP_INC_STATS_BH(net, IPSTATS_MIB_INNOROUTES);
  807. break;
  808. case EACCES:
  809. code = ICMP_PKT_FILTERED;
  810. break;
  811. }
  812. peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr, 1);
  813. send = true;
  814. if (peer) {
  815. now = jiffies;
  816. peer->rate_tokens += now - peer->rate_last;
  817. if (peer->rate_tokens > ip_rt_error_burst)
  818. peer->rate_tokens = ip_rt_error_burst;
  819. peer->rate_last = now;
  820. if (peer->rate_tokens >= ip_rt_error_cost)
  821. peer->rate_tokens -= ip_rt_error_cost;
  822. else
  823. send = false;
  824. inet_putpeer(peer);
  825. }
  826. if (send)
  827. icmp_send(skb, ICMP_DEST_UNREACH, code, 0);
  828. out: kfree_skb(skb);
  829. return 0;
  830. }
/* Record a new path MTU @mtu for flow @fl4 on route @rt by creating or
 * updating a nexthop exception; the learned value expires after
 * ip_rt_mtu_expires.
 */
static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
{
	struct dst_entry *dst = &rt->dst;
	struct fib_result res;

	/* An administratively locked MTU must never be overridden. */
	if (dst_metric_locked(dst, RTAX_MTU))
		return;

	/* Ignore reports larger than what this device can send anyway. */
	if (dst->dev->mtu < mtu)
		return;

	/* Never learn a PMTU below the configured floor. */
	if (mtu < ip_rt_min_pmtu)
		mtu = ip_rt_min_pmtu;

	/* Same value already recorded and still in the first half of its
	 * lifetime: nothing to refresh.
	 */
	if (rt->rt_pmtu == mtu &&
	    time_before(jiffies, dst->expires - ip_rt_mtu_expires / 2))
		return;

	rcu_read_lock();
	if (fib_lookup(dev_net(dst->dev), fl4, &res) == 0) {
		struct fib_nh *nh = &FIB_RES_NH(res);

		update_or_create_fnhe(nh, fl4->daddr, 0, mtu,
				      jiffies + ip_rt_mtu_expires);
	}
	rcu_read_unlock();
}
  852. static void ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
  853. struct sk_buff *skb, u32 mtu)
  854. {
  855. struct rtable *rt = (struct rtable *) dst;
  856. struct flowi4 fl4;
  857. ip_rt_build_flow_key(&fl4, sk, skb);
  858. __ip_rt_update_pmtu(rt, &fl4, mtu);
  859. }
  860. void ipv4_update_pmtu(struct sk_buff *skb, struct net *net, u32 mtu,
  861. int oif, u32 mark, u8 protocol, int flow_flags)
  862. {
  863. const struct iphdr *iph = (const struct iphdr *) skb->data;
  864. struct flowi4 fl4;
  865. struct rtable *rt;
  866. if (!mark)
  867. mark = IP4_REPLY_MARK(net, skb->mark);
  868. __build_flow_key(&fl4, NULL, iph, oif,
  869. RT_TOS(iph->tos), protocol, mark, flow_flags);
  870. rt = __ip_route_output_key(net, &fl4);
  871. if (!IS_ERR(rt)) {
  872. __ip_rt_update_pmtu(rt, &fl4, mtu);
  873. ip_rt_put(rt);
  874. }
  875. }
  876. EXPORT_SYMBOL_GPL(ipv4_update_pmtu);
  877. static void __ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
  878. {
  879. const struct iphdr *iph = (const struct iphdr *) skb->data;
  880. struct flowi4 fl4;
  881. struct rtable *rt;
  882. __build_flow_key(&fl4, sk, iph, 0, 0, 0, 0, 0);
  883. if (!fl4.flowi4_mark)
  884. fl4.flowi4_mark = IP4_REPLY_MARK(sock_net(sk), skb->mark);
  885. rt = __ip_route_output_key(sock_net(sk), &fl4);
  886. if (!IS_ERR(rt)) {
  887. __ip_rt_update_pmtu(rt, &fl4, mtu);
  888. ip_rt_put(rt);
  889. }
  890. }
/* Socket-aware PMTU update: prefer updating the socket's cached dst in
 * place (cheap path) and fall back to a fresh route lookup when the
 * cached dst is stale or the socket is owned by userspace.
 */
void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
{
	const struct iphdr *iph = (const struct iphdr *) skb->data;
	struct flowi4 fl4;
	struct rtable *rt;
	struct dst_entry *odst = NULL;
	bool new = false;	/* true once rt holds a reference we own */

	bh_lock_sock(sk);

	if (!ip_sk_accept_pmtu(sk))
		goto out;

	odst = sk_dst_get(sk);

	/* Cannot safely touch the socket dst: do a lookup-based update. */
	if (sock_owned_by_user(sk) || !odst) {
		__ipv4_sk_update_pmtu(skb, sk, mtu);
		goto out;
	}

	__build_flow_key(&fl4, sk, iph, 0, 0, 0, 0, 0);

	rt = (struct rtable *)odst;
	/* Cached dst no longer valid: resolve a fresh route. */
	if (odst->obsolete && odst->ops->check(odst, 0) == NULL) {
		rt = ip_route_output_flow(sock_net(sk), &fl4, sk);
		if (IS_ERR(rt))
			goto out;

		new = true;
	}

	/* Apply to the innermost (non-xfrm) route of the dst path. */
	__ip_rt_update_pmtu((struct rtable *) rt->dst.path, &fl4, mtu);

	/* The update itself may have invalidated the route (e.g. it was
	 * marked DST_OBSOLETE_KILL); re-resolve in that case.
	 */
	if (!dst_check(&rt->dst, 0)) {
		if (new)
			dst_release(&rt->dst);

		rt = ip_route_output_flow(sock_net(sk), &fl4, sk);
		if (IS_ERR(rt))
			goto out;

		new = true;
	}

	if (new)
		sk_dst_set(sk, &rt->dst);

out:
	bh_unlock_sock(sk);
	dst_release(odst);
}
EXPORT_SYMBOL_GPL(ipv4_sk_update_pmtu);
  930. void ipv4_redirect(struct sk_buff *skb, struct net *net,
  931. int oif, u32 mark, u8 protocol, int flow_flags)
  932. {
  933. const struct iphdr *iph = (const struct iphdr *) skb->data;
  934. struct flowi4 fl4;
  935. struct rtable *rt;
  936. __build_flow_key(&fl4, NULL, iph, oif,
  937. RT_TOS(iph->tos), protocol, mark, flow_flags);
  938. rt = __ip_route_output_key(net, &fl4);
  939. if (!IS_ERR(rt)) {
  940. __ip_do_redirect(rt, skb, &fl4, false);
  941. ip_rt_put(rt);
  942. }
  943. }
  944. EXPORT_SYMBOL_GPL(ipv4_redirect);
  945. void ipv4_sk_redirect(struct sk_buff *skb, struct sock *sk)
  946. {
  947. const struct iphdr *iph = (const struct iphdr *) skb->data;
  948. struct flowi4 fl4;
  949. struct rtable *rt;
  950. __build_flow_key(&fl4, sk, iph, 0, 0, 0, 0, 0);
  951. rt = __ip_route_output_key(sock_net(sk), &fl4);
  952. if (!IS_ERR(rt)) {
  953. __ip_do_redirect(rt, skb, &fl4, false);
  954. ip_rt_put(rt);
  955. }
  956. }
  957. EXPORT_SYMBOL_GPL(ipv4_sk_redirect);
  958. static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
  959. {
  960. struct rtable *rt = (struct rtable *) dst;
  961. /* All IPV4 dsts are created with ->obsolete set to the value
  962. * DST_OBSOLETE_FORCE_CHK which forces validation calls down
  963. * into this function always.
  964. *
  965. * When a PMTU/redirect information update invalidates a route,
  966. * this is indicated by setting obsolete to DST_OBSOLETE_KILL or
  967. * DST_OBSOLETE_DEAD by dst_free().
  968. */
  969. if (dst->obsolete != DST_OBSOLETE_FORCE_CHK || rt_is_expired(rt))
  970. return NULL;
  971. return dst;
  972. }
  973. static void ipv4_link_failure(struct sk_buff *skb)
  974. {
  975. struct rtable *rt;
  976. icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0);
  977. rt = skb_rtable(skb);
  978. if (rt)
  979. dst_set_expires(&rt->dst, 0);
  980. }
  981. static int ip_rt_bug(struct sock *sk, struct sk_buff *skb)
  982. {
  983. pr_debug("%s: %pI4 -> %pI4, %s\n",
  984. __func__, &ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr,
  985. skb->dev ? skb->dev->name : "?");
  986. kfree_skb(skb);
  987. WARN_ON(1);
  988. return 0;
  989. }
/*
 * We do not cache the source address of the outgoing interface,
 * because it is used only by IP RR, TS and SRR options,
 * so it is out of the fast path.
 *
 * BTW remember: "addr" is allowed to be unaligned
 * in IP options!
 */
/* Copy the source address the router would use on @rt into @addr.
 * For output routes this is simply the packet's own source; for input
 * routes a reverse fib lookup finds the preferred source.  @addr may be
 * unaligned (it points into the IP options area), hence the final memcpy
 * instead of a direct __be32 store.
 */
void ip_rt_get_source(u8 *addr, struct sk_buff *skb, struct rtable *rt)
{
	__be32 src;

	if (rt_is_output_route(rt))
		src = ip_hdr(skb)->saddr;
	else {
		struct fib_result res;
		struct flowi4 fl4;
		struct iphdr *iph;

		iph = ip_hdr(skb);

		/* Build a reverse flow key for the incoming packet. */
		memset(&fl4, 0, sizeof(fl4));
		fl4.daddr = iph->daddr;
		fl4.saddr = iph->saddr;
		fl4.flowi4_tos = RT_TOS(iph->tos);
		fl4.flowi4_oif = rt->dst.dev->ifindex;
		fl4.flowi4_iif = skb->dev->ifindex;
		fl4.flowi4_mark = skb->mark;

		rcu_read_lock();
		if (fib_lookup(dev_net(rt->dst.dev), &fl4, &res) == 0)
			src = FIB_RES_PREFSRC(dev_net(rt->dst.dev), res);
		else
			/* No route back: fall back to any universe-scope
			 * address on the route's device.
			 */
			src = inet_select_addr(rt->dst.dev,
					       rt_nexthop(rt, iph->daddr),
					       RT_SCOPE_UNIVERSE);
		rcu_read_unlock();
	}
	memcpy(addr, &src, 4);
}
  1025. #ifdef CONFIG_IP_ROUTE_CLASSID
  1026. static void set_class_tag(struct rtable *rt, u32 tag)
  1027. {
  1028. if (!(rt->dst.tclassid & 0xFFFF))
  1029. rt->dst.tclassid |= tag & 0xFFFF;
  1030. if (!(rt->dst.tclassid & 0xFFFF0000))
  1031. rt->dst.tclassid |= tag & 0xFFFF0000;
  1032. }
  1033. #endif
  1034. static unsigned int ipv4_default_advmss(const struct dst_entry *dst)
  1035. {
  1036. unsigned int advmss = dst_metric_raw(dst, RTAX_ADVMSS);
  1037. if (advmss == 0) {
  1038. advmss = max_t(unsigned int, dst->dev->mtu - 40,
  1039. ip_rt_min_advmss);
  1040. if (advmss > 65535 - 40)
  1041. advmss = 65535 - 40;
  1042. }
  1043. return advmss;
  1044. }
  1045. static unsigned int ipv4_mtu(const struct dst_entry *dst)
  1046. {
  1047. const struct rtable *rt = (const struct rtable *) dst;
  1048. unsigned int mtu = rt->rt_pmtu;
  1049. if (!mtu || time_after_eq(jiffies, rt->dst.expires))
  1050. mtu = dst_metric_raw(dst, RTAX_MTU);
  1051. if (mtu)
  1052. return mtu;
  1053. mtu = dst->dev->mtu;
  1054. if (unlikely(dst_metric_locked(dst, RTAX_MTU))) {
  1055. if (rt->rt_uses_gateway && mtu > 576)
  1056. mtu = 576;
  1057. }
  1058. return min_t(unsigned int, mtu, IP_MAX_MTU);
  1059. }
/* Look up the nexthop exception for destination @daddr on @nh; NULL when
 * none exists.  Caller must hold rcu_read_lock(): the hash chain is
 * traversed with rcu_dereference().
 */
static struct fib_nh_exception *find_exception(struct fib_nh *nh, __be32 daddr)
{
	struct fnhe_hash_bucket *hash = nh->nh_exceptions;
	struct fib_nh_exception *fnhe;
	u32 hval;

	if (!hash)
		return NULL;

	hval = fnhe_hashfun(daddr);

	for (fnhe = rcu_dereference(hash[hval].chain); fnhe;
	     fnhe = rcu_dereference(fnhe->fnhe_next)) {
		if (fnhe->fnhe_daddr == daddr)
			return fnhe;
	}
	return NULL;
}
/* Bind @rt to nexthop exception @fnhe: copy the exception's learned
 * gateway/PMTU into the route and, when the route is cacheable, store it
 * in the exception's input or output slot.  Returns true if the route was
 * cached there.  Serialized by fnhe_lock against update_or_create_fnhe().
 */
static bool rt_bind_exception(struct rtable *rt, struct fib_nh_exception *fnhe,
			      __be32 daddr)
{
	bool ret = false;

	spin_lock_bh(&fnhe_lock);

	/* Re-check under the lock: the exception may have been recycled
	 * for a different destination (via fnhe_oldest()) meanwhile.
	 */
	if (daddr == fnhe->fnhe_daddr) {
		struct rtable __rcu **porig;
		struct rtable *orig;
		int genid = fnhe_genid(dev_net(rt->dst.dev));

		if (rt_is_input_route(rt))
			porig = &fnhe->fnhe_rth_input;
		else
			porig = &fnhe->fnhe_rth_output;
		orig = rcu_dereference(*porig);

		/* Stale generation: wipe the learned data and any cached
		 * routes before reusing the exception.
		 */
		if (fnhe->fnhe_genid != genid) {
			fnhe->fnhe_genid = genid;
			fnhe->fnhe_gw = 0;
			fnhe->fnhe_pmtu = 0;
			fnhe->fnhe_expires = 0;
			fnhe_flush_routes(fnhe);
			orig = NULL;
		}
		fill_route_from_fnhe(rt, fnhe);
		if (!rt->rt_gateway)
			rt->rt_gateway = daddr;

		if (!(rt->dst.flags & DST_NOCACHE)) {
			/* Publish the new route, then free the old one via
			 * RCU so concurrent readers stay safe.
			 */
			rcu_assign_pointer(*porig, rt);
			if (orig)
				rt_free(orig);
			ret = true;
		}

		fnhe->fnhe_stamp = jiffies;
	}
	spin_unlock_bh(&fnhe_lock);

	return ret;
}
/* Try to install @rt as the cached route on @nh — the per-nexthop input
 * slot, or this CPU's output slot.  Lockless: a single cmpxchg swaps the
 * pointer; if we lose the race the caller must treat the route as
 * uncacheable.  Returns true when the route was stored.
 */
static bool rt_cache_route(struct fib_nh *nh, struct rtable *rt)
{
	struct rtable *orig, *prev, **p;
	bool ret = true;

	if (rt_is_input_route(rt)) {
		p = (struct rtable **)&nh->nh_rth_input;
	} else {
		p = (struct rtable **)__this_cpu_ptr(nh->nh_pcpu_rth_output);
	}
	orig = *p;

	prev = cmpxchg(p, orig, rt);
	if (prev == orig) {
		/* We won: RCU-defer freeing of the displaced route. */
		if (orig)
			rt_free(orig);
	} else
		ret = false;

	return ret;
}
/* Global list of routes that could not be cached on a nexthop; walked by
 * rt_flush_dev() on device teardown.  Protected by rt_uncached_lock.
 */
static DEFINE_SPINLOCK(rt_uncached_lock);
static LIST_HEAD(rt_uncached_list);

/* Register @rt on the uncached list so device removal can find it. */
static void rt_add_uncached_list(struct rtable *rt)
{
	spin_lock_bh(&rt_uncached_lock);
	list_add_tail(&rt->rt_uncached, &rt_uncached_list);
	spin_unlock_bh(&rt_uncached_lock);
}
  1137. static void ipv4_dst_destroy(struct dst_entry *dst)
  1138. {
  1139. struct rtable *rt = (struct rtable *) dst;
  1140. if (!list_empty(&rt->rt_uncached)) {
  1141. spin_lock_bh(&rt_uncached_lock);
  1142. list_del(&rt->rt_uncached);
  1143. spin_unlock_bh(&rt_uncached_lock);
  1144. }
  1145. }
/* Detach every uncached route from @dev (which is going away) by
 * re-pointing it at the loopback device of dev's netns, transferring the
 * device reference accordingly.
 */
void rt_flush_dev(struct net_device *dev)
{
	if (!list_empty(&rt_uncached_list)) {
		struct net *net = dev_net(dev);
		struct rtable *rt;

		spin_lock_bh(&rt_uncached_lock);
		list_for_each_entry(rt, &rt_uncached_list, rt_uncached) {
			if (rt->dst.dev != dev)
				continue;
			rt->dst.dev = net->loopback_dev;
			/* Hold the replacement device before dropping the
			 * reference on the old one.
			 */
			dev_hold(rt->dst.dev);
			dev_put(dev);
		}
		spin_unlock_bh(&rt_uncached_lock);
	}
}
  1162. static bool rt_cache_valid(const struct rtable *rt)
  1163. {
  1164. return rt &&
  1165. rt->dst.obsolete == DST_OBSOLETE_FORCE_CHK &&
  1166. !rt_is_expired(rt);
  1167. }
/* Finish constructing @rt from a fib lookup result: copy gateway,
 * metrics and classid from the nexthop, then either bind the route to a
 * nexthop exception, cache it on the nexthop, or fall back to the
 * uncached list.
 */
static void rt_set_nexthop(struct rtable *rt, __be32 daddr,
			   const struct fib_result *res,
			   struct fib_nh_exception *fnhe,
			   struct fib_info *fi, u16 type, u32 itag)
{
	bool cached = false;

	if (fi) {
		struct fib_nh *nh = &FIB_RES_NH(*res);

		/* Only a link-scope nexthop gateway is a real gateway. */
		if (nh->nh_gw && nh->nh_scope == RT_SCOPE_LINK) {
			rt->rt_gateway = nh->nh_gw;
			rt->rt_uses_gateway = 1;
		}
		dst_init_metrics(&rt->dst, fi->fib_metrics, true);
#ifdef CONFIG_IP_ROUTE_CLASSID
		rt->dst.tclassid = nh->nh_tclassid;
#endif
		if (unlikely(fnhe))
			cached = rt_bind_exception(rt, fnhe, daddr);
		else if (!(rt->dst.flags & DST_NOCACHE))
			cached = rt_cache_route(nh, rt);
		if (unlikely(!cached)) {
			/* Routes we intend to cache in nexthop exception or
			 * FIB nexthop have the DST_NOCACHE bit clear.
			 * However, if we are unsuccessful at storing this
			 * route into the cache we really need to set it.
			 */
			rt->dst.flags |= DST_NOCACHE;
			if (!rt->rt_gateway)
				rt->rt_gateway = daddr;
			rt_add_uncached_list(rt);
		}
	} else
		rt_add_uncached_list(rt);

#ifdef CONFIG_IP_ROUTE_CLASSID
#ifdef CONFIG_IP_MULTIPLE_TABLES
	set_class_tag(rt, res->tclassid);
#endif
	set_class_tag(rt, itag);
#endif
}
  1208. static struct rtable *rt_dst_alloc(struct net_device *dev,
  1209. bool nopolicy, bool noxfrm, bool will_cache)
  1210. {
  1211. return dst_alloc(&ipv4_dst_ops, dev, 1, DST_OBSOLETE_FORCE_CHK,
  1212. (will_cache ? 0 : (DST_HOST | DST_NOCACHE)) |
  1213. (nopolicy ? DST_NOPOLICY : 0) |
  1214. (noxfrm ? DST_NOXFRM : 0));
  1215. }
/* called in rcu_read_lock() section */
/* Build an input route for a multicast destination.  @our is non-zero when
 * the local host is a member of the group, in which case the packet is
 * also delivered locally.  Returns 0 with the dst attached to @skb, or a
 * negative errno.
 */
static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
			     u8 tos, struct net_device *dev, int our)
{
	struct rtable *rth;
	struct in_device *in_dev = __in_dev_get_rcu(dev);
	u32 itag = 0;
	int err;

	/* Primary sanity checks. */

	if (in_dev == NULL)
		return -EINVAL;

	/* Sources must be unicast, and only real IP frames qualify. */
	if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) ||
	    skb->protocol != htons(ETH_P_IP))
		goto e_inval;

	if (likely(!IN_DEV_ROUTE_LOCALNET(in_dev)))
		if (ipv4_is_loopback(saddr))
			goto e_inval;

	if (ipv4_is_zeronet(saddr)) {
		/* 0.0.0.0 sources are valid only towards link-local groups
		 * (e.g. hosts still acquiring an address).
		 */
		if (!ipv4_is_local_multicast(daddr))
			goto e_inval;
	} else {
		err = fib_validate_source(skb, saddr, 0, tos, 0, dev,
					  in_dev, &itag);
		if (err < 0)
			goto e_err;
	}
	rth = rt_dst_alloc(dev_net(dev)->loopback_dev,
			   IN_DEV_CONF_GET(in_dev, NOPOLICY), false, false);
	if (!rth)
		goto e_nobufs;

#ifdef CONFIG_IP_ROUTE_CLASSID
	rth->dst.tclassid = itag;
#endif
	/* This route must never be used for output. */
	rth->dst.output = ip_rt_bug;

	rth->rt_genid = rt_genid_ipv4(dev_net(dev));
	rth->rt_flags = RTCF_MULTICAST;
	rth->rt_type = RTN_MULTICAST;
	rth->rt_is_input = 1;
	rth->rt_iif = 0;
	rth->rt_pmtu = 0;
	rth->rt_gateway = 0;
	rth->rt_uses_gateway = 0;
	INIT_LIST_HEAD(&rth->rt_uncached);
	if (our) {
		rth->dst.input = ip_local_deliver;
		rth->rt_flags |= RTCF_LOCAL;
	}

#ifdef CONFIG_IP_MROUTE
	/* Non-link-local groups on a multicast router go through mrouting. */
	if (!ipv4_is_local_multicast(daddr) && IN_DEV_MFORWARD(in_dev))
		rth->dst.input = ip_mr_input;
#endif
	RT_CACHE_STAT_INC(in_slow_mc);

	skb_dst_set(skb, &rth->dst);
	return 0;

e_nobufs:
	return -ENOBUFS;
e_inval:
	return -EINVAL;
e_err:
	return err;
}
/* Account and (rate-limited, when verbose routing is enabled) log a packet
 * with a martian source address.  Per RFC 1812 the MAC header is the only
 * hint about the real origin, so it is hex-dumped when available.
 */
static void ip_handle_martian_source(struct net_device *dev,
				     struct in_device *in_dev,
				     struct sk_buff *skb,
				     __be32 daddr,
				     __be32 saddr)
{
	RT_CACHE_STAT_INC(in_martian_src);
#ifdef CONFIG_IP_ROUTE_VERBOSE
	if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit()) {
		/*
		 * RFC1812 recommendation, if source is martian,
		 * the only hint is MAC header.
		 */
		pr_warn("martian source %pI4 from %pI4, on dev %s\n",
			&daddr, &saddr, dev->name);
		if (dev->hard_header_len && skb_mac_header_was_set(skb)) {
			print_hex_dump(KERN_WARNING, "ll header: ",
				       DUMP_PREFIX_OFFSET, 16, 1,
				       skb_mac_header(skb),
				       dev->hard_header_len, true);
		}
	}
#endif
}
/* called in rcu_read_lock() section */
/* Build (or reuse from the per-nexthop cache) a forwarding route for an
 * input packet whose fib lookup result is @res.  On success the dst is
 * attached to @skb and 0 is returned; negative errno otherwise.
 */
static int __mkroute_input(struct sk_buff *skb,
			   const struct fib_result *res,
			   struct in_device *in_dev,
			   __be32 daddr, __be32 saddr, u32 tos)
{
	struct fib_nh_exception *fnhe;
	struct rtable *rth;
	int err;
	struct in_device *out_dev;
	unsigned int flags = 0;
	bool do_cache;
	u32 itag = 0;

	/* get a working reference to the output device */
	out_dev = __in_dev_get_rcu(FIB_RES_DEV(*res));
	if (out_dev == NULL) {
		net_crit_ratelimited("Bug in ip_route_input_slow(). Please report.\n");
		return -EINVAL;
	}

	err = fib_validate_source(skb, saddr, daddr, tos, FIB_RES_OIF(*res),
				  in_dev->dev, in_dev, &itag);
	if (err < 0) {
		ip_handle_martian_source(in_dev->dev, in_dev, skb, daddr,
					 saddr);

		goto cleanup;
	}

	/* Cache only plain (untagged) routes with a usable fib result. */
	do_cache = res->fi && !itag;
	if (out_dev == in_dev && err && IN_DEV_TX_REDIRECTS(out_dev) &&
	    (IN_DEV_SHARED_MEDIA(out_dev) ||
	     inet_addr_onlink(out_dev, saddr, FIB_RES_GW(*res)))) {
		/* Packet leaves via the interface it arrived on: advise the
		 * sender of the better next hop.  Such routes are one-off
		 * and must not be cached.
		 */
		flags |= RTCF_DOREDIRECT;
		do_cache = false;
	}

	if (skb->protocol != htons(ETH_P_IP)) {
		/* Not IP (i.e. ARP). Do not create route, if it is
		 * invalid for proxy arp. DNAT routes are always valid.
		 *
		 * Proxy arp feature have been extended to allow, ARP
		 * replies back to the same interface, to support
		 * Private VLAN switch technologies. See arp.c.
		 */
		if (out_dev == in_dev &&
		    IN_DEV_PROXY_ARP_PVLAN(in_dev) == 0) {
			err = -EINVAL;
			goto cleanup;
		}
	}

	fnhe = find_exception(&FIB_RES_NH(*res), daddr);
	if (do_cache) {
		/* Prefer the exception's cached route over the nexthop's. */
		if (fnhe != NULL)
			rth = rcu_dereference(fnhe->fnhe_rth_input);
		else
			rth = rcu_dereference(FIB_RES_NH(*res).nh_rth_input);

		if (rt_cache_valid(rth)) {
			/* Cache hit: attach without taking a reference. */
			skb_dst_set_noref(skb, &rth->dst);
			goto out;
		}
	}

	rth = rt_dst_alloc(out_dev->dev,
			   IN_DEV_CONF_GET(in_dev, NOPOLICY),
			   IN_DEV_CONF_GET(out_dev, NOXFRM), do_cache);
	if (!rth) {
		err = -ENOBUFS;
		goto cleanup;
	}

	rth->rt_genid = rt_genid_ipv4(dev_net(rth->dst.dev));
	rth->rt_flags = flags;
	rth->rt_type = res->type;
	rth->rt_is_input = 1;
	rth->rt_iif = 0;
	rth->rt_pmtu = 0;
	rth->rt_gateway = 0;
	rth->rt_uses_gateway = 0;
	INIT_LIST_HEAD(&rth->rt_uncached);
	RT_CACHE_STAT_INC(in_slow_tot);

	rth->dst.input = ip_forward;
	rth->dst.output = ip_output;

	rt_set_nexthop(rth, daddr, res, fnhe, res->fi, res->type, itag);
	skb_dst_set(skb, &rth->dst);
out:
	err = 0;
cleanup:
	return err;
}
/* Select the nexthop (multipath-aware) and build the input route entry. */
static int ip_mkroute_input(struct sk_buff *skb,
			    struct fib_result *res,
			    const struct flowi4 *fl4,
			    struct in_device *in_dev,
			    __be32 daddr, __be32 saddr, u32 tos)
{
#ifdef CONFIG_IP_ROUTE_MULTIPATH
	/* More than one nexthop: let the multipath algorithm pick one. */
	if (res->fi && res->fi->fib_nhs > 1)
		fib_select_multipath(res);
#endif

	/* create a routing cache entry */
	return __mkroute_input(skb, res, in_dev, daddr, saddr, tos);
}
/*
 * NOTE. We drop all packets that have a local source
 * address, because every properly looped-back packet
 * must already have the correct destination attached by the output routine.
 *
 * Such an approach solves two big problems:
 * 1. Non-simplex devices are handled properly.
 * 2. IP spoofing attempts are filtered with a 100% guarantee.
 *
 * called with rcu_read_lock()
 */
/* Slow-path input routing: classify the packet's addresses, do the fib
 * lookup, and construct/attach the appropriate dst (forward, local
 * delivery, broadcast or error route).  Returns 0 on success or a
 * negative errno.  Runs under rcu_read_lock().
 */
static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
			       u8 tos, struct net_device *dev)
{
	struct fib_result res;
	struct in_device *in_dev = __in_dev_get_rcu(dev);
	struct flowi4 fl4;
	unsigned int flags = 0;
	u32 itag = 0;
	struct rtable *rth;
	int err = -EINVAL;
	struct net *net = dev_net(dev);
	bool do_cache;

	/* IP on this device is disabled. */

	if (!in_dev)
		goto out;

	/* Check for the most weird martians, which can be not detected
	 * by fib_lookup.
	 */

	if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr))
		goto martian_source;

	res.fi = NULL;
	if (ipv4_is_lbcast(daddr) || (saddr == 0 && daddr == 0))
		goto brd_input;

	/* Accept zero addresses only to limited broadcast;
	 * I even do not know to fix it or not. Waiting for complains :-)
	 */
	if (ipv4_is_zeronet(saddr))
		goto martian_source;

	if (ipv4_is_zeronet(daddr))
		goto martian_destination;

	/* Following code try to avoid calling IN_DEV_NET_ROUTE_LOCALNET(),
	 * and call it once if daddr or/and saddr are loopback addresses
	 */
	if (ipv4_is_loopback(daddr)) {
		if (!IN_DEV_NET_ROUTE_LOCALNET(in_dev, net))
			goto martian_destination;
	} else if (ipv4_is_loopback(saddr)) {
		if (!IN_DEV_NET_ROUTE_LOCALNET(in_dev, net))
			goto martian_source;
	}

	/*
	 * Now we are ready to route packet.
	 */
	fl4.flowi4_oif = 0;
	fl4.flowi4_iif = dev->ifindex;
	fl4.flowi4_mark = skb->mark;
	fl4.flowi4_tos = tos;
	fl4.flowi4_scope = RT_SCOPE_UNIVERSE;
	fl4.daddr = daddr;
	fl4.saddr = saddr;
	err = fib_lookup(net, &fl4, &res);
	if (err != 0) {
		if (!IN_DEV_FORWARD(in_dev))
			err = -EHOSTUNREACH;
		goto no_route;
	}

	if (res.type == RTN_BROADCAST)
		goto brd_input;

	if (res.type == RTN_LOCAL) {
		err = fib_validate_source(skb, saddr, daddr, tos,
					  0, dev, in_dev, &itag);
		if (err < 0)
			goto martian_source_keep_err;
		goto local_input;
	}

	if (!IN_DEV_FORWARD(in_dev)) {
		err = -EHOSTUNREACH;
		goto no_route;
	}
	if (res.type != RTN_UNICAST)
		goto martian_destination;

	/* Forwarding case. */
	err = ip_mkroute_input(skb, &res, &fl4, in_dev, daddr, saddr, tos);
out:	return err;

brd_input:
	if (skb->protocol != htons(ETH_P_IP))
		goto e_inval;

	if (!ipv4_is_zeronet(saddr)) {
		err = fib_validate_source(skb, saddr, 0, tos, 0, dev,
					  in_dev, &itag);
		if (err < 0)
			goto martian_source_keep_err;
	}
	flags |= RTCF_BROADCAST;
	res.type = RTN_BROADCAST;
	RT_CACHE_STAT_INC(in_brd);

local_input:
	/* Try the per-nexthop input route cache before allocating. */
	do_cache = false;
	if (res.fi) {
		if (!itag) {
			rth = rcu_dereference(FIB_RES_NH(res).nh_rth_input);
			if (rt_cache_valid(rth)) {
				skb_dst_set_noref(skb, &rth->dst);
				err = 0;
				goto out;
			}
			do_cache = true;
		}
	}

	rth = rt_dst_alloc(net->loopback_dev,
			   IN_DEV_CONF_GET(in_dev, NOPOLICY), false, do_cache);
	if (!rth)
		goto e_nobufs;

	rth->dst.input = ip_local_deliver;
	rth->dst.output = ip_rt_bug;
#ifdef CONFIG_IP_ROUTE_CLASSID
	rth->dst.tclassid = itag;
#endif

	rth->rt_genid = rt_genid_ipv4(net);
	rth->rt_flags = flags|RTCF_LOCAL;
	rth->rt_type = res.type;
	rth->rt_is_input = 1;
	rth->rt_iif = 0;
	rth->rt_pmtu = 0;
	rth->rt_gateway = 0;
	rth->rt_uses_gateway = 0;
	INIT_LIST_HEAD(&rth->rt_uncached);
	RT_CACHE_STAT_INC(in_slow_tot);
	if (res.type == RTN_UNREACHABLE) {
		/* Error route: deliver into ip_error() with the fib error. */
		rth->dst.input = ip_error;
		rth->dst.error = -err;
		rth->rt_flags &= ~RTCF_LOCAL;
	}
	if (do_cache) {
		if (unlikely(!rt_cache_route(&FIB_RES_NH(res), rth))) {
			rth->dst.flags |= DST_NOCACHE;
			rt_add_uncached_list(rth);
		}
	}
	skb_dst_set(skb, &rth->dst);
	err = 0;
	goto out;

no_route:
	RT_CACHE_STAT_INC(in_no_route);
	res.type = RTN_UNREACHABLE;
	if (err == -ESRCH)
		err = -ENETUNREACH;
	goto local_input;

	/*
	 * Do not cache martian addresses: they should be logged (RFC1812)
	 */
martian_destination:
	RT_CACHE_STAT_INC(in_martian_dst);
#ifdef CONFIG_IP_ROUTE_VERBOSE
	if (IN_DEV_LOG_MARTIANS(in_dev))
		net_warn_ratelimited("martian destination %pI4 from %pI4, dev %s\n",
				     &daddr, &saddr, dev->name);
#endif

e_inval:
	err = -EINVAL;
	goto out;

e_nobufs:
	err = -ENOBUFS;
	goto out;

martian_source:
	err = -EINVAL;
martian_source_keep_err:
	ip_handle_martian_source(dev, in_dev, skb, daddr, saddr);
	goto out;
}
  1567. int ip_route_input_noref(struct sk_buff *skb, __be32 daddr, __be32 saddr,
  1568. u8 tos, struct net_device *dev)
  1569. {
  1570. int res;
  1571. rcu_read_lock();
  1572. /* Multicast recognition logic is moved from route cache to here.
  1573. The problem was that too many Ethernet cards have broken/missing
  1574. hardware multicast filters :-( As result the host on multicasting
  1575. network acquires a lot of useless route cache entries, sort of
  1576. SDR messages from all the world. Now we try to get rid of them.
  1577. Really, provided software IP multicast filter is organized
  1578. reasonably (at least, hashed), it does not result in a slowdown
  1579. comparing with route cache reject entries.
  1580. Note, that multicast routers are not affected, because
  1581. route cache entry is created eventually.
  1582. */
  1583. if (ipv4_is_multicast(daddr)) {
  1584. struct in_device *in_dev = __in_dev_get_rcu(dev);
  1585. if (in_dev) {
  1586. int our = ip_check_mc_rcu(in_dev, daddr, saddr,
  1587. ip_hdr(skb)->protocol);
  1588. if (our
  1589. #ifdef CONFIG_IP_MROUTE
  1590. ||
  1591. (!ipv4_is_local_multicast(daddr) &&
  1592. IN_DEV_MFORWARD(in_dev))
  1593. #endif
  1594. ) {
  1595. int res = ip_route_input_mc(skb, daddr, saddr,
  1596. tos, dev, our);
  1597. rcu_read_unlock();
  1598. return res;
  1599. }
  1600. }
  1601. rcu_read_unlock();
  1602. return -EINVAL;
  1603. }
  1604. res = ip_route_input_slow(skb, daddr, saddr, tos, dev);
  1605. rcu_read_unlock();
  1606. return res;
  1607. }
  1608. EXPORT_SYMBOL(ip_route_input_noref);
/* Build (or fetch from the per-nexthop cache) the output rtable for a
 * completed FIB lookup.  Called with rcu_read_lock() held.
 *
 * Returns a held rtable on success or an ERR_PTR() on failure.
 */
static struct rtable *__mkroute_output(const struct fib_result *res,
				       const struct flowi4 *fl4, int orig_oif,
				       struct net_device *dev_out,
				       unsigned int flags)
{
	struct fib_info *fi = res->fi;
	struct fib_nh_exception *fnhe;
	struct in_device *in_dev;
	u16 type = res->type;
	struct rtable *rth;
	bool do_cache;

	in_dev = __in_dev_get_rcu(dev_out);
	if (!in_dev)
		return ERR_PTR(-EINVAL);

	/* A loopback source address may only leave through a loopback
	 * device, unless route_localnet is enabled on the output device.
	 */
	if (likely(!IN_DEV_ROUTE_LOCALNET(in_dev)))
		if (ipv4_is_loopback(fl4->saddr) && !(dev_out->flags & IFF_LOOPBACK))
			return ERR_PTR(-EINVAL);

	/* Override the FIB result type for special destinations. */
	if (ipv4_is_lbcast(fl4->daddr))
		type = RTN_BROADCAST;
	else if (ipv4_is_multicast(fl4->daddr))
		type = RTN_MULTICAST;
	else if (ipv4_is_zeronet(fl4->daddr))
		return ERR_PTR(-EINVAL);

	if (dev_out->flags & IFF_LOOPBACK)
		flags |= RTCF_LOCAL;

	do_cache = true;
	if (type == RTN_BROADCAST) {
		flags |= RTCF_BROADCAST | RTCF_LOCAL;
		fi = NULL;	/* broadcast routes are never cached via fi */
	} else if (type == RTN_MULTICAST) {
		flags |= RTCF_MULTICAST | RTCF_LOCAL;
		if (!ip_check_mc_rcu(in_dev, fl4->daddr, fl4->saddr,
				     fl4->flowi4_proto))
			flags &= ~RTCF_LOCAL;
		else
			do_cache = false;
		/* If multicast route do not exist use
		 * default one, but do not gateway in this case.
		 * Yes, it is hack.
		 */
		if (fi && res->prefixlen < 4)
			fi = NULL;
	}

	fnhe = NULL;
	do_cache &= fi != NULL;
	if (do_cache) {
		struct rtable __rcu **prth;
		struct fib_nh *nh = &FIB_RES_NH(*res);

		/* Prefer a nexthop exception (e.g. PMTU/redirect state);
		 * otherwise fall back to the per-CPU cached output route.
		 */
		fnhe = find_exception(nh, fl4->daddr);
		if (fnhe)
			prth = &fnhe->fnhe_rth_output;
		else {
			/* KNOWN_NH callers need the route built even when
			 * there is no on-link gateway to cache it against.
			 */
			if (unlikely(fl4->flowi4_flags &
				     FLOWI_FLAG_KNOWN_NH &&
				     !(nh->nh_gw &&
				       nh->nh_scope == RT_SCOPE_LINK))) {
				do_cache = false;
				goto add;
			}
			prth = __this_cpu_ptr(nh->nh_pcpu_rth_output);
		}
		rth = rcu_dereference(*prth);
		if (rt_cache_valid(rth)) {
			dst_hold(&rth->dst);
			return rth;
		}
	}

add:
	rth = rt_dst_alloc(dev_out,
			   IN_DEV_CONF_GET(in_dev, NOPOLICY),
			   IN_DEV_CONF_GET(in_dev, NOXFRM),
			   do_cache);
	if (!rth)
		return ERR_PTR(-ENOBUFS);

	rth->dst.output = ip_output;

	rth->rt_genid = rt_genid_ipv4(dev_net(dev_out));
	rth->rt_flags = flags;
	rth->rt_type = type;
	rth->rt_is_input = 0;
	rth->rt_iif = orig_oif ? : 0;
	rth->rt_pmtu = 0;
	rth->rt_gateway = 0;
	rth->rt_uses_gateway = 0;
	INIT_LIST_HEAD(&rth->rt_uncached);

	RT_CACHE_STAT_INC(out_slow_tot);

	if (flags & RTCF_LOCAL)
		rth->dst.input = ip_local_deliver;
	if (flags & (RTCF_BROADCAST | RTCF_MULTICAST)) {
		if (flags & RTCF_LOCAL &&
		    !(dev_out->flags & IFF_LOOPBACK)) {
			/* Locally delivered AND sent on the wire. */
			rth->dst.output = ip_mc_output;
			RT_CACHE_STAT_INC(out_slow_mc);
		}
#ifdef CONFIG_IP_MROUTE
		if (type == RTN_MULTICAST) {
			if (IN_DEV_MFORWARD(in_dev) &&
			    !ipv4_is_local_multicast(fl4->daddr)) {
				rth->dst.input = ip_mr_input;
				rth->dst.output = ip_mc_output;
			}
		}
#endif
	}

	rt_set_nexthop(rth, fl4->daddr, res, fnhe, fi, type, 0);

	return rth;
}
/*
 * Major route resolver routine.
 *
 * Fills in the missing pieces of @fl4 (source address, oif, scope),
 * performs the FIB lookup and returns a held rtable, or an ERR_PTR().
 */
struct rtable *__ip_route_output_key(struct net *net, struct flowi4 *fl4)
{
	struct net_device *dev_out = NULL;
	__u8 tos = RT_FL_TOS(fl4);
	unsigned int flags = 0;
	struct fib_result res;
	struct rtable *rth;
	int orig_oif;

	res.tclassid = 0;
	res.fi = NULL;
	res.table = NULL;

	orig_oif = fl4->flowi4_oif;

	fl4->flowi4_iif = LOOPBACK_IFINDEX;
	fl4->flowi4_tos = tos & IPTOS_RT_MASK;
	/* RTO_ONLINK in the tos requests a link-scope (no gateway) route. */
	fl4->flowi4_scope = ((tos & RTO_ONLINK) ?
			 RT_SCOPE_LINK : RT_SCOPE_UNIVERSE);

	rcu_read_lock();
	if (fl4->saddr) {
		rth = ERR_PTR(-EINVAL);
		/* A requested source must be a unicast address. */
		if (ipv4_is_multicast(fl4->saddr) ||
		    ipv4_is_lbcast(fl4->saddr) ||
		    ipv4_is_zeronet(fl4->saddr))
			goto out;

		/* I removed check for oif == dev_out->oif here.
		   It was wrong for two reasons:
		   1. ip_dev_find(net, saddr) can return wrong iface, if saddr
		      is assigned to multiple interfaces.
		   2. Moreover, we are allowed to send packets with saddr
		      of another iface. --ANK
		 */

		if (fl4->flowi4_oif == 0 &&
		    (ipv4_is_multicast(fl4->daddr) ||
		     ipv4_is_lbcast(fl4->daddr))) {
			/* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
			dev_out = __ip_dev_find(net, fl4->saddr, false);
			if (dev_out == NULL)
				goto out;

			/* Special hack: user can direct multicasts
			   and limited broadcast via necessary interface
			   without fiddling with IP_MULTICAST_IF or IP_PKTINFO.
			   This hack is not just for fun, it allows
			   vic,vat and friends to work.
			   They bind socket to loopback, set ttl to zero
			   and expect that it will work.
			   From the viewpoint of routing cache they are broken,
			   because we are not allowed to build multicast path
			   with loopback source addr (look, routing cache
			   cannot know, that ttl is zero, so that packet
			   will not leave this host and route is valid).
			   Luckily, this hack is good workaround.
			 */

			fl4->flowi4_oif = dev_out->ifindex;
			goto make_route;
		}

		if (!(fl4->flowi4_flags & FLOWI_FLAG_ANYSRC)) {
			/* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
			if (!__ip_dev_find(net, fl4->saddr, false))
				goto out;
		}
	}

	if (fl4->flowi4_oif) {
		dev_out = dev_get_by_index_rcu(net, fl4->flowi4_oif);
		rth = ERR_PTR(-ENODEV);
		if (dev_out == NULL)
			goto out;

		/* RACE: Check return value of inet_select_addr instead. */
		if (!(dev_out->flags & IFF_UP) || !__in_dev_get_rcu(dev_out)) {
			rth = ERR_PTR(-ENETUNREACH);
			goto out;
		}
		if (ipv4_is_local_multicast(fl4->daddr) ||
		    ipv4_is_lbcast(fl4->daddr)) {
			if (!fl4->saddr)
				fl4->saddr = inet_select_addr(dev_out, 0,
							      RT_SCOPE_LINK);
			goto make_route;
		}
		/* Pick a source address matching the requested scope. */
		if (!fl4->saddr) {
			if (ipv4_is_multicast(fl4->daddr))
				fl4->saddr = inet_select_addr(dev_out, 0,
							      fl4->flowi4_scope);
			else if (!fl4->daddr)
				fl4->saddr = inet_select_addr(dev_out, 0,
							      RT_SCOPE_HOST);
		}
	}

	if (!fl4->daddr) {
		/* No destination at all: loop the packet back locally. */
		fl4->daddr = fl4->saddr;
		if (!fl4->daddr)
			fl4->daddr = fl4->saddr = htonl(INADDR_LOOPBACK);
		dev_out = net->loopback_dev;
		fl4->flowi4_oif = LOOPBACK_IFINDEX;
		res.type = RTN_LOCAL;
		flags |= RTCF_LOCAL;
		goto make_route;
	}

	if (fib_lookup(net, fl4, &res)) {
		res.fi = NULL;
		res.table = NULL;
		if (fl4->flowi4_oif) {
			/* Apparently, routing tables are wrong. Assume,
			   that the destination is on link.

			   WHY? DW.
			   Because we are allowed to send to iface
			   even if it has NO routes and NO assigned
			   addresses. When oif is specified, routing
			   tables are looked up with only one purpose:
			   to catch if destination is gatewayed, rather than
			   direct. Moreover, if MSG_DONTROUTE is set,
			   we send packet, ignoring both routing tables
			   and ifaddr state. --ANK

			   We could make it even if oif is unknown,
			   likely IPv6, but we do not.
			 */

			if (fl4->saddr == 0)
				fl4->saddr = inet_select_addr(dev_out, 0,
							      RT_SCOPE_LINK);
			res.type = RTN_UNICAST;
			goto make_route;
		}
		rth = ERR_PTR(-ENETUNREACH);
		goto out;
	}

	if (res.type == RTN_LOCAL) {
		if (!fl4->saddr) {
			if (res.fi->fib_prefsrc)
				fl4->saddr = res.fi->fib_prefsrc;
			else
				fl4->saddr = fl4->daddr;
		}
		dev_out = net->loopback_dev;
		fl4->flowi4_oif = dev_out->ifindex;
		flags |= RTCF_LOCAL;
		goto make_route;
	}

#ifdef CONFIG_IP_ROUTE_MULTIPATH
	if (res.fi->fib_nhs > 1 && fl4->flowi4_oif == 0)
		fib_select_multipath(&res);
	else
#endif
	if (!res.prefixlen &&
	    res.table->tb_num_default > 1 &&
	    res.type == RTN_UNICAST && !fl4->flowi4_oif)
		fib_select_default(&res);

	if (!fl4->saddr)
		fl4->saddr = FIB_RES_PREFSRC(net, res);

	dev_out = FIB_RES_DEV(res);
	fl4->flowi4_oif = dev_out->ifindex;

make_route:
	rth = __mkroute_output(&res, fl4, orig_oif, dev_out, flags);

out:
	rcu_read_unlock();
	return rth;
}
EXPORT_SYMBOL_GPL(__ip_route_output_key);
/* dst_ops->check for blackhole routes: always report the dst as invalid
 * so callers relookup instead of reusing it.
 */
static struct dst_entry *ipv4_blackhole_dst_check(struct dst_entry *dst, u32 cookie)
{
	return NULL;
}
  1878. static unsigned int ipv4_blackhole_mtu(const struct dst_entry *dst)
  1879. {
  1880. unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
  1881. return mtu ? : dst->dev->mtu;
  1882. }
/* PMTU updates are deliberately ignored on blackhole routes. */
static void ipv4_rt_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk,
					  struct sk_buff *skb, u32 mtu)
{
}
/* ICMP redirects are deliberately ignored on blackhole routes. */
static void ipv4_rt_blackhole_redirect(struct dst_entry *dst, struct sock *sk,
				       struct sk_buff *skb)
{
}
/* Blackhole routes never get writable metrics; returning NULL keeps the
 * shared read-only metrics in place.
 */
static u32 *ipv4_rt_blackhole_cow_metrics(struct dst_entry *dst,
					  unsigned long old)
{
	return NULL;
}
/* dst_ops for blackhole routes created by ipv4_blackhole_route(): all
 * mutating callbacks are no-ops, lookups/metrics fall back to defaults.
 */
static struct dst_ops ipv4_dst_blackhole_ops = {
	.family			= AF_INET,
	.protocol		= cpu_to_be16(ETH_P_IP),
	.check			= ipv4_blackhole_dst_check,
	.mtu			= ipv4_blackhole_mtu,
	.default_advmss		= ipv4_default_advmss,
	.update_pmtu		= ipv4_rt_blackhole_update_pmtu,
	.redirect		= ipv4_rt_blackhole_redirect,
	.cow_metrics		= ipv4_rt_blackhole_cow_metrics,
	.neigh_lookup		= ipv4_neigh_lookup,
};
/* Clone @dst_orig into a "blackhole" route that silently discards all
 * packets while preserving the original route's attributes.  Consumes a
 * reference on @dst_orig.  Returns the new dst or ERR_PTR(-ENOMEM).
 */
struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_orig)
{
	struct rtable *ort = (struct rtable *) dst_orig;
	struct rtable *rt;

	rt = dst_alloc(&ipv4_dst_blackhole_ops, NULL, 1, DST_OBSOLETE_NONE, 0);
	if (rt) {
		struct dst_entry *new = &rt->dst;

		new->__use = 1;
		/* Both directions drop everything. */
		new->input = dst_discard;
		new->output = dst_discard_sk;

		new->dev = ort->dst.dev;
		if (new->dev)
			dev_hold(new->dev);

		/* Copy the routing attributes from the original route. */
		rt->rt_is_input = ort->rt_is_input;
		rt->rt_iif = ort->rt_iif;
		rt->rt_pmtu = ort->rt_pmtu;

		rt->rt_genid = rt_genid_ipv4(net);
		rt->rt_flags = ort->rt_flags;
		rt->rt_type = ort->rt_type;
		rt->rt_gateway = ort->rt_gateway;
		rt->rt_uses_gateway = ort->rt_uses_gateway;

		INIT_LIST_HEAD(&rt->rt_uncached);

		dst_free(new);
	}

	dst_release(dst_orig);

	return rt ? &rt->dst : ERR_PTR(-ENOMEM);
}
  1934. struct rtable *ip_route_output_flow(struct net *net, struct flowi4 *flp4,
  1935. struct sock *sk)
  1936. {
  1937. struct rtable *rt = __ip_route_output_key(net, flp4);
  1938. if (IS_ERR(rt))
  1939. return rt;
  1940. if (flp4->flowi4_proto)
  1941. rt = (struct rtable *) xfrm_lookup(net, &rt->dst,
  1942. flowi4_to_flowi(flp4),
  1943. sk, 0);
  1944. return rt;
  1945. }
  1946. EXPORT_SYMBOL_GPL(ip_route_output_flow);
/* Fill a netlink RTM_NEWROUTE message describing the route attached to
 * @skb.  Returns the message length on success, 0 when a multicast route
 * dump was deferred, or -EMSGSIZE when @skb ran out of room.
 */
static int rt_fill_info(struct net *net,  __be32 dst, __be32 src,
			struct flowi4 *fl4, struct sk_buff *skb, u32 portid,
			u32 seq, int event, int nowait, unsigned int flags)
{
	struct rtable *rt = skb_rtable(skb);
	struct rtmsg *r;
	struct nlmsghdr *nlh;
	unsigned long expires = 0;
	u32 error;
	u32 metrics[RTAX_MAX];

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*r), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	r = nlmsg_data(nlh);
	r->rtm_family	 = AF_INET;
	r->rtm_dst_len	= 32;
	r->rtm_src_len	= 0;
	r->rtm_tos	= fl4->flowi4_tos;
	r->rtm_table	= RT_TABLE_MAIN;
	if (nla_put_u32(skb, RTA_TABLE, RT_TABLE_MAIN))
		goto nla_put_failure;
	r->rtm_type	= rt->rt_type;
	r->rtm_scope	= RT_SCOPE_UNIVERSE;
	r->rtm_protocol = RTPROT_UNSPEC;
	/* Cached routes are reported as cloned; low 16 flag bits are
	 * internal-only and masked out.
	 */
	r->rtm_flags	= (rt->rt_flags & ~0xFFFF) | RTM_F_CLONED;
	if (rt->rt_flags & RTCF_NOTIFY)
		r->rtm_flags |= RTM_F_NOTIFY;

	if (nla_put_be32(skb, RTA_DST, dst))
		goto nla_put_failure;
	if (src) {
		r->rtm_src_len = 32;
		if (nla_put_be32(skb, RTA_SRC, src))
			goto nla_put_failure;
	}
	if (rt->dst.dev &&
	    nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex))
		goto nla_put_failure;
#ifdef CONFIG_IP_ROUTE_CLASSID
	if (rt->dst.tclassid &&
	    nla_put_u32(skb, RTA_FLOW, rt->dst.tclassid))
		goto nla_put_failure;
#endif
	/* For output routes report the chosen preferred source address. */
	if (!rt_is_input_route(rt) &&
	    fl4->saddr != src) {
		if (nla_put_be32(skb, RTA_PREFSRC, fl4->saddr))
			goto nla_put_failure;
	}
	if (rt->rt_uses_gateway &&
	    nla_put_be32(skb, RTA_GATEWAY, rt->rt_gateway))
		goto nla_put_failure;

	/* Convert absolute expiry to a remaining-jiffies value. */
	expires = rt->dst.expires;
	if (expires) {
		unsigned long now = jiffies;

		if (time_before(now, expires))
			expires -= now;
		else
			expires = 0;
	}

	memcpy(metrics, dst_metrics_ptr(&rt->dst), sizeof(metrics));
	/* A learned PMTU overrides the MTU metric while it is valid. */
	if (rt->rt_pmtu && expires)
		metrics[RTAX_MTU - 1] = rt->rt_pmtu;
	if (rtnetlink_put_metrics(skb, metrics) < 0)
		goto nla_put_failure;

	if (fl4->flowi4_mark &&
	    nla_put_u32(skb, RTA_MARK, fl4->flowi4_mark))
		goto nla_put_failure;

	error = rt->dst.error;

	if (rt_is_input_route(rt)) {
#ifdef CONFIG_IP_MROUTE
		if (ipv4_is_multicast(dst) && !ipv4_is_local_multicast(dst) &&
		    IPV4_DEVCONF_ALL(net, MC_FORWARDING)) {
			int err = ipmr_get_route(net, skb,
						 fl4->saddr, fl4->daddr,
						 r, nowait);
			if (err <= 0) {
				if (!nowait) {
					if (err == 0)
						return 0;
					goto nla_put_failure;
				} else {
					if (err == -EMSGSIZE)
						goto nla_put_failure;
					error = err;
				}
			}
		} else
#endif
			if (nla_put_u32(skb, RTA_IIF, skb->dev->ifindex))
				goto nla_put_failure;
	}

	if (rtnl_put_cacheinfo(skb, &rt->dst, 0, expires, error) < 0)
		goto nla_put_failure;

	return nlmsg_end(skb, nlh);

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
/* RTM_GETROUTE handler: resolve the route described by the request
 * (input path when RTA_IIF is given, output path otherwise) and unicast
 * the result back to the requester.
 */
static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh)
{
	struct net *net = sock_net(in_skb->sk);
	struct rtmsg *rtm;
	struct nlattr *tb[RTA_MAX+1];
	struct rtable *rt = NULL;
	struct flowi4 fl4;
	__be32 dst = 0;
	__be32 src = 0;
	u32 iif;
	int err;
	int mark;
	struct sk_buff *skb;

	err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv4_policy);
	if (err < 0)
		goto errout;

	rtm = nlmsg_data(nlh);

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (skb == NULL) {
		err = -ENOBUFS;
		goto errout;
	}

	/* Reserve room for dummy headers, this skb can pass
	   through good chunk of routing engine.
	 */
	skb_reset_mac_header(skb);
	skb_reset_network_header(skb);

	/* Bugfix: need to give ip_route_input enough of an IP header to not gag. */
	ip_hdr(skb)->protocol = IPPROTO_ICMP;
	skb_reserve(skb, MAX_HEADER + sizeof(struct iphdr));

	src = tb[RTA_SRC] ? nla_get_be32(tb[RTA_SRC]) : 0;
	dst = tb[RTA_DST] ? nla_get_be32(tb[RTA_DST]) : 0;
	iif = tb[RTA_IIF] ? nla_get_u32(tb[RTA_IIF]) : 0;
	mark = tb[RTA_MARK] ? nla_get_u32(tb[RTA_MARK]) : 0;

	memset(&fl4, 0, sizeof(fl4));
	fl4.daddr = dst;
	fl4.saddr = src;
	fl4.flowi4_tos = rtm->rtm_tos;
	fl4.flowi4_oif = tb[RTA_OIF] ? nla_get_u32(tb[RTA_OIF]) : 0;
	fl4.flowi4_mark = mark;

	if (iif) {
		/* Input-path query: simulate reception on the given device. */
		struct net_device *dev;

		dev = __dev_get_by_index(net, iif);
		if (dev == NULL) {
			err = -ENODEV;
			goto errout_free;
		}

		skb->protocol	= htons(ETH_P_IP);
		skb->dev	= dev;
		skb->mark	= mark;
		local_bh_disable();
		err = ip_route_input(skb, dst, src, rtm->rtm_tos, dev);
		local_bh_enable();

		rt = skb_rtable(skb);
		/* Lookup may "succeed" with an error route (e.g. ip_error). */
		if (err == 0 && rt->dst.error)
			err = -rt->dst.error;
	} else {
		rt = ip_route_output_key(net, &fl4);

		err = 0;
		if (IS_ERR(rt))
			err = PTR_ERR(rt);
	}

	if (err)
		goto errout_free;

	skb_dst_set(skb, &rt->dst);
	if (rtm->rtm_flags & RTM_F_NOTIFY)
		rt->rt_flags |= RTCF_NOTIFY;

	err = rt_fill_info(net, dst, src, &fl4, skb,
			   NETLINK_CB(in_skb).portid, nlh->nlmsg_seq,
			   RTM_NEWROUTE, 0, 0);
	if (err <= 0)
		goto errout_free;

	err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
errout:
	return err;

errout_free:
	kfree_skb(skb);
	goto errout;
}
/* Invalidate cached routes in @in_dev's namespace when its multicast
 * configuration changes.
 */
void ip_rt_multicast_event(struct in_device *in_dev)
{
	rt_cache_flush(dev_net(in_dev->dev));
}
  2127. #ifdef CONFIG_SYSCTL
/* Route GC tunables, exposed below via /proc/sys/net/ipv4/route/.
 * NOTE(review): with the route cache removed these appear kept only for
 * sysctl ABI compatibility — confirm against the rest of the file.
 */
static int ip_rt_gc_timeout __read_mostly	= RT_GC_TIMEOUT;
static int ip_rt_gc_interval __read_mostly  = 60 * HZ;
static int ip_rt_gc_min_interval __read_mostly	= HZ / 2;
static int ip_rt_gc_elasticity __read_mostly	= 8;
  2132. static int ipv4_sysctl_rtcache_flush(struct ctl_table *__ctl, int write,
  2133. void __user *buffer,
  2134. size_t *lenp, loff_t *ppos)
  2135. {
  2136. struct net *net = (struct net *)__ctl->extra1;
  2137. if (write) {
  2138. rt_cache_flush(net);
  2139. fnhe_genid_bump(net);
  2140. return 0;
  2141. }
  2142. return -EINVAL;
  2143. }
/* Global (init_net only) route sysctls under /proc/sys/net/ipv4/route/. */
static struct ctl_table ipv4_route_table[] = {
	{
		.procname	= "gc_thresh",
		.data		= &ipv4_dst_ops.gc_thresh,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "max_size",
		.data		= &ip_rt_max_size,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		/* Deprecated. Use gc_min_interval_ms */
		.procname	= "gc_min_interval",
		.data		= &ip_rt_gc_min_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "gc_min_interval_ms",
		.data		= &ip_rt_gc_min_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_ms_jiffies,
	},
	{
		.procname	= "gc_timeout",
		.data		= &ip_rt_gc_timeout,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "gc_interval",
		.data		= &ip_rt_gc_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "redirect_load",
		.data		= &ip_rt_redirect_load,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "redirect_number",
		.data		= &ip_rt_redirect_number,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "redirect_silence",
		.data		= &ip_rt_redirect_silence,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "error_cost",
		.data		= &ip_rt_error_cost,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "error_burst",
		.data		= &ip_rt_error_burst,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "gc_elasticity",
		.data		= &ip_rt_gc_elasticity,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "mtu_expires",
		.data		= &ip_rt_mtu_expires,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "min_pmtu",
		.data		= &ip_rt_min_pmtu,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "min_adv_mss",
		.data		= &ip_rt_min_advmss,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{ }	/* sentinel */
};
/* Per-namespace "flush" sysctl; duplicated per net in
 * sysctl_route_net_init() so extra1 can carry the namespace pointer.
 */
static struct ctl_table ipv4_route_flush_table[] = {
	{
		.procname	= "flush",
		.maxlen		= sizeof(int),
		.mode		= 0200,	/* write-only */
		.proc_handler	= ipv4_sysctl_rtcache_flush,
	},
	{ },	/* sentinel */
};
  2262. static __net_init int sysctl_route_net_init(struct net *net)
  2263. {
  2264. struct ctl_table *tbl;
  2265. tbl = ipv4_route_flush_table;
  2266. if (!net_eq(net, &init_net)) {
  2267. tbl = kmemdup(tbl, sizeof(ipv4_route_flush_table), GFP_KERNEL);
  2268. if (tbl == NULL)
  2269. goto err_dup;
  2270. /* Don't export sysctls to unprivileged users */
  2271. if (net->user_ns != &init_user_ns)
  2272. tbl[0].procname = NULL;
  2273. }
  2274. tbl[0].extra1 = net;
  2275. net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
  2276. if (net->ipv4.route_hdr == NULL)
  2277. goto err_reg;
  2278. return 0;
  2279. err_reg:
  2280. if (tbl != ipv4_route_flush_table)
  2281. kfree(tbl);
  2282. err_dup:
  2283. return -ENOMEM;
  2284. }
  2285. static __net_exit void sysctl_route_net_exit(struct net *net)
  2286. {
  2287. struct ctl_table *tbl;
  2288. tbl = net->ipv4.route_hdr->ctl_table_arg;
  2289. unregister_net_sysctl_table(net->ipv4.route_hdr);
  2290. BUG_ON(tbl == ipv4_route_flush_table);
  2291. kfree(tbl);
  2292. }
/* Pernet hooks for the route sysctl registration above. */
static __net_initdata struct pernet_operations sysctl_route_ops = {
	.init = sysctl_route_net_init,
	.exit = sysctl_route_net_exit,
};
  2297. #endif
/* Per-namespace init of the route/nexthop generation counters and the
 * random seed used for device-address-derived generation ids.
 */
static __net_init int rt_genid_init(struct net *net)
{
	atomic_set(&net->ipv4.rt_genid, 0);
	atomic_set(&net->fnhe_genid, 0);
	get_random_bytes(&net->ipv4.dev_addr_genid,
			 sizeof(net->ipv4.dev_addr_genid));
	return 0;
}
/* Pernet hook for generation-id initialization; no exit needed. */
static __net_initdata struct pernet_operations rt_genid_ops = {
	.init = rt_genid_init,
};
  2309. static int __net_init ipv4_inetpeer_init(struct net *net)
  2310. {
  2311. struct inet_peer_base *bp = kmalloc(sizeof(*bp), GFP_KERNEL);
  2312. if (!bp)
  2313. return -ENOMEM;
  2314. inet_peer_base_init(bp);
  2315. net->ipv4.peers = bp;
  2316. return 0;
  2317. }
/* Release this namespace's inet_peer base and all peers hanging off it. */
static void __net_exit ipv4_inetpeer_exit(struct net *net)
{
	struct inet_peer_base *bp = net->ipv4.peers;

	net->ipv4.peers = NULL;
	inetpeer_invalidate_tree(bp);
	kfree(bp);
}
/* Pernet hooks for the inet_peer base lifecycle. */
static __net_initdata struct pernet_operations ipv4_inetpeer_ops = {
	.init	= ipv4_inetpeer_init,
	.exit	= ipv4_inetpeer_exit,
};
#ifdef CONFIG_IP_ROUTE_CLASSID
/* Per-CPU route classification accounting, allocated in ip_rt_init(). */
struct ip_rt_acct __percpu *ip_rt_acct __read_mostly;
#endif /* CONFIG_IP_ROUTE_CLASSID */
/* Boot-time initialization of the IPv4 routing subsystem: IP-ID state,
 * dst caches/counters, devinet and FIB, proc files, xfrm hooks, the
 * RTM_GETROUTE handler and the pernet subsystems.  Panics on allocation
 * failure since routing is essential.  Ordering matters here.
 */
int __init ip_rt_init(void)
{
	int rc = 0;

	ip_idents = kmalloc(IP_IDENTS_SZ * sizeof(*ip_idents), GFP_KERNEL);
	if (!ip_idents)
		panic("IP: failed to allocate ip_idents\n");

	/* Randomize initial IP-ID state to make IDs unpredictable. */
	prandom_bytes(ip_idents, IP_IDENTS_SZ * sizeof(*ip_idents));

#ifdef CONFIG_IP_ROUTE_CLASSID
	ip_rt_acct = __alloc_percpu(256 * sizeof(struct ip_rt_acct), __alignof__(struct ip_rt_acct));
	if (!ip_rt_acct)
		panic("IP: failed to allocate ip_rt_acct\n");
#endif

	ipv4_dst_ops.kmem_cachep =
		kmem_cache_create("ip_dst_cache", sizeof(struct rtable), 0,
				  SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

	/* Blackhole dsts share the regular rtable slab. */
	ipv4_dst_blackhole_ops.kmem_cachep = ipv4_dst_ops.kmem_cachep;

	if (dst_entries_init(&ipv4_dst_ops) < 0)
		panic("IP: failed to allocate ipv4_dst_ops counter\n");

	if (dst_entries_init(&ipv4_dst_blackhole_ops) < 0)
		panic("IP: failed to allocate ipv4_dst_blackhole_ops counter\n");

	/* Effectively disable dst GC thresholds/limits. */
	ipv4_dst_ops.gc_thresh = ~0;
	ip_rt_max_size = INT_MAX;

	devinet_init();
	ip_fib_init();

	if (ip_rt_proc_init())
		pr_err("Unable to create route proc files\n");
#ifdef CONFIG_XFRM
	xfrm_init();
	xfrm4_init();
#endif
	rtnl_register(PF_INET, RTM_GETROUTE, inet_rtm_getroute, NULL, NULL);

#ifdef CONFIG_SYSCTL
	register_pernet_subsys(&sysctl_route_ops);
#endif
	register_pernet_subsys(&rt_genid_ops);
	register_pernet_subsys(&ipv4_inetpeer_ops);
	return rc;
}
#ifdef CONFIG_SYSCTL
/*
 * We really need to sanitize the damn ipv4 init order, then all
 * this nonsense will go away.
 */
/* Register the global (init_net) route sysctl table early in boot. */
void __init ip_static_sysctl_init(void)
{
	register_net_sysctl(&init_net, "net/ipv4/route", ipv4_route_table);
}
#endif