
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		The User Datagram Protocol (UDP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Alan Cox, <alan@lxorguk.ukuu.org.uk>
 *		Hirokazu Takahashi, <taka@valinux.co.jp>
 *
 * Fixes:
 *		Alan Cox	:	verify_area() calls
 *		Alan Cox	:	stopped close while in use off icmp
 *					messages. Not a fix but a botch that
 *					for udp at least is 'valid'.
 *		Alan Cox	:	Fixed icmp handling properly
 *		Alan Cox	:	Correct error for oversized datagrams
 *		Alan Cox	:	Tidied select() semantics.
 *		Alan Cox	:	udp_err() fixed properly, also now
 *					select and read wake correctly on errors
 *		Alan Cox	:	udp_send verify_area moved to avoid mem leak
 *		Alan Cox	:	UDP can count its memory
 *		Alan Cox	:	send to an unknown connection causes
 *					an ECONNREFUSED off the icmp, but
 *					does NOT close.
 *		Alan Cox	:	Switched to new sk_buff handlers. No more backlog!
 *		Alan Cox	:	Using generic datagram code. Even smaller and the PEEK
 *					bug no longer crashes it.
 *		Fred Van Kempen	:	Net2e support for sk->broadcast.
 *		Alan Cox	:	Uses skb_free_datagram
 *		Alan Cox	:	Added get/set sockopt support.
 *		Alan Cox	:	Broadcasting without option set returns EACCES.
 *		Alan Cox	:	No wakeup calls. Instead we now use the callbacks.
 *		Alan Cox	:	Use ip_tos and ip_ttl
 *		Alan Cox	:	SNMP Mibs
 *		Alan Cox	:	MSG_DONTROUTE, and 0.0.0.0 support.
 *		Matt Dillon	:	UDP length checks.
 *		Alan Cox	:	Smarter af_inet used properly.
 *		Alan Cox	:	Use new kernel side addressing.
 *		Alan Cox	:	Incorrect return on truncated datagram receive.
 *		Arnt Gulbrandsen:	New udp_send and stuff
 *		Alan Cox	:	Cache last socket
 *		Alan Cox	:	Route cache
 *		Jon Peatfield	:	Minor efficiency fix to sendto().
 *		Mike Shaver	:	RFC1122 checks.
 *		Alan Cox	:	Nonblocking error fix.
 *		Willy Konynenberg:	Transparent proxying support.
 *		Mike McLagan	:	Routing by source
 *		David S. Miller	:	New socket lookup architecture.
 *					Last socket cache retained as it
 *					does have a high hit rate.
 *		Olaf Kirch	:	Don't linearise iovec on sendmsg.
 *		Andi Kleen	:	Some cleanups, cache destination entry
 *					for connect.
 *		Vitaly E. Lavrov:	Transparent proxy revived after year coma.
 *		Melvin Smith	:	Check msg_name not msg_namelen in sendto(),
 *					return ENOTCONN for unconnected sockets (POSIX)
 *		Janos Farkas	:	don't deliver multi/broadcasts to a different
 *					bound-to-device socket
 *		Hirokazu Takahashi:	HW checksumming for outgoing UDP
 *					datagrams.
 *		Hirokazu Takahashi:	sendfile() on UDP works now.
 *		Arnaldo C. Melo	:	convert /proc/net/udp to seq_file
 *		YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *		Alexey Kuznetsov:		allow both IPv4 and IPv6 sockets to bind
 *						a single port at the same time.
 *		Derek Atkins <derek@ihtfp.com>:	Add Encapsulation Support
 *		James Chapman	:	Add L2TP encapsulation type.
 *
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */
#define pr_fmt(fmt) "UDP: " fmt

#include <linux/uaccess.h>
#include <asm/ioctls.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/module.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/igmp.h>
#include <linux/inetdevice.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <net/tcp_states.h>
#include <linux/skbuff.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <net/net_namespace.h>
#include <net/icmp.h>
#include <net/inet_hashtables.h>
#include <net/route.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <trace/events/udp.h>
#include <linux/static_key.h>
#include <trace/events/skb.h>
#include <net/busy_poll.h>
#include "udp_impl.h"
#include <net/sock_reuseport.h>
#include <net/addrconf.h>

struct udp_table udp_table __read_mostly;
EXPORT_SYMBOL(udp_table);

long sysctl_udp_mem[3] __read_mostly;
EXPORT_SYMBOL(sysctl_udp_mem);

atomic_long_t udp_memory_allocated;
EXPORT_SYMBOL(udp_memory_allocated);

#define MAX_UDP_PORTS		65536
#define PORTS_PER_CHAIN		(MAX_UDP_PORTS / UDP_HTABLE_SIZE_MIN)
/* IPCB reference means this can not be used from early demux */
static bool udp_lib_exact_dif_match(struct net *net, struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
	if (!net->ipv4.sysctl_udp_l3mdev_accept &&
	    skb && ipv4_l3mdev_skb(IPCB(skb)->flags))
		return true;
#endif
	return false;
}

static int udp_lib_lport_inuse(struct net *net, __u16 num,
			       const struct udp_hslot *hslot,
			       unsigned long *bitmap,
			       struct sock *sk, unsigned int log)
{
	struct sock *sk2;
	kuid_t uid = sock_i_uid(sk);

	sk_for_each(sk2, &hslot->head) {
		if (net_eq(sock_net(sk2), net) &&
		    sk2 != sk &&
		    (bitmap || udp_sk(sk2)->udp_port_hash == num) &&
		    (!sk2->sk_reuse || !sk->sk_reuse) &&
		    (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if ||
		     sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
		    inet_rcv_saddr_equal(sk, sk2, true)) {
			if (sk2->sk_reuseport && sk->sk_reuseport &&
			    !rcu_access_pointer(sk->sk_reuseport_cb) &&
			    uid_eq(uid, sock_i_uid(sk2))) {
				if (!bitmap)
					return 0;
			} else {
				if (!bitmap)
					return 1;
				__set_bit(udp_sk(sk2)->udp_port_hash >> log,
					  bitmap);
			}
		}
	}
	return 0;
}
/*
 * Note: we still hold spinlock of primary hash chain, so no other writer
 * can insert/delete a socket with local_port == num
 */
static int udp_lib_lport_inuse2(struct net *net, __u16 num,
				struct udp_hslot *hslot2,
				struct sock *sk)
{
	struct sock *sk2;
	kuid_t uid = sock_i_uid(sk);
	int res = 0;

	spin_lock(&hslot2->lock);
	udp_portaddr_for_each_entry(sk2, &hslot2->head) {
		if (net_eq(sock_net(sk2), net) &&
		    sk2 != sk &&
		    (udp_sk(sk2)->udp_port_hash == num) &&
		    (!sk2->sk_reuse || !sk->sk_reuse) &&
		    (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if ||
		     sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
		    inet_rcv_saddr_equal(sk, sk2, true)) {
			if (sk2->sk_reuseport && sk->sk_reuseport &&
			    !rcu_access_pointer(sk->sk_reuseport_cb) &&
			    uid_eq(uid, sock_i_uid(sk2))) {
				res = 0;
			} else {
				res = 1;
			}
			break;
		}
	}
	spin_unlock(&hslot2->lock);
	return res;
}

static int udp_reuseport_add_sock(struct sock *sk, struct udp_hslot *hslot)
{
	struct net *net = sock_net(sk);
	kuid_t uid = sock_i_uid(sk);
	struct sock *sk2;

	sk_for_each(sk2, &hslot->head) {
		if (net_eq(sock_net(sk2), net) &&
		    sk2 != sk &&
		    sk2->sk_family == sk->sk_family &&
		    ipv6_only_sock(sk2) == ipv6_only_sock(sk) &&
		    (udp_sk(sk2)->udp_port_hash == udp_sk(sk)->udp_port_hash) &&
		    (sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
		    sk2->sk_reuseport && uid_eq(uid, sock_i_uid(sk2)) &&
		    inet_rcv_saddr_equal(sk, sk2, false)) {
			return reuseport_add_sock(sk, sk2,
						  inet_rcv_saddr_any(sk));
		}
	}

	return reuseport_alloc(sk, inet_rcv_saddr_any(sk));
}
/**
 *  udp_lib_get_port  -  UDP/-Lite port lookup for IPv4 and IPv6
 *
 *  @sk:          socket struct in question
 *  @snum:        port number to look up
 *  @hash2_nulladdr: AF-dependent hash value in secondary hash chains,
 *                   with NULL address
 */
int udp_lib_get_port(struct sock *sk, unsigned short snum,
		     unsigned int hash2_nulladdr)
{
	struct udp_hslot *hslot, *hslot2;
	struct udp_table *udptable = sk->sk_prot->h.udp_table;
	int    error = 1;
	struct net *net = sock_net(sk);

	if (!snum) {
		int low, high, remaining;
		unsigned int rand;
		unsigned short first, last;
		DECLARE_BITMAP(bitmap, PORTS_PER_CHAIN);

		inet_get_local_port_range(net, &low, &high);
		remaining = (high - low) + 1;

		rand = prandom_u32();
		first = reciprocal_scale(rand, remaining) + low;
		/*
		 * force rand to be an odd multiple of UDP_HTABLE_SIZE
		 */
		rand = (rand | 1) * (udptable->mask + 1);
		last = first + udptable->mask + 1;
		do {
			hslot = udp_hashslot(udptable, net, first);
			bitmap_zero(bitmap, PORTS_PER_CHAIN);
			spin_lock_bh(&hslot->lock);
			udp_lib_lport_inuse(net, snum, hslot, bitmap, sk,
					    udptable->log);

			snum = first;
			/*
			 * Iterate on all possible values of snum for this hash.
			 * Using steps of an odd multiple of UDP_HTABLE_SIZE
			 * gives us randomization and full range coverage.
			 */
			do {
				if (low <= snum && snum <= high &&
				    !test_bit(snum >> udptable->log, bitmap) &&
				    !inet_is_local_reserved_port(net, snum))
					goto found;
				snum += rand;
			} while (snum != first);
			spin_unlock_bh(&hslot->lock);
			cond_resched();
		} while (++first != last);
		goto fail;
	} else {
		hslot = udp_hashslot(udptable, net, snum);
		spin_lock_bh(&hslot->lock);
		if (hslot->count > 10) {
			int exist;
			unsigned int slot2 = udp_sk(sk)->udp_portaddr_hash ^ snum;

			slot2          &= udptable->mask;
			hash2_nulladdr &= udptable->mask;

			hslot2 = udp_hashslot2(udptable, slot2);
			if (hslot->count < hslot2->count)
				goto scan_primary_hash;

			exist = udp_lib_lport_inuse2(net, snum, hslot2, sk);
			if (!exist && (hash2_nulladdr != slot2)) {
				hslot2 = udp_hashslot2(udptable, hash2_nulladdr);
				exist = udp_lib_lport_inuse2(net, snum, hslot2,
							     sk);
			}
			if (exist)
				goto fail_unlock;
			else
				goto found;
		}
scan_primary_hash:
		if (udp_lib_lport_inuse(net, snum, hslot, NULL, sk, 0))
			goto fail_unlock;
	}
found:
	inet_sk(sk)->inet_num = snum;
	udp_sk(sk)->udp_port_hash = snum;
	udp_sk(sk)->udp_portaddr_hash ^= snum;
	if (sk_unhashed(sk)) {
		if (sk->sk_reuseport &&
		    udp_reuseport_add_sock(sk, hslot)) {
			inet_sk(sk)->inet_num = 0;
			udp_sk(sk)->udp_port_hash = 0;
			udp_sk(sk)->udp_portaddr_hash ^= snum;
			goto fail_unlock;
		}

		sk_add_node_rcu(sk, &hslot->head);
		hslot->count++;
		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);

		hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash);
		spin_lock(&hslot2->lock);
		if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport &&
		    sk->sk_family == AF_INET6)
			hlist_add_tail_rcu(&udp_sk(sk)->udp_portaddr_node,
					   &hslot2->head);
		else
			hlist_add_head_rcu(&udp_sk(sk)->udp_portaddr_node,
					   &hslot2->head);
		hslot2->count++;
		spin_unlock(&hslot2->lock);
	}
	sock_set_flag(sk, SOCK_RCU_FREE);
	error = 0;
fail_unlock:
	spin_unlock_bh(&hslot->lock);
fail:
	return error;
}
EXPORT_SYMBOL(udp_lib_get_port);

int udp_v4_get_port(struct sock *sk, unsigned short snum)
{
	unsigned int hash2_nulladdr =
		ipv4_portaddr_hash(sock_net(sk), htonl(INADDR_ANY), snum);
	unsigned int hash2_partial =
		ipv4_portaddr_hash(sock_net(sk), inet_sk(sk)->inet_rcv_saddr, 0);

	/* precompute partial secondary hash */
	udp_sk(sk)->udp_portaddr_hash = hash2_partial;
	return udp_lib_get_port(sk, snum, hash2_nulladdr);
}
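
/*
 * Editor's note (not part of the original file): the port selection above
 * is driven from userspace by bind(). A minimal sketch, assuming the usual
 * Linux socket API: binding with sin_port == 0 enters the "!snum" branch
 * of udp_lib_get_port() and picks a random ephemeral port from the
 * ip_local_port_range sysctl; getsockname() reveals the kernel's choice.
 */
#if 0	/* illustrative userspace sketch, not compiled with the kernel */
#include <arpa/inet.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	struct sockaddr_in a;
	socklen_t alen = sizeof(a);
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&a, 0, sizeof(a));
	a.sin_family = AF_INET;
	a.sin_addr.s_addr = htonl(INADDR_ANY);
	a.sin_port = 0;			/* let udp_lib_get_port() choose */
	if (bind(fd, (struct sockaddr *)&a, sizeof(a)) == 0 &&
	    getsockname(fd, (struct sockaddr *)&a, &alen) == 0)
		printf("kernel picked port %u\n", ntohs(a.sin_port));
	close(fd);
	return 0;
}
#endif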
static int compute_score(struct sock *sk, struct net *net,
			 __be32 saddr, __be16 sport,
			 __be32 daddr, unsigned short hnum,
			 int dif, int sdif, bool exact_dif)
{
	int score;
	struct inet_sock *inet;

	if (!net_eq(sock_net(sk), net) ||
	    udp_sk(sk)->udp_port_hash != hnum ||
	    ipv6_only_sock(sk))
		return -1;

	score = (sk->sk_family == PF_INET) ? 2 : 1;
	inet = inet_sk(sk);

	if (inet->inet_rcv_saddr) {
		if (inet->inet_rcv_saddr != daddr)
			return -1;
		score += 4;
	}

	if (inet->inet_daddr) {
		if (inet->inet_daddr != saddr)
			return -1;
		score += 4;
	}

	if (inet->inet_dport) {
		if (inet->inet_dport != sport)
			return -1;
		score += 4;
	}

	if (sk->sk_bound_dev_if || exact_dif) {
		bool dev_match = (sk->sk_bound_dev_if == dif ||
				  sk->sk_bound_dev_if == sdif);

		if (!dev_match)
			return -1;
		if (sk->sk_bound_dev_if)
			score += 4;
	}

	if (sk->sk_incoming_cpu == raw_smp_processor_id())
		score++;
	return score;
}
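
/*
 * Editor's note (worked example, not part of the original file): for an
 * AF_INET socket that is bound to a local address and device and also
 * connected to the sender, the score above works out to
 *
 *	2 (PF_INET) + 4 (rcv_saddr) + 4 (daddr) + 4 (dport) + 4 (dev) = 18,
 *
 * plus 1 if the packet arrives on the socket's sk_incoming_cpu. A plain
 * wildcard listener on the same port scores only 2, so the most specific
 * match always wins the "score > badness" comparisons in the lookups below.
 */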
static u32 udp_ehashfn(const struct net *net, const __be32 laddr,
		       const __u16 lport, const __be32 faddr,
		       const __be16 fport)
{
	static u32 udp_ehash_secret __read_mostly;

	net_get_random_once(&udp_ehash_secret, sizeof(udp_ehash_secret));

	return __inet_ehashfn(laddr, lport, faddr, fport,
			      udp_ehash_secret + net_hash_mix(net));
}

/* called with rcu_read_lock() */
static struct sock *udp4_lib_lookup2(struct net *net,
				     __be32 saddr, __be16 sport,
				     __be32 daddr, unsigned int hnum,
				     int dif, int sdif, bool exact_dif,
				     struct udp_hslot *hslot2,
				     struct sk_buff *skb)
{
	struct sock *sk, *result;
	int score, badness;
	u32 hash = 0;

	result = NULL;
	badness = 0;
	udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
		score = compute_score(sk, net, saddr, sport,
				      daddr, hnum, dif, sdif, exact_dif);
		if (score > badness) {
			if (sk->sk_reuseport) {
				hash = udp_ehashfn(net, daddr, hnum,
						   saddr, sport);
				result = reuseport_select_sock(sk, hash, skb,
							sizeof(struct udphdr));
				if (result)
					return result;
			}
			badness = score;
			result = sk;
		}
	}
	return result;
}
/* UDP is nearly always wildcards out the wazoo, it makes no sense to try
 * harder than this. -DaveM
 */
struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr,
		__be16 sport, __be32 daddr, __be16 dport, int dif,
		int sdif, struct udp_table *udptable, struct sk_buff *skb)
{
	struct sock *sk, *result;
	unsigned short hnum = ntohs(dport);
	unsigned int hash2, slot2, slot = udp_hashfn(net, hnum, udptable->mask);
	struct udp_hslot *hslot2, *hslot = &udptable->hash[slot];
	bool exact_dif = udp_lib_exact_dif_match(net, skb);
	int score, badness;
	u32 hash = 0;

	if (hslot->count > 10) {
		hash2 = ipv4_portaddr_hash(net, daddr, hnum);
		slot2 = hash2 & udptable->mask;
		hslot2 = &udptable->hash2[slot2];
		if (hslot->count < hslot2->count)
			goto begin;

		result = udp4_lib_lookup2(net, saddr, sport,
					  daddr, hnum, dif, sdif,
					  exact_dif, hslot2, skb);
		if (!result) {
			unsigned int old_slot2 = slot2;

			hash2 = ipv4_portaddr_hash(net, htonl(INADDR_ANY), hnum);
			slot2 = hash2 & udptable->mask;
			/* avoid searching the same slot again. */
			if (unlikely(slot2 == old_slot2))
				return result;

			hslot2 = &udptable->hash2[slot2];
			if (hslot->count < hslot2->count)
				goto begin;

			result = udp4_lib_lookup2(net, saddr, sport,
						  daddr, hnum, dif, sdif,
						  exact_dif, hslot2, skb);
		}
		if (unlikely(IS_ERR(result)))
			return NULL;
		return result;
	}
begin:
	result = NULL;
	badness = 0;
	sk_for_each_rcu(sk, &hslot->head) {
		score = compute_score(sk, net, saddr, sport,
				      daddr, hnum, dif, sdif, exact_dif);
		if (score > badness) {
			if (sk->sk_reuseport) {
				hash = udp_ehashfn(net, daddr, hnum,
						   saddr, sport);
				result = reuseport_select_sock(sk, hash, skb,
							sizeof(struct udphdr));
				if (unlikely(IS_ERR(result)))
					return NULL;
				if (result)
					return result;
			}
			result = sk;
			badness = score;
		}
	}
	return result;
}
EXPORT_SYMBOL_GPL(__udp4_lib_lookup);

static inline struct sock *__udp4_lib_lookup_skb(struct sk_buff *skb,
						 __be16 sport, __be16 dport,
						 struct udp_table *udptable)
{
	const struct iphdr *iph = ip_hdr(skb);

	return __udp4_lib_lookup(dev_net(skb->dev), iph->saddr, sport,
				 iph->daddr, dport, inet_iif(skb),
				 inet_sdif(skb), udptable, skb);
}

struct sock *udp4_lib_lookup_skb(struct sk_buff *skb,
				 __be16 sport, __be16 dport)
{
	return __udp4_lib_lookup_skb(skb, sport, dport, &udp_table);
}
EXPORT_SYMBOL_GPL(udp4_lib_lookup_skb);

/* Must be called under rcu_read_lock().
 * Does increment socket refcount.
 */
#if IS_ENABLED(CONFIG_NF_TPROXY_IPV4) || IS_ENABLED(CONFIG_NF_SOCKET_IPV4)
struct sock *udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport,
			     __be32 daddr, __be16 dport, int dif)
{
	struct sock *sk;

	sk = __udp4_lib_lookup(net, saddr, sport, daddr, dport,
			       dif, 0, &udp_table, NULL);
	if (sk && !refcount_inc_not_zero(&sk->sk_refcnt))
		sk = NULL;
	return sk;
}
EXPORT_SYMBOL_GPL(udp4_lib_lookup);
#endif
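
/*
 * Editor's note (calling-convention sketch, not part of the original file):
 * __udp4_lib_lookup() returns a socket pointer that is only guaranteed to
 * stay valid inside the caller's RCU read-side section; udp4_lib_lookup()
 * above additionally takes a refcount so the pointer may outlive it. A
 * hypothetical in-kernel caller would look roughly like this:
 */
#if 0	/* illustrative sketch only */
	struct sock *sk;

	rcu_read_lock();
	sk = __udp4_lib_lookup(net, saddr, sport, daddr, dport,
			       dif, 0, &udp_table, NULL);
	if (sk && refcount_inc_not_zero(&sk->sk_refcnt)) {
		rcu_read_unlock();
		/* ... use sk outside the RCU section ... */
		sock_put(sk);		/* drop the reference we took */
	} else {
		rcu_read_unlock();
	}
#endif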
static inline bool __udp_is_mcast_sock(struct net *net, struct sock *sk,
				       __be16 loc_port, __be32 loc_addr,
				       __be16 rmt_port, __be32 rmt_addr,
				       int dif, int sdif, unsigned short hnum)
{
	struct inet_sock *inet = inet_sk(sk);

	if (!net_eq(sock_net(sk), net) ||
	    udp_sk(sk)->udp_port_hash != hnum ||
	    (inet->inet_daddr && inet->inet_daddr != rmt_addr) ||
	    (inet->inet_dport != rmt_port && inet->inet_dport) ||
	    (inet->inet_rcv_saddr && inet->inet_rcv_saddr != loc_addr) ||
	    ipv6_only_sock(sk) ||
	    (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif &&
	     sk->sk_bound_dev_if != sdif))
		return false;
	if (!ip_mc_sf_allow(sk, loc_addr, rmt_addr, dif, sdif))
		return false;
	return true;
}
/*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition.  If err < 0 then the socket should
 * be closed and the error returned to the user.  If err > 0
 * it's just the icmp type << 8 | icmp code.
 * Header points to the ip header of the error packet. We move
 * on past this. Then (as it used to claim before adjustment)
 * header points to the first 8 bytes of the udp header.  We need
 * to find the appropriate port.
 */
void __udp4_lib_err(struct sk_buff *skb, u32 info, struct udp_table *udptable)
{
	struct inet_sock *inet;
	const struct iphdr *iph = (const struct iphdr *)skb->data;
	struct udphdr *uh = (struct udphdr *)(skb->data + (iph->ihl << 2));
	const int type = icmp_hdr(skb)->type;
	const int code = icmp_hdr(skb)->code;
	struct sock *sk;
	int harderr;
	int err;
	struct net *net = dev_net(skb->dev);

	sk = __udp4_lib_lookup(net, iph->daddr, uh->dest,
			       iph->saddr, uh->source, skb->dev->ifindex, 0,
			       udptable, NULL);
	if (!sk) {
		__ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
		return;	/* No socket for error */
	}

	err = 0;
	harderr = 0;
	inet = inet_sk(sk);

	switch (type) {
	default:
	case ICMP_TIME_EXCEEDED:
		err = EHOSTUNREACH;
		break;
	case ICMP_SOURCE_QUENCH:
		goto out;
	case ICMP_PARAMETERPROB:
		err = EPROTO;
		harderr = 1;
		break;
	case ICMP_DEST_UNREACH:
		if (code == ICMP_FRAG_NEEDED) { /* Path MTU discovery */
			ipv4_sk_update_pmtu(skb, sk, info);
			if (inet->pmtudisc != IP_PMTUDISC_DONT) {
				err = EMSGSIZE;
				harderr = 1;
				break;
			}
			goto out;
		}
		err = EHOSTUNREACH;
		if (code <= NR_ICMP_UNREACH) {
			harderr = icmp_err_convert[code].fatal;
			err = icmp_err_convert[code].errno;
		}
		break;
	case ICMP_REDIRECT:
		ipv4_sk_redirect(skb, sk);
		goto out;
	}

	/*
	 * RFC1122: OK.  Passes ICMP errors back to application, as per
	 * 4.1.3.3.
	 */
	if (!inet->recverr) {
		if (!harderr || sk->sk_state != TCP_ESTABLISHED)
			goto out;
	} else
		ip_icmp_error(sk, skb, err, uh->dest, info, (u8 *)(uh + 1));

	sk->sk_err = err;
	sk->sk_error_report(sk);
out:
	return;
}

void udp_err(struct sk_buff *skb, u32 info)
{
	__udp4_lib_err(skb, info, &udp_table);
}
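
/*
 * Editor's note (userspace view, not part of the original file): the
 * inet->recverr test above corresponds to the IP_RECVERR socket option.
 * A minimal sketch of how an application observes these ICMP-derived
 * errors on a connected UDP socket (assuming nothing listens on the
 * destination port, so an ICMP port-unreachable comes back):
 */
#if 0	/* illustrative userspace sketch, not compiled with the kernel */
	int one = 1;

	setsockopt(fd, IPPROTO_IP, IP_RECVERR, &one, sizeof(one));
	connect(fd, (struct sockaddr *)&dst, sizeof(dst));
	send(fd, "ping", 4, 0);
	/* a later send()/recv() now fails with errno == ECONNREFUSED,
	 * and the full error report is queued on MSG_ERRQUEUE */
#endif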
/*
 * Throw away all pending data and cancel the corking. Socket is locked.
 */
void udp_flush_pending_frames(struct sock *sk)
{
	struct udp_sock *up = udp_sk(sk);

	if (up->pending) {
		up->len = 0;
		up->pending = 0;
		ip_flush_pending_frames(sk);
	}
}
EXPORT_SYMBOL(udp_flush_pending_frames);

/**
 * 	udp4_hwcsum  -  handle outgoing HW checksumming
 * 	@skb:	sk_buff containing the filled-in UDP header
 * 	        (checksum field must be zeroed out)
 *	@src:	source IP address
 *	@dst:	destination IP address
 */
void udp4_hwcsum(struct sk_buff *skb, __be32 src, __be32 dst)
{
	struct udphdr *uh = udp_hdr(skb);
	int offset = skb_transport_offset(skb);
	int len = skb->len - offset;
	int hlen = len;
	__wsum csum = 0;

	if (!skb_has_frag_list(skb)) {
		/*
		 * Only one fragment on the socket.
		 */
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct udphdr, check);
		uh->check = ~csum_tcpudp_magic(src, dst, len,
					       IPPROTO_UDP, 0);
	} else {
		struct sk_buff *frags;

		/*
		 * HW-checksum won't work as there are two or more
		 * fragments on the socket so that all csums of sk_buffs
		 * should be together
		 */
		skb_walk_frags(skb, frags) {
			csum = csum_add(csum, frags->csum);
			hlen -= frags->len;
		}

		csum = skb_checksum(skb, offset, hlen, csum);
		skb->ip_summed = CHECKSUM_NONE;

		uh->check = csum_tcpudp_magic(src, dst, len, IPPROTO_UDP, csum);
		if (uh->check == 0)
			uh->check = CSUM_MANGLED_0;
	}
}
EXPORT_SYMBOL_GPL(udp4_hwcsum);

/* Function to set UDP checksum for an IPv4 UDP packet. This is intended
 * for the simple case like when setting the checksum for a UDP tunnel.
 */
void udp_set_csum(bool nocheck, struct sk_buff *skb,
		  __be32 saddr, __be32 daddr, int len)
{
	struct udphdr *uh = udp_hdr(skb);

	if (nocheck) {
		uh->check = 0;
	} else if (skb_is_gso(skb)) {
		uh->check = ~udp_v4_check(len, saddr, daddr, 0);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		uh->check = 0;
		uh->check = udp_v4_check(len, saddr, daddr, lco_csum(skb));
		if (uh->check == 0)
			uh->check = CSUM_MANGLED_0;
	} else {
		skb->ip_summed = CHECKSUM_PARTIAL;
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct udphdr, check);
		uh->check = ~udp_v4_check(len, saddr, daddr, 0);
	}
}
EXPORT_SYMBOL(udp_set_csum);
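
/*
 * Editor's note (worked example, not part of the original file): the
 * CSUM_MANGLED_0 folding above follows RFC 768. UDP's checksum is the
 * one's complement of the one's-complement sum, and a transmitted value
 * of 0x0000 is reserved to mean "no checksum computed". In one's
 * complement arithmetic, 0x0000 and 0xFFFF represent the same number
 * (+0 and -0), so when the computed checksum happens to be 0 it is sent
 * as 0xFFFF instead; receivers verify both forms identically.
 */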
static int udp_send_skb(struct sk_buff *skb, struct flowi4 *fl4,
			struct inet_cork *cork)
{
	struct sock *sk = skb->sk;
	struct inet_sock *inet = inet_sk(sk);
	struct udphdr *uh;
	int err = 0;
	int is_udplite = IS_UDPLITE(sk);
	int offset = skb_transport_offset(skb);
	int len = skb->len - offset;
	__wsum csum = 0;

	/*
	 * Create a UDP header
	 */
	uh = udp_hdr(skb);
	uh->source = inet->inet_sport;
	uh->dest = fl4->fl4_dport;
	uh->len = htons(len);
	uh->check = 0;

	if (cork->gso_size) {
		const int hlen = skb_network_header_len(skb) +
				 sizeof(struct udphdr);

		if (hlen + cork->gso_size > cork->fragsize)
			return -EINVAL;
		if (skb->len > cork->gso_size * UDP_MAX_SEGMENTS)
			return -EINVAL;
		if (sk->sk_no_check_tx)
			return -EINVAL;
		if (skb->ip_summed != CHECKSUM_PARTIAL || is_udplite ||
		    dst_xfrm(skb_dst(skb)))
			return -EIO;

		skb_shinfo(skb)->gso_size = cork->gso_size;
		skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4;
		skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(len - sizeof(uh),
							 cork->gso_size);
		goto csum_partial;
	}

	if (is_udplite)  				 /*     UDP-Lite      */
		csum = udplite_csum(skb);

	else if (sk->sk_no_check_tx) {			 /* UDP csum off */

		skb->ip_summed = CHECKSUM_NONE;
		goto send;

	} else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */
csum_partial:

		udp4_hwcsum(skb, fl4->saddr, fl4->daddr);
		goto send;

	} else
		csum = udp_csum(skb);

	/* add protocol-dependent pseudo-header */
	uh->check = csum_tcpudp_magic(fl4->saddr, fl4->daddr, len,
				      sk->sk_protocol, csum);
	if (uh->check == 0)
		uh->check = CSUM_MANGLED_0;

send:
	err = ip_send_skb(sock_net(sk), skb);
	if (err) {
		if (err == -ENOBUFS && !inet->recverr) {
			UDP_INC_STATS(sock_net(sk),
				      UDP_MIB_SNDBUFERRORS, is_udplite);
			err = 0;
		}
	} else
		UDP_INC_STATS(sock_net(sk),
			      UDP_MIB_OUTDATAGRAMS, is_udplite);
	return err;
}

/*
 * Push out all pending data as one UDP datagram. Socket is locked.
 */
int udp_push_pending_frames(struct sock *sk)
{
	struct udp_sock  *up = udp_sk(sk);
	struct inet_sock *inet = inet_sk(sk);
	struct flowi4 *fl4 = &inet->cork.fl.u.ip4;
	struct sk_buff *skb;
	int err = 0;

	skb = ip_finish_skb(sk, fl4);
	if (!skb)
		goto out;

	err = udp_send_skb(skb, fl4, &inet->cork.base);

out:
	up->len = 0;
	up->pending = 0;
	return err;
}
EXPORT_SYMBOL(udp_push_pending_frames);

static int __udp_cmsg_send(struct cmsghdr *cmsg, u16 *gso_size)
{
	switch (cmsg->cmsg_type) {
	case UDP_SEGMENT:
		if (cmsg->cmsg_len != CMSG_LEN(sizeof(__u16)))
			return -EINVAL;
		*gso_size = *(__u16 *)CMSG_DATA(cmsg);
		return 0;
	default:
		return -EINVAL;
	}
}

int udp_cmsg_send(struct sock *sk, struct msghdr *msg, u16 *gso_size)
{
	struct cmsghdr *cmsg;
	bool need_ip = false;
	int err;

	for_each_cmsghdr(cmsg, msg) {
		if (!CMSG_OK(msg, cmsg))
			return -EINVAL;

		if (cmsg->cmsg_level != SOL_UDP) {
			need_ip = true;
			continue;
		}

		err = __udp_cmsg_send(cmsg, gso_size);
		if (err)
			return err;
	}

	return need_ip;
}
EXPORT_SYMBOL_GPL(udp_cmsg_send);
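
/*
 * Editor's note (not part of the original file): __udp_cmsg_send() above
 * is what parses the UDP_SEGMENT ancillary message that enables UDP GSO
 * for a single sendmsg() call. A minimal userspace sketch, assuming a
 * kernel with UDP_SEGMENT support (v4.18+):
 */
#if 0	/* illustrative userspace sketch, not compiled with the kernel */
	char buf[64 * 1024];			/* payload to be segmented */
	char control[CMSG_SPACE(sizeof(__u16))] = { 0 };
	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
	struct msghdr msg = {
		.msg_iov	= &iov,
		.msg_iovlen	= 1,
		.msg_control	= control,
		.msg_controllen	= sizeof(control),
	};
	struct cmsghdr *cm = CMSG_FIRSTHDR(&msg);

	cm->cmsg_level = SOL_UDP;
	cm->cmsg_type = UDP_SEGMENT;
	cm->cmsg_len = CMSG_LEN(sizeof(__u16));	/* exact length is enforced */
	*(__u16 *)CMSG_DATA(cm) = 1400;		/* gso_size per segment */

	sendmsg(fd, &msg, 0);			/* fd: connected UDP socket */
#endif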
int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
{
	struct inet_sock *inet = inet_sk(sk);
	struct udp_sock *up = udp_sk(sk);
	DECLARE_SOCKADDR(struct sockaddr_in *, usin, msg->msg_name);
	struct flowi4 fl4_stack;
	struct flowi4 *fl4;
	int ulen = len;
	struct ipcm_cookie ipc;
	struct rtable *rt = NULL;
	int free = 0;
	int connected = 0;
	__be32 daddr, faddr, saddr;
	__be16 dport;
	u8  tos;
	int err, is_udplite = IS_UDPLITE(sk);
	int corkreq = up->corkflag || msg->msg_flags & MSG_MORE;
	int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);
	struct sk_buff *skb;
	struct ip_options_data opt_copy;

	if (len > 0xFFFF)
		return -EMSGSIZE;

	/*
	 *	Check the flags.
	 */

	if (msg->msg_flags & MSG_OOB) /* Mirror BSD error message compatibility */
		return -EOPNOTSUPP;

	getfrag = is_udplite ? udplite_getfrag : ip_generic_getfrag;

	fl4 = &inet->cork.fl.u.ip4;
	if (up->pending) {
		/*
		 * There are pending frames.
		 * The socket lock must be held while it's corked.
		 */
		lock_sock(sk);
		if (likely(up->pending)) {
			if (unlikely(up->pending != AF_INET)) {
				release_sock(sk);
				return -EINVAL;
			}
			goto do_append_data;
		}
		release_sock(sk);
	}
	ulen += sizeof(struct udphdr);

	/*
	 *	Get and verify the address.
	 */
	if (usin) {
		if (msg->msg_namelen < sizeof(*usin))
			return -EINVAL;
		if (usin->sin_family != AF_INET) {
			if (usin->sin_family != AF_UNSPEC)
				return -EAFNOSUPPORT;
		}

		daddr = usin->sin_addr.s_addr;
		dport = usin->sin_port;
		if (dport == 0)
			return -EINVAL;
	} else {
		if (sk->sk_state != TCP_ESTABLISHED)
			return -EDESTADDRREQ;
		daddr = inet->inet_daddr;
		dport = inet->inet_dport;
		/* Open fast path for connected socket.
		   Route will not be used, if at least one option is set.
		 */
		connected = 1;
	}

	ipcm_init_sk(&ipc, inet);
	ipc.gso_size = up->gso_size;

	if (msg->msg_controllen) {
		err = udp_cmsg_send(sk, msg, &ipc.gso_size);
		if (err > 0)
			err = ip_cmsg_send(sk, msg, &ipc,
					   sk->sk_family == AF_INET6);
		if (unlikely(err < 0)) {
			kfree(ipc.opt);
			return err;
		}
		if (ipc.opt)
			free = 1;
		connected = 0;
	}
	if (!ipc.opt) {
		struct ip_options_rcu *inet_opt;

		rcu_read_lock();
		inet_opt = rcu_dereference(inet->inet_opt);
		if (inet_opt) {
			memcpy(&opt_copy, inet_opt,
			       sizeof(*inet_opt) + inet_opt->opt.optlen);
			ipc.opt = &opt_copy.opt;
		}
		rcu_read_unlock();
	}

	if (cgroup_bpf_enabled && !connected) {
		err = BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk,
					    (struct sockaddr *)usin, &ipc.addr);
		if (err)
			goto out_free;
		if (usin) {
			if (usin->sin_port == 0) {
				/* BPF program set invalid port. Reject it. */
				err = -EINVAL;
				goto out_free;
			}
			daddr = usin->sin_addr.s_addr;
			dport = usin->sin_port;
		}
	}

	saddr = ipc.addr;
	ipc.addr = faddr = daddr;

	if (ipc.opt && ipc.opt->opt.srr) {
		if (!daddr) {
			err = -EINVAL;
			goto out_free;
		}
		faddr = ipc.opt->opt.faddr;
		connected = 0;
	}
	tos = get_rttos(&ipc, inet);
	if (sock_flag(sk, SOCK_LOCALROUTE) ||
	    (msg->msg_flags & MSG_DONTROUTE) ||
	    (ipc.opt && ipc.opt->opt.is_strictroute)) {
		tos |= RTO_ONLINK;
		connected = 0;
	}

	if (ipv4_is_multicast(daddr)) {
		if (!ipc.oif)
			ipc.oif = inet->mc_index;
		if (!saddr)
			saddr = inet->mc_addr;
		connected = 0;
	} else if (!ipc.oif) {
		ipc.oif = inet->uc_index;
	} else if (ipv4_is_lbcast(daddr) && inet->uc_index) {
		/* oif is set, packet is to local broadcast and
		 * uc_index is set. oif is most likely set
		 * by sk_bound_dev_if. If uc_index != oif check if the
		 * oif is an L3 master and uc_index is an L3 slave.
		 * If so, we want to allow the send using the uc_index.
		 */
		if (ipc.oif != inet->uc_index &&
		    ipc.oif == l3mdev_master_ifindex_by_index(sock_net(sk),
							      inet->uc_index)) {
			ipc.oif = inet->uc_index;
		}
	}
	if (connected)
		rt = (struct rtable *)sk_dst_check(sk, 0);

	if (!rt) {
		struct net *net = sock_net(sk);
		__u8 flow_flags = inet_sk_flowi_flags(sk);

		fl4 = &fl4_stack;

		flowi4_init_output(fl4, ipc.oif, sk->sk_mark, tos,
				   RT_SCOPE_UNIVERSE, sk->sk_protocol,
				   flow_flags,
				   faddr, saddr, dport, inet->inet_sport,
				   sk->sk_uid);

		security_sk_classify_flow(sk, flowi4_to_flowi(fl4));
		rt = ip_route_output_flow(net, fl4, sk);
		if (IS_ERR(rt)) {
			err = PTR_ERR(rt);
			rt = NULL;
			if (err == -ENETUNREACH)
				IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
			goto out;
		}

		err = -EACCES;
		if ((rt->rt_flags & RTCF_BROADCAST) &&
		    !sock_flag(sk, SOCK_BROADCAST))
			goto out;
		if (connected)
			sk_dst_set(sk, dst_clone(&rt->dst));
	}

	if (msg->msg_flags & MSG_CONFIRM)
		goto do_confirm;
back_from_confirm:

	saddr = fl4->saddr;
	if (!ipc.addr)
		daddr = ipc.addr = fl4->daddr;

	/* Lockless fast path for the non-corking case. */
	if (!corkreq) {
		struct inet_cork cork;

		skb = ip_make_skb(sk, fl4, getfrag, msg, ulen,
				  sizeof(struct udphdr), &ipc, &rt,
				  &cork, msg->msg_flags);
		err = PTR_ERR(skb);
		if (!IS_ERR_OR_NULL(skb))
			err = udp_send_skb(skb, fl4, &cork);
		goto out;
	}

	lock_sock(sk);
	if (unlikely(up->pending)) {
		/* The socket is already corked while preparing it. */
		/* ... which is an evident application bug. --ANK */
		release_sock(sk);

		net_dbg_ratelimited("socket already corked\n");
		err = -EINVAL;
		goto out;
	}
	/*
	 *	Now cork the socket to pend data.
	 */
	fl4 = &inet->cork.fl.u.ip4;
	fl4->daddr = daddr;
	fl4->saddr = saddr;
	fl4->fl4_dport = dport;
	fl4->fl4_sport = inet->inet_sport;
	up->pending = AF_INET;

do_append_data:
	up->len += ulen;
	err = ip_append_data(sk, fl4, getfrag, msg, ulen,
			     sizeof(struct udphdr), &ipc, &rt,
			     corkreq ? msg->msg_flags | MSG_MORE : msg->msg_flags);
	if (err)
		udp_flush_pending_frames(sk);
	else if (!corkreq)
		err = udp_push_pending_frames(sk);
	else if (unlikely(skb_queue_empty(&sk->sk_write_queue)))
		up->pending = 0;
	release_sock(sk);

out:
	ip_rt_put(rt);
out_free:
	if (free)
		kfree(ipc.opt);
	if (!err)
		return len;
	/*
	 * ENOBUFS = no kernel mem, SOCK_NOSPACE = no sndbuf space.  Reporting
	 * ENOBUFS might not be good (it's not tunable per se), but otherwise
	 * we don't have a good statistic (IpOutDiscards but it can be too many
	 * things).  We could add another new stat but at least for now that
	 * seems like overkill.
	 */
	if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
		UDP_INC_STATS(sock_net(sk),
			      UDP_MIB_SNDBUFERRORS, is_udplite);
	}
	return err;

do_confirm:
	if (msg->msg_flags & MSG_PROBE)
		dst_confirm_neigh(&rt->dst, &fl4->daddr);
	if (!(msg->msg_flags & MSG_PROBE) || len)
		goto back_from_confirm;
	err = 0;
	goto out;
}
EXPORT_SYMBOL(udp_sendmsg);
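
/*
 * Editor's note (not part of the original file): the "connected" fast path
 * above reuses the route cached by sk_dst_check(), while every unconnected
 * sendto() pays for a full ip_route_output_flow() lookup. From userspace
 * the difference is simply connect()+send() versus repeated sendto():
 */
#if 0	/* illustrative userspace sketch, not compiled with the kernel */
	/* slow path: route resolved on every call */
	sendto(fd, buf, len, 0, (struct sockaddr *)&dst, sizeof(dst));

	/* fast path: route cached on the socket after connect() */
	connect(fd, (struct sockaddr *)&dst, sizeof(dst));
	send(fd, buf, len, 0);
#endif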
int udp_sendpage(struct sock *sk, struct page *page, int offset,
		 size_t size, int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	struct udp_sock *up = udp_sk(sk);
	int ret;

	if (flags & MSG_SENDPAGE_NOTLAST)
		flags |= MSG_MORE;

	if (!up->pending) {
		struct msghdr msg = { .msg_flags = flags | MSG_MORE };

		/* Call udp_sendmsg to specify destination address which
		 * sendpage interface can't pass.
		 * This will succeed only when the socket is connected.
		 */
		ret = udp_sendmsg(sk, &msg, 0);
		if (ret < 0)
			return ret;
	}

	lock_sock(sk);

	if (unlikely(!up->pending)) {
		release_sock(sk);

		net_dbg_ratelimited("cork failed\n");
		return -EINVAL;
	}

	ret = ip_append_page(sk, &inet->cork.fl.u.ip4,
			     page, offset, size, flags);
	if (ret == -EOPNOTSUPP) {
		release_sock(sk);
		return sock_no_sendpage(sk->sk_socket, page, offset,
					size, flags);
	}
	if (ret < 0) {
		udp_flush_pending_frames(sk);
		goto out;
	}

	up->len += size;
	if (!(up->corkflag || (flags & MSG_MORE)))
		ret = udp_push_pending_frames(sk);
	if (!ret)
		ret = size;
out:
	release_sock(sk);
	return ret;
}
#define UDP_SKB_IS_STATELESS 0x80000000

static void udp_set_dev_scratch(struct sk_buff *skb)
{
	struct udp_dev_scratch *scratch = udp_skb_scratch(skb);

	BUILD_BUG_ON(sizeof(struct udp_dev_scratch) > sizeof(long));
	scratch->_tsize_state = skb->truesize;
#if BITS_PER_LONG == 64
	scratch->len = skb->len;
	scratch->csum_unnecessary = !!skb_csum_unnecessary(skb);
	scratch->is_linear = !skb_is_nonlinear(skb);
#endif
	/* all head states except sp (dst, sk, nf) are always cleared by
	 * udp_rcv() and we need to preserve secpath, if present, to eventually
	 * process IP_CMSG_PASSSEC at recvmsg() time
	 */
	if (likely(!skb_sec_path(skb)))
		scratch->_tsize_state |= UDP_SKB_IS_STATELESS;
}

static int udp_skb_truesize(struct sk_buff *skb)
{
	return udp_skb_scratch(skb)->_tsize_state & ~UDP_SKB_IS_STATELESS;
}

static bool udp_skb_has_head_state(struct sk_buff *skb)
{
	return !(udp_skb_scratch(skb)->_tsize_state & UDP_SKB_IS_STATELESS);
}

/* fully reclaim rmem/fwd memory allocated for skb */
static void udp_rmem_release(struct sock *sk, int size, int partial,
			     bool rx_queue_lock_held)
{
	struct udp_sock *up = udp_sk(sk);
	struct sk_buff_head *sk_queue;
	int amt;

	if (likely(partial)) {
		up->forward_deficit += size;
		size = up->forward_deficit;
		if (size < (sk->sk_rcvbuf >> 2))
			return;
	} else {
		size += up->forward_deficit;
	}
	up->forward_deficit = 0;

	/* acquire the sk_receive_queue for fwd allocated memory scheduling,
	 * if the caller doesn't hold it already
	 */
	sk_queue = &sk->sk_receive_queue;
	if (!rx_queue_lock_held)
		spin_lock(&sk_queue->lock);

	sk->sk_forward_alloc += size;
	amt = (sk->sk_forward_alloc - partial) & ~(SK_MEM_QUANTUM - 1);
	sk->sk_forward_alloc -= amt;

	if (amt)
		__sk_mem_reduce_allocated(sk, amt >> SK_MEM_QUANTUM_SHIFT);

	atomic_sub(size, &sk->sk_rmem_alloc);

	/* this can save us from acquiring the rx queue lock on next receive */
	skb_queue_splice_tail_init(sk_queue, &up->reader_queue);

	if (!rx_queue_lock_held)
		spin_unlock(&sk_queue->lock);
}
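
/*
 * Editor's note (worked example, not part of the original file): the
 * forward_deficit logic above batches memory reclaim. On a partial
 * release, freed bytes only accumulate until they reach a quarter of the
 * receive buffer; e.g. with the common 212992-byte sk_rcvbuf default,
 * dequeued skbs are accounted locally until the deficit reaches
 * 212992 >> 2 = 53248 bytes, and only then is memory returned to the
 * protocol in SK_MEM_QUANTUM-sized chunks, amortizing the cost of
 * __sk_mem_reduce_allocated() over many packets.
 */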
/* Note: called with reader_queue.lock held.
 * Instead of using skb->truesize here, find a copy of it in skb->dev_scratch
 * This avoids a cache line miss while receive_queue lock is held.
 * Look at __udp_enqueue_schedule_skb() to find where this copy is done.
 */
void udp_skb_destructor(struct sock *sk, struct sk_buff *skb)
{
	prefetch(&skb->data);
	udp_rmem_release(sk, udp_skb_truesize(skb), 1, false);
}
EXPORT_SYMBOL(udp_skb_destructor);

/* as above, but the caller held the rx queue lock, too */
static void udp_skb_dtor_locked(struct sock *sk, struct sk_buff *skb)
{
	prefetch(&skb->data);
	udp_rmem_release(sk, udp_skb_truesize(skb), 1, true);
}

/* Idea of busylocks is to let producers grab an extra spinlock
 * to relieve pressure on the receive_queue spinlock shared by consumer.
 * Under flood, this means that only one producer can be in line
 * trying to acquire the receive_queue spinlock.
 * These busylocks can be allocated in a per-cpu manner, instead of a
 * per-socket one (that would consume a cache line per socket).
 */
static int udp_busylocks_log __read_mostly;
static spinlock_t *udp_busylocks __read_mostly;

static spinlock_t *busylock_acquire(void *ptr)
{
	spinlock_t *busy;

	busy = udp_busylocks + hash_ptr(ptr, udp_busylocks_log);
	spin_lock(busy);
	return busy;
}

static void busylock_release(spinlock_t *busy)
{
	if (busy)
		spin_unlock(busy);
}
int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb)
{
	struct sk_buff_head *list = &sk->sk_receive_queue;
	int rmem, delta, amt, err = -ENOMEM;
	spinlock_t *busy = NULL;
	int size;

	/* try to avoid the costly atomic add/sub pair when the receive
	 * queue is full; always allow at least a packet
	 */
	rmem = atomic_read(&sk->sk_rmem_alloc);
	if (rmem > sk->sk_rcvbuf)
		goto drop;

	/* Under mem pressure, it might be helpful to help udp_recvmsg()
	 * having linear skbs :
	 * - Reduce memory overhead and thus increase receive queue capacity
	 * - Less cache line misses at copyout() time
	 * - Less work at consume_skb() (less alien page frag freeing)
	 */
	if (rmem > (sk->sk_rcvbuf >> 1)) {
		skb_condense(skb);

		busy = busylock_acquire(sk);
	}
	size = skb->truesize;
	udp_set_dev_scratch(skb);

	/* we drop only if the receive buf is full and the receive
	 * queue contains some other skb
	 */
	rmem = atomic_add_return(size, &sk->sk_rmem_alloc);
	if (rmem > (size + sk->sk_rcvbuf))
		goto uncharge_drop;

	spin_lock(&list->lock);
	if (size >= sk->sk_forward_alloc) {
		amt = sk_mem_pages(size);
		delta = amt << SK_MEM_QUANTUM_SHIFT;
		if (!__sk_mem_raise_allocated(sk, delta, amt, SK_MEM_RECV)) {
			err = -ENOBUFS;
			spin_unlock(&list->lock);
			goto uncharge_drop;
		}

		sk->sk_forward_alloc += delta;
	}

	sk->sk_forward_alloc -= size;

	/* no need to setup a destructor, we will explicitly release the
	 * forward allocated memory on dequeue
	 */
	sock_skb_set_dropcount(sk, skb);

	__skb_queue_tail(list, skb);
	spin_unlock(&list->lock);

	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk);

	busylock_release(busy);
	return 0;

uncharge_drop:
	atomic_sub(skb->truesize, &sk->sk_rmem_alloc);

drop:
	atomic_inc(&sk->sk_drops);
	busylock_release(busy);
	return err;
}
EXPORT_SYMBOL_GPL(__udp_enqueue_schedule_skb);
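
/*
 * Editor's note (worked example, not part of the original file): the
 * forward-alloc arithmetic above charges memory in SK_MEM_QUANTUM units,
 * which on most configurations equals PAGE_SIZE (an assumption here).
 * With 4 KiB quanta, an skb with truesize 1600 arriving on an empty
 * sk_forward_alloc gives sk_mem_pages(1600) = 1, so delta = 4096 bytes
 * are raised at once; 1600 are consumed by this skb and the remaining
 * 2496 stay in sk_forward_alloc to absorb the next packets without
 * touching the global protocol counters.
 */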
void udp_destruct_sock(struct sock *sk)
{
	/* reclaim completely the forward allocated memory */
	struct udp_sock *up = udp_sk(sk);
	unsigned int total = 0;
	struct sk_buff *skb;

	skb_queue_splice_tail_init(&sk->sk_receive_queue, &up->reader_queue);
	while ((skb = __skb_dequeue(&up->reader_queue)) != NULL) {
		total += skb->truesize;
		kfree_skb(skb);
	}
	udp_rmem_release(sk, total, 0, true);

	inet_sock_destruct(sk);
}
EXPORT_SYMBOL_GPL(udp_destruct_sock);

int udp_init_sock(struct sock *sk)
{
	skb_queue_head_init(&udp_sk(sk)->reader_queue);
	sk->sk_destruct = udp_destruct_sock;
	return 0;
}
EXPORT_SYMBOL_GPL(udp_init_sock);

void skb_consume_udp(struct sock *sk, struct sk_buff *skb, int len)
{
	if (unlikely(READ_ONCE(sk->sk_peek_off) >= 0)) {
		bool slow = lock_sock_fast(sk);

		sk_peek_offset_bwd(sk, len);
		unlock_sock_fast(sk, slow);
	}

	if (!skb_unref(skb))
		return;

	/* In the more common cases we cleared the head states previously,
	 * see __udp_queue_rcv_skb().
	 */
	if (unlikely(udp_skb_has_head_state(skb)))
		skb_release_head_state(skb);
	__consume_stateless_skb(skb);
}
EXPORT_SYMBOL_GPL(skb_consume_udp);
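
/*
 * Editor's note (not part of the original file): sk_peek_off only goes
 * non-negative once userspace opts in with SO_PEEK_OFF, which lets an
 * application walk the receive queue with MSG_PEEK without re-reading
 * the same datagram. A minimal sketch:
 */
#if 0	/* illustrative userspace sketch, not compiled with the kernel */
	int off = 0;

	setsockopt(fd, SOL_SOCKET, SO_PEEK_OFF, &off, sizeof(off));
	recv(fd, buf, sizeof(buf), MSG_PEEK);	/* peeks datagram 0 */
	recv(fd, buf, sizeof(buf), MSG_PEEK);	/* now peeks datagram 1 */
	recv(fd, buf, sizeof(buf), 0);		/* consumes datagram 0;
						 * the kernel moves the peek
						 * offset back accordingly */
#endif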
static struct sk_buff *__first_packet_length(struct sock *sk,
					     struct sk_buff_head *rcvq,
					     int *total)
{
	struct sk_buff *skb;

	while ((skb = skb_peek(rcvq)) != NULL) {
		if (udp_lib_checksum_complete(skb)) {
			__UDP_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS,
					IS_UDPLITE(sk));
			__UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS,
					IS_UDPLITE(sk));
			atomic_inc(&sk->sk_drops);
			__skb_unlink(skb, rcvq);
			*total += skb->truesize;
			kfree_skb(skb);
		} else {
			/* the csum related bits could be changed, refresh
			 * the scratch area
			 */
			udp_set_dev_scratch(skb);
			break;
		}
	}
	return skb;
}

/**
 *	first_packet_length	- return length of first packet in receive queue
 *	@sk: socket
 *
 *	Drops all bad checksum frames, until a valid one is found.
 *	Returns the length of found skb, or -1 if none is found.
 */
static int first_packet_length(struct sock *sk)
{
	struct sk_buff_head *rcvq = &udp_sk(sk)->reader_queue;
	struct sk_buff_head *sk_queue = &sk->sk_receive_queue;
	struct sk_buff *skb;
	int total = 0;
	int res;

	spin_lock_bh(&rcvq->lock);
	skb = __first_packet_length(sk, rcvq, &total);
	if (!skb && !skb_queue_empty(sk_queue)) {
		spin_lock(&sk_queue->lock);
		skb_queue_splice_tail_init(sk_queue, rcvq);
		spin_unlock(&sk_queue->lock);

		skb = __first_packet_length(sk, rcvq, &total);
	}
	res = skb ? skb->len : -1;
	if (total)
		udp_rmem_release(sk, total, 1, false);
	spin_unlock_bh(&rcvq->lock);
	return res;
}
/*
 *	IOCTL requests applicable to the UDP protocol
 */

int udp_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
	switch (cmd) {
	case SIOCOUTQ:
	{
		int amount = sk_wmem_alloc_get(sk);

		return put_user(amount, (int __user *)arg);
	}

	case SIOCINQ:
	{
		int amount = max_t(int, 0, first_packet_length(sk));

		return put_user(amount, (int __user *)arg);
	}

	default:
		return -ENOIOCTLCMD;
	}

	return 0;
}
EXPORT_SYMBOL(udp_ioctl);
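
/* Illustrative userspace sketch (not part of this file): how SIOCINQ and
 * SIOCOUTQ surface the values computed above. The fd is assumed to be a
 * bound UDP socket; error handling is omitted for brevity.
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/sockios.h>
 *
 *	int pending, unsent;
 *
 *	ioctl(fd, SIOCINQ, &pending);	// length of the next datagram, or 0
 *	ioctl(fd, SIOCOUTQ, &unsent);	// bytes still in the send queue
 */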
struct sk_buff *__skb_recv_udp(struct sock *sk, unsigned int flags,
			       int noblock, int *peeked, int *off, int *err)
{
	struct sk_buff_head *sk_queue = &sk->sk_receive_queue;
	struct sk_buff_head *queue;
	struct sk_buff *last;
	long timeo;
	int error;

	queue = &udp_sk(sk)->reader_queue;
	flags |= noblock ? MSG_DONTWAIT : 0;
	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
	do {
		struct sk_buff *skb;

		error = sock_error(sk);
		if (error)
			break;

		error = -EAGAIN;
		*peeked = 0;
		do {
			spin_lock_bh(&queue->lock);
			skb = __skb_try_recv_from_queue(sk, queue, flags,
							udp_skb_destructor,
							peeked, off, err,
							&last);
			if (skb) {
				spin_unlock_bh(&queue->lock);
				return skb;
			}

			if (skb_queue_empty(sk_queue)) {
				spin_unlock_bh(&queue->lock);
				goto busy_check;
			}

			/* refill the reader queue and walk it again;
			 * keep both queues locked to avoid re-acquiring
			 * the sk_receive_queue lock if fwd memory scheduling
			 * is needed.
			 */
			spin_lock(&sk_queue->lock);
			skb_queue_splice_tail_init(sk_queue, queue);

			skb = __skb_try_recv_from_queue(sk, queue, flags,
							udp_skb_dtor_locked,
							peeked, off, err,
							&last);
			spin_unlock(&sk_queue->lock);
			spin_unlock_bh(&queue->lock);
			if (skb)
				return skb;

busy_check:
			if (!sk_can_busy_loop(sk))
				break;

			sk_busy_loop(sk, flags & MSG_DONTWAIT);
		} while (!skb_queue_empty(sk_queue));

		/* sk_queue is empty, reader_queue may contain peeked packets */
	} while (timeo &&
		 !__skb_wait_for_more_packets(sk, &error, &timeo,
					      (struct sk_buff *)sk_queue));

	*err = error;
	return NULL;
}
EXPORT_SYMBOL_GPL(__skb_recv_udp);
/*
 *	This should be easy: if there is something there, we
 *	return it; otherwise we block.
 */

int udp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock,
		int flags, int *addr_len)
{
	struct inet_sock *inet = inet_sk(sk);
	DECLARE_SOCKADDR(struct sockaddr_in *, sin, msg->msg_name);
	struct sk_buff *skb;
	unsigned int ulen, copied;
	int peeked, peeking, off;
	int err;
	int is_udplite = IS_UDPLITE(sk);
	bool checksum_valid = false;

	if (flags & MSG_ERRQUEUE)
		return ip_recv_error(sk, msg, len, addr_len);

try_again:
	peeking = flags & MSG_PEEK;
	off = sk_peek_offset(sk, flags);
	skb = __skb_recv_udp(sk, flags, noblock, &peeked, &off, &err);
	if (!skb)
		return err;

	ulen = udp_skb_len(skb);
	copied = len;
	if (copied > ulen - off)
		copied = ulen - off;
	else if (copied < ulen)
		msg->msg_flags |= MSG_TRUNC;

	/*
	 * If checksum is needed at all, try to do it while copying the
	 * data. If the data is truncated, or if we only want a partial
	 * coverage checksum (UDP-Lite), do it before the copy.
	 */
	if (copied < ulen || peeking ||
	    (is_udplite && UDP_SKB_CB(skb)->partial_cov)) {
		checksum_valid = udp_skb_csum_unnecessary(skb) ||
				!__udp_lib_checksum_complete(skb);
		if (!checksum_valid)
			goto csum_copy_err;
	}

	if (checksum_valid || udp_skb_csum_unnecessary(skb)) {
		if (udp_skb_is_linear(skb))
			err = copy_linear_skb(skb, copied, off, &msg->msg_iter);
		else
			err = skb_copy_datagram_msg(skb, off, msg, copied);
	} else {
		err = skb_copy_and_csum_datagram_msg(skb, off, msg);

		if (err == -EINVAL)
			goto csum_copy_err;
	}

	if (unlikely(err)) {
		if (!peeked) {
			atomic_inc(&sk->sk_drops);
			UDP_INC_STATS(sock_net(sk),
				      UDP_MIB_INERRORS, is_udplite);
		}
		kfree_skb(skb);
		return err;
	}

	if (!peeked)
		UDP_INC_STATS(sock_net(sk),
			      UDP_MIB_INDATAGRAMS, is_udplite);

	sock_recv_ts_and_drops(msg, sk, skb);

	/* Copy the address. */
	if (sin) {
		sin->sin_family = AF_INET;
		sin->sin_port = udp_hdr(skb)->source;
		sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
		memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
		*addr_len = sizeof(*sin);
	}
	if (inet->cmsg_flags)
		ip_cmsg_recv_offset(msg, sk, skb, sizeof(struct udphdr), off);

	err = copied;
	if (flags & MSG_TRUNC)
		err = ulen;

	skb_consume_udp(sk, skb, peeking ? -err : err);
	return err;

csum_copy_err:
	if (!__sk_queue_drop_skb(sk, &udp_sk(sk)->reader_queue, skb, flags,
				 udp_skb_destructor)) {
		UDP_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
		UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
	}
	kfree_skb(skb);

	/* starting over for a new packet, but check if we need to yield */
	cond_resched();
	msg->msg_flags &= ~MSG_TRUNC;
	goto try_again;
}
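
/* Illustrative userspace sketch (not part of this file): the MSG_TRUNC
 * handling above means recv() with MSG_TRUNC reports the full datagram
 * length even when the buffer was too small, which lets callers size a
 * retry buffer. Assumes a bound or connected UDP socket fd.
 *
 *	char small[16];
 *	ssize_t n = recv(fd, small, sizeof(small), MSG_TRUNC);
 *
 *	if (n > (ssize_t)sizeof(small))
 *		// the datagram was n bytes; only the first 16 were copied
 */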
int udp_pre_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
	/* This check is replicated from __ip4_datagram_connect() and
	 * intended to prevent the BPF program called below from accessing
	 * bytes that are outside the bound specified by the user in addr_len.
	 */
	if (addr_len < sizeof(struct sockaddr_in))
		return -EINVAL;

	return BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr);
}
EXPORT_SYMBOL(udp_pre_connect);
int __udp_disconnect(struct sock *sk, int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	/*
	 *	1003.1g - break association.
	 */

	sk->sk_state = TCP_CLOSE;
	inet->inet_daddr = 0;
	inet->inet_dport = 0;
	sock_rps_reset_rxhash(sk);
	sk->sk_bound_dev_if = 0;
	if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
		inet_reset_saddr(sk);

	if (!(sk->sk_userlocks & SOCK_BINDPORT_LOCK)) {
		sk->sk_prot->unhash(sk);
		inet->inet_sport = 0;
	}
	sk_dst_reset(sk);
	return 0;
}
EXPORT_SYMBOL(__udp_disconnect);

int udp_disconnect(struct sock *sk, int flags)
{
	lock_sock(sk);
	__udp_disconnect(sk, flags);
	release_sock(sk);
	return 0;
}
EXPORT_SYMBOL(udp_disconnect);
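
/* Illustrative userspace sketch (not part of this file): this disconnect
 * path is what a connected UDP socket hits when userspace "connects" to
 * AF_UNSPEC, dissolving the association per 1003.1g:
 *
 *	struct sockaddr sa = { .sa_family = AF_UNSPEC };
 *
 *	connect(fd, &sa, sizeof(sa));	// socket reverts to unconnected
 */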
void udp_lib_unhash(struct sock *sk)
{
	if (sk_hashed(sk)) {
		struct udp_table *udptable = sk->sk_prot->h.udp_table;
		struct udp_hslot *hslot, *hslot2;

		hslot  = udp_hashslot(udptable, sock_net(sk),
				      udp_sk(sk)->udp_port_hash);
		hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash);

		spin_lock_bh(&hslot->lock);
		if (rcu_access_pointer(sk->sk_reuseport_cb))
			reuseport_detach_sock(sk);
		if (sk_del_node_init_rcu(sk)) {
			hslot->count--;
			inet_sk(sk)->inet_num = 0;
			sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);

			spin_lock(&hslot2->lock);
			hlist_del_init_rcu(&udp_sk(sk)->udp_portaddr_node);
			hslot2->count--;
			spin_unlock(&hslot2->lock);
		}
		spin_unlock_bh(&hslot->lock);
	}
}
EXPORT_SYMBOL(udp_lib_unhash);
/*
 * inet_rcv_saddr was changed, we must rehash secondary hash
 */
void udp_lib_rehash(struct sock *sk, u16 newhash)
{
	if (sk_hashed(sk)) {
		struct udp_table *udptable = sk->sk_prot->h.udp_table;
		struct udp_hslot *hslot, *hslot2, *nhslot2;

		hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash);
		nhslot2 = udp_hashslot2(udptable, newhash);
		udp_sk(sk)->udp_portaddr_hash = newhash;

		if (hslot2 != nhslot2 ||
		    rcu_access_pointer(sk->sk_reuseport_cb)) {
			hslot = udp_hashslot(udptable, sock_net(sk),
					     udp_sk(sk)->udp_port_hash);
			/* we must lock primary chain too */
			spin_lock_bh(&hslot->lock);
			if (rcu_access_pointer(sk->sk_reuseport_cb))
				reuseport_detach_sock(sk);

			if (hslot2 != nhslot2) {
				spin_lock(&hslot2->lock);
				hlist_del_init_rcu(&udp_sk(sk)->udp_portaddr_node);
				hslot2->count--;
				spin_unlock(&hslot2->lock);

				spin_lock(&nhslot2->lock);
				hlist_add_head_rcu(&udp_sk(sk)->udp_portaddr_node,
						   &nhslot2->head);
				nhslot2->count++;
				spin_unlock(&nhslot2->lock);
			}

			spin_unlock_bh(&hslot->lock);
		}
	}
}
EXPORT_SYMBOL(udp_lib_rehash);

static void udp_v4_rehash(struct sock *sk)
{
	u16 new_hash = ipv4_portaddr_hash(sock_net(sk),
					  inet_sk(sk)->inet_rcv_saddr,
					  inet_sk(sk)->inet_num);
	udp_lib_rehash(sk, new_hash);
}
static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	int rc;

	if (inet_sk(sk)->inet_daddr) {
		sock_rps_save_rxhash(sk, skb);
		sk_mark_napi_id(sk, skb);
		sk_incoming_cpu_update(sk);
	} else {
		sk_mark_napi_id_once(sk, skb);
	}

	rc = __udp_enqueue_schedule_skb(sk, skb);
	if (rc < 0) {
		int is_udplite = IS_UDPLITE(sk);

		/* Note that an ENOMEM error is charged twice */
		if (rc == -ENOMEM)
			UDP_INC_STATS(sock_net(sk), UDP_MIB_RCVBUFERRORS,
					is_udplite);
		UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
		kfree_skb(skb);
		trace_udp_fail_queue_rcv_skb(rc, sk);
		return -1;
	}

	return 0;
}
static DEFINE_STATIC_KEY_FALSE(udp_encap_needed_key);
void udp_encap_enable(void)
{
	static_branch_enable(&udp_encap_needed_key);
}
EXPORT_SYMBOL(udp_encap_enable);
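
/* Illustrative userspace sketch (not part of this file): the static key
 * above is flipped when an encapsulation type is configured on a socket,
 * e.g. an L2TP daemon enabling L2TP-in-UDP decapsulation via the
 * UDP_ENCAP case in udp_lib_setsockopt() below:
 *
 *	int type = UDP_ENCAP_L2TPINUDP;
 *
 *	setsockopt(fd, IPPROTO_UDP, UDP_ENCAP, &type, sizeof(type));
 */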
/* returns:
 *  -1: error
 *   0: success
 *  >0: "udp encap" protocol resubmission
 *
 * Note that in the success and error cases, the skb is assumed to
 * have either been requeued or freed.
 */
static int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	struct udp_sock *up = udp_sk(sk);
	int is_udplite = IS_UDPLITE(sk);

	/*
	 *	Charge it to the socket, dropping if the queue is full.
	 */
	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
		goto drop;
	nf_reset(skb);

	if (static_branch_unlikely(&udp_encap_needed_key) && up->encap_type) {
		int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);

		/*
		 * This is an encapsulation socket so pass the skb to
		 * the socket's udp_encap_rcv() hook. Otherwise, just
		 * fall through and pass this up the UDP socket.
		 * up->encap_rcv() returns the following value:
		 * =0 if skb was successfully passed to the encap
		 *    handler or was discarded by it.
		 * >0 if skb should be passed on to UDP.
		 * <0 if skb should be resubmitted as proto -N
		 */

		/* if we're overly short, let UDP handle it */
		encap_rcv = READ_ONCE(up->encap_rcv);
		if (encap_rcv) {
			int ret;

			/* Verify checksum before giving to encap */
			if (udp_lib_checksum_complete(skb))
				goto csum_error;

			ret = encap_rcv(sk, skb);
			if (ret <= 0) {
				__UDP_INC_STATS(sock_net(sk),
						UDP_MIB_INDATAGRAMS,
						is_udplite);
				return -ret;
			}
		}

		/* FALLTHROUGH -- it's a UDP Packet */
	}

	/*
	 *	UDP-Lite specific tests, ignored on UDP sockets
	 */
	if ((is_udplite & UDPLITE_RECV_CC)  &&  UDP_SKB_CB(skb)->partial_cov) {

		/*
		 * MIB statistics other than incrementing the error count are
		 * disabled for the following two types of errors: these depend
		 * on the application settings, not on the functioning of the
		 * protocol stack as such.
		 *
		 * RFC 3828 here recommends (sec 3.3): "There should also be a
		 * way ... to ... at least let the receiving application block
		 * delivery of packets with coverage values less than a value
		 * provided by the application."
		 */
		if (up->pcrlen == 0) {          /* full coverage was set  */
			net_dbg_ratelimited("UDPLite: partial coverage %d while full coverage %d requested\n",
					    UDP_SKB_CB(skb)->cscov, skb->len);
			goto drop;
		}
		/* The next case involves violating the min. coverage requested
		 * by the receiver. This is subtle: if receiver wants x and x is
		 * greater than the buffersize/MTU then receiver will complain
		 * that it wants x while sender emits packets of smaller size y.
		 * Therefore the above ...()->partial_cov statement is essential.
		 */
		if (UDP_SKB_CB(skb)->cscov < up->pcrlen) {
			net_dbg_ratelimited("UDPLite: coverage %d too small, need min %d\n",
					    UDP_SKB_CB(skb)->cscov, up->pcrlen);
			goto drop;
		}
	}

	prefetch(&sk->sk_rmem_alloc);
	if (rcu_access_pointer(sk->sk_filter) &&
	    udp_lib_checksum_complete(skb))
		goto csum_error;

	if (sk_filter_trim_cap(sk, skb, sizeof(struct udphdr)))
		goto drop;

	udp_csum_pull_header(skb);

	ipv4_pktinfo_prepare(sk, skb);
	return __udp_queue_rcv_skb(sk, skb);

csum_error:
	__UDP_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
drop:
	__UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
	atomic_inc(&sk->sk_drops);
	kfree_skb(skb);
	return -1;
}
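
/* Illustrative userspace sketch (not part of this file): the pcrlen check
 * above enforces a receiver-side minimum coverage, which a UDP-Lite
 * application would request roughly like this (values below 8 are rounded
 * up by udp_lib_setsockopt(); the 20 is made up):
 *
 *	int fd = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDPLITE);
 *	int cscov = 20;	// require at least 20 covered bytes per datagram
 *
 *	setsockopt(fd, IPPROTO_UDPLITE, UDPLITE_RECV_CSCOV,
 *		   &cscov, sizeof(cscov));
 */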
/* For TCP sockets, sk_rx_dst is protected by socket lock
 * For UDP, we use xchg() to guard against concurrent changes.
 */
bool udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst)
{
	struct dst_entry *old;

	if (dst_hold_safe(dst)) {
		old = xchg(&sk->sk_rx_dst, dst);
		dst_release(old);
		return old != dst;
	}
	return false;
}
EXPORT_SYMBOL(udp_sk_rx_dst_set);
/*
 *	Multicasts and broadcasts go to each listener.
 *
 *	Note: called only from the BH handler context.
 */
static int __udp4_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
				    struct udphdr *uh,
				    __be32 saddr, __be32 daddr,
				    struct udp_table *udptable,
				    int proto)
{
	struct sock *sk, *first = NULL;
	unsigned short hnum = ntohs(uh->dest);
	struct udp_hslot *hslot = udp_hashslot(udptable, net, hnum);
	unsigned int hash2 = 0, hash2_any = 0, use_hash2 = (hslot->count > 10);
	unsigned int offset = offsetof(typeof(*sk), sk_node);
	int dif = skb->dev->ifindex;
	int sdif = inet_sdif(skb);
	struct hlist_node *node;
	struct sk_buff *nskb;

	if (use_hash2) {
		hash2_any = ipv4_portaddr_hash(net, htonl(INADDR_ANY), hnum) &
			    udptable->mask;
		hash2 = ipv4_portaddr_hash(net, daddr, hnum) & udptable->mask;
start_lookup:
		hslot = &udptable->hash2[hash2];
		offset = offsetof(typeof(*sk), __sk_common.skc_portaddr_node);
	}

	sk_for_each_entry_offset_rcu(sk, node, &hslot->head, offset) {
		if (!__udp_is_mcast_sock(net, sk, uh->dest, daddr,
					 uh->source, saddr, dif, sdif, hnum))
			continue;

		if (!first) {
			first = sk;
			continue;
		}
		nskb = skb_clone(skb, GFP_ATOMIC);

		if (unlikely(!nskb)) {
			atomic_inc(&sk->sk_drops);
			__UDP_INC_STATS(net, UDP_MIB_RCVBUFERRORS,
					IS_UDPLITE(sk));
			__UDP_INC_STATS(net, UDP_MIB_INERRORS,
					IS_UDPLITE(sk));
			continue;
		}
		if (udp_queue_rcv_skb(sk, nskb) > 0)
			consume_skb(nskb);
	}

	/* Also lookup *:port if we are using hash2 and haven't done so yet. */
	if (use_hash2 && hash2 != hash2_any) {
		hash2 = hash2_any;
		goto start_lookup;
	}

	if (first) {
		if (udp_queue_rcv_skb(first, skb) > 0)
			consume_skb(skb);
	} else {
		kfree_skb(skb);
		__UDP_INC_STATS(net, UDP_MIB_IGNOREDMULTI,
				proto == IPPROTO_UDPLITE);
	}
	return 0;
}
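
/* Illustrative userspace sketch (not part of this file): a socket becomes
 * eligible for the per-listener delivery above by joining a group and
 * binding to the UDP port. The group address below is made up:
 *
 *	struct ip_mreqn mreq = {
 *		.imr_multiaddr.s_addr = inet_addr("239.0.0.1"),
 *		.imr_ifindex = 0,	// let the kernel pick the interface
 *	};
 *
 *	setsockopt(fd, IPPROTO_IP, IP_ADD_MEMBERSHIP, &mreq, sizeof(mreq));
 */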
/* Initialize UDP checksum. If it returns zero (success),
 * CHECKSUM_UNNECESSARY means that no more checks are required.
 * Otherwise, csum completion requires checksumming the packet body,
 * including the udp header, and folding it into skb->csum.
 */
static inline int udp4_csum_init(struct sk_buff *skb, struct udphdr *uh,
				 int proto)
{
	int err;

	UDP_SKB_CB(skb)->partial_cov = 0;
	UDP_SKB_CB(skb)->cscov = skb->len;

	if (proto == IPPROTO_UDPLITE) {
		err = udplite_checksum_init(skb, uh);
		if (err)
			return err;

		if (UDP_SKB_CB(skb)->partial_cov) {
			skb->csum = inet_compute_pseudo(skb, proto);
			return 0;
		}
	}

	/* Note, we are only interested in != 0 or == 0, thus the
	 * force to int.
	 */
	return (__force int)skb_checksum_init_zero_check(skb, proto, uh->check,
							 inet_compute_pseudo);
}
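
/* Informational note: for plain UDP over IPv4, RFC 768 makes the checksum
 * optional, so a datagram arriving with uh->check == 0 is treated by
 * skb_checksum_init_zero_check() as "no checksum present" and accepted
 * without verification, rather than counted as a checksum failure.
 */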
/*
 *	All we need to do is get the socket, and then do a checksum.
 */

int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
		   int proto)
{
	struct sock *sk;
	struct udphdr *uh;
	unsigned short ulen;
	struct rtable *rt = skb_rtable(skb);
	__be32 saddr, daddr;
	struct net *net = dev_net(skb->dev);

	/*
	 *  Validate the packet.
	 */
	if (!pskb_may_pull(skb, sizeof(struct udphdr)))
		goto drop;		/* No space for header. */

	uh   = udp_hdr(skb);
	ulen = ntohs(uh->len);
	saddr = ip_hdr(skb)->saddr;
	daddr = ip_hdr(skb)->daddr;

	if (ulen > skb->len)
		goto short_packet;

	if (proto == IPPROTO_UDP) {
		/* UDP validates ulen. */
		if (ulen < sizeof(*uh) || pskb_trim_rcsum(skb, ulen))
			goto short_packet;
		uh = udp_hdr(skb);
	}

	if (udp4_csum_init(skb, uh, proto))
		goto csum_error;

	sk = skb_steal_sock(skb);
	if (sk) {
		struct dst_entry *dst = skb_dst(skb);
		int ret;

		if (unlikely(sk->sk_rx_dst != dst))
			udp_sk_rx_dst_set(sk, dst);

		ret = udp_queue_rcv_skb(sk, skb);
		sock_put(sk);
		/* a return value > 0 means to resubmit the input, but
		 * it wants the return to be -protocol, or 0
		 */
		if (ret > 0)
			return -ret;
		return 0;
	}

	if (rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST))
		return __udp4_lib_mcast_deliver(net, skb, uh,
						saddr, daddr, udptable, proto);

	sk = __udp4_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
	if (sk) {
		int ret;

		if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk))
			skb_checksum_try_convert(skb, IPPROTO_UDP, uh->check,
						 inet_compute_pseudo);

		ret = udp_queue_rcv_skb(sk, skb);

		/* a return value > 0 means to resubmit the input, but
		 * it wants the return to be -protocol, or 0
		 */
		if (ret > 0)
			return -ret;
		return 0;
	}

	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto drop;
	nf_reset(skb);

	/* No socket. Drop packet silently, if checksum is wrong */
	if (udp_lib_checksum_complete(skb))
		goto csum_error;

	__UDP_INC_STATS(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);

	/*
	 * Hmm.  We got a UDP packet to a port on which we
	 * don't want to listen.  Ignore it.
	 */
	kfree_skb(skb);
	return 0;

short_packet:
	net_dbg_ratelimited("UDP%s: short packet: From %pI4:%u %d/%d to %pI4:%u\n",
			    proto == IPPROTO_UDPLITE ? "Lite" : "",
			    &saddr, ntohs(uh->source),
			    ulen, skb->len,
			    &daddr, ntohs(uh->dest));
	goto drop;

csum_error:
	/*
	 * RFC1122: OK.  Discards the bad packet silently (as far as
	 * the network is concerned, anyway) as per 4.1.3.4 (MUST).
	 */
	net_dbg_ratelimited("UDP%s: bad checksum. From %pI4:%u to %pI4:%u ulen %d\n",
			    proto == IPPROTO_UDPLITE ? "Lite" : "",
			    &saddr, ntohs(uh->source), &daddr, ntohs(uh->dest),
			    ulen);
	__UDP_INC_STATS(net, UDP_MIB_CSUMERRORS, proto == IPPROTO_UDPLITE);
drop:
	__UDP_INC_STATS(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE);
	kfree_skb(skb);
	return 0;
}
/* We can only early demux multicast if there is a single matching socket.
 * If more than one socket is found, return NULL.
 */
static struct sock *__udp4_lib_mcast_demux_lookup(struct net *net,
						  __be16 loc_port, __be32 loc_addr,
						  __be16 rmt_port, __be32 rmt_addr,
						  int dif, int sdif)
{
	struct sock *sk, *result;
	unsigned short hnum = ntohs(loc_port);
	unsigned int slot = udp_hashfn(net, hnum, udp_table.mask);
	struct udp_hslot *hslot = &udp_table.hash[slot];

	/* Do not bother scanning an overly long list */
	if (hslot->count > 10)
		return NULL;

	result = NULL;
	sk_for_each_rcu(sk, &hslot->head) {
		if (__udp_is_mcast_sock(net, sk, loc_port, loc_addr,
					rmt_port, rmt_addr, dif, sdif, hnum)) {
			if (result)
				return NULL;
			result = sk;
		}
	}

	return result;
}
/* For unicast we should only early demux connected sockets or we can
 * break forwarding setups.  The chains here can be long so only check
 * if the first socket is an exact match and if not move on.
 */
static struct sock *__udp4_lib_demux_lookup(struct net *net,
					    __be16 loc_port, __be32 loc_addr,
					    __be16 rmt_port, __be32 rmt_addr,
					    int dif, int sdif)
{
	unsigned short hnum = ntohs(loc_port);
	unsigned int hash2 = ipv4_portaddr_hash(net, loc_addr, hnum);
	unsigned int slot2 = hash2 & udp_table.mask;
	struct udp_hslot *hslot2 = &udp_table.hash2[slot2];
	INET_ADDR_COOKIE(acookie, rmt_addr, loc_addr);
	const __portpair ports = INET_COMBINED_PORTS(rmt_port, hnum);
	struct sock *sk;

	udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
		if (INET_MATCH(sk, net, acookie, rmt_addr,
			       loc_addr, ports, dif, sdif))
			return sk;
		/* Only check first socket in chain */
		break;
	}
	return NULL;
}
int udp_v4_early_demux(struct sk_buff *skb)
{
	struct net *net = dev_net(skb->dev);
	struct in_device *in_dev = NULL;
	const struct iphdr *iph;
	const struct udphdr *uh;
	struct sock *sk = NULL;
	struct dst_entry *dst;
	int dif = skb->dev->ifindex;
	int sdif = inet_sdif(skb);
	int ours;

	/* validate the packet */
	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct udphdr)))
		return 0;

	iph = ip_hdr(skb);
	uh = udp_hdr(skb);

	if (skb->pkt_type == PACKET_MULTICAST) {
		in_dev = __in_dev_get_rcu(skb->dev);

		if (!in_dev)
			return 0;

		ours = ip_check_mc_rcu(in_dev, iph->daddr, iph->saddr,
				       iph->protocol);
		if (!ours)
			return 0;

		sk = __udp4_lib_mcast_demux_lookup(net, uh->dest, iph->daddr,
						   uh->source, iph->saddr,
						   dif, sdif);
	} else if (skb->pkt_type == PACKET_HOST) {
		sk = __udp4_lib_demux_lookup(net, uh->dest, iph->daddr,
					     uh->source, iph->saddr, dif, sdif);
	}

	if (!sk || !refcount_inc_not_zero(&sk->sk_refcnt))
		return 0;

	skb->sk = sk;
	skb->destructor = sock_efree;
	dst = READ_ONCE(sk->sk_rx_dst);

	if (dst)
		dst = dst_check(dst, 0);
	if (dst) {
		u32 itag = 0;

		/* set noref for now.
		 * any place which wants to hold dst has to call
		 * dst_hold_safe()
		 */
		skb_dst_set_noref(skb, dst);

		/* for unconnected multicast sockets we need to validate
		 * the source on each packet
		 */
		if (!inet_sk(sk)->inet_daddr && in_dev)
			return ip_mc_validate_source(skb, iph->daddr,
						     iph->saddr, iph->tos,
						     skb->dev, in_dev, &itag);
	}
	return 0;
}
int udp_rcv(struct sk_buff *skb)
{
	return __udp4_lib_rcv(skb, &udp_table, IPPROTO_UDP);
}

void udp_destroy_sock(struct sock *sk)
{
	struct udp_sock *up = udp_sk(sk);
	bool slow = lock_sock_fast(sk);

	udp_flush_pending_frames(sk);
	unlock_sock_fast(sk, slow);
	if (static_branch_unlikely(&udp_encap_needed_key) && up->encap_type) {
		void (*encap_destroy)(struct sock *sk);

		encap_destroy = READ_ONCE(up->encap_destroy);
		if (encap_destroy)
			encap_destroy(sk);
	}
}
/*
 *	Socket option code for UDP
 */
int udp_lib_setsockopt(struct sock *sk, int level, int optname,
		       char __user *optval, unsigned int optlen,
		       int (*push_pending_frames)(struct sock *))
{
	struct udp_sock *up = udp_sk(sk);
	int val, valbool;
	int err = 0;
	int is_udplite = IS_UDPLITE(sk);

	if (optlen < sizeof(int))
		return -EINVAL;

	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	valbool = val ? 1 : 0;

	switch (optname) {
	case UDP_CORK:
		if (val != 0) {
			up->corkflag = 1;
		} else {
			up->corkflag = 0;
			lock_sock(sk);
			push_pending_frames(sk);
			release_sock(sk);
		}
		break;

	case UDP_ENCAP:
		switch (val) {
		case 0:
		case UDP_ENCAP_ESPINUDP:
		case UDP_ENCAP_ESPINUDP_NON_IKE:
			up->encap_rcv = xfrm4_udp_encap_rcv;
			/* FALLTHROUGH */
		case UDP_ENCAP_L2TPINUDP:
			up->encap_type = val;
			udp_encap_enable();
			break;
		default:
			err = -ENOPROTOOPT;
			break;
		}
		break;

	case UDP_NO_CHECK6_TX:
		up->no_check6_tx = valbool;
		break;

	case UDP_NO_CHECK6_RX:
		up->no_check6_rx = valbool;
		break;

	case UDP_SEGMENT:
		if (val < 0 || val > USHRT_MAX)
			return -EINVAL;
		up->gso_size = val;
		break;

	/*
	 *	UDP-Lite's partial checksum coverage (RFC 3828).
	 */
	/* The sender sets the actual checksum coverage length via this option.
	 * The case coverage > packet length is handled by the send module. */
	case UDPLITE_SEND_CSCOV:
		if (!is_udplite)         /* Disable the option on UDP sockets */
			return -ENOPROTOOPT;
		if (val != 0 && val < 8) /* Illegal coverage: use default (8) */
			val = 8;
		else if (val > USHRT_MAX)
			val = USHRT_MAX;
		up->pcslen = val;
		up->pcflag |= UDPLITE_SEND_CC;
		break;

	/* The receiver specifies a minimum checksum coverage value. To make
	 * sense, this should be set to at least 8 (as done below). If zero is
	 * used, this again means full checksum coverage.                     */
	case UDPLITE_RECV_CSCOV:
		if (!is_udplite)         /* Disable the option on UDP sockets */
			return -ENOPROTOOPT;
		if (val != 0 && val < 8) /* Avoid silly minimal values.       */
			val = 8;
		else if (val > USHRT_MAX)
			val = USHRT_MAX;
		up->pcrlen = val;
		up->pcflag |= UDPLITE_RECV_CC;
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	return err;
}
EXPORT_SYMBOL(udp_lib_setsockopt);
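
/* Illustrative userspace sketch (not part of this file): the UDP_SEGMENT
 * case above configures UDP GSO, letting one large send be segmented into
 * fixed-size datagrams by the stack. The 1400-byte segment size is made up:
 *
 *	int gso_size = 1400;
 *
 *	setsockopt(fd, SOL_UDP, UDP_SEGMENT, &gso_size, sizeof(gso_size));
 *	// a subsequent 14000-byte send() now yields ten 1400-byte datagrams
 */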
int udp_setsockopt(struct sock *sk, int level, int optname,
		   char __user *optval, unsigned int optlen)
{
	if (level == SOL_UDP || level == SOL_UDPLITE)
		return udp_lib_setsockopt(sk, level, optname, optval, optlen,
					  udp_push_pending_frames);
	return ip_setsockopt(sk, level, optname, optval, optlen);
}

#ifdef CONFIG_COMPAT
int compat_udp_setsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, unsigned int optlen)
{
	if (level == SOL_UDP || level == SOL_UDPLITE)
		return udp_lib_setsockopt(sk, level, optname, optval, optlen,
					  udp_push_pending_frames);
	return compat_ip_setsockopt(sk, level, optname, optval, optlen);
}
#endif
int udp_lib_getsockopt(struct sock *sk, int level, int optname,
		       char __user *optval, int __user *optlen)
{
	struct udp_sock *up = udp_sk(sk);
	int val, len;

	if (get_user(len, optlen))
		return -EFAULT;

	len = min_t(unsigned int, len, sizeof(int));

	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case UDP_CORK:
		val = up->corkflag;
		break;

	case UDP_ENCAP:
		val = up->encap_type;
		break;

	case UDP_NO_CHECK6_TX:
		val = up->no_check6_tx;
		break;

	case UDP_NO_CHECK6_RX:
		val = up->no_check6_rx;
		break;

	case UDP_SEGMENT:
		val = up->gso_size;
		break;

	/* The following two cannot be changed on UDP sockets; the return is
	 * always 0 (which corresponds to the full checksum coverage of UDP). */
	case UDPLITE_SEND_CSCOV:
		val = up->pcslen;
		break;

	case UDPLITE_RECV_CSCOV:
		val = up->pcrlen;
		break;

	default:
		return -ENOPROTOOPT;
	}

	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, len))
		return -EFAULT;
	return 0;
}
EXPORT_SYMBOL(udp_lib_getsockopt);
int udp_getsockopt(struct sock *sk, int level, int optname,
		   char __user *optval, int __user *optlen)
{
	if (level == SOL_UDP || level == SOL_UDPLITE)
		return udp_lib_getsockopt(sk, level, optname, optval, optlen);
	return ip_getsockopt(sk, level, optname, optval, optlen);
}

#ifdef CONFIG_COMPAT
int compat_udp_getsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, int __user *optlen)
{
	if (level == SOL_UDP || level == SOL_UDPLITE)
		return udp_lib_getsockopt(sk, level, optname, optval, optlen);
	return compat_ip_getsockopt(sk, level, optname, optval, optlen);
}
#endif
/**
 *	udp_poll - wait for a UDP event.
 *	@file: file struct
 *	@sock: socket
 *	@wait: poll table
 *
 *	This is the same as datagram poll, except for the special case of
 *	blocking sockets. If an application is using a blocking fd and a
 *	packet with a checksum error is in the queue, select()/poll() can
 *	indicate that data is available, yet the subsequent read blocks.
 *	Add special-case code to work around these arguably broken
 *	applications.
 */
__poll_t udp_poll(struct file *file, struct socket *sock, poll_table *wait)
{
	__poll_t mask = datagram_poll(file, sock, wait);
	struct sock *sk = sock->sk;

	if (!skb_queue_empty(&udp_sk(sk)->reader_queue))
		mask |= EPOLLIN | EPOLLRDNORM;

	/* Check for false positives due to checksum errors */
	if ((mask & EPOLLRDNORM) && !(file->f_flags & O_NONBLOCK) &&
	    !(sk->sk_shutdown & RCV_SHUTDOWN) && first_packet_length(sk) == -1)
		mask &= ~(EPOLLIN | EPOLLRDNORM);

	return mask;
}
EXPORT_SYMBOL(udp_poll);
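
/* Illustrative userspace sketch (not part of this file): the readiness
 * check above is what a plain poll() loop on a blocking UDP socket relies
 * on; after POLLIN is reported, the next recvfrom() will not block on a
 * queue that held only bad-checksum datagrams:
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *
 *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
 *		recvfrom(fd, buf, sizeof(buf), 0, NULL, NULL);
 */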
int udp_abort(struct sock *sk, int err)
{
	lock_sock(sk);

	sk->sk_err = err;
	sk->sk_error_report(sk);
	__udp_disconnect(sk, 0);

	release_sock(sk);

	return 0;
}
EXPORT_SYMBOL_GPL(udp_abort);
struct proto udp_prot = {
	.name			= "UDP",
	.owner			= THIS_MODULE,
	.close			= udp_lib_close,
	.pre_connect		= udp_pre_connect,
	.connect		= ip4_datagram_connect,
	.disconnect		= udp_disconnect,
	.ioctl			= udp_ioctl,
	.init			= udp_init_sock,
	.destroy		= udp_destroy_sock,
	.setsockopt		= udp_setsockopt,
	.getsockopt		= udp_getsockopt,
	.sendmsg		= udp_sendmsg,
	.recvmsg		= udp_recvmsg,
	.sendpage		= udp_sendpage,
	.release_cb		= ip4_datagram_release_cb,
	.hash			= udp_lib_hash,
	.unhash			= udp_lib_unhash,
	.rehash			= udp_v4_rehash,
	.get_port		= udp_v4_get_port,
	.memory_allocated	= &udp_memory_allocated,
	.sysctl_mem		= sysctl_udp_mem,
	.sysctl_wmem_offset	= offsetof(struct net, ipv4.sysctl_udp_wmem_min),
	.sysctl_rmem_offset	= offsetof(struct net, ipv4.sysctl_udp_rmem_min),
	.obj_size		= sizeof(struct udp_sock),
	.h.udp_table		= &udp_table,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_udp_setsockopt,
	.compat_getsockopt	= compat_udp_getsockopt,
#endif
	.diag_destroy		= udp_abort,
};
EXPORT_SYMBOL(udp_prot);
/* ------------------------------------------------------------------------ */
#ifdef CONFIG_PROC_FS

static struct sock *udp_get_first(struct seq_file *seq, int start)
{
	struct sock *sk;
	struct udp_seq_afinfo *afinfo = PDE_DATA(file_inode(seq->file));
	struct udp_iter_state *state = seq->private;
	struct net *net = seq_file_net(seq);

	for (state->bucket = start; state->bucket <= afinfo->udp_table->mask;
	     ++state->bucket) {
		struct udp_hslot *hslot = &afinfo->udp_table->hash[state->bucket];

		if (hlist_empty(&hslot->head))
			continue;

		spin_lock_bh(&hslot->lock);
		sk_for_each(sk, &hslot->head) {
			if (!net_eq(sock_net(sk), net))
				continue;
			if (sk->sk_family == afinfo->family)
				goto found;
		}
		spin_unlock_bh(&hslot->lock);
	}
	sk = NULL;
found:
	return sk;
}
static struct sock *udp_get_next(struct seq_file *seq, struct sock *sk)
{
	struct udp_seq_afinfo *afinfo = PDE_DATA(file_inode(seq->file));
	struct udp_iter_state *state = seq->private;
	struct net *net = seq_file_net(seq);

	do {
		sk = sk_next(sk);
	} while (sk && (!net_eq(sock_net(sk), net) || sk->sk_family != afinfo->family));

	if (!sk) {
		if (state->bucket <= afinfo->udp_table->mask)
			spin_unlock_bh(&afinfo->udp_table->hash[state->bucket].lock);
		return udp_get_first(seq, state->bucket + 1);
	}
	return sk;
}

static struct sock *udp_get_idx(struct seq_file *seq, loff_t pos)
{
	struct sock *sk = udp_get_first(seq, 0);

	if (sk)
		while (pos && (sk = udp_get_next(seq, sk)) != NULL)
			--pos;
	return pos ? NULL : sk;
}

void *udp_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct udp_iter_state *state = seq->private;
	state->bucket = MAX_UDP_PORTS;

	return *pos ? udp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}
EXPORT_SYMBOL(udp_seq_start);

void *udp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct sock *sk;

	if (v == SEQ_START_TOKEN)
		sk = udp_get_idx(seq, 0);
	else
		sk = udp_get_next(seq, v);

	++*pos;
	return sk;
}
EXPORT_SYMBOL(udp_seq_next);

void udp_seq_stop(struct seq_file *seq, void *v)
{
	struct udp_seq_afinfo *afinfo = PDE_DATA(file_inode(seq->file));
	struct udp_iter_state *state = seq->private;

	if (state->bucket <= afinfo->udp_table->mask)
		spin_unlock_bh(&afinfo->udp_table->hash[state->bucket].lock);
}
EXPORT_SYMBOL(udp_seq_stop);
/* ------------------------------------------------------------------------ */
static void udp4_format_sock(struct sock *sp, struct seq_file *f,
			     int bucket)
{
	struct inet_sock *inet = inet_sk(sp);
	__be32 dest = inet->inet_daddr;
	__be32 src  = inet->inet_rcv_saddr;
	__u16 destp = ntohs(inet->inet_dport);
	__u16 srcp  = ntohs(inet->inet_sport);

	seq_printf(f, "%5d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %d",
		bucket, src, srcp, dest, destp, sp->sk_state,
		sk_wmem_alloc_get(sp),
		udp_rqueue_get(sp),
		0, 0L, 0,
		from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
		0, sock_i_ino(sp),
		refcount_read(&sp->sk_refcnt), sp,
		atomic_read(&sp->sk_drops));
}

int udp4_seq_show(struct seq_file *seq, void *v)
{
	seq_setwidth(seq, 127);
	if (v == SEQ_START_TOKEN)
		seq_puts(seq, "  sl  local_address rem_address   st tx_queue "
			   "rx_queue tr tm->when retrnsmt   uid  timeout "
			   "inode ref pointer drops");
	else {
		struct udp_iter_state *state = seq->private;

		udp4_format_sock(v, seq, state->bucket);
	}
	seq_pad(seq, '\n');
	return 0;
}
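
/* Illustrative (not part of this file): a line produced by the format above,
 * as read from /proc/net/udp, with made-up values. 0100007F:0035 is
 * 127.0.0.1:53 in hex, st 07 is TCP_CLOSE (the idle state for UDP), and
 * rx_queue reports udp_rqueue_get():
 *
 *	2316: 0100007F:0035 00000000:0000 07 00000000:00000000 00:00000000
 *	00000000   101        0 20786 2 0000000000000000 0
 */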
const struct seq_operations udp_seq_ops = {
	.start		= udp_seq_start,
	.next		= udp_seq_next,
	.stop		= udp_seq_stop,
	.show		= udp4_seq_show,
};
EXPORT_SYMBOL(udp_seq_ops);

static struct udp_seq_afinfo udp4_seq_afinfo = {
	.family		= AF_INET,
	.udp_table	= &udp_table,
};

static int __net_init udp4_proc_init_net(struct net *net)
{
	if (!proc_create_net_data("udp", 0444, net->proc_net, &udp_seq_ops,
			sizeof(struct udp_iter_state), &udp4_seq_afinfo))
		return -ENOMEM;
	return 0;
}

static void __net_exit udp4_proc_exit_net(struct net *net)
{
	remove_proc_entry("udp", net->proc_net);
}

static struct pernet_operations udp4_net_ops = {
	.init = udp4_proc_init_net,
	.exit = udp4_proc_exit_net,
};

int __init udp4_proc_init(void)
{
	return register_pernet_subsys(&udp4_net_ops);
}

void udp4_proc_exit(void)
{
	unregister_pernet_subsys(&udp4_net_ops);
}
#endif /* CONFIG_PROC_FS */
static __initdata unsigned long uhash_entries;
static int __init set_uhash_entries(char *str)
{
	ssize_t ret;

	if (!str)
		return 0;

	ret = kstrtoul(str, 0, &uhash_entries);
	if (ret)
		return 0;

	if (uhash_entries && uhash_entries < UDP_HTABLE_SIZE_MIN)
		uhash_entries = UDP_HTABLE_SIZE_MIN;
	return 1;
}
__setup("uhash_entries=", set_uhash_entries);
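
/* Illustrative (not part of this file): the hash table size can be forced
 * at boot via the kernel command line; values below UDP_HTABLE_SIZE_MIN
 * are rounded up by set_uhash_entries() above. The 65536 is made up:
 *
 *	uhash_entries=65536
 */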
void __init udp_table_init(struct udp_table *table, const char *name)
{
	unsigned int i;

	table->hash = alloc_large_system_hash(name,
					      2 * sizeof(struct udp_hslot),
					      uhash_entries,
					      21, /* one slot per 2 MB */
					      0,
					      &table->log,
					      &table->mask,
					      UDP_HTABLE_SIZE_MIN,
					      64 * 1024);

	table->hash2 = table->hash + (table->mask + 1);
	for (i = 0; i <= table->mask; i++) {
		INIT_HLIST_HEAD(&table->hash[i].head);
		table->hash[i].count = 0;
		spin_lock_init(&table->hash[i].lock);
	}
	for (i = 0; i <= table->mask; i++) {
		INIT_HLIST_HEAD(&table->hash2[i].head);
		table->hash2[i].count = 0;
		spin_lock_init(&table->hash2[i].lock);
	}
}
u32 udp_flow_hashrnd(void)
{
	static u32 hashrnd __read_mostly;

	net_get_random_once(&hashrnd, sizeof(hashrnd));

	return hashrnd;
}
EXPORT_SYMBOL(udp_flow_hashrnd);
static void __udp_sysctl_init(struct net *net)
{
	net->ipv4.sysctl_udp_rmem_min = SK_MEM_QUANTUM;
	net->ipv4.sysctl_udp_wmem_min = SK_MEM_QUANTUM;

#ifdef CONFIG_NET_L3_MASTER_DEV
	net->ipv4.sysctl_udp_l3mdev_accept = 0;
#endif
}

static int __net_init udp_sysctl_init(struct net *net)
{
	__udp_sysctl_init(net);
	return 0;
}

static struct pernet_operations __net_initdata udp_sysctl_ops = {
	.init	= udp_sysctl_init,
};
void __init udp_init(void)
{
	unsigned long limit;
	unsigned int i;

	udp_table_init(&udp_table, "UDP");
	limit = nr_free_buffer_pages() / 8;
	limit = max(limit, 128UL);
	sysctl_udp_mem[0] = limit / 4 * 3;
	sysctl_udp_mem[1] = limit;
	sysctl_udp_mem[2] = sysctl_udp_mem[0] * 2;

	__udp_sysctl_init(&init_net);

	/* 16 spinlocks per cpu */
	udp_busylocks_log = ilog2(nr_cpu_ids) + 4;
	udp_busylocks = kmalloc(sizeof(spinlock_t) << udp_busylocks_log,
				GFP_KERNEL);
	if (!udp_busylocks)
		panic("UDP: failed to alloc udp_busylocks\n");
	for (i = 0; i < (1U << udp_busylocks_log); i++)
		spin_lock_init(udp_busylocks + i);

	if (register_pernet_subsys(&udp_sysctl_ops))
		panic("UDP: failed to init sysctl parameters.\n");
}
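
/* Worked example of the sysctl_udp_mem sizing above (illustrative only;
 * assumes nr_free_buffer_pages() returns 1048576, i.e. roughly 4 GB of
 * 4 KB pages): limit = 1048576 / 8 = 131072 pages, so udp_mem becomes
 * {98304, 131072, 196608} pages: the minimum, the pressure threshold and
 * the hard limit, in that order.
 */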