/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		The User Datagram Protocol (UDP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Alan Cox, <alan@lxorguk.ukuu.org.uk>
 *		Hirokazu Takahashi, <taka@valinux.co.jp>
 *
 * Fixes:
 *		Alan Cox	:	verify_area() calls
 *		Alan Cox	:	stopped close while in use off icmp
 *					messages. Not a fix but a botch that
 *					for udp at least is 'valid'.
 *		Alan Cox	:	Fixed icmp handling properly
 *		Alan Cox	:	Correct error for oversized datagrams
 *		Alan Cox	:	Tidied select() semantics.
 *		Alan Cox	:	udp_err() fixed properly, also now
 *					select and read wake correctly on errors
 *		Alan Cox	:	udp_send verify_area moved to avoid mem leak
 *		Alan Cox	:	UDP can count its memory
 *		Alan Cox	:	send to an unknown connection causes
 *					an ECONNREFUSED off the icmp, but
 *					does NOT close.
 *		Alan Cox	:	Switched to new sk_buff handlers. No more backlog!
 *		Alan Cox	:	Using generic datagram code. Even smaller and the PEEK
 *					bug no longer crashes it.
 *		Fred Van Kempen	:	Net2e support for sk->broadcast.
 *		Alan Cox	:	Uses skb_free_datagram
 *		Alan Cox	:	Added get/set sockopt support.
 *		Alan Cox	:	Broadcasting without option set returns EACCES.
 *		Alan Cox	:	No wakeup calls. Instead we now use the callbacks.
 *		Alan Cox	:	Use ip_tos and ip_ttl
 *		Alan Cox	:	SNMP Mibs
 *		Alan Cox	:	MSG_DONTROUTE, and 0.0.0.0 support.
 *		Matt Dillon	:	UDP length checks.
 *		Alan Cox	:	Smarter af_inet used properly.
 *		Alan Cox	:	Use new kernel side addressing.
 *		Alan Cox	:	Incorrect return on truncated datagram receive.
 *		Arnt Gulbrandsen:	New udp_send and stuff
 *		Alan Cox	:	Cache last socket
 *		Alan Cox	:	Route cache
 *		Jon Peatfield	:	Minor efficiency fix to sendto().
 *		Mike Shaver	:	RFC1122 checks.
 *		Alan Cox	:	Nonblocking error fix.
 *		Willy Konynenberg:	Transparent proxying support.
 *		Mike McLagan	:	Routing by source
 *		David S. Miller	:	New socket lookup architecture.
 *					Last socket cache retained as it
 *					does have a high hit rate.
 *		Olaf Kirch	:	Don't linearise iovec on sendmsg.
 *		Andi Kleen	:	Some cleanups, cache destination entry
 *					for connect.
 *		Vitaly E. Lavrov:	Transparent proxy revived after year coma.
 *		Melvin Smith	:	Check msg_name not msg_namelen in sendto(),
 *					return ENOTCONN for unconnected sockets (POSIX)
 *		Janos Farkas	:	don't deliver multi/broadcasts to a different
 *					bound-to-device socket
 *		Hirokazu Takahashi:	HW checksumming for outgoing UDP
 *					datagrams.
 *		Hirokazu Takahashi:	sendfile() on UDP works now.
 *		Arnaldo C. Melo	:	convert /proc/net/udp to seq_file
 *		YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *		Alexey Kuznetsov:		allow both IPv4 and IPv6 sockets to bind
 *						a single port at the same time.
 *		Derek Atkins <derek@ihtfp.com>:	Add Encapsulation Support
 *		James Chapman	:	Add L2TP encapsulation type.
 *
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */
#define pr_fmt(fmt) "UDP: " fmt

#include <linux/uaccess.h>
#include <asm/ioctls.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/module.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/igmp.h>
#include <linux/inetdevice.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <net/tcp_states.h>
#include <linux/skbuff.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <net/net_namespace.h>
#include <net/icmp.h>
#include <net/inet_hashtables.h>
#include <net/route.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <trace/events/udp.h>
#include <linux/static_key.h>
#include <trace/events/skb.h>
#include <net/busy_poll.h>
#include "udp_impl.h"
#include <net/sock_reuseport.h>
#include <net/addrconf.h>
struct udp_table udp_table __read_mostly;
EXPORT_SYMBOL(udp_table);

long sysctl_udp_mem[3] __read_mostly;
EXPORT_SYMBOL(sysctl_udp_mem);

int sysctl_udp_rmem_min __read_mostly;
EXPORT_SYMBOL(sysctl_udp_rmem_min);

int sysctl_udp_wmem_min __read_mostly;
EXPORT_SYMBOL(sysctl_udp_wmem_min);

atomic_long_t udp_memory_allocated;
EXPORT_SYMBOL(udp_memory_allocated);

#define MAX_UDP_PORTS 65536
#define PORTS_PER_CHAIN (MAX_UDP_PORTS / UDP_HTABLE_SIZE_MIN)
/* IPCB reference means this can not be used from early demux */
static bool udp_lib_exact_dif_match(struct net *net, struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
	if (!net->ipv4.sysctl_udp_l3mdev_accept &&
	    skb && ipv4_l3mdev_skb(IPCB(skb)->flags))
		return true;
#endif
	return false;
}
static int udp_lib_lport_inuse(struct net *net, __u16 num,
			       const struct udp_hslot *hslot,
			       unsigned long *bitmap,
			       struct sock *sk, unsigned int log)
{
	struct sock *sk2;
	kuid_t uid = sock_i_uid(sk);

	sk_for_each(sk2, &hslot->head) {
		if (net_eq(sock_net(sk2), net) &&
		    sk2 != sk &&
		    (bitmap || udp_sk(sk2)->udp_port_hash == num) &&
		    (!sk2->sk_reuse || !sk->sk_reuse) &&
		    (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if ||
		     sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
		    inet_rcv_saddr_equal(sk, sk2, true)) {
			if (sk2->sk_reuseport && sk->sk_reuseport &&
			    !rcu_access_pointer(sk->sk_reuseport_cb) &&
			    uid_eq(uid, sock_i_uid(sk2))) {
				if (!bitmap)
					return 0;
			} else {
				if (!bitmap)
					return 1;
				__set_bit(udp_sk(sk2)->udp_port_hash >> log,
					  bitmap);
			}
		}
	}
	return 0;
}
/*
 * Note: we still hold spinlock of primary hash chain, so no other writer
 * can insert/delete a socket with local_port == num
 */
static int udp_lib_lport_inuse2(struct net *net, __u16 num,
				struct udp_hslot *hslot2,
				struct sock *sk)
{
	struct sock *sk2;
	kuid_t uid = sock_i_uid(sk);
	int res = 0;

	spin_lock(&hslot2->lock);
	udp_portaddr_for_each_entry(sk2, &hslot2->head) {
		if (net_eq(sock_net(sk2), net) &&
		    sk2 != sk &&
		    (udp_sk(sk2)->udp_port_hash == num) &&
		    (!sk2->sk_reuse || !sk->sk_reuse) &&
		    (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if ||
		     sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
		    inet_rcv_saddr_equal(sk, sk2, true)) {
			if (sk2->sk_reuseport && sk->sk_reuseport &&
			    !rcu_access_pointer(sk->sk_reuseport_cb) &&
			    uid_eq(uid, sock_i_uid(sk2))) {
				res = 0;
			} else {
				res = 1;
			}
			break;
		}
	}
	spin_unlock(&hslot2->lock);
	return res;
}
static int udp_reuseport_add_sock(struct sock *sk, struct udp_hslot *hslot)
{
	struct net *net = sock_net(sk);
	kuid_t uid = sock_i_uid(sk);
	struct sock *sk2;

	sk_for_each(sk2, &hslot->head) {
		if (net_eq(sock_net(sk2), net) &&
		    sk2 != sk &&
		    sk2->sk_family == sk->sk_family &&
		    ipv6_only_sock(sk2) == ipv6_only_sock(sk) &&
		    (udp_sk(sk2)->udp_port_hash == udp_sk(sk)->udp_port_hash) &&
		    (sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
		    sk2->sk_reuseport && uid_eq(uid, sock_i_uid(sk2)) &&
		    inet_rcv_saddr_equal(sk, sk2, false)) {
			return reuseport_add_sock(sk, sk2);
		}
	}

	/* Initial allocation may have already happened via setsockopt */
	if (!rcu_access_pointer(sk->sk_reuseport_cb))
		return reuseport_alloc(sk);
	return 0;
}
/**
 *  udp_lib_get_port  -  UDP/-Lite port lookup for IPv4 and IPv6
 *
 *  @sk:             socket struct in question
 *  @snum:           port number to look up
 *  @hash2_nulladdr: AF-dependent hash value in secondary hash chains,
 *                   with NULL address
 */
int udp_lib_get_port(struct sock *sk, unsigned short snum,
		     unsigned int hash2_nulladdr)
{
	struct udp_hslot *hslot, *hslot2;
	struct udp_table *udptable = sk->sk_prot->h.udp_table;
	int    error = 1;
	struct net *net = sock_net(sk);

	if (!snum) {
		int low, high, remaining;
		unsigned int rand;
		unsigned short first, last;
		DECLARE_BITMAP(bitmap, PORTS_PER_CHAIN);

		inet_get_local_port_range(net, &low, &high);
		remaining = (high - low) + 1;

		rand = prandom_u32();
		first = reciprocal_scale(rand, remaining) + low;
		/*
		 * force rand to be an odd multiple of UDP_HTABLE_SIZE
		 */
		rand = (rand | 1) * (udptable->mask + 1);
		last = first + udptable->mask + 1;
		do {
			hslot = udp_hashslot(udptable, net, first);
			bitmap_zero(bitmap, PORTS_PER_CHAIN);
			spin_lock_bh(&hslot->lock);
			udp_lib_lport_inuse(net, snum, hslot, bitmap, sk,
					    udptable->log);

			snum = first;
			/*
			 * Iterate on all possible values of snum for this hash.
			 * Using steps of an odd multiple of UDP_HTABLE_SIZE
			 * gives us randomization and full range coverage.
			 */
			do {
				if (low <= snum && snum <= high &&
				    !test_bit(snum >> udptable->log, bitmap) &&
				    !inet_is_local_reserved_port(net, snum))
					goto found;
				snum += rand;
			} while (snum != first);
			spin_unlock_bh(&hslot->lock);
			cond_resched();
		} while (++first != last);
		goto fail;
	} else {
		hslot = udp_hashslot(udptable, net, snum);
		spin_lock_bh(&hslot->lock);
		if (hslot->count > 10) {
			int exist;
			unsigned int slot2 = udp_sk(sk)->udp_portaddr_hash ^ snum;

			slot2          &= udptable->mask;
			hash2_nulladdr &= udptable->mask;

			hslot2 = udp_hashslot2(udptable, slot2);
			if (hslot->count < hslot2->count)
				goto scan_primary_hash;

			exist = udp_lib_lport_inuse2(net, snum, hslot2, sk);
			if (!exist && (hash2_nulladdr != slot2)) {
				hslot2 = udp_hashslot2(udptable, hash2_nulladdr);
				exist = udp_lib_lport_inuse2(net, snum, hslot2,
							     sk);
			}
			if (exist)
				goto fail_unlock;
			else
				goto found;
		}
scan_primary_hash:
		if (udp_lib_lport_inuse(net, snum, hslot, NULL, sk, 0))
			goto fail_unlock;
	}
found:
	inet_sk(sk)->inet_num = snum;
	udp_sk(sk)->udp_port_hash = snum;
	udp_sk(sk)->udp_portaddr_hash ^= snum;
	if (sk_unhashed(sk)) {
		if (sk->sk_reuseport &&
		    udp_reuseport_add_sock(sk, hslot)) {
			inet_sk(sk)->inet_num = 0;
			udp_sk(sk)->udp_port_hash = 0;
			udp_sk(sk)->udp_portaddr_hash ^= snum;
			goto fail_unlock;
		}

		sk_add_node_rcu(sk, &hslot->head);
		hslot->count++;
		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);

		hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash);
		spin_lock(&hslot2->lock);
		if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport &&
		    sk->sk_family == AF_INET6)
			hlist_add_tail_rcu(&udp_sk(sk)->udp_portaddr_node,
					   &hslot2->head);
		else
			hlist_add_head_rcu(&udp_sk(sk)->udp_portaddr_node,
					   &hslot2->head);
		hslot2->count++;
		spin_unlock(&hslot2->lock);
	}
	sock_set_flag(sk, SOCK_RCU_FREE);
	error = 0;
fail_unlock:
	spin_unlock_bh(&hslot->lock);
fail:
	return error;
}
EXPORT_SYMBOL(udp_lib_get_port);
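
/*
 * The odd-multiple step in the port search above relies on a small
 * number-theory fact: an odd increment is coprime to any power-of-two
 * modulus, so repeatedly adding it (mod 2^k) visits every residue exactly
 * once before repeating.  A minimal, illustrative userspace check of that
 * property (not part of this file; names here are ours, not the kernel's):
 */
#if 0	/* illustrative userspace sketch; not part of the kernel build */
#include <stdbool.h>
#include <stdio.h>

/* Walk 0..mod-1 with the given step and report whether every slot is hit. */
static bool covers_all(unsigned int step, unsigned int mod)
{
	bool seen[256] = { false };	/* demo assumes mod <= 256 */
	unsigned int v = 0;

	for (unsigned int i = 0; i < mod; i++) {
		seen[v % mod] = true;
		v += step;
	}
	for (unsigned int i = 0; i < mod; i++)
		if (!seen[i])
			return false;
	return true;
}

int main(void)
{
	/* 256 slots, like a hash table; any odd step gives full coverage */
	printf("odd step 37:  %s\n", covers_all(37, 256) ? "full" : "partial");
	printf("even step 38: %s\n", covers_all(38, 256) ? "full" : "partial");
	return 0;
}
#endif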
static u32 udp4_portaddr_hash(const struct net *net, __be32 saddr,
			      unsigned int port)
{
	return jhash_1word((__force u32)saddr, net_hash_mix(net)) ^ port;
}

int udp_v4_get_port(struct sock *sk, unsigned short snum)
{
	unsigned int hash2_nulladdr =
		udp4_portaddr_hash(sock_net(sk), htonl(INADDR_ANY), snum);
	unsigned int hash2_partial =
		udp4_portaddr_hash(sock_net(sk), inet_sk(sk)->inet_rcv_saddr, 0);

	/* precompute partial secondary hash */
	udp_sk(sk)->udp_portaddr_hash = hash2_partial;
	return udp_lib_get_port(sk, snum, hash2_nulladdr);
}
static int compute_score(struct sock *sk, struct net *net,
			 __be32 saddr, __be16 sport,
			 __be32 daddr, unsigned short hnum, int dif,
			 bool exact_dif)
{
	int score;
	struct inet_sock *inet;

	if (!net_eq(sock_net(sk), net) ||
	    udp_sk(sk)->udp_port_hash != hnum ||
	    ipv6_only_sock(sk))
		return -1;

	score = (sk->sk_family == PF_INET) ? 2 : 1;
	inet = inet_sk(sk);

	if (inet->inet_rcv_saddr) {
		if (inet->inet_rcv_saddr != daddr)
			return -1;
		score += 4;
	}

	if (inet->inet_daddr) {
		if (inet->inet_daddr != saddr)
			return -1;
		score += 4;
	}

	if (inet->inet_dport) {
		if (inet->inet_dport != sport)
			return -1;
		score += 4;
	}

	if (sk->sk_bound_dev_if || exact_dif) {
		if (sk->sk_bound_dev_if != dif)
			return -1;
		score += 4;
	}

	if (sk->sk_incoming_cpu == raw_smp_processor_id())
		score++;
	return score;
}
static u32 udp_ehashfn(const struct net *net, const __be32 laddr,
		       const __u16 lport, const __be32 faddr,
		       const __be16 fport)
{
	static u32 udp_ehash_secret __read_mostly;

	net_get_random_once(&udp_ehash_secret, sizeof(udp_ehash_secret));

	return __inet_ehashfn(laddr, lport, faddr, fport,
			      udp_ehash_secret + net_hash_mix(net));
}
/* called with rcu_read_lock() */
static struct sock *udp4_lib_lookup2(struct net *net,
				     __be32 saddr, __be16 sport,
				     __be32 daddr, unsigned int hnum, int dif,
				     bool exact_dif, struct udp_hslot *hslot2,
				     struct sk_buff *skb)
{
	struct sock *sk, *result;
	int score, badness, matches = 0, reuseport = 0;
	u32 hash = 0;

	result = NULL;
	badness = 0;
	udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
		score = compute_score(sk, net, saddr, sport,
				      daddr, hnum, dif, exact_dif);
		if (score > badness) {
			reuseport = sk->sk_reuseport;
			if (reuseport) {
				hash = udp_ehashfn(net, daddr, hnum,
						   saddr, sport);
				result = reuseport_select_sock(sk, hash, skb,
							sizeof(struct udphdr));
				if (result)
					return result;
				matches = 1;
			}
			badness = score;
			result = sk;
		} else if (score == badness && reuseport) {
			matches++;
			if (reciprocal_scale(hash, matches) == 0)
				result = sk;
			hash = next_pseudo_random32(hash);
		}
	}
	return result;
}
/* UDP is nearly always wildcards out the wazoo, it makes no sense to try
 * harder than this. -DaveM
 */
struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr,
		__be16 sport, __be32 daddr, __be16 dport,
		int dif, struct udp_table *udptable, struct sk_buff *skb)
{
	struct sock *sk, *result;
	unsigned short hnum = ntohs(dport);
	unsigned int hash2, slot2, slot = udp_hashfn(net, hnum, udptable->mask);
	struct udp_hslot *hslot2, *hslot = &udptable->hash[slot];
	bool exact_dif = udp_lib_exact_dif_match(net, skb);
	int score, badness, matches = 0, reuseport = 0;
	u32 hash = 0;

	if (hslot->count > 10) {
		hash2 = udp4_portaddr_hash(net, daddr, hnum);
		slot2 = hash2 & udptable->mask;
		hslot2 = &udptable->hash2[slot2];
		if (hslot->count < hslot2->count)
			goto begin;

		result = udp4_lib_lookup2(net, saddr, sport,
					  daddr, hnum, dif,
					  exact_dif, hslot2, skb);
		if (!result) {
			unsigned int old_slot2 = slot2;
			hash2 = udp4_portaddr_hash(net, htonl(INADDR_ANY), hnum);
			slot2 = hash2 & udptable->mask;
			/* avoid searching the same slot again. */
			if (unlikely(slot2 == old_slot2))
				return result;

			hslot2 = &udptable->hash2[slot2];
			if (hslot->count < hslot2->count)
				goto begin;

			result = udp4_lib_lookup2(net, saddr, sport,
						  daddr, hnum, dif,
						  exact_dif, hslot2, skb);
		}
		return result;
	}
begin:
	result = NULL;
	badness = 0;
	sk_for_each_rcu(sk, &hslot->head) {
		score = compute_score(sk, net, saddr, sport,
				      daddr, hnum, dif, exact_dif);
		if (score > badness) {
			reuseport = sk->sk_reuseport;
			if (reuseport) {
				hash = udp_ehashfn(net, daddr, hnum,
						   saddr, sport);
				result = reuseport_select_sock(sk, hash, skb,
							sizeof(struct udphdr));
				if (result)
					return result;
				matches = 1;
			}
			result = sk;
			badness = score;
		} else if (score == badness && reuseport) {
			matches++;
			if (reciprocal_scale(hash, matches) == 0)
				result = sk;
			hash = next_pseudo_random32(hash);
		}
	}
	return result;
}
EXPORT_SYMBOL_GPL(__udp4_lib_lookup);
static inline struct sock *__udp4_lib_lookup_skb(struct sk_buff *skb,
						 __be16 sport, __be16 dport,
						 struct udp_table *udptable)
{
	const struct iphdr *iph = ip_hdr(skb);

	return __udp4_lib_lookup(dev_net(skb->dev), iph->saddr, sport,
				 iph->daddr, dport, inet_iif(skb),
				 udptable, skb);
}

struct sock *udp4_lib_lookup_skb(struct sk_buff *skb,
				 __be16 sport, __be16 dport)
{
	return __udp4_lib_lookup_skb(skb, sport, dport, &udp_table);
}
EXPORT_SYMBOL_GPL(udp4_lib_lookup_skb);

/* Must be called under rcu_read_lock().
 * Does increment socket refcount.
 */
#if IS_ENABLED(CONFIG_NETFILTER_XT_MATCH_SOCKET) || \
    IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TPROXY) || \
    IS_ENABLED(CONFIG_NF_SOCKET_IPV4)
struct sock *udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport,
			     __be32 daddr, __be16 dport, int dif)
{
	struct sock *sk;

	sk = __udp4_lib_lookup(net, saddr, sport, daddr, dport,
			       dif, &udp_table, NULL);
	if (sk && !refcount_inc_not_zero(&sk->sk_refcnt))
		sk = NULL;
	return sk;
}
EXPORT_SYMBOL_GPL(udp4_lib_lookup);
#endif
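
/*
 * reuseport_select_sock() in the lookup paths above spreads incoming
 * datagrams across all members of a SO_REUSEPORT group.  A hedged userspace
 * sketch of creating such a group (SO_REUSEPORT is Linux 3.9+; error paths
 * trimmed, names ours):
 */
#if 0	/* illustrative userspace sketch; not part of the kernel build */
#include <arpa/inet.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

/* Create one member of a SO_REUSEPORT group bound to *port*. */
static int reuseport_socket(unsigned short port)
{
	struct sockaddr_in addr;
	int one = 1;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return -1;
	/* must be set before bind() on every member of the group */
	if (setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one)) < 0) {
		close(fd);
		return -1;
	}
	memset(&addr, 0, sizeof(addr));
	addr.sin_family = AF_INET;
	addr.sin_addr.s_addr = htonl(INADDR_ANY);
	addr.sin_port = htons(port);
	if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
		close(fd);
		return -1;
	}
	return fd;
}

int main(void)
{
	/* two sockets share port 9000; the kernel hashes flows across them */
	int a = reuseport_socket(9000);
	int b = reuseport_socket(9000);

	printf("group members: %d %d\n", a, b);
	return a < 0 || b < 0;
}
#endif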
static inline bool __udp_is_mcast_sock(struct net *net, struct sock *sk,
				       __be16 loc_port, __be32 loc_addr,
				       __be16 rmt_port, __be32 rmt_addr,
				       int dif, unsigned short hnum)
{
	struct inet_sock *inet = inet_sk(sk);

	if (!net_eq(sock_net(sk), net) ||
	    udp_sk(sk)->udp_port_hash != hnum ||
	    (inet->inet_daddr && inet->inet_daddr != rmt_addr) ||
	    (inet->inet_dport != rmt_port && inet->inet_dport) ||
	    (inet->inet_rcv_saddr && inet->inet_rcv_saddr != loc_addr) ||
	    ipv6_only_sock(sk) ||
	    (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif))
		return false;
	if (!ip_mc_sf_allow(sk, loc_addr, rmt_addr, dif))
		return false;
	return true;
}
/*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition.  If err < 0 then the socket should
 * be closed and the error returned to the user.  If err > 0
 * it's just the icmp type << 8 | icmp code.
 * Header points to the ip header of the error packet. We move
 * on past this. Then (as it used to claim before adjustment)
 * header points to the first 8 bytes of the udp header.  We need
 * to find the appropriate port.
 */

void __udp4_lib_err(struct sk_buff *skb, u32 info, struct udp_table *udptable)
{
	struct inet_sock *inet;
	const struct iphdr *iph = (const struct iphdr *)skb->data;
	struct udphdr *uh = (struct udphdr *)(skb->data+(iph->ihl<<2));
	const int type = icmp_hdr(skb)->type;
	const int code = icmp_hdr(skb)->code;
	struct sock *sk;
	int harderr;
	int err;
	struct net *net = dev_net(skb->dev);

	sk = __udp4_lib_lookup(net, iph->daddr, uh->dest,
			       iph->saddr, uh->source, skb->dev->ifindex,
			       udptable, NULL);
	if (!sk) {
		__ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
		return;	/* No socket for error */
	}

	err = 0;
	harderr = 0;
	inet = inet_sk(sk);

	switch (type) {
	default:
	case ICMP_TIME_EXCEEDED:
		err = EHOSTUNREACH;
		break;
	case ICMP_SOURCE_QUENCH:
		goto out;
	case ICMP_PARAMETERPROB:
		err = EPROTO;
		harderr = 1;
		break;
	case ICMP_DEST_UNREACH:
		if (code == ICMP_FRAG_NEEDED) { /* Path MTU discovery */
			ipv4_sk_update_pmtu(skb, sk, info);
			if (inet->pmtudisc != IP_PMTUDISC_DONT) {
				err = EMSGSIZE;
				harderr = 1;
				break;
			}
			goto out;
		}
		err = EHOSTUNREACH;
		if (code <= NR_ICMP_UNREACH) {
			harderr = icmp_err_convert[code].fatal;
			err = icmp_err_convert[code].errno;
		}
		break;
	case ICMP_REDIRECT:
		ipv4_sk_redirect(skb, sk);
		goto out;
	}

	/*
	 * RFC1122: OK.  Passes ICMP errors back to application, as per
	 * 4.1.3.3.
	 */
	if (!inet->recverr) {
		if (!harderr || sk->sk_state != TCP_ESTABLISHED)
			goto out;
	} else
		ip_icmp_error(sk, skb, err, uh->dest, info, (u8 *)(uh+1));

	sk->sk_err = err;
	sk->sk_error_report(sk);
out:
	return;
}

void udp_err(struct sk_buff *skb, u32 info)
{
	__udp4_lib_err(skb, info, &udp_table);
}
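
/*
 * __udp4_lib_err() only queues the ICMP error for the application when the
 * IP_RECVERR option is set (inet->recverr above).  A hedged userspace sketch
 * of enabling that option and draining the resulting error queue (names of
 * the helper are ours):
 */
#if 0	/* illustrative userspace sketch; not part of the kernel build */
#include <linux/errqueue.h>
#include <netinet/in.h>
#include <stdio.h>
#include <sys/socket.h>

/* Read one queued ICMP error from a UDP socket with IP_RECVERR set. */
static void drain_error(int fd)
{
	char cbuf[512], data[512];
	struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
	struct msghdr msg = {
		.msg_iov = &iov, .msg_iovlen = 1,
		.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
	};
	struct cmsghdr *cm;

	if (recvmsg(fd, &msg, MSG_ERRQUEUE) < 0)
		return;
	for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm)) {
		if (cm->cmsg_level == IPPROTO_IP &&
		    cm->cmsg_type == IP_RECVERR) {
			struct sock_extended_err *ee =
				(struct sock_extended_err *)CMSG_DATA(cm);

			fprintf(stderr, "icmp err: errno=%d type=%u code=%u\n",
				ee->ee_errno, ee->ee_type, ee->ee_code);
		}
	}
}

int main(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	int on = 1;

	setsockopt(fd, IPPROTO_IP, IP_RECVERR, &on, sizeof(on));
	/* ... sendto() something unreachable, then poll for POLLERR ... */
	drain_error(fd);
	return 0;
}
#endif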
/*
 * Throw away all pending data and cancel the corking. Socket is locked.
 */
void udp_flush_pending_frames(struct sock *sk)
{
	struct udp_sock *up = udp_sk(sk);

	if (up->pending) {
		up->len = 0;
		up->pending = 0;
		ip_flush_pending_frames(sk);
	}
}
EXPORT_SYMBOL(udp_flush_pending_frames);
/**
 *	udp4_hwcsum  -  handle outgoing HW checksumming
 *	@skb:	sk_buff containing the filled-in UDP header
 *		(checksum field must be zeroed out)
 *	@src:	source IP address
 *	@dst:	destination IP address
 */
void udp4_hwcsum(struct sk_buff *skb, __be32 src, __be32 dst)
{
	struct udphdr *uh = udp_hdr(skb);
	int offset = skb_transport_offset(skb);
	int len = skb->len - offset;
	int hlen = len;
	__wsum csum = 0;

	if (!skb_has_frag_list(skb)) {
		/*
		 * Only one fragment on the socket.
		 */
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct udphdr, check);
		uh->check = ~csum_tcpudp_magic(src, dst, len,
					       IPPROTO_UDP, 0);
	} else {
		struct sk_buff *frags;

		/*
		 * HW-checksum won't work as there are two or more
		 * fragments on the socket so that all csums of sk_buffs
		 * should be together
		 */
		skb_walk_frags(skb, frags) {
			csum = csum_add(csum, frags->csum);
			hlen -= frags->len;
		}

		csum = skb_checksum(skb, offset, hlen, csum);
		skb->ip_summed = CHECKSUM_NONE;

		uh->check = csum_tcpudp_magic(src, dst, len, IPPROTO_UDP, csum);
		if (uh->check == 0)
			uh->check = CSUM_MANGLED_0;
	}
}
EXPORT_SYMBOL_GPL(udp4_hwcsum);
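
/*
 * csum_tcpudp_magic() above folds the IPv4 pseudo-header (addresses,
 * protocol, length) into the one's-complement sum of RFC 768/RFC 1071.
 * A standalone sketch of the same arithmetic in portable C (illustrative;
 * byte-order handling simplified, function names ours):
 */
#if 0	/* illustrative userspace sketch; not part of the kernel build */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* 16-bit one's-complement sum over a buffer, RFC 1071 style. */
static uint32_t csum_partial32(const uint8_t *p, size_t len, uint32_t sum)
{
	while (len > 1) {
		sum += (uint32_t)p[0] << 8 | p[1];
		p += 2;
		len -= 2;
	}
	if (len)			/* odd trailing byte, zero padded */
		sum += (uint32_t)p[0] << 8;
	return sum;
}

static uint16_t csum_fold32(uint32_t sum)
{
	while (sum >> 16)		/* fold carries back in */
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

/* UDP checksum = fold(pseudo-header + UDP header + payload). */
static uint16_t udp4_checksum(uint32_t saddr, uint32_t daddr,
			      const uint8_t *udp, uint16_t len)
{
	uint32_t sum = 0;
	uint16_t folded;

	sum += saddr >> 16;		/* IPv4 pseudo-header, per RFC 768 */
	sum += saddr & 0xffff;
	sum += daddr >> 16;
	sum += daddr & 0xffff;
	sum += 17;			/* IPPROTO_UDP */
	sum += len;

	sum = csum_partial32(udp, len, sum);
	folded = csum_fold32(sum);
	return folded ? folded : 0xffff; /* 0 means "no checksum"; send ~0 */
}

int main(void)
{
	/* 8-byte UDP header with zeroed checksum field, no payload */
	uint8_t hdr[8] = { 0x04, 0xd2, 0x00, 0x35, 0x00, 0x08, 0x00, 0x00 };

	printf("check=0x%04x\n",
	       udp4_checksum(0xc0a80001, 0xc0a80002, hdr, sizeof(hdr)));
	return 0;
}
#endif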
/* Function to set UDP checksum for an IPv4 UDP packet. This is intended
 * for the simple case like when setting the checksum for a UDP tunnel.
 */
void udp_set_csum(bool nocheck, struct sk_buff *skb,
		  __be32 saddr, __be32 daddr, int len)
{
	struct udphdr *uh = udp_hdr(skb);

	if (nocheck) {
		uh->check = 0;
	} else if (skb_is_gso(skb)) {
		uh->check = ~udp_v4_check(len, saddr, daddr, 0);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		uh->check = 0;
		uh->check = udp_v4_check(len, saddr, daddr, lco_csum(skb));
		if (uh->check == 0)
			uh->check = CSUM_MANGLED_0;
	} else {
		skb->ip_summed = CHECKSUM_PARTIAL;
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct udphdr, check);
		uh->check = ~udp_v4_check(len, saddr, daddr, 0);
	}
}
EXPORT_SYMBOL(udp_set_csum);
static int udp_send_skb(struct sk_buff *skb, struct flowi4 *fl4)
{
	struct sock *sk = skb->sk;
	struct inet_sock *inet = inet_sk(sk);
	struct udphdr *uh;
	int err = 0;
	int is_udplite = IS_UDPLITE(sk);
	int offset = skb_transport_offset(skb);
	int len = skb->len - offset;
	__wsum csum = 0;

	/*
	 * Create a UDP header
	 */
	uh = udp_hdr(skb);
	uh->source = inet->inet_sport;
	uh->dest = fl4->fl4_dport;
	uh->len = htons(len);
	uh->check = 0;

	if (is_udplite)					 /*     UDP-Lite      */
		csum = udplite_csum(skb);

	else if (sk->sk_no_check_tx) {			 /* UDP csum disabled */

		skb->ip_summed = CHECKSUM_NONE;
		goto send;

	} else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */

		udp4_hwcsum(skb, fl4->saddr, fl4->daddr);
		goto send;

	} else
		csum = udp_csum(skb);

	/* add protocol-dependent pseudo-header */
	uh->check = csum_tcpudp_magic(fl4->saddr, fl4->daddr, len,
				      sk->sk_protocol, csum);
	if (uh->check == 0)
		uh->check = CSUM_MANGLED_0;

send:
	err = ip_send_skb(sock_net(sk), skb);
	if (err) {
		if (err == -ENOBUFS && !inet->recverr) {
			UDP_INC_STATS(sock_net(sk),
				      UDP_MIB_SNDBUFERRORS, is_udplite);
			err = 0;
		}
	} else
		UDP_INC_STATS(sock_net(sk),
			      UDP_MIB_OUTDATAGRAMS, is_udplite);
	return err;
}
/*
 * Push out all pending data as one UDP datagram. Socket is locked.
 */
int udp_push_pending_frames(struct sock *sk)
{
	struct udp_sock  *up = udp_sk(sk);
	struct inet_sock *inet = inet_sk(sk);
	struct flowi4 *fl4 = &inet->cork.fl.u.ip4;
	struct sk_buff *skb;
	int err = 0;

	skb = ip_finish_skb(sk, fl4);
	if (!skb)
		goto out;

	err = udp_send_skb(skb, fl4);

out:
	up->len = 0;
	up->pending = 0;
	return err;
}
EXPORT_SYMBOL(udp_push_pending_frames);
int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
{
	struct inet_sock *inet = inet_sk(sk);
	struct udp_sock *up = udp_sk(sk);
	struct flowi4 fl4_stack;
	struct flowi4 *fl4;
	int ulen = len;
	struct ipcm_cookie ipc;
	struct rtable *rt = NULL;
	int free = 0;
	int connected = 0;
	__be32 daddr, faddr, saddr;
	__be16 dport;
	u8  tos;
	int err, is_udplite = IS_UDPLITE(sk);
	int corkreq = up->corkflag || msg->msg_flags&MSG_MORE;
	int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);
	struct sk_buff *skb;
	struct ip_options_data opt_copy;

	if (len > 0xFFFF)
		return -EMSGSIZE;

	/*
	 *	Check the flags.
	 */

	if (msg->msg_flags & MSG_OOB) /* Mirror BSD error message compatibility */
		return -EOPNOTSUPP;

	ipc.opt = NULL;
	ipc.tx_flags = 0;
	ipc.ttl = 0;
	ipc.tos = -1;

	getfrag = is_udplite ? udplite_getfrag : ip_generic_getfrag;

	fl4 = &inet->cork.fl.u.ip4;
	if (up->pending) {
		/*
		 * There are pending frames.
		 * The socket lock must be held while it's corked.
		 */
		lock_sock(sk);
		if (likely(up->pending)) {
			if (unlikely(up->pending != AF_INET)) {
				release_sock(sk);
				return -EINVAL;
			}
			goto do_append_data;
		}
		release_sock(sk);
	}
	ulen += sizeof(struct udphdr);

	/*
	 *	Get and verify the address.
	 */
	if (msg->msg_name) {
		DECLARE_SOCKADDR(struct sockaddr_in *, usin, msg->msg_name);
		if (msg->msg_namelen < sizeof(*usin))
			return -EINVAL;
		if (usin->sin_family != AF_INET) {
			if (usin->sin_family != AF_UNSPEC)
				return -EAFNOSUPPORT;
		}

		daddr = usin->sin_addr.s_addr;
		dport = usin->sin_port;
		if (dport == 0)
			return -EINVAL;
	} else {
		if (sk->sk_state != TCP_ESTABLISHED)
			return -EDESTADDRREQ;
		daddr = inet->inet_daddr;
		dport = inet->inet_dport;
		/* Open fast path for connected socket.
		   Route will not be used, if at least one option is set.
		 */
		connected = 1;
	}

	ipc.sockc.tsflags = sk->sk_tsflags;
	ipc.addr = inet->inet_saddr;
	ipc.oif = sk->sk_bound_dev_if;

	if (msg->msg_controllen) {
		err = ip_cmsg_send(sk, msg, &ipc, sk->sk_family == AF_INET6);
		if (unlikely(err)) {
			kfree(ipc.opt);
			return err;
		}
		if (ipc.opt)
			free = 1;
		connected = 0;
	}
	if (!ipc.opt) {
		struct ip_options_rcu *inet_opt;

		rcu_read_lock();
		inet_opt = rcu_dereference(inet->inet_opt);
		if (inet_opt) {
			memcpy(&opt_copy, inet_opt,
			       sizeof(*inet_opt) + inet_opt->opt.optlen);
			ipc.opt = &opt_copy.opt;
		}
		rcu_read_unlock();
	}

	saddr = ipc.addr;
	ipc.addr = faddr = daddr;

	sock_tx_timestamp(sk, ipc.sockc.tsflags, &ipc.tx_flags);

	if (ipc.opt && ipc.opt->opt.srr) {
		if (!daddr)
			return -EINVAL;
		faddr = ipc.opt->opt.faddr;
		connected = 0;
	}
	tos = get_rttos(&ipc, inet);
	if (sock_flag(sk, SOCK_LOCALROUTE) ||
	    (msg->msg_flags & MSG_DONTROUTE) ||
	    (ipc.opt && ipc.opt->opt.is_strictroute)) {
		tos |= RTO_ONLINK;
		connected = 0;
	}

	if (ipv4_is_multicast(daddr)) {
		if (!ipc.oif)
			ipc.oif = inet->mc_index;
		if (!saddr)
			saddr = inet->mc_addr;
		connected = 0;
	} else if (!ipc.oif)
		ipc.oif = inet->uc_index;

	if (connected)
		rt = (struct rtable *)sk_dst_check(sk, 0);

	if (!rt) {
		struct net *net = sock_net(sk);
		__u8 flow_flags = inet_sk_flowi_flags(sk);

		fl4 = &fl4_stack;

		flowi4_init_output(fl4, ipc.oif, sk->sk_mark, tos,
				   RT_SCOPE_UNIVERSE, sk->sk_protocol,
				   flow_flags,
				   faddr, saddr, dport, inet->inet_sport,
				   sk->sk_uid);

		security_sk_classify_flow(sk, flowi4_to_flowi(fl4));
		rt = ip_route_output_flow(net, fl4, sk);
		if (IS_ERR(rt)) {
			err = PTR_ERR(rt);
			rt = NULL;
			if (err == -ENETUNREACH)
				IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
			goto out;
		}

		err = -EACCES;
		if ((rt->rt_flags & RTCF_BROADCAST) &&
		    !sock_flag(sk, SOCK_BROADCAST))
			goto out;
		if (connected)
			sk_dst_set(sk, dst_clone(&rt->dst));
	}

	if (msg->msg_flags&MSG_CONFIRM)
		goto do_confirm;
back_from_confirm:

	saddr = fl4->saddr;
	if (!ipc.addr)
		daddr = ipc.addr = fl4->daddr;

	/* Lockless fast path for the non-corking case. */
	if (!corkreq) {
		skb = ip_make_skb(sk, fl4, getfrag, msg, ulen,
				  sizeof(struct udphdr), &ipc, &rt,
				  msg->msg_flags);
		err = PTR_ERR(skb);
		if (!IS_ERR_OR_NULL(skb))
			err = udp_send_skb(skb, fl4);
		goto out;
	}

	lock_sock(sk);
	if (unlikely(up->pending)) {
		/* The socket is already corked while preparing it. */
		/* ... which is an evident application bug. --ANK */
		release_sock(sk);

		net_dbg_ratelimited("cork app bug 2\n");
		err = -EINVAL;
		goto out;
	}
	/*
	 *	Now cork the socket to pend data.
	 */
	fl4 = &inet->cork.fl.u.ip4;
	fl4->daddr = daddr;
	fl4->saddr = saddr;
	fl4->fl4_dport = dport;
	fl4->fl4_sport = inet->inet_sport;
	up->pending = AF_INET;

do_append_data:
	up->len += ulen;
	err = ip_append_data(sk, fl4, getfrag, msg, ulen,
			     sizeof(struct udphdr), &ipc, &rt,
			     corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags);
	if (err)
		udp_flush_pending_frames(sk);
	else if (!corkreq)
		err = udp_push_pending_frames(sk);
	else if (unlikely(skb_queue_empty(&sk->sk_write_queue)))
		up->pending = 0;
	release_sock(sk);

out:
	ip_rt_put(rt);
	if (free)
		kfree(ipc.opt);
	if (!err)
		return len;
	/*
	 * ENOBUFS = no kernel mem, SOCK_NOSPACE = no sndbuf space.  Reporting
	 * ENOBUFS might not be good (it's not tunable per se), but otherwise
	 * we don't have a good statistic (IpOutDiscards but it can be too many
	 * things).  We could add another new stat but at least for now that
	 * seems like overkill.
	 */
	if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
		UDP_INC_STATS(sock_net(sk),
			      UDP_MIB_SNDBUFERRORS, is_udplite);
	}
	return err;

do_confirm:
	if (msg->msg_flags & MSG_PROBE)
		dst_confirm_neigh(&rt->dst, &fl4->daddr);
	if (!(msg->msg_flags&MSG_PROBE) || len)
		goto back_from_confirm;
	err = 0;
	goto out;
}
EXPORT_SYMBOL(udp_sendmsg);
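
/*
 * The corking path above (up->pending / MSG_MORE) is driven from userspace
 * either per-call with MSG_MORE or per-socket with the UDP_CORK socket
 * option.  A hedged sketch of building one datagram from two writes
 * (assumes <netinet/udp.h> exposes UDP_CORK, as glibc does; names ours):
 */
#if 0	/* illustrative userspace sketch; not part of the kernel build */
#include <netinet/in.h>
#include <netinet/udp.h>
#include <sys/socket.h>

/* Send "hello world" as a single datagram built from two sendto() calls. */
static int corked_send(int fd, const struct sockaddr_in *dst)
{
	int on = 1, off = 0;

	if (setsockopt(fd, IPPROTO_UDP, UDP_CORK, &on, sizeof(on)) < 0)
		return -1;
	sendto(fd, "hello ", 6, 0, (const struct sockaddr *)dst, sizeof(*dst));
	sendto(fd, "world", 5, 0, (const struct sockaddr *)dst, sizeof(*dst));
	/* uncorking pushes the pending frames as one UDP packet */
	return setsockopt(fd, IPPROTO_UDP, UDP_CORK, &off, sizeof(off));
}
#endif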
int udp_sendpage(struct sock *sk, struct page *page, int offset,
		 size_t size, int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	struct udp_sock *up = udp_sk(sk);
	int ret;

	if (flags & MSG_SENDPAGE_NOTLAST)
		flags |= MSG_MORE;

	if (!up->pending) {
		struct msghdr msg = { .msg_flags = flags|MSG_MORE };

		/* Call udp_sendmsg to specify destination address which
		 * sendpage interface can't pass.
		 * This will succeed only when the socket is connected.
		 */
		ret = udp_sendmsg(sk, &msg, 0);
		if (ret < 0)
			return ret;
	}

	lock_sock(sk);

	if (unlikely(!up->pending)) {
		release_sock(sk);

		net_dbg_ratelimited("udp cork app bug 3\n");
		return -EINVAL;
	}

	ret = ip_append_page(sk, &inet->cork.fl.u.ip4,
			     page, offset, size, flags);
	if (ret == -EOPNOTSUPP) {
		release_sock(sk);
		return sock_no_sendpage(sk->sk_socket, page, offset,
					size, flags);
	}
	if (ret < 0) {
		udp_flush_pending_frames(sk);
		goto out;
	}

	up->len += size;
	if (!(up->corkflag || (flags&MSG_MORE)))
		ret = udp_push_pending_frames(sk);
	if (!ret)
		ret = size;
out:
	release_sock(sk);
	return ret;
}
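
/*
 * udp_sendpage() is the kernel side of sendfile(2)/splice(2) to a UDP
 * socket (see "sendfile() on UDP works now" in the changelog above); it
 * needs a connect()ed socket since sendfile carries no destination.  A
 * hedged userspace sketch (helper name ours):
 */
#if 0	/* illustrative userspace sketch; not part of the kernel build */
#include <fcntl.h>
#include <sys/sendfile.h>
#include <sys/stat.h>
#include <unistd.h>

/* Ship a small file as UDP payload via the sendpage path. */
static ssize_t udp_sendfile(int udp_fd, const char *path)
{
	struct stat st;
	off_t off = 0;
	ssize_t ret;
	int in = open(path, O_RDONLY);

	if (in < 0)
		return -1;
	if (fstat(in, &st) < 0) {
		close(in);
		return -1;
	}
	/* udp_fd must already be connect()ed; datagrams need a destination */
	ret = sendfile(udp_fd, in, &off, st.st_size);
	close(in);
	return ret;
}
#endif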
#define UDP_SKB_IS_STATELESS 0x80000000

static void udp_set_dev_scratch(struct sk_buff *skb)
{
	struct udp_dev_scratch *scratch = udp_skb_scratch(skb);

	BUILD_BUG_ON(sizeof(struct udp_dev_scratch) > sizeof(long));
	scratch->_tsize_state = skb->truesize;
#if BITS_PER_LONG == 64
	scratch->len = skb->len;
	scratch->csum_unnecessary = !!skb_csum_unnecessary(skb);
	scratch->is_linear = !skb_is_nonlinear(skb);
#endif
	if (likely(!skb->_skb_refdst))
		scratch->_tsize_state |= UDP_SKB_IS_STATELESS;
}

static int udp_skb_truesize(struct sk_buff *skb)
{
	return udp_skb_scratch(skb)->_tsize_state & ~UDP_SKB_IS_STATELESS;
}

static bool udp_skb_has_head_state(struct sk_buff *skb)
{
	return !(udp_skb_scratch(skb)->_tsize_state & UDP_SKB_IS_STATELESS);
}
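
/*
 * The scratch helpers above cache truesize in skb->dev_scratch and reuse
 * the top bit of _tsize_state as the UDP_SKB_IS_STATELESS flag, since
 * truesize never approaches 2^31.  The same pack-a-flag-into-a-size trick
 * in isolation (illustrative; names ours):
 */
#if 0	/* illustrative userspace sketch; not part of the kernel build */
#include <assert.h>
#include <stdint.h>

#define STATELESS_FLAG	0x80000000u	/* mirrors UDP_SKB_IS_STATELESS */

/* truesize stays well below 2^31, so its top bit is free for a flag */
static uint32_t pack(uint32_t truesize, int stateless)
{
	return truesize | (stateless ? STATELESS_FLAG : 0);
}

static uint32_t unpack_size(uint32_t v)      { return v & ~STATELESS_FLAG; }
static int      unpack_stateless(uint32_t v) { return !!(v & STATELESS_FLAG); }

int main(void)
{
	uint32_t v = pack(2048, 1);

	assert(unpack_size(v) == 2048 && unpack_stateless(v));
	return 0;
}
#endif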
/* fully reclaim rmem/fwd memory allocated for skb */
static void udp_rmem_release(struct sock *sk, int size, int partial,
			     bool rx_queue_lock_held)
{
	struct udp_sock *up = udp_sk(sk);
	struct sk_buff_head *sk_queue;
	int amt;

	if (likely(partial)) {
		up->forward_deficit += size;
		size = up->forward_deficit;
		if (size < (sk->sk_rcvbuf >> 2) &&
		    !skb_queue_empty(&up->reader_queue))
			return;
	} else {
		size += up->forward_deficit;
	}
	up->forward_deficit = 0;

	/* acquire the sk_receive_queue for fwd allocated memory scheduling,
	 * if the caller doesn't hold it already
	 */
	sk_queue = &sk->sk_receive_queue;
	if (!rx_queue_lock_held)
		spin_lock(&sk_queue->lock);

	sk->sk_forward_alloc += size;
	amt = (sk->sk_forward_alloc - partial) & ~(SK_MEM_QUANTUM - 1);
	sk->sk_forward_alloc -= amt;

	if (amt)
		__sk_mem_reduce_allocated(sk, amt >> SK_MEM_QUANTUM_SHIFT);

	atomic_sub(size, &sk->sk_rmem_alloc);

	/* this can save us from acquiring the rx queue lock on next receive */
	skb_queue_splice_tail_init(sk_queue, &up->reader_queue);

	if (!rx_queue_lock_held)
		spin_unlock(&sk_queue->lock);
}
/* Note: called with reader_queue.lock held.
 * Instead of using skb->truesize here, find a copy of it in skb->dev_scratch
 * This avoids a cache line miss while receive_queue lock is held.
 * Look at __udp_enqueue_schedule_skb() to find where this copy is done.
 */
void udp_skb_destructor(struct sock *sk, struct sk_buff *skb)
{
	prefetch(&skb->data);
	udp_rmem_release(sk, udp_skb_truesize(skb), 1, false);
}
EXPORT_SYMBOL(udp_skb_destructor);

/* as above, but the caller held the rx queue lock, too */
static void udp_skb_dtor_locked(struct sock *sk, struct sk_buff *skb)
{
	prefetch(&skb->data);
	udp_rmem_release(sk, udp_skb_truesize(skb), 1, true);
}
/* Idea of busylocks is to let producers grab an extra spinlock
 * to relieve pressure on the receive_queue spinlock shared by consumer.
 * Under flood, this means that only one producer can be in line
 * trying to acquire the receive_queue spinlock.
 * These busylocks can be allocated in a per-cpu manner, instead of a
 * per-socket one (that would consume a cache line per socket)
 */
static int udp_busylocks_log __read_mostly;
static spinlock_t *udp_busylocks __read_mostly;

static spinlock_t *busylock_acquire(void *ptr)
{
	spinlock_t *busy;

	busy = udp_busylocks + hash_ptr(ptr, udp_busylocks_log);
	spin_lock(busy);
	return busy;
}

static void busylock_release(spinlock_t *busy)
{
	if (busy)
		spin_unlock(busy);
}
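
/*
 * The busylock array is a classic hashed-lock scheme: a small fixed set of
 * locks indexed by a pointer hash, so unrelated sockets rarely collide while
 * each socket's producers serialize among themselves.  A generic pthreads
 * sketch of the same idea (illustrative; constants and names ours):
 */
#if 0	/* illustrative userspace sketch; not part of the kernel build */
#include <pthread.h>
#include <stdint.h>

#define NLOCKS 64	/* power of two, analogous to 2^udp_busylocks_log */

static pthread_spinlock_t busylocks[NLOCKS];

static void busylocks_init(void)
{
	for (int i = 0; i < NLOCKS; i++)
		pthread_spin_init(&busylocks[i], PTHREAD_PROCESS_PRIVATE);
}

/* Mix the pointer bits and pick one lock; same spirit as hash_ptr(). */
static pthread_spinlock_t *busylock_acquire(void *ptr)
{
	uint64_t h = (uint64_t)(uintptr_t)ptr;
	pthread_spinlock_t *lock;

	h = (h >> 4) * 0x9e3779b97f4a7c15ull;	/* golden-ratio multiplier */
	lock = &busylocks[(h >> 32) & (NLOCKS - 1)];
	pthread_spin_lock(lock);
	return lock;
}

static void busylock_release(pthread_spinlock_t *lock)
{
	pthread_spin_unlock(lock);
}
#endif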
int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb)
{
	struct sk_buff_head *list = &sk->sk_receive_queue;
	int rmem, delta, amt, err = -ENOMEM;
	spinlock_t *busy = NULL;
	int size;

	/* try to avoid the costly atomic add/sub pair when the receive
	 * queue is full; always allow at least a packet
	 */
	rmem = atomic_read(&sk->sk_rmem_alloc);
	if (rmem > sk->sk_rcvbuf)
		goto drop;

	/* Under mem pressure, it might be helpful to help udp_recvmsg()
	 * having linear skbs :
	 * - Reduce memory overhead and thus increase receive queue capacity
	 * - Less cache line misses at copyout() time
	 * - Less work at consume_skb() (less alien page frag freeing)
	 */
	if (rmem > (sk->sk_rcvbuf >> 1)) {
		skb_condense(skb);

		busy = busylock_acquire(sk);
	}
	size = skb->truesize;
	udp_set_dev_scratch(skb);

	/* we drop only if the receive buf is full and the receive
	 * queue contains some other skb
	 */
	rmem = atomic_add_return(size, &sk->sk_rmem_alloc);
	if (rmem > (size + sk->sk_rcvbuf))
		goto uncharge_drop;

	spin_lock(&list->lock);
	if (size >= sk->sk_forward_alloc) {
		amt = sk_mem_pages(size);
		delta = amt << SK_MEM_QUANTUM_SHIFT;
		if (!__sk_mem_raise_allocated(sk, delta, amt, SK_MEM_RECV)) {
			err = -ENOBUFS;
			spin_unlock(&list->lock);
			goto uncharge_drop;
		}

		sk->sk_forward_alloc += delta;
	}

	sk->sk_forward_alloc -= size;

	/* no need to setup a destructor, we will explicitly release the
	 * forward allocated memory on dequeue
	 */
	sock_skb_set_dropcount(sk, skb);

	__skb_queue_tail(list, skb);
	spin_unlock(&list->lock);

	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk);

	busylock_release(busy);
	return 0;

uncharge_drop:
	atomic_sub(skb->truesize, &sk->sk_rmem_alloc);

drop:
	atomic_inc(&sk->sk_drops);
	busylock_release(busy);
	return err;
}
EXPORT_SYMBOL_GPL(__udp_enqueue_schedule_skb);
void udp_destruct_sock(struct sock *sk)
{
	/* reclaim completely the forward allocated memory */
	struct udp_sock *up = udp_sk(sk);
	unsigned int total = 0;
	struct sk_buff *skb;

	skb_queue_splice_tail_init(&sk->sk_receive_queue, &up->reader_queue);
	while ((skb = __skb_dequeue(&up->reader_queue)) != NULL) {
		total += skb->truesize;
		kfree_skb(skb);
	}
	udp_rmem_release(sk, total, 0, true);

	inet_sock_destruct(sk);
}
EXPORT_SYMBOL_GPL(udp_destruct_sock);

int udp_init_sock(struct sock *sk)
{
	skb_queue_head_init(&udp_sk(sk)->reader_queue);
	sk->sk_destruct = udp_destruct_sock;
	return 0;
}
EXPORT_SYMBOL_GPL(udp_init_sock);
void skb_consume_udp(struct sock *sk, struct sk_buff *skb, int len)
{
	if (unlikely(READ_ONCE(sk->sk_peek_off) >= 0)) {
		bool slow = lock_sock_fast(sk);

		sk_peek_offset_bwd(sk, len);
		unlock_sock_fast(sk, slow);
	}

	/* In the more common cases we cleared the head states previously,
	 * see __udp_queue_rcv_skb().
	 */
	if (unlikely(udp_skb_has_head_state(skb)))
		skb_release_head_state(skb);
	consume_stateless_skb(skb);
}
EXPORT_SYMBOL_GPL(skb_consume_udp);
static struct sk_buff *__first_packet_length(struct sock *sk,
					     struct sk_buff_head *rcvq,
					     int *total)
{
	struct sk_buff *skb;

	while ((skb = skb_peek(rcvq)) != NULL) {
		if (udp_lib_checksum_complete(skb)) {
			__UDP_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS,
					IS_UDPLITE(sk));
			__UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS,
					IS_UDPLITE(sk));
			atomic_inc(&sk->sk_drops);
			__skb_unlink(skb, rcvq);
			*total += skb->truesize;
			kfree_skb(skb);
		} else {
			/* the csum related bits could be changed, refresh
			 * the scratch area
			 */
			udp_set_dev_scratch(skb);
			break;
		}
	}
	return skb;
}
/**
 *	first_packet_length	- return length of first packet in receive queue
 *	@sk: socket
 *
 *	Drops all bad checksum frames, until a valid one is found.
 *	Returns the length of found skb, or -1 if none is found.
 */
static int first_packet_length(struct sock *sk)
{
	struct sk_buff_head *rcvq = &udp_sk(sk)->reader_queue;
	struct sk_buff_head *sk_queue = &sk->sk_receive_queue;
	struct sk_buff *skb;
	int total = 0;
	int res;

	spin_lock_bh(&rcvq->lock);
	skb = __first_packet_length(sk, rcvq, &total);
	if (!skb && !skb_queue_empty(sk_queue)) {
		spin_lock(&sk_queue->lock);
		skb_queue_splice_tail_init(sk_queue, rcvq);
		spin_unlock(&sk_queue->lock);

		skb = __first_packet_length(sk, rcvq, &total);
	}
	res = skb ? skb->len : -1;
	if (total)
		udp_rmem_release(sk, total, 1, false);
	spin_unlock_bh(&rcvq->lock);
	return res;
}
/*
 *	IOCTL requests applicable to the UDP protocol
 */

int udp_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
	switch (cmd) {
	case SIOCOUTQ:
	{
		int amount = sk_wmem_alloc_get(sk);

		return put_user(amount, (int __user *)arg);
	}

	case SIOCINQ:
	{
		int amount = max_t(int, 0, first_packet_length(sk));

		return put_user(amount, (int __user *)arg);
	}

	default:
		return -ENOIOCTLCMD;
	}

	return 0;
}
EXPORT_SYMBOL(udp_ioctl);
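
/*
 * From userspace these map to the usual queue-size ioctls; note that for
 * UDP, SIOCINQ reports the length of the *next* datagram (via
 * first_packet_length() above), not the total bytes queued.  Illustrative
 * usage (helper name ours):
 */
#if 0	/* illustrative userspace sketch; not part of the kernel build */
#include <linux/sockios.h>	/* SIOCINQ / SIOCOUTQ */
#include <stdio.h>
#include <sys/ioctl.h>

static void report_queues(int udp_fd)
{
	int inq = 0, outq = 0;

	if (ioctl(udp_fd, SIOCINQ, &inq) == 0)
		printf("next datagram: %d bytes\n", inq);
	if (ioctl(udp_fd, SIOCOUTQ, &outq) == 0)
		printf("send queue: %d bytes\n", outq);
}
#endif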
struct sk_buff *__skb_recv_udp(struct sock *sk, unsigned int flags,
			       int noblock, int *peeked, int *off, int *err)
{
	struct sk_buff_head *sk_queue = &sk->sk_receive_queue;
	struct sk_buff_head *queue;
	struct sk_buff *last;
	long timeo;
	int error;

	queue = &udp_sk(sk)->reader_queue;
	flags |= noblock ? MSG_DONTWAIT : 0;
	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
	do {
		struct sk_buff *skb;

		error = sock_error(sk);
		if (error)
			break;

		error = -EAGAIN;
		*peeked = 0;
		do {
			spin_lock_bh(&queue->lock);
			skb = __skb_try_recv_from_queue(sk, queue, flags,
							udp_skb_destructor,
							peeked, off, err,
							&last);
			if (skb) {
				spin_unlock_bh(&queue->lock);
				return skb;
			}

			if (skb_queue_empty(sk_queue)) {
				spin_unlock_bh(&queue->lock);
				goto busy_check;
			}

			/* refill the reader queue and walk it again
			 * keep both queues locked to avoid re-acquiring
			 * the sk_receive_queue lock if fwd memory scheduling
			 * is needed.
			 */
			spin_lock(&sk_queue->lock);
			skb_queue_splice_tail_init(sk_queue, queue);

			skb = __skb_try_recv_from_queue(sk, queue, flags,
							udp_skb_dtor_locked,
							peeked, off, err,
							&last);
			spin_unlock(&sk_queue->lock);
			spin_unlock_bh(&queue->lock);
			if (skb)
				return skb;

busy_check:
			if (!sk_can_busy_loop(sk))
				break;

			sk_busy_loop(sk, flags & MSG_DONTWAIT);
		} while (!skb_queue_empty(sk_queue));

		/* sk_queue is empty, reader_queue may contain peeked packets */
	} while (timeo &&
		 !__skb_wait_for_more_packets(sk, &error, &timeo,
					      (struct sk_buff *)sk_queue));

	*err = error;
	return NULL;
}
EXPORT_SYMBOL_GPL(__skb_recv_udp);

/*
 *	This should be easy: if there is something there, we return it;
 *	otherwise we block.
 */
int udp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock,
		int flags, int *addr_len)
{
	struct inet_sock *inet = inet_sk(sk);
	DECLARE_SOCKADDR(struct sockaddr_in *, sin, msg->msg_name);
	struct sk_buff *skb;
	unsigned int ulen, copied;
	int peeked, peeking, off;
	int err;
	int is_udplite = IS_UDPLITE(sk);
	bool checksum_valid = false;

	if (flags & MSG_ERRQUEUE)
		return ip_recv_error(sk, msg, len, addr_len);

try_again:
	peeking = off = sk_peek_offset(sk, flags);
	skb = __skb_recv_udp(sk, flags, noblock, &peeked, &off, &err);
	if (!skb)
		return err;

	ulen = udp_skb_len(skb);
	copied = len;
	if (copied > ulen - off)
		copied = ulen - off;
	else if (copied < ulen)
		msg->msg_flags |= MSG_TRUNC;

	/*
	 * If checksum is needed at all, try to do it while copying the
	 * data.  If the data is truncated, or if we only want a partial
	 * coverage checksum (UDP-Lite), do it before the copy.
	 */
	if (copied < ulen || peeking ||
	    (is_udplite && UDP_SKB_CB(skb)->partial_cov)) {
		checksum_valid = udp_skb_csum_unnecessary(skb) ||
				!__udp_lib_checksum_complete(skb);
		if (!checksum_valid)
			goto csum_copy_err;
	}

	if (checksum_valid || udp_skb_csum_unnecessary(skb)) {
		if (udp_skb_is_linear(skb))
			err = copy_linear_skb(skb, copied, off, &msg->msg_iter);
		else
			err = skb_copy_datagram_msg(skb, off, msg, copied);
	} else {
		err = skb_copy_and_csum_datagram_msg(skb, off, msg);

		if (err == -EINVAL)
			goto csum_copy_err;
	}

	if (unlikely(err)) {
		if (!peeked) {
			atomic_inc(&sk->sk_drops);
			UDP_INC_STATS(sock_net(sk),
				      UDP_MIB_INERRORS, is_udplite);
		}
		kfree_skb(skb);
		return err;
	}

	if (!peeked)
		UDP_INC_STATS(sock_net(sk),
			      UDP_MIB_INDATAGRAMS, is_udplite);

	sock_recv_ts_and_drops(msg, sk, skb);

	/* Copy the address. */
	if (sin) {
		sin->sin_family = AF_INET;
		sin->sin_port = udp_hdr(skb)->source;
		sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
		memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
		*addr_len = sizeof(*sin);
	}
	if (inet->cmsg_flags)
		ip_cmsg_recv_offset(msg, sk, skb, sizeof(struct udphdr), off);

	err = copied;
	if (flags & MSG_TRUNC)
		err = ulen;

	skb_consume_udp(sk, skb, peeking ? -err : err);
	return err;

csum_copy_err:
	if (!__sk_queue_drop_skb(sk, &udp_sk(sk)->reader_queue, skb, flags,
				 udp_skb_destructor)) {
		UDP_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
		UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
	}
	kfree_skb(skb);

	/* starting over for a new packet, but check if we need to yield */
	cond_resched();
	msg->msg_flags &= ~MSG_TRUNC;
	goto try_again;
}

int __udp_disconnect(struct sock *sk, int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	/*
	 *	1003.1g - break association.
	 */

	sk->sk_state = TCP_CLOSE;
	inet->inet_daddr = 0;
	inet->inet_dport = 0;
	sock_rps_reset_rxhash(sk);
	sk->sk_bound_dev_if = 0;
	if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
		inet_reset_saddr(sk);

	if (!(sk->sk_userlocks & SOCK_BINDPORT_LOCK)) {
		sk->sk_prot->unhash(sk);
		inet->inet_sport = 0;
	}
	sk_dst_reset(sk);
	return 0;
}
EXPORT_SYMBOL(__udp_disconnect);

int udp_disconnect(struct sock *sk, int flags)
{
	lock_sock(sk);
	__udp_disconnect(sk, flags);
	release_sock(sk);
	return 0;
}
EXPORT_SYMBOL(udp_disconnect);
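
/* Remove the socket from both the primary (port) hash chain and the
 * secondary (port+address) hash chain, taking the slot locks in the
 * same order as the insertion path does.
 */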
void udp_lib_unhash(struct sock *sk)
{
	if (sk_hashed(sk)) {
		struct udp_table *udptable = sk->sk_prot->h.udp_table;
		struct udp_hslot *hslot, *hslot2;

		hslot  = udp_hashslot(udptable, sock_net(sk),
				      udp_sk(sk)->udp_port_hash);
		hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash);

		spin_lock_bh(&hslot->lock);
		if (rcu_access_pointer(sk->sk_reuseport_cb))
			reuseport_detach_sock(sk);
		if (sk_del_node_init_rcu(sk)) {
			hslot->count--;
			inet_sk(sk)->inet_num = 0;
			sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);

			spin_lock(&hslot2->lock);
			hlist_del_init_rcu(&udp_sk(sk)->udp_portaddr_node);
			hslot2->count--;
			spin_unlock(&hslot2->lock);
		}
		spin_unlock_bh(&hslot->lock);
	}
}
EXPORT_SYMBOL(udp_lib_unhash);

/*
 * inet_rcv_saddr has changed; we must rehash the secondary (port+address) hash
 */
void udp_lib_rehash(struct sock *sk, u16 newhash)
{
	if (sk_hashed(sk)) {
		struct udp_table *udptable = sk->sk_prot->h.udp_table;
		struct udp_hslot *hslot, *hslot2, *nhslot2;

		hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash);
		nhslot2 = udp_hashslot2(udptable, newhash);
		udp_sk(sk)->udp_portaddr_hash = newhash;

		if (hslot2 != nhslot2 ||
		    rcu_access_pointer(sk->sk_reuseport_cb)) {
			hslot = udp_hashslot(udptable, sock_net(sk),
					     udp_sk(sk)->udp_port_hash);
			/* we must lock primary chain too */
			spin_lock_bh(&hslot->lock);
			if (rcu_access_pointer(sk->sk_reuseport_cb))
				reuseport_detach_sock(sk);

			if (hslot2 != nhslot2) {
				spin_lock(&hslot2->lock);
				hlist_del_init_rcu(&udp_sk(sk)->udp_portaddr_node);
				hslot2->count--;
				spin_unlock(&hslot2->lock);

				spin_lock(&nhslot2->lock);
				hlist_add_head_rcu(&udp_sk(sk)->udp_portaddr_node,
						   &nhslot2->head);
				nhslot2->count++;
				spin_unlock(&nhslot2->lock);
			}

			spin_unlock_bh(&hslot->lock);
		}
	}
}
EXPORT_SYMBOL(udp_lib_rehash);

static void udp_v4_rehash(struct sock *sk)
{
	u16 new_hash = udp4_portaddr_hash(sock_net(sk),
					  inet_sk(sk)->inet_rcv_saddr,
					  inet_sk(sk)->inet_num);
	udp_lib_rehash(sk, new_hash);
}
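
/* Enqueue the skb on the socket; for connected sockets also record the
 * flow steering state (RPS rxhash, NAPI id, incoming cpu) while the
 * packet is hot.
 */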
static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	int rc;

	if (inet_sk(sk)->inet_daddr) {
		sock_rps_save_rxhash(sk, skb);
		sk_mark_napi_id(sk, skb);
		sk_incoming_cpu_update(sk);
	} else {
		sk_mark_napi_id_once(sk, skb);
	}

	/* At recvmsg() time we may access skb->dst or skb->sp depending on
	 * the IP options and the cmsg flags; otherwise we can clear all
	 * pending head states now, while they are hot in the cache.
	 */
	if (likely(IPCB(skb)->opt.optlen == 0 && !skb_sec_path(skb)))
		skb_release_head_state(skb);

	rc = __udp_enqueue_schedule_skb(sk, skb);
	if (rc < 0) {
		int is_udplite = IS_UDPLITE(sk);

		/* Note that an ENOMEM error is charged twice */
		if (rc == -ENOMEM)
			UDP_INC_STATS(sock_net(sk), UDP_MIB_RCVBUFERRORS,
					is_udplite);
		UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
		kfree_skb(skb);
		trace_udp_fail_queue_rcv_skb(rc, sk);
		return -1;
	}

	return 0;
}
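
/* udp_encap_needed is a static key: the encap_type check in the receive
 * path stays a no-op branch until the first encapsulation socket is
 * enabled via udp_encap_enable().
 */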
static struct static_key udp_encap_needed __read_mostly;
void udp_encap_enable(void)
{
	if (!static_key_enabled(&udp_encap_needed))
		static_key_slow_inc(&udp_encap_needed);
}
EXPORT_SYMBOL(udp_encap_enable);

/* returns:
 *  -1: error
 *   0: success
 *  >0: "udp encap" protocol resubmission
 *
 * Note that in the success and error cases, the skb is assumed to
 * have either been requeued or freed.
 */
static int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	struct udp_sock *up = udp_sk(sk);
	int is_udplite = IS_UDPLITE(sk);

	/*
	 *	Charge it to the socket, dropping if the queue is full.
	 */
	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
		goto drop;
	nf_reset(skb);

	if (static_key_false(&udp_encap_needed) && up->encap_type) {
		int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);

		/*
		 * This is an encapsulation socket so pass the skb to
		 * the socket's udp_encap_rcv() hook.  Otherwise, just
		 * fall through and pass this up the UDP socket.
		 * up->encap_rcv() returns the following value:
		 * =0 if skb was successfully passed to the encap
		 *    handler or was discarded by it.
		 * >0 if skb should be passed on to UDP.
		 * <0 if skb should be resubmitted as proto -N
		 */

		/* if we're overly short, let UDP handle it */
		encap_rcv = ACCESS_ONCE(up->encap_rcv);
		if (encap_rcv) {
			int ret;

			/* Verify checksum before giving to encap */
			if (udp_lib_checksum_complete(skb))
				goto csum_error;

			ret = encap_rcv(sk, skb);
			if (ret <= 0) {
				__UDP_INC_STATS(sock_net(sk),
						UDP_MIB_INDATAGRAMS,
						is_udplite);
				return -ret;
			}
		}

		/* FALLTHROUGH -- it's a UDP packet */
	}

	/*
	 *	UDP-Lite specific tests, ignored on UDP sockets
	 */
	if ((is_udplite & UDPLITE_RECV_CC) && UDP_SKB_CB(skb)->partial_cov) {

		/*
		 * MIB statistics other than incrementing the error count are
		 * disabled for the following two types of errors: these depend
		 * on the application settings, not on the functioning of the
		 * protocol stack as such.
		 *
		 * RFC 3828 here recommends (sec 3.3): "There should also be a
		 * way ... to ... at least let the receiving application block
		 * delivery of packets with coverage values less than a value
		 * provided by the application."
		 */
		if (up->pcrlen == 0) {		/* full coverage was set */
			net_dbg_ratelimited("UDPLite: partial coverage %d while full coverage %d requested\n",
					    UDP_SKB_CB(skb)->cscov, skb->len);
			goto drop;
		}
		/* The next case involves violating the min. coverage requested
		 * by the receiver.  This is subtle: if receiver wants x and x is
		 * greater than the buffer size/MTU, then receiver will complain
		 * that it wants x while sender emits packets of smaller size y.
		 * Therefore the above ...()->partial_cov statement is essential.
		 */
		if (UDP_SKB_CB(skb)->cscov < up->pcrlen) {
			net_dbg_ratelimited("UDPLite: coverage %d too small, need min %d\n",
					    UDP_SKB_CB(skb)->cscov, up->pcrlen);
			goto drop;
		}
	}

	prefetch(&sk->sk_rmem_alloc);
	if (rcu_access_pointer(sk->sk_filter) &&
	    udp_lib_checksum_complete(skb))
		goto csum_error;

	if (sk_filter_trim_cap(sk, skb, sizeof(struct udphdr)))
		goto drop;

	udp_csum_pull_header(skb);

	ipv4_pktinfo_prepare(sk, skb);
	return __udp_queue_rcv_skb(sk, skb);

csum_error:
	__UDP_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
drop:
	__UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
	atomic_inc(&sk->sk_drops);
	kfree_skb(skb);
	return -1;
}

/* For TCP sockets, sk_rx_dst is protected by the socket lock.
 * For UDP, we use xchg() to guard against concurrent changes.
 */
void udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst)
{
	struct dst_entry *old;

	if (dst_hold_safe(dst)) {
		old = xchg(&sk->sk_rx_dst, dst);
		dst_release(old);
	}
}
EXPORT_SYMBOL(udp_sk_rx_dst_set);

/*
 *	Multicasts and broadcasts go to each listener.
 *
 *	Note: called only from the BH handler context.
 */
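/* When the primary port slot is crowded (more than ten sockets), the
 * walk is restricted to the port+address (hash2) slots instead.
 */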
static int __udp4_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
				    struct udphdr *uh,
				    __be32 saddr, __be32 daddr,
				    struct udp_table *udptable,
				    int proto)
{
	struct sock *sk, *first = NULL;
	unsigned short hnum = ntohs(uh->dest);
	struct udp_hslot *hslot = udp_hashslot(udptable, net, hnum);
	unsigned int hash2 = 0, hash2_any = 0, use_hash2 = (hslot->count > 10);
	unsigned int offset = offsetof(typeof(*sk), sk_node);
	int dif = skb->dev->ifindex;
	struct hlist_node *node;
	struct sk_buff *nskb;

	if (use_hash2) {
		hash2_any = udp4_portaddr_hash(net, htonl(INADDR_ANY), hnum) &
			    udptable->mask;
		hash2 = udp4_portaddr_hash(net, daddr, hnum) & udptable->mask;
start_lookup:
		hslot = &udptable->hash2[hash2];
		offset = offsetof(typeof(*sk), __sk_common.skc_portaddr_node);
	}

	sk_for_each_entry_offset_rcu(sk, node, &hslot->head, offset) {
		if (!__udp_is_mcast_sock(net, sk, uh->dest, daddr,
					 uh->source, saddr, dif, hnum))
			continue;

		if (!first) {
			first = sk;
			continue;
		}
		nskb = skb_clone(skb, GFP_ATOMIC);

		if (unlikely(!nskb)) {
			atomic_inc(&sk->sk_drops);
			__UDP_INC_STATS(net, UDP_MIB_RCVBUFERRORS,
					IS_UDPLITE(sk));
			__UDP_INC_STATS(net, UDP_MIB_INERRORS,
					IS_UDPLITE(sk));
			continue;
		}
		if (udp_queue_rcv_skb(sk, nskb) > 0)
			consume_skb(nskb);
	}

	/* Also lookup *:port if we are using hash2 and haven't done so yet. */
	if (use_hash2 && hash2 != hash2_any) {
		hash2 = hash2_any;
		goto start_lookup;
	}

	if (first) {
		if (udp_queue_rcv_skb(first, skb) > 0)
			consume_skb(skb);
	} else {
		kfree_skb(skb);
		__UDP_INC_STATS(net, UDP_MIB_IGNOREDMULTI,
				proto == IPPROTO_UDPLITE);
	}
	return 0;
}

/* Initialize the UDP checksum.  If it returns zero (success) with
 * CHECKSUM_UNNECESSARY set, no more checks are required.  Otherwise,
 * csum completion requires checksumming the packet body, including the
 * UDP header, and folding it into skb->csum.
 */
static inline int udp4_csum_init(struct sk_buff *skb, struct udphdr *uh,
				 int proto)
{
	int err;

	UDP_SKB_CB(skb)->partial_cov = 0;
	UDP_SKB_CB(skb)->cscov = skb->len;

	if (proto == IPPROTO_UDPLITE) {
		err = udplite_checksum_init(skb, uh);
		if (err)
			return err;
	}

	/* Note, we are only interested in != 0 or == 0, thus the
	 * force to int.
	 */
	return (__force int)skb_checksum_init_zero_check(skb, proto, uh->check,
							 inet_compute_pseudo);
}

/*
 *	All we need to do is get the socket, and then do a checksum.
 */
int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
		   int proto)
{
	struct sock *sk;
	struct udphdr *uh;
	unsigned short ulen;
	struct rtable *rt = skb_rtable(skb);
	__be32 saddr, daddr;
	struct net *net = dev_net(skb->dev);

	/*
	 *	Validate the packet.
	 */
	if (!pskb_may_pull(skb, sizeof(struct udphdr)))
		goto drop;		/* No space for header. */

	uh   = udp_hdr(skb);
	ulen = ntohs(uh->len);
	saddr = ip_hdr(skb)->saddr;
	daddr = ip_hdr(skb)->daddr;

	if (ulen > skb->len)
		goto short_packet;

	if (proto == IPPROTO_UDP) {
		/* UDP validates ulen. */
		if (ulen < sizeof(*uh) || pskb_trim_rcsum(skb, ulen))
			goto short_packet;
		uh = udp_hdr(skb);
	}

	if (udp4_csum_init(skb, uh, proto))
		goto csum_error;

	sk = skb_steal_sock(skb);
	if (sk) {
		struct dst_entry *dst = skb_dst(skb);
		int ret;

		if (unlikely(sk->sk_rx_dst != dst))
			udp_sk_rx_dst_set(sk, dst);

		ret = udp_queue_rcv_skb(sk, skb);
		sock_put(sk);
		/* a return value > 0 means to resubmit the input, but
		 * it wants the return to be -protocol, or 0
		 */
		if (ret > 0)
			return -ret;
		return 0;
	}

	if (rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST))
		return __udp4_lib_mcast_deliver(net, skb, uh,
						saddr, daddr, udptable, proto);

	sk = __udp4_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
	if (sk) {
		int ret;

		if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk))
			skb_checksum_try_convert(skb, IPPROTO_UDP, uh->check,
						 inet_compute_pseudo);

		ret = udp_queue_rcv_skb(sk, skb);

		/* a return value > 0 means to resubmit the input, but
		 * it wants the return to be -protocol, or 0
		 */
		if (ret > 0)
			return -ret;
		return 0;
	}

	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto drop;
	nf_reset(skb);

	/* No socket.  Drop packet silently, if checksum is wrong */
	if (udp_lib_checksum_complete(skb))
		goto csum_error;

	__UDP_INC_STATS(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);

	/*
	 * We got a UDP packet on a port we are not listening on.
	 * Ignore it.
	 */
	kfree_skb(skb);
	return 0;

short_packet:
	net_dbg_ratelimited("UDP%s: short packet: From %pI4:%u %d/%d to %pI4:%u\n",
			    proto == IPPROTO_UDPLITE ? "Lite" : "",
			    &saddr, ntohs(uh->source),
			    ulen, skb->len,
			    &daddr, ntohs(uh->dest));
	goto drop;

csum_error:
	/*
	 * RFC 1122: OK.  Discards the bad packet silently (as far as
	 * the network is concerned, anyway) as per 4.1.3.4 (MUST).
	 */
	net_dbg_ratelimited("UDP%s: bad checksum. From %pI4:%u to %pI4:%u ulen %d\n",
			    proto == IPPROTO_UDPLITE ? "Lite" : "",
			    &saddr, ntohs(uh->source), &daddr, ntohs(uh->dest),
			    ulen);
	__UDP_INC_STATS(net, UDP_MIB_CSUMERRORS, proto == IPPROTO_UDPLITE);
drop:
	__UDP_INC_STATS(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE);
	kfree_skb(skb);
	return 0;
}

/* We can only early demux multicast if there is a single matching socket.
 * If more than one socket is found, return NULL.
 */
static struct sock *__udp4_lib_mcast_demux_lookup(struct net *net,
						  __be16 loc_port, __be32 loc_addr,
						  __be16 rmt_port, __be32 rmt_addr,
						  int dif)
{
	struct sock *sk, *result;
	unsigned short hnum = ntohs(loc_port);
	unsigned int slot = udp_hashfn(net, hnum, udp_table.mask);
	struct udp_hslot *hslot = &udp_table.hash[slot];

	/* Do not bother scanning a too big list */
	if (hslot->count > 10)
		return NULL;

	result = NULL;
	sk_for_each_rcu(sk, &hslot->head) {
		if (__udp_is_mcast_sock(net, sk, loc_port, loc_addr,
					rmt_port, rmt_addr, dif, hnum)) {
			if (result)
				return NULL;
			result = sk;
		}
	}

	return result;
}

/* For unicast we should only early demux connected sockets or we can
 * break forwarding setups.  The chains here can be long so only check
 * if the first socket is an exact match and if not move on.
 */
static struct sock *__udp4_lib_demux_lookup(struct net *net,
					    __be16 loc_port, __be32 loc_addr,
					    __be16 rmt_port, __be32 rmt_addr,
					    int dif)
{
	unsigned short hnum = ntohs(loc_port);
	unsigned int hash2 = udp4_portaddr_hash(net, loc_addr, hnum);
	unsigned int slot2 = hash2 & udp_table.mask;
	struct udp_hslot *hslot2 = &udp_table.hash2[slot2];
	INET_ADDR_COOKIE(acookie, rmt_addr, loc_addr);
	const __portpair ports = INET_COMBINED_PORTS(rmt_port, hnum);
	struct sock *sk;

	udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
		if (INET_MATCH(sk, net, acookie, rmt_addr,
			       loc_addr, ports, dif))
			return sk;
		/* Only check first socket in chain */
		break;
	}
	return NULL;
}
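
/* Early demux: resolve the receiving socket while the packet is still
 * in the IP input path, so the socket's cached rx dst can be reused
 * and a routing lookup skipped.
 */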
void udp_v4_early_demux(struct sk_buff *skb)
{
	struct net *net = dev_net(skb->dev);
	const struct iphdr *iph;
	const struct udphdr *uh;
	struct sock *sk = NULL;
	struct dst_entry *dst;
	int dif = skb->dev->ifindex;
	int ours;

	/* validate the packet */
	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct udphdr)))
		return;

	iph = ip_hdr(skb);
	uh = udp_hdr(skb);

	if (skb->pkt_type == PACKET_BROADCAST ||
	    skb->pkt_type == PACKET_MULTICAST) {
		struct in_device *in_dev = __in_dev_get_rcu(skb->dev);

		if (!in_dev)
			return;

		/* we are supposed to accept bcast packets */
		if (skb->pkt_type == PACKET_MULTICAST) {
			ours = ip_check_mc_rcu(in_dev, iph->daddr, iph->saddr,
					       iph->protocol);
			if (!ours)
				return;
		}

		sk = __udp4_lib_mcast_demux_lookup(net, uh->dest, iph->daddr,
						   uh->source, iph->saddr, dif);
	} else if (skb->pkt_type == PACKET_HOST) {
		sk = __udp4_lib_demux_lookup(net, uh->dest, iph->daddr,
					     uh->source, iph->saddr, dif);
	}

	if (!sk || !refcount_inc_not_zero(&sk->sk_refcnt))
		return;

	skb->sk = sk;
	skb->destructor = sock_efree;
	dst = READ_ONCE(sk->sk_rx_dst);

	if (dst)
		dst = dst_check(dst, 0);
	if (dst) {
		/* set noref for now.
		 * any place which wants to hold dst has to call
		 * dst_hold_safe()
		 */
		skb_dst_set_noref(skb, dst);
	}
}

int udp_rcv(struct sk_buff *skb)
{
	return __udp4_lib_rcv(skb, &udp_table, IPPROTO_UDP);
}

void udp_destroy_sock(struct sock *sk)
{
	struct udp_sock *up = udp_sk(sk);
	bool slow = lock_sock_fast(sk);

	udp_flush_pending_frames(sk);
	unlock_sock_fast(sk, slow);
	if (static_key_false(&udp_encap_needed) && up->encap_type) {
		void (*encap_destroy)(struct sock *sk);

		encap_destroy = ACCESS_ONCE(up->encap_destroy);
		if (encap_destroy)
			encap_destroy(sk);
	}
}

/*
 *	Socket option code for UDP
 */
int udp_lib_setsockopt(struct sock *sk, int level, int optname,
		       char __user *optval, unsigned int optlen,
		       int (*push_pending_frames)(struct sock *))
{
	struct udp_sock *up = udp_sk(sk);
	int val, valbool;
	int err = 0;
	int is_udplite = IS_UDPLITE(sk);

	if (optlen < sizeof(int))
		return -EINVAL;

	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	valbool = val ? 1 : 0;

	switch (optname) {
	case UDP_CORK:
		if (val != 0) {
			up->corkflag = 1;
		} else {
			up->corkflag = 0;
			lock_sock(sk);
			push_pending_frames(sk);
			release_sock(sk);
		}
		break;

	case UDP_ENCAP:
		switch (val) {
		case 0:
		case UDP_ENCAP_ESPINUDP:
		case UDP_ENCAP_ESPINUDP_NON_IKE:
			up->encap_rcv = xfrm4_udp_encap_rcv;
			/* FALLTHROUGH */
		case UDP_ENCAP_L2TPINUDP:
			up->encap_type = val;
			udp_encap_enable();
			break;
		default:
			err = -ENOPROTOOPT;
			break;
		}
		break;

	case UDP_NO_CHECK6_TX:
		up->no_check6_tx = valbool;
		break;

	case UDP_NO_CHECK6_RX:
		up->no_check6_rx = valbool;
		break;

	/*
	 *	UDP-Lite's partial checksum coverage (RFC 3828).
	 */
	/* The sender sets the actual checksum coverage length via this option.
	 * The case coverage > packet length is handled by the send module. */
	case UDPLITE_SEND_CSCOV:
		if (!is_udplite)	 /* Disable the option on UDP sockets */
			return -ENOPROTOOPT;
		if (val != 0 && val < 8) /* Illegal coverage: use default (8) */
			val = 8;
		else if (val > USHRT_MAX)
			val = USHRT_MAX;
		up->pcslen = val;
		up->pcflag |= UDPLITE_SEND_CC;
		break;

	/* The receiver specifies a minimum checksum coverage value. To make
	 * sense, this should be set to at least 8 (as done below). If zero is
	 * used, this again means full checksum coverage. */
	case UDPLITE_RECV_CSCOV:
		if (!is_udplite)	 /* Disable the option on UDP sockets */
			return -ENOPROTOOPT;
		if (val != 0 && val < 8) /* Avoid silly minimal values. */
			val = 8;
		else if (val > USHRT_MAX)
			val = USHRT_MAX;
		up->pcrlen = val;
		up->pcflag |= UDPLITE_RECV_CC;
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	return err;
}
EXPORT_SYMBOL(udp_lib_setsockopt);

int udp_setsockopt(struct sock *sk, int level, int optname,
		   char __user *optval, unsigned int optlen)
{
	if (level == SOL_UDP || level == SOL_UDPLITE)
		return udp_lib_setsockopt(sk, level, optname, optval, optlen,
					  udp_push_pending_frames);
	return ip_setsockopt(sk, level, optname, optval, optlen);
}

#ifdef CONFIG_COMPAT
int compat_udp_setsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, unsigned int optlen)
{
	if (level == SOL_UDP || level == SOL_UDPLITE)
		return udp_lib_setsockopt(sk, level, optname, optval, optlen,
					  udp_push_pending_frames);
	return compat_ip_setsockopt(sk, level, optname, optval, optlen);
}
#endif

int udp_lib_getsockopt(struct sock *sk, int level, int optname,
		       char __user *optval, int __user *optlen)
{
	struct udp_sock *up = udp_sk(sk);
	int val, len;

	if (get_user(len, optlen))
		return -EFAULT;

	len = min_t(unsigned int, len, sizeof(int));

	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case UDP_CORK:
		val = up->corkflag;
		break;

	case UDP_ENCAP:
		val = up->encap_type;
		break;

	case UDP_NO_CHECK6_TX:
		val = up->no_check6_tx;
		break;

	case UDP_NO_CHECK6_RX:
		val = up->no_check6_rx;
		break;

	/* The following two cannot be changed on UDP sockets; the return is
	 * always 0 (which corresponds to the full checksum coverage of UDP). */
	case UDPLITE_SEND_CSCOV:
		val = up->pcslen;
		break;

	case UDPLITE_RECV_CSCOV:
		val = up->pcrlen;
		break;

	default:
		return -ENOPROTOOPT;
	}

	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, len))
		return -EFAULT;
	return 0;
}
EXPORT_SYMBOL(udp_lib_getsockopt);

int udp_getsockopt(struct sock *sk, int level, int optname,
		   char __user *optval, int __user *optlen)
{
	if (level == SOL_UDP || level == SOL_UDPLITE)
		return udp_lib_getsockopt(sk, level, optname, optval, optlen);
	return ip_getsockopt(sk, level, optname, optval, optlen);
}

#ifdef CONFIG_COMPAT
int compat_udp_getsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, int __user *optlen)
{
	if (level == SOL_UDP || level == SOL_UDPLITE)
		return udp_lib_getsockopt(sk, level, optname, optval, optlen);
	return compat_ip_getsockopt(sk, level, optname, optval, optlen);
}
#endif

/**
 *	udp_poll - wait for a UDP event.
 *	@file: file struct
 *	@sock: socket
 *	@wait: poll table
 *
 *	This is the same as datagram poll, except for the special case of
 *	blocking sockets.  If an application is using a blocking fd and a
 *	packet with a checksum error is in the queue, select() can indicate
 *	data available while the subsequent read blocks.  Add special case
 *	code to work around these arguably broken applications.
 */
unsigned int udp_poll(struct file *file, struct socket *sock, poll_table *wait)
{
	unsigned int mask = datagram_poll(file, sock, wait);
	struct sock *sk = sock->sk;

	if (!skb_queue_empty(&udp_sk(sk)->reader_queue))
		mask |= POLLIN | POLLRDNORM;

	sock_rps_record_flow(sk);

	/* Check for false positives due to checksum errors */
	if ((mask & POLLRDNORM) && !(file->f_flags & O_NONBLOCK) &&
	    !(sk->sk_shutdown & RCV_SHUTDOWN) && first_packet_length(sk) == -1)
		mask &= ~(POLLIN | POLLRDNORM);

	return mask;
}
EXPORT_SYMBOL(udp_poll);

int udp_abort(struct sock *sk, int err)
{
	lock_sock(sk);

	sk->sk_err = err;
	sk->sk_error_report(sk);
	__udp_disconnect(sk, 0);

	release_sock(sk);

	return 0;
}
EXPORT_SYMBOL_GPL(udp_abort);

struct proto udp_prot = {
	.name		   = "UDP",
	.owner		   = THIS_MODULE,
	.close		   = udp_lib_close,
	.connect	   = ip4_datagram_connect,
	.disconnect	   = udp_disconnect,
	.ioctl		   = udp_ioctl,
	.init		   = udp_init_sock,
	.destroy	   = udp_destroy_sock,
	.setsockopt	   = udp_setsockopt,
	.getsockopt	   = udp_getsockopt,
	.sendmsg	   = udp_sendmsg,
	.recvmsg	   = udp_recvmsg,
	.sendpage	   = udp_sendpage,
	.release_cb	   = ip4_datagram_release_cb,
	.hash		   = udp_lib_hash,
	.unhash		   = udp_lib_unhash,
	.rehash		   = udp_v4_rehash,
	.get_port	   = udp_v4_get_port,
	.memory_allocated  = &udp_memory_allocated,
	.sysctl_mem	   = sysctl_udp_mem,
	.sysctl_wmem	   = &sysctl_udp_wmem_min,
	.sysctl_rmem	   = &sysctl_udp_rmem_min,
	.obj_size	   = sizeof(struct udp_sock),
	.h.udp_table	   = &udp_table,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_udp_setsockopt,
	.compat_getsockopt = compat_udp_getsockopt,
#endif
	.diag_destroy	   = udp_abort,
};
EXPORT_SYMBOL(udp_prot);

/* ------------------------------------------------------------------------ */
#ifdef CONFIG_PROC_FS
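
/* seq_file iterator over the UDP hash table.  udp_get_first() returns
 * with the bucket lock of the current slot held; it is released by
 * udp_get_next() when moving to the next bucket, or by udp_seq_stop().
 */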
static struct sock *udp_get_first(struct seq_file *seq, int start)
{
	struct sock *sk;
	struct udp_iter_state *state = seq->private;
	struct net *net = seq_file_net(seq);

	for (state->bucket = start; state->bucket <= state->udp_table->mask;
	     ++state->bucket) {
		struct udp_hslot *hslot = &state->udp_table->hash[state->bucket];

		if (hlist_empty(&hslot->head))
			continue;

		spin_lock_bh(&hslot->lock);
		sk_for_each(sk, &hslot->head) {
			if (!net_eq(sock_net(sk), net))
				continue;
			if (sk->sk_family == state->family)
				goto found;
		}
		spin_unlock_bh(&hslot->lock);
	}
	sk = NULL;
found:
	return sk;
}

static struct sock *udp_get_next(struct seq_file *seq, struct sock *sk)
{
	struct udp_iter_state *state = seq->private;
	struct net *net = seq_file_net(seq);

	do {
		sk = sk_next(sk);
	} while (sk && (!net_eq(sock_net(sk), net) || sk->sk_family != state->family));

	if (!sk) {
		if (state->bucket <= state->udp_table->mask)
			spin_unlock_bh(&state->udp_table->hash[state->bucket].lock);
		return udp_get_first(seq, state->bucket + 1);
	}
	return sk;
}

static struct sock *udp_get_idx(struct seq_file *seq, loff_t pos)
{
	struct sock *sk = udp_get_first(seq, 0);

	if (sk)
		while (pos && (sk = udp_get_next(seq, sk)) != NULL)
			--pos;
	return pos ? NULL : sk;
}

static void *udp_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct udp_iter_state *state = seq->private;
	state->bucket = MAX_UDP_PORTS;

	return *pos ? udp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}

static void *udp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct sock *sk;

	if (v == SEQ_START_TOKEN)
		sk = udp_get_idx(seq, 0);
	else
		sk = udp_get_next(seq, v);

	++*pos;
	return sk;
}

static void udp_seq_stop(struct seq_file *seq, void *v)
{
	struct udp_iter_state *state = seq->private;

	if (state->bucket <= state->udp_table->mask)
		spin_unlock_bh(&state->udp_table->hash[state->bucket].lock);
}

int udp_seq_open(struct inode *inode, struct file *file)
{
	struct udp_seq_afinfo *afinfo = PDE_DATA(inode);
	struct udp_iter_state *s;
	int err;

	err = seq_open_net(inode, file, &afinfo->seq_ops,
			   sizeof(struct udp_iter_state));
	if (err < 0)
		return err;

	s = ((struct seq_file *)file->private_data)->private;
	s->family    = afinfo->family;
	s->udp_table = afinfo->udp_table;
	return err;
}
EXPORT_SYMBOL(udp_seq_open);

/* ------------------------------------------------------------------------ */
int udp_proc_register(struct net *net, struct udp_seq_afinfo *afinfo)
{
	struct proc_dir_entry *p;
	int rc = 0;

	afinfo->seq_ops.start = udp_seq_start;
	afinfo->seq_ops.next  = udp_seq_next;
	afinfo->seq_ops.stop  = udp_seq_stop;

	p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
			     afinfo->seq_fops, afinfo);
	if (!p)
		rc = -ENOMEM;
	return rc;
}
EXPORT_SYMBOL(udp_proc_register);

void udp_proc_unregister(struct net *net, struct udp_seq_afinfo *afinfo)
{
	remove_proc_entry(afinfo->name, net->proc_net);
}
EXPORT_SYMBOL(udp_proc_unregister);

/* ------------------------------------------------------------------------ */
static void udp4_format_sock(struct sock *sp, struct seq_file *f,
			     int bucket)
{
	struct inet_sock *inet = inet_sk(sp);
	__be32 dest = inet->inet_daddr;
	__be32 src  = inet->inet_rcv_saddr;
	__u16 destp = ntohs(inet->inet_dport);
	__u16 srcp  = ntohs(inet->inet_sport);

	seq_printf(f, "%5d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %d",
		bucket, src, srcp, dest, destp, sp->sk_state,
		sk_wmem_alloc_get(sp),
		sk_rmem_alloc_get(sp),
		0, 0L, 0,
		from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
		0, sock_i_ino(sp),
		refcount_read(&sp->sk_refcnt), sp,
		atomic_read(&sp->sk_drops));
}

int udp4_seq_show(struct seq_file *seq, void *v)
{
	seq_setwidth(seq, 127);
	if (v == SEQ_START_TOKEN)
		seq_puts(seq, "  sl  local_address rem_address   st tx_queue "
			   "rx_queue tr tm->when retrnsmt   uid  timeout "
			   "inode ref pointer drops");
	else {
		struct udp_iter_state *state = seq->private;

		udp4_format_sock(v, seq, state->bucket);
	}
	seq_pad(seq, '\n');
	return 0;
}

static const struct file_operations udp_afinfo_seq_fops = {
	.owner   = THIS_MODULE,
	.open    = udp_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net
};

/* ------------------------------------------------------------------------ */
static struct udp_seq_afinfo udp4_seq_afinfo = {
	.name      = "udp",
	.family    = AF_INET,
	.udp_table = &udp_table,
	.seq_fops  = &udp_afinfo_seq_fops,
	.seq_ops   = {
		.show = udp4_seq_show,
	},
};

static int __net_init udp4_proc_init_net(struct net *net)
{
	return udp_proc_register(net, &udp4_seq_afinfo);
}

static void __net_exit udp4_proc_exit_net(struct net *net)
{
	udp_proc_unregister(net, &udp4_seq_afinfo);
}

static struct pernet_operations udp4_net_ops = {
	.init = udp4_proc_init_net,
	.exit = udp4_proc_exit_net,
};

int __init udp4_proc_init(void)
{
	return register_pernet_subsys(&udp4_net_ops);
}

void udp4_proc_exit(void)
{
	unregister_pernet_subsys(&udp4_net_ops);
}
#endif /* CONFIG_PROC_FS */

static __initdata unsigned long uhash_entries;
static int __init set_uhash_entries(char *str)
{
	ssize_t ret;

	if (!str)
		return 0;

	ret = kstrtoul(str, 0, &uhash_entries);
	if (ret)
		return 0;

	if (uhash_entries && uhash_entries < UDP_HTABLE_SIZE_MIN)
		uhash_entries = UDP_HTABLE_SIZE_MIN;
	return 1;
}
__setup("uhash_entries=", set_uhash_entries);
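
/* The table is allocated as a single block: hash (keyed by port) is
 * followed by hash2 (keyed by port and address), each with mask + 1
 * slots, hence the 2 * sizeof(struct udp_hslot) bucket size below.
 */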
void __init udp_table_init(struct udp_table *table, const char *name)
{
	unsigned int i;

	table->hash = alloc_large_system_hash(name,
					      2 * sizeof(struct udp_hslot),
					      uhash_entries,
					      21, /* one slot per 2 MB */
					      0,
					      &table->log,
					      &table->mask,
					      UDP_HTABLE_SIZE_MIN,
					      64 * 1024);

	table->hash2 = table->hash + (table->mask + 1);
	for (i = 0; i <= table->mask; i++) {
		INIT_HLIST_HEAD(&table->hash[i].head);
		table->hash[i].count = 0;
		spin_lock_init(&table->hash[i].lock);
	}
	for (i = 0; i <= table->mask; i++) {
		INIT_HLIST_HEAD(&table->hash2[i].head);
		table->hash2[i].count = 0;
		spin_lock_init(&table->hash2[i].lock);
	}
}

u32 udp_flow_hashrnd(void)
{
	static u32 hashrnd __read_mostly;

	net_get_random_once(&hashrnd, sizeof(hashrnd));

	return hashrnd;
}
EXPORT_SYMBOL(udp_flow_hashrnd);
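
/* udp_init() sizes the sysctl_udp_mem pressure thresholds from the
 * available memory and allocates the busylock array used by the
 * enqueue path to limit contention when many cpus deliver to a
 * single socket.
 */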
void __init udp_init(void)
{
	unsigned long limit;
	unsigned int i;

	udp_table_init(&udp_table, "UDP");
	limit = nr_free_buffer_pages() / 8;
	limit = max(limit, 128UL);
	sysctl_udp_mem[0] = limit / 4 * 3;
	sysctl_udp_mem[1] = limit;
	sysctl_udp_mem[2] = sysctl_udp_mem[0] * 2;

	sysctl_udp_rmem_min = SK_MEM_QUANTUM;
	sysctl_udp_wmem_min = SK_MEM_QUANTUM;

	/* 16 spinlocks per cpu */
	udp_busylocks_log = ilog2(nr_cpu_ids) + 4;
	udp_busylocks = kmalloc(sizeof(spinlock_t) << udp_busylocks_log,
				GFP_KERNEL);
	if (!udp_busylocks)
		panic("UDP: failed to alloc udp_busylocks\n");
	for (i = 0; i < (1U << udp_busylocks_log); i++)
		spin_lock_init(udp_busylocks + i);
}