udp.c
  1. /*
  2. * INET An implementation of the TCP/IP protocol suite for the LINUX
  3. * operating system. INET is implemented using the BSD Socket
  4. * interface as the means of communication with the user level.
  5. *
  6. * The User Datagram Protocol (UDP).
  7. *
  8. * Authors: Ross Biro
  9. * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
  10. * Arnt Gulbrandsen, <agulbra@nvg.unit.no>
  11. * Alan Cox, <alan@lxorguk.ukuu.org.uk>
  12. * Hirokazu Takahashi, <taka@valinux.co.jp>
  13. *
  14. * Fixes:
  15. * Alan Cox : verify_area() calls
  16. * Alan Cox : stopped close while in use off icmp
  17. * messages. Not a fix but a botch that
  18. * for udp at least is 'valid'.
  19. * Alan Cox : Fixed icmp handling properly
  20. * Alan Cox : Correct error for oversized datagrams
  21. * Alan Cox : Tidied select() semantics.
  22. * Alan Cox : udp_err() fixed properly, also now
  23. * select and read wake correctly on errors
  24. * Alan Cox : udp_send verify_area moved to avoid mem leak
  25. * Alan Cox : UDP can count its memory
  26. * Alan Cox : send to an unknown connection causes
  27. * an ECONNREFUSED off the icmp, but
  28. * does NOT close.
  29. * Alan Cox : Switched to new sk_buff handlers. No more backlog!
  30. * Alan Cox : Using generic datagram code. Even smaller and the PEEK
  31. * bug no longer crashes it.
  32. * Fred Van Kempen : Net2e support for sk->broadcast.
  33. * Alan Cox : Uses skb_free_datagram
  34. * Alan Cox : Added get/set sockopt support.
  35. * Alan Cox : Broadcasting without option set returns EACCES.
  36. * Alan Cox : No wakeup calls. Instead we now use the callbacks.
  37. * Alan Cox : Use ip_tos and ip_ttl
  38. * Alan Cox : SNMP Mibs
  39. * Alan Cox : MSG_DONTROUTE, and 0.0.0.0 support.
  40. * Matt Dillon : UDP length checks.
  41. * Alan Cox : Smarter af_inet used properly.
  42. * Alan Cox : Use new kernel side addressing.
  43. * Alan Cox : Incorrect return on truncated datagram receive.
  44. * Arnt Gulbrandsen : New udp_send and stuff
  45. * Alan Cox : Cache last socket
  46. * Alan Cox : Route cache
  47. * Jon Peatfield : Minor efficiency fix to sendto().
  48. * Mike Shaver : RFC1122 checks.
  49. * Alan Cox : Nonblocking error fix.
  50. * Willy Konynenberg : Transparent proxying support.
  51. * Mike McLagan : Routing by source
  52. * David S. Miller : New socket lookup architecture.
  53. * Last socket cache retained as it
  54. * does have a high hit rate.
  55. * Olaf Kirch : Don't linearise iovec on sendmsg.
  56. * Andi Kleen : Some cleanups, cache destination entry
  57. * for connect.
  58. * Vitaly E. Lavrov : Transparent proxy revived after year coma.
  59. * Melvin Smith : Check msg_name not msg_namelen in sendto(),
  60. * return ENOTCONN for unconnected sockets (POSIX)
  61. * Janos Farkas : don't deliver multi/broadcasts to a different
  62. * bound-to-device socket
  63. * Hirokazu Takahashi : HW checksumming for outgoing UDP
  64. * datagrams.
  65. * Hirokazu Takahashi : sendfile() on UDP works now.
  66. * Arnaldo C. Melo : convert /proc/net/udp to seq_file
  67. * YOSHIFUJI Hideaki @USAGI and: Support IPV6_V6ONLY socket option, which
  68. * Alexey Kuznetsov: allow both IPv4 and IPv6 sockets to bind
  69. * a single port at the same time.
  70. * Derek Atkins <derek@ihtfp.com>: Add Encapsulation Support
  71. * James Chapman : Add L2TP encapsulation type.
  72. *
  73. *
  74. * This program is free software; you can redistribute it and/or
  75. * modify it under the terms of the GNU General Public License
  76. * as published by the Free Software Foundation; either version
  77. * 2 of the License, or (at your option) any later version.
  78. */
  79. #include <asm/system.h>
  80. #include <asm/uaccess.h>
  81. #include <asm/ioctls.h>
  82. #include <linux/bootmem.h>
  83. #include <linux/highmem.h>
  84. #include <linux/swap.h>
  85. #include <linux/types.h>
  86. #include <linux/fcntl.h>
  87. #include <linux/module.h>
  88. #include <linux/socket.h>
  89. #include <linux/sockios.h>
  90. #include <linux/igmp.h>
  91. #include <linux/in.h>
  92. #include <linux/errno.h>
  93. #include <linux/timer.h>
  94. #include <linux/mm.h>
  95. #include <linux/inet.h>
  96. #include <linux/netdevice.h>
  97. #include <net/tcp_states.h>
  98. #include <linux/skbuff.h>
  99. #include <linux/proc_fs.h>
  100. #include <linux/seq_file.h>
  101. #include <net/net_namespace.h>
  102. #include <net/icmp.h>
  103. #include <net/route.h>
  104. #include <net/checksum.h>
  105. #include <net/xfrm.h>
  106. #include "udp_impl.h"
  107. struct udp_table udp_table __read_mostly;
  108. EXPORT_SYMBOL(udp_table);
  109. int sysctl_udp_mem[3] __read_mostly;
  110. EXPORT_SYMBOL(sysctl_udp_mem);
  111. int sysctl_udp_rmem_min __read_mostly;
  112. EXPORT_SYMBOL(sysctl_udp_rmem_min);
  113. int sysctl_udp_wmem_min __read_mostly;
  114. EXPORT_SYMBOL(sysctl_udp_wmem_min);
  115. atomic_t udp_memory_allocated;
  116. EXPORT_SYMBOL(udp_memory_allocated);
  117. #define MAX_UDP_PORTS 65536
  118. #define PORTS_PER_CHAIN (MAX_UDP_PORTS / UDP_HTABLE_SIZE_MIN)
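/*
 * udp_lib_lport_inuse - scan one hash chain for port conflicts with @sk.
 * A socket conflicts if it lives in the same netns, neither end has
 * SO_REUSEADDR set, the bound devices are compatible and @saddr_comp
 * reports overlapping local addresses. Without @bitmap the chain is
 * checked against the single port @num and 1 is returned on the first
 * conflict; with @bitmap every conflicting port seen in the chain is
 * marked so the caller can test many candidate ports in one pass.
 */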
  119. static int udp_lib_lport_inuse(struct net *net, __u16 num,
  120. const struct udp_hslot *hslot,
  121. unsigned long *bitmap,
  122. struct sock *sk,
  123. int (*saddr_comp)(const struct sock *sk1,
  124. const struct sock *sk2),
  125. unsigned int log)
  126. {
  127. struct sock *sk2;
  128. struct hlist_nulls_node *node;
  129. sk_nulls_for_each(sk2, node, &hslot->head)
  130. if (net_eq(sock_net(sk2), net) &&
  131. sk2 != sk &&
  132. (bitmap || sk2->sk_hash == num) &&
  133. (!sk2->sk_reuse || !sk->sk_reuse) &&
  134. (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if
  135. || sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
  136. (*saddr_comp)(sk, sk2)) {
  137. if (bitmap)
  138. __set_bit(sk2->sk_hash >> log, bitmap);
  139. else
  140. return 1;
  141. }
  142. return 0;
  143. }
  144. /**
  145. * udp_lib_get_port - UDP/-Lite port lookup for IPv4 and IPv6
  146. *
  147. * @sk: socket struct in question
  148. * @snum: port number to look up
  149. * @saddr_comp: AF-dependent comparison of bound local IP addresses
  150. */
  151. int udp_lib_get_port(struct sock *sk, unsigned short snum,
  152. int (*saddr_comp)(const struct sock *sk1,
  153. const struct sock *sk2))
  154. {
  155. struct udp_hslot *hslot;
  156. struct udp_table *udptable = sk->sk_prot->h.udp_table;
  157. int error = 1;
  158. struct net *net = sock_net(sk);
  159. if (!snum) {
  160. int low, high, remaining;
  161. unsigned rand;
  162. unsigned short first, last;
  163. DECLARE_BITMAP(bitmap, PORTS_PER_CHAIN);
  164. inet_get_local_port_range(&low, &high);
  165. remaining = (high - low) + 1;
  166. rand = net_random();
  167. first = (((u64)rand * remaining) >> 32) + low;
  168. /*
  169. * force rand to be an odd multiple of UDP_HTABLE_SIZE
  170. */
  171. rand = (rand | 1) * (udptable->mask + 1);
  172. for (last = first + udptable->mask + 1;
  173. first != last;
  174. first++) {
  175. hslot = udp_hashslot(udptable, net, first);
  176. bitmap_zero(bitmap, PORTS_PER_CHAIN);
  177. spin_lock_bh(&hslot->lock);
  178. udp_lib_lport_inuse(net, snum, hslot, bitmap, sk,
  179. saddr_comp, udptable->log);
  180. snum = first;
  181. /*
  182. * Iterate on all possible values of snum for this hash.
  183. * Using steps of an odd multiple of UDP_HTABLE_SIZE
  184. * gives us randomization and full range coverage.
  185. */
  186. do {
  187. if (low <= snum && snum <= high &&
  188. !test_bit(snum >> udptable->log, bitmap))
  189. goto found;
  190. snum += rand;
  191. } while (snum != first);
  192. spin_unlock_bh(&hslot->lock);
  193. }
  194. goto fail;
  195. } else {
  196. hslot = udp_hashslot(udptable, net, snum);
  197. spin_lock_bh(&hslot->lock);
  198. if (udp_lib_lport_inuse(net, snum, hslot, NULL, sk,
  199. saddr_comp, 0))
  200. goto fail_unlock;
  201. }
  202. found:
  203. inet_sk(sk)->inet_num = snum;
  204. sk->sk_hash = snum;
  205. if (sk_unhashed(sk)) {
  206. sk_nulls_add_node_rcu(sk, &hslot->head);
  207. hslot->count++;
  208. sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
  209. }
  210. error = 0;
  211. fail_unlock:
  212. spin_unlock_bh(&hslot->lock);
  213. fail:
  214. return error;
  215. }
  216. EXPORT_SYMBOL(udp_lib_get_port);
  217. static int ipv4_rcv_saddr_equal(const struct sock *sk1, const struct sock *sk2)
  218. {
  219. struct inet_sock *inet1 = inet_sk(sk1), *inet2 = inet_sk(sk2);
  220. return (!ipv6_only_sock(sk2) &&
  221. (!inet1->inet_rcv_saddr || !inet2->inet_rcv_saddr ||
  222. inet1->inet_rcv_saddr == inet2->inet_rcv_saddr));
  223. }
  224. int udp_v4_get_port(struct sock *sk, unsigned short snum)
  225. {
  226. return udp_lib_get_port(sk, snum, ipv4_rcv_saddr_equal);
  227. }
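/*
 * compute_score - rank a socket for __udp4_lib_lookup().
 * Returns -1 if the socket cannot receive the packet at all (wrong netns,
 * wrong port, IPv6-only, or a bound address/port/device that does not
 * match). Otherwise the score grows by 2 for every exactly matching
 * attribute (local address, remote address, remote port, bound device),
 * so the most specifically bound socket wins the lookup.
 */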
  228. static inline int compute_score(struct sock *sk, struct net *net, __be32 saddr,
  229. unsigned short hnum,
  230. __be16 sport, __be32 daddr, __be16 dport, int dif)
  231. {
  232. int score = -1;
  233. if (net_eq(sock_net(sk), net) && sk->sk_hash == hnum &&
  234. !ipv6_only_sock(sk)) {
  235. struct inet_sock *inet = inet_sk(sk);
  236. score = (sk->sk_family == PF_INET ? 1 : 0);
  237. if (inet->inet_rcv_saddr) {
  238. if (inet->inet_rcv_saddr != daddr)
  239. return -1;
  240. score += 2;
  241. }
  242. if (inet->inet_daddr) {
  243. if (inet->inet_daddr != saddr)
  244. return -1;
  245. score += 2;
  246. }
  247. if (inet->inet_dport) {
  248. if (inet->inet_dport != sport)
  249. return -1;
  250. score += 2;
  251. }
  252. if (sk->sk_bound_dev_if) {
  253. if (sk->sk_bound_dev_if != dif)
  254. return -1;
  255. score += 2;
  256. }
  257. }
  258. return score;
  259. }
  260. /* UDP is nearly always wildcards out the wazoo, it makes no sense to try
  261. * harder than this. -DaveM
  262. */
  263. static struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr,
  264. __be16 sport, __be32 daddr, __be16 dport,
  265. int dif, struct udp_table *udptable)
  266. {
  267. struct sock *sk, *result;
  268. struct hlist_nulls_node *node;
  269. unsigned short hnum = ntohs(dport);
  270. unsigned int hash = udp_hashfn(net, hnum, udptable->mask);
  271. struct udp_hslot *hslot = &udptable->hash[hash];
  272. int score, badness;
  273. rcu_read_lock();
  274. begin:
  275. result = NULL;
  276. badness = -1;
  277. sk_nulls_for_each_rcu(sk, node, &hslot->head) {
  278. score = compute_score(sk, net, saddr, hnum, sport,
  279. daddr, dport, dif);
  280. if (score > badness) {
  281. result = sk;
  282. badness = score;
  283. }
  284. }
  285. /*
  286. * if the nulls value we got at the end of this lookup is
  287. * not the expected one, we must restart lookup.
  288. * We probably met an item that was moved to another chain.
  289. */
  290. if (get_nulls_value(node) != hash)
  291. goto begin;
  292. if (result) {
  293. if (unlikely(!atomic_inc_not_zero(&result->sk_refcnt)))
  294. result = NULL;
  295. else if (unlikely(compute_score(result, net, saddr, hnum, sport,
  296. daddr, dport, dif) < badness)) {
  297. sock_put(result);
  298. goto begin;
  299. }
  300. }
  301. rcu_read_unlock();
  302. return result;
  303. }
  304. static inline struct sock *__udp4_lib_lookup_skb(struct sk_buff *skb,
  305. __be16 sport, __be16 dport,
  306. struct udp_table *udptable)
  307. {
  308. struct sock *sk;
  309. const struct iphdr *iph = ip_hdr(skb);
  310. if (unlikely(sk = skb_steal_sock(skb)))
  311. return sk;
  312. else
  313. return __udp4_lib_lookup(dev_net(skb_dst(skb)->dev), iph->saddr, sport,
  314. iph->daddr, dport, inet_iif(skb),
  315. udptable);
  316. }
  317. struct sock *udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport,
  318. __be32 daddr, __be16 dport, int dif)
  319. {
  320. return __udp4_lib_lookup(net, saddr, sport, daddr, dport, dif, &udp_table);
  321. }
  322. EXPORT_SYMBOL_GPL(udp4_lib_lookup);
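/*
 * udp4_lib_lookup() is exported for other kernel code that needs to map a
 * 4-tuple back to the owning UDP socket. The returned socket carries a
 * reference that the caller must drop. A minimal, illustrative sketch
 * (hypothetical caller, not part of this file):
 *
 *	struct sock *sk = udp4_lib_lookup(net, iph->saddr, uh->source,
 *					  iph->daddr, uh->dest, inet_iif(skb));
 *	if (sk) {
 *		... inspect sk ...
 *		sock_put(sk);
 *	}
 */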
  323. static inline struct sock *udp_v4_mcast_next(struct net *net, struct sock *sk,
  324. __be16 loc_port, __be32 loc_addr,
  325. __be16 rmt_port, __be32 rmt_addr,
  326. int dif)
  327. {
  328. struct hlist_nulls_node *node;
  329. struct sock *s = sk;
  330. unsigned short hnum = ntohs(loc_port);
  331. sk_nulls_for_each_from(s, node) {
  332. struct inet_sock *inet = inet_sk(s);
  333. if (!net_eq(sock_net(s), net) ||
  334. s->sk_hash != hnum ||
  335. (inet->inet_daddr && inet->inet_daddr != rmt_addr) ||
  336. (inet->inet_dport != rmt_port && inet->inet_dport) ||
  337. (inet->inet_rcv_saddr &&
  338. inet->inet_rcv_saddr != loc_addr) ||
  339. ipv6_only_sock(s) ||
  340. (s->sk_bound_dev_if && s->sk_bound_dev_if != dif))
  341. continue;
  342. if (!ip_mc_sf_allow(s, loc_addr, rmt_addr, dif))
  343. continue;
  344. goto found;
  345. }
  346. s = NULL;
  347. found:
  348. return s;
  349. }
  350. /*
  351. * This routine is called by the ICMP module when it gets some
  352. * sort of error condition. If err < 0 then the socket should
  353. * be closed and the error returned to the user. If err > 0
  354. * it's just the icmp type << 8 | icmp code.
  355. * Header points to the ip header of the error packet. We move
  356. * on past this. Then (as it used to claim before adjustment)
  357. * header points to the first 8 bytes of the udp header. We need
  358. * to find the appropriate port.
  359. */
  360. void __udp4_lib_err(struct sk_buff *skb, u32 info, struct udp_table *udptable)
  361. {
  362. struct inet_sock *inet;
  363. struct iphdr *iph = (struct iphdr *)skb->data;
  364. struct udphdr *uh = (struct udphdr *)(skb->data+(iph->ihl<<2));
  365. const int type = icmp_hdr(skb)->type;
  366. const int code = icmp_hdr(skb)->code;
  367. struct sock *sk;
  368. int harderr;
  369. int err;
  370. struct net *net = dev_net(skb->dev);
  371. sk = __udp4_lib_lookup(net, iph->daddr, uh->dest,
  372. iph->saddr, uh->source, skb->dev->ifindex, udptable);
  373. if (sk == NULL) {
  374. ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
  375. return; /* No socket for error */
  376. }
  377. err = 0;
  378. harderr = 0;
  379. inet = inet_sk(sk);
  380. switch (type) {
  381. default:
  382. case ICMP_TIME_EXCEEDED:
  383. err = EHOSTUNREACH;
  384. break;
  385. case ICMP_SOURCE_QUENCH:
  386. goto out;
  387. case ICMP_PARAMETERPROB:
  388. err = EPROTO;
  389. harderr = 1;
  390. break;
  391. case ICMP_DEST_UNREACH:
  392. if (code == ICMP_FRAG_NEEDED) { /* Path MTU discovery */
  393. if (inet->pmtudisc != IP_PMTUDISC_DONT) {
  394. err = EMSGSIZE;
  395. harderr = 1;
  396. break;
  397. }
  398. goto out;
  399. }
  400. err = EHOSTUNREACH;
  401. if (code <= NR_ICMP_UNREACH) {
  402. harderr = icmp_err_convert[code].fatal;
  403. err = icmp_err_convert[code].errno;
  404. }
  405. break;
  406. }
  407. /*
  408. * RFC1122: OK. Passes ICMP errors back to application, as per
  409. * 4.1.3.3.
  410. */
  411. if (!inet->recverr) {
  412. if (!harderr || sk->sk_state != TCP_ESTABLISHED)
  413. goto out;
  414. } else {
  415. ip_icmp_error(sk, skb, err, uh->dest, info, (u8 *)(uh+1));
  416. }
  417. sk->sk_err = err;
  418. sk->sk_error_report(sk);
  419. out:
  420. sock_put(sk);
  421. }
  422. void udp_err(struct sk_buff *skb, u32 info)
  423. {
  424. __udp4_lib_err(skb, info, &udp_table);
  425. }
  426. /*
  427. * Throw away all pending data and cancel the corking. Socket is locked.
  428. */
  429. void udp_flush_pending_frames(struct sock *sk)
  430. {
  431. struct udp_sock *up = udp_sk(sk);
  432. if (up->pending) {
  433. up->len = 0;
  434. up->pending = 0;
  435. ip_flush_pending_frames(sk);
  436. }
  437. }
  438. EXPORT_SYMBOL(udp_flush_pending_frames);
  439. /**
  440. * udp4_hwcsum_outgoing - handle outgoing HW checksumming
  441. * @sk: socket we are sending on
  442. * @skb: sk_buff containing the filled-in UDP header
  443. * (checksum field must be zeroed out)
  444. */
  445. static void udp4_hwcsum_outgoing(struct sock *sk, struct sk_buff *skb,
  446. __be32 src, __be32 dst, int len)
  447. {
  448. unsigned int offset;
  449. struct udphdr *uh = udp_hdr(skb);
  450. __wsum csum = 0;
  451. if (skb_queue_len(&sk->sk_write_queue) == 1) {
  452. /*
  453. * Only one fragment on the socket.
  454. */
  455. skb->csum_start = skb_transport_header(skb) - skb->head;
  456. skb->csum_offset = offsetof(struct udphdr, check);
  457. uh->check = ~csum_tcpudp_magic(src, dst, len, IPPROTO_UDP, 0);
  458. } else {
  459. /*
  460. * HW checksum won't work when there are two or more
  461. * fragments on the socket, since the csums of all the
  462. * sk_buffs have to be combined into a single value.
  463. */
  464. offset = skb_transport_offset(skb);
  465. skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
  466. skb->ip_summed = CHECKSUM_NONE;
  467. skb_queue_walk(&sk->sk_write_queue, skb) {
  468. csum = csum_add(csum, skb->csum);
  469. }
  470. uh->check = csum_tcpudp_magic(src, dst, len, IPPROTO_UDP, csum);
  471. if (uh->check == 0)
  472. uh->check = CSUM_MANGLED_0;
  473. }
  474. }
  475. /*
  476. * Push out all pending data as one UDP datagram. Socket is locked.
  477. */
  478. static int udp_push_pending_frames(struct sock *sk)
  479. {
  480. struct udp_sock *up = udp_sk(sk);
  481. struct inet_sock *inet = inet_sk(sk);
  482. struct flowi *fl = &inet->cork.fl;
  483. struct sk_buff *skb;
  484. struct udphdr *uh;
  485. int err = 0;
  486. int is_udplite = IS_UDPLITE(sk);
  487. __wsum csum = 0;
  488. /* Grab the skbuff where UDP header space exists. */
  489. if ((skb = skb_peek(&sk->sk_write_queue)) == NULL)
  490. goto out;
  491. /*
  492. * Create a UDP header
  493. */
  494. uh = udp_hdr(skb);
  495. uh->source = fl->fl_ip_sport;
  496. uh->dest = fl->fl_ip_dport;
  497. uh->len = htons(up->len);
  498. uh->check = 0;
  499. if (is_udplite) /* UDP-Lite */
  500. csum = udplite_csum_outgoing(sk, skb);
  501. else if (sk->sk_no_check == UDP_CSUM_NOXMIT) { /* UDP csum disabled */
  502. skb->ip_summed = CHECKSUM_NONE;
  503. goto send;
  504. } else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */
  505. udp4_hwcsum_outgoing(sk, skb, fl->fl4_src, fl->fl4_dst, up->len);
  506. goto send;
  507. } else /* `normal' UDP */
  508. csum = udp_csum_outgoing(sk, skb);
  509. /* add protocol-dependent pseudo-header */
  510. uh->check = csum_tcpudp_magic(fl->fl4_src, fl->fl4_dst, up->len,
  511. sk->sk_protocol, csum);
  512. if (uh->check == 0)
  513. uh->check = CSUM_MANGLED_0;
  514. send:
  515. err = ip_push_pending_frames(sk);
  516. if (err) {
  517. if (err == -ENOBUFS && !inet->recverr) {
  518. UDP_INC_STATS_USER(sock_net(sk),
  519. UDP_MIB_SNDBUFERRORS, is_udplite);
  520. err = 0;
  521. }
  522. } else
  523. UDP_INC_STATS_USER(sock_net(sk),
  524. UDP_MIB_OUTDATAGRAMS, is_udplite);
  525. out:
  526. up->len = 0;
  527. up->pending = 0;
  528. return err;
  529. }
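/*
 * udp_sendmsg - sendmsg() entry point for UDP sockets.
 * Handles the corked (pending) fast path first, then validates the
 * destination (explicit msg_name or the connected address), processes
 * control messages, resolves a route (reusing the cached dst for
 * connected sockets where possible), corks the socket and appends the
 * payload with ip_append_data(). Unless MSG_MORE/UDP_CORK is in effect,
 * the datagram is pushed out immediately via udp_push_pending_frames().
 */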
  530. int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
  531. size_t len)
  532. {
  533. struct inet_sock *inet = inet_sk(sk);
  534. struct udp_sock *up = udp_sk(sk);
  535. int ulen = len;
  536. struct ipcm_cookie ipc;
  537. struct rtable *rt = NULL;
  538. int free = 0;
  539. int connected = 0;
  540. __be32 daddr, faddr, saddr;
  541. __be16 dport;
  542. u8 tos;
  543. int err, is_udplite = IS_UDPLITE(sk);
  544. int corkreq = up->corkflag || msg->msg_flags&MSG_MORE;
  545. int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);
  546. if (len > 0xFFFF)
  547. return -EMSGSIZE;
  548. /*
  549. * Check the flags.
  550. */
  551. if (msg->msg_flags & MSG_OOB) /* Mirror BSD error message compatibility */
  552. return -EOPNOTSUPP;
  553. ipc.opt = NULL;
  554. ipc.shtx.flags = 0;
  555. if (up->pending) {
  556. /*
  557. * There are pending frames.
  558. * The socket lock must be held while it's corked.
  559. */
  560. lock_sock(sk);
  561. if (likely(up->pending)) {
  562. if (unlikely(up->pending != AF_INET)) {
  563. release_sock(sk);
  564. return -EINVAL;
  565. }
  566. goto do_append_data;
  567. }
  568. release_sock(sk);
  569. }
  570. ulen += sizeof(struct udphdr);
  571. /*
  572. * Get and verify the address.
  573. */
  574. if (msg->msg_name) {
  575. struct sockaddr_in * usin = (struct sockaddr_in *)msg->msg_name;
  576. if (msg->msg_namelen < sizeof(*usin))
  577. return -EINVAL;
  578. if (usin->sin_family != AF_INET) {
  579. if (usin->sin_family != AF_UNSPEC)
  580. return -EAFNOSUPPORT;
  581. }
  582. daddr = usin->sin_addr.s_addr;
  583. dport = usin->sin_port;
  584. if (dport == 0)
  585. return -EINVAL;
  586. } else {
  587. if (sk->sk_state != TCP_ESTABLISHED)
  588. return -EDESTADDRREQ;
  589. daddr = inet->inet_daddr;
  590. dport = inet->inet_dport;
  591. /* Open fast path for connected socket.
  592. Route will not be used, if at least one option is set.
  593. */
  594. connected = 1;
  595. }
  596. ipc.addr = inet->inet_saddr;
  597. ipc.oif = sk->sk_bound_dev_if;
  598. err = sock_tx_timestamp(msg, sk, &ipc.shtx);
  599. if (err)
  600. return err;
  601. if (msg->msg_controllen) {
  602. err = ip_cmsg_send(sock_net(sk), msg, &ipc);
  603. if (err)
  604. return err;
  605. if (ipc.opt)
  606. free = 1;
  607. connected = 0;
  608. }
  609. if (!ipc.opt)
  610. ipc.opt = inet->opt;
  611. saddr = ipc.addr;
  612. ipc.addr = faddr = daddr;
  613. if (ipc.opt && ipc.opt->srr) {
  614. if (!daddr)
  615. return -EINVAL;
  616. faddr = ipc.opt->faddr;
  617. connected = 0;
  618. }
  619. tos = RT_TOS(inet->tos);
  620. if (sock_flag(sk, SOCK_LOCALROUTE) ||
  621. (msg->msg_flags & MSG_DONTROUTE) ||
  622. (ipc.opt && ipc.opt->is_strictroute)) {
  623. tos |= RTO_ONLINK;
  624. connected = 0;
  625. }
  626. if (ipv4_is_multicast(daddr)) {
  627. if (!ipc.oif)
  628. ipc.oif = inet->mc_index;
  629. if (!saddr)
  630. saddr = inet->mc_addr;
  631. connected = 0;
  632. }
  633. if (connected)
  634. rt = (struct rtable *)sk_dst_check(sk, 0);
  635. if (rt == NULL) {
  636. struct flowi fl = { .oif = ipc.oif,
  637. .mark = sk->sk_mark,
  638. .nl_u = { .ip4_u =
  639. { .daddr = faddr,
  640. .saddr = saddr,
  641. .tos = tos } },
  642. .proto = sk->sk_protocol,
  643. .flags = inet_sk_flowi_flags(sk),
  644. .uli_u = { .ports =
  645. { .sport = inet->inet_sport,
  646. .dport = dport } } };
  647. struct net *net = sock_net(sk);
  648. security_sk_classify_flow(sk, &fl);
  649. err = ip_route_output_flow(net, &rt, &fl, sk, 1);
  650. if (err) {
  651. if (err == -ENETUNREACH)
  652. IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES);
  653. goto out;
  654. }
  655. err = -EACCES;
  656. if ((rt->rt_flags & RTCF_BROADCAST) &&
  657. !sock_flag(sk, SOCK_BROADCAST))
  658. goto out;
  659. if (connected)
  660. sk_dst_set(sk, dst_clone(&rt->u.dst));
  661. }
  662. if (msg->msg_flags&MSG_CONFIRM)
  663. goto do_confirm;
  664. back_from_confirm:
  665. saddr = rt->rt_src;
  666. if (!ipc.addr)
  667. daddr = ipc.addr = rt->rt_dst;
  668. lock_sock(sk);
  669. if (unlikely(up->pending)) {
  670. /* The socket is already corked while preparing it. */
  671. /* ... which is an evident application bug. --ANK */
  672. release_sock(sk);
  673. LIMIT_NETDEBUG(KERN_DEBUG "udp cork app bug 2\n");
  674. err = -EINVAL;
  675. goto out;
  676. }
  677. /*
  678. * Now cork the socket to pend data.
  679. */
  680. inet->cork.fl.fl4_dst = daddr;
  681. inet->cork.fl.fl_ip_dport = dport;
  682. inet->cork.fl.fl4_src = saddr;
  683. inet->cork.fl.fl_ip_sport = inet->inet_sport;
  684. up->pending = AF_INET;
  685. do_append_data:
  686. up->len += ulen;
  687. getfrag = is_udplite ? udplite_getfrag : ip_generic_getfrag;
  688. err = ip_append_data(sk, getfrag, msg->msg_iov, ulen,
  689. sizeof(struct udphdr), &ipc, &rt,
  690. corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags);
  691. if (err)
  692. udp_flush_pending_frames(sk);
  693. else if (!corkreq)
  694. err = udp_push_pending_frames(sk);
  695. else if (unlikely(skb_queue_empty(&sk->sk_write_queue)))
  696. up->pending = 0;
  697. release_sock(sk);
  698. out:
  699. ip_rt_put(rt);
  700. if (free)
  701. kfree(ipc.opt);
  702. if (!err)
  703. return len;
  704. /*
  705. * ENOBUFS = no kernel mem, SOCK_NOSPACE = no sndbuf space. Reporting
  706. * ENOBUFS might not be good (it's not tunable per se), but otherwise
  707. * we don't have a good statistic (IpOutDiscards but it can be too many
  708. * things). We could add another new stat but at least for now that
  709. * seems like overkill.
  710. */
  711. if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
  712. UDP_INC_STATS_USER(sock_net(sk),
  713. UDP_MIB_SNDBUFERRORS, is_udplite);
  714. }
  715. return err;
  716. do_confirm:
  717. dst_confirm(&rt->u.dst);
  718. if (!(msg->msg_flags&MSG_PROBE) || len)
  719. goto back_from_confirm;
  720. err = 0;
  721. goto out;
  722. }
  723. EXPORT_SYMBOL(udp_sendmsg);
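/*
 * udp_sendpage - sendpage()/sendfile() support for UDP.
 * If the socket is not already corked, a zero-length udp_sendmsg() call
 * sets up the cork (this only works on connected sockets, since sendpage
 * cannot carry a destination address). The page is then appended with
 * ip_append_page(); if that returns -EOPNOTSUPP the code falls back to
 * sock_no_sendpage(), and the frames are pushed unless corking continues.
 */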
  724. int udp_sendpage(struct sock *sk, struct page *page, int offset,
  725. size_t size, int flags)
  726. {
  727. struct udp_sock *up = udp_sk(sk);
  728. int ret;
  729. if (!up->pending) {
  730. struct msghdr msg = { .msg_flags = flags|MSG_MORE };
  731. /* Call udp_sendmsg to specify destination address which
  732. * sendpage interface can't pass.
  733. * This will succeed only when the socket is connected.
  734. */
  735. ret = udp_sendmsg(NULL, sk, &msg, 0);
  736. if (ret < 0)
  737. return ret;
  738. }
  739. lock_sock(sk);
  740. if (unlikely(!up->pending)) {
  741. release_sock(sk);
  742. LIMIT_NETDEBUG(KERN_DEBUG "udp cork app bug 3\n");
  743. return -EINVAL;
  744. }
  745. ret = ip_append_page(sk, page, offset, size, flags);
  746. if (ret == -EOPNOTSUPP) {
  747. release_sock(sk);
  748. return sock_no_sendpage(sk->sk_socket, page, offset,
  749. size, flags);
  750. }
  751. if (ret < 0) {
  752. udp_flush_pending_frames(sk);
  753. goto out;
  754. }
  755. up->len += size;
  756. if (!(up->corkflag || (flags&MSG_MORE)))
  757. ret = udp_push_pending_frames(sk);
  758. if (!ret)
  759. ret = size;
  760. out:
  761. release_sock(sk);
  762. return ret;
  763. }
  764. /**
  765. * first_packet_length - return length of first packet in receive queue
  766. * @sk: socket
  767. *
  768. * Drops all frames with bad checksums until a valid one is found.
  769. * Returns the length of the found skb, or 0 if none is found.
  770. */
  771. static unsigned int first_packet_length(struct sock *sk)
  772. {
  773. struct sk_buff_head list_kill, *rcvq = &sk->sk_receive_queue;
  774. struct sk_buff *skb;
  775. unsigned int res;
  776. __skb_queue_head_init(&list_kill);
  777. spin_lock_bh(&rcvq->lock);
  778. while ((skb = skb_peek(rcvq)) != NULL &&
  779. udp_lib_checksum_complete(skb)) {
  780. UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
  781. IS_UDPLITE(sk));
  782. atomic_inc(&sk->sk_drops);
  783. __skb_unlink(skb, rcvq);
  784. __skb_queue_tail(&list_kill, skb);
  785. }
  786. res = skb ? skb->len : 0;
  787. spin_unlock_bh(&rcvq->lock);
  788. if (!skb_queue_empty(&list_kill)) {
  789. lock_sock(sk);
  790. __skb_queue_purge(&list_kill);
  791. sk_mem_reclaim_partial(sk);
  792. release_sock(sk);
  793. }
  794. return res;
  795. }
  796. /*
  797. * IOCTL requests applicable to the UDP protocol
  798. */
  799. int udp_ioctl(struct sock *sk, int cmd, unsigned long arg)
  800. {
  801. switch (cmd) {
  802. case SIOCOUTQ:
  803. {
  804. int amount = sk_wmem_alloc_get(sk);
  805. return put_user(amount, (int __user *)arg);
  806. }
  807. case SIOCINQ:
  808. {
  809. unsigned int amount = first_packet_length(sk);
  810. if (amount)
  811. /*
  812. * We will only return the amount
  813. * of this packet since that is all
  814. * that will be read.
  815. */
  816. amount -= sizeof(struct udphdr);
  817. return put_user(amount, (int __user *)arg);
  818. }
  819. default:
  820. return -ENOIOCTLCMD;
  821. }
  822. return 0;
  823. }
  824. EXPORT_SYMBOL(udp_ioctl);
  825. /*
  826. * This should be easy, if there is something there we
  827. * return it, otherwise we block.
  828. */
  829. int udp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
  830. size_t len, int noblock, int flags, int *addr_len)
  831. {
  832. struct inet_sock *inet = inet_sk(sk);
  833. struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name;
  834. struct sk_buff *skb;
  835. unsigned int ulen, copied;
  836. int peeked;
  837. int err;
  838. int is_udplite = IS_UDPLITE(sk);
  839. /*
  840. * Check any passed addresses
  841. */
  842. if (addr_len)
  843. *addr_len = sizeof(*sin);
  844. if (flags & MSG_ERRQUEUE)
  845. return ip_recv_error(sk, msg, len);
  846. try_again:
  847. skb = __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0),
  848. &peeked, &err);
  849. if (!skb)
  850. goto out;
  851. ulen = skb->len - sizeof(struct udphdr);
  852. copied = len;
  853. if (copied > ulen)
  854. copied = ulen;
  855. else if (copied < ulen)
  856. msg->msg_flags |= MSG_TRUNC;
  857. /*
  858. * If checksum is needed at all, try to do it while copying the
  859. * data. If the data is truncated, or if we only want a partial
  860. * coverage checksum (UDP-Lite), do it before the copy.
  861. */
  862. if (copied < ulen || UDP_SKB_CB(skb)->partial_cov) {
  863. if (udp_lib_checksum_complete(skb))
  864. goto csum_copy_err;
  865. }
  866. if (skb_csum_unnecessary(skb))
  867. err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr),
  868. msg->msg_iov, copied);
  869. else {
  870. err = skb_copy_and_csum_datagram_iovec(skb,
  871. sizeof(struct udphdr),
  872. msg->msg_iov);
  873. if (err == -EINVAL)
  874. goto csum_copy_err;
  875. }
  876. if (err)
  877. goto out_free;
  878. if (!peeked)
  879. UDP_INC_STATS_USER(sock_net(sk),
  880. UDP_MIB_INDATAGRAMS, is_udplite);
  881. sock_recv_ts_and_drops(msg, sk, skb);
  882. /* Copy the address. */
  883. if (sin) {
  884. sin->sin_family = AF_INET;
  885. sin->sin_port = udp_hdr(skb)->source;
  886. sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
  887. memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
  888. }
  889. if (inet->cmsg_flags)
  890. ip_cmsg_recv(msg, skb);
  891. err = copied;
  892. if (flags & MSG_TRUNC)
  893. err = ulen;
  894. out_free:
  895. skb_free_datagram_locked(sk, skb);
  896. out:
  897. return err;
  898. csum_copy_err:
  899. lock_sock(sk);
  900. if (!skb_kill_datagram(sk, skb, flags))
  901. UDP_INC_STATS_USER(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
  902. release_sock(sk);
  903. if (noblock)
  904. return -EAGAIN;
  905. goto try_again;
  906. }
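/*
 * udp_disconnect - break the association of a connected UDP socket
 * (connect() with AF_UNSPEC). Clears the peer address/port and the bound
 * device and, unless the user explicitly bound them, the local address
 * and port, then drops the cached route.
 */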
  907. int udp_disconnect(struct sock *sk, int flags)
  908. {
  909. struct inet_sock *inet = inet_sk(sk);
  910. /*
  911. * 1003.1g - break association.
  912. */
  913. sk->sk_state = TCP_CLOSE;
  914. inet->inet_daddr = 0;
  915. inet->inet_dport = 0;
  916. sk->sk_bound_dev_if = 0;
  917. if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
  918. inet_reset_saddr(sk);
  919. if (!(sk->sk_userlocks & SOCK_BINDPORT_LOCK)) {
  920. sk->sk_prot->unhash(sk);
  921. inet->inet_sport = 0;
  922. }
  923. sk_dst_reset(sk);
  924. return 0;
  925. }
  926. EXPORT_SYMBOL(udp_disconnect);
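/*
 * udp_lib_unhash - remove @sk from its UDP hash slot under the slot lock,
 * clear its local port and update the protocol's inuse accounting.
 */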
  927. void udp_lib_unhash(struct sock *sk)
  928. {
  929. if (sk_hashed(sk)) {
  930. struct udp_table *udptable = sk->sk_prot->h.udp_table;
  931. struct udp_hslot *hslot = udp_hashslot(udptable, sock_net(sk),
  932. sk->sk_hash);
  933. spin_lock_bh(&hslot->lock);
  934. if (sk_nulls_del_node_init_rcu(sk)) {
  935. hslot->count--;
  936. inet_sk(sk)->inet_num = 0;
  937. sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
  938. }
  939. spin_unlock_bh(&hslot->lock);
  940. }
  941. }
  942. EXPORT_SYMBOL(udp_lib_unhash);
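/*
 * __udp_queue_rcv_skb - hand a validated skb to the socket receive queue;
 * also used as the socket backlog handler. On failure the skb is freed
 * and UDP_MIB_INERRORS is bumped; -ENOMEM is additionally counted as
 * UDP_MIB_RCVBUFERRORS, so that case is charged twice.
 */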
  943. static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
  944. {
  945. int rc = sock_queue_rcv_skb(sk, skb);
  946. if (rc < 0) {
  947. int is_udplite = IS_UDPLITE(sk);
  948. /* Note that an ENOMEM error is charged twice */
  949. if (rc == -ENOMEM)
  950. UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
  951. is_udplite);
  952. UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
  953. kfree_skb(skb);
  954. return -1;
  955. }
  956. return 0;
  957. }
  958. /* returns:
  959. * -1: error
  960. * 0: success
  961. * >0: "udp encap" protocol resubmission
  962. *
  963. * Note that in the success and error cases, the skb is assumed to
  964. * have either been requeued or freed.
  965. */
  966. int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
  967. {
  968. struct udp_sock *up = udp_sk(sk);
  969. int rc;
  970. int is_udplite = IS_UDPLITE(sk);
  971. /*
  972. * Charge it to the socket, dropping if the queue is full.
  973. */
  974. if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
  975. goto drop;
  976. nf_reset(skb);
  977. if (up->encap_type) {
  978. /*
  979. * This is an encapsulation socket so pass the skb to
  980. * the socket's udp_encap_rcv() hook. Otherwise, just
  981. * fall through and pass this up the UDP socket.
  982. * up->encap_rcv() returns the following value:
  983. * =0 if skb was successfully passed to the encap
  984. * handler or was discarded by it.
  985. * >0 if skb should be passed on to UDP.
  986. * <0 if skb should be resubmitted as proto -N
  987. */
  988. /* if we're overly short, let UDP handle it */
  989. if (skb->len > sizeof(struct udphdr) &&
  990. up->encap_rcv != NULL) {
  991. int ret;
  992. ret = (*up->encap_rcv)(sk, skb);
  993. if (ret <= 0) {
  994. UDP_INC_STATS_BH(sock_net(sk),
  995. UDP_MIB_INDATAGRAMS,
  996. is_udplite);
  997. return -ret;
  998. }
  999. }
  1000. /* FALLTHROUGH -- it's a UDP Packet */
  1001. }
  1002. /*
  1003. * UDP-Lite specific tests, ignored on UDP sockets
  1004. */
  1005. if ((is_udplite & UDPLITE_RECV_CC) && UDP_SKB_CB(skb)->partial_cov) {
  1006. /*
  1007. * MIB statistics other than incrementing the error count are
  1008. * disabled for the following two types of errors: these depend
  1009. * on the application settings, not on the functioning of the
  1010. * protocol stack as such.
  1011. *
  1012. * RFC 3828 here recommends (sec 3.3): "There should also be a
  1013. * way ... to ... at least let the receiving application block
  1014. * delivery of packets with coverage values less than a value
  1015. * provided by the application."
  1016. */
  1017. if (up->pcrlen == 0) { /* full coverage was set */
  1018. LIMIT_NETDEBUG(KERN_WARNING "UDPLITE: partial coverage "
  1019. "%d while full coverage %d requested\n",
  1020. UDP_SKB_CB(skb)->cscov, skb->len);
  1021. goto drop;
  1022. }
  1023. /* The next case involves violating the min. coverage requested
  1024. * by the receiver. This is subtle: if receiver wants x and x is
  1025. * greater than the buffersize/MTU then receiver will complain
  1026. * that it wants x while sender emits packets of smaller size y.
  1027. * Therefore the above ...()->partial_cov statement is essential.
  1028. */
  1029. if (UDP_SKB_CB(skb)->cscov < up->pcrlen) {
  1030. LIMIT_NETDEBUG(KERN_WARNING
  1031. "UDPLITE: coverage %d too small, need min %d\n",
  1032. UDP_SKB_CB(skb)->cscov, up->pcrlen);
  1033. goto drop;
  1034. }
  1035. }
  1036. if (sk->sk_filter) {
  1037. if (udp_lib_checksum_complete(skb))
  1038. goto drop;
  1039. }
  1040. rc = 0;
  1041. bh_lock_sock(sk);
  1042. if (!sock_owned_by_user(sk))
  1043. rc = __udp_queue_rcv_skb(sk, skb);
  1044. else
  1045. sk_add_backlog(sk, skb);
  1046. bh_unlock_sock(sk);
  1047. return rc;
  1048. drop:
  1049. UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
  1050. atomic_inc(&sk->sk_drops);
  1051. kfree_skb(skb);
  1052. return -1;
  1053. }
  1054. /*
  1055. * Multicasts and broadcasts go to each listener.
  1056. *
  1057. * Note: called only from the BH handler context,
  1058. * so we don't need to lock the hashes.
  1059. */
  1060. static int __udp4_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
  1061. struct udphdr *uh,
  1062. __be32 saddr, __be32 daddr,
  1063. struct udp_table *udptable)
  1064. {
  1065. struct sock *sk;
  1066. struct udp_hslot *hslot = udp_hashslot(udptable, net, ntohs(uh->dest));
  1067. int dif;
  1068. spin_lock(&hslot->lock);
  1069. sk = sk_nulls_head(&hslot->head);
  1070. dif = skb->dev->ifindex;
  1071. sk = udp_v4_mcast_next(net, sk, uh->dest, daddr, uh->source, saddr, dif);
  1072. if (sk) {
  1073. struct sock *sknext = NULL;
  1074. do {
  1075. struct sk_buff *skb1 = skb;
  1076. sknext = udp_v4_mcast_next(net, sk_nulls_next(sk), uh->dest,
  1077. daddr, uh->source, saddr,
  1078. dif);
  1079. if (sknext)
  1080. skb1 = skb_clone(skb, GFP_ATOMIC);
  1081. if (skb1) {
  1082. int ret = udp_queue_rcv_skb(sk, skb1);
  1083. if (ret > 0)
  1084. /* we should probably re-process instead
  1085. * of dropping packets here. */
  1086. kfree_skb(skb1);
  1087. }
  1088. sk = sknext;
  1089. } while (sknext);
  1090. } else
  1091. consume_skb(skb);
  1092. spin_unlock(&hslot->lock);
  1093. return 0;
  1094. }
  1095. /* Initialize the UDP checksum state. If this returns zero (success) and
  1096. * skb->ip_summed is CHECKSUM_UNNECESSARY, no further checks are required.
  1097. * Otherwise, checksum completion requires checksumming the packet body,
  1098. * including the udp header, and folding the result into skb->csum.
  1099. */
  1100. static inline int udp4_csum_init(struct sk_buff *skb, struct udphdr *uh,
  1101. int proto)
  1102. {
  1103. const struct iphdr *iph;
  1104. int err;
  1105. UDP_SKB_CB(skb)->partial_cov = 0;
  1106. UDP_SKB_CB(skb)->cscov = skb->len;
  1107. if (proto == IPPROTO_UDPLITE) {
  1108. err = udplite_checksum_init(skb, uh);
  1109. if (err)
  1110. return err;
  1111. }
  1112. iph = ip_hdr(skb);
  1113. if (uh->check == 0) {
  1114. skb->ip_summed = CHECKSUM_UNNECESSARY;
  1115. } else if (skb->ip_summed == CHECKSUM_COMPLETE) {
  1116. if (!csum_tcpudp_magic(iph->saddr, iph->daddr, skb->len,
  1117. proto, skb->csum))
  1118. skb->ip_summed = CHECKSUM_UNNECESSARY;
  1119. }
  1120. if (!skb_csum_unnecessary(skb))
  1121. skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
  1122. skb->len, proto, 0);
  1123. /* Probably, we should checksum udp header (it should be in cache
  1124. * in any case) and data in tiny packets (< rx copybreak).
  1125. */
  1126. return 0;
  1127. }
  1128. /*
  1129. * All we need to do is get the socket, and then do a checksum.
  1130. */
  1131. int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
  1132. int proto)
  1133. {
  1134. struct sock *sk;
  1135. struct udphdr *uh;
  1136. unsigned short ulen;
  1137. struct rtable *rt = skb_rtable(skb);
  1138. __be32 saddr, daddr;
  1139. struct net *net = dev_net(skb->dev);
  1140. /*
  1141. * Validate the packet.
  1142. */
  1143. if (!pskb_may_pull(skb, sizeof(struct udphdr)))
  1144. goto drop; /* No space for header. */
  1145. uh = udp_hdr(skb);
  1146. ulen = ntohs(uh->len);
  1147. if (ulen > skb->len)
  1148. goto short_packet;
  1149. if (proto == IPPROTO_UDP) {
  1150. /* UDP validates ulen. */
  1151. if (ulen < sizeof(*uh) || pskb_trim_rcsum(skb, ulen))
  1152. goto short_packet;
  1153. uh = udp_hdr(skb);
  1154. }
  1155. if (udp4_csum_init(skb, uh, proto))
  1156. goto csum_error;
  1157. saddr = ip_hdr(skb)->saddr;
  1158. daddr = ip_hdr(skb)->daddr;
  1159. if (rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST))
  1160. return __udp4_lib_mcast_deliver(net, skb, uh,
  1161. saddr, daddr, udptable);
  1162. sk = __udp4_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
  1163. if (sk != NULL) {
  1164. int ret = udp_queue_rcv_skb(sk, skb);
  1165. sock_put(sk);
  1166. /* a return value > 0 means to resubmit the input, but
  1167. * it wants the return to be -protocol, or 0
  1168. */
  1169. if (ret > 0)
  1170. return -ret;
  1171. return 0;
  1172. }
  1173. if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
  1174. goto drop;
  1175. nf_reset(skb);
  1176. /* No socket. Drop packet silently, if checksum is wrong */
  1177. if (udp_lib_checksum_complete(skb))
  1178. goto csum_error;
  1179. UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
  1180. icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
  1181. /*
  1182. * Hmm. We got an UDP packet to a port to which we
  1183. * don't wanna listen. Ignore it.
  1184. */
  1185. kfree_skb(skb);
  1186. return 0;
  1187. short_packet:
  1188. LIMIT_NETDEBUG(KERN_DEBUG "UDP%s: short packet: From %pI4:%u %d/%d to %pI4:%u\n",
  1189. proto == IPPROTO_UDPLITE ? "-Lite" : "",
  1190. &saddr,
  1191. ntohs(uh->source),
  1192. ulen,
  1193. skb->len,
  1194. &daddr,
  1195. ntohs(uh->dest));
  1196. goto drop;
  1197. csum_error:
  1198. /*
  1199. * RFC1122: OK. Discards the bad packet silently (as far as
  1200. * the network is concerned, anyway) as per 4.1.3.4 (MUST).
  1201. */
  1202. LIMIT_NETDEBUG(KERN_DEBUG "UDP%s: bad checksum. From %pI4:%u to %pI4:%u ulen %d\n",
  1203. proto == IPPROTO_UDPLITE ? "-Lite" : "",
  1204. &saddr,
  1205. ntohs(uh->source),
  1206. &daddr,
  1207. ntohs(uh->dest),
  1208. ulen);
  1209. drop:
  1210. UDP_INC_STATS_BH(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE);
  1211. kfree_skb(skb);
  1212. return 0;
  1213. }
  1214. int udp_rcv(struct sk_buff *skb)
  1215. {
  1216. return __udp4_lib_rcv(skb, &udp_table, IPPROTO_UDP);
  1217. }
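/*
 * udp_destroy_sock - flush any frames still corked on the socket before
 * it is destroyed; runs under the socket lock.
 */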
  1218. void udp_destroy_sock(struct sock *sk)
  1219. {
  1220. lock_sock(sk);
  1221. udp_flush_pending_frames(sk);
  1222. release_sock(sk);
  1223. }
  1224. /*
  1225. * Socket option code for UDP
  1226. */
  1227. int udp_lib_setsockopt(struct sock *sk, int level, int optname,
  1228. char __user *optval, unsigned int optlen,
  1229. int (*push_pending_frames)(struct sock *))
  1230. {
  1231. struct udp_sock *up = udp_sk(sk);
  1232. int val;
  1233. int err = 0;
  1234. int is_udplite = IS_UDPLITE(sk);
  1235. if (optlen < sizeof(int))
  1236. return -EINVAL;
  1237. if (get_user(val, (int __user *)optval))
  1238. return -EFAULT;
  1239. switch (optname) {
  1240. case UDP_CORK:
  1241. if (val != 0) {
  1242. up->corkflag = 1;
  1243. } else {
  1244. up->corkflag = 0;
  1245. lock_sock(sk);
  1246. (*push_pending_frames)(sk);
  1247. release_sock(sk);
  1248. }
  1249. break;
  1250. case UDP_ENCAP:
  1251. switch (val) {
  1252. case 0:
  1253. case UDP_ENCAP_ESPINUDP:
  1254. case UDP_ENCAP_ESPINUDP_NON_IKE:
  1255. up->encap_rcv = xfrm4_udp_encap_rcv;
  1256. /* FALLTHROUGH */
  1257. case UDP_ENCAP_L2TPINUDP:
  1258. up->encap_type = val;
  1259. break;
  1260. default:
  1261. err = -ENOPROTOOPT;
  1262. break;
  1263. }
  1264. break;
  1265. /*
  1266. * UDP-Lite's partial checksum coverage (RFC 3828).
  1267. */
  1268. /* The sender sets actual checksum coverage length via this option.
  1269. * The case coverage > packet length is handled by send module. */
  1270. case UDPLITE_SEND_CSCOV:
  1271. if (!is_udplite) /* Disable the option on UDP sockets */
  1272. return -ENOPROTOOPT;
  1273. if (val != 0 && val < 8) /* Illegal coverage: use default (8) */
  1274. val = 8;
  1275. else if (val > USHORT_MAX)
  1276. val = USHORT_MAX;
  1277. up->pcslen = val;
  1278. up->pcflag |= UDPLITE_SEND_CC;
  1279. break;
  1280. /* The receiver specifies a minimum checksum coverage value. To make
  1281. * sense, this should be set to at least 8 (as done below). If zero is
  1282. * used, this again means full checksum coverage. */
  1283. case UDPLITE_RECV_CSCOV:
  1284. if (!is_udplite) /* Disable the option on UDP sockets */
  1285. return -ENOPROTOOPT;
  1286. if (val != 0 && val < 8) /* Avoid silly minimal values. */
  1287. val = 8;
  1288. else if (val > USHORT_MAX)
  1289. val = USHORT_MAX;
  1290. up->pcrlen = val;
  1291. up->pcflag |= UDPLITE_RECV_CC;
  1292. break;
  1293. default:
  1294. err = -ENOPROTOOPT;
  1295. break;
  1296. }
  1297. return err;
  1298. }
  1299. EXPORT_SYMBOL(udp_lib_setsockopt);
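/*
 * For reference, the options handled above are set from userspace with
 * plain setsockopt() calls. An illustrative (hypothetical) snippet:
 *
 *	int on = 1;
 *	setsockopt(fd, IPPROTO_UDP, UDP_CORK, &on, sizeof(on));
 *	...				// queue several sends
 *	on = 0;
 *	setsockopt(fd, IPPROTO_UDP, UDP_CORK, &on, sizeof(on));
 *
 * For UDP-Lite sockets, UDPLITE_SEND_CSCOV/UDPLITE_RECV_CSCOV take the
 * checksum coverage in bytes; values 1..7 are rounded up to 8 as above.
 */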
  1300. int udp_setsockopt(struct sock *sk, int level, int optname,
  1301. char __user *optval, unsigned int optlen)
  1302. {
  1303. if (level == SOL_UDP || level == SOL_UDPLITE)
  1304. return udp_lib_setsockopt(sk, level, optname, optval, optlen,
  1305. udp_push_pending_frames);
  1306. return ip_setsockopt(sk, level, optname, optval, optlen);
  1307. }
  1308. #ifdef CONFIG_COMPAT
  1309. int compat_udp_setsockopt(struct sock *sk, int level, int optname,
  1310. char __user *optval, unsigned int optlen)
  1311. {
  1312. if (level == SOL_UDP || level == SOL_UDPLITE)
  1313. return udp_lib_setsockopt(sk, level, optname, optval, optlen,
  1314. udp_push_pending_frames);
  1315. return compat_ip_setsockopt(sk, level, optname, optval, optlen);
  1316. }
  1317. #endif
  1318. int udp_lib_getsockopt(struct sock *sk, int level, int optname,
  1319. char __user *optval, int __user *optlen)
  1320. {
  1321. struct udp_sock *up = udp_sk(sk);
  1322. int val, len;
  1323. if (get_user(len, optlen))
  1324. return -EFAULT;
  1325. len = min_t(unsigned int, len, sizeof(int));
  1326. if (len < 0)
  1327. return -EINVAL;
  1328. switch (optname) {
  1329. case UDP_CORK:
  1330. val = up->corkflag;
  1331. break;
  1332. case UDP_ENCAP:
  1333. val = up->encap_type;
  1334. break;
  1335. /* The following two cannot be changed on UDP sockets; the return is
  1336. * always 0 (which corresponds to the full checksum coverage of UDP). */
  1337. case UDPLITE_SEND_CSCOV:
  1338. val = up->pcslen;
  1339. break;
  1340. case UDPLITE_RECV_CSCOV:
  1341. val = up->pcrlen;
  1342. break;
  1343. default:
  1344. return -ENOPROTOOPT;
  1345. }
  1346. if (put_user(len, optlen))
  1347. return -EFAULT;
  1348. if (copy_to_user(optval, &val, len))
  1349. return -EFAULT;
  1350. return 0;
  1351. }
  1352. EXPORT_SYMBOL(udp_lib_getsockopt);
  1353. int udp_getsockopt(struct sock *sk, int level, int optname,
  1354. char __user *optval, int __user *optlen)
  1355. {
  1356. if (level == SOL_UDP || level == SOL_UDPLITE)
  1357. return udp_lib_getsockopt(sk, level, optname, optval, optlen);
  1358. return ip_getsockopt(sk, level, optname, optval, optlen);
  1359. }
  1360. #ifdef CONFIG_COMPAT
  1361. int compat_udp_getsockopt(struct sock *sk, int level, int optname,
  1362. char __user *optval, int __user *optlen)
  1363. {
  1364. if (level == SOL_UDP || level == SOL_UDPLITE)
  1365. return udp_lib_getsockopt(sk, level, optname, optval, optlen);
  1366. return compat_ip_getsockopt(sk, level, optname, optval, optlen);
  1367. }
  1368. #endif
  1369. /**
  1370. * udp_poll - wait for a UDP event.
  1371. * @file - file struct
  1372. * @sock - socket
  1373. * @wait - poll table
  1374. *
  1375. * This is the same as datagram poll, except for the special case of
  1376. * blocking sockets. If an application is using a blocking fd
  1377. * and a packet with a checksum error is in the queue,
  1378. * it could get a return from select() indicating data is available
  1379. * but then block when reading it. Add special case code
  1380. * to work around these arguably broken applications.
  1381. */
  1382. unsigned int udp_poll(struct file *file, struct socket *sock, poll_table *wait)
  1383. {
  1384. unsigned int mask = datagram_poll(file, sock, wait);
  1385. struct sock *sk = sock->sk;
  1386. /* Check for false positives due to checksum errors */
  1387. if ((mask & POLLRDNORM) && !(file->f_flags & O_NONBLOCK) &&
  1388. !(sk->sk_shutdown & RCV_SHUTDOWN) && !first_packet_length(sk))
  1389. mask &= ~(POLLIN | POLLRDNORM);
  1390. return mask;
  1391. }
  1392. EXPORT_SYMBOL(udp_poll);
  1393. struct proto udp_prot = {
  1394. .name = "UDP",
  1395. .owner = THIS_MODULE,
  1396. .close = udp_lib_close,
  1397. .connect = ip4_datagram_connect,
  1398. .disconnect = udp_disconnect,
  1399. .ioctl = udp_ioctl,
  1400. .destroy = udp_destroy_sock,
  1401. .setsockopt = udp_setsockopt,
  1402. .getsockopt = udp_getsockopt,
  1403. .sendmsg = udp_sendmsg,
  1404. .recvmsg = udp_recvmsg,
  1405. .sendpage = udp_sendpage,
  1406. .backlog_rcv = __udp_queue_rcv_skb,
  1407. .hash = udp_lib_hash,
  1408. .unhash = udp_lib_unhash,
  1409. .get_port = udp_v4_get_port,
  1410. .memory_allocated = &udp_memory_allocated,
  1411. .sysctl_mem = sysctl_udp_mem,
  1412. .sysctl_wmem = &sysctl_udp_wmem_min,
  1413. .sysctl_rmem = &sysctl_udp_rmem_min,
  1414. .obj_size = sizeof(struct udp_sock),
  1415. .slab_flags = SLAB_DESTROY_BY_RCU,
  1416. .h.udp_table = &udp_table,
  1417. #ifdef CONFIG_COMPAT
  1418. .compat_setsockopt = compat_udp_setsockopt,
  1419. .compat_getsockopt = compat_udp_getsockopt,
  1420. #endif
  1421. };
  1422. EXPORT_SYMBOL(udp_prot);
  1423. /* ------------------------------------------------------------------------ */
  1424. #ifdef CONFIG_PROC_FS
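/*
 * /proc/net/udp iterator. udp_get_first()/udp_get_next() walk the hash
 * table bucket by bucket, skipping sockets from other namespaces or
 * address families. The lock of the bucket currently being walked is
 * kept held between iterations and is released by udp_seq_stop() or
 * when the walk moves on to the next bucket.
 */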
static struct sock *udp_get_first(struct seq_file *seq, int start)
{
	struct sock *sk;
	struct udp_iter_state *state = seq->private;
	struct net *net = seq_file_net(seq);

	for (state->bucket = start; state->bucket <= state->udp_table->mask;
	     ++state->bucket) {
		struct hlist_nulls_node *node;
		struct udp_hslot *hslot = &state->udp_table->hash[state->bucket];

		if (hlist_nulls_empty(&hslot->head))
			continue;

		spin_lock_bh(&hslot->lock);
		sk_nulls_for_each(sk, node, &hslot->head) {
			if (!net_eq(sock_net(sk), net))
				continue;
			if (sk->sk_family == state->family)
				goto found;
		}
		spin_unlock_bh(&hslot->lock);
	}
	sk = NULL;
found:
	return sk;
}

static struct sock *udp_get_next(struct seq_file *seq, struct sock *sk)
{
	struct udp_iter_state *state = seq->private;
	struct net *net = seq_file_net(seq);

	do {
		sk = sk_nulls_next(sk);
	} while (sk && (!net_eq(sock_net(sk), net) || sk->sk_family != state->family));

	if (!sk) {
		if (state->bucket <= state->udp_table->mask)
			spin_unlock_bh(&state->udp_table->hash[state->bucket].lock);
		return udp_get_first(seq, state->bucket + 1);
	}
	return sk;
}

static struct sock *udp_get_idx(struct seq_file *seq, loff_t pos)
{
	struct sock *sk = udp_get_first(seq, 0);

	if (sk)
		while (pos && (sk = udp_get_next(seq, sk)) != NULL)
			--pos;
	return pos ? NULL : sk;
}

static void *udp_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct udp_iter_state *state = seq->private;
	state->bucket = MAX_UDP_PORTS;

	return *pos ? udp_get_idx(seq, *pos-1) : SEQ_START_TOKEN;
}

static void *udp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct sock *sk;

	if (v == SEQ_START_TOKEN)
		sk = udp_get_idx(seq, 0);
	else
		sk = udp_get_next(seq, v);

	++*pos;
	return sk;
}

static void udp_seq_stop(struct seq_file *seq, void *v)
{
	struct udp_iter_state *state = seq->private;

	if (state->bucket <= state->udp_table->mask)
		spin_unlock_bh(&state->udp_table->hash[state->bucket].lock);
}

static int udp_seq_open(struct inode *inode, struct file *file)
{
	struct udp_seq_afinfo *afinfo = PDE(inode)->data;
	struct udp_iter_state *s;
	int err;

	err = seq_open_net(inode, file, &afinfo->seq_ops,
			   sizeof(struct udp_iter_state));
	if (err < 0)
		return err;

	s = ((struct seq_file *)file->private_data)->private;
	s->family	= afinfo->family;
	s->udp_table	= afinfo->udp_table;
	return err;
}

/* ------------------------------------------------------------------------ */
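/*
 * Each protocol/family pair (udp4, udp6, udplite, ...) supplies its own
 * udp_seq_afinfo and registers a /proc/net entry through this helper; the
 * common open/read/llseek/release and start/next/stop hooks are filled in
 * here, so callers only provide the per-family show routine, name and table.
 */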
int udp_proc_register(struct net *net, struct udp_seq_afinfo *afinfo)
{
	struct proc_dir_entry *p;
	int rc = 0;

	afinfo->seq_fops.open		= udp_seq_open;
	afinfo->seq_fops.read		= seq_read;
	afinfo->seq_fops.llseek		= seq_lseek;
	afinfo->seq_fops.release	= seq_release_net;

	afinfo->seq_ops.start		= udp_seq_start;
	afinfo->seq_ops.next		= udp_seq_next;
	afinfo->seq_ops.stop		= udp_seq_stop;

	p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
			     &afinfo->seq_fops, afinfo);
	if (!p)
		rc = -ENOMEM;
	return rc;
}
EXPORT_SYMBOL(udp_proc_register);

void udp_proc_unregister(struct net *net, struct udp_seq_afinfo *afinfo)
{
	proc_net_remove(net, afinfo->name);
}
EXPORT_SYMBOL(udp_proc_unregister);

/* ------------------------------------------------------------------------ */
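/*
 * Formats one socket as a line of /proc/net/udp.  Illustrative output (all
 * values made up) for an unconnected socket bound to port 68:
 *
 *   sl  local_address rem_address   st tx_queue rx_queue tr tm->when retrnsmt   uid  timeout inode ref pointer drops
 *   80: 00000000:0044 00000000:0000 07 00000000:00000000 00:00000000 00000000     0        0 12345 2 ffff88003d8f1c00 0
 *
 * The tr, tm->when, retrnsmt and timeout columns are always printed as zero
 * for UDP; they exist only to keep the layout aligned with /proc/net/tcp.
 */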
static void udp4_format_sock(struct sock *sp, struct seq_file *f,
		int bucket, int *len)
{
	struct inet_sock *inet = inet_sk(sp);
	__be32 dest = inet->inet_daddr;
	__be32 src  = inet->inet_rcv_saddr;
	__u16 destp = ntohs(inet->inet_dport);
	__u16 srcp  = ntohs(inet->inet_sport);

	seq_printf(f, "%5d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %d%n",
		bucket, src, srcp, dest, destp, sp->sk_state,
		sk_wmem_alloc_get(sp),
		sk_rmem_alloc_get(sp),
		0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
		atomic_read(&sp->sk_refcnt), sp,
		atomic_read(&sp->sk_drops), len);
}

int udp4_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_printf(seq, "%-127s\n",
			   "  sl  local_address rem_address   st tx_queue "
			   "rx_queue tr tm->when retrnsmt   uid  timeout "
			   "inode ref pointer drops");
	else {
		struct udp_iter_state *state = seq->private;
		int len;

		udp4_format_sock(v, seq, state->bucket, &len);
		seq_printf(seq, "%*s\n", 127 - len, "");
	}
	return 0;
}

/* ------------------------------------------------------------------------ */
static struct udp_seq_afinfo udp4_seq_afinfo = {
	.name		= "udp",
	.family		= AF_INET,
	.udp_table	= &udp_table,
	.seq_fops	= {
		.owner	= THIS_MODULE,
	},
	.seq_ops	= {
		.show	= udp4_seq_show,
	},
};

static int udp4_proc_init_net(struct net *net)
{
	return udp_proc_register(net, &udp4_seq_afinfo);
}

static void udp4_proc_exit_net(struct net *net)
{
	udp_proc_unregister(net, &udp4_seq_afinfo);
}

static struct pernet_operations udp4_net_ops = {
	.init = udp4_proc_init_net,
	.exit = udp4_proc_exit_net,
};

int __init udp4_proc_init(void)
{
	return register_pernet_subsys(&udp4_net_ops);
}

void udp4_proc_exit(void)
{
	unregister_pernet_subsys(&udp4_net_ops);
}
#endif /* CONFIG_PROC_FS */
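
/*
 * The UDP hash table size can be overridden on the kernel command line,
 * e.g. "uhash_entries=2048"; set_uhash_entries() below parses the value and
 * silently raises anything non-zero but smaller than UDP_HTABLE_SIZE_MIN.
 */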
static __initdata unsigned long uhash_entries;

static int __init set_uhash_entries(char *str)
{
	if (!str)
		return 0;
	uhash_entries = simple_strtoul(str, &str, 0);
	if (uhash_entries && uhash_entries < UDP_HTABLE_SIZE_MIN)
		uhash_entries = UDP_HTABLE_SIZE_MIN;
	return 1;
}
__setup("uhash_entries=", set_uhash_entries);
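
/*
 * Hash table setup: on normal configurations the table is sized from the
 * amount of memory by alloc_large_system_hash() (roughly one slot per 2 MB,
 * capped at 64K slots) unless uhash_entries overrides it; CONFIG_BASE_SMALL
 * builds, and any result smaller than UDP_HTABLE_SIZE_MIN slots, fall back
 * to a fixed minimum-size kmalloc()ed table.
 */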
void __init udp_table_init(struct udp_table *table, const char *name)
{
	unsigned int i;

	if (!CONFIG_BASE_SMALL)
		table->hash = alloc_large_system_hash(name,
			sizeof(struct udp_hslot),
			uhash_entries,
			21, /* one slot per 2 MB */
			0,
			&table->log,
			&table->mask,
			64 * 1024);
	/*
	 * Make sure hash table has the minimum size
	 */
	if (CONFIG_BASE_SMALL || table->mask < UDP_HTABLE_SIZE_MIN - 1) {
		table->hash = kmalloc(UDP_HTABLE_SIZE_MIN *
				      sizeof(struct udp_hslot), GFP_KERNEL);
		if (!table->hash)
			panic(name);
		table->log = ilog2(UDP_HTABLE_SIZE_MIN);
		table->mask = UDP_HTABLE_SIZE_MIN - 1;
	}
	for (i = 0; i <= table->mask; i++) {
		INIT_HLIST_NULLS_HEAD(&table->hash[i].head, i);
		table->hash[i].count = 0;
		spin_lock_init(&table->hash[i].lock);
	}
}

void __init udp_init(void)
{
	unsigned long nr_pages, limit;

	udp_table_init(&udp_table, "UDP");
	/* Set the pressure threshold using the same strategy as TCP: a
	 * fraction of global memory that is up to 1/2 at 256 MB, decreasing
	 * toward zero with the amount of memory, with a floor of 128 pages.
	 */
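	/*
	 * Worked example (assuming 4 KB pages and 256 MB of low memory, i.e.
	 * nr_pages = 65536): limit = min(65536, 65536) >> 8 = 256, then
	 * limit = (256 * (65536 >> 8)) >> 1 = 32768 pages = 128 MB, so
	 * sysctl_udp_mem becomes {24576, 32768, 49152} pages.
	 */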
	nr_pages = totalram_pages - totalhigh_pages;
	limit = min(nr_pages, 1UL<<(28-PAGE_SHIFT)) >> (20-PAGE_SHIFT);
	limit = (limit * (nr_pages >> (20-PAGE_SHIFT))) >> (PAGE_SHIFT-11);
	limit = max(limit, 128UL);
	sysctl_udp_mem[0] = limit / 4 * 3;
	sysctl_udp_mem[1] = limit;
	sysctl_udp_mem[2] = sysctl_udp_mem[0] * 2;

	sysctl_udp_rmem_min = SK_MEM_QUANTUM;
	sysctl_udp_wmem_min = SK_MEM_QUANTUM;
}
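
/*
 * UFO (UDP fragmentation offload) hooks.  udp4_ufo_send_check() seeds
 * uh->check with the pseudo-header checksum and marks the skb
 * CHECKSUM_PARTIAL, so whoever finally emits the packet only has to fold in
 * the checksum over the data starting at csum_start.
 */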
int udp4_ufo_send_check(struct sk_buff *skb)
{
	const struct iphdr *iph;
	struct udphdr *uh;

	if (!pskb_may_pull(skb, sizeof(*uh)))
		return -EINVAL;

	iph = ip_hdr(skb);
	uh = udp_hdr(skb);

	uh->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, skb->len,
				       IPPROTO_UDP, 0);
	skb->csum_start = skb_transport_header(skb) - skb->head;
	skb->csum_offset = offsetof(struct udphdr, check);
	skb->ip_summed = CHECKSUM_PARTIAL;
	return 0;
}
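
/*
 * Software UFO fallback: when the GSO path cannot hand the over-sized
 * datagram to the device intact, the UDP checksum is completed in software
 * here (hardware cannot checksum a datagram that will leave as multiple IP
 * fragments) and skb_segment() does the actual splitting; the IP headers of
 * the fragments are fixed up later in inet_gso_segment().
 */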
struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb, int features)
{
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	unsigned int mss;
	int offset;
	__wsum csum;

	mss = skb_shinfo(skb)->gso_size;
	if (unlikely(skb->len <= mss))
		goto out;

	if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
		/* Packet is from an untrusted source, reset gso_segs. */
		int type = skb_shinfo(skb)->gso_type;

		if (unlikely(type & ~(SKB_GSO_UDP | SKB_GSO_DODGY) ||
			     !(type & (SKB_GSO_UDP))))
			goto out;

		skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);

		segs = NULL;
		goto out;
	}

	/* Do software UFO. Complete and fill in the UDP checksum as HW cannot
	 * do checksum of UDP packets sent as multiple IP fragments.
	 */
	offset = skb->csum_start - skb_headroom(skb);
	csum = skb_checksum(skb, offset, skb->len - offset, 0);
	offset += skb->csum_offset;
	*(__sum16 *)(skb->data + offset) = csum_fold(csum);
	skb->ip_summed = CHECKSUM_NONE;

	/* Fragment the skb. IP headers of the fragments are updated in
	 * inet_gso_segment()
	 */
	segs = skb_segment(skb, features);
out:
	return segs;
}