/*
 * INET     An implementation of the TCP/IP protocol suite for the LINUX
 *          operating system. INET is implemented using the BSD Socket
 *          interface as the means of communication with the user level.
 *
 *          Generic socket support routines. Memory allocators, socket lock/release
 *          handler for protocols to use and generic option handler.
 *
 * Authors: Ross Biro
 *          Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *          Florian La Roche, <flla@stud.uni-sb.de>
 *          Alan Cox, <A.Cox@swansea.ac.uk>
 *
 * Fixes:
 *          Alan Cox        :   Numerous verify_area() problems
 *          Alan Cox        :   Connecting on a connecting socket
 *                              now returns an error for tcp.
 *          Alan Cox        :   sock->protocol is set correctly,
 *                              and is not sometimes left as 0.
 *          Alan Cox        :   connect handles icmp errors on a
 *                              connect properly. Unfortunately there
 *                              is a restart syscall nasty there. I
 *                              can't match BSD without hacking the C
 *                              library. Ideas urgently sought!
 *          Alan Cox        :   Disallow bind() to addresses that are
 *                              not ours - especially broadcast ones!!
 *          Alan Cox        :   Socket 1024 _IS_ ok for users. (fencepost)
 *          Alan Cox        :   sock_wfree/sock_rfree don't destroy sockets,
 *                              instead they leave that for the DESTROY timer.
 *          Alan Cox        :   Clean up error flag in accept
 *          Alan Cox        :   TCP ack handling is buggy, the DESTROY timer
 *                              was buggy. Put a remove_sock() in the handler
 *                              for memory when we hit 0. Also altered the timer
 *                              code. The ACK stuff can wait and needs major
 *                              TCP layer surgery.
 *          Alan Cox        :   Fixed TCP ack bug, removed remove sock
 *                              and fixed timer/inet_bh race.
 *          Alan Cox        :   Added zapped flag for TCP
 *          Alan Cox        :   Move kfree_skb into skbuff.c and tidied up surplus code
 *          Alan Cox        :   for new sk_buff allocations wmalloc/rmalloc now call alloc_skb
 *          Alan Cox        :   kfree_s calls now are kfree_skbmem so we can track skb resources
 *          Alan Cox        :   Supports socket option broadcast now as does udp. Packet and raw need fixing.
 *          Alan Cox        :   Added RCVBUF,SNDBUF size setting. It suddenly occurred to me how easy it was so...
 *          Rick Sladkey    :   Relaxed UDP rules for matching packets.
 *          C.E.Hawkins     :   IFF_PROMISC/SIOCGHWADDR support
 *          Pauline Middelink   :   identd support
 *          Alan Cox        :   Fixed connect() taking signals I think.
 *          Alan Cox        :   SO_LINGER supported
 *          Alan Cox        :   Error reporting fixes
 *          Anonymous       :   inet_create tidied up (sk->reuse setting)
 *          Alan Cox        :   inet sockets don't set sk->type!
 *          Alan Cox        :   Split socket option code
 *          Alan Cox        :   Callbacks
 *          Alan Cox        :   Nagle flag for Charles & Johannes stuff
 *          Alex            :   Removed restriction on inet fioctl
 *          Alan Cox        :   Splitting INET from NET core
 *          Alan Cox        :   Fixed bogus SO_TYPE handling in getsockopt()
 *          Adam Caldwell   :   Missing return in SO_DONTROUTE/SO_DEBUG code
 *          Alan Cox        :   Split IP from generic code
 *          Alan Cox        :   New kfree_skbmem()
 *          Alan Cox        :   Make SO_DEBUG superuser only.
 *          Alan Cox        :   Allow anyone to clear SO_DEBUG
 *                              (compatibility fix)
 *          Alan Cox        :   Added optimistic memory grabbing for AF_UNIX throughput.
 *          Alan Cox        :   Allocator for a socket is settable.
 *          Alan Cox        :   SO_ERROR includes soft errors.
 *          Alan Cox        :   Allow NULL arguments on some SO_ opts
 *          Alan Cox        :   Generic socket allocation to make hooks
 *                              easier (suggested by Craig Metz).
 *          Michael Pall    :   SO_ERROR returns positive errno again
 *          Steve Whitehouse:   Added default destructor to free
 *                              protocol private data.
 *          Steve Whitehouse:   Added various other default routines
 *                              common to several socket families.
 *          Chris Evans     :   Call suser() check last on F_SETOWN
 *          Jay Schulist    :   Added SO_ATTACH_FILTER and SO_DETACH_FILTER.
 *          Andi Kleen      :   Add sock_kmalloc()/sock_kfree_s()
 *          Andi Kleen      :   Fix write_space callback
 *          Chris Evans     :   Security fixes - signedness again
 *          Arnaldo C. Melo :   cleanups, use skb_queue_purge
 *
 * To Fix:
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/errqueue.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/poll.h>
#include <linux/tcp.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/user_namespace.h>
#include <linux/static_key.h>
#include <linux/memcontrol.h>
#include <linux/prefetch.h>

#include <asm/uaccess.h>

#include <linux/netdevice.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/request_sock.h>
#include <net/sock.h>
#include <linux/net_tstamp.h>
#include <net/xfrm.h>
#include <linux/ipsec.h>
#include <net/cls_cgroup.h>
#include <net/netprio_cgroup.h>
#include <linux/filter.h>

#include <trace/events/sock.h>

#ifdef CONFIG_INET
#include <net/tcp.h>
#endif

#include <net/busy_poll.h>
static DEFINE_MUTEX(proto_list_mutex);
static LIST_HEAD(proto_list);

/**
 * sk_ns_capable - General socket capability test
 * @sk: Socket to use a capability on or through
 * @user_ns: The user namespace of the capability to use
 * @cap: The capability to use
 *
 * Test to see if the opener of the socket had the capability @cap when
 * the socket was created and whether the current process has it in the
 * user namespace @user_ns.
 */
bool sk_ns_capable(const struct sock *sk,
                   struct user_namespace *user_ns, int cap)
{
    return file_ns_capable(sk->sk_socket->file, user_ns, cap) &&
        ns_capable(user_ns, cap);
}
EXPORT_SYMBOL(sk_ns_capable);

/**
 * sk_capable - Socket global capability test
 * @sk: Socket to use a capability on or through
 * @cap: The global capability to use
 *
 * Test to see if the opener of the socket had the capability @cap when
 * the socket was created and whether the current process has it in all
 * user namespaces.
 */
bool sk_capable(const struct sock *sk, int cap)
{
    return sk_ns_capable(sk, &init_user_ns, cap);
}
EXPORT_SYMBOL(sk_capable);

/**
 * sk_net_capable - Network namespace socket capability test
 * @sk: Socket to use a capability on or through
 * @cap: The capability to use
 *
 * Test to see if the opener of the socket had the capability @cap when
 * the socket was created and whether the current process has it over
 * the network namespace the socket is a member of.
 */
bool sk_net_capable(const struct sock *sk, int cap)
{
    return sk_ns_capable(sk, sock_net(sk)->user_ns, cap);
}
EXPORT_SYMBOL(sk_net_capable);
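/*
 * Usage sketch (illustrative, not from this file): a protocol handler
 * would typically gate a privileged operation on the socket like so,
 * where foo_ioctl is a hypothetical example:
 *
 *      static int foo_ioctl(struct sock *sk, unsigned int cmd)
 *      {
 *              if (!sk_net_capable(sk, CAP_NET_ADMIN))
 *                      return -EPERM;
 *              ...
 *      }
 *
 * The check passes only if both the socket's opener and the current
 * task hold CAP_NET_ADMIN, which keeps a privileged opener from being
 * abused by an unprivileged task that later inherited the fd.
 */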
#ifdef CONFIG_MEMCG_KMEM
int mem_cgroup_sockets_init(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
{
    struct proto *proto;
    int ret = 0;

    mutex_lock(&proto_list_mutex);
    list_for_each_entry(proto, &proto_list, node) {
        if (proto->init_cgroup) {
            ret = proto->init_cgroup(memcg, ss);
            if (ret)
                goto out;
        }
    }
    mutex_unlock(&proto_list_mutex);
    return ret;
out:
    list_for_each_entry_continue_reverse(proto, &proto_list, node)
        if (proto->destroy_cgroup)
            proto->destroy_cgroup(memcg);
    mutex_unlock(&proto_list_mutex);
    return ret;
}

void mem_cgroup_sockets_destroy(struct mem_cgroup *memcg)
{
    struct proto *proto;

    mutex_lock(&proto_list_mutex);
    list_for_each_entry_reverse(proto, &proto_list, node)
        if (proto->destroy_cgroup)
            proto->destroy_cgroup(memcg);
    mutex_unlock(&proto_list_mutex);
}
#endif

/*
 * Each address family might have different locking rules, so we have
 * one slock key per address family:
 */
static struct lock_class_key af_family_keys[AF_MAX];
static struct lock_class_key af_family_slock_keys[AF_MAX];

#if defined(CONFIG_MEMCG_KMEM)
struct static_key memcg_socket_limit_enabled;
EXPORT_SYMBOL(memcg_socket_limit_enabled);
#endif

/*
 * Make lock validator output more readable. (we pre-construct these
 * strings build-time, so that runtime initialization of socket
 * locks is fast):
 */
static const char *const af_family_key_strings[AF_MAX+1] = {
    "sk_lock-AF_UNSPEC", "sk_lock-AF_UNIX"     , "sk_lock-AF_INET"     ,
    "sk_lock-AF_AX25"  , "sk_lock-AF_IPX"      , "sk_lock-AF_APPLETALK",
    "sk_lock-AF_NETROM", "sk_lock-AF_BRIDGE"   , "sk_lock-AF_ATMPVC"   ,
    "sk_lock-AF_X25"   , "sk_lock-AF_INET6"    , "sk_lock-AF_ROSE"     ,
    "sk_lock-AF_DECnet", "sk_lock-AF_NETBEUI"  , "sk_lock-AF_SECURITY" ,
    "sk_lock-AF_KEY"   , "sk_lock-AF_NETLINK"  , "sk_lock-AF_PACKET"   ,
    "sk_lock-AF_ASH"   , "sk_lock-AF_ECONET"   , "sk_lock-AF_ATMSVC"   ,
    "sk_lock-AF_RDS"   , "sk_lock-AF_SNA"      , "sk_lock-AF_IRDA"     ,
    "sk_lock-AF_PPPOX" , "sk_lock-AF_WANPIPE"  , "sk_lock-AF_LLC"      ,
    "sk_lock-27"       , "sk_lock-28"          , "sk_lock-AF_CAN"      ,
    "sk_lock-AF_TIPC"  , "sk_lock-AF_BLUETOOTH", "sk_lock-AF_IUCV"     ,
    "sk_lock-AF_RXRPC" , "sk_lock-AF_ISDN"     , "sk_lock-AF_PHONET"   ,
    "sk_lock-AF_IEEE802154", "sk_lock-AF_CAIF" , "sk_lock-AF_ALG"      ,
    "sk_lock-AF_NFC"   , "sk_lock-AF_VSOCK"    , "sk_lock-AF_MAX"
};
static const char *const af_family_slock_key_strings[AF_MAX+1] = {
    "slock-AF_UNSPEC", "slock-AF_UNIX"     , "slock-AF_INET"     ,
    "slock-AF_AX25"  , "slock-AF_IPX"      , "slock-AF_APPLETALK",
    "slock-AF_NETROM", "slock-AF_BRIDGE"   , "slock-AF_ATMPVC"   ,
    "slock-AF_X25"   , "slock-AF_INET6"    , "slock-AF_ROSE"     ,
    "slock-AF_DECnet", "slock-AF_NETBEUI"  , "slock-AF_SECURITY" ,
    "slock-AF_KEY"   , "slock-AF_NETLINK"  , "slock-AF_PACKET"   ,
    "slock-AF_ASH"   , "slock-AF_ECONET"   , "slock-AF_ATMSVC"   ,
    "slock-AF_RDS"   , "slock-AF_SNA"      , "slock-AF_IRDA"     ,
    "slock-AF_PPPOX" , "slock-AF_WANPIPE"  , "slock-AF_LLC"      ,
    "slock-27"       , "slock-28"          , "slock-AF_CAN"      ,
    "slock-AF_TIPC"  , "slock-AF_BLUETOOTH", "slock-AF_IUCV"     ,
    "slock-AF_RXRPC" , "slock-AF_ISDN"     , "slock-AF_PHONET"   ,
    "slock-AF_IEEE802154", "slock-AF_CAIF" , "slock-AF_ALG"      ,
    "slock-AF_NFC"   , "slock-AF_VSOCK"    , "slock-AF_MAX"
};
static const char *const af_family_clock_key_strings[AF_MAX+1] = {
    "clock-AF_UNSPEC", "clock-AF_UNIX"     , "clock-AF_INET"     ,
    "clock-AF_AX25"  , "clock-AF_IPX"      , "clock-AF_APPLETALK",
    "clock-AF_NETROM", "clock-AF_BRIDGE"   , "clock-AF_ATMPVC"   ,
    "clock-AF_X25"   , "clock-AF_INET6"    , "clock-AF_ROSE"     ,
    "clock-AF_DECnet", "clock-AF_NETBEUI"  , "clock-AF_SECURITY" ,
    "clock-AF_KEY"   , "clock-AF_NETLINK"  , "clock-AF_PACKET"   ,
    "clock-AF_ASH"   , "clock-AF_ECONET"   , "clock-AF_ATMSVC"   ,
    "clock-AF_RDS"   , "clock-AF_SNA"      , "clock-AF_IRDA"     ,
    "clock-AF_PPPOX" , "clock-AF_WANPIPE"  , "clock-AF_LLC"      ,
    "clock-27"       , "clock-28"          , "clock-AF_CAN"      ,
    "clock-AF_TIPC"  , "clock-AF_BLUETOOTH", "clock-AF_IUCV"     ,
    "clock-AF_RXRPC" , "clock-AF_ISDN"     , "clock-AF_PHONET"   ,
    "clock-AF_IEEE802154", "clock-AF_CAIF" , "clock-AF_ALG"      ,
    "clock-AF_NFC"   , "clock-AF_VSOCK"    , "clock-AF_MAX"
};

/*
 * sk_callback_lock locking rules are per-address-family,
 * so split the lock classes by using a per-AF key:
 */
static struct lock_class_key af_callback_keys[AF_MAX];

/* Take into consideration the size of the struct sk_buff overhead in the
 * determination of these values, since that is non-constant across
 * platforms. This makes socket queueing behavior and performance
 * not depend upon such differences.
 */
#define _SK_MEM_PACKETS     256
#define _SK_MEM_OVERHEAD    SKB_TRUESIZE(256)
#define SK_WMEM_MAX         (_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
#define SK_RMEM_MAX         (_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
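/*
 * Back-of-the-envelope arithmetic (editor's note, not from the original
 * source): SKB_TRUESIZE(256) is 256 bytes of payload plus the
 * cache-aligned sizes of struct sk_buff and struct skb_shared_info --
 * typically a few hundred bytes of metadata on a 64-bit build. With
 * _SK_MEM_PACKETS at 256, the defaults below therefore land in the low
 * hundreds of kilobytes, not the 64 KB that 256 * 256 alone would
 * suggest; the exact figure varies with the platform's struct sizes,
 * which is precisely why SKB_TRUESIZE() is used here.
 */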
/* Run time adjustable parameters. */
__u32 sysctl_wmem_max __read_mostly = SK_WMEM_MAX;
EXPORT_SYMBOL(sysctl_wmem_max);
__u32 sysctl_rmem_max __read_mostly = SK_RMEM_MAX;
EXPORT_SYMBOL(sysctl_rmem_max);
__u32 sysctl_wmem_default __read_mostly = SK_WMEM_MAX;
__u32 sysctl_rmem_default __read_mostly = SK_RMEM_MAX;

/* Maximal space eaten by iovec or ancillary data plus some space */
int sysctl_optmem_max __read_mostly = sizeof(unsigned long)*(2*UIO_MAXIOV+512);
EXPORT_SYMBOL(sysctl_optmem_max);

int sysctl_tstamp_allow_data __read_mostly = 1;

struct static_key memalloc_socks = STATIC_KEY_INIT_FALSE;
EXPORT_SYMBOL_GPL(memalloc_socks);

/**
 * sk_set_memalloc - sets %SOCK_MEMALLOC
 * @sk: socket to set it on
 *
 * Set %SOCK_MEMALLOC on a socket for access to emergency reserves.
 * It's the responsibility of the admin to adjust min_free_kbytes
 * to meet the requirements.
 */
void sk_set_memalloc(struct sock *sk)
{
    sock_set_flag(sk, SOCK_MEMALLOC);
    sk->sk_allocation |= __GFP_MEMALLOC;
    static_key_slow_inc(&memalloc_socks);
}
EXPORT_SYMBOL_GPL(sk_set_memalloc);

void sk_clear_memalloc(struct sock *sk)
{
    sock_reset_flag(sk, SOCK_MEMALLOC);
    sk->sk_allocation &= ~__GFP_MEMALLOC;
    static_key_slow_dec(&memalloc_socks);

    /*
     * SOCK_MEMALLOC is allowed to ignore rmem limits to ensure forward
     * progress of swapping. However, if SOCK_MEMALLOC is cleared while
     * it has rmem allocations there is a risk that the user of the
     * socket cannot make forward progress due to exceeding the rmem
     * limits. By rights, sk_clear_memalloc() should only be called
     * on sockets being torn down but warn and reset the accounting if
     * that assumption breaks.
     */
    if (WARN_ON(sk->sk_forward_alloc))
        sk_mem_reclaim(sk);
}
EXPORT_SYMBOL_GPL(sk_clear_memalloc);
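/*
 * Usage sketch (illustrative): a swap-over-network transport marks its
 * socket so allocations on its behalf may dip into emergency reserves
 * while the machine is reclaiming memory, roughly:
 *
 *      sk_set_memalloc(sk);    // while the swap file is active
 *      ...
 *      sk_clear_memalloc(sk);  // at teardown, once queues are drained
 *
 * In this era the notable caller is the SUNRPC transport used for
 * swap-over-NFS; the pairing shown here is the intended discipline, per
 * the WARN_ON in sk_clear_memalloc() above.
 */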
int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
    int ret;
    unsigned long pflags = current->flags;

    /* these should have been dropped before queueing */
    BUG_ON(!sock_flag(sk, SOCK_MEMALLOC));

    current->flags |= PF_MEMALLOC;
    ret = sk->sk_backlog_rcv(sk, skb);
    tsk_restore_flags(current, pflags, PF_MEMALLOC);

    return ret;
}
EXPORT_SYMBOL(__sk_backlog_rcv);

static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen)
{
    struct timeval tv;

    if (optlen < sizeof(tv))
        return -EINVAL;
    if (copy_from_user(&tv, optval, sizeof(tv)))
        return -EFAULT;
    if (tv.tv_usec < 0 || tv.tv_usec >= USEC_PER_SEC)
        return -EDOM;

    if (tv.tv_sec < 0) {
        static int warned __read_mostly;

        *timeo_p = 0;
        if (warned < 10 && net_ratelimit()) {
            warned++;
            pr_info("%s: `%s' (pid %d) tries to set negative timeout\n",
                __func__, current->comm, task_pid_nr(current));
        }
        return 0;
    }
    *timeo_p = MAX_SCHEDULE_TIMEOUT;
    if (tv.tv_sec == 0 && tv.tv_usec == 0)
        return 0;
    if (tv.tv_sec < (MAX_SCHEDULE_TIMEOUT/HZ - 1))
        *timeo_p = tv.tv_sec*HZ + (tv.tv_usec+(1000000/HZ-1))/(1000000/HZ);
    return 0;
}
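/*
 * Worked example (illustrative): with HZ == 100, a userspace
 *
 *      struct timeval tv = { .tv_sec = 2, .tv_usec = 500000 };
 *      setsockopt(fd, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv));
 *
 * reaches sock_set_timeout() above and yields 2*100 + 50 = 250 jiffies;
 * a tv_usec that is not a whole number of ticks (10 ms here) is rounded
 * up to the next tick by the (1000000/HZ - 1) term. A zero timeval
 * means "wait forever" (MAX_SCHEDULE_TIMEOUT), not "do not wait".
 */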
static void sock_warn_obsolete_bsdism(const char *name)
{
    static int warned;
    static char warncomm[TASK_COMM_LEN];

    if (strcmp(warncomm, current->comm) && warned < 5) {
        strcpy(warncomm, current->comm);
        pr_warn("process `%s' is using obsolete %s SO_BSDCOMPAT\n",
            warncomm, name);
        warned++;
    }
}

#define SK_FLAGS_TIMESTAMP ((1UL << SOCK_TIMESTAMP) | (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE))

static void sock_disable_timestamp(struct sock *sk, unsigned long flags)
{
    if (sk->sk_flags & flags) {
        sk->sk_flags &= ~flags;
        if (!(sk->sk_flags & SK_FLAGS_TIMESTAMP))
            net_disable_timestamp();
    }
}

int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
    int err;
    unsigned long flags;
    struct sk_buff_head *list = &sk->sk_receive_queue;

    if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
        atomic_inc(&sk->sk_drops);
        trace_sock_rcvqueue_full(sk, skb);
        return -ENOMEM;
    }

    err = sk_filter(sk, skb);
    if (err)
        return err;

    if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
        atomic_inc(&sk->sk_drops);
        return -ENOBUFS;
    }

    skb->dev = NULL;
    skb_set_owner_r(skb, sk);

    /* we escape from the RCU-protected region, make sure we don't leak
     * a non-refcounted dst
     */
    skb_dst_force(skb);

    spin_lock_irqsave(&list->lock, flags);
    skb->dropcount = atomic_read(&sk->sk_drops);
    __skb_queue_tail(list, skb);
    spin_unlock_irqrestore(&list->lock, flags);

    if (!sock_flag(sk, SOCK_DEAD))
        sk->sk_data_ready(sk);
    return 0;
}
EXPORT_SYMBOL(sock_queue_rcv_skb);
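/*
 * Usage sketch (illustrative, hypothetical protocol "foo"): a datagram
 * protocol's receive path usually ends here, mapping the error codes
 * above onto NET_RX_* verdicts:
 *
 *      static int foo_queue_rcv(struct sock *sk, struct sk_buff *skb)
 *      {
 *              if (sock_queue_rcv_skb(sk, skb) < 0) {
 *                      kfree_skb(skb);
 *                      return NET_RX_DROP;
 *              }
 *              return NET_RX_SUCCESS;
 *      }
 *
 * Note that on failure the skb is *not* consumed by
 * sock_queue_rcv_skb(), so the caller owns the free, as sketched.
 */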
int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
{
    int rc = NET_RX_SUCCESS;

    if (sk_filter(sk, skb))
        goto discard_and_relse;

    skb->dev = NULL;

    if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) {
        atomic_inc(&sk->sk_drops);
        goto discard_and_relse;
    }
    if (nested)
        bh_lock_sock_nested(sk);
    else
        bh_lock_sock(sk);
    if (!sock_owned_by_user(sk)) {
        /*
         * trylock + unlock semantics:
         */
        mutex_acquire(&sk->sk_lock.dep_map, 0, 1, _RET_IP_);

        rc = sk_backlog_rcv(sk, skb);

        mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
    } else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
        bh_unlock_sock(sk);
        atomic_inc(&sk->sk_drops);
        goto discard_and_relse;
    }
    bh_unlock_sock(sk);
out:
    sock_put(sk);
    return rc;
discard_and_relse:
    kfree_skb(skb);
    goto out;
}
EXPORT_SYMBOL(sk_receive_skb);

struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie)
{
    struct dst_entry *dst = __sk_dst_get(sk);

    if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
        sk_tx_queue_clear(sk);
        RCU_INIT_POINTER(sk->sk_dst_cache, NULL);
        dst_release(dst);
        return NULL;
    }

    return dst;
}
EXPORT_SYMBOL(__sk_dst_check);

struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie)
{
    struct dst_entry *dst = sk_dst_get(sk);

    if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
        sk_dst_reset(sk);
        dst_release(dst);
        return NULL;
    }

    return dst;
}
EXPORT_SYMBOL(sk_dst_check);
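/*
 * Usage sketch (illustrative): an output path revalidates its cached
 * route before each transmit and falls back to a fresh lookup when the
 * check fails, roughly how the IPv4 datagram send paths use it:
 *
 *      dst = sk_dst_check(sk, 0);
 *      if (!dst) {
 *              // cached route went stale: perform a new route lookup,
 *              // then sk_dst_set(sk, dst) to re-prime the cache
 *      }
 */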
static int sock_setbindtodevice(struct sock *sk, char __user *optval,
                                int optlen)
{
    int ret = -ENOPROTOOPT;
#ifdef CONFIG_NETDEVICES
    struct net *net = sock_net(sk);
    char devname[IFNAMSIZ];
    int index;

    /* Sorry... */
    ret = -EPERM;
    if (!ns_capable(net->user_ns, CAP_NET_RAW))
        goto out;

    ret = -EINVAL;
    if (optlen < 0)
        goto out;

    /* Bind this socket to a particular device like "eth0",
     * as specified in the passed interface name. If the
     * name is "" or the option length is zero the socket
     * is not bound.
     */
    if (optlen > IFNAMSIZ - 1)
        optlen = IFNAMSIZ - 1;
    memset(devname, 0, sizeof(devname));

    ret = -EFAULT;
    if (copy_from_user(devname, optval, optlen))
        goto out;

    index = 0;
    if (devname[0] != '\0') {
        struct net_device *dev;

        rcu_read_lock();
        dev = dev_get_by_name_rcu(net, devname);
        if (dev)
            index = dev->ifindex;
        rcu_read_unlock();
        ret = -ENODEV;
        if (!dev)
            goto out;
    }

    lock_sock(sk);
    sk->sk_bound_dev_if = index;
    sk_dst_reset(sk);
    release_sock(sk);

    ret = 0;

out:
#endif

    return ret;
}
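/*
 * Userspace view (illustrative): binding requires CAP_NET_RAW, and an
 * empty name unbinds; "eth0" is just an example interface name:
 *
 *      setsockopt(fd, SOL_SOCKET, SO_BINDTODEVICE, "eth0", 5);   // bind
 *      setsockopt(fd, SOL_SOCKET, SO_BINDTODEVICE, "", 0);       // unbind
 *
 * Names longer than IFNAMSIZ-1 bytes are silently truncated by the
 * code above rather than rejected.
 */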
static int sock_getbindtodevice(struct sock *sk, char __user *optval,
                                int __user *optlen, int len)
{
    int ret = -ENOPROTOOPT;
#ifdef CONFIG_NETDEVICES
    struct net *net = sock_net(sk);
    char devname[IFNAMSIZ];

    if (sk->sk_bound_dev_if == 0) {
        len = 0;
        goto zero;
    }

    ret = -EINVAL;
    if (len < IFNAMSIZ)
        goto out;

    ret = netdev_get_name(net, devname, sk->sk_bound_dev_if);
    if (ret)
        goto out;

    len = strlen(devname) + 1;

    ret = -EFAULT;
    if (copy_to_user(optval, devname, len))
        goto out;

zero:
    ret = -EFAULT;
    if (put_user(len, optlen))
        goto out;

    ret = 0;

out:
#endif

    return ret;
}

static inline void sock_valbool_flag(struct sock *sk, int bit, int valbool)
{
    if (valbool)
        sock_set_flag(sk, bit);
    else
        sock_reset_flag(sk, bit);
}

bool sk_mc_loop(struct sock *sk)
{
    if (dev_recursion_level())
        return false;
    if (!sk)
        return true;
    switch (sk->sk_family) {
    case AF_INET:
        return inet_sk(sk)->mc_loop;
#if IS_ENABLED(CONFIG_IPV6)
    case AF_INET6:
        return inet6_sk(sk)->mc_loop;
#endif
    }
    WARN_ON(1);
    return true;
}
EXPORT_SYMBOL(sk_mc_loop);

/*
 * This is meant for all protocols to use and covers goings on
 * at the socket level. Everything here is generic.
 */
int sock_setsockopt(struct socket *sock, int level, int optname,
                    char __user *optval, unsigned int optlen)
{
    struct sock *sk = sock->sk;
    int val;
    int valbool;
    struct linger ling;
    int ret = 0;

    /*
     * Options without arguments
     */

    if (optname == SO_BINDTODEVICE)
        return sock_setbindtodevice(sk, optval, optlen);

    if (optlen < sizeof(int))
        return -EINVAL;

    if (get_user(val, (int __user *)optval))
        return -EFAULT;

    valbool = val ? 1 : 0;

    lock_sock(sk);

    switch (optname) {
    case SO_DEBUG:
        if (val && !capable(CAP_NET_ADMIN))
            ret = -EACCES;
        else
            sock_valbool_flag(sk, SOCK_DBG, valbool);
        break;
    case SO_REUSEADDR:
        sk->sk_reuse = (valbool ? SK_CAN_REUSE : SK_NO_REUSE);
        break;
    case SO_REUSEPORT:
        sk->sk_reuseport = valbool;
        break;
    case SO_TYPE:
    case SO_PROTOCOL:
    case SO_DOMAIN:
    case SO_ERROR:
        ret = -ENOPROTOOPT;
        break;
    case SO_DONTROUTE:
        sock_valbool_flag(sk, SOCK_LOCALROUTE, valbool);
        break;
    case SO_BROADCAST:
        sock_valbool_flag(sk, SOCK_BROADCAST, valbool);
        break;
    case SO_SNDBUF:
        /* Don't error on this; BSD doesn't, and if you think
         * about it this is right. Otherwise apps have to
         * play 'guess the biggest size' games. RCVBUF/SNDBUF
         * are treated in BSD as hints.
         */
        val = min_t(u32, val, sysctl_wmem_max);
set_sndbuf:
        sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
        sk->sk_sndbuf = max_t(u32, val * 2, SOCK_MIN_SNDBUF);
        /* Wake up sending tasks if we upped the value. */
        sk->sk_write_space(sk);
        break;

    case SO_SNDBUFFORCE:
        if (!capable(CAP_NET_ADMIN)) {
            ret = -EPERM;
            break;
        }
        goto set_sndbuf;

    case SO_RCVBUF:
        /* Don't error on this; BSD doesn't, and if you think
         * about it this is right. Otherwise apps have to
         * play 'guess the biggest size' games. RCVBUF/SNDBUF
         * are treated in BSD as hints.
         */
        val = min_t(u32, val, sysctl_rmem_max);
set_rcvbuf:
        sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
        /*
         * We double it on the way in to account for
         * "struct sk_buff" etc. overhead. Applications
         * assume that the SO_RCVBUF setting they make will
         * allow that much actual data to be received on that
         * socket.
         *
         * Applications are unaware that "struct sk_buff" and
         * other overheads allocate from the receive buffer
         * during socket buffer allocation.
         *
         * And after considering the possible alternatives,
         * returning the value we actually used in getsockopt
         * is the most desirable behavior.
         */
        sk->sk_rcvbuf = max_t(u32, val * 2, SOCK_MIN_RCVBUF);
        break;

    case SO_RCVBUFFORCE:
        if (!capable(CAP_NET_ADMIN)) {
            ret = -EPERM;
            break;
        }
        goto set_rcvbuf;

    case SO_KEEPALIVE:
#ifdef CONFIG_INET
        if (sk->sk_protocol == IPPROTO_TCP &&
            sk->sk_type == SOCK_STREAM)
            tcp_set_keepalive(sk, valbool);
#endif
        sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool);
        break;

    case SO_OOBINLINE:
        sock_valbool_flag(sk, SOCK_URGINLINE, valbool);
        break;

    case SO_NO_CHECK:
        sk->sk_no_check_tx = valbool;
        break;

    case SO_PRIORITY:
        if ((val >= 0 && val <= 6) ||
            ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
            sk->sk_priority = val;
        else
            ret = -EPERM;
        break;

    case SO_LINGER:
        if (optlen < sizeof(ling)) {
            ret = -EINVAL;  /* 1003.1g */
            break;
        }
        if (copy_from_user(&ling, optval, sizeof(ling))) {
            ret = -EFAULT;
            break;
        }
        if (!ling.l_onoff)
            sock_reset_flag(sk, SOCK_LINGER);
        else {
#if (BITS_PER_LONG == 32)
            if ((unsigned int)ling.l_linger >= MAX_SCHEDULE_TIMEOUT/HZ)
                sk->sk_lingertime = MAX_SCHEDULE_TIMEOUT;
            else
#endif
                sk->sk_lingertime = (unsigned int)ling.l_linger * HZ;
            sock_set_flag(sk, SOCK_LINGER);
        }
        break;

    case SO_BSDCOMPAT:
        sock_warn_obsolete_bsdism("setsockopt");
        break;

    case SO_PASSCRED:
        if (valbool)
            set_bit(SOCK_PASSCRED, &sock->flags);
        else
            clear_bit(SOCK_PASSCRED, &sock->flags);
        break;

    case SO_TIMESTAMP:
    case SO_TIMESTAMPNS:
        if (valbool) {
            if (optname == SO_TIMESTAMP)
                sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
            else
                sock_set_flag(sk, SOCK_RCVTSTAMPNS);
            sock_set_flag(sk, SOCK_RCVTSTAMP);
            sock_enable_timestamp(sk, SOCK_TIMESTAMP);
        } else {
            sock_reset_flag(sk, SOCK_RCVTSTAMP);
            sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
        }
        break;

    case SO_TIMESTAMPING:
        if (val & ~SOF_TIMESTAMPING_MASK) {
            ret = -EINVAL;
            break;
        }
        if (val & SOF_TIMESTAMPING_OPT_ID &&
            !(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)) {
            if (sk->sk_protocol == IPPROTO_TCP) {
                if (sk->sk_state != TCP_ESTABLISHED) {
                    ret = -EINVAL;
                    break;
                }
                sk->sk_tskey = tcp_sk(sk)->snd_una;
            } else {
                sk->sk_tskey = 0;
            }
        }
        sk->sk_tsflags = val;
        if (val & SOF_TIMESTAMPING_RX_SOFTWARE)
            sock_enable_timestamp(sk,
                                  SOCK_TIMESTAMPING_RX_SOFTWARE);
        else
            sock_disable_timestamp(sk,
                                   (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE));
        break;

    case SO_RCVLOWAT:
        if (val < 0)
            val = INT_MAX;
        sk->sk_rcvlowat = val ? : 1;
        break;

    case SO_RCVTIMEO:
        ret = sock_set_timeout(&sk->sk_rcvtimeo, optval, optlen);
        break;

    case SO_SNDTIMEO:
        ret = sock_set_timeout(&sk->sk_sndtimeo, optval, optlen);
        break;

    case SO_ATTACH_FILTER:
        ret = -EINVAL;
        if (optlen == sizeof(struct sock_fprog)) {
            struct sock_fprog fprog;

            ret = -EFAULT;
            if (copy_from_user(&fprog, optval, sizeof(fprog)))
                break;

            ret = sk_attach_filter(&fprog, sk);
        }
        break;

    case SO_ATTACH_BPF:
        ret = -EINVAL;
        if (optlen == sizeof(u32)) {
            u32 ufd;

            ret = -EFAULT;
            if (copy_from_user(&ufd, optval, sizeof(ufd)))
                break;

            ret = sk_attach_bpf(ufd, sk);
        }
        break;

    case SO_DETACH_FILTER:
        ret = sk_detach_filter(sk);
        break;

    case SO_LOCK_FILTER:
        if (sock_flag(sk, SOCK_FILTER_LOCKED) && !valbool)
            ret = -EPERM;
        else
            sock_valbool_flag(sk, SOCK_FILTER_LOCKED, valbool);
        break;

    case SO_PASSSEC:
        if (valbool)
            set_bit(SOCK_PASSSEC, &sock->flags);
        else
            clear_bit(SOCK_PASSSEC, &sock->flags);
        break;

    case SO_MARK:
        if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
            ret = -EPERM;
        else
            sk->sk_mark = val;
        break;

        /* We implement the SO_SNDLOWAT etc to
           not be settable (1003.1g 5.3) */
    case SO_RXQ_OVFL:
        sock_valbool_flag(sk, SOCK_RXQ_OVFL, valbool);
        break;

    case SO_WIFI_STATUS:
        sock_valbool_flag(sk, SOCK_WIFI_STATUS, valbool);
        break;

    case SO_PEEK_OFF:
        if (sock->ops->set_peek_off)
            ret = sock->ops->set_peek_off(sk, val);
        else
            ret = -EOPNOTSUPP;
        break;

    case SO_NOFCS:
        sock_valbool_flag(sk, SOCK_NOFCS, valbool);
        break;

    case SO_SELECT_ERR_QUEUE:
        sock_valbool_flag(sk, SOCK_SELECT_ERR_QUEUE, valbool);
        break;

#ifdef CONFIG_NET_RX_BUSY_POLL
    case SO_BUSY_POLL:
        /* allow unprivileged users to decrease the value */
        if ((val > sk->sk_ll_usec) && !capable(CAP_NET_ADMIN))
            ret = -EPERM;
        else {
            if (val < 0)
                ret = -EINVAL;
            else
                sk->sk_ll_usec = val;
        }
        break;
#endif

    case SO_MAX_PACING_RATE:
        sk->sk_max_pacing_rate = val;
        sk->sk_pacing_rate = min(sk->sk_pacing_rate,
                                 sk->sk_max_pacing_rate);
        break;

    default:
        ret = -ENOPROTOOPT;
        break;
    }
    release_sock(sk);
    return ret;
}
EXPORT_SYMBOL(sock_setsockopt);
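/*
 * Userspace view of the doubling behavior above (illustrative):
 *
 *      int val = 65536, out;
 *      socklen_t len = sizeof(out);
 *      setsockopt(fd, SOL_SOCKET, SO_SNDBUF, &val, sizeof(val));
 *      getsockopt(fd, SOL_SOCKET, SO_SNDBUF, &out, &len);
 *      // out is 131072: val is clamped to wmem_max, then doubled to
 *      // cover struct sk_buff overhead, and getsockopt reports the
 *      // value actually used, as the SO_RCVBUF comment explains.
 */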
static void cred_to_ucred(struct pid *pid, const struct cred *cred,
                          struct ucred *ucred)
{
    ucred->pid = pid_vnr(pid);
    ucred->uid = ucred->gid = -1;
    if (cred) {
        struct user_namespace *current_ns = current_user_ns();

        ucred->uid = from_kuid_munged(current_ns, cred->euid);
        ucred->gid = from_kgid_munged(current_ns, cred->egid);
    }
}

int sock_getsockopt(struct socket *sock, int level, int optname,
                    char __user *optval, int __user *optlen)
{
    struct sock *sk = sock->sk;

    union {
        int val;
        struct linger ling;
        struct timeval tm;
    } v;

    int lv = sizeof(int);
    int len;

    if (get_user(len, optlen))
        return -EFAULT;
    if (len < 0)
        return -EINVAL;

    memset(&v, 0, sizeof(v));

    switch (optname) {
    case SO_DEBUG:
        v.val = sock_flag(sk, SOCK_DBG);
        break;

    case SO_DONTROUTE:
        v.val = sock_flag(sk, SOCK_LOCALROUTE);
        break;

    case SO_BROADCAST:
        v.val = sock_flag(sk, SOCK_BROADCAST);
        break;

    case SO_SNDBUF:
        v.val = sk->sk_sndbuf;
        break;

    case SO_RCVBUF:
        v.val = sk->sk_rcvbuf;
        break;

    case SO_REUSEADDR:
        v.val = sk->sk_reuse;
        break;

    case SO_REUSEPORT:
        v.val = sk->sk_reuseport;
        break;

    case SO_KEEPALIVE:
        v.val = sock_flag(sk, SOCK_KEEPOPEN);
        break;

    case SO_TYPE:
        v.val = sk->sk_type;
        break;

    case SO_PROTOCOL:
        v.val = sk->sk_protocol;
        break;

    case SO_DOMAIN:
        v.val = sk->sk_family;
        break;

    case SO_ERROR:
        v.val = -sock_error(sk);
        if (v.val == 0)
            v.val = xchg(&sk->sk_err_soft, 0);
        break;

    case SO_OOBINLINE:
        v.val = sock_flag(sk, SOCK_URGINLINE);
        break;

    case SO_NO_CHECK:
        v.val = sk->sk_no_check_tx;
        break;

    case SO_PRIORITY:
        v.val = sk->sk_priority;
        break;

    case SO_LINGER:
        lv = sizeof(v.ling);
        v.ling.l_onoff = sock_flag(sk, SOCK_LINGER);
        v.ling.l_linger = sk->sk_lingertime / HZ;
        break;

    case SO_BSDCOMPAT:
        sock_warn_obsolete_bsdism("getsockopt");
        break;

    case SO_TIMESTAMP:
        v.val = sock_flag(sk, SOCK_RCVTSTAMP) &&
            !sock_flag(sk, SOCK_RCVTSTAMPNS);
        break;

    case SO_TIMESTAMPNS:
        v.val = sock_flag(sk, SOCK_RCVTSTAMPNS);
        break;

    case SO_TIMESTAMPING:
        v.val = sk->sk_tsflags;
        break;

    case SO_RCVTIMEO:
        lv = sizeof(struct timeval);
        if (sk->sk_rcvtimeo == MAX_SCHEDULE_TIMEOUT) {
            v.tm.tv_sec = 0;
            v.tm.tv_usec = 0;
        } else {
            v.tm.tv_sec = sk->sk_rcvtimeo / HZ;
            v.tm.tv_usec = ((sk->sk_rcvtimeo % HZ) * 1000000) / HZ;
        }
        break;

    case SO_SNDTIMEO:
        lv = sizeof(struct timeval);
        if (sk->sk_sndtimeo == MAX_SCHEDULE_TIMEOUT) {
            v.tm.tv_sec = 0;
            v.tm.tv_usec = 0;
        } else {
            v.tm.tv_sec = sk->sk_sndtimeo / HZ;
            v.tm.tv_usec = ((sk->sk_sndtimeo % HZ) * 1000000) / HZ;
        }
        break;

    case SO_RCVLOWAT:
        v.val = sk->sk_rcvlowat;
        break;

    case SO_SNDLOWAT:
        v.val = 1;
        break;

    case SO_PASSCRED:
        v.val = !!test_bit(SOCK_PASSCRED, &sock->flags);
        break;

    case SO_PEERCRED:
    {
        struct ucred peercred;
        if (len > sizeof(peercred))
            len = sizeof(peercred);
        cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
        if (copy_to_user(optval, &peercred, len))
            return -EFAULT;
        goto lenout;
    }

    case SO_PEERNAME:
    {
        char address[128];

        if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2))
            return -ENOTCONN;
        if (lv < len)
            return -EINVAL;
        if (copy_to_user(optval, address, len))
            return -EFAULT;
        goto lenout;
    }

    /* Dubious BSD thing... Probably nobody even uses it, but
     * the UNIX standard wants it for whatever reason... -DaveM
     */
    case SO_ACCEPTCONN:
        v.val = sk->sk_state == TCP_LISTEN;
        break;

    case SO_PASSSEC:
        v.val = !!test_bit(SOCK_PASSSEC, &sock->flags);
        break;

    case SO_PEERSEC:
        return security_socket_getpeersec_stream(sock, optval, optlen, len);

    case SO_MARK:
        v.val = sk->sk_mark;
        break;

    case SO_RXQ_OVFL:
        v.val = sock_flag(sk, SOCK_RXQ_OVFL);
        break;

    case SO_WIFI_STATUS:
        v.val = sock_flag(sk, SOCK_WIFI_STATUS);
        break;

    case SO_PEEK_OFF:
        if (!sock->ops->set_peek_off)
            return -EOPNOTSUPP;

        v.val = sk->sk_peek_off;
        break;

    case SO_NOFCS:
        v.val = sock_flag(sk, SOCK_NOFCS);
        break;

    case SO_BINDTODEVICE:
        return sock_getbindtodevice(sk, optval, optlen, len);

    case SO_GET_FILTER:
        len = sk_get_filter(sk, (struct sock_filter __user *)optval, len);
        if (len < 0)
            return len;

        goto lenout;

    case SO_LOCK_FILTER:
        v.val = sock_flag(sk, SOCK_FILTER_LOCKED);
        break;

    case SO_BPF_EXTENSIONS:
        v.val = bpf_tell_extensions();
        break;

    case SO_SELECT_ERR_QUEUE:
        v.val = sock_flag(sk, SOCK_SELECT_ERR_QUEUE);
        break;

#ifdef CONFIG_NET_RX_BUSY_POLL
    case SO_BUSY_POLL:
        v.val = sk->sk_ll_usec;
        break;
#endif

    case SO_MAX_PACING_RATE:
        v.val = sk->sk_max_pacing_rate;
        break;

    case SO_INCOMING_CPU:
        v.val = sk->sk_incoming_cpu;
        break;

    default:
        return -ENOPROTOOPT;
    }

    if (len > lv)
        len = lv;
    if (copy_to_user(optval, &v, len))
        return -EFAULT;
lenout:
    if (put_user(len, optlen))
        return -EFAULT;
    return 0;
}
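/*
 * Usage note (illustrative): SO_ERROR is read-and-clear, which is why
 * the code above xchg()s sk_err_soft after sock_error() has consumed
 * sk_err; a typical nonblocking connect() completion check looks like:
 *
 *      int err;
 *      socklen_t elen = sizeof(err);
 *      getsockopt(fd, SOL_SOCKET, SO_ERROR, &err, &elen);
 *      // err == 0 means the connect succeeded; reading it cleared it
 */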
/*
 * Initialize an sk_lock.
 *
 * (We also register the sk_lock with the lock validator.)
 */
static inline void sock_lock_init(struct sock *sk)
{
    sock_lock_init_class_and_name(sk,
            af_family_slock_key_strings[sk->sk_family],
            af_family_slock_keys + sk->sk_family,
            af_family_key_strings[sk->sk_family],
            af_family_keys + sk->sk_family);
}

/*
 * Copy all fields from osk to nsk but nsk->sk_refcnt must not change yet,
 * even temporarily, because of RCU lookups. sk_node should also be left
 * as-is. We must not copy fields between sk_dontcopy_begin and
 * sk_dontcopy_end.
 */
static void sock_copy(struct sock *nsk, const struct sock *osk)
{
#ifdef CONFIG_SECURITY_NETWORK
    void *sptr = nsk->sk_security;
#endif
    memcpy(nsk, osk, offsetof(struct sock, sk_dontcopy_begin));

    memcpy(&nsk->sk_dontcopy_end, &osk->sk_dontcopy_end,
           osk->sk_prot->obj_size - offsetof(struct sock, sk_dontcopy_end));

#ifdef CONFIG_SECURITY_NETWORK
    nsk->sk_security = sptr;
    security_sk_clone(osk, nsk);
#endif
}

void sk_prot_clear_portaddr_nulls(struct sock *sk, int size)
{
    unsigned long nulls1, nulls2;

    nulls1 = offsetof(struct sock, __sk_common.skc_node.next);
    nulls2 = offsetof(struct sock, __sk_common.skc_portaddr_node.next);
    if (nulls1 > nulls2)
        swap(nulls1, nulls2);

    if (nulls1 != 0)
        memset((char *)sk, 0, nulls1);
    memset((char *)sk + nulls1 + sizeof(void *), 0,
           nulls2 - nulls1 - sizeof(void *));
    memset((char *)sk + nulls2 + sizeof(void *), 0,
           size - nulls2 - sizeof(void *));
}
EXPORT_SYMBOL(sk_prot_clear_portaddr_nulls);

static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority,
                                  int family)
{
    struct sock *sk;
    struct kmem_cache *slab;

    slab = prot->slab;
    if (slab != NULL) {
        sk = kmem_cache_alloc(slab, priority & ~__GFP_ZERO);
        if (!sk)
            return sk;
        if (priority & __GFP_ZERO) {
            if (prot->clear_sk)
                prot->clear_sk(sk, prot->obj_size);
            else
                sk_prot_clear_nulls(sk, prot->obj_size);
        }
    } else
        sk = kmalloc(prot->obj_size, priority);

    if (sk != NULL) {
        kmemcheck_annotate_bitfield(sk, flags);

        if (security_sk_alloc(sk, family, priority))
            goto out_free;

        if (!try_module_get(prot->owner))
            goto out_free_sec;
        sk_tx_queue_clear(sk);
    }

    return sk;

out_free_sec:
    security_sk_free(sk);
out_free:
    if (slab != NULL)
        kmem_cache_free(slab, sk);
    else
        kfree(sk);
    return NULL;
}

static void sk_prot_free(struct proto *prot, struct sock *sk)
{
    struct kmem_cache *slab;
    struct module *owner;

    owner = prot->owner;
    slab = prot->slab;

    security_sk_free(sk);
    if (slab != NULL)
        kmem_cache_free(slab, sk);
    else
        kfree(sk);
    module_put(owner);
}

#if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
void sock_update_netprioidx(struct sock *sk)
{
    if (in_interrupt())
        return;

    sk->sk_cgrp_prioidx = task_netprioidx(current);
}
EXPORT_SYMBOL_GPL(sock_update_netprioidx);
#endif

/**
 * sk_alloc - All socket objects are allocated here
 * @net: the applicable net namespace
 * @family: protocol family
 * @priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
 * @prot: struct proto associated with this new sock instance
 */
struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
                      struct proto *prot)
{
    struct sock *sk;

    sk = sk_prot_alloc(prot, priority | __GFP_ZERO, family);
    if (sk) {
        sk->sk_family = family;
        /*
         * See comment in struct sock definition to understand
         * why we need sk_prot_creator -acme
         */
        sk->sk_prot = sk->sk_prot_creator = prot;
        sock_lock_init(sk);
        sock_net_set(sk, get_net(net));
        atomic_set(&sk->sk_wmem_alloc, 1);

        sock_update_classid(sk);
        sock_update_netprioidx(sk);
    }

    return sk;
}
EXPORT_SYMBOL(sk_alloc);
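/*
 * Usage sketch (illustrative, hypothetical family "foo"): a protocol's
 * create() hook typically pairs sk_alloc() with sock_init_data():
 *
 *      static int foo_create(struct net *net, struct socket *sock,
 *                            int protocol, int kern)
 *      {
 *              struct sock *sk;
 *
 *              sk = sk_alloc(net, PF_FOO, GFP_KERNEL, &foo_proto);
 *              if (!sk)
 *                      return -ENOMEM;
 *              sock_init_data(sock, sk);
 *              return 0;
 *      }
 *
 * PF_FOO and foo_proto are stand-ins, not real identifiers.
 */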
  1217. static void __sk_free(struct sock *sk)
  1218. {
  1219. struct sk_filter *filter;
  1220. if (sk->sk_destruct)
  1221. sk->sk_destruct(sk);
  1222. filter = rcu_dereference_check(sk->sk_filter,
  1223. atomic_read(&sk->sk_wmem_alloc) == 0);
  1224. if (filter) {
  1225. sk_filter_uncharge(sk, filter);
  1226. RCU_INIT_POINTER(sk->sk_filter, NULL);
  1227. }
  1228. sock_disable_timestamp(sk, SK_FLAGS_TIMESTAMP);
  1229. if (atomic_read(&sk->sk_omem_alloc))
  1230. pr_debug("%s: optmem leakage (%d bytes) detected\n",
  1231. __func__, atomic_read(&sk->sk_omem_alloc));
  1232. if (sk->sk_peer_cred)
  1233. put_cred(sk->sk_peer_cred);
  1234. put_pid(sk->sk_peer_pid);
  1235. put_net(sock_net(sk));
  1236. sk_prot_free(sk->sk_prot_creator, sk);
  1237. }
  1238. void sk_free(struct sock *sk)
  1239. {
  1240. /*
  1241. * We subtract one from sk_wmem_alloc and can know if
  1242. * some packets are still in some tx queue.
  1243. * If not null, sock_wfree() will call __sk_free(sk) later
  1244. */
  1245. if (atomic_dec_and_test(&sk->sk_wmem_alloc))
  1246. __sk_free(sk);
  1247. }
  1248. EXPORT_SYMBOL(sk_free);
  1249. /*
  1250. * Last sock_put should drop reference to sk->sk_net. It has already
  1251. * been dropped in sk_change_net. Taking reference to stopping namespace
  1252. * is not an option.
  1253. * Take reference to a socket to remove it from hash _alive_ and after that
  1254. * destroy it in the context of init_net.
  1255. */
  1256. void sk_release_kernel(struct sock *sk)
  1257. {
  1258. if (sk == NULL || sk->sk_socket == NULL)
  1259. return;
  1260. sock_hold(sk);
  1261. sock_release(sk->sk_socket);
  1262. release_net(sock_net(sk));
  1263. sock_net_set(sk, get_net(&init_net));
  1264. sock_put(sk);
  1265. }
  1266. EXPORT_SYMBOL(sk_release_kernel);

static void sk_update_clone(const struct sock *sk, struct sock *newsk)
{
        if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
                sock_update_memcg(newsk);
}

/**
 * sk_clone_lock - clone a socket, and lock its clone
 * @sk: the socket to clone
 * @priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
 *
 * Caller must unlock socket even in error path (bh_unlock_sock(newsk))
 */
struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
{
        struct sock *newsk;
        bool is_charged = true;

        newsk = sk_prot_alloc(sk->sk_prot, priority, sk->sk_family);
        if (newsk != NULL) {
                struct sk_filter *filter;

                sock_copy(newsk, sk);

                /* SANITY */
                get_net(sock_net(newsk));
                sk_node_init(&newsk->sk_node);
                sock_lock_init(newsk);
                bh_lock_sock(newsk);
                newsk->sk_backlog.head = newsk->sk_backlog.tail = NULL;
                newsk->sk_backlog.len = 0;

                atomic_set(&newsk->sk_rmem_alloc, 0);
                /*
                 * sk_wmem_alloc set to one (see sk_free() and sock_wfree())
                 */
                atomic_set(&newsk->sk_wmem_alloc, 1);
                atomic_set(&newsk->sk_omem_alloc, 0);
                skb_queue_head_init(&newsk->sk_receive_queue);
                skb_queue_head_init(&newsk->sk_write_queue);

                spin_lock_init(&newsk->sk_dst_lock);
                rwlock_init(&newsk->sk_callback_lock);
                lockdep_set_class_and_name(&newsk->sk_callback_lock,
                                af_callback_keys + newsk->sk_family,
                                af_family_clock_key_strings[newsk->sk_family]);

                newsk->sk_dst_cache = NULL;
                newsk->sk_wmem_queued = 0;
                newsk->sk_forward_alloc = 0;
                newsk->sk_send_head = NULL;
                newsk->sk_userlocks = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;

                sock_reset_flag(newsk, SOCK_DONE);
                skb_queue_head_init(&newsk->sk_error_queue);

                filter = rcu_dereference_protected(newsk->sk_filter, 1);
                if (filter != NULL)
                        /* though it's an empty new sock, the charging may fail
                         * if sysctl_optmem_max was changed between creation of
                         * original socket and cloning
                         */
                        is_charged = sk_filter_charge(newsk, filter);

                if (unlikely(!is_charged || xfrm_sk_clone_policy(newsk))) {
                        /* It is still a raw copy of the parent, so invalidate
                         * the destructor and make a plain sk_free()
                         */
                        newsk->sk_destruct = NULL;
                        bh_unlock_sock(newsk);
                        sk_free(newsk);
                        newsk = NULL;
                        goto out;
                }

                newsk->sk_err = 0;
                newsk->sk_priority = 0;
                newsk->sk_incoming_cpu = raw_smp_processor_id();
                /*
                 * Before updating sk_refcnt, we must commit prior changes to memory
                 * (Documentation/RCU/rculist_nulls.txt for details)
                 */
                smp_wmb();
                atomic_set(&newsk->sk_refcnt, 2);

                /*
                 * Increment the counter in the same struct proto as the master
                 * sock (sk_refcnt_debug_inc uses newsk->sk_prot->socks, that
                 * is the same as sk->sk_prot->socks, as this field was copied
                 * with memcpy).
                 *
                 * This _changes_ the previous behaviour, where
                 * tcp_create_openreq_child always was incrementing the
                 * equivalent to tcp_prot->socks (inet_sock_nr), so this has
                 * to be taken into account in all callers. -acme
                 */
                sk_refcnt_debug_inc(newsk);
                sk_set_socket(newsk, NULL);
                newsk->sk_wq = NULL;

                sk_update_clone(sk, newsk);

                if (newsk->sk_prot->sockets_allocated)
                        sk_sockets_allocated_inc(newsk);

                if (newsk->sk_flags & SK_FLAGS_TIMESTAMP)
                        net_enable_timestamp();
        }
out:
        return newsk;
}
EXPORT_SYMBOL_GPL(sk_clone_lock);
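
/*
 * Usage sketch (illustrative; cf. inet_csk_clone_lock()): the clone is
 * returned locked with two references, and the caller is responsible for
 * bh_unlock_sock() on it.
 *
 *      struct sock *newsk = sk_clone_lock(sk, GFP_ATOMIC);
 *
 *      if (newsk) {
 *              // ... set up protocol-private state of newsk ...
 *              bh_unlock_sock(newsk);
 *      }
 */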

void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
{
        __sk_dst_set(sk, dst);
        sk->sk_route_caps = dst->dev->features;
        if (sk->sk_route_caps & NETIF_F_GSO)
                sk->sk_route_caps |= NETIF_F_GSO_SOFTWARE;
        sk->sk_route_caps &= ~sk->sk_route_nocaps;
        if (sk_can_gso(sk)) {
                if (dst->header_len) {
                        sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
                } else {
                        sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
                        sk->sk_gso_max_size = dst->dev->gso_max_size;
                        sk->sk_gso_max_segs = dst->dev->gso_max_segs;
                }
        }
}
EXPORT_SYMBOL_GPL(sk_setup_caps);
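
/*
 * Typical call site (an illustrative sketch): once a route lookup has
 * produced a dst_entry for the connection, attach it and derive the
 * device offload capabilities from it.
 *
 *      struct dst_entry *dst = ...;    // result of a route lookup
 *
 *      sk_setup_caps(sk, dst);         // installs dst via __sk_dst_set()
 */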

/*
 * Simple resource managers for sockets.
 */

/*
 * Write buffer destructor automatically called from kfree_skb.
 */
void sock_wfree(struct sk_buff *skb)
{
        struct sock *sk = skb->sk;
        unsigned int len = skb->truesize;

        if (!sock_flag(sk, SOCK_USE_WRITE_QUEUE)) {
                /*
                 * Keep a reference on sk_wmem_alloc, this will be released
                 * after sk_write_space() call
                 */
                atomic_sub(len - 1, &sk->sk_wmem_alloc);
                sk->sk_write_space(sk);
                len = 1;
        }
        /*
         * if sk_wmem_alloc reaches 0, we must finish what sk_free()
         * could not do because of in-flight packets
         */
        if (atomic_sub_and_test(len, &sk->sk_wmem_alloc))
                __sk_free(sk);
}
EXPORT_SYMBOL(sock_wfree);

void skb_orphan_partial(struct sk_buff *skb)
{
        /* The TCP stack sets skb->ooo_okay based on sk_wmem_alloc,
         * so we do not completely orphan the skb, but transfer all
         * accounted bytes but one, to avoid unexpected reorders.
         */
        if (skb->destructor == sock_wfree
#ifdef CONFIG_INET
            || skb->destructor == tcp_wfree
#endif
                ) {
                atomic_sub(skb->truesize - 1, &skb->sk->sk_wmem_alloc);
                skb->truesize = 1;
        } else {
                skb_orphan(skb);
        }
}
EXPORT_SYMBOL(skb_orphan_partial);

/*
 * Read buffer destructor automatically called from kfree_skb.
 */
void sock_rfree(struct sk_buff *skb)
{
        struct sock *sk = skb->sk;
        unsigned int len = skb->truesize;

        atomic_sub(len, &sk->sk_rmem_alloc);
        sk_mem_uncharge(sk, len);
}
EXPORT_SYMBOL(sock_rfree);

/*
 * Buffer destructor for skbs that are not used directly in read or write
 * path, e.g. for error handler skbs. Automatically called from kfree_skb.
 */
void sock_efree(struct sk_buff *skb)
{
        sock_put(skb->sk);
}
EXPORT_SYMBOL(sock_efree);

#ifdef CONFIG_INET
void sock_edemux(struct sk_buff *skb)
{
        struct sock *sk = skb->sk;

        if (sk->sk_state == TCP_TIME_WAIT)
                inet_twsk_put(inet_twsk(sk));
        else
                sock_put(sk);
}
EXPORT_SYMBOL(sock_edemux);
#endif

kuid_t sock_i_uid(struct sock *sk)
{
        kuid_t uid;

        read_lock_bh(&sk->sk_callback_lock);
        uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : GLOBAL_ROOT_UID;
        read_unlock_bh(&sk->sk_callback_lock);
        return uid;
}
EXPORT_SYMBOL(sock_i_uid);

unsigned long sock_i_ino(struct sock *sk)
{
        unsigned long ino;

        read_lock_bh(&sk->sk_callback_lock);
        ino = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_ino : 0;
        read_unlock_bh(&sk->sk_callback_lock);
        return ino;
}
EXPORT_SYMBOL(sock_i_ino);

/*
 * Allocate a skb from the socket's send buffer.
 */
struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
                             gfp_t priority)
{
        if (force || atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
                struct sk_buff *skb = alloc_skb(size, priority);

                if (skb) {
                        skb_set_owner_w(skb, sk);
                        return skb;
                }
        }
        return NULL;
}
EXPORT_SYMBOL(sock_wmalloc);

/*
 * Allocate a memory block from the socket's option memory buffer.
 */
void *sock_kmalloc(struct sock *sk, int size, gfp_t priority)
{
        if ((unsigned int)size <= sysctl_optmem_max &&
            atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
                void *mem;
                /* First do the add, to avoid the race if kmalloc
                 * might sleep.
                 */
                atomic_add(size, &sk->sk_omem_alloc);
                mem = kmalloc(size, priority);
                if (mem)
                        return mem;
                atomic_sub(size, &sk->sk_omem_alloc);
        }
        return NULL;
}
EXPORT_SYMBOL(sock_kmalloc);

/* Free an option memory block. Note, we actually want the inline
 * here as this allows gcc to detect the nullify and fold away the
 * condition entirely.
 */
static inline void __sock_kfree_s(struct sock *sk, void *mem, int size,
                                  const bool nullify)
{
        if (WARN_ON_ONCE(!mem))
                return;
        if (nullify)
                kzfree(mem);
        else
                kfree(mem);
        atomic_sub(size, &sk->sk_omem_alloc);
}

void sock_kfree_s(struct sock *sk, void *mem, int size)
{
        __sock_kfree_s(sk, mem, size, false);
}
EXPORT_SYMBOL(sock_kfree_s);

void sock_kzfree_s(struct sock *sk, void *mem, int size)
{
        __sock_kfree_s(sk, mem, size, true);
}
EXPORT_SYMBOL(sock_kzfree_s);
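
/*
 * Allocation/free pairing (illustrative sketch): the size passed to
 * sock_kfree_s()/sock_kzfree_s() must match the size passed to
 * sock_kmalloc() so that sk_omem_alloc balances out.
 *
 *      void *opt = sock_kmalloc(sk, optlen, GFP_KERNEL);
 *
 *      if (!opt)
 *              return -ENOBUFS;
 *      // ... use opt; pick the zeroing variant for key material ...
 *      sock_kzfree_s(sk, opt, optlen);
 */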

/* It is almost wait_for_tcp_memory minus release_sock/lock_sock.
 * I think these locks should be removed for datagram sockets.
 */
static long sock_wait_for_wmem(struct sock *sk, long timeo)
{
        DEFINE_WAIT(wait);

        clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
        for (;;) {
                if (!timeo)
                        break;
                if (signal_pending(current))
                        break;
                set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
                prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
                if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf)
                        break;
                if (sk->sk_shutdown & SEND_SHUTDOWN)
                        break;
                if (sk->sk_err)
                        break;
                timeo = schedule_timeout(timeo);
        }
        finish_wait(sk_sleep(sk), &wait);
        return timeo;
}

/*
 * Generic send/receive buffer handlers
 */

struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
                                     unsigned long data_len, int noblock,
                                     int *errcode, int max_page_order)
{
        struct sk_buff *skb;
        long timeo;
        int err;

        timeo = sock_sndtimeo(sk, noblock);
        for (;;) {
                err = sock_error(sk);
                if (err != 0)
                        goto failure;

                err = -EPIPE;
                if (sk->sk_shutdown & SEND_SHUTDOWN)
                        goto failure;

                if (sk_wmem_alloc_get(sk) < sk->sk_sndbuf)
                        break;

                set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
                set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
                err = -EAGAIN;
                if (!timeo)
                        goto failure;
                if (signal_pending(current))
                        goto interrupted;
                timeo = sock_wait_for_wmem(sk, timeo);
        }
        skb = alloc_skb_with_frags(header_len, data_len, max_page_order,
                                   errcode, sk->sk_allocation);
        if (skb)
                skb_set_owner_w(skb, sk);
        return skb;

interrupted:
        err = sock_intr_errno(timeo);
failure:
        *errcode = err;
        return NULL;
}
EXPORT_SYMBOL(sock_alloc_send_pskb);

struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
                                    int noblock, int *errcode)
{
        return sock_alloc_send_pskb(sk, size, 0, noblock, errcode, 0);
}
EXPORT_SYMBOL(sock_alloc_send_skb);
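
/*
 * Typical datagram transmit path use (an illustrative sketch; error
 * handling trimmed):
 *
 *      int err;
 *      struct sk_buff *skb;
 *
 *      skb = sock_alloc_send_skb(sk, hlen + len + tlen,
 *                                msg->msg_flags & MSG_DONTWAIT, &err);
 *      if (!skb)
 *              return err;     // -EAGAIN, -EPIPE, signal, or sk error
 */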

/* On 32bit arches, an skb frag is limited to 2^15 */
#define SKB_FRAG_PAGE_ORDER     get_order(32768)

/**
 * skb_page_frag_refill - check that a page_frag contains enough room
 * @sz: minimum size of the fragment we want to get
 * @pfrag: pointer to page_frag
 * @gfp: priority for memory allocation
 *
 * Note: While this allocator tries to use high order pages, there is
 * no guarantee that allocations succeed. Therefore, @sz MUST be
 * less than or equal to PAGE_SIZE.
 */
bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t gfp)
{
        if (pfrag->page) {
                if (atomic_read(&pfrag->page->_count) == 1) {
                        pfrag->offset = 0;
                        return true;
                }
                if (pfrag->offset + sz <= pfrag->size)
                        return true;
                put_page(pfrag->page);
        }

        pfrag->offset = 0;
        if (SKB_FRAG_PAGE_ORDER) {
                pfrag->page = alloc_pages(gfp | __GFP_COMP |
                                          __GFP_NOWARN | __GFP_NORETRY,
                                          SKB_FRAG_PAGE_ORDER);
                if (likely(pfrag->page)) {
                        pfrag->size = PAGE_SIZE << SKB_FRAG_PAGE_ORDER;
                        return true;
                }
        }
        pfrag->page = alloc_page(gfp);
        if (likely(pfrag->page)) {
                pfrag->size = PAGE_SIZE;
                return true;
        }
        return false;
}
EXPORT_SYMBOL(skb_page_frag_refill);

bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag)
{
        if (likely(skb_page_frag_refill(32U, pfrag, sk->sk_allocation)))
                return true;

        sk_enter_memory_pressure(sk);
        sk_stream_moderate_sndbuf(sk);
        return false;
}
EXPORT_SYMBOL(sk_page_frag_refill);
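
/*
 * Illustrative use from a sendmsg() implementation (a sketch; the exact
 * copy step depends on the protocol):
 *
 *      struct page_frag *pfrag = sk_page_frag(sk);
 *
 *      if (!sk_page_frag_refill(sk, pfrag))
 *              goto wait_for_memory;
 *      // copy into page_address(pfrag->page) + pfrag->offset, then
 *      // advance pfrag->offset by the number of bytes consumed
 */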

static void __lock_sock(struct sock *sk)
        __releases(&sk->sk_lock.slock)
        __acquires(&sk->sk_lock.slock)
{
        DEFINE_WAIT(wait);

        for (;;) {
                prepare_to_wait_exclusive(&sk->sk_lock.wq, &wait,
                                          TASK_UNINTERRUPTIBLE);
                spin_unlock_bh(&sk->sk_lock.slock);
                schedule();
                spin_lock_bh(&sk->sk_lock.slock);
                if (!sock_owned_by_user(sk))
                        break;
        }
        finish_wait(&sk->sk_lock.wq, &wait);
}

static void __release_sock(struct sock *sk)
        __releases(&sk->sk_lock.slock)
        __acquires(&sk->sk_lock.slock)
{
        struct sk_buff *skb = sk->sk_backlog.head;

        do {
                sk->sk_backlog.head = sk->sk_backlog.tail = NULL;
                bh_unlock_sock(sk);

                do {
                        struct sk_buff *next = skb->next;

                        prefetch(next);
                        WARN_ON_ONCE(skb_dst_is_noref(skb));
                        skb->next = NULL;
                        sk_backlog_rcv(sk, skb);

                        /*
                         * We are in process context here with softirqs
                         * disabled, use cond_resched_softirq() to preempt.
                         * This is safe to do because we've taken the backlog
                         * queue private:
                         */
                        cond_resched_softirq();

                        skb = next;
                } while (skb != NULL);

                bh_lock_sock(sk);
        } while ((skb = sk->sk_backlog.head) != NULL);

        /*
         * Doing the zeroing here guarantees we cannot loop forever
         * while a wild producer attempts to flood us.
         */
        sk->sk_backlog.len = 0;
}

/**
 * sk_wait_data - wait for data to arrive at sk_receive_queue
 * @sk:    sock to wait on
 * @timeo: for how long
 *
 * Now socket state including sk->sk_err is changed only under the lock,
 * hence we may omit checks after joining the wait queue.
 * We check the receive queue before schedule() only as an optimization;
 * it is very likely that release_sock() added new data.
 */
int sk_wait_data(struct sock *sk, long *timeo)
{
        int rc;
        DEFINE_WAIT(wait);

        prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
        set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
        rc = sk_wait_event(sk, timeo, !skb_queue_empty(&sk->sk_receive_queue));
        clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
        finish_wait(sk_sleep(sk), &wait);
        return rc;
}
EXPORT_SYMBOL(sk_wait_data);
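
/*
 * Receive-side sketch (illustrative; cf. the wait loop in tcp_recvmsg()),
 * called with the socket locked:
 *
 *      long timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
 *
 *      while (skb_queue_empty(&sk->sk_receive_queue)) {
 *              if (!timeo)
 *                      return -EAGAIN;
 *              sk_wait_data(sk, &timeo);
 *      }
 */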

/**
 * __sk_mem_schedule - increase sk_forward_alloc and memory_allocated
 * @sk: socket
 * @size: memory size to allocate
 * @kind: allocation type
 *
 * If kind is SK_MEM_SEND, it means wmem allocation. Otherwise it means
 * rmem allocation. This function assumes that protocols which have
 * memory_pressure use sk_wmem_queued as write buffer accounting.
 */
int __sk_mem_schedule(struct sock *sk, int size, int kind)
{
        struct proto *prot = sk->sk_prot;
        int amt = sk_mem_pages(size);
        long allocated;
        int parent_status = UNDER_LIMIT;

        sk->sk_forward_alloc += amt * SK_MEM_QUANTUM;

        allocated = sk_memory_allocated_add(sk, amt, &parent_status);

        /* Under limit. */
        if (parent_status == UNDER_LIMIT &&
            allocated <= sk_prot_mem_limits(sk, 0)) {
                sk_leave_memory_pressure(sk);
                return 1;
        }

        /* Under pressure. (we or our parents) */
        if ((parent_status > SOFT_LIMIT) ||
            allocated > sk_prot_mem_limits(sk, 1))
                sk_enter_memory_pressure(sk);

        /* Over hard limit (we or our parents) */
        if ((parent_status == OVER_LIMIT) ||
            (allocated > sk_prot_mem_limits(sk, 2)))
                goto suppress_allocation;

        /* guarantee minimum buffer size under pressure */
        if (kind == SK_MEM_RECV) {
                if (atomic_read(&sk->sk_rmem_alloc) < prot->sysctl_rmem[0])
                        return 1;

        } else { /* SK_MEM_SEND */
                if (sk->sk_type == SOCK_STREAM) {
                        if (sk->sk_wmem_queued < prot->sysctl_wmem[0])
                                return 1;
                } else if (atomic_read(&sk->sk_wmem_alloc) <
                           prot->sysctl_wmem[0])
                        return 1;
        }

        if (sk_has_memory_pressure(sk)) {
                int alloc;

                if (!sk_under_memory_pressure(sk))
                        return 1;
                alloc = sk_sockets_allocated_read_positive(sk);
                if (sk_prot_mem_limits(sk, 2) > alloc *
                    sk_mem_pages(sk->sk_wmem_queued +
                                 atomic_read(&sk->sk_rmem_alloc) +
                                 sk->sk_forward_alloc))
                        return 1;
        }

suppress_allocation:

        if (kind == SK_MEM_SEND && sk->sk_type == SOCK_STREAM) {
                sk_stream_moderate_sndbuf(sk);

                /* Fail only if socket is _under_ its sndbuf.
                 * In this case we cannot block, so we have to fail.
                 */
                if (sk->sk_wmem_queued + size >= sk->sk_sndbuf)
                        return 1;
        }

        trace_sock_exceed_buf_limit(sk, prot, allocated);

        /* Alas. Undo changes. */
        sk->sk_forward_alloc -= amt * SK_MEM_QUANTUM;
        sk_memory_allocated_sub(sk, amt);
        return 0;
}
EXPORT_SYMBOL(__sk_mem_schedule);
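
/*
 * Protocols normally reach this function through the sk_wmem_schedule()
 * and sk_rmem_schedule() inlines in include/net/sock.h, e.g. (sketch):
 *
 *      if (!sk_wmem_schedule(sk, skb->truesize))
 *              goto drop;      // charge refused under memory pressure
 */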

/**
 * __sk_mem_reclaim - reclaim memory_allocated
 * @sk: socket
 */
void __sk_mem_reclaim(struct sock *sk)
{
        sk_memory_allocated_sub(sk,
                                sk->sk_forward_alloc >> SK_MEM_QUANTUM_SHIFT);
        sk->sk_forward_alloc &= SK_MEM_QUANTUM - 1;

        if (sk_under_memory_pressure(sk) &&
            (sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0)))
                sk_leave_memory_pressure(sk);
}
EXPORT_SYMBOL(__sk_mem_reclaim);

/*
 * Set of default routines for initialising struct proto_ops when
 * the protocol does not support a particular function. In certain
 * cases where it makes no sense for a protocol to have a "do nothing"
 * function, some default processing is provided.
 */

int sock_no_bind(struct socket *sock, struct sockaddr *saddr, int len)
{
        return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_bind);

int sock_no_connect(struct socket *sock, struct sockaddr *saddr,
                    int len, int flags)
{
        return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_connect);

int sock_no_socketpair(struct socket *sock1, struct socket *sock2)
{
        return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_socketpair);

int sock_no_accept(struct socket *sock, struct socket *newsock, int flags)
{
        return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_accept);

int sock_no_getname(struct socket *sock, struct sockaddr *saddr,
                    int *len, int peer)
{
        return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_getname);

unsigned int sock_no_poll(struct file *file, struct socket *sock, poll_table *pt)
{
        return 0;
}
EXPORT_SYMBOL(sock_no_poll);

int sock_no_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
        return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_ioctl);

int sock_no_listen(struct socket *sock, int backlog)
{
        return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_listen);

int sock_no_shutdown(struct socket *sock, int how)
{
        return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_shutdown);

int sock_no_setsockopt(struct socket *sock, int level, int optname,
                       char __user *optval, unsigned int optlen)
{
        return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_setsockopt);

int sock_no_getsockopt(struct socket *sock, int level, int optname,
                       char __user *optval, int __user *optlen)
{
        return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_getsockopt);

int sock_no_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
                    size_t len)
{
        return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_sendmsg);

int sock_no_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
                    size_t len, int flags)
{
        return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_recvmsg);

int sock_no_mmap(struct file *file, struct socket *sock, struct vm_area_struct *vma)
{
        /* Mirror missing mmap method error code */
        return -ENODEV;
}
EXPORT_SYMBOL(sock_no_mmap);

ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags)
{
        ssize_t res;
        struct msghdr msg = {.msg_flags = flags};
        struct kvec iov;
        char *kaddr = kmap(page);

        iov.iov_base = kaddr + offset;
        iov.iov_len = size;
        res = kernel_sendmsg(sock, &msg, &iov, 1, size);
        kunmap(page);
        return res;
}
EXPORT_SYMBOL(sock_no_sendpage);

/*
 * Default Socket Callbacks
 */

static void sock_def_wakeup(struct sock *sk)
{
        struct socket_wq *wq;

        rcu_read_lock();
        wq = rcu_dereference(sk->sk_wq);
        if (wq_has_sleeper(wq))
                wake_up_interruptible_all(&wq->wait);
        rcu_read_unlock();
}

static void sock_def_error_report(struct sock *sk)
{
        struct socket_wq *wq;

        rcu_read_lock();
        wq = rcu_dereference(sk->sk_wq);
        if (wq_has_sleeper(wq))
                wake_up_interruptible_poll(&wq->wait, POLLERR);
        sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR);
        rcu_read_unlock();
}

static void sock_def_readable(struct sock *sk)
{
        struct socket_wq *wq;

        rcu_read_lock();
        wq = rcu_dereference(sk->sk_wq);
        if (wq_has_sleeper(wq))
                wake_up_interruptible_sync_poll(&wq->wait, POLLIN | POLLPRI |
                                                POLLRDNORM | POLLRDBAND);
        sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
        rcu_read_unlock();
}

static void sock_def_write_space(struct sock *sk)
{
        struct socket_wq *wq;

        rcu_read_lock();

        /* Do not wake up a writer until he can make "significant"
         * progress.  --DaveM
         */
        if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) {
                wq = rcu_dereference(sk->sk_wq);
                if (wq_has_sleeper(wq))
                        wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
                                                POLLWRNORM | POLLWRBAND);

                /* Should agree with poll, otherwise some programs break */
                if (sock_writeable(sk))
                        sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
        }

        rcu_read_unlock();
}

static void sock_def_destruct(struct sock *sk)
{
        kfree(sk->sk_protinfo);
}

void sk_send_sigurg(struct sock *sk)
{
        if (sk->sk_socket && sk->sk_socket->file)
                if (send_sigurg(&sk->sk_socket->file->f_owner))
                        sk_wake_async(sk, SOCK_WAKE_URG, POLL_PRI);
}
EXPORT_SYMBOL(sk_send_sigurg);

void sk_reset_timer(struct sock *sk, struct timer_list *timer,
                    unsigned long expires)
{
        if (!mod_timer(timer, expires))
                sock_hold(sk);
}
EXPORT_SYMBOL(sk_reset_timer);

void sk_stop_timer(struct sock *sk, struct timer_list *timer)
{
        if (del_timer(timer))
                __sock_put(sk);
}
EXPORT_SYMBOL(sk_stop_timer);
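
/*
 * Reference pairing (illustrative): sk_reset_timer() takes a socket
 * reference when it arms a previously inactive timer, and sk_stop_timer()
 * drops that reference if the timer was still pending.
 *
 *      sk_reset_timer(sk, &sk->sk_timer, jiffies + delay);
 *      ...
 *      sk_stop_timer(sk, &sk->sk_timer);
 */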

void sock_init_data(struct socket *sock, struct sock *sk)
{
        skb_queue_head_init(&sk->sk_receive_queue);
        skb_queue_head_init(&sk->sk_write_queue);
        skb_queue_head_init(&sk->sk_error_queue);

        sk->sk_send_head = NULL;

        init_timer(&sk->sk_timer);

        sk->sk_allocation = GFP_KERNEL;
        sk->sk_rcvbuf = sysctl_rmem_default;
        sk->sk_sndbuf = sysctl_wmem_default;
        sk->sk_state = TCP_CLOSE;
        sk_set_socket(sk, sock);

        sock_set_flag(sk, SOCK_ZAPPED);

        if (sock) {
                sk->sk_type = sock->type;
                sk->sk_wq = sock->wq;
                sock->sk = sk;
        } else
                sk->sk_wq = NULL;

        spin_lock_init(&sk->sk_dst_lock);
        rwlock_init(&sk->sk_callback_lock);
        lockdep_set_class_and_name(&sk->sk_callback_lock,
                        af_callback_keys + sk->sk_family,
                        af_family_clock_key_strings[sk->sk_family]);

        sk->sk_state_change = sock_def_wakeup;
        sk->sk_data_ready = sock_def_readable;
        sk->sk_write_space = sock_def_write_space;
        sk->sk_error_report = sock_def_error_report;
        sk->sk_destruct = sock_def_destruct;

        sk->sk_frag.page = NULL;
        sk->sk_frag.offset = 0;
        sk->sk_peek_off = -1;

        sk->sk_peer_pid = NULL;
        sk->sk_peer_cred = NULL;
        sk->sk_write_pending = 0;
        sk->sk_rcvlowat = 1;
        sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
        sk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT;

        sk->sk_stamp = ktime_set(-1L, 0);

#ifdef CONFIG_NET_RX_BUSY_POLL
        sk->sk_napi_id = 0;
        sk->sk_ll_usec = sysctl_net_busy_read;
#endif

        sk->sk_max_pacing_rate = ~0U;
        sk->sk_pacing_rate = ~0U;
        /*
         * Before updating sk_refcnt, we must commit prior changes to memory
         * (Documentation/RCU/rculist_nulls.txt for details)
         */
        smp_wmb();
        atomic_set(&sk->sk_refcnt, 1);
        atomic_set(&sk->sk_drops, 0);
}
EXPORT_SYMBOL(sock_init_data);
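
/*
 * A protocol's create/init path typically calls this right after the
 * struct sock is allocated (an illustrative sketch; "myproto_destruct"
 * is a hypothetical destructor):
 *
 *      sock_init_data(sock, sk);       // sk now carries one reference
 *      sk->sk_destruct = myproto_destruct;
 */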

void lock_sock_nested(struct sock *sk, int subclass)
{
        might_sleep();
        spin_lock_bh(&sk->sk_lock.slock);
        if (sk->sk_lock.owned)
                __lock_sock(sk);
        sk->sk_lock.owned = 1;
        spin_unlock(&sk->sk_lock.slock);
        /*
         * The sk_lock has mutex_lock() semantics here:
         */
        mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_);
        local_bh_enable();
}
EXPORT_SYMBOL(lock_sock_nested);

void release_sock(struct sock *sk)
{
        /*
         * The sk_lock has mutex_unlock() semantics:
         */
        mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);

        spin_lock_bh(&sk->sk_lock.slock);
        if (sk->sk_backlog.tail)
                __release_sock(sk);

        /* Warning : release_cb() might need to release sk ownership,
         * i.e. call sock_release_ownership(sk) before us.
         */
        if (sk->sk_prot->release_cb)
                sk->sk_prot->release_cb(sk);

        sock_release_ownership(sk);
        if (waitqueue_active(&sk->sk_lock.wq))
                wake_up(&sk->sk_lock.wq);
        spin_unlock_bh(&sk->sk_lock.slock);
}
EXPORT_SYMBOL(release_sock);

/**
 * lock_sock_fast - fast version of lock_sock
 * @sk: socket
 *
 * This version should be used for very small sections, where the process
 * won't block.
 *
 * Returns false if the fast path was taken:
 * sk_lock.slock locked, owned = 0, BH disabled
 *
 * Returns true if the slow path was taken:
 * sk_lock.slock unlocked, owned = 1, BH enabled
 */
bool lock_sock_fast(struct sock *sk)
{
        might_sleep();
        spin_lock_bh(&sk->sk_lock.slock);

        if (!sk->sk_lock.owned)
                /*
                 * Note : We must disable BH
                 */
                return false;

        __lock_sock(sk);
        sk->sk_lock.owned = 1;
        spin_unlock(&sk->sk_lock.slock);
        /*
         * The sk_lock has mutex_lock() semantics here:
         */
        mutex_acquire(&sk->sk_lock.dep_map, 0, 0, _RET_IP_);
        local_bh_enable();
        return true;
}
EXPORT_SYMBOL(lock_sock_fast);
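
/*
 * Pairing sketch (illustrative): the boolean returned here must be fed
 * back to unlock_sock_fast() so the matching unlock path is taken.
 *
 *      bool slow = lock_sock_fast(sk);
 *
 *      // ... short critical section ...
 *      unlock_sock_fast(sk, slow);
 */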

int sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp)
{
        struct timeval tv;

        if (!sock_flag(sk, SOCK_TIMESTAMP))
                sock_enable_timestamp(sk, SOCK_TIMESTAMP);
        tv = ktime_to_timeval(sk->sk_stamp);
        if (tv.tv_sec == -1)
                return -ENOENT;
        if (tv.tv_sec == 0) {
                sk->sk_stamp = ktime_get_real();
                tv = ktime_to_timeval(sk->sk_stamp);
        }
        return copy_to_user(userstamp, &tv, sizeof(tv)) ? -EFAULT : 0;
}
EXPORT_SYMBOL(sock_get_timestamp);

int sock_get_timestampns(struct sock *sk, struct timespec __user *userstamp)
{
        struct timespec ts;

        if (!sock_flag(sk, SOCK_TIMESTAMP))
                sock_enable_timestamp(sk, SOCK_TIMESTAMP);
        ts = ktime_to_timespec(sk->sk_stamp);
        if (ts.tv_sec == -1)
                return -ENOENT;
        if (ts.tv_sec == 0) {
                sk->sk_stamp = ktime_get_real();
                ts = ktime_to_timespec(sk->sk_stamp);
        }
        return copy_to_user(userstamp, &ts, sizeof(ts)) ? -EFAULT : 0;
}
EXPORT_SYMBOL(sock_get_timestampns);

void sock_enable_timestamp(struct sock *sk, int flag)
{
        if (!sock_flag(sk, flag)) {
                unsigned long previous_flags = sk->sk_flags;

                sock_set_flag(sk, flag);
                /*
                 * We just set one of the two flags which require net
                 * time stamping, but time stamping might have been on
                 * already because of the other one.
                 */
                if (!(previous_flags & SK_FLAGS_TIMESTAMP))
                        net_enable_timestamp();
        }
}

int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
                       int level, int type)
{
        struct sock_exterr_skb *serr;
        struct sk_buff *skb;
        int copied, err;

        err = -EAGAIN;
        skb = sock_dequeue_err_skb(sk);
        if (skb == NULL)
                goto out;

        copied = skb->len;
        if (copied > len) {
                msg->msg_flags |= MSG_TRUNC;
                copied = len;
        }
        err = skb_copy_datagram_msg(skb, 0, msg, copied);
        if (err)
                goto out_free_skb;

        sock_recv_timestamp(msg, sk, skb);

        serr = SKB_EXT_ERR(skb);
        put_cmsg(msg, level, type, sizeof(serr->ee), &serr->ee);

        msg->msg_flags |= MSG_ERRQUEUE;
        err = copied;

out_free_skb:
        kfree_skb(skb);
out:
        return err;
}
EXPORT_SYMBOL(sock_recv_errqueue);

/*
 * Get a socket option on a socket.
 *
 * FIX: POSIX 1003.1g is very ambiguous here. It states that
 * asynchronous errors should be reported by getsockopt. We assume
 * this means if you specify SO_ERROR (otherwise what's the point of it).
 */
int sock_common_getsockopt(struct socket *sock, int level, int optname,
                           char __user *optval, int __user *optlen)
{
        struct sock *sk = sock->sk;

        return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL(sock_common_getsockopt);

#ifdef CONFIG_COMPAT
int compat_sock_common_getsockopt(struct socket *sock, int level, int optname,
                                  char __user *optval, int __user *optlen)
{
        struct sock *sk = sock->sk;

        if (sk->sk_prot->compat_getsockopt != NULL)
                return sk->sk_prot->compat_getsockopt(sk, level, optname,
                                                      optval, optlen);
        return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL(compat_sock_common_getsockopt);
#endif

int sock_common_recvmsg(struct kiocb *iocb, struct socket *sock,
                        struct msghdr *msg, size_t size, int flags)
{
        struct sock *sk = sock->sk;
        int addr_len = 0;
        int err;

        err = sk->sk_prot->recvmsg(iocb, sk, msg, size, flags & MSG_DONTWAIT,
                                   flags & ~MSG_DONTWAIT, &addr_len);
        if (err >= 0)
                msg->msg_namelen = addr_len;
        return err;
}
EXPORT_SYMBOL(sock_common_recvmsg);

/*
 * Set socket options on an inet socket.
 */
int sock_common_setsockopt(struct socket *sock, int level, int optname,
                           char __user *optval, unsigned int optlen)
{
        struct sock *sk = sock->sk;

        return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL(sock_common_setsockopt);

#ifdef CONFIG_COMPAT
int compat_sock_common_setsockopt(struct socket *sock, int level, int optname,
                                  char __user *optval, unsigned int optlen)
{
        struct sock *sk = sock->sk;

        if (sk->sk_prot->compat_setsockopt != NULL)
                return sk->sk_prot->compat_setsockopt(sk, level, optname,
                                                      optval, optlen);
        return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL(compat_sock_common_setsockopt);
#endif

void sk_common_release(struct sock *sk)
{
        if (sk->sk_prot->destroy)
                sk->sk_prot->destroy(sk);

        /*
         * Observation: when sk_common_release is called, processes have
         * no access to the socket. But the net still has.
         *
         * Step one, detach it from networking:
         *
         * A. Remove from hash tables.
         */

        sk->sk_prot->unhash(sk);

        /*
         * At this point the socket cannot receive new packets, but it is
         * possible that some packets are in flight because some CPU runs
         * the receiver and did a hash table lookup before we unhashed the
         * socket. They will reach the receive queue and will be purged by
         * the socket destructor.
         *
         * Also we still have packets pending on the receive queue and
         * probably our own packets waiting in device queues. sock_destroy
         * will drain the receive queue, but transmitted packets will delay
         * socket destruction until the last reference is released.
         */

        sock_orphan(sk);

        xfrm_sk_free_policy(sk);

        sk_refcnt_debug_release(sk);

        if (sk->sk_frag.page) {
                put_page(sk->sk_frag.page);
                sk->sk_frag.page = NULL;
        }

        sock_put(sk);
}
EXPORT_SYMBOL(sk_common_release);
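
/*
 * For a simple protocol, ->close() can reduce to this call alone
 * (an illustrative sketch; cf. raw_close()):
 *
 *      static void myproto_close(struct sock *sk, long timeout)
 *      {
 *              sk_common_release(sk);
 *      }
 */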

#ifdef CONFIG_PROC_FS
#define PROTO_INUSE_NR  64      /* should be enough for the first time */
struct prot_inuse {
        int val[PROTO_INUSE_NR];
};

static DECLARE_BITMAP(proto_inuse_idx, PROTO_INUSE_NR);

#ifdef CONFIG_NET_NS
void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
{
        __this_cpu_add(net->core.inuse->val[prot->inuse_idx], val);
}
EXPORT_SYMBOL_GPL(sock_prot_inuse_add);

int sock_prot_inuse_get(struct net *net, struct proto *prot)
{
        int cpu, idx = prot->inuse_idx;
        int res = 0;

        for_each_possible_cpu(cpu)
                res += per_cpu_ptr(net->core.inuse, cpu)->val[idx];

        return res >= 0 ? res : 0;
}
EXPORT_SYMBOL_GPL(sock_prot_inuse_get);

static int __net_init sock_inuse_init_net(struct net *net)
{
        net->core.inuse = alloc_percpu(struct prot_inuse);
        return net->core.inuse ? 0 : -ENOMEM;
}

static void __net_exit sock_inuse_exit_net(struct net *net)
{
        free_percpu(net->core.inuse);
}

static struct pernet_operations net_inuse_ops = {
        .init = sock_inuse_init_net,
        .exit = sock_inuse_exit_net,
};

static __init int net_inuse_init(void)
{
        if (register_pernet_subsys(&net_inuse_ops))
                panic("Cannot initialize net inuse counters");

        return 0;
}

core_initcall(net_inuse_init);
#else
static DEFINE_PER_CPU(struct prot_inuse, prot_inuse);

void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
{
        __this_cpu_add(prot_inuse.val[prot->inuse_idx], val);
}
EXPORT_SYMBOL_GPL(sock_prot_inuse_add);

int sock_prot_inuse_get(struct net *net, struct proto *prot)
{
        int cpu, idx = prot->inuse_idx;
        int res = 0;

        for_each_possible_cpu(cpu)
                res += per_cpu(prot_inuse, cpu).val[idx];

        return res >= 0 ? res : 0;
}
EXPORT_SYMBOL_GPL(sock_prot_inuse_get);
#endif

static void assign_proto_idx(struct proto *prot)
{
        prot->inuse_idx = find_first_zero_bit(proto_inuse_idx, PROTO_INUSE_NR);

        if (unlikely(prot->inuse_idx == PROTO_INUSE_NR - 1)) {
                pr_err("PROTO_INUSE_NR exhausted\n");
                return;
        }

        set_bit(prot->inuse_idx, proto_inuse_idx);
}

static void release_proto_idx(struct proto *prot)
{
        if (prot->inuse_idx != PROTO_INUSE_NR - 1)
                clear_bit(prot->inuse_idx, proto_inuse_idx);
}
#else
static inline void assign_proto_idx(struct proto *prot)
{
}

static inline void release_proto_idx(struct proto *prot)
{
}
#endif

int proto_register(struct proto *prot, int alloc_slab)
{
        if (alloc_slab) {
                prot->slab = kmem_cache_create(prot->name, prot->obj_size, 0,
                                        SLAB_HWCACHE_ALIGN | prot->slab_flags,
                                        NULL);

                if (prot->slab == NULL) {
                        pr_crit("%s: Can't create sock SLAB cache!\n",
                                prot->name);
                        goto out;
                }

                if (prot->rsk_prot != NULL) {
                        prot->rsk_prot->slab_name = kasprintf(GFP_KERNEL, "request_sock_%s", prot->name);
                        if (prot->rsk_prot->slab_name == NULL)
                                goto out_free_sock_slab;

                        prot->rsk_prot->slab = kmem_cache_create(prot->rsk_prot->slab_name,
                                                                 prot->rsk_prot->obj_size, 0,
                                                                 SLAB_HWCACHE_ALIGN, NULL);

                        if (prot->rsk_prot->slab == NULL) {
                                pr_crit("%s: Can't create request sock SLAB cache!\n",
                                        prot->name);
                                goto out_free_request_sock_slab_name;
                        }
                }

                if (prot->twsk_prot != NULL) {
                        prot->twsk_prot->twsk_slab_name = kasprintf(GFP_KERNEL, "tw_sock_%s", prot->name);

                        if (prot->twsk_prot->twsk_slab_name == NULL)
                                goto out_free_request_sock_slab;

                        prot->twsk_prot->twsk_slab =
                                kmem_cache_create(prot->twsk_prot->twsk_slab_name,
                                                  prot->twsk_prot->twsk_obj_size,
                                                  0,
                                                  SLAB_HWCACHE_ALIGN |
                                                        prot->slab_flags,
                                                  NULL);
                        if (prot->twsk_prot->twsk_slab == NULL)
                                goto out_free_timewait_sock_slab_name;
                }
        }

        mutex_lock(&proto_list_mutex);
        list_add(&prot->node, &proto_list);
        assign_proto_idx(prot);
        mutex_unlock(&proto_list_mutex);
        return 0;

out_free_timewait_sock_slab_name:
        kfree(prot->twsk_prot->twsk_slab_name);
out_free_request_sock_slab:
        if (prot->rsk_prot && prot->rsk_prot->slab) {
                kmem_cache_destroy(prot->rsk_prot->slab);
                prot->rsk_prot->slab = NULL;
        }
out_free_request_sock_slab_name:
        if (prot->rsk_prot)
                kfree(prot->rsk_prot->slab_name);
out_free_sock_slab:
        kmem_cache_destroy(prot->slab);
        prot->slab = NULL;
out:
        return -ENOBUFS;
}
EXPORT_SYMBOL(proto_register);
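
/*
 * Registration sketch from a protocol module ("myproto" is a hypothetical
 * name; alloc_slab = 1 requests a dedicated kmem cache):
 *
 *      static struct proto myproto_prot = {
 *              .name     = "MYPROTO",
 *              .owner    = THIS_MODULE,
 *              .obj_size = sizeof(struct myproto_sock),
 *      };
 *
 *      err = proto_register(&myproto_prot, 1);
 *      ...
 *      proto_unregister(&myproto_prot);
 */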

void proto_unregister(struct proto *prot)
{
        mutex_lock(&proto_list_mutex);
        release_proto_idx(prot);
        list_del(&prot->node);
        mutex_unlock(&proto_list_mutex);

        if (prot->slab != NULL) {
                kmem_cache_destroy(prot->slab);
                prot->slab = NULL;
        }

        if (prot->rsk_prot != NULL && prot->rsk_prot->slab != NULL) {
                kmem_cache_destroy(prot->rsk_prot->slab);
                kfree(prot->rsk_prot->slab_name);
                prot->rsk_prot->slab = NULL;
        }

        if (prot->twsk_prot != NULL && prot->twsk_prot->twsk_slab != NULL) {
                kmem_cache_destroy(prot->twsk_prot->twsk_slab);
                kfree(prot->twsk_prot->twsk_slab_name);
                prot->twsk_prot->twsk_slab = NULL;
        }
}
EXPORT_SYMBOL(proto_unregister);

#ifdef CONFIG_PROC_FS
static void *proto_seq_start(struct seq_file *seq, loff_t *pos)
        __acquires(proto_list_mutex)
{
        mutex_lock(&proto_list_mutex);
        return seq_list_start_head(&proto_list, *pos);
}

static void *proto_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
        return seq_list_next(v, &proto_list, pos);
}

static void proto_seq_stop(struct seq_file *seq, void *v)
        __releases(proto_list_mutex)
{
        mutex_unlock(&proto_list_mutex);
}

static char proto_method_implemented(const void *method)
{
        return method == NULL ? 'n' : 'y';
}

static long sock_prot_memory_allocated(struct proto *proto)
{
        return proto->memory_allocated != NULL ? proto_memory_allocated(proto) : -1L;
}

static char *sock_prot_memory_pressure(struct proto *proto)
{
        return proto->memory_pressure != NULL ?
        proto_memory_pressure(proto) ? "yes" : "no" : "NI";
}

static void proto_seq_printf(struct seq_file *seq, struct proto *proto)
{
        seq_printf(seq, "%-9s %4u %6d %6ld %-3s %6u %-3s %-10s "
                        "%2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c\n",
                   proto->name,
                   proto->obj_size,
                   sock_prot_inuse_get(seq_file_net(seq), proto),
                   sock_prot_memory_allocated(proto),
                   sock_prot_memory_pressure(proto),
                   proto->max_header,
                   proto->slab == NULL ? "no" : "yes",
                   module_name(proto->owner),
                   proto_method_implemented(proto->close),
                   proto_method_implemented(proto->connect),
                   proto_method_implemented(proto->disconnect),
                   proto_method_implemented(proto->accept),
                   proto_method_implemented(proto->ioctl),
                   proto_method_implemented(proto->init),
                   proto_method_implemented(proto->destroy),
                   proto_method_implemented(proto->shutdown),
                   proto_method_implemented(proto->setsockopt),
                   proto_method_implemented(proto->getsockopt),
                   proto_method_implemented(proto->sendmsg),
                   proto_method_implemented(proto->recvmsg),
                   proto_method_implemented(proto->sendpage),
                   proto_method_implemented(proto->bind),
                   proto_method_implemented(proto->backlog_rcv),
                   proto_method_implemented(proto->hash),
                   proto_method_implemented(proto->unhash),
                   proto_method_implemented(proto->get_port),
                   proto_method_implemented(proto->enter_memory_pressure));
}

static int proto_seq_show(struct seq_file *seq, void *v)
{
        if (v == &proto_list)
                seq_printf(seq, "%-9s %-4s %-8s %-6s %-5s %-7s %-4s %-10s %s",
                           "protocol",
                           "size",
                           "sockets",
                           "memory",
                           "press",
                           "maxhdr",
                           "slab",
                           "module",
                           "cl co di ac io in de sh ss gs se re sp bi br ha uh gp em\n");
        else
                proto_seq_printf(seq, list_entry(v, struct proto, node));
        return 0;
}

static const struct seq_operations proto_seq_ops = {
        .start  = proto_seq_start,
        .next   = proto_seq_next,
        .stop   = proto_seq_stop,
        .show   = proto_seq_show,
};

static int proto_seq_open(struct inode *inode, struct file *file)
{
        return seq_open_net(inode, file, &proto_seq_ops,
                            sizeof(struct seq_net_private));
}

static const struct file_operations proto_seq_fops = {
        .owner          = THIS_MODULE,
        .open           = proto_seq_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = seq_release_net,
};

static __net_init int proto_init_net(struct net *net)
{
        if (!proc_create("protocols", S_IRUGO, net->proc_net, &proto_seq_fops))
                return -ENOMEM;

        return 0;
}

static __net_exit void proto_exit_net(struct net *net)
{
        remove_proc_entry("protocols", net->proc_net);
}

static __net_initdata struct pernet_operations proto_net_ops = {
        .init = proto_init_net,
        .exit = proto_exit_net,
};

static int __init proto_init(void)
{
        return register_pernet_subsys(&proto_net_ops);
}

subsys_initcall(proto_init);

#endif /* PROC_FS */