sock.c

/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system. INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Generic socket support routines. Memory allocators, socket lock/release
 *		handler for protocols to use and generic option handler.
 *
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Alan Cox, <A.Cox@swansea.ac.uk>
 *
 * Fixes:
 *		Alan Cox	:	Numerous verify_area() problems
 *		Alan Cox	:	Connecting on a connecting socket
 *					now returns an error for tcp.
 *		Alan Cox	:	sock->protocol is set correctly.
 *					and is not sometimes left as 0.
 *		Alan Cox	:	connect handles icmp errors on a
 *					connect properly. Unfortunately there
 *					is a restart syscall nasty there. I
 *					can't match BSD without hacking the C
 *					library. Ideas urgently sought!
 *		Alan Cox	:	Disallow bind() to addresses that are
 *					not ours - especially broadcast ones!!
 *		Alan Cox	:	Socket 1024 _IS_ ok for users. (fencepost)
 *		Alan Cox	:	sock_wfree/sock_rfree don't destroy sockets,
 *					instead they leave that for the DESTROY timer.
 *		Alan Cox	:	Clean up error flag in accept
 *		Alan Cox	:	TCP ack handling is buggy, the DESTROY timer
 *					was buggy. Put a remove_sock() in the handler
 *					for memory when we hit 0. Also altered the timer
 *					code. The ACK stuff can wait and needs major
 *					TCP layer surgery.
 *		Alan Cox	:	Fixed TCP ack bug, removed remove sock
 *					and fixed timer/inet_bh race.
 *		Alan Cox	:	Added zapped flag for TCP
 *		Alan Cox	:	Move kfree_skb into skbuff.c and tidied up surplus code
 *		Alan Cox	:	for new sk_buff allocations wmalloc/rmalloc now call alloc_skb
 *		Alan Cox	:	kfree_s calls now are kfree_skbmem so we can track skb resources
 *		Alan Cox	:	Supports socket option broadcast now as does udp. Packet and raw need fixing.
 *		Alan Cox	:	Added RCVBUF,SNDBUF size setting. It suddenly occurred to me how easy it was so...
 *		Rick Sladkey	:	Relaxed UDP rules for matching packets.
 *		C.E.Hawkins	:	IFF_PROMISC/SIOCGHWADDR support
 *		Pauline Middelink:	identd support
 *		Alan Cox	:	Fixed connect() taking signals I think.
 *		Alan Cox	:	SO_LINGER supported
 *		Alan Cox	:	Error reporting fixes
 *		Anonymous	:	inet_create tidied up (sk->reuse setting)
 *		Alan Cox	:	inet sockets don't set sk->type!
 *		Alan Cox	:	Split socket option code
 *		Alan Cox	:	Callbacks
 *		Alan Cox	:	Nagle flag for Charles & Johannes stuff
 *		Alex		:	Removed restriction on inet fioctl
 *		Alan Cox	:	Splitting INET from NET core
 *		Alan Cox	:	Fixed bogus SO_TYPE handling in getsockopt()
 *		Adam Caldwell	:	Missing return in SO_DONTROUTE/SO_DEBUG code
 *		Alan Cox	:	Split IP from generic code
 *		Alan Cox	:	New kfree_skbmem()
 *		Alan Cox	:	Make SO_DEBUG superuser only.
 *		Alan Cox	:	Allow anyone to clear SO_DEBUG
 *					(compatibility fix)
 *		Alan Cox	:	Added optimistic memory grabbing for AF_UNIX throughput.
 *		Alan Cox	:	Allocator for a socket is settable.
 *		Alan Cox	:	SO_ERROR includes soft errors.
 *		Alan Cox	:	Allow NULL arguments on some SO_ opts
 *		Alan Cox	:	Generic socket allocation to make hooks
 *					easier (suggested by Craig Metz).
 *		Michael Pall	:	SO_ERROR returns positive errno again
 *		Steve Whitehouse:	Added default destructor to free
 *					protocol private data.
 *		Steve Whitehouse:	Added various other default routines
 *					common to several socket families.
 *		Chris Evans	:	Call suser() check last on F_SETOWN
 *		Jay Schulist	:	Added SO_ATTACH_FILTER and SO_DETACH_FILTER.
 *		Andi Kleen	:	Add sock_kmalloc()/sock_kfree_s()
 *		Andi Kleen	:	Fix write_space callback
 *		Chris Evans	:	Security fixes - signedness again
 *		Arnaldo C. Melo :	cleanups, use skb_queue_purge
 *
 * To Fix:
 *
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <asm/unaligned.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/errqueue.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/poll.h>
#include <linux/tcp.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/user_namespace.h>
#include <linux/static_key.h>
#include <linux/memcontrol.h>
#include <linux/prefetch.h>

#include <linux/uaccess.h>

#include <linux/netdevice.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/request_sock.h>
#include <net/sock.h>
#include <linux/net_tstamp.h>
#include <net/xfrm.h>
#include <linux/ipsec.h>
#include <net/cls_cgroup.h>
#include <net/netprio_cgroup.h>
#include <linux/sock_diag.h>

#include <linux/filter.h>
#include <net/sock_reuseport.h>

#include <trace/events/sock.h>

#include <net/tcp.h>
#include <net/busy_poll.h>

static DEFINE_MUTEX(proto_list_mutex);
static LIST_HEAD(proto_list);

static void sock_inuse_add(struct net *net, int val);
/**
 * sk_ns_capable - General socket capability test
 * @sk: Socket to use a capability on or through
 * @user_ns: The user namespace of the capability to use
 * @cap: The capability to use
 *
 * Test to see if the opener of the socket had the capability @cap when
 * the socket was created and the current process has the capability
 * @cap in the user namespace @user_ns.
 */
bool sk_ns_capable(const struct sock *sk,
		   struct user_namespace *user_ns, int cap)
{
	return file_ns_capable(sk->sk_socket->file, user_ns, cap) &&
		ns_capable(user_ns, cap);
}
EXPORT_SYMBOL(sk_ns_capable);
/**
 * sk_capable - Socket global capability test
 * @sk: Socket to use a capability on or through
 * @cap: The global capability to use
 *
 * Test to see if the opener of the socket had the capability @cap when
 * the socket was created and the current process has the capability
 * @cap in all user namespaces.
 */
bool sk_capable(const struct sock *sk, int cap)
{
	return sk_ns_capable(sk, &init_user_ns, cap);
}
EXPORT_SYMBOL(sk_capable);
/**
 * sk_net_capable - Network namespace socket capability test
 * @sk: Socket to use a capability on or through
 * @cap: The capability to use
 *
 * Test to see if the opener of the socket had the capability @cap when
 * the socket was created and the current process has the capability
 * @cap over the network namespace the socket is a member of.
 */
bool sk_net_capable(const struct sock *sk, int cap)
{
	return sk_ns_capable(sk, sock_net(sk)->user_ns, cap);
}
EXPORT_SYMBOL(sk_net_capable);
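/*
 * Usage sketch (illustrative, not part of this file): a protocol handler
 * would typically gate a privileged operation on one of the helpers
 * above, e.g.
 *
 *	if (!sk_net_capable(sk, CAP_NET_ADMIN))
 *		return -EPERM;
 *
 * Both the capability of the process that opened the socket and that of
 * the current process must check out, so passing a privileged fd to an
 * unprivileged process does not grant the operation.
 */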
/*
 * Each address family might have different locking rules, so we have
 * one slock key per address family and separate keys for internal and
 * userspace sockets.
 */
static struct lock_class_key af_family_keys[AF_MAX];
static struct lock_class_key af_family_kern_keys[AF_MAX];
static struct lock_class_key af_family_slock_keys[AF_MAX];
static struct lock_class_key af_family_kern_slock_keys[AF_MAX];

/*
 * Make lock validator output more readable. (we pre-construct these
 * strings build-time, so that runtime initialization of socket
 * locks is fast):
 */

#define _sock_locks(x)						  \
  x "AF_UNSPEC",	x "AF_UNIX"     ,	x "AF_INET"     , \
  x "AF_AX25"  ,	x "AF_IPX"      ,	x "AF_APPLETALK", \
  x "AF_NETROM",	x "AF_BRIDGE"   ,	x "AF_ATMPVC"   , \
  x "AF_X25"   ,	x "AF_INET6"    ,	x "AF_ROSE"     , \
  x "AF_DECnet",	x "AF_NETBEUI"  ,	x "AF_SECURITY" , \
  x "AF_KEY"   ,	x "AF_NETLINK"  ,	x "AF_PACKET"   , \
  x "AF_ASH"   ,	x "AF_ECONET"   ,	x "AF_ATMSVC"   , \
  x "AF_RDS"   ,	x "AF_SNA"      ,	x "AF_IRDA"     , \
  x "AF_PPPOX" ,	x "AF_WANPIPE"  ,	x "AF_LLC"      , \
  x "27"       ,	x "28"          ,	x "AF_CAN"      , \
  x "AF_TIPC"  ,	x "AF_BLUETOOTH",	x "IUCV"        , \
  x "AF_RXRPC" ,	x "AF_ISDN"     ,	x "AF_PHONET"   , \
  x "AF_IEEE802154",	x "AF_CAIF"     ,	x "AF_ALG"      , \
  x "AF_NFC"   ,	x "AF_VSOCK"    ,	x "AF_KCM"      , \
  x "AF_QIPCRTR",	x "AF_SMC"      ,	x "AF_XDP"      , \
  x "AF_MAX"

static const char *const af_family_key_strings[AF_MAX+1] = {
	_sock_locks("sk_lock-")
};
static const char *const af_family_slock_key_strings[AF_MAX+1] = {
	_sock_locks("slock-")
};
static const char *const af_family_clock_key_strings[AF_MAX+1] = {
	_sock_locks("clock-")
};

static const char *const af_family_kern_key_strings[AF_MAX+1] = {
	_sock_locks("k-sk_lock-")
};
static const char *const af_family_kern_slock_key_strings[AF_MAX+1] = {
	_sock_locks("k-slock-")
};
static const char *const af_family_kern_clock_key_strings[AF_MAX+1] = {
	_sock_locks("k-clock-")
};
static const char *const af_family_rlock_key_strings[AF_MAX+1] = {
	_sock_locks("rlock-")
};
static const char *const af_family_wlock_key_strings[AF_MAX+1] = {
	_sock_locks("wlock-")
};
static const char *const af_family_elock_key_strings[AF_MAX+1] = {
	_sock_locks("elock-")
};
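/*
 * For reference (illustrative): _sock_locks("sk_lock-") expands to the
 * initializer "sk_lock-AF_UNSPEC", "sk_lock-AF_UNIX", "sk_lock-AF_INET",
 * ..., "sk_lock-AF_MAX", so af_family_key_strings[AF_INET] is the string
 * "sk_lock-AF_INET" that lockdep prints for IPv4 socket locks.
 */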
/*
 * sk_callback_lock and sk queues locking rules are per-address-family,
 * so split the lock classes by using a per-AF key:
 */
static struct lock_class_key af_callback_keys[AF_MAX];
static struct lock_class_key af_rlock_keys[AF_MAX];
static struct lock_class_key af_wlock_keys[AF_MAX];
static struct lock_class_key af_elock_keys[AF_MAX];
static struct lock_class_key af_kern_callback_keys[AF_MAX];
/* Run time adjustable parameters. */
__u32 sysctl_wmem_max __read_mostly = SK_WMEM_MAX;
EXPORT_SYMBOL(sysctl_wmem_max);
__u32 sysctl_rmem_max __read_mostly = SK_RMEM_MAX;
EXPORT_SYMBOL(sysctl_rmem_max);
__u32 sysctl_wmem_default __read_mostly = SK_WMEM_MAX;
__u32 sysctl_rmem_default __read_mostly = SK_RMEM_MAX;

/* Maximal space eaten by iovec or ancillary data plus some space */
int sysctl_optmem_max __read_mostly = sizeof(unsigned long)*(2*UIO_MAXIOV+512);
EXPORT_SYMBOL(sysctl_optmem_max);
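/*
 * Worked example (illustrative): with UIO_MAXIOV = 1024 and 8-byte
 * longs, the default optmem_max is 8 * (2 * 1024 + 512) = 20480 bytes,
 * i.e. 20 KB of option memory per socket on a 64-bit build.
 */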
int sysctl_tstamp_allow_data __read_mostly = 1;

DEFINE_STATIC_KEY_FALSE(memalloc_socks_key);
EXPORT_SYMBOL_GPL(memalloc_socks_key);
/**
 * sk_set_memalloc - sets %SOCK_MEMALLOC
 * @sk: socket to set it on
 *
 * Set %SOCK_MEMALLOC on a socket for access to emergency reserves.
 * It's the responsibility of the admin to adjust min_free_kbytes
 * to meet the requirements.
 */
void sk_set_memalloc(struct sock *sk)
{
	sock_set_flag(sk, SOCK_MEMALLOC);
	sk->sk_allocation |= __GFP_MEMALLOC;
	static_branch_inc(&memalloc_socks_key);
}
EXPORT_SYMBOL_GPL(sk_set_memalloc);

void sk_clear_memalloc(struct sock *sk)
{
	sock_reset_flag(sk, SOCK_MEMALLOC);
	sk->sk_allocation &= ~__GFP_MEMALLOC;
	static_branch_dec(&memalloc_socks_key);

	/*
	 * SOCK_MEMALLOC is allowed to ignore rmem limits to ensure forward
	 * progress of swapping. SOCK_MEMALLOC may be cleared while
	 * it has rmem allocations due to the last swapfile being deactivated
	 * but there is a risk that the socket is unusable due to exceeding
	 * the rmem limits. Reclaim the reserves and obey rmem limits again.
	 */
	sk_mem_reclaim(sk);
}
EXPORT_SYMBOL_GPL(sk_clear_memalloc);
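/*
 * Usage sketch (illustrative): a swap-over-network implementation would
 * mark its transport socket while a network swapfile is active and
 * unmark it when the swapfile goes away, roughly:
 *
 *	sk_set_memalloc(xprt_sk);	// swapon over the network
 *	...
 *	sk_clear_memalloc(xprt_sk);	// swapoff
 *
 * The xprt_sk naming is hypothetical; see sk_clear_memalloc() above for
 * why reclaim runs on the way out.
 */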
int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
	int ret;
	unsigned int noreclaim_flag;

	/* these should have been dropped before queueing */
	BUG_ON(!sock_flag(sk, SOCK_MEMALLOC));

	noreclaim_flag = memalloc_noreclaim_save();
	ret = sk->sk_backlog_rcv(sk, skb);
	memalloc_noreclaim_restore(noreclaim_flag);

	return ret;
}
EXPORT_SYMBOL(__sk_backlog_rcv);
static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen)
{
	struct timeval tv;

	if (optlen < sizeof(tv))
		return -EINVAL;
	if (copy_from_user(&tv, optval, sizeof(tv)))
		return -EFAULT;
	if (tv.tv_usec < 0 || tv.tv_usec >= USEC_PER_SEC)
		return -EDOM;

	if (tv.tv_sec < 0) {
		static int warned __read_mostly;

		*timeo_p = 0;
		if (warned < 10 && net_ratelimit()) {
			warned++;
			pr_info("%s: `%s' (pid %d) tries to set negative timeout\n",
				__func__, current->comm, task_pid_nr(current));
		}
		return 0;
	}
	*timeo_p = MAX_SCHEDULE_TIMEOUT;
	if (tv.tv_sec == 0 && tv.tv_usec == 0)
		return 0;
	if (tv.tv_sec < (MAX_SCHEDULE_TIMEOUT/HZ - 1))
		*timeo_p = tv.tv_sec * HZ + DIV_ROUND_UP(tv.tv_usec, USEC_PER_SEC / HZ);
	return 0;
}
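/*
 * Worked example (illustrative): with HZ = 100, a timeval of
 * { .tv_sec = 1, .tv_usec = 500000 } becomes
 * 1 * 100 + DIV_ROUND_UP(500000, 10000) = 150 jiffies, while a timeval
 * of { 0, 0 } leaves *timeo_p at MAX_SCHEDULE_TIMEOUT, i.e. block forever.
 */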
static void sock_warn_obsolete_bsdism(const char *name)
{
	static int warned;
	static char warncomm[TASK_COMM_LEN];

	if (strcmp(warncomm, current->comm) && warned < 5) {
		strcpy(warncomm, current->comm);
		pr_warn("process `%s' is using obsolete %s SO_BSDCOMPAT\n",
			warncomm, name);
		warned++;
	}
}
static bool sock_needs_netstamp(const struct sock *sk)
{
	switch (sk->sk_family) {
	case AF_UNSPEC:
	case AF_UNIX:
		return false;
	default:
		return true;
	}
}

static void sock_disable_timestamp(struct sock *sk, unsigned long flags)
{
	if (sk->sk_flags & flags) {
		sk->sk_flags &= ~flags;
		if (sock_needs_netstamp(sk) &&
		    !(sk->sk_flags & SK_FLAGS_TIMESTAMP))
			net_disable_timestamp();
	}
}
int __sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	unsigned long flags;
	struct sk_buff_head *list = &sk->sk_receive_queue;

	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
		atomic_inc(&sk->sk_drops);
		trace_sock_rcvqueue_full(sk, skb);
		return -ENOMEM;
	}

	if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
		atomic_inc(&sk->sk_drops);
		return -ENOBUFS;
	}

	skb->dev = NULL;
	skb_set_owner_r(skb, sk);

	/* we escape from rcu protected region, make sure we don't leak
	 * a norefcounted dst
	 */
	skb_dst_force(skb);

	spin_lock_irqsave(&list->lock, flags);
	sock_skb_set_dropcount(sk, skb);
	__skb_queue_tail(list, skb);
	spin_unlock_irqrestore(&list->lock, flags);

	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk);
	return 0;
}
EXPORT_SYMBOL(__sock_queue_rcv_skb);
int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	int err;

	err = sk_filter(sk, skb);
	if (err)
		return err;

	return __sock_queue_rcv_skb(sk, skb);
}
EXPORT_SYMBOL(sock_queue_rcv_skb);
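/*
 * Usage sketch (illustrative): a datagram protocol's receive handler
 * typically hands a matched skb to the socket layer like so:
 *
 *	static int my_proto_rcv(struct sock *sk, struct sk_buff *skb)
 *	{
 *		if (sock_queue_rcv_skb(sk, skb) < 0) {
 *			kfree_skb(skb);
 *			return NET_RX_DROP;
 *		}
 *		return NET_RX_SUCCESS;
 *	}
 *
 * my_proto_rcv is hypothetical; the pattern mirrors what UDP-style
 * protocols do: run the socket filter, charge receive memory, queue,
 * then wake the reader via sk_data_ready().
 */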
int __sk_receive_skb(struct sock *sk, struct sk_buff *skb,
		     const int nested, unsigned int trim_cap, bool refcounted)
{
	int rc = NET_RX_SUCCESS;

	if (sk_filter_trim_cap(sk, skb, trim_cap))
		goto discard_and_relse;

	skb->dev = NULL;

	if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) {
		atomic_inc(&sk->sk_drops);
		goto discard_and_relse;
	}
	if (nested)
		bh_lock_sock_nested(sk);
	else
		bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		/*
		 * trylock + unlock semantics:
		 */
		mutex_acquire(&sk->sk_lock.dep_map, 0, 1, _RET_IP_);

		rc = sk_backlog_rcv(sk, skb);

		mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
	} else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
		bh_unlock_sock(sk);
		atomic_inc(&sk->sk_drops);
		goto discard_and_relse;
	}

	bh_unlock_sock(sk);
out:
	if (refcounted)
		sock_put(sk);
	return rc;
discard_and_relse:
	kfree_skb(skb);
	goto out;
}
EXPORT_SYMBOL(__sk_receive_skb);
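/*
 * Note (illustrative): if the socket is currently owned by a process in
 * user context, the skb is parked on the backlog and processed later by
 * release_sock(); otherwise it is delivered immediately through
 * sk_backlog_rcv(). A protocol demux path would call it roughly as
 *
 *	return __sk_receive_skb(sk, skb, 1, sizeof(struct my_hdr), true);
 *
 * where my_hdr stands in for the protocol's own header type.
 */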
struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie)
{
	struct dst_entry *dst = __sk_dst_get(sk);

	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
		sk_tx_queue_clear(sk);
		sk->sk_dst_pending_confirm = 0;
		RCU_INIT_POINTER(sk->sk_dst_cache, NULL);
		dst_release(dst);
		return NULL;
	}

	return dst;
}
EXPORT_SYMBOL(__sk_dst_check);

struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie)
{
	struct dst_entry *dst = sk_dst_get(sk);

	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
		sk_dst_reset(sk);
		dst_release(dst);
		return NULL;
	}

	return dst;
}
EXPORT_SYMBOL(sk_dst_check);
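/*
 * Usage sketch (illustrative): output paths revalidate the cached route
 * before transmitting and fall back to a fresh lookup when the cache has
 * gone stale:
 *
 *	dst = sk_dst_check(sk, 0);
 *	if (!dst)
 *		dst = my_route_lookup(sk);	// hypothetical re-lookup
 *
 * The cookie lets a protocol detect routes invalidated since it last
 * stored them (IPv6, for example, passes its per-route cookie here).
 */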
static int sock_setbindtodevice(struct sock *sk, char __user *optval,
				int optlen)
{
	int ret = -ENOPROTOOPT;
#ifdef CONFIG_NETDEVICES
	struct net *net = sock_net(sk);
	char devname[IFNAMSIZ];
	int index;

	/* Sorry... */
	ret = -EPERM;
	if (!ns_capable(net->user_ns, CAP_NET_RAW))
		goto out;

	ret = -EINVAL;
	if (optlen < 0)
		goto out;

	/* Bind this socket to a particular device like "eth0",
	 * as specified in the passed interface name. If the
	 * name is "" or the option length is zero the socket
	 * is not bound.
	 */
	if (optlen > IFNAMSIZ - 1)
		optlen = IFNAMSIZ - 1;
	memset(devname, 0, sizeof(devname));

	ret = -EFAULT;
	if (copy_from_user(devname, optval, optlen))
		goto out;

	index = 0;
	if (devname[0] != '\0') {
		struct net_device *dev;

		rcu_read_lock();
		dev = dev_get_by_name_rcu(net, devname);
		if (dev)
			index = dev->ifindex;
		rcu_read_unlock();

		ret = -ENODEV;
		if (!dev)
			goto out;
	}

	lock_sock(sk);
	sk->sk_bound_dev_if = index;
	sk_dst_reset(sk);
	release_sock(sk);

	ret = 0;

out:
#endif

	return ret;
}
static int sock_getbindtodevice(struct sock *sk, char __user *optval,
				int __user *optlen, int len)
{
	int ret = -ENOPROTOOPT;
#ifdef CONFIG_NETDEVICES
	struct net *net = sock_net(sk);
	char devname[IFNAMSIZ];

	if (sk->sk_bound_dev_if == 0) {
		len = 0;
		goto zero;
	}

	ret = -EINVAL;
	if (len < IFNAMSIZ)
		goto out;

	ret = netdev_get_name(net, devname, sk->sk_bound_dev_if);
	if (ret)
		goto out;

	len = strlen(devname) + 1;

	ret = -EFAULT;
	if (copy_to_user(optval, devname, len))
		goto out;

zero:
	ret = -EFAULT;
	if (put_user(len, optlen))
		goto out;

	ret = 0;

out:
#endif

	return ret;
}
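/*
 * Userspace view (illustrative): binding a socket to "eth0" requires
 * CAP_NET_RAW and is done with a plain setsockopt:
 *
 *	setsockopt(fd, SOL_SOCKET, SO_BINDTODEVICE, "eth0", strlen("eth0") + 1);
 *
 * Passing an empty name ("" or zero length) unbinds the socket again.
 */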
static inline void sock_valbool_flag(struct sock *sk, int bit, int valbool)
{
	if (valbool)
		sock_set_flag(sk, bit);
	else
		sock_reset_flag(sk, bit);
}

bool sk_mc_loop(struct sock *sk)
{
	if (dev_recursion_level())
		return false;
	if (!sk)
		return true;
	switch (sk->sk_family) {
	case AF_INET:
		return inet_sk(sk)->mc_loop;
#if IS_ENABLED(CONFIG_IPV6)
	case AF_INET6:
		return inet6_sk(sk)->mc_loop;
#endif
	}
	WARN_ON(1);
	return true;
}
EXPORT_SYMBOL(sk_mc_loop);
/*
 * This is meant for all protocols to use and covers goings on
 * at the socket level. Everything here is generic.
 */

int sock_setsockopt(struct socket *sock, int level, int optname,
		    char __user *optval, unsigned int optlen)
{
	struct sock_txtime sk_txtime;
	struct sock *sk = sock->sk;
	int val;
	int valbool;
	struct linger ling;
	int ret = 0;

	/*
	 * Options without arguments
	 */

	if (optname == SO_BINDTODEVICE)
		return sock_setbindtodevice(sk, optval, optlen);

	if (optlen < sizeof(int))
		return -EINVAL;

	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	valbool = val ? 1 : 0;

	lock_sock(sk);

	switch (optname) {
	case SO_DEBUG:
		if (val && !capable(CAP_NET_ADMIN))
			ret = -EACCES;
		else
			sock_valbool_flag(sk, SOCK_DBG, valbool);
		break;
	case SO_REUSEADDR:
		sk->sk_reuse = (valbool ? SK_CAN_REUSE : SK_NO_REUSE);
		break;
	case SO_REUSEPORT:
		sk->sk_reuseport = valbool;
		break;
	case SO_TYPE:
	case SO_PROTOCOL:
	case SO_DOMAIN:
	case SO_ERROR:
		ret = -ENOPROTOOPT;
		break;
	case SO_DONTROUTE:
		sock_valbool_flag(sk, SOCK_LOCALROUTE, valbool);
		break;
	case SO_BROADCAST:
		sock_valbool_flag(sk, SOCK_BROADCAST, valbool);
		break;
	case SO_SNDBUF:
		/* Don't error on this; BSD doesn't, and if you think
		 * about it this is right. Otherwise apps have to
		 * play 'guess the biggest size' games. RCVBUF/SNDBUF
		 * are treated in BSD as hints.
		 */
		val = min_t(u32, val, sysctl_wmem_max);
set_sndbuf:
		sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
		sk->sk_sndbuf = max_t(int, val * 2, SOCK_MIN_SNDBUF);
		/* Wake up sending tasks if we upped the value. */
		sk->sk_write_space(sk);
		break;

	case SO_SNDBUFFORCE:
		if (!capable(CAP_NET_ADMIN)) {
			ret = -EPERM;
			break;
		}
		goto set_sndbuf;

	case SO_RCVBUF:
		/* Don't error on this; BSD doesn't, and if you think
		 * about it this is right. Otherwise apps have to
		 * play 'guess the biggest size' games. RCVBUF/SNDBUF
		 * are treated in BSD as hints.
		 */
		val = min_t(u32, val, sysctl_rmem_max);
set_rcvbuf:
		sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
		/*
		 * We double it on the way in to account for
		 * "struct sk_buff" etc. overhead. Applications
		 * assume that the SO_RCVBUF setting they make will
		 * allow that much actual data to be received on that
		 * socket.
		 *
		 * Applications are unaware that "struct sk_buff" and
		 * other overheads allocate from the receive buffer
		 * during socket buffer allocation.
		 *
		 * And after considering the possible alternatives,
		 * returning the value we actually used in getsockopt
		 * is the most desirable behavior.
		 */
		sk->sk_rcvbuf = max_t(int, val * 2, SOCK_MIN_RCVBUF);
		break;

	case SO_RCVBUFFORCE:
		if (!capable(CAP_NET_ADMIN)) {
			ret = -EPERM;
			break;
		}
		goto set_rcvbuf;
	case SO_KEEPALIVE:
		if (sk->sk_prot->keepalive)
			sk->sk_prot->keepalive(sk, valbool);
		sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool);
		break;

	case SO_OOBINLINE:
		sock_valbool_flag(sk, SOCK_URGINLINE, valbool);
		break;

	case SO_NO_CHECK:
		sk->sk_no_check_tx = valbool;
		break;

	case SO_PRIORITY:
		if ((val >= 0 && val <= 6) ||
		    ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
			sk->sk_priority = val;
		else
			ret = -EPERM;
		break;

	case SO_LINGER:
		if (optlen < sizeof(ling)) {
			ret = -EINVAL;	/* 1003.1g */
			break;
		}
		if (copy_from_user(&ling, optval, sizeof(ling))) {
			ret = -EFAULT;
			break;
		}
		if (!ling.l_onoff)
			sock_reset_flag(sk, SOCK_LINGER);
		else {
#if (BITS_PER_LONG == 32)
			if ((unsigned int)ling.l_linger >= MAX_SCHEDULE_TIMEOUT/HZ)
				sk->sk_lingertime = MAX_SCHEDULE_TIMEOUT;
			else
#endif
				sk->sk_lingertime = (unsigned int)ling.l_linger * HZ;
			sock_set_flag(sk, SOCK_LINGER);
		}
		break;

	case SO_BSDCOMPAT:
		sock_warn_obsolete_bsdism("setsockopt");
		break;

	case SO_PASSCRED:
		if (valbool)
			set_bit(SOCK_PASSCRED, &sock->flags);
		else
			clear_bit(SOCK_PASSCRED, &sock->flags);
		break;

	case SO_TIMESTAMP:
	case SO_TIMESTAMPNS:
		if (valbool) {
			if (optname == SO_TIMESTAMP)
				sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
			else
				sock_set_flag(sk, SOCK_RCVTSTAMPNS);
			sock_set_flag(sk, SOCK_RCVTSTAMP);
			sock_enable_timestamp(sk, SOCK_TIMESTAMP);
		} else {
			sock_reset_flag(sk, SOCK_RCVTSTAMP);
			sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
		}
		break;

	case SO_TIMESTAMPING:
		if (val & ~SOF_TIMESTAMPING_MASK) {
			ret = -EINVAL;
			break;
		}

		if (val & SOF_TIMESTAMPING_OPT_ID &&
		    !(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)) {
			if (sk->sk_protocol == IPPROTO_TCP &&
			    sk->sk_type == SOCK_STREAM) {
				if ((1 << sk->sk_state) &
				    (TCPF_CLOSE | TCPF_LISTEN)) {
					ret = -EINVAL;
					break;
				}
				sk->sk_tskey = tcp_sk(sk)->snd_una;
			} else {
				sk->sk_tskey = 0;
			}
		}

		if (val & SOF_TIMESTAMPING_OPT_STATS &&
		    !(val & SOF_TIMESTAMPING_OPT_TSONLY)) {
			ret = -EINVAL;
			break;
		}

		sk->sk_tsflags = val;
		if (val & SOF_TIMESTAMPING_RX_SOFTWARE)
			sock_enable_timestamp(sk,
					      SOCK_TIMESTAMPING_RX_SOFTWARE);
		else
			sock_disable_timestamp(sk,
					       (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE));
		break;

	case SO_RCVLOWAT:
		if (val < 0)
			val = INT_MAX;
		if (sock->ops->set_rcvlowat)
			ret = sock->ops->set_rcvlowat(sk, val);
		else
			sk->sk_rcvlowat = val ? : 1;
		break;

	case SO_RCVTIMEO:
		ret = sock_set_timeout(&sk->sk_rcvtimeo, optval, optlen);
		break;

	case SO_SNDTIMEO:
		ret = sock_set_timeout(&sk->sk_sndtimeo, optval, optlen);
		break;
	case SO_ATTACH_FILTER:
		ret = -EINVAL;
		if (optlen == sizeof(struct sock_fprog)) {
			struct sock_fprog fprog;

			ret = -EFAULT;
			if (copy_from_user(&fprog, optval, sizeof(fprog)))
				break;

			ret = sk_attach_filter(&fprog, sk);
		}
		break;

	case SO_ATTACH_BPF:
		ret = -EINVAL;
		if (optlen == sizeof(u32)) {
			u32 ufd;

			ret = -EFAULT;
			if (copy_from_user(&ufd, optval, sizeof(ufd)))
				break;

			ret = sk_attach_bpf(ufd, sk);
		}
		break;

	case SO_ATTACH_REUSEPORT_CBPF:
		ret = -EINVAL;
		if (optlen == sizeof(struct sock_fprog)) {
			struct sock_fprog fprog;

			ret = -EFAULT;
			if (copy_from_user(&fprog, optval, sizeof(fprog)))
				break;

			ret = sk_reuseport_attach_filter(&fprog, sk);
		}
		break;

	case SO_ATTACH_REUSEPORT_EBPF:
		ret = -EINVAL;
		if (optlen == sizeof(u32)) {
			u32 ufd;

			ret = -EFAULT;
			if (copy_from_user(&ufd, optval, sizeof(ufd)))
				break;

			ret = sk_reuseport_attach_bpf(ufd, sk);
		}
		break;

	case SO_DETACH_FILTER:
		ret = sk_detach_filter(sk);
		break;

	case SO_LOCK_FILTER:
		if (sock_flag(sk, SOCK_FILTER_LOCKED) && !valbool)
			ret = -EPERM;
		else
			sock_valbool_flag(sk, SOCK_FILTER_LOCKED, valbool);
		break;

	case SO_PASSSEC:
		if (valbool)
			set_bit(SOCK_PASSSEC, &sock->flags);
		else
			clear_bit(SOCK_PASSSEC, &sock->flags);
		break;
	case SO_MARK:
		if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
			ret = -EPERM;
		else
			sk->sk_mark = val;
		break;

	case SO_RXQ_OVFL:
		sock_valbool_flag(sk, SOCK_RXQ_OVFL, valbool);
		break;

	case SO_WIFI_STATUS:
		sock_valbool_flag(sk, SOCK_WIFI_STATUS, valbool);
		break;

	case SO_PEEK_OFF:
		if (sock->ops->set_peek_off)
			ret = sock->ops->set_peek_off(sk, val);
		else
			ret = -EOPNOTSUPP;
		break;

	case SO_NOFCS:
		sock_valbool_flag(sk, SOCK_NOFCS, valbool);
		break;

	case SO_SELECT_ERR_QUEUE:
		sock_valbool_flag(sk, SOCK_SELECT_ERR_QUEUE, valbool);
		break;

#ifdef CONFIG_NET_RX_BUSY_POLL
	case SO_BUSY_POLL:
		/* allow unprivileged users to decrease the value */
		if ((val > sk->sk_ll_usec) && !capable(CAP_NET_ADMIN))
			ret = -EPERM;
		else {
			if (val < 0)
				ret = -EINVAL;
			else
				sk->sk_ll_usec = val;
		}
		break;
#endif

	case SO_MAX_PACING_RATE:
		if (val != ~0U)
			cmpxchg(&sk->sk_pacing_status,
				SK_PACING_NONE,
				SK_PACING_NEEDED);
		sk->sk_max_pacing_rate = (val == ~0U) ? ~0UL : val;
		sk->sk_pacing_rate = min(sk->sk_pacing_rate,
					 sk->sk_max_pacing_rate);
		break;

	case SO_INCOMING_CPU:
		sk->sk_incoming_cpu = val;
		break;

	case SO_CNX_ADVICE:
		if (val == 1)
			dst_negative_advice(sk);
		break;

	case SO_ZEROCOPY:
		if (sk->sk_family == PF_INET || sk->sk_family == PF_INET6) {
			if (sk->sk_protocol != IPPROTO_TCP)
				ret = -ENOTSUPP;
		} else if (sk->sk_family != PF_RDS) {
			ret = -ENOTSUPP;
		}
		if (!ret) {
			if (val < 0 || val > 1)
				ret = -EINVAL;
			else
				sock_valbool_flag(sk, SOCK_ZEROCOPY, valbool);
		}
		break;
	case SO_TXTIME:
		if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) {
			ret = -EPERM;
		} else if (optlen != sizeof(struct sock_txtime)) {
			ret = -EINVAL;
		} else if (copy_from_user(&sk_txtime, optval,
					  sizeof(struct sock_txtime))) {
			ret = -EFAULT;
		} else if (sk_txtime.flags & ~SOF_TXTIME_FLAGS_MASK) {
			ret = -EINVAL;
		} else {
			sock_valbool_flag(sk, SOCK_TXTIME, true);
			sk->sk_clockid = sk_txtime.clockid;
			sk->sk_txtime_deadline_mode =
				!!(sk_txtime.flags & SOF_TXTIME_DEADLINE_MODE);
			sk->sk_txtime_report_errors =
				!!(sk_txtime.flags & SOF_TXTIME_REPORT_ERRORS);
		}
		break;

	default:
		ret = -ENOPROTOOPT;
		break;
	}
	release_sock(sk);
	return ret;
}
EXPORT_SYMBOL(sock_setsockopt);
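/*
 * Userspace view (illustrative): because SO_RCVBUF is doubled on the way
 * in to cover struct sk_buff overhead, reading the option back returns
 * twice the requested value (clamped to the sysctl limits):
 *
 *	int val = 65536;
 *	socklen_t len = sizeof(val);
 *	setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &val, sizeof(val));
 *	getsockopt(fd, SOL_SOCKET, SO_RCVBUF, &val, &len);
 *	// val is now 131072, assuming rmem_max allowed the request
 */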
static void cred_to_ucred(struct pid *pid, const struct cred *cred,
			  struct ucred *ucred)
{
	ucred->pid = pid_vnr(pid);
	ucred->uid = ucred->gid = -1;
	if (cred) {
		struct user_namespace *current_ns = current_user_ns();

		ucred->uid = from_kuid_munged(current_ns, cred->euid);
		ucred->gid = from_kgid_munged(current_ns, cred->egid);
	}
}

static int groups_to_user(gid_t __user *dst, const struct group_info *src)
{
	struct user_namespace *user_ns = current_user_ns();
	int i;

	for (i = 0; i < src->ngroups; i++)
		if (put_user(from_kgid_munged(user_ns, src->gid[i]), dst + i))
			return -EFAULT;

	return 0;
}
int sock_getsockopt(struct socket *sock, int level, int optname,
		    char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;

	union {
		int val;
		u64 val64;
		struct linger ling;
		struct timeval tm;
		struct sock_txtime txtime;
	} v;

	int lv = sizeof(int);
	int len;

	if (get_user(len, optlen))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	memset(&v, 0, sizeof(v));

	switch (optname) {
	case SO_DEBUG:
		v.val = sock_flag(sk, SOCK_DBG);
		break;

	case SO_DONTROUTE:
		v.val = sock_flag(sk, SOCK_LOCALROUTE);
		break;

	case SO_BROADCAST:
		v.val = sock_flag(sk, SOCK_BROADCAST);
		break;

	case SO_SNDBUF:
		v.val = sk->sk_sndbuf;
		break;

	case SO_RCVBUF:
		v.val = sk->sk_rcvbuf;
		break;

	case SO_REUSEADDR:
		v.val = sk->sk_reuse;
		break;

	case SO_REUSEPORT:
		v.val = sk->sk_reuseport;
		break;

	case SO_KEEPALIVE:
		v.val = sock_flag(sk, SOCK_KEEPOPEN);
		break;

	case SO_TYPE:
		v.val = sk->sk_type;
		break;

	case SO_PROTOCOL:
		v.val = sk->sk_protocol;
		break;

	case SO_DOMAIN:
		v.val = sk->sk_family;
		break;

	case SO_ERROR:
		v.val = -sock_error(sk);
		if (v.val == 0)
			v.val = xchg(&sk->sk_err_soft, 0);
		break;
	case SO_OOBINLINE:
		v.val = sock_flag(sk, SOCK_URGINLINE);
		break;

	case SO_NO_CHECK:
		v.val = sk->sk_no_check_tx;
		break;

	case SO_PRIORITY:
		v.val = sk->sk_priority;
		break;

	case SO_LINGER:
		lv		= sizeof(v.ling);
		v.ling.l_onoff	= sock_flag(sk, SOCK_LINGER);
		v.ling.l_linger	= sk->sk_lingertime / HZ;
		break;

	case SO_BSDCOMPAT:
		sock_warn_obsolete_bsdism("getsockopt");
		break;

	case SO_TIMESTAMP:
		v.val = sock_flag(sk, SOCK_RCVTSTAMP) &&
				!sock_flag(sk, SOCK_RCVTSTAMPNS);
		break;

	case SO_TIMESTAMPNS:
		v.val = sock_flag(sk, SOCK_RCVTSTAMPNS);
		break;

	case SO_TIMESTAMPING:
		v.val = sk->sk_tsflags;
		break;

	case SO_RCVTIMEO:
		lv = sizeof(struct timeval);
		if (sk->sk_rcvtimeo == MAX_SCHEDULE_TIMEOUT) {
			v.tm.tv_sec = 0;
			v.tm.tv_usec = 0;
		} else {
			v.tm.tv_sec = sk->sk_rcvtimeo / HZ;
			v.tm.tv_usec = ((sk->sk_rcvtimeo % HZ) * USEC_PER_SEC) / HZ;
		}
		break;

	case SO_SNDTIMEO:
		lv = sizeof(struct timeval);
		if (sk->sk_sndtimeo == MAX_SCHEDULE_TIMEOUT) {
			v.tm.tv_sec = 0;
			v.tm.tv_usec = 0;
		} else {
			v.tm.tv_sec = sk->sk_sndtimeo / HZ;
			v.tm.tv_usec = ((sk->sk_sndtimeo % HZ) * USEC_PER_SEC) / HZ;
		}
		break;

	case SO_RCVLOWAT:
		v.val = sk->sk_rcvlowat;
		break;

	case SO_SNDLOWAT:
		v.val = 1;
		break;

	case SO_PASSCRED:
		v.val = !!test_bit(SOCK_PASSCRED, &sock->flags);
		break;

	case SO_PEERCRED:
	{
		struct ucred peercred;

		if (len > sizeof(peercred))
			len = sizeof(peercred);
		cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
		if (copy_to_user(optval, &peercred, len))
			return -EFAULT;
		goto lenout;
	}

	case SO_PEERGROUPS:
	{
		int ret, n;

		if (!sk->sk_peer_cred)
			return -ENODATA;

		n = sk->sk_peer_cred->group_info->ngroups;
		if (len < n * sizeof(gid_t)) {
			len = n * sizeof(gid_t);
			return put_user(len, optlen) ? -EFAULT : -ERANGE;
		}
		len = n * sizeof(gid_t);

		ret = groups_to_user((gid_t __user *)optval,
				     sk->sk_peer_cred->group_info);
		if (ret)
			return ret;
		goto lenout;
	}

	case SO_PEERNAME:
	{
		char address[128];

		lv = sock->ops->getname(sock, (struct sockaddr *)address, 2);
		if (lv < 0)
			return -ENOTCONN;
		if (lv < len)
			return -EINVAL;
		if (copy_to_user(optval, address, len))
			return -EFAULT;
		goto lenout;
	}
	/* Dubious BSD thing... Probably nobody even uses it, but
	 * the UNIX standard wants it for whatever reason... -DaveM
	 */
	case SO_ACCEPTCONN:
		v.val = sk->sk_state == TCP_LISTEN;
		break;

	case SO_PASSSEC:
		v.val = !!test_bit(SOCK_PASSSEC, &sock->flags);
		break;

	case SO_PEERSEC:
		return security_socket_getpeersec_stream(sock, optval, optlen, len);

	case SO_MARK:
		v.val = sk->sk_mark;
		break;

	case SO_RXQ_OVFL:
		v.val = sock_flag(sk, SOCK_RXQ_OVFL);
		break;

	case SO_WIFI_STATUS:
		v.val = sock_flag(sk, SOCK_WIFI_STATUS);
		break;

	case SO_PEEK_OFF:
		if (!sock->ops->set_peek_off)
			return -EOPNOTSUPP;

		v.val = sk->sk_peek_off;
		break;

	case SO_NOFCS:
		v.val = sock_flag(sk, SOCK_NOFCS);
		break;

	case SO_BINDTODEVICE:
		return sock_getbindtodevice(sk, optval, optlen, len);

	case SO_GET_FILTER:
		len = sk_get_filter(sk, (struct sock_filter __user *)optval, len);
		if (len < 0)
			return len;

		goto lenout;

	case SO_LOCK_FILTER:
		v.val = sock_flag(sk, SOCK_FILTER_LOCKED);
		break;

	case SO_BPF_EXTENSIONS:
		v.val = bpf_tell_extensions();
		break;

	case SO_SELECT_ERR_QUEUE:
		v.val = sock_flag(sk, SOCK_SELECT_ERR_QUEUE);
		break;

#ifdef CONFIG_NET_RX_BUSY_POLL
	case SO_BUSY_POLL:
		v.val = sk->sk_ll_usec;
		break;
#endif

	case SO_MAX_PACING_RATE:
		/* 32bit version */
		v.val = min_t(unsigned long, sk->sk_max_pacing_rate, ~0U);
		break;

	case SO_INCOMING_CPU:
		v.val = sk->sk_incoming_cpu;
		break;

	case SO_MEMINFO:
	{
		u32 meminfo[SK_MEMINFO_VARS];

		if (get_user(len, optlen))
			return -EFAULT;

		sk_get_meminfo(sk, meminfo);

		len = min_t(unsigned int, len, sizeof(meminfo));
		if (copy_to_user(optval, &meminfo, len))
			return -EFAULT;

		goto lenout;
	}

#ifdef CONFIG_NET_RX_BUSY_POLL
	case SO_INCOMING_NAPI_ID:
		v.val = READ_ONCE(sk->sk_napi_id);

		/* aggregate non-NAPI IDs down to 0 */
		if (v.val < MIN_NAPI_ID)
			v.val = 0;
		break;
#endif

	case SO_COOKIE:
		lv = sizeof(u64);
		if (len < lv)
			return -EINVAL;
		v.val64 = sock_gen_cookie(sk);
		break;

	case SO_ZEROCOPY:
		v.val = sock_flag(sk, SOCK_ZEROCOPY);
		break;

	case SO_TXTIME:
		lv = sizeof(v.txtime);
		v.txtime.clockid = sk->sk_clockid;
		v.txtime.flags |= sk->sk_txtime_deadline_mode ?
				  SOF_TXTIME_DEADLINE_MODE : 0;
		v.txtime.flags |= sk->sk_txtime_report_errors ?
				  SOF_TXTIME_REPORT_ERRORS : 0;
		break;

	default:
		/* We implement the SO_SNDLOWAT etc to not be settable
		 * (1003.1g 7).
		 */
		return -ENOPROTOOPT;
	}

	if (len > lv)
		len = lv;
	if (copy_to_user(optval, &v, len))
		return -EFAULT;
lenout:
	if (put_user(len, optlen))
		return -EFAULT;
	return 0;
}
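/*
 * Userspace view (illustrative): SO_PEERCRED on a connected AF_UNIX
 * socket returns the peer's pid/uid/gid, translated into the caller's
 * namespaces by cred_to_ucred() above:
 *
 *	struct ucred peer;
 *	socklen_t len = sizeof(peer);
 *	if (getsockopt(fd, SOL_SOCKET, SO_PEERCRED, &peer, &len) == 0)
 *		printf("peer pid=%d uid=%d\n", peer.pid, peer.uid);
 */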
/*
 * Initialize an sk_lock.
 *
 * (We also register the sk_lock with the lock validator.)
 */
static inline void sock_lock_init(struct sock *sk)
{
	if (sk->sk_kern_sock)
		sock_lock_init_class_and_name(
			sk,
			af_family_kern_slock_key_strings[sk->sk_family],
			af_family_kern_slock_keys + sk->sk_family,
			af_family_kern_key_strings[sk->sk_family],
			af_family_kern_keys + sk->sk_family);
	else
		sock_lock_init_class_and_name(
			sk,
			af_family_slock_key_strings[sk->sk_family],
			af_family_slock_keys + sk->sk_family,
			af_family_key_strings[sk->sk_family],
			af_family_keys + sk->sk_family);
}
/*
 * Copy all fields from osk to nsk but nsk->sk_refcnt must not change yet,
 * even temporarily, because of RCU lookups. sk_node should also be left as-is.
 * We must not copy fields between sk_dontcopy_begin and sk_dontcopy_end.
 */
static void sock_copy(struct sock *nsk, const struct sock *osk)
{
#ifdef CONFIG_SECURITY_NETWORK
	void *sptr = nsk->sk_security;
#endif
	memcpy(nsk, osk, offsetof(struct sock, sk_dontcopy_begin));

	memcpy(&nsk->sk_dontcopy_end, &osk->sk_dontcopy_end,
	       osk->sk_prot->obj_size - offsetof(struct sock, sk_dontcopy_end));

#ifdef CONFIG_SECURITY_NETWORK
	nsk->sk_security = sptr;
	security_sk_clone(osk, nsk);
#endif
}
static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority,
				  int family)
{
	struct sock *sk;
	struct kmem_cache *slab;

	slab = prot->slab;
	if (slab != NULL) {
		sk = kmem_cache_alloc(slab, priority & ~__GFP_ZERO);
		if (!sk)
			return sk;
		if (priority & __GFP_ZERO)
			sk_prot_clear_nulls(sk, prot->obj_size);
	} else
		sk = kmalloc(prot->obj_size, priority);

	if (sk != NULL) {
		if (security_sk_alloc(sk, family, priority))
			goto out_free;

		if (!try_module_get(prot->owner))
			goto out_free_sec;
		sk_tx_queue_clear(sk);
	}

	return sk;

out_free_sec:
	security_sk_free(sk);
out_free:
	if (slab != NULL)
		kmem_cache_free(slab, sk);
	else
		kfree(sk);
	return NULL;
}

static void sk_prot_free(struct proto *prot, struct sock *sk)
{
	struct kmem_cache *slab;
	struct module *owner;

	owner = prot->owner;
	slab = prot->slab;

	cgroup_sk_free(&sk->sk_cgrp_data);
	mem_cgroup_sk_free(sk);
	security_sk_free(sk);
	if (slab != NULL)
		kmem_cache_free(slab, sk);
	else
		kfree(sk);
	module_put(owner);
}
/**
 * sk_alloc - All socket objects are allocated here
 * @net: the applicable net namespace
 * @family: protocol family
 * @priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
 * @prot: struct proto associated with this new sock instance
 * @kern: is this to be a kernel socket?
 */
struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
		      struct proto *prot, int kern)
{
	struct sock *sk;

	sk = sk_prot_alloc(prot, priority | __GFP_ZERO, family);
	if (sk) {
		sk->sk_family = family;
		/*
		 * See comment in struct sock definition to understand
		 * why we need sk_prot_creator -acme
		 */
		sk->sk_prot = sk->sk_prot_creator = prot;
		sk->sk_kern_sock = kern;
		sock_lock_init(sk);
		sk->sk_net_refcnt = kern ? 0 : 1;
		if (likely(sk->sk_net_refcnt)) {
			get_net(net);
			sock_inuse_add(net, 1);
		}

		sock_net_set(sk, net);
		refcount_set(&sk->sk_wmem_alloc, 1);

		mem_cgroup_sk_alloc(sk);
		cgroup_sk_alloc(&sk->sk_cgrp_data);
		sock_update_classid(&sk->sk_cgrp_data);
		sock_update_netprioidx(&sk->sk_cgrp_data);
	}

	return sk;
}
EXPORT_SYMBOL(sk_alloc);
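/*
 * Usage sketch (illustrative): a protocol's create hook allocates its
 * sock with sk_alloc() and then fills in the generic fields, along the
 * lines of
 *
 *	sk = sk_alloc(net, PF_INET, GFP_KERNEL, &my_proto, kern);
 *	if (!sk)
 *		return -ENOBUFS;
 *	sock_init_data(sock, sk);
 *
 * my_proto is a stand-in for the protocol's struct proto. Note that
 * userspace sockets (kern == 0) pin their network namespace here.
 */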
/* Sockets having SOCK_RCU_FREE will call this function after one RCU
 * grace period. This is the case for UDP sockets and TCP listeners.
 */
static void __sk_destruct(struct rcu_head *head)
{
	struct sock *sk = container_of(head, struct sock, sk_rcu);
	struct sk_filter *filter;

	if (sk->sk_destruct)
		sk->sk_destruct(sk);

	filter = rcu_dereference_check(sk->sk_filter,
				       refcount_read(&sk->sk_wmem_alloc) == 0);
	if (filter) {
		sk_filter_uncharge(sk, filter);
		RCU_INIT_POINTER(sk->sk_filter, NULL);
	}
	if (rcu_access_pointer(sk->sk_reuseport_cb))
		reuseport_detach_sock(sk);

	sock_disable_timestamp(sk, SK_FLAGS_TIMESTAMP);

	if (atomic_read(&sk->sk_omem_alloc))
		pr_debug("%s: optmem leakage (%d bytes) detected\n",
			 __func__, atomic_read(&sk->sk_omem_alloc));

	if (sk->sk_frag.page) {
		put_page(sk->sk_frag.page);
		sk->sk_frag.page = NULL;
	}

	if (sk->sk_peer_cred)
		put_cred(sk->sk_peer_cred);
	put_pid(sk->sk_peer_pid);
	if (likely(sk->sk_net_refcnt))
		put_net(sock_net(sk));
	sk_prot_free(sk->sk_prot_creator, sk);
}

void sk_destruct(struct sock *sk)
{
	if (sock_flag(sk, SOCK_RCU_FREE))
		call_rcu(&sk->sk_rcu, __sk_destruct);
	else
		__sk_destruct(&sk->sk_rcu);
}
  1362. static void __sk_free(struct sock *sk)
  1363. {
  1364. if (likely(sk->sk_net_refcnt))
  1365. sock_inuse_add(sock_net(sk), -1);
  1366. if (unlikely(sk->sk_net_refcnt && sock_diag_has_destroy_listeners(sk)))
  1367. sock_diag_broadcast_destroy(sk);
  1368. else
  1369. sk_destruct(sk);
  1370. }
  1371. void sk_free(struct sock *sk)
  1372. {
  1373. /*
  1374. * We subtract one from sk_wmem_alloc and can know if
  1375. * some packets are still in some tx queue.
  1376. * If not null, sock_wfree() will call __sk_free(sk) later
  1377. */
  1378. if (refcount_dec_and_test(&sk->sk_wmem_alloc))
  1379. __sk_free(sk);
  1380. }
  1381. EXPORT_SYMBOL(sk_free);
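
/* Example (illustrative, assumed caller pattern): sk_free() only drops the
 * "one" that sk_alloc() put in sk_wmem_alloc, so a socket with in-flight
 * tx skbs survives until the last sock_wfree():
 *
 *	skb_set_owner_w(skb, sk);	// sk_wmem_alloc += skb->truesize
 *	sk_free(sk);			// sk_wmem_alloc -= 1, sock stays
 *	kfree_skb(skb);			// sock_wfree() drops the rest, frees sk
 */
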
static void sk_init_common(struct sock *sk)
{
	skb_queue_head_init(&sk->sk_receive_queue);
	skb_queue_head_init(&sk->sk_write_queue);
	skb_queue_head_init(&sk->sk_error_queue);

	rwlock_init(&sk->sk_callback_lock);
	lockdep_set_class_and_name(&sk->sk_receive_queue.lock,
				   af_rlock_keys + sk->sk_family,
				   af_family_rlock_key_strings[sk->sk_family]);
	lockdep_set_class_and_name(&sk->sk_write_queue.lock,
				   af_wlock_keys + sk->sk_family,
				   af_family_wlock_key_strings[sk->sk_family]);
	lockdep_set_class_and_name(&sk->sk_error_queue.lock,
				   af_elock_keys + sk->sk_family,
				   af_family_elock_key_strings[sk->sk_family]);
	lockdep_set_class_and_name(&sk->sk_callback_lock,
				   af_callback_keys + sk->sk_family,
				   af_family_clock_key_strings[sk->sk_family]);
}

/**
 * sk_clone_lock - clone a socket, and lock its clone
 * @sk: the socket to clone
 * @priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
 *
 * Caller must unlock socket even in error path (bh_unlock_sock(newsk))
 */
struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
{
	struct sock *newsk;
	bool is_charged = true;

	newsk = sk_prot_alloc(sk->sk_prot, priority, sk->sk_family);
	if (newsk != NULL) {
		struct sk_filter *filter;

		sock_copy(newsk, sk);

		newsk->sk_prot_creator = sk->sk_prot;

		/* SANITY */
		if (likely(newsk->sk_net_refcnt))
			get_net(sock_net(newsk));
		sk_node_init(&newsk->sk_node);
		sock_lock_init(newsk);
		bh_lock_sock(newsk);
		newsk->sk_backlog.head = newsk->sk_backlog.tail = NULL;
		newsk->sk_backlog.len = 0;

		atomic_set(&newsk->sk_rmem_alloc, 0);
		/*
		 * sk_wmem_alloc is set to one (see sk_free() and sock_wfree())
		 */
		refcount_set(&newsk->sk_wmem_alloc, 1);
		atomic_set(&newsk->sk_omem_alloc, 0);
		sk_init_common(newsk);

		newsk->sk_dst_cache = NULL;
		newsk->sk_dst_pending_confirm = 0;
		newsk->sk_wmem_queued = 0;
		newsk->sk_forward_alloc = 0;
		atomic_set(&newsk->sk_drops, 0);
		newsk->sk_send_head = NULL;
		newsk->sk_userlocks = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;
		atomic_set(&newsk->sk_zckey, 0);

		sock_reset_flag(newsk, SOCK_DONE);
		mem_cgroup_sk_alloc(newsk);
		cgroup_sk_alloc(&newsk->sk_cgrp_data);

		rcu_read_lock();
		filter = rcu_dereference(sk->sk_filter);
		if (filter != NULL)
			/* though it's an empty new sock, the charging may fail
			 * if sysctl_optmem_max was changed between creation of
			 * the original socket and cloning
			 */
			is_charged = sk_filter_charge(newsk, filter);
		RCU_INIT_POINTER(newsk->sk_filter, filter);
		rcu_read_unlock();

		if (unlikely(!is_charged || xfrm_sk_clone_policy(newsk, sk))) {
			/* We need to make sure that we don't uncharge the new
			 * socket if we couldn't charge it in the first place,
			 * as otherwise we uncharge the parent's filter.
			 */
			if (!is_charged)
				RCU_INIT_POINTER(newsk->sk_filter, NULL);
			sk_free_unlock_clone(newsk);
			newsk = NULL;
			goto out;
		}
		RCU_INIT_POINTER(newsk->sk_reuseport_cb, NULL);

		newsk->sk_err = 0;
		newsk->sk_err_soft = 0;
		newsk->sk_priority = 0;
		newsk->sk_incoming_cpu = raw_smp_processor_id();
		atomic64_set(&newsk->sk_cookie, 0);
		if (likely(newsk->sk_net_refcnt))
			sock_inuse_add(sock_net(newsk), 1);

		/*
		 * Before updating sk_refcnt, we must commit prior changes to memory
		 * (Documentation/RCU/rculist_nulls.txt for details)
		 */
		smp_wmb();
		refcount_set(&newsk->sk_refcnt, 2);

		/*
		 * Increment the counter in the same struct proto as the master
		 * sock (sk_refcnt_debug_inc uses newsk->sk_prot->socks, that
		 * is the same as sk->sk_prot->socks, as this field was copied
		 * with memcpy).
		 *
		 * This _changes_ the previous behaviour, where
		 * tcp_create_openreq_child always was incrementing the
		 * equivalent to tcp_prot->socks (inet_sock_nr), so this has
		 * to be taken into account in all callers. -acme
		 */
		sk_refcnt_debug_inc(newsk);
		sk_set_socket(newsk, NULL);
		newsk->sk_wq = NULL;

		if (newsk->sk_prot->sockets_allocated)
			sk_sockets_allocated_inc(newsk);

		if (sock_needs_netstamp(sk) &&
		    newsk->sk_flags & SK_FLAGS_TIMESTAMP)
			net_enable_timestamp();
	}
out:
	return newsk;
}
EXPORT_SYMBOL_GPL(sk_clone_lock);
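
/* Example (illustrative, assumed caller): TCP creates child sockets this
 * way from a listener; the clone comes back bh-locked with sk_refcnt == 2,
 * and the caller must unlock it itself:
 *
 *	newsk = sk_clone_lock(sk, GFP_ATOMIC);
 *	if (newsk) {
 *		...			// protocol-specific child setup
 *		bh_unlock_sock(newsk);
 *	}
 */
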
void sk_free_unlock_clone(struct sock *sk)
{
	/* It is still a raw copy of the parent, so invalidate
	 * its destructor and do a plain sk_free()
	 */
	sk->sk_destruct = NULL;
	bh_unlock_sock(sk);
	sk_free(sk);
}
EXPORT_SYMBOL_GPL(sk_free_unlock_clone);

void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
{
	u32 max_segs = 1;

	sk_dst_set(sk, dst);
	sk->sk_route_caps = dst->dev->features | sk->sk_route_forced_caps;
	if (sk->sk_route_caps & NETIF_F_GSO)
		sk->sk_route_caps |= NETIF_F_GSO_SOFTWARE;
	sk->sk_route_caps &= ~sk->sk_route_nocaps;
	if (sk_can_gso(sk)) {
		if (dst->header_len && !xfrm_dst_offload_ok(dst)) {
			sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
		} else {
			sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
			sk->sk_gso_max_size = dst->dev->gso_max_size;
			max_segs = max_t(u32, dst->dev->gso_max_segs, 1);
		}
	}
	sk->sk_gso_max_segs = max_segs;
}
EXPORT_SYMBOL_GPL(sk_setup_caps);

/*
 * Simple resource managers for sockets.
 */

/*
 * Write buffer destructor automatically called from kfree_skb.
 */
void sock_wfree(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	unsigned int len = skb->truesize;

	if (!sock_flag(sk, SOCK_USE_WRITE_QUEUE)) {
		/*
		 * Keep a reference on sk_wmem_alloc; it will be released
		 * after the sk_write_space() call.
		 */
		WARN_ON(refcount_sub_and_test(len - 1, &sk->sk_wmem_alloc));
		sk->sk_write_space(sk);
		len = 1;
	}
	/*
	 * If sk_wmem_alloc reaches 0, we must finish what sk_free()
	 * could not do because of in-flight packets.
	 */
	if (refcount_sub_and_test(len, &sk->sk_wmem_alloc))
		__sk_free(sk);
}
EXPORT_SYMBOL(sock_wfree);

/* This variant of sock_wfree() is used by TCP,
 * since it sets SOCK_USE_WRITE_QUEUE.
 */
void __sock_wfree(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;

	if (refcount_sub_and_test(skb->truesize, &sk->sk_wmem_alloc))
		__sk_free(sk);
}

void skb_set_owner_w(struct sk_buff *skb, struct sock *sk)
{
	skb_orphan(skb);
	skb->sk = sk;
#ifdef CONFIG_INET
	if (unlikely(!sk_fullsock(sk))) {
		skb->destructor = sock_edemux;
		sock_hold(sk);
		return;
	}
#endif
	skb->destructor = sock_wfree;
	skb_set_hash_from_sk(skb, sk);
	/*
	 * We used to take a refcount on sk, but the following operation
	 * is enough to guarantee sk_free() won't free this sock until
	 * all in-flight packets are completed.
	 */
	refcount_add(skb->truesize, &sk->sk_wmem_alloc);
}
EXPORT_SYMBOL(skb_set_owner_w);
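
/* Example (illustrative sketch): a transmit path charges an skb to the
 * socket's write allowance before handing it to a device:
 *
 *	skb = alloc_skb(len, GFP_KERNEL);
 *	if (!skb)
 *		return -ENOBUFS;
 *	skb_set_owner_w(skb, sk);	// destructor = sock_wfree
 *	dev_queue_xmit(skb);		// sock_wfree() runs at TX completion
 */
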
/* This helper is used by netem, as it can hold packets in its
 * delay queue. We want to allow the owner socket to send more
 * packets, as if they were already TX completed by a typical driver.
 * But we also want to keep skb->sk set because some packet schedulers
 * rely on it (sch_fq for example).
 */
void skb_orphan_partial(struct sk_buff *skb)
{
	if (skb_is_tcp_pure_ack(skb))
		return;

	if (skb->destructor == sock_wfree
#ifdef CONFIG_INET
	    || skb->destructor == tcp_wfree
#endif
		) {
		struct sock *sk = skb->sk;

		if (refcount_inc_not_zero(&sk->sk_refcnt)) {
			WARN_ON(refcount_sub_and_test(skb->truesize, &sk->sk_wmem_alloc));
			skb->destructor = sock_efree;
		}
	} else {
		skb_orphan(skb);
	}
}
EXPORT_SYMBOL(skb_orphan_partial);

/*
 * Read buffer destructor automatically called from kfree_skb.
 */
void sock_rfree(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	unsigned int len = skb->truesize;

	atomic_sub(len, &sk->sk_rmem_alloc);
	sk_mem_uncharge(sk, len);
}
EXPORT_SYMBOL(sock_rfree);

/*
 * Buffer destructor for skbs that are not used directly in read or write
 * path, e.g. for error handler skbs. Automatically called from kfree_skb.
 */
void sock_efree(struct sk_buff *skb)
{
	sock_put(skb->sk);
}
EXPORT_SYMBOL(sock_efree);

kuid_t sock_i_uid(struct sock *sk)
{
	kuid_t uid;

	read_lock_bh(&sk->sk_callback_lock);
	uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : GLOBAL_ROOT_UID;
	read_unlock_bh(&sk->sk_callback_lock);
	return uid;
}
EXPORT_SYMBOL(sock_i_uid);

unsigned long sock_i_ino(struct sock *sk)
{
	unsigned long ino;

	read_lock_bh(&sk->sk_callback_lock);
	ino = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_ino : 0;
	read_unlock_bh(&sk->sk_callback_lock);
	return ino;
}
EXPORT_SYMBOL(sock_i_ino);

/*
 * Allocate an skb from the socket's send buffer.
 */
struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
			     gfp_t priority)
{
	if (force || refcount_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
		struct sk_buff *skb = alloc_skb(size, priority);

		if (skb) {
			skb_set_owner_w(skb, sk);
			return skb;
		}
	}
	return NULL;
}
EXPORT_SYMBOL(sock_wmalloc);

static void sock_ofree(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;

	atomic_sub(skb->truesize, &sk->sk_omem_alloc);
}

struct sk_buff *sock_omalloc(struct sock *sk, unsigned long size,
			     gfp_t priority)
{
	struct sk_buff *skb;

	/* small safe race: SKB_TRUESIZE may differ from final skb->truesize */
	if (atomic_read(&sk->sk_omem_alloc) + SKB_TRUESIZE(size) >
	    sysctl_optmem_max)
		return NULL;

	skb = alloc_skb(size, priority);
	if (!skb)
		return NULL;

	atomic_add(skb->truesize, &sk->sk_omem_alloc);
	skb->sk = sk;
	skb->destructor = sock_ofree;
	return skb;
}

/*
 * Allocate a memory block from the socket's option memory buffer.
 */
void *sock_kmalloc(struct sock *sk, int size, gfp_t priority)
{
	if ((unsigned int)size <= sysctl_optmem_max &&
	    atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
		void *mem;
		/* First do the add, to avoid the race if kmalloc
		 * might sleep.
		 */
		atomic_add(size, &sk->sk_omem_alloc);
		mem = kmalloc(size, priority);
		if (mem)
			return mem;
		atomic_sub(size, &sk->sk_omem_alloc);
	}
	return NULL;
}
EXPORT_SYMBOL(sock_kmalloc);
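
/* Example (illustrative sketch): option memory must be released with the
 * matching size so sk_omem_alloc stays balanced:
 *
 *	void *opt = sock_kmalloc(sk, optlen, GFP_KERNEL);
 *	if (!opt)
 *		return -ENOBUFS;
 *	...				// use the buffer
 *	sock_kfree_s(sk, opt, optlen);
 */
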
/* Free an option memory block. Note, we actually want the inline
 * here as this allows gcc to detect the nullify and fold away the
 * condition entirely.
 */
static inline void __sock_kfree_s(struct sock *sk, void *mem, int size,
				  const bool nullify)
{
	if (WARN_ON_ONCE(!mem))
		return;
	if (nullify)
		kzfree(mem);
	else
		kfree(mem);
	atomic_sub(size, &sk->sk_omem_alloc);
}

void sock_kfree_s(struct sock *sk, void *mem, int size)
{
	__sock_kfree_s(sk, mem, size, false);
}
EXPORT_SYMBOL(sock_kfree_s);

void sock_kzfree_s(struct sock *sk, void *mem, int size)
{
	__sock_kfree_s(sk, mem, size, true);
}
EXPORT_SYMBOL(sock_kzfree_s);

/* It is almost wait_for_tcp_memory minus release_sock/lock_sock.
 * I think these locks should be removed for datagram sockets.
 */
static long sock_wait_for_wmem(struct sock *sk, long timeo)
{
	DEFINE_WAIT(wait);

	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
	for (;;) {
		if (!timeo)
			break;
		if (signal_pending(current))
			break;
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
		if (refcount_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf)
			break;
		if (sk->sk_shutdown & SEND_SHUTDOWN)
			break;
		if (sk->sk_err)
			break;
		timeo = schedule_timeout(timeo);
	}
	finish_wait(sk_sleep(sk), &wait);
	return timeo;
}

/*
 * Generic send/receive buffer handlers
 */
struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
				     unsigned long data_len, int noblock,
				     int *errcode, int max_page_order)
{
	struct sk_buff *skb;
	long timeo;
	int err;

	timeo = sock_sndtimeo(sk, noblock);
	for (;;) {
		err = sock_error(sk);
		if (err != 0)
			goto failure;

		err = -EPIPE;
		if (sk->sk_shutdown & SEND_SHUTDOWN)
			goto failure;

		if (sk_wmem_alloc_get(sk) < sk->sk_sndbuf)
			break;

		sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		err = -EAGAIN;
		if (!timeo)
			goto failure;
		if (signal_pending(current))
			goto interrupted;
		timeo = sock_wait_for_wmem(sk, timeo);
	}
	skb = alloc_skb_with_frags(header_len, data_len, max_page_order,
				   errcode, sk->sk_allocation);
	if (skb)
		skb_set_owner_w(skb, sk);
	return skb;

interrupted:
	err = sock_intr_errno(timeo);
failure:
	*errcode = err;
	return NULL;
}
EXPORT_SYMBOL(sock_alloc_send_pskb);

struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
				    int noblock, int *errcode)
{
	return sock_alloc_send_pskb(sk, size, 0, noblock, errcode, 0);
}
EXPORT_SYMBOL(sock_alloc_send_skb);
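
/* Example (illustrative, assumed datagram sendmsg path): block until the
 * send buffer has room, then build and charge the skb in one call:
 *
 *	skb = sock_alloc_send_skb(sk, hlen + len,
 *				  msg->msg_flags & MSG_DONTWAIT, &err);
 *	if (!skb)
 *		return err;	// -EAGAIN, -EPIPE, sk_err, or signal
 */
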
int __sock_cmsg_send(struct sock *sk, struct msghdr *msg, struct cmsghdr *cmsg,
		     struct sockcm_cookie *sockc)
{
	u32 tsflags;

	switch (cmsg->cmsg_type) {
	case SO_MARK:
		if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
			return -EPERM;
		if (cmsg->cmsg_len != CMSG_LEN(sizeof(u32)))
			return -EINVAL;
		sockc->mark = *(u32 *)CMSG_DATA(cmsg);
		break;
	case SO_TIMESTAMPING:
		if (cmsg->cmsg_len != CMSG_LEN(sizeof(u32)))
			return -EINVAL;

		tsflags = *(u32 *)CMSG_DATA(cmsg);
		if (tsflags & ~SOF_TIMESTAMPING_TX_RECORD_MASK)
			return -EINVAL;

		sockc->tsflags &= ~SOF_TIMESTAMPING_TX_RECORD_MASK;
		sockc->tsflags |= tsflags;
		break;
	case SCM_TXTIME:
		if (!sock_flag(sk, SOCK_TXTIME))
			return -EINVAL;
		if (cmsg->cmsg_len != CMSG_LEN(sizeof(u64)))
			return -EINVAL;
		sockc->transmit_time = get_unaligned((u64 *)CMSG_DATA(cmsg));
		break;
	/* SCM_RIGHTS and SCM_CREDENTIALS are semantically in SOL_UNIX. */
	case SCM_RIGHTS:
	case SCM_CREDENTIALS:
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
EXPORT_SYMBOL(__sock_cmsg_send);

int sock_cmsg_send(struct sock *sk, struct msghdr *msg,
		   struct sockcm_cookie *sockc)
{
	struct cmsghdr *cmsg;
	int ret;

	for_each_cmsghdr(cmsg, msg) {
		if (!CMSG_OK(msg, cmsg))
			return -EINVAL;
		if (cmsg->cmsg_level != SOL_SOCKET)
			continue;
		ret = __sock_cmsg_send(sk, msg, cmsg, sockc);
		if (ret)
			return ret;
	}
	return 0;
}
EXPORT_SYMBOL(sock_cmsg_send);
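
/* Example (illustrative, assumed protocol sendmsg; sockcm_init() is taken
 * from include/net/sock.h): initialize the cookie from the socket, then
 * let SOL_SOCKET cmsgs override it:
 *
 *	struct sockcm_cookie sockc;
 *
 *	sockcm_init(&sockc, sk);
 *	if (msg->msg_controllen) {
 *		err = sock_cmsg_send(sk, msg, &sockc);
 *		if (unlikely(err))
 *			return err;
 *	}
 */
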
static void sk_enter_memory_pressure(struct sock *sk)
{
	if (!sk->sk_prot->enter_memory_pressure)
		return;

	sk->sk_prot->enter_memory_pressure(sk);
}

static void sk_leave_memory_pressure(struct sock *sk)
{
	if (sk->sk_prot->leave_memory_pressure) {
		sk->sk_prot->leave_memory_pressure(sk);
	} else {
		unsigned long *memory_pressure = sk->sk_prot->memory_pressure;

		if (memory_pressure && *memory_pressure)
			*memory_pressure = 0;
	}
}

/* On 32bit arches, an skb frag is limited to 2^15 */
#define SKB_FRAG_PAGE_ORDER	get_order(32768)

/**
 * skb_page_frag_refill - check that a page_frag contains enough room
 * @sz: minimum size of the fragment we want to get
 * @pfrag: pointer to page_frag
 * @gfp: priority for memory allocation
 *
 * Note: While this allocator tries to use high order pages, there is
 * no guarantee that allocations succeed. Therefore, @sz MUST be
 * less than or equal to PAGE_SIZE.
 */
bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t gfp)
{
	if (pfrag->page) {
		if (page_ref_count(pfrag->page) == 1) {
			pfrag->offset = 0;
			return true;
		}
		if (pfrag->offset + sz <= pfrag->size)
			return true;
		put_page(pfrag->page);
	}

	pfrag->offset = 0;
	if (SKB_FRAG_PAGE_ORDER) {
		/* Avoid direct reclaim but allow kswapd to wake */
		pfrag->page = alloc_pages((gfp & ~__GFP_DIRECT_RECLAIM) |
					  __GFP_COMP | __GFP_NOWARN |
					  __GFP_NORETRY,
					  SKB_FRAG_PAGE_ORDER);
		if (likely(pfrag->page)) {
			pfrag->size = PAGE_SIZE << SKB_FRAG_PAGE_ORDER;
			return true;
		}
	}
	pfrag->page = alloc_page(gfp);
	if (likely(pfrag->page)) {
		pfrag->size = PAGE_SIZE;
		return true;
	}
	return false;
}
EXPORT_SYMBOL(skb_page_frag_refill);

bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag)
{
	if (likely(skb_page_frag_refill(32U, pfrag, sk->sk_allocation)))
		return true;

	sk_enter_memory_pressure(sk);
	sk_stream_moderate_sndbuf(sk);
	return false;
}
EXPORT_SYMBOL(sk_page_frag_refill);
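
/* Example (illustrative, assumed send path): refill the per-socket
 * page_frag, copy user data into it, then advance the offset:
 *
 *	struct page_frag *pfrag = sk_page_frag(sk);
 *
 *	if (!sk_page_frag_refill(sk, pfrag))
 *		return -ENOMEM;		// or wait for memory
 *	copy = min_t(int, len, pfrag->size - pfrag->offset);
 *	...				// copy into pfrag->page at pfrag->offset
 *	pfrag->offset += copy;
 */
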
static void __lock_sock(struct sock *sk)
	__releases(&sk->sk_lock.slock)
	__acquires(&sk->sk_lock.slock)
{
	DEFINE_WAIT(wait);

	for (;;) {
		prepare_to_wait_exclusive(&sk->sk_lock.wq, &wait,
					  TASK_UNINTERRUPTIBLE);
		spin_unlock_bh(&sk->sk_lock.slock);
		schedule();
		spin_lock_bh(&sk->sk_lock.slock);
		if (!sock_owned_by_user(sk))
			break;
	}
	finish_wait(&sk->sk_lock.wq, &wait);
}

void __release_sock(struct sock *sk)
	__releases(&sk->sk_lock.slock)
	__acquires(&sk->sk_lock.slock)
{
	struct sk_buff *skb, *next;

	while ((skb = sk->sk_backlog.head) != NULL) {
		sk->sk_backlog.head = sk->sk_backlog.tail = NULL;

		spin_unlock_bh(&sk->sk_lock.slock);

		do {
			next = skb->next;
			prefetch(next);
			WARN_ON_ONCE(skb_dst_is_noref(skb));
			skb_mark_not_on_list(skb);
			sk_backlog_rcv(sk, skb);

			cond_resched();

			skb = next;
		} while (skb != NULL);

		spin_lock_bh(&sk->sk_lock.slock);
	}

	/*
	 * Doing the zeroing here guarantees we cannot loop forever
	 * while a wild producer attempts to flood us.
	 */
	sk->sk_backlog.len = 0;
}

void __sk_flush_backlog(struct sock *sk)
{
	spin_lock_bh(&sk->sk_lock.slock);
	__release_sock(sk);
	spin_unlock_bh(&sk->sk_lock.slock);
}

/**
 * sk_wait_data - wait for data to arrive at sk_receive_queue
 * @sk: sock to wait on
 * @timeo: for how long
 * @skb: last skb seen on sk_receive_queue
 *
 * Now socket state including sk->sk_err is changed only under lock,
 * hence we may omit checks after joining wait queue.
 * We check receive queue before schedule() only as optimization;
 * it is very likely that release_sock() added new data.
 */
int sk_wait_data(struct sock *sk, long *timeo, const struct sk_buff *skb)
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	int rc;

	add_wait_queue(sk_sleep(sk), &wait);
	sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
	rc = sk_wait_event(sk, timeo, skb_peek_tail(&sk->sk_receive_queue) != skb, &wait);
	sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
	remove_wait_queue(sk_sleep(sk), &wait);
	return rc;
}
EXPORT_SYMBOL(sk_wait_data);
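
/* Example (illustrative, simplified blocking recvmsg loop): the caller
 * holds the socket lock; sk_wait_event() drops it while sleeping:
 *
 *	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
 *	while (!(skb = skb_peek(&sk->sk_receive_queue))) {
 *		if (!timeo || signal_pending(current))
 *			return -EAGAIN;	// or sock_intr_errno(timeo)
 *		sk_wait_data(sk, &timeo, NULL);
 *	}
 */
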
/**
 * __sk_mem_raise_allocated - increase memory_allocated
 * @sk: socket
 * @size: memory size to allocate
 * @amt: pages to allocate
 * @kind: allocation type
 *
 * Similar to __sk_mem_schedule(), but does not update sk_forward_alloc
 */
int __sk_mem_raise_allocated(struct sock *sk, int size, int amt, int kind)
{
	struct proto *prot = sk->sk_prot;
	long allocated = sk_memory_allocated_add(sk, amt);
	bool charged = true;

	if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
	    !(charged = mem_cgroup_charge_skmem(sk->sk_memcg, amt)))
		goto suppress_allocation;

	/* Under limit. */
	if (allocated <= sk_prot_mem_limits(sk, 0)) {
		sk_leave_memory_pressure(sk);
		return 1;
	}

	/* Under pressure. */
	if (allocated > sk_prot_mem_limits(sk, 1))
		sk_enter_memory_pressure(sk);

	/* Over hard limit. */
	if (allocated > sk_prot_mem_limits(sk, 2))
		goto suppress_allocation;

	/* guarantee minimum buffer size under pressure */
	if (kind == SK_MEM_RECV) {
		if (atomic_read(&sk->sk_rmem_alloc) < sk_get_rmem0(sk, prot))
			return 1;

	} else { /* SK_MEM_SEND */
		int wmem0 = sk_get_wmem0(sk, prot);

		if (sk->sk_type == SOCK_STREAM) {
			if (sk->sk_wmem_queued < wmem0)
				return 1;
		} else if (refcount_read(&sk->sk_wmem_alloc) < wmem0) {
			return 1;
		}
	}

	if (sk_has_memory_pressure(sk)) {
		int alloc;

		if (!sk_under_memory_pressure(sk))
			return 1;
		alloc = sk_sockets_allocated_read_positive(sk);
		if (sk_prot_mem_limits(sk, 2) > alloc *
		    sk_mem_pages(sk->sk_wmem_queued +
				 atomic_read(&sk->sk_rmem_alloc) +
				 sk->sk_forward_alloc))
			return 1;
	}

suppress_allocation:

	if (kind == SK_MEM_SEND && sk->sk_type == SOCK_STREAM) {
		sk_stream_moderate_sndbuf(sk);

		/* Fail only if socket is _under_ its sndbuf.
		 * In this case we cannot block, so we have to fail.
		 */
		if (sk->sk_wmem_queued + size >= sk->sk_sndbuf)
			return 1;
	}

	if (kind == SK_MEM_SEND || (kind == SK_MEM_RECV && charged))
		trace_sock_exceed_buf_limit(sk, prot, allocated, kind);

	sk_memory_allocated_sub(sk, amt);

	if (mem_cgroup_sockets_enabled && sk->sk_memcg)
		mem_cgroup_uncharge_skmem(sk->sk_memcg, amt);

	return 0;
}
EXPORT_SYMBOL(__sk_mem_raise_allocated);

/**
 * __sk_mem_schedule - increase sk_forward_alloc and memory_allocated
 * @sk: socket
 * @size: memory size to allocate
 * @kind: allocation type
 *
 * If kind is SK_MEM_SEND, it means wmem allocation. Otherwise it means
 * rmem allocation. This function assumes that protocols which have
 * memory_pressure use sk_wmem_queued as write buffer accounting.
 */
int __sk_mem_schedule(struct sock *sk, int size, int kind)
{
	int ret, amt = sk_mem_pages(size);

	sk->sk_forward_alloc += amt << SK_MEM_QUANTUM_SHIFT;
	ret = __sk_mem_raise_allocated(sk, size, amt, kind);
	if (!ret)
		sk->sk_forward_alloc -= amt << SK_MEM_QUANTUM_SHIFT;
	return ret;
}
EXPORT_SYMBOL(__sk_mem_schedule);
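
/* Example (illustrative, assumed receive-side caller): protocols usually
 * go through the charging wrappers rather than calling this directly:
 *
 *	if (!sk_rmem_schedule(sk, skb, skb->truesize))
 *		return -ENOBUFS;		// over memory limits, drop
 *	sk_mem_charge(sk, skb->truesize);	// consume sk_forward_alloc
 */
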
/**
 * __sk_mem_reduce_allocated - reclaim memory_allocated
 * @sk: socket
 * @amount: number of quanta
 *
 * Similar to __sk_mem_reclaim(), but does not update sk_forward_alloc
 */
void __sk_mem_reduce_allocated(struct sock *sk, int amount)
{
	sk_memory_allocated_sub(sk, amount);

	if (mem_cgroup_sockets_enabled && sk->sk_memcg)
		mem_cgroup_uncharge_skmem(sk->sk_memcg, amount);

	if (sk_under_memory_pressure(sk) &&
	    (sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0)))
		sk_leave_memory_pressure(sk);
}
EXPORT_SYMBOL(__sk_mem_reduce_allocated);

/**
 * __sk_mem_reclaim - reclaim sk_forward_alloc and memory_allocated
 * @sk: socket
 * @amount: number of bytes (rounded down to a SK_MEM_QUANTUM multiple)
 */
void __sk_mem_reclaim(struct sock *sk, int amount)
{
	amount >>= SK_MEM_QUANTUM_SHIFT;
	sk->sk_forward_alloc -= amount << SK_MEM_QUANTUM_SHIFT;
	__sk_mem_reduce_allocated(sk, amount);
}
EXPORT_SYMBOL(__sk_mem_reclaim);

int sk_set_peek_off(struct sock *sk, int val)
{
	sk->sk_peek_off = val;
	return 0;
}
EXPORT_SYMBOL_GPL(sk_set_peek_off);

/*
 * Set of default routines for initialising struct proto_ops when
 * the protocol does not support a particular function. In certain
 * cases where it makes no sense for a protocol to have a "do nothing"
 * function, some default processing is provided.
 */

int sock_no_bind(struct socket *sock, struct sockaddr *saddr, int len)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_bind);

int sock_no_connect(struct socket *sock, struct sockaddr *saddr,
		    int len, int flags)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_connect);

int sock_no_socketpair(struct socket *sock1, struct socket *sock2)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_socketpair);

int sock_no_accept(struct socket *sock, struct socket *newsock, int flags,
		   bool kern)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_accept);

int sock_no_getname(struct socket *sock, struct sockaddr *saddr,
		    int peer)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_getname);

int sock_no_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_ioctl);

int sock_no_listen(struct socket *sock, int backlog)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_listen);

int sock_no_shutdown(struct socket *sock, int how)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_shutdown);

int sock_no_setsockopt(struct socket *sock, int level, int optname,
		       char __user *optval, unsigned int optlen)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_setsockopt);

int sock_no_getsockopt(struct socket *sock, int level, int optname,
		       char __user *optval, int __user *optlen)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_getsockopt);

int sock_no_sendmsg(struct socket *sock, struct msghdr *m, size_t len)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_sendmsg);

int sock_no_sendmsg_locked(struct sock *sk, struct msghdr *m, size_t len)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_sendmsg_locked);

int sock_no_recvmsg(struct socket *sock, struct msghdr *m, size_t len,
		    int flags)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_recvmsg);

int sock_no_mmap(struct file *file, struct socket *sock, struct vm_area_struct *vma)
{
	/* Mirror missing mmap method error code */
	return -ENODEV;
}
EXPORT_SYMBOL(sock_no_mmap);

ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset,
			 size_t size, int flags)
{
	ssize_t res;
	struct msghdr msg = {.msg_flags = flags};
	struct kvec iov;
	char *kaddr = kmap(page);

	iov.iov_base = kaddr + offset;
	iov.iov_len = size;
	res = kernel_sendmsg(sock, &msg, &iov, 1, size);
	kunmap(page);
	return res;
}
EXPORT_SYMBOL(sock_no_sendpage);

ssize_t sock_no_sendpage_locked(struct sock *sk, struct page *page,
				int offset, size_t size, int flags)
{
	ssize_t res;
	struct msghdr msg = {.msg_flags = flags};
	struct kvec iov;
	char *kaddr = kmap(page);

	iov.iov_base = kaddr + offset;
	iov.iov_len = size;
	res = kernel_sendmsg_locked(sk, &msg, &iov, 1, size);
	kunmap(page);
	return res;
}
EXPORT_SYMBOL(sock_no_sendpage_locked);

/*
 * Default Socket Callbacks
 */

static void sock_def_wakeup(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (skwq_has_sleeper(wq))
		wake_up_interruptible_all(&wq->wait);
	rcu_read_unlock();
}

static void sock_def_error_report(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (skwq_has_sleeper(wq))
		wake_up_interruptible_poll(&wq->wait, EPOLLERR);
	sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR);
	rcu_read_unlock();
}

static void sock_def_readable(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (skwq_has_sleeper(wq))
		wake_up_interruptible_sync_poll(&wq->wait, EPOLLIN | EPOLLPRI |
						EPOLLRDNORM | EPOLLRDBAND);
	sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
	rcu_read_unlock();
}

static void sock_def_write_space(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();

	/* Do not wake up a writer until he can make "significant"
	 * progress. --DaveM
	 */
	if ((refcount_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) {
		wq = rcu_dereference(sk->sk_wq);
		if (skwq_has_sleeper(wq))
			wake_up_interruptible_sync_poll(&wq->wait, EPOLLOUT |
						EPOLLWRNORM | EPOLLWRBAND);

		/* Should agree with poll, otherwise some programs break */
		if (sock_writeable(sk))
			sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
	}

	rcu_read_unlock();
}

static void sock_def_destruct(struct sock *sk)
{
}

void sk_send_sigurg(struct sock *sk)
{
	if (sk->sk_socket && sk->sk_socket->file)
		if (send_sigurg(&sk->sk_socket->file->f_owner))
			sk_wake_async(sk, SOCK_WAKE_URG, POLL_PRI);
}
EXPORT_SYMBOL(sk_send_sigurg);

void sk_reset_timer(struct sock *sk, struct timer_list *timer,
		    unsigned long expires)
{
	if (!mod_timer(timer, expires))
		sock_hold(sk);
}
EXPORT_SYMBOL(sk_reset_timer);

void sk_stop_timer(struct sock *sk, struct timer_list *timer)
{
	if (del_timer(timer))
		__sock_put(sk);
}
EXPORT_SYMBOL(sk_stop_timer);
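
/* Example (illustrative): sk_reset_timer()/sk_stop_timer() keep one sock
 * reference per pending timer, so a protocol timer is typically driven as:
 *
 *	sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);	// takes a hold
 *	...
 *	sk_stop_timer(sk, &sk->sk_timer);			// drops the hold
 */
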
void sock_init_data(struct socket *sock, struct sock *sk)
{
	sk_init_common(sk);
	sk->sk_send_head = NULL;

	timer_setup(&sk->sk_timer, NULL, 0);

	sk->sk_allocation = GFP_KERNEL;
	sk->sk_rcvbuf = sysctl_rmem_default;
	sk->sk_sndbuf = sysctl_wmem_default;
	sk->sk_state = TCP_CLOSE;
	sk_set_socket(sk, sock);

	sock_set_flag(sk, SOCK_ZAPPED);

	if (sock) {
		sk->sk_type = sock->type;
		sk->sk_wq = sock->wq;
		sock->sk = sk;
		sk->sk_uid = SOCK_INODE(sock)->i_uid;
	} else {
		sk->sk_wq = NULL;
		sk->sk_uid = make_kuid(sock_net(sk)->user_ns, 0);
	}

	rwlock_init(&sk->sk_callback_lock);
	if (sk->sk_kern_sock)
		lockdep_set_class_and_name(
			&sk->sk_callback_lock,
			af_kern_callback_keys + sk->sk_family,
			af_family_kern_clock_key_strings[sk->sk_family]);
	else
		lockdep_set_class_and_name(
			&sk->sk_callback_lock,
			af_callback_keys + sk->sk_family,
			af_family_clock_key_strings[sk->sk_family]);

	sk->sk_state_change = sock_def_wakeup;
	sk->sk_data_ready = sock_def_readable;
	sk->sk_write_space = sock_def_write_space;
	sk->sk_error_report = sock_def_error_report;
	sk->sk_destruct = sock_def_destruct;

	sk->sk_frag.page = NULL;
	sk->sk_frag.offset = 0;
	sk->sk_peek_off = -1;

	sk->sk_peer_pid = NULL;
	sk->sk_peer_cred = NULL;
	sk->sk_write_pending = 0;
	sk->sk_rcvlowat = 1;
	sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
	sk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT;

	sk->sk_stamp = SK_DEFAULT_STAMP;
	atomic_set(&sk->sk_zckey, 0);

#ifdef CONFIG_NET_RX_BUSY_POLL
	sk->sk_napi_id = 0;
	sk->sk_ll_usec = sysctl_net_busy_read;
#endif

	sk->sk_max_pacing_rate = ~0UL;
	sk->sk_pacing_rate = ~0UL;
	sk->sk_pacing_shift = 10;
	sk->sk_incoming_cpu = -1;

	sk_rx_queue_clear(sk);
	/*
	 * Before updating sk_refcnt, we must commit prior changes to memory
	 * (Documentation/RCU/rculist_nulls.txt for details)
	 */
	smp_wmb();
	refcount_set(&sk->sk_refcnt, 1);
	atomic_set(&sk->sk_drops, 0);
}
EXPORT_SYMBOL(sock_init_data);

void lock_sock_nested(struct sock *sk, int subclass)
{
	might_sleep();
	spin_lock_bh(&sk->sk_lock.slock);
	if (sk->sk_lock.owned)
		__lock_sock(sk);
	sk->sk_lock.owned = 1;
	spin_unlock(&sk->sk_lock.slock);
	/*
	 * The sk_lock has mutex_lock() semantics here:
	 */
	mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_);
	local_bh_enable();
}
EXPORT_SYMBOL(lock_sock_nested);

void release_sock(struct sock *sk)
{
	spin_lock_bh(&sk->sk_lock.slock);
	if (sk->sk_backlog.tail)
		__release_sock(sk);

	/* Warning: release_cb() might need to release sk ownership,
	 * ie call sock_release_ownership(sk) before us.
	 */
	if (sk->sk_prot->release_cb)
		sk->sk_prot->release_cb(sk);

	sock_release_ownership(sk);
	if (waitqueue_active(&sk->sk_lock.wq))
		wake_up(&sk->sk_lock.wq);
	spin_unlock_bh(&sk->sk_lock.slock);
}
EXPORT_SYMBOL(release_sock);

/**
 * lock_sock_fast - fast version of lock_sock
 * @sk: socket
 *
 * This version should be used for very small sections, where the process
 * won't block. It returns false if the fast path was taken:
 *
 *   sk_lock.slock locked, owned = 0, BH disabled
 *
 * and true if the slow path was taken:
 *
 *   sk_lock.slock unlocked, owned = 1, BH enabled
 */
bool lock_sock_fast(struct sock *sk)
{
	might_sleep();
	spin_lock_bh(&sk->sk_lock.slock);

	if (!sk->sk_lock.owned)
		/*
		 * Note: we must keep BH disabled on this path.
		 */
		return false;

	__lock_sock(sk);
	sk->sk_lock.owned = 1;
	spin_unlock(&sk->sk_lock.slock);
	/*
	 * The sk_lock has mutex_lock() semantics here:
	 */
	mutex_acquire(&sk->sk_lock.dep_map, 0, 0, _RET_IP_);
	local_bh_enable();
	return true;
}
EXPORT_SYMBOL(lock_sock_fast);
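
/* Example (illustrative): lock_sock_fast() pairs with unlock_sock_fast(),
 * which needs the return value to know which path was taken:
 *
 *	bool slow = lock_sock_fast(sk);
 *
 *	...			// short critical section
 *	unlock_sock_fast(sk, slow);
 */
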
int sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp)
{
	struct timeval tv;

	sock_enable_timestamp(sk, SOCK_TIMESTAMP);
	tv = ktime_to_timeval(sk->sk_stamp);
	if (tv.tv_sec == -1)
		return -ENOENT;
	if (tv.tv_sec == 0) {
		sk->sk_stamp = ktime_get_real();
		tv = ktime_to_timeval(sk->sk_stamp);
	}
	return copy_to_user(userstamp, &tv, sizeof(tv)) ? -EFAULT : 0;
}
EXPORT_SYMBOL(sock_get_timestamp);

int sock_get_timestampns(struct sock *sk, struct timespec __user *userstamp)
{
	struct timespec ts;

	sock_enable_timestamp(sk, SOCK_TIMESTAMP);
	ts = ktime_to_timespec(sk->sk_stamp);
	if (ts.tv_sec == -1)
		return -ENOENT;
	if (ts.tv_sec == 0) {
		sk->sk_stamp = ktime_get_real();
		ts = ktime_to_timespec(sk->sk_stamp);
	}
	return copy_to_user(userstamp, &ts, sizeof(ts)) ? -EFAULT : 0;
}
EXPORT_SYMBOL(sock_get_timestampns);

void sock_enable_timestamp(struct sock *sk, int flag)
{
	if (!sock_flag(sk, flag)) {
		unsigned long previous_flags = sk->sk_flags;

		sock_set_flag(sk, flag);
		/*
		 * we just set one of the two flags which require net
		 * time stamping, but time stamping might have been on
		 * already because of the other one
		 */
		if (sock_needs_netstamp(sk) &&
		    !(previous_flags & SK_FLAGS_TIMESTAMP))
			net_enable_timestamp();
	}
}

int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
		       int level, int type)
{
	struct sock_exterr_skb *serr;
	struct sk_buff *skb;
	int copied, err;

	err = -EAGAIN;
	skb = sock_dequeue_err_skb(sk);
	if (skb == NULL)
		goto out;

	copied = skb->len;
	if (copied > len) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}
	err = skb_copy_datagram_msg(skb, 0, msg, copied);
	if (err)
		goto out_free_skb;

	sock_recv_timestamp(msg, sk, skb);

	serr = SKB_EXT_ERR(skb);
	put_cmsg(msg, level, type, sizeof(serr->ee), &serr->ee);

	msg->msg_flags |= MSG_ERRQUEUE;
	err = copied;

out_free_skb:
	kfree_skb(skb);
out:
	return err;
}
EXPORT_SYMBOL(sock_recv_errqueue);

/*
 * Get a socket option on a socket.
 *
 * FIX: POSIX 1003.1g is very ambiguous here. It states that
 * asynchronous errors should be reported by getsockopt. We assume
 * this means if you specify SO_ERROR (otherwise what's the point of it).
 */
int sock_common_getsockopt(struct socket *sock, int level, int optname,
			   char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;

	return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL(sock_common_getsockopt);

#ifdef CONFIG_COMPAT
int compat_sock_common_getsockopt(struct socket *sock, int level, int optname,
				  char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;

	if (sk->sk_prot->compat_getsockopt != NULL)
		return sk->sk_prot->compat_getsockopt(sk, level, optname,
						      optval, optlen);
	return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL(compat_sock_common_getsockopt);
#endif

int sock_common_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
			int flags)
{
	struct sock *sk = sock->sk;
	int addr_len = 0;
	int err;

	err = sk->sk_prot->recvmsg(sk, msg, size, flags & MSG_DONTWAIT,
				   flags & ~MSG_DONTWAIT, &addr_len);
	if (err >= 0)
		msg->msg_namelen = addr_len;
	return err;
}
EXPORT_SYMBOL(sock_common_recvmsg);

/*
 * Set socket options on an inet socket.
 */
int sock_common_setsockopt(struct socket *sock, int level, int optname,
			   char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;

	return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL(sock_common_setsockopt);

#ifdef CONFIG_COMPAT
int compat_sock_common_setsockopt(struct socket *sock, int level, int optname,
				  char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;

	if (sk->sk_prot->compat_setsockopt != NULL)
		return sk->sk_prot->compat_setsockopt(sk, level, optname,
						      optval, optlen);
	return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL(compat_sock_common_setsockopt);
#endif

void sk_common_release(struct sock *sk)
{
	if (sk->sk_prot->destroy)
		sk->sk_prot->destroy(sk);

	/*
	 * Observation: when sk_common_release() is called, processes have
	 * no access to the socket. But the net still has.
	 *
	 * Step one, detach it from networking:
	 *
	 * A. Remove from hash tables.
	 */

	sk->sk_prot->unhash(sk);

	/*
	 * At this point the socket cannot receive new packets, but it is
	 * possible that some packets are in flight because some CPU runs
	 * the receiver and did a hash table lookup before we unhashed the
	 * socket. They will reach the receive queue and will be purged by
	 * the socket destructor.
	 *
	 * Also we still have packets pending on the receive queue and
	 * probably our own packets waiting in device queues. sock_destroy()
	 * will drain the receive queue, but transmitted packets will delay
	 * socket destruction until the last reference is released.
	 */

	sock_orphan(sk);

	xfrm_sk_free_policy(sk);

	sk_refcnt_debug_release(sk);

	sock_put(sk);
}
EXPORT_SYMBOL(sk_common_release);

void sk_get_meminfo(const struct sock *sk, u32 *mem)
{
	memset(mem, 0, sizeof(*mem) * SK_MEMINFO_VARS);

	mem[SK_MEMINFO_RMEM_ALLOC] = sk_rmem_alloc_get(sk);
	mem[SK_MEMINFO_RCVBUF] = sk->sk_rcvbuf;
	mem[SK_MEMINFO_WMEM_ALLOC] = sk_wmem_alloc_get(sk);
	mem[SK_MEMINFO_SNDBUF] = sk->sk_sndbuf;
	mem[SK_MEMINFO_FWD_ALLOC] = sk->sk_forward_alloc;
	mem[SK_MEMINFO_WMEM_QUEUED] = sk->sk_wmem_queued;
	mem[SK_MEMINFO_OPTMEM] = atomic_read(&sk->sk_omem_alloc);
	mem[SK_MEMINFO_BACKLOG] = sk->sk_backlog.len;
	mem[SK_MEMINFO_DROPS] = atomic_read(&sk->sk_drops);
}

#ifdef CONFIG_PROC_FS
#define PROTO_INUSE_NR	64	/* should be enough for the first time */
struct prot_inuse {
	int val[PROTO_INUSE_NR];
};

static DECLARE_BITMAP(proto_inuse_idx, PROTO_INUSE_NR);

void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
{
	__this_cpu_add(net->core.prot_inuse->val[prot->inuse_idx], val);
}
EXPORT_SYMBOL_GPL(sock_prot_inuse_add);

int sock_prot_inuse_get(struct net *net, struct proto *prot)
{
	int cpu, idx = prot->inuse_idx;
	int res = 0;

	for_each_possible_cpu(cpu)
		res += per_cpu_ptr(net->core.prot_inuse, cpu)->val[idx];

	return res >= 0 ? res : 0;
}
EXPORT_SYMBOL_GPL(sock_prot_inuse_get);

static void sock_inuse_add(struct net *net, int val)
{
	this_cpu_add(*net->core.sock_inuse, val);
}

int sock_inuse_get(struct net *net)
{
	int cpu, res = 0;

	for_each_possible_cpu(cpu)
		res += *per_cpu_ptr(net->core.sock_inuse, cpu);

	return res;
}
EXPORT_SYMBOL_GPL(sock_inuse_get);

static int __net_init sock_inuse_init_net(struct net *net)
{
	net->core.prot_inuse = alloc_percpu(struct prot_inuse);
	if (net->core.prot_inuse == NULL)
		return -ENOMEM;

	net->core.sock_inuse = alloc_percpu(int);
	if (net->core.sock_inuse == NULL)
		goto out;

	return 0;

out:
	free_percpu(net->core.prot_inuse);
	return -ENOMEM;
}

static void __net_exit sock_inuse_exit_net(struct net *net)
{
	free_percpu(net->core.prot_inuse);
	free_percpu(net->core.sock_inuse);
}

static struct pernet_operations net_inuse_ops = {
	.init = sock_inuse_init_net,
	.exit = sock_inuse_exit_net,
};

static __init int net_inuse_init(void)
{
	if (register_pernet_subsys(&net_inuse_ops))
		panic("Cannot initialize net inuse counters");

	return 0;
}

core_initcall(net_inuse_init);

static void assign_proto_idx(struct proto *prot)
{
	prot->inuse_idx = find_first_zero_bit(proto_inuse_idx, PROTO_INUSE_NR);

	if (unlikely(prot->inuse_idx == PROTO_INUSE_NR - 1)) {
		pr_err("PROTO_INUSE_NR exhausted\n");
		return;
	}

	set_bit(prot->inuse_idx, proto_inuse_idx);
}

static void release_proto_idx(struct proto *prot)
{
	if (prot->inuse_idx != PROTO_INUSE_NR - 1)
		clear_bit(prot->inuse_idx, proto_inuse_idx);
}
#else
static inline void assign_proto_idx(struct proto *prot)
{
}

static inline void release_proto_idx(struct proto *prot)
{
}

static void sock_inuse_add(struct net *net, int val)
{
}
#endif

static void req_prot_cleanup(struct request_sock_ops *rsk_prot)
{
	if (!rsk_prot)
		return;
	kfree(rsk_prot->slab_name);
	rsk_prot->slab_name = NULL;
	kmem_cache_destroy(rsk_prot->slab);
	rsk_prot->slab = NULL;
}

static int req_prot_init(const struct proto *prot)
{
	struct request_sock_ops *rsk_prot = prot->rsk_prot;

	if (!rsk_prot)
		return 0;

	rsk_prot->slab_name = kasprintf(GFP_KERNEL, "request_sock_%s",
					prot->name);
	if (!rsk_prot->slab_name)
		return -ENOMEM;

	rsk_prot->slab = kmem_cache_create(rsk_prot->slab_name,
					   rsk_prot->obj_size, 0,
					   SLAB_ACCOUNT | prot->slab_flags,
					   NULL);

	if (!rsk_prot->slab) {
		pr_crit("%s: Can't create request sock SLAB cache!\n",
			prot->name);
		return -ENOMEM;
	}
	return 0;
}

int proto_register(struct proto *prot, int alloc_slab)
{
	if (alloc_slab) {
		prot->slab = kmem_cache_create_usercopy(prot->name,
					prot->obj_size, 0,
					SLAB_HWCACHE_ALIGN | SLAB_ACCOUNT |
					prot->slab_flags,
					prot->useroffset, prot->usersize,
					NULL);

		if (prot->slab == NULL) {
			pr_crit("%s: Can't create sock SLAB cache!\n",
				prot->name);
			goto out;
		}

		if (req_prot_init(prot))
			goto out_free_request_sock_slab;

		if (prot->twsk_prot != NULL) {
			prot->twsk_prot->twsk_slab_name =
				kasprintf(GFP_KERNEL, "tw_sock_%s", prot->name);
			if (prot->twsk_prot->twsk_slab_name == NULL)
				goto out_free_request_sock_slab;

			prot->twsk_prot->twsk_slab =
				kmem_cache_create(prot->twsk_prot->twsk_slab_name,
						  prot->twsk_prot->twsk_obj_size,
						  0,
						  SLAB_ACCOUNT |
						  prot->slab_flags,
						  NULL);
			if (prot->twsk_prot->twsk_slab == NULL)
				goto out_free_timewait_sock_slab_name;
		}
	}

	mutex_lock(&proto_list_mutex);
	list_add(&prot->node, &proto_list);
	assign_proto_idx(prot);
	mutex_unlock(&proto_list_mutex);
	return 0;

out_free_timewait_sock_slab_name:
	kfree(prot->twsk_prot->twsk_slab_name);
out_free_request_sock_slab:
	req_prot_cleanup(prot->rsk_prot);

	kmem_cache_destroy(prot->slab);
	prot->slab = NULL;
out:
	return -ENOBUFS;
}
EXPORT_SYMBOL(proto_register);
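
/* Example (illustrative sketch, hypothetical protocol): a module registers
 * its proto at init and unregisters it on exit:
 *
 *	static struct proto my_proto = {	// hypothetical names
 *		.name	  = "MYPROTO",
 *		.owner	  = THIS_MODULE,
 *		.obj_size = sizeof(struct my_sock),
 *	};
 *
 *	err = proto_register(&my_proto, 1);	// 1 = allocate a slab cache
 *	...
 *	proto_unregister(&my_proto);
 */
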
void proto_unregister(struct proto *prot)
{
	mutex_lock(&proto_list_mutex);
	release_proto_idx(prot);
	list_del(&prot->node);
	mutex_unlock(&proto_list_mutex);

	kmem_cache_destroy(prot->slab);
	prot->slab = NULL;

	req_prot_cleanup(prot->rsk_prot);

	if (prot->twsk_prot != NULL && prot->twsk_prot->twsk_slab != NULL) {
		kmem_cache_destroy(prot->twsk_prot->twsk_slab);
		kfree(prot->twsk_prot->twsk_slab_name);
		prot->twsk_prot->twsk_slab = NULL;
	}
}
EXPORT_SYMBOL(proto_unregister);

int sock_load_diag_module(int family, int protocol)
{
	if (!protocol) {
		if (!sock_is_registered(family))
			return -ENOENT;

		return request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK,
				      NETLINK_SOCK_DIAG, family);
	}

#ifdef CONFIG_INET
	if (family == AF_INET &&
	    protocol != IPPROTO_RAW &&
	    !rcu_access_pointer(inet_protos[protocol]))
		return -ENOENT;
#endif

	return request_module("net-pf-%d-proto-%d-type-%d-%d", PF_NETLINK,
			      NETLINK_SOCK_DIAG, family, protocol);
}
EXPORT_SYMBOL(sock_load_diag_module);

#ifdef CONFIG_PROC_FS
static void *proto_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(proto_list_mutex)
{
	mutex_lock(&proto_list_mutex);
	return seq_list_start_head(&proto_list, *pos);
}

static void *proto_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	return seq_list_next(v, &proto_list, pos);
}

static void proto_seq_stop(struct seq_file *seq, void *v)
	__releases(proto_list_mutex)
{
	mutex_unlock(&proto_list_mutex);
}

static char proto_method_implemented(const void *method)
{
	return method == NULL ? 'n' : 'y';
}

static long sock_prot_memory_allocated(struct proto *proto)
{
	return proto->memory_allocated != NULL ? proto_memory_allocated(proto) : -1L;
}

static char *sock_prot_memory_pressure(struct proto *proto)
{
	return proto->memory_pressure != NULL ?
		proto_memory_pressure(proto) ? "yes" : "no" : "NI";
}

static void proto_seq_printf(struct seq_file *seq, struct proto *proto)
{
	seq_printf(seq, "%-9s %4u %6d %6ld %-3s %6u %-3s %-10s "
		   "%2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c\n",
		   proto->name,
		   proto->obj_size,
		   sock_prot_inuse_get(seq_file_net(seq), proto),
		   sock_prot_memory_allocated(proto),
		   sock_prot_memory_pressure(proto),
		   proto->max_header,
		   proto->slab == NULL ? "no" : "yes",
		   module_name(proto->owner),
		   proto_method_implemented(proto->close),
		   proto_method_implemented(proto->connect),
		   proto_method_implemented(proto->disconnect),
		   proto_method_implemented(proto->accept),
		   proto_method_implemented(proto->ioctl),
		   proto_method_implemented(proto->init),
		   proto_method_implemented(proto->destroy),
		   proto_method_implemented(proto->shutdown),
		   proto_method_implemented(proto->setsockopt),
		   proto_method_implemented(proto->getsockopt),
		   proto_method_implemented(proto->sendmsg),
		   proto_method_implemented(proto->recvmsg),
		   proto_method_implemented(proto->sendpage),
		   proto_method_implemented(proto->bind),
		   proto_method_implemented(proto->backlog_rcv),
		   proto_method_implemented(proto->hash),
		   proto_method_implemented(proto->unhash),
		   proto_method_implemented(proto->get_port),
		   proto_method_implemented(proto->enter_memory_pressure));
}

static int proto_seq_show(struct seq_file *seq, void *v)
{
	if (v == &proto_list)
		seq_printf(seq, "%-9s %-4s %-8s %-6s %-5s %-7s %-4s %-10s %s",
			   "protocol",
			   "size",
			   "sockets",
			   "memory",
			   "press",
			   "maxhdr",
			   "slab",
			   "module",
			   "cl co di ac io in de sh ss gs se re sp bi br ha uh gp em\n");
	else
		proto_seq_printf(seq, list_entry(v, struct proto, node));
	return 0;
}

static const struct seq_operations proto_seq_ops = {
	.start = proto_seq_start,
	.next  = proto_seq_next,
	.stop  = proto_seq_stop,
	.show  = proto_seq_show,
};

static __net_init int proto_init_net(struct net *net)
{
	if (!proc_create_net("protocols", 0444, net->proc_net, &proto_seq_ops,
			     sizeof(struct seq_net_private)))
		return -ENOMEM;

	return 0;
}

static __net_exit void proto_exit_net(struct net *net)
{
	remove_proc_entry("protocols", net->proc_net);
}

static __net_initdata struct pernet_operations proto_net_ops = {
	.init = proto_init_net,
	.exit = proto_exit_net,
};

static int __init proto_init(void)
{
	return register_pernet_subsys(&proto_net_ops);
}

subsys_initcall(proto_init);
#endif /* PROC_FS */

#ifdef CONFIG_NET_RX_BUSY_POLL
bool sk_busy_loop_end(void *p, unsigned long start_time)
{
	struct sock *sk = p;

	return !skb_queue_empty(&sk->sk_receive_queue) ||
	       sk_busy_loop_timeout(sk, start_time);
}
EXPORT_SYMBOL(sk_busy_loop_end);
#endif /* CONFIG_NET_RX_BUSY_POLL */