/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system. INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Generic socket support routines. Memory allocators, socket lock/release
 *		handler for protocols to use and generic option handler.
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Alan Cox, <A.Cox@swansea.ac.uk>
 *
 * Fixes:
 *		Alan Cox	:	Numerous verify_area() problems
 *		Alan Cox	:	Connecting on a connecting socket
 *					now returns an error for tcp.
 *		Alan Cox	:	sock->protocol is set correctly.
 *					and is not sometimes left as 0.
 *		Alan Cox	:	connect handles icmp errors on a
 *					connect properly. Unfortunately there
 *					is a restart syscall nasty there. I
 *					can't match BSD without hacking the C
 *					library. Ideas urgently sought!
 *		Alan Cox	:	Disallow bind() to addresses that are
 *					not ours - especially broadcast ones!!
 *		Alan Cox	:	Socket 1024 _IS_ ok for users. (fencepost)
 *		Alan Cox	:	sock_wfree/sock_rfree don't destroy sockets,
 *					instead they leave that for the DESTROY timer.
 *		Alan Cox	:	Clean up error flag in accept
 *		Alan Cox	:	TCP ack handling is buggy, the DESTROY timer
 *					was buggy. Put a remove_sock() in the handler
 *					for memory when we hit 0. Also altered the timer
 *					code. The ACK stuff can wait and needs major
 *					TCP layer surgery.
 *		Alan Cox	:	Fixed TCP ack bug, removed remove sock
 *					and fixed timer/inet_bh race.
 *		Alan Cox	:	Added zapped flag for TCP
 *		Alan Cox	:	Move kfree_skb into skbuff.c and tidied up surplus code
 *		Alan Cox	:	for new sk_buff allocations wmalloc/rmalloc now call alloc_skb
 *		Alan Cox	:	kfree_s calls now are kfree_skbmem so we can track skb resources
 *		Alan Cox	:	Supports socket option broadcast now as does udp. Packet and raw need fixing.
 *		Alan Cox	:	Added RCVBUF,SNDBUF size setting. It suddenly occurred to me how easy it was so...
 *		Rick Sladkey	:	Relaxed UDP rules for matching packets.
 *		C.E.Hawkins	:	IFF_PROMISC/SIOCGHWADDR support
 *		Pauline Middelink	:	identd support
 *		Alan Cox	:	Fixed connect() taking signals I think.
 *		Alan Cox	:	SO_LINGER supported
 *		Alan Cox	:	Error reporting fixes
 *		Anonymous	:	inet_create tidied up (sk->reuse setting)
 *		Alan Cox	:	inet sockets don't set sk->type!
 *		Alan Cox	:	Split socket option code
 *		Alan Cox	:	Callbacks
 *		Alan Cox	:	Nagle flag for Charles & Johannes stuff
 *		Alex		:	Removed restriction on inet fioctl
 *		Alan Cox	:	Splitting INET from NET core
 *		Alan Cox	:	Fixed bogus SO_TYPE handling in getsockopt()
 *		Adam Caldwell	:	Missing return in SO_DONTROUTE/SO_DEBUG code
 *		Alan Cox	:	Split IP from generic code
 *		Alan Cox	:	New kfree_skbmem()
 *		Alan Cox	:	Make SO_DEBUG superuser only.
 *		Alan Cox	:	Allow anyone to clear SO_DEBUG
 *					(compatibility fix)
 *		Alan Cox	:	Added optimistic memory grabbing for AF_UNIX throughput.
 *		Alan Cox	:	Allocator for a socket is settable.
 *		Alan Cox	:	SO_ERROR includes soft errors.
 *		Alan Cox	:	Allow NULL arguments on some SO_ opts
 *		Alan Cox	:	Generic socket allocation to make hooks
 *					easier (suggested by Craig Metz).
 *		Michael Pall	:	SO_ERROR returns positive errno again
 *		Steve Whitehouse:	Added default destructor to free
 *					protocol private data.
 *		Steve Whitehouse:	Added various other default routines
 *					common to several socket families.
 *		Chris Evans	:	Call suser() check last on F_SETOWN
 *		Jay Schulist	:	Added SO_ATTACH_FILTER and SO_DETACH_FILTER.
 *		Andi Kleen	:	Add sock_kmalloc()/sock_kfree_s()
 *		Andi Kleen	:	Fix write_space callback
 *		Chris Evans	:	Security fixes - signedness again
 *		Arnaldo C. Melo	:	cleanups, use skb_queue_purge
 *
 * To Fix:
 *
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/errqueue.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/poll.h>
#include <linux/tcp.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/user_namespace.h>
#include <linux/static_key.h>
#include <linux/memcontrol.h>
#include <linux/prefetch.h>

#include <asm/uaccess.h>

#include <linux/netdevice.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/request_sock.h>
#include <net/sock.h>
#include <linux/net_tstamp.h>
#include <net/xfrm.h>
#include <linux/ipsec.h>
#include <net/cls_cgroup.h>
#include <net/netprio_cgroup.h>
#include <linux/sock_diag.h>

#include <linux/filter.h>
#include <net/sock_reuseport.h>

#include <trace/events/sock.h>

#ifdef CONFIG_INET
#include <net/tcp.h>
#endif

#include <net/busy_poll.h>

static DEFINE_MUTEX(proto_list_mutex);
static LIST_HEAD(proto_list);

/**
 * sk_ns_capable - General socket capability test
 * @sk: Socket to use a capability on or through
 * @user_ns: The user namespace of the capability to use
 * @cap: The capability to use
 *
 * Test to see if the opener of the socket had the capability @cap in the
 * user namespace @user_ns when the socket was created, and that the
 * current process has it there as well.
 */
bool sk_ns_capable(const struct sock *sk,
		   struct user_namespace *user_ns, int cap)
{
	return file_ns_capable(sk->sk_socket->file, user_ns, cap) &&
		ns_capable(user_ns, cap);
}
EXPORT_SYMBOL(sk_ns_capable);

/**
 * sk_capable - Socket global capability test
 * @sk: Socket to use a capability on or through
 * @cap: The global capability to use
 *
 * Test to see if the opener of the socket had the capability @cap when
 * the socket was created, and that the current process has it in all
 * user namespaces.
 */
bool sk_capable(const struct sock *sk, int cap)
{
	return sk_ns_capable(sk, &init_user_ns, cap);
}
EXPORT_SYMBOL(sk_capable);

/**
 * sk_net_capable - Network namespace socket capability test
 * @sk: Socket to use a capability on or through
 * @cap: The capability to use
 *
 * Test to see if the opener of the socket had the capability @cap when
 * the socket was created, and that the current process has it over the
 * network namespace the socket is a member of.
 */
bool sk_net_capable(const struct sock *sk, int cap)
{
	return sk_ns_capable(sk, sock_net(sk)->user_ns, cap);
}
EXPORT_SYMBOL(sk_net_capable);

/*
 * Each address family might have different locking rules, so we have
 * one slock key per address family:
 */
static struct lock_class_key af_family_keys[AF_MAX];
static struct lock_class_key af_family_slock_keys[AF_MAX];

/*
 * Make lock validator output more readable. (we pre-construct these
 * strings build-time, so that runtime initialization of socket
 * locks is fast):
 */
static const char *const af_family_key_strings[AF_MAX+1] = {
	"sk_lock-AF_UNSPEC", "sk_lock-AF_UNIX"     , "sk_lock-AF_INET"     ,
	"sk_lock-AF_AX25"  , "sk_lock-AF_IPX"      , "sk_lock-AF_APPLETALK",
	"sk_lock-AF_NETROM", "sk_lock-AF_BRIDGE"   , "sk_lock-AF_ATMPVC"   ,
	"sk_lock-AF_X25"   , "sk_lock-AF_INET6"    , "sk_lock-AF_ROSE"     ,
	"sk_lock-AF_DECnet", "sk_lock-AF_NETBEUI"  , "sk_lock-AF_SECURITY" ,
	"sk_lock-AF_KEY"   , "sk_lock-AF_NETLINK"  , "sk_lock-AF_PACKET"   ,
	"sk_lock-AF_ASH"   , "sk_lock-AF_ECONET"   , "sk_lock-AF_ATMSVC"   ,
	"sk_lock-AF_RDS"   , "sk_lock-AF_SNA"      , "sk_lock-AF_IRDA"     ,
	"sk_lock-AF_PPPOX" , "sk_lock-AF_WANPIPE"  , "sk_lock-AF_LLC"      ,
	"sk_lock-27"       , "sk_lock-28"          , "sk_lock-AF_CAN"      ,
	"sk_lock-AF_TIPC"  , "sk_lock-AF_BLUETOOTH", "sk_lock-AF_IUCV"     ,
	"sk_lock-AF_RXRPC" , "sk_lock-AF_ISDN"     , "sk_lock-AF_PHONET"   ,
	"sk_lock-AF_IEEE802154", "sk_lock-AF_CAIF" , "sk_lock-AF_ALG"      ,
	"sk_lock-AF_NFC"   , "sk_lock-AF_VSOCK"    , "sk_lock-AF_KCM"      ,
	"sk_lock-AF_MAX"
};
static const char *const af_family_slock_key_strings[AF_MAX+1] = {
	"slock-AF_UNSPEC", "slock-AF_UNIX"     , "slock-AF_INET"     ,
	"slock-AF_AX25"  , "slock-AF_IPX"      , "slock-AF_APPLETALK",
	"slock-AF_NETROM", "slock-AF_BRIDGE"   , "slock-AF_ATMPVC"   ,
	"slock-AF_X25"   , "slock-AF_INET6"    , "slock-AF_ROSE"     ,
	"slock-AF_DECnet", "slock-AF_NETBEUI"  , "slock-AF_SECURITY" ,
	"slock-AF_KEY"   , "slock-AF_NETLINK"  , "slock-AF_PACKET"   ,
	"slock-AF_ASH"   , "slock-AF_ECONET"   , "slock-AF_ATMSVC"   ,
	"slock-AF_RDS"   , "slock-AF_SNA"      , "slock-AF_IRDA"     ,
	"slock-AF_PPPOX" , "slock-AF_WANPIPE"  , "slock-AF_LLC"      ,
	"slock-27"       , "slock-28"          , "slock-AF_CAN"      ,
	"slock-AF_TIPC"  , "slock-AF_BLUETOOTH", "slock-AF_IUCV"     ,
	"slock-AF_RXRPC" , "slock-AF_ISDN"     , "slock-AF_PHONET"   ,
	"slock-AF_IEEE802154", "slock-AF_CAIF" , "slock-AF_ALG"      ,
	"slock-AF_NFC"   , "slock-AF_VSOCK"    , "slock-AF_KCM"      ,
	"slock-AF_MAX"
};
static const char *const af_family_clock_key_strings[AF_MAX+1] = {
	"clock-AF_UNSPEC", "clock-AF_UNIX"     , "clock-AF_INET"     ,
	"clock-AF_AX25"  , "clock-AF_IPX"      , "clock-AF_APPLETALK",
	"clock-AF_NETROM", "clock-AF_BRIDGE"   , "clock-AF_ATMPVC"   ,
	"clock-AF_X25"   , "clock-AF_INET6"    , "clock-AF_ROSE"     ,
	"clock-AF_DECnet", "clock-AF_NETBEUI"  , "clock-AF_SECURITY" ,
	"clock-AF_KEY"   , "clock-AF_NETLINK"  , "clock-AF_PACKET"   ,
	"clock-AF_ASH"   , "clock-AF_ECONET"   , "clock-AF_ATMSVC"   ,
	"clock-AF_RDS"   , "clock-AF_SNA"      , "clock-AF_IRDA"     ,
	"clock-AF_PPPOX" , "clock-AF_WANPIPE"  , "clock-AF_LLC"      ,
	"clock-27"       , "clock-28"          , "clock-AF_CAN"      ,
	"clock-AF_TIPC"  , "clock-AF_BLUETOOTH", "clock-AF_IUCV"     ,
	"clock-AF_RXRPC" , "clock-AF_ISDN"     , "clock-AF_PHONET"   ,
	"clock-AF_IEEE802154", "clock-AF_CAIF" , "clock-AF_ALG"      ,
	"clock-AF_NFC"   , "clock-AF_VSOCK"    , "clock-AF_KCM"      ,
	"clock-AF_MAX"
};

/*
 * sk_callback_lock locking rules are per-address-family,
 * so split the lock classes by using a per-AF key:
 */
static struct lock_class_key af_callback_keys[AF_MAX];

/* Take into consideration the size of the struct sk_buff overhead in the
 * determination of these values, since that is non-constant across
 * platforms. This makes socket queueing behavior and performance
 * not depend upon such differences.
 */
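/* As a rough, platform-dependent illustration (an estimate, not a
 * guarantee): with SKB_TRUESIZE(256) on the order of 768 bytes on a
 * 64-bit build, the defaults below work out to roughly 256 * 768 bytes,
 * i.e. about 192 KB per direction.
 */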
#define _SK_MEM_PACKETS		256
#define _SK_MEM_OVERHEAD	SKB_TRUESIZE(256)
#define SK_WMEM_MAX		(_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
#define SK_RMEM_MAX		(_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)

/* Run time adjustable parameters. */
__u32 sysctl_wmem_max __read_mostly = SK_WMEM_MAX;
EXPORT_SYMBOL(sysctl_wmem_max);
__u32 sysctl_rmem_max __read_mostly = SK_RMEM_MAX;
EXPORT_SYMBOL(sysctl_rmem_max);
__u32 sysctl_wmem_default __read_mostly = SK_WMEM_MAX;
__u32 sysctl_rmem_default __read_mostly = SK_RMEM_MAX;

/* Maximal space eaten by iovec or ancillary data plus some space */
int sysctl_optmem_max __read_mostly = sizeof(unsigned long)*(2*UIO_MAXIOV+512);
EXPORT_SYMBOL(sysctl_optmem_max);

int sysctl_tstamp_allow_data __read_mostly = 1;

struct static_key memalloc_socks = STATIC_KEY_INIT_FALSE;
EXPORT_SYMBOL_GPL(memalloc_socks);

/**
 * sk_set_memalloc - sets %SOCK_MEMALLOC
 * @sk: socket to set it on
 *
 * Set %SOCK_MEMALLOC on a socket for access to emergency reserves.
 * It's the responsibility of the admin to adjust min_free_kbytes
 * to meet the requirements.
 */
void sk_set_memalloc(struct sock *sk)
{
	sock_set_flag(sk, SOCK_MEMALLOC);
	sk->sk_allocation |= __GFP_MEMALLOC;
	static_key_slow_inc(&memalloc_socks);
}
EXPORT_SYMBOL_GPL(sk_set_memalloc);

void sk_clear_memalloc(struct sock *sk)
{
	sock_reset_flag(sk, SOCK_MEMALLOC);
	sk->sk_allocation &= ~__GFP_MEMALLOC;
	static_key_slow_dec(&memalloc_socks);

	/*
	 * SOCK_MEMALLOC is allowed to ignore rmem limits to ensure forward
	 * progress of swapping. SOCK_MEMALLOC may be cleared while
	 * it has rmem allocations due to the last swapfile being deactivated,
	 * but there is a risk that the socket is unusable due to exceeding
	 * the rmem limits. Reclaim the reserves and obey rmem limits again.
	 */
	sk_mem_reclaim(sk);
}
EXPORT_SYMBOL_GPL(sk_clear_memalloc);

int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
	int ret;
	unsigned long pflags = current->flags;

	/* these should have been dropped before queueing */
	BUG_ON(!sock_flag(sk, SOCK_MEMALLOC));

	current->flags |= PF_MEMALLOC;
	ret = sk->sk_backlog_rcv(sk, skb);
	tsk_restore_flags(current, pflags, PF_MEMALLOC);

	return ret;
}
EXPORT_SYMBOL(__sk_backlog_rcv);
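
/* Convert a user-supplied struct timeval into a timeout in jiffies,
 * used by the SO_RCVTIMEO/SO_SNDTIMEO handlers below.  For example,
 * assuming HZ=1000 (illustrative value), { .tv_sec = 1, .tv_usec = 500000 }
 * becomes 1500 jiffies; the usec part is rounded up to the next tick.
 * A zero timeval means "wait forever", and a negative tv_sec is clamped
 * to a zero timeout with a rate-limited warning.
 */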
static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen)
{
	struct timeval tv;

	if (optlen < sizeof(tv))
		return -EINVAL;
	if (copy_from_user(&tv, optval, sizeof(tv)))
		return -EFAULT;
	if (tv.tv_usec < 0 || tv.tv_usec >= USEC_PER_SEC)
		return -EDOM;

	if (tv.tv_sec < 0) {
		static int warned __read_mostly;

		*timeo_p = 0;
		if (warned < 10 && net_ratelimit()) {
			warned++;
			pr_info("%s: `%s' (pid %d) tries to set negative timeout\n",
				__func__, current->comm, task_pid_nr(current));
		}
		return 0;
	}
	*timeo_p = MAX_SCHEDULE_TIMEOUT;
	if (tv.tv_sec == 0 && tv.tv_usec == 0)
		return 0;
	if (tv.tv_sec < (MAX_SCHEDULE_TIMEOUT/HZ - 1))
		*timeo_p = tv.tv_sec*HZ + (tv.tv_usec+(1000000/HZ-1))/(1000000/HZ);
	return 0;
}

static void sock_warn_obsolete_bsdism(const char *name)
{
	static int warned;
	static char warncomm[TASK_COMM_LEN];

	if (strcmp(warncomm, current->comm) && warned < 5) {
		strcpy(warncomm, current->comm);
		pr_warn("process `%s' is using obsolete %s SO_BSDCOMPAT\n",
			warncomm, name);
		warned++;
	}
}

static bool sock_needs_netstamp(const struct sock *sk)
{
	switch (sk->sk_family) {
	case AF_UNSPEC:
	case AF_UNIX:
		return false;
	default:
		return true;
	}
}

static void sock_disable_timestamp(struct sock *sk, unsigned long flags)
{
	if (sk->sk_flags & flags) {
		sk->sk_flags &= ~flags;
		if (sock_needs_netstamp(sk) &&
		    !(sk->sk_flags & SK_FLAGS_TIMESTAMP))
			net_disable_timestamp();
	}
}
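
/* Core receive-queue enqueue helper.  Returns 0 on success, -ENOMEM
 * when the receive buffer is already full, or -ENOBUFS when the memory
 * accounting charge fails; in both error cases sk_drops is incremented
 * and the caller still owns the skb.
 */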
int __sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	unsigned long flags;
	struct sk_buff_head *list = &sk->sk_receive_queue;

	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
		atomic_inc(&sk->sk_drops);
		trace_sock_rcvqueue_full(sk, skb);
		return -ENOMEM;
	}

	if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
		atomic_inc(&sk->sk_drops);
		return -ENOBUFS;
	}

	skb->dev = NULL;
	skb_set_owner_r(skb, sk);

	/* we escape from the RCU protected region, make sure we don't leak
	 * a non-refcounted dst
	 */
	skb_dst_force(skb);

	spin_lock_irqsave(&list->lock, flags);
	sock_skb_set_dropcount(sk, skb);
	__skb_queue_tail(list, skb);
	spin_unlock_irqrestore(&list->lock, flags);

	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk);
	return 0;
}
EXPORT_SYMBOL(__sock_queue_rcv_skb);

int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	int err;

	err = sk_filter(sk, skb);
	if (err)
		return err;

	return __sock_queue_rcv_skb(sk, skb);
}
EXPORT_SYMBOL(sock_queue_rcv_skb);

int __sk_receive_skb(struct sock *sk, struct sk_buff *skb,
		     const int nested, unsigned int trim_cap)
{
	int rc = NET_RX_SUCCESS;

	if (sk_filter_trim_cap(sk, skb, trim_cap))
		goto discard_and_relse;

	skb->dev = NULL;

	if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) {
		atomic_inc(&sk->sk_drops);
		goto discard_and_relse;
	}
	if (nested)
		bh_lock_sock_nested(sk);
	else
		bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		/*
		 * trylock + unlock semantics:
		 */
		mutex_acquire(&sk->sk_lock.dep_map, 0, 1, _RET_IP_);

		rc = sk_backlog_rcv(sk, skb);

		mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
	} else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
		bh_unlock_sock(sk);
		atomic_inc(&sk->sk_drops);
		goto discard_and_relse;
	}

	bh_unlock_sock(sk);
out:
	sock_put(sk);
	return rc;
discard_and_relse:
	kfree_skb(skb);
	goto out;
}
EXPORT_SYMBOL(__sk_receive_skb);

struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie)
{
	struct dst_entry *dst = __sk_dst_get(sk);

	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
		sk_tx_queue_clear(sk);
		RCU_INIT_POINTER(sk->sk_dst_cache, NULL);
		dst_release(dst);
		return NULL;
	}

	return dst;
}
EXPORT_SYMBOL(__sk_dst_check);

struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie)
{
	struct dst_entry *dst = sk_dst_get(sk);

	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
		sk_dst_reset(sk);
		dst_release(dst);
		return NULL;
	}

	return dst;
}
EXPORT_SYMBOL(sk_dst_check);

static int sock_setbindtodevice(struct sock *sk, char __user *optval,
				int optlen)
{
	int ret = -ENOPROTOOPT;
#ifdef CONFIG_NETDEVICES
	struct net *net = sock_net(sk);
	char devname[IFNAMSIZ];
	int index;

	/* Sorry... */
	ret = -EPERM;
	if (!ns_capable(net->user_ns, CAP_NET_RAW))
		goto out;

	ret = -EINVAL;
	if (optlen < 0)
		goto out;

	/* Bind this socket to a particular device like "eth0",
	 * as specified in the passed interface name. If the
	 * name is "" or the option length is zero the socket
	 * is not bound.
	 */
	if (optlen > IFNAMSIZ - 1)
		optlen = IFNAMSIZ - 1;
	memset(devname, 0, sizeof(devname));

	ret = -EFAULT;
	if (copy_from_user(devname, optval, optlen))
		goto out;

	index = 0;
	if (devname[0] != '\0') {
		struct net_device *dev;

		rcu_read_lock();
		dev = dev_get_by_name_rcu(net, devname);
		if (dev)
			index = dev->ifindex;
		rcu_read_unlock();
		ret = -ENODEV;
		if (!dev)
			goto out;
	}

	lock_sock(sk);
	sk->sk_bound_dev_if = index;
	sk_dst_reset(sk);
	release_sock(sk);

	ret = 0;

out:
#endif

	return ret;
}

static int sock_getbindtodevice(struct sock *sk, char __user *optval,
				int __user *optlen, int len)
{
	int ret = -ENOPROTOOPT;
#ifdef CONFIG_NETDEVICES
	struct net *net = sock_net(sk);
	char devname[IFNAMSIZ];

	if (sk->sk_bound_dev_if == 0) {
		len = 0;
		goto zero;
	}

	ret = -EINVAL;
	if (len < IFNAMSIZ)
		goto out;

	ret = netdev_get_name(net, devname, sk->sk_bound_dev_if);
	if (ret)
		goto out;

	len = strlen(devname) + 1;

	ret = -EFAULT;
	if (copy_to_user(optval, devname, len))
		goto out;

zero:
	ret = -EFAULT;
	if (put_user(len, optlen))
		goto out;

	ret = 0;

out:
#endif

	return ret;
}

static inline void sock_valbool_flag(struct sock *sk, int bit, int valbool)
{
	if (valbool)
		sock_set_flag(sk, bit);
	else
		sock_reset_flag(sk, bit);
}

bool sk_mc_loop(struct sock *sk)
{
	if (dev_recursion_level())
		return false;
	if (!sk)
		return true;
	switch (sk->sk_family) {
	case AF_INET:
		return inet_sk(sk)->mc_loop;
#if IS_ENABLED(CONFIG_IPV6)
	case AF_INET6:
		return inet6_sk(sk)->mc_loop;
#endif
	}
	WARN_ON(1);
	return true;
}
EXPORT_SYMBOL(sk_mc_loop);

/*
 *	This is meant for all protocols to use and covers goings on
 *	at the socket level. Everything here is generic.
 */

int sock_setsockopt(struct socket *sock, int level, int optname,
		    char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	int val;
	int valbool;
	struct linger ling;
	int ret = 0;

	/*
	 *	Options without arguments
	 */

	if (optname == SO_BINDTODEVICE)
		return sock_setbindtodevice(sk, optval, optlen);

	if (optlen < sizeof(int))
		return -EINVAL;

	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	valbool = val ? 1 : 0;

	lock_sock(sk);

	switch (optname) {
	case SO_DEBUG:
		if (val && !capable(CAP_NET_ADMIN))
			ret = -EACCES;
		else
			sock_valbool_flag(sk, SOCK_DBG, valbool);
		break;
	case SO_REUSEADDR:
		sk->sk_reuse = (valbool ? SK_CAN_REUSE : SK_NO_REUSE);
		break;
	case SO_REUSEPORT:
		sk->sk_reuseport = valbool;
		break;
	case SO_TYPE:
	case SO_PROTOCOL:
	case SO_DOMAIN:
	case SO_ERROR:
		ret = -ENOPROTOOPT;
		break;
	case SO_DONTROUTE:
		sock_valbool_flag(sk, SOCK_LOCALROUTE, valbool);
		break;
	case SO_BROADCAST:
		sock_valbool_flag(sk, SOCK_BROADCAST, valbool);
		break;
	case SO_SNDBUF:
		/* Don't error on this; BSD doesn't, and if you think
		 * about it this is right. Otherwise apps have to
		 * play 'guess the biggest size' games. RCVBUF/SNDBUF
		 * are treated in BSD as hints.
		 */
		val = min_t(u32, val, sysctl_wmem_max);
set_sndbuf:
		sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
		sk->sk_sndbuf = max_t(u32, val * 2, SOCK_MIN_SNDBUF);
		/* Wake up sending tasks if we upped the value. */
		sk->sk_write_space(sk);
		break;

	case SO_SNDBUFFORCE:
		if (!capable(CAP_NET_ADMIN)) {
			ret = -EPERM;
			break;
		}
		goto set_sndbuf;

	case SO_RCVBUF:
		/* Don't error on this; BSD doesn't, and if you think
		 * about it this is right. Otherwise apps have to
		 * play 'guess the biggest size' games. RCVBUF/SNDBUF
		 * are treated in BSD as hints.
		 */
		val = min_t(u32, val, sysctl_rmem_max);
set_rcvbuf:
		sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
		/*
		 * We double it on the way in to account for
		 * "struct sk_buff" etc. overhead. Applications
		 * assume that the SO_RCVBUF setting they make will
		 * allow that much actual data to be received on that
		 * socket.
		 *
		 * Applications are unaware that "struct sk_buff" and
		 * other overheads allocate from the receive buffer
		 * during socket buffer allocation.
		 *
		 * And after considering the possible alternatives,
		 * returning the value we actually used in getsockopt
		 * is the most desirable behavior.
		 */
		sk->sk_rcvbuf = max_t(u32, val * 2, SOCK_MIN_RCVBUF);
		break;

	case SO_RCVBUFFORCE:
		if (!capable(CAP_NET_ADMIN)) {
			ret = -EPERM;
			break;
		}
		goto set_rcvbuf;

	case SO_KEEPALIVE:
#ifdef CONFIG_INET
		if (sk->sk_protocol == IPPROTO_TCP &&
		    sk->sk_type == SOCK_STREAM)
			tcp_set_keepalive(sk, valbool);
#endif
		sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool);
		break;

	case SO_OOBINLINE:
		sock_valbool_flag(sk, SOCK_URGINLINE, valbool);
		break;

	case SO_NO_CHECK:
		sk->sk_no_check_tx = valbool;
		break;

	case SO_PRIORITY:
		if ((val >= 0 && val <= 6) ||
		    ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
			sk->sk_priority = val;
		else
			ret = -EPERM;
		break;

	case SO_LINGER:
		if (optlen < sizeof(ling)) {
			ret = -EINVAL;	/* 1003.1g */
			break;
		}
		if (copy_from_user(&ling, optval, sizeof(ling))) {
			ret = -EFAULT;
			break;
		}
		if (!ling.l_onoff)
			sock_reset_flag(sk, SOCK_LINGER);
		else {
#if (BITS_PER_LONG == 32)
			if ((unsigned int)ling.l_linger >= MAX_SCHEDULE_TIMEOUT/HZ)
				sk->sk_lingertime = MAX_SCHEDULE_TIMEOUT;
			else
#endif
				sk->sk_lingertime = (unsigned int)ling.l_linger * HZ;
			sock_set_flag(sk, SOCK_LINGER);
		}
		break;

	case SO_BSDCOMPAT:
		sock_warn_obsolete_bsdism("setsockopt");
		break;

	case SO_PASSCRED:
		if (valbool)
			set_bit(SOCK_PASSCRED, &sock->flags);
		else
			clear_bit(SOCK_PASSCRED, &sock->flags);
		break;

	case SO_TIMESTAMP:
	case SO_TIMESTAMPNS:
		if (valbool) {
			if (optname == SO_TIMESTAMP)
				sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
			else
				sock_set_flag(sk, SOCK_RCVTSTAMPNS);
			sock_set_flag(sk, SOCK_RCVTSTAMP);
			sock_enable_timestamp(sk, SOCK_TIMESTAMP);
		} else {
			sock_reset_flag(sk, SOCK_RCVTSTAMP);
			sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
		}
		break;

	case SO_TIMESTAMPING:
		if (val & ~SOF_TIMESTAMPING_MASK) {
			ret = -EINVAL;
			break;
		}

		if (val & SOF_TIMESTAMPING_OPT_ID &&
		    !(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)) {
			if (sk->sk_protocol == IPPROTO_TCP &&
			    sk->sk_type == SOCK_STREAM) {
				if ((1 << sk->sk_state) &
				    (TCPF_CLOSE | TCPF_LISTEN)) {
					ret = -EINVAL;
					break;
				}
				sk->sk_tskey = tcp_sk(sk)->snd_una;
			} else {
				sk->sk_tskey = 0;
			}
		}
		sk->sk_tsflags = val;
		if (val & SOF_TIMESTAMPING_RX_SOFTWARE)
			sock_enable_timestamp(sk,
					      SOCK_TIMESTAMPING_RX_SOFTWARE);
		else
			sock_disable_timestamp(sk,
					       (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE));
		break;

	case SO_RCVLOWAT:
		if (val < 0)
			val = INT_MAX;
		sk->sk_rcvlowat = val ? : 1;
		break;

	case SO_RCVTIMEO:
		ret = sock_set_timeout(&sk->sk_rcvtimeo, optval, optlen);
		break;

	case SO_SNDTIMEO:
		ret = sock_set_timeout(&sk->sk_sndtimeo, optval, optlen);
		break;

	case SO_ATTACH_FILTER:
		ret = -EINVAL;
		if (optlen == sizeof(struct sock_fprog)) {
			struct sock_fprog fprog;

			ret = -EFAULT;
			if (copy_from_user(&fprog, optval, sizeof(fprog)))
				break;

			ret = sk_attach_filter(&fprog, sk);
		}
		break;

	case SO_ATTACH_BPF:
		ret = -EINVAL;
		if (optlen == sizeof(u32)) {
			u32 ufd;

			ret = -EFAULT;
			if (copy_from_user(&ufd, optval, sizeof(ufd)))
				break;

			ret = sk_attach_bpf(ufd, sk);
		}
		break;

	case SO_ATTACH_REUSEPORT_CBPF:
		ret = -EINVAL;
		if (optlen == sizeof(struct sock_fprog)) {
			struct sock_fprog fprog;

			ret = -EFAULT;
			if (copy_from_user(&fprog, optval, sizeof(fprog)))
				break;

			ret = sk_reuseport_attach_filter(&fprog, sk);
		}
		break;

	case SO_ATTACH_REUSEPORT_EBPF:
		ret = -EINVAL;
		if (optlen == sizeof(u32)) {
			u32 ufd;

			ret = -EFAULT;
			if (copy_from_user(&ufd, optval, sizeof(ufd)))
				break;

			ret = sk_reuseport_attach_bpf(ufd, sk);
		}
		break;

	case SO_DETACH_FILTER:
		ret = sk_detach_filter(sk);
		break;

	case SO_LOCK_FILTER:
		if (sock_flag(sk, SOCK_FILTER_LOCKED) && !valbool)
			ret = -EPERM;
		else
			sock_valbool_flag(sk, SOCK_FILTER_LOCKED, valbool);
		break;

	case SO_PASSSEC:
		if (valbool)
			set_bit(SOCK_PASSSEC, &sock->flags);
		else
			clear_bit(SOCK_PASSSEC, &sock->flags);
		break;

	case SO_MARK:
		if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
			ret = -EPERM;
		else
			sk->sk_mark = val;
		break;

	case SO_RXQ_OVFL:
		sock_valbool_flag(sk, SOCK_RXQ_OVFL, valbool);
		break;

	case SO_WIFI_STATUS:
		sock_valbool_flag(sk, SOCK_WIFI_STATUS, valbool);
		break;

	case SO_PEEK_OFF:
		if (sock->ops->set_peek_off)
			ret = sock->ops->set_peek_off(sk, val);
		else
			ret = -EOPNOTSUPP;
		break;

	case SO_NOFCS:
		sock_valbool_flag(sk, SOCK_NOFCS, valbool);
		break;

	case SO_SELECT_ERR_QUEUE:
		sock_valbool_flag(sk, SOCK_SELECT_ERR_QUEUE, valbool);
		break;

#ifdef CONFIG_NET_RX_BUSY_POLL
	case SO_BUSY_POLL:
		/* allow unprivileged users to decrease the value */
		if ((val > sk->sk_ll_usec) && !capable(CAP_NET_ADMIN))
			ret = -EPERM;
		else {
			if (val < 0)
				ret = -EINVAL;
			else
				sk->sk_ll_usec = val;
		}
		break;
#endif

	case SO_MAX_PACING_RATE:
		sk->sk_max_pacing_rate = val;
		sk->sk_pacing_rate = min(sk->sk_pacing_rate,
					 sk->sk_max_pacing_rate);
		break;

	case SO_INCOMING_CPU:
		sk->sk_incoming_cpu = val;
		break;

	case SO_CNX_ADVICE:
		if (val == 1)
			dst_negative_advice(sk);
		break;

	default:
		ret = -ENOPROTOOPT;
		break;
	}
	release_sock(sk);
	return ret;
}
EXPORT_SYMBOL(sock_setsockopt);
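
/* Illustrative userspace view of the SO_SNDBUF/SO_RCVBUF doubling done
 * above (example values only; the result is clamped by sysctl_wmem_max/
 * sysctl_rmem_max and the SOCK_MIN_SNDBUF/SOCK_MIN_RCVBUF floors):
 *
 *	int n = 65536;
 *	setsockopt(fd, SOL_SOCKET, SO_SNDBUF, &n, sizeof(n));
 *	socklen_t len = sizeof(n);
 *	getsockopt(fd, SOL_SOCKET, SO_SNDBUF, &n, &len);  // n == 131072
 */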

static void cred_to_ucred(struct pid *pid, const struct cred *cred,
			  struct ucred *ucred)
{
	ucred->pid = pid_vnr(pid);
	ucred->uid = ucred->gid = -1;
	if (cred) {
		struct user_namespace *current_ns = current_user_ns();

		ucred->uid = from_kuid_munged(current_ns, cred->euid);
		ucred->gid = from_kgid_munged(current_ns, cred->egid);
	}
}

int sock_getsockopt(struct socket *sock, int level, int optname,
		    char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;

	union {
		int val;
		struct linger ling;
		struct timeval tm;
	} v;

	int lv = sizeof(int);
	int len;

	if (get_user(len, optlen))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	memset(&v, 0, sizeof(v));

	switch (optname) {
	case SO_DEBUG:
		v.val = sock_flag(sk, SOCK_DBG);
		break;

	case SO_DONTROUTE:
		v.val = sock_flag(sk, SOCK_LOCALROUTE);
		break;

	case SO_BROADCAST:
		v.val = sock_flag(sk, SOCK_BROADCAST);
		break;

	case SO_SNDBUF:
		v.val = sk->sk_sndbuf;
		break;

	case SO_RCVBUF:
		v.val = sk->sk_rcvbuf;
		break;

	case SO_REUSEADDR:
		v.val = sk->sk_reuse;
		break;

	case SO_REUSEPORT:
		v.val = sk->sk_reuseport;
		break;

	case SO_KEEPALIVE:
		v.val = sock_flag(sk, SOCK_KEEPOPEN);
		break;

	case SO_TYPE:
		v.val = sk->sk_type;
		break;

	case SO_PROTOCOL:
		v.val = sk->sk_protocol;
		break;

	case SO_DOMAIN:
		v.val = sk->sk_family;
		break;

	case SO_ERROR:
		v.val = -sock_error(sk);
		if (v.val == 0)
			v.val = xchg(&sk->sk_err_soft, 0);
		break;

	case SO_OOBINLINE:
		v.val = sock_flag(sk, SOCK_URGINLINE);
		break;

	case SO_NO_CHECK:
		v.val = sk->sk_no_check_tx;
		break;

	case SO_PRIORITY:
		v.val = sk->sk_priority;
		break;

	case SO_LINGER:
		lv		= sizeof(v.ling);
		v.ling.l_onoff	= sock_flag(sk, SOCK_LINGER);
		v.ling.l_linger	= sk->sk_lingertime / HZ;
		break;

	case SO_BSDCOMPAT:
		sock_warn_obsolete_bsdism("getsockopt");
		break;

	case SO_TIMESTAMP:
		v.val = sock_flag(sk, SOCK_RCVTSTAMP) &&
				!sock_flag(sk, SOCK_RCVTSTAMPNS);
		break;

	case SO_TIMESTAMPNS:
		v.val = sock_flag(sk, SOCK_RCVTSTAMPNS);
		break;

	case SO_TIMESTAMPING:
		v.val = sk->sk_tsflags;
		break;

	case SO_RCVTIMEO:
		lv = sizeof(struct timeval);
		if (sk->sk_rcvtimeo == MAX_SCHEDULE_TIMEOUT) {
			v.tm.tv_sec = 0;
			v.tm.tv_usec = 0;
		} else {
			v.tm.tv_sec = sk->sk_rcvtimeo / HZ;
			v.tm.tv_usec = ((sk->sk_rcvtimeo % HZ) * 1000000) / HZ;
		}
		break;

	case SO_SNDTIMEO:
		lv = sizeof(struct timeval);
		if (sk->sk_sndtimeo == MAX_SCHEDULE_TIMEOUT) {
			v.tm.tv_sec = 0;
			v.tm.tv_usec = 0;
		} else {
			v.tm.tv_sec = sk->sk_sndtimeo / HZ;
			v.tm.tv_usec = ((sk->sk_sndtimeo % HZ) * 1000000) / HZ;
		}
		break;

	case SO_RCVLOWAT:
		v.val = sk->sk_rcvlowat;
		break;

	case SO_SNDLOWAT:
		v.val = 1;
		break;

	case SO_PASSCRED:
		v.val = !!test_bit(SOCK_PASSCRED, &sock->flags);
		break;

	case SO_PEERCRED:
	{
		struct ucred peercred;
		if (len > sizeof(peercred))
			len = sizeof(peercred);
		cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
		if (copy_to_user(optval, &peercred, len))
			return -EFAULT;
		goto lenout;
	}

	case SO_PEERNAME:
	{
		char address[128];

		if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2))
			return -ENOTCONN;
		if (lv < len)
			return -EINVAL;
		if (copy_to_user(optval, address, len))
			return -EFAULT;
		goto lenout;
	}

	/* Dubious BSD thing... Probably nobody even uses it, but
	 * the UNIX standard wants it for whatever reason... -DaveM
	 */
	case SO_ACCEPTCONN:
		v.val = sk->sk_state == TCP_LISTEN;
		break;

	case SO_PASSSEC:
		v.val = !!test_bit(SOCK_PASSSEC, &sock->flags);
		break;

	case SO_PEERSEC:
		return security_socket_getpeersec_stream(sock, optval, optlen, len);

	case SO_MARK:
		v.val = sk->sk_mark;
		break;

	case SO_RXQ_OVFL:
		v.val = sock_flag(sk, SOCK_RXQ_OVFL);
		break;

	case SO_WIFI_STATUS:
		v.val = sock_flag(sk, SOCK_WIFI_STATUS);
		break;

	case SO_PEEK_OFF:
		if (!sock->ops->set_peek_off)
			return -EOPNOTSUPP;

		v.val = sk->sk_peek_off;
		break;

	case SO_NOFCS:
		v.val = sock_flag(sk, SOCK_NOFCS);
		break;

	case SO_BINDTODEVICE:
		return sock_getbindtodevice(sk, optval, optlen, len);

	case SO_GET_FILTER:
		len = sk_get_filter(sk, (struct sock_filter __user *)optval, len);
		if (len < 0)
			return len;

		goto lenout;

	case SO_LOCK_FILTER:
		v.val = sock_flag(sk, SOCK_FILTER_LOCKED);
		break;

	case SO_BPF_EXTENSIONS:
		v.val = bpf_tell_extensions();
		break;

	case SO_SELECT_ERR_QUEUE:
		v.val = sock_flag(sk, SOCK_SELECT_ERR_QUEUE);
		break;

#ifdef CONFIG_NET_RX_BUSY_POLL
	case SO_BUSY_POLL:
		v.val = sk->sk_ll_usec;
		break;
#endif

	case SO_MAX_PACING_RATE:
		v.val = sk->sk_max_pacing_rate;
		break;

	case SO_INCOMING_CPU:
		v.val = sk->sk_incoming_cpu;
		break;

	default:
		/* We implement the SO_SNDLOWAT etc to not be settable
		 * (1003.1g 7).
		 */
		return -ENOPROTOOPT;
	}

	if (len > lv)
		len = lv;
	if (copy_to_user(optval, &v, len))
		return -EFAULT;
lenout:
	if (put_user(len, optlen))
		return -EFAULT;
	return 0;
}

/*
 * Initialize an sk_lock.
 *
 * (We also register the sk_lock with the lock validator.)
 */
static inline void sock_lock_init(struct sock *sk)
{
	sock_lock_init_class_and_name(sk,
			af_family_slock_key_strings[sk->sk_family],
			af_family_slock_keys + sk->sk_family,
			af_family_key_strings[sk->sk_family],
			af_family_keys + sk->sk_family);
}

/*
 * Copy all fields from osk to nsk but nsk->sk_refcnt must not change yet,
 * even temporarily, because of RCU lookups. sk_node should also be left as is.
 * We must not copy fields between sk_dontcopy_begin and sk_dontcopy_end.
 */
static void sock_copy(struct sock *nsk, const struct sock *osk)
{
#ifdef CONFIG_SECURITY_NETWORK
	void *sptr = nsk->sk_security;
#endif
	memcpy(nsk, osk, offsetof(struct sock, sk_dontcopy_begin));

	memcpy(&nsk->sk_dontcopy_end, &osk->sk_dontcopy_end,
	       osk->sk_prot->obj_size - offsetof(struct sock, sk_dontcopy_end));

#ifdef CONFIG_SECURITY_NETWORK
	nsk->sk_security = sptr;
	security_sk_clone(osk, nsk);
#endif
}
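
/* Allocate a struct sock from the protocol's dedicated slab cache when
 * one exists, otherwise fall back to kmalloc().  Note that __GFP_ZERO
 * is stripped on the slab path and emulated via sk_prot_clear_nulls(),
 * which zeroes the object while leaving intact the state that
 * concurrent RCU/nulls list lookups depend on.
 */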
static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority,
				  int family)
{
	struct sock *sk;
	struct kmem_cache *slab;

	slab = prot->slab;
	if (slab != NULL) {
		sk = kmem_cache_alloc(slab, priority & ~__GFP_ZERO);
		if (!sk)
			return sk;
		if (priority & __GFP_ZERO)
			sk_prot_clear_nulls(sk, prot->obj_size);
	} else
		sk = kmalloc(prot->obj_size, priority);

	if (sk != NULL) {
		kmemcheck_annotate_bitfield(sk, flags);

		if (security_sk_alloc(sk, family, priority))
			goto out_free;

		if (!try_module_get(prot->owner))
			goto out_free_sec;
		sk_tx_queue_clear(sk);
	}

	return sk;

out_free_sec:
	security_sk_free(sk);
out_free:
	if (slab != NULL)
		kmem_cache_free(slab, sk);
	else
		kfree(sk);
	return NULL;
}

static void sk_prot_free(struct proto *prot, struct sock *sk)
{
	struct kmem_cache *slab;
	struct module *owner;

	owner = prot->owner;
	slab = prot->slab;

	cgroup_sk_free(&sk->sk_cgrp_data);
	mem_cgroup_sk_free(sk);
	security_sk_free(sk);
	if (slab != NULL)
		kmem_cache_free(slab, sk);
	else
		kfree(sk);
	module_put(owner);
}

/**
 * sk_alloc - All socket objects are allocated here
 * @net: the applicable net namespace
 * @family: protocol family
 * @priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
 * @prot: struct proto associated with this new sock instance
 * @kern: is this to be a kernel socket?
 */
struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
		      struct proto *prot, int kern)
{
	struct sock *sk;

	sk = sk_prot_alloc(prot, priority | __GFP_ZERO, family);
	if (sk) {
		sk->sk_family = family;
		/*
		 * See comment in struct sock definition to understand
		 * why we need sk_prot_creator -acme
		 */
		sk->sk_prot = sk->sk_prot_creator = prot;
		sock_lock_init(sk);
		sk->sk_net_refcnt = kern ? 0 : 1;
		if (likely(sk->sk_net_refcnt))
			get_net(net);
		sock_net_set(sk, net);
		atomic_set(&sk->sk_wmem_alloc, 1);

		mem_cgroup_sk_alloc(sk);
		cgroup_sk_alloc(&sk->sk_cgrp_data);
		sock_update_classid(&sk->sk_cgrp_data);
		sock_update_netprioidx(&sk->sk_cgrp_data);
	}

	return sk;
}
EXPORT_SYMBOL(sk_alloc);

/* Sockets having SOCK_RCU_FREE will call this function after one RCU
 * grace period. This is the case for UDP sockets and TCP listeners.
 */
static void __sk_destruct(struct rcu_head *head)
{
	struct sock *sk = container_of(head, struct sock, sk_rcu);
	struct sk_filter *filter;

	if (sk->sk_destruct)
		sk->sk_destruct(sk);

	filter = rcu_dereference_check(sk->sk_filter,
				       atomic_read(&sk->sk_wmem_alloc) == 0);
	if (filter) {
		sk_filter_uncharge(sk, filter);
		RCU_INIT_POINTER(sk->sk_filter, NULL);
	}
	if (rcu_access_pointer(sk->sk_reuseport_cb))
		reuseport_detach_sock(sk);

	sock_disable_timestamp(sk, SK_FLAGS_TIMESTAMP);

	if (atomic_read(&sk->sk_omem_alloc))
		pr_debug("%s: optmem leakage (%d bytes) detected\n",
			 __func__, atomic_read(&sk->sk_omem_alloc));

	if (sk->sk_peer_cred)
		put_cred(sk->sk_peer_cred);
	put_pid(sk->sk_peer_pid);
	if (likely(sk->sk_net_refcnt))
		put_net(sock_net(sk));
	sk_prot_free(sk->sk_prot_creator, sk);
}

void sk_destruct(struct sock *sk)
{
	if (sock_flag(sk, SOCK_RCU_FREE))
		call_rcu(&sk->sk_rcu, __sk_destruct);
	else
		__sk_destruct(&sk->sk_rcu);
}
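
/* Final teardown: if a sock_diag destroy listener is interested in this
 * socket, hand it to sock_diag_broadcast_destroy() so the destruction
 * can be reported before the socket is destructed; otherwise destruct
 * directly.
 */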
static void __sk_free(struct sock *sk)
{
	if (unlikely(sock_diag_has_destroy_listeners(sk) && sk->sk_net_refcnt))
		sock_diag_broadcast_destroy(sk);
	else
		sk_destruct(sk);
}

void sk_free(struct sock *sk)
{
	/*
	 * We subtract one from sk_wmem_alloc and can know if
	 * some packets are still in some tx queue.
	 * If not null, sock_wfree() will call __sk_free(sk) later
	 */
	if (atomic_dec_and_test(&sk->sk_wmem_alloc))
		__sk_free(sk);
}
EXPORT_SYMBOL(sk_free);
/**
 * sk_clone_lock - clone a socket, and lock its clone
 * @sk: the socket to clone
 * @priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
 *
 * Caller must unlock socket even in error path (bh_unlock_sock(newsk))
 */
struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
{
        struct sock *newsk;
        bool is_charged = true;

        newsk = sk_prot_alloc(sk->sk_prot, priority, sk->sk_family);
        if (newsk != NULL) {
                struct sk_filter *filter;

                sock_copy(newsk, sk);

                /* SANITY */
                if (likely(newsk->sk_net_refcnt))
                        get_net(sock_net(newsk));
                sk_node_init(&newsk->sk_node);
                sock_lock_init(newsk);
                bh_lock_sock(newsk);
                newsk->sk_backlog.head = newsk->sk_backlog.tail = NULL;
                newsk->sk_backlog.len = 0;

                atomic_set(&newsk->sk_rmem_alloc, 0);
                /*
                 * sk_wmem_alloc set to one (see sk_free() and sock_wfree())
                 */
                atomic_set(&newsk->sk_wmem_alloc, 1);
                atomic_set(&newsk->sk_omem_alloc, 0);
                skb_queue_head_init(&newsk->sk_receive_queue);
                skb_queue_head_init(&newsk->sk_write_queue);

                rwlock_init(&newsk->sk_callback_lock);
                lockdep_set_class_and_name(&newsk->sk_callback_lock,
                                af_callback_keys + newsk->sk_family,
                                af_family_clock_key_strings[newsk->sk_family]);

                newsk->sk_dst_cache = NULL;
                newsk->sk_wmem_queued = 0;
                newsk->sk_forward_alloc = 0;
                atomic_set(&newsk->sk_drops, 0);
                newsk->sk_send_head = NULL;
                newsk->sk_userlocks = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;

                sock_reset_flag(newsk, SOCK_DONE);
                skb_queue_head_init(&newsk->sk_error_queue);

                filter = rcu_dereference_protected(newsk->sk_filter, 1);
                if (filter != NULL)
                        /* though it's an empty new sock, the charging may fail
                         * if sysctl_optmem_max was changed between creation of
                         * original socket and cloning
                         */
                        is_charged = sk_filter_charge(newsk, filter);

                if (unlikely(!is_charged || xfrm_sk_clone_policy(newsk, sk))) {
                        /* It is still raw copy of parent, so invalidate
                         * destructor and make plain sk_free()
                         */
                        newsk->sk_destruct = NULL;
                        bh_unlock_sock(newsk);
                        sk_free(newsk);
                        newsk = NULL;
                        goto out;
                }
                RCU_INIT_POINTER(newsk->sk_reuseport_cb, NULL);

                newsk->sk_err = 0;
                newsk->sk_priority = 0;
                newsk->sk_incoming_cpu = raw_smp_processor_id();
                atomic64_set(&newsk->sk_cookie, 0);

                mem_cgroup_sk_alloc(newsk);
                cgroup_sk_alloc(&newsk->sk_cgrp_data);

                /*
                 * Before updating sk_refcnt, we must commit prior changes to memory
                 * (Documentation/RCU/rculist_nulls.txt for details)
                 */
                smp_wmb();
                atomic_set(&newsk->sk_refcnt, 2);

                /*
                 * Increment the counter in the same struct proto as the master
                 * sock (sk_refcnt_debug_inc uses newsk->sk_prot->socks, that
                 * is the same as sk->sk_prot->socks, as this field was copied
                 * with memcpy).
                 *
                 * This _changes_ the previous behaviour, where
                 * tcp_create_openreq_child always was incrementing the
                 * equivalent to tcp_prot->socks (inet_sock_nr), so this has
                 * to be taken into account in all callers. -acme
                 */
                sk_refcnt_debug_inc(newsk);
                sk_set_socket(newsk, NULL);
                newsk->sk_wq = NULL;

                if (newsk->sk_prot->sockets_allocated)
                        sk_sockets_allocated_inc(newsk);

                if (sock_needs_netstamp(sk) &&
                    newsk->sk_flags & SK_FLAGS_TIMESTAMP)
                        net_enable_timestamp();
        }
out:
        return newsk;
}
EXPORT_SYMBOL_GPL(sk_clone_lock);
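
/*
 * Illustrative sketch (not part of this file): a typical caller clones a
 * parent socket and must drop the bh lock itself, even when it decides to
 * discard the clone (see the kernel-doc above). Names below are
 * hypothetical:
 *
 *      struct sock *child = sk_clone_lock(parent, GFP_ATOMIC);
 *
 *      if (child) {
 *              ... protocol-specific initialisation of child ...
 *              bh_unlock_sock(child);  // mandatory, even on error paths
 *      }
 */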
void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
{
        u32 max_segs = 1;

        sk_dst_set(sk, dst);
        sk->sk_route_caps = dst->dev->features;
        if (sk->sk_route_caps & NETIF_F_GSO)
                sk->sk_route_caps |= NETIF_F_GSO_SOFTWARE;
        sk->sk_route_caps &= ~sk->sk_route_nocaps;
        if (sk_can_gso(sk)) {
                if (dst->header_len) {
                        sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
                } else {
                        sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
                        sk->sk_gso_max_size = dst->dev->gso_max_size;
                        max_segs = max_t(u32, dst->dev->gso_max_segs, 1);
                }
        }
        sk->sk_gso_max_segs = max_segs;
}
EXPORT_SYMBOL_GPL(sk_setup_caps);
/*
 * Simple resource managers for sockets.
 */

/*
 * Write buffer destructor automatically called from kfree_skb.
 */
void sock_wfree(struct sk_buff *skb)
{
        struct sock *sk = skb->sk;
        unsigned int len = skb->truesize;

        if (!sock_flag(sk, SOCK_USE_WRITE_QUEUE)) {
                /*
                 * Keep a reference on sk_wmem_alloc, this will be released
                 * after sk_write_space() call
                 */
                atomic_sub(len - 1, &sk->sk_wmem_alloc);
                sk->sk_write_space(sk);
                len = 1;
        }
        /*
         * if sk_wmem_alloc reaches 0, we must finish what sk_free()
         * could not do because of in-flight packets
         */
        if (atomic_sub_and_test(len, &sk->sk_wmem_alloc))
                __sk_free(sk);
}
EXPORT_SYMBOL(sock_wfree);
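
/*
 * Illustrative sketch (not part of this file): how sk_wmem_alloc keeps a
 * socket alive while packets are in flight. skb_set_owner_w() charges
 * skb->truesize to sk_wmem_alloc, so sk_free() only drops the extra unit
 * taken at socket creation; the socket is freed by whichever of sk_free()
 * and sock_wfree() brings the counter to zero last:
 *
 *      skb_set_owner_w(skb, sk);  // sk_wmem_alloc += skb->truesize
 *      dev_queue_xmit(skb);       // packet now in flight
 *      sk_free(sk);               // sk_wmem_alloc -= 1, still > 0
 *      ... TX completion ...
 *      kfree_skb(skb);            // sock_wfree() drops the rest; counter
 *                                 // hits 0 and __sk_free() finally runs
 */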
/* This variant of sock_wfree() is used by TCP,
 * since it sets SOCK_USE_WRITE_QUEUE.
 */
void __sock_wfree(struct sk_buff *skb)
{
        struct sock *sk = skb->sk;

        if (atomic_sub_and_test(skb->truesize, &sk->sk_wmem_alloc))
                __sk_free(sk);
}

void skb_set_owner_w(struct sk_buff *skb, struct sock *sk)
{
        skb_orphan(skb);
        skb->sk = sk;
#ifdef CONFIG_INET
        if (unlikely(!sk_fullsock(sk))) {
                skb->destructor = sock_edemux;
                sock_hold(sk);
                return;
        }
#endif
        skb->destructor = sock_wfree;
        skb_set_hash_from_sk(skb, sk);
        /*
         * We used to take a refcount on sk, but following operation
         * is enough to guarantee sk_free() won't free this sock until
         * all in-flight packets are completed
         */
        atomic_add(skb->truesize, &sk->sk_wmem_alloc);
}
EXPORT_SYMBOL(skb_set_owner_w);
/* This helper is used by netem, as it can hold packets in its
 * delay queue. We want to allow the owner socket to send more
 * packets, as if they were already TX completed by a typical driver.
 * But we also want to keep skb->sk set because some packet schedulers
 * rely on it (sch_fq for example). So we set skb->truesize to a small
 * amount (1) and decrease sk_wmem_alloc accordingly.
 */
void skb_orphan_partial(struct sk_buff *skb)
{
        /* If this skb is a TCP pure ACK or already went here,
         * we have nothing to do. 2 is already a very small truesize.
         */
        if (skb->truesize <= 2)
                return;

        /* TCP stack sets skb->ooo_okay based on sk_wmem_alloc,
         * so we do not completely orphan skb, but transfer all
         * accounted bytes but one, to avoid unexpected reorders.
         */
        if (skb->destructor == sock_wfree
#ifdef CONFIG_INET
            || skb->destructor == tcp_wfree
#endif
                ) {
                atomic_sub(skb->truesize - 1, &skb->sk->sk_wmem_alloc);
                skb->truesize = 1;
        } else {
                skb_orphan(skb);
        }
}
EXPORT_SYMBOL(skb_orphan_partial);
/*
 * Read buffer destructor automatically called from kfree_skb.
 */
void sock_rfree(struct sk_buff *skb)
{
        struct sock *sk = skb->sk;
        unsigned int len = skb->truesize;

        atomic_sub(len, &sk->sk_rmem_alloc);
        sk_mem_uncharge(sk, len);
}
EXPORT_SYMBOL(sock_rfree);

/*
 * Buffer destructor for skbs that are not used directly in read or write
 * path, e.g. for error handler skbs. Automatically called from kfree_skb.
 */
void sock_efree(struct sk_buff *skb)
{
        sock_put(skb->sk);
}
EXPORT_SYMBOL(sock_efree);

kuid_t sock_i_uid(struct sock *sk)
{
        kuid_t uid;

        read_lock_bh(&sk->sk_callback_lock);
        uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : GLOBAL_ROOT_UID;
        read_unlock_bh(&sk->sk_callback_lock);
        return uid;
}
EXPORT_SYMBOL(sock_i_uid);

unsigned long sock_i_ino(struct sock *sk)
{
        unsigned long ino;

        read_lock_bh(&sk->sk_callback_lock);
        ino = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_ino : 0;
        read_unlock_bh(&sk->sk_callback_lock);
        return ino;
}
EXPORT_SYMBOL(sock_i_ino);
/*
 * Allocate a skb from the socket's send buffer.
 */
struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
                             gfp_t priority)
{
        if (force || atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
                struct sk_buff *skb = alloc_skb(size, priority);

                if (skb) {
                        skb_set_owner_w(skb, sk);
                        return skb;
                }
        }
        return NULL;
}
EXPORT_SYMBOL(sock_wmalloc);
/*
 * Allocate a memory block from the socket's option memory buffer.
 */
void *sock_kmalloc(struct sock *sk, int size, gfp_t priority)
{
        if ((unsigned int)size <= sysctl_optmem_max &&
            atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
                void *mem;
                /* First do the add, to avoid the race if kmalloc
                 * might sleep.
                 */
                atomic_add(size, &sk->sk_omem_alloc);
                mem = kmalloc(size, priority);
                if (mem)
                        return mem;
                atomic_sub(size, &sk->sk_omem_alloc);
        }
        return NULL;
}
EXPORT_SYMBOL(sock_kmalloc);

/* Free an option memory block. Note, we actually want the inline
 * here as this allows gcc to detect the nullify and fold away the
 * condition entirely.
 */
static inline void __sock_kfree_s(struct sock *sk, void *mem, int size,
                                  const bool nullify)
{
        if (WARN_ON_ONCE(!mem))
                return;
        if (nullify)
                kzfree(mem);
        else
                kfree(mem);
        atomic_sub(size, &sk->sk_omem_alloc);
}

void sock_kfree_s(struct sock *sk, void *mem, int size)
{
        __sock_kfree_s(sk, mem, size, false);
}
EXPORT_SYMBOL(sock_kfree_s);

void sock_kzfree_s(struct sock *sk, void *mem, int size)
{
        __sock_kfree_s(sk, mem, size, true);
}
EXPORT_SYMBOL(sock_kzfree_s);
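
/*
 * Illustrative sketch (not part of this file): option memory is charged to
 * sk_omem_alloc, so every sock_kmalloc() must be paired with sock_kfree_s()
 * (or sock_kzfree_s() for key material) passing the same size. Hypothetical
 * caller:
 *
 *      struct my_opt *opt = sock_kmalloc(sk, sizeof(*opt), GFP_KERNEL);
 *
 *      if (!opt)
 *              return -ENOBUFS;
 *      ... use opt ...
 *      sock_kfree_s(sk, opt, sizeof(*opt));
 */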
/* It is almost wait_for_tcp_memory minus release_sock/lock_sock.
 * I think, these locks should be removed for datagram sockets.
 */
static long sock_wait_for_wmem(struct sock *sk, long timeo)
{
        DEFINE_WAIT(wait);

        sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
        for (;;) {
                if (!timeo)
                        break;
                if (signal_pending(current))
                        break;
                set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
                prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
                if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf)
                        break;
                if (sk->sk_shutdown & SEND_SHUTDOWN)
                        break;
                if (sk->sk_err)
                        break;
                timeo = schedule_timeout(timeo);
        }
        finish_wait(sk_sleep(sk), &wait);
        return timeo;
}
/*
 * Generic send/receive buffer handlers
 */

struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
                                     unsigned long data_len, int noblock,
                                     int *errcode, int max_page_order)
{
        struct sk_buff *skb;
        long timeo;
        int err;

        timeo = sock_sndtimeo(sk, noblock);
        for (;;) {
                err = sock_error(sk);
                if (err != 0)
                        goto failure;

                err = -EPIPE;
                if (sk->sk_shutdown & SEND_SHUTDOWN)
                        goto failure;

                if (sk_wmem_alloc_get(sk) < sk->sk_sndbuf)
                        break;

                sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
                set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
                err = -EAGAIN;
                if (!timeo)
                        goto failure;
                if (signal_pending(current))
                        goto interrupted;
                timeo = sock_wait_for_wmem(sk, timeo);
        }
        skb = alloc_skb_with_frags(header_len, data_len, max_page_order,
                                   errcode, sk->sk_allocation);
        if (skb)
                skb_set_owner_w(skb, sk);
        return skb;

interrupted:
        err = sock_intr_errno(timeo);
failure:
        *errcode = err;
        return NULL;
}
EXPORT_SYMBOL(sock_alloc_send_pskb);

struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
                                    int noblock, int *errcode)
{
        return sock_alloc_send_pskb(sk, size, 0, noblock, errcode, 0);
}
EXPORT_SYMBOL(sock_alloc_send_skb);
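
/*
 * Illustrative sketch (not part of this file): a datagram sendmsg handler
 * typically allocates its skb this way, blocking (subject to the socket's
 * send timeout) until sk_sndbuf has room. Hypothetical caller:
 *
 *      int err;
 *      struct sk_buff *skb;
 *
 *      skb = sock_alloc_send_skb(sk, hdr_len + payload_len,
 *                                msg->msg_flags & MSG_DONTWAIT, &err);
 *      if (!skb)
 *              return err;  // -EAGAIN, -EPIPE, -ERESTARTSYS, ...
 */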
int __sock_cmsg_send(struct sock *sk, struct msghdr *msg, struct cmsghdr *cmsg,
                     struct sockcm_cookie *sockc)
{
        u32 tsflags;

        switch (cmsg->cmsg_type) {
        case SO_MARK:
                if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
                        return -EPERM;
                if (cmsg->cmsg_len != CMSG_LEN(sizeof(u32)))
                        return -EINVAL;
                sockc->mark = *(u32 *)CMSG_DATA(cmsg);
                break;
        case SO_TIMESTAMPING:
                if (cmsg->cmsg_len != CMSG_LEN(sizeof(u32)))
                        return -EINVAL;

                tsflags = *(u32 *)CMSG_DATA(cmsg);
                if (tsflags & ~SOF_TIMESTAMPING_TX_RECORD_MASK)
                        return -EINVAL;

                sockc->tsflags &= ~SOF_TIMESTAMPING_TX_RECORD_MASK;
                sockc->tsflags |= tsflags;
                break;
        /* SCM_RIGHTS and SCM_CREDENTIALS are semantically in SOL_UNIX. */
        case SCM_RIGHTS:
        case SCM_CREDENTIALS:
                break;
        default:
                return -EINVAL;
        }
        return 0;
}
EXPORT_SYMBOL(__sock_cmsg_send);

int sock_cmsg_send(struct sock *sk, struct msghdr *msg,
                   struct sockcm_cookie *sockc)
{
        struct cmsghdr *cmsg;
        int ret;

        for_each_cmsghdr(cmsg, msg) {
                if (!CMSG_OK(msg, cmsg))
                        return -EINVAL;
                if (cmsg->cmsg_level != SOL_SOCKET)
                        continue;
                ret = __sock_cmsg_send(sk, msg, cmsg, sockc);
                if (ret)
                        return ret;
        }
        return 0;
}
EXPORT_SYMBOL(sock_cmsg_send);
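
/*
 * Illustrative sketch (not part of this file): a protocol's sendmsg path
 * seeds the cookie with the socket defaults and then lets SOL_SOCKET
 * control messages override them. Hypothetical caller:
 *
 *      struct sockcm_cookie sockc = { .tsflags = sk->sk_tsflags };
 *
 *      if (msg->msg_controllen) {
 *              err = sock_cmsg_send(sk, msg, &sockc);
 *              if (unlikely(err))
 *                      return err;
 *      }
 *      ... use sockc.mark / sockc.tsflags when building the packet ...
 */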
/* On 32bit arches, an skb frag is limited to 2^15 */
#define SKB_FRAG_PAGE_ORDER     get_order(32768)

/**
 * skb_page_frag_refill - check that a page_frag contains enough room
 * @sz: minimum size of the fragment we want to get
 * @pfrag: pointer to page_frag
 * @gfp: priority for memory allocation
 *
 * Note: While this allocator tries to use high order pages, there is
 * no guarantee that allocations succeed. Therefore, @sz MUST be
 * less than or equal to PAGE_SIZE.
 */
bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t gfp)
{
        if (pfrag->page) {
                if (page_ref_count(pfrag->page) == 1) {
                        pfrag->offset = 0;
                        return true;
                }
                if (pfrag->offset + sz <= pfrag->size)
                        return true;
                put_page(pfrag->page);
        }

        pfrag->offset = 0;
        if (SKB_FRAG_PAGE_ORDER) {
                /* Avoid direct reclaim but allow kswapd to wake */
                pfrag->page = alloc_pages((gfp & ~__GFP_DIRECT_RECLAIM) |
                                          __GFP_COMP | __GFP_NOWARN |
                                          __GFP_NORETRY,
                                          SKB_FRAG_PAGE_ORDER);
                if (likely(pfrag->page)) {
                        pfrag->size = PAGE_SIZE << SKB_FRAG_PAGE_ORDER;
                        return true;
                }
        }
        pfrag->page = alloc_page(gfp);
        if (likely(pfrag->page)) {
                pfrag->size = PAGE_SIZE;
                return true;
        }
        return false;
}
EXPORT_SYMBOL(skb_page_frag_refill);

bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag)
{
        if (likely(skb_page_frag_refill(32U, pfrag, sk->sk_allocation)))
                return true;

        sk_enter_memory_pressure(sk);
        sk_stream_moderate_sndbuf(sk);
        return false;
}
EXPORT_SYMBOL(sk_page_frag_refill);
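
/*
 * Illustrative sketch (not part of this file): senders that build skb page
 * frags refill the per-socket (or per-task) page_frag and then copy user
 * data at pfrag->offset. Hypothetical caller:
 *
 *      struct page_frag *pfrag = sk_page_frag(sk);
 *
 *      if (!sk_page_frag_refill(sk, pfrag))
 *              goto wait_for_memory;
 *      copy = min_t(int, copy, pfrag->size - pfrag->offset);
 *      ... copy user data to page_address(pfrag->page) + pfrag->offset ...
 *      pfrag->offset += copy;
 */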
static void __lock_sock(struct sock *sk)
        __releases(&sk->sk_lock.slock)
        __acquires(&sk->sk_lock.slock)
{
        DEFINE_WAIT(wait);

        for (;;) {
                prepare_to_wait_exclusive(&sk->sk_lock.wq, &wait,
                                          TASK_UNINTERRUPTIBLE);
                spin_unlock_bh(&sk->sk_lock.slock);
                schedule();
                spin_lock_bh(&sk->sk_lock.slock);
                if (!sock_owned_by_user(sk))
                        break;
        }
        finish_wait(&sk->sk_lock.wq, &wait);
}

static void __release_sock(struct sock *sk)
        __releases(&sk->sk_lock.slock)
        __acquires(&sk->sk_lock.slock)
{
        struct sk_buff *skb, *next;

        while ((skb = sk->sk_backlog.head) != NULL) {
                sk->sk_backlog.head = sk->sk_backlog.tail = NULL;

                spin_unlock_bh(&sk->sk_lock.slock);

                do {
                        next = skb->next;
                        prefetch(next);
                        WARN_ON_ONCE(skb_dst_is_noref(skb));
                        skb->next = NULL;
                        sk_backlog_rcv(sk, skb);

                        cond_resched();

                        skb = next;
                } while (skb != NULL);

                spin_lock_bh(&sk->sk_lock.slock);
        }

        /*
         * Doing the zeroing here guarantees we cannot loop forever
         * while a wild producer attempts to flood us.
         */
        sk->sk_backlog.len = 0;
}

void __sk_flush_backlog(struct sock *sk)
{
        spin_lock_bh(&sk->sk_lock.slock);
        __release_sock(sk);
        spin_unlock_bh(&sk->sk_lock.slock);
}
/**
 * sk_wait_data - wait for data to arrive at sk_receive_queue
 * @sk:    sock to wait on
 * @timeo: for how long
 * @skb:   last skb seen on sk_receive_queue
 *
 * Now socket state including sk->sk_err is changed only under lock,
 * hence we may omit checks after joining wait queue.
 * We check receive queue before schedule() only as optimization;
 * it is very likely that release_sock() added new data.
 */
int sk_wait_data(struct sock *sk, long *timeo, const struct sk_buff *skb)
{
        int rc;
        DEFINE_WAIT(wait);

        prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
        sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
        rc = sk_wait_event(sk, timeo, skb_peek_tail(&sk->sk_receive_queue) != skb);
        sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
        finish_wait(sk_sleep(sk), &wait);
        return rc;
}
EXPORT_SYMBOL(sk_wait_data);
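
/*
 * Illustrative sketch (not part of this file): a simplified blocking
 * recvmsg loop, with error and signal handling omitted. The caller holds
 * the socket lock and passes the last skb it has already seen (NULL here),
 * so the wait condition only fires for newly queued data:
 *
 *      long timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
 *      struct sk_buff *skb;
 *
 *      while (!(skb = skb_peek(&sk->sk_receive_queue))) {
 *              if (!timeo)
 *                      return -EAGAIN;
 *              sk_wait_data(sk, &timeo, NULL);
 *      }
 */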
/**
 * __sk_mem_raise_allocated - increase memory_allocated
 * @sk: socket
 * @size: memory size to allocate
 * @amt: pages to allocate
 * @kind: allocation type
 *
 * Similar to __sk_mem_schedule(), but does not update sk_forward_alloc
 */
int __sk_mem_raise_allocated(struct sock *sk, int size, int amt, int kind)
{
        struct proto *prot = sk->sk_prot;
        long allocated = sk_memory_allocated_add(sk, amt);

        if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
            !mem_cgroup_charge_skmem(sk->sk_memcg, amt))
                goto suppress_allocation;

        /* Under limit. */
        if (allocated <= sk_prot_mem_limits(sk, 0)) {
                sk_leave_memory_pressure(sk);
                return 1;
        }

        /* Under pressure. */
        if (allocated > sk_prot_mem_limits(sk, 1))
                sk_enter_memory_pressure(sk);

        /* Over hard limit. */
        if (allocated > sk_prot_mem_limits(sk, 2))
                goto suppress_allocation;

        /* guarantee minimum buffer size under pressure */
        if (kind == SK_MEM_RECV) {
                if (atomic_read(&sk->sk_rmem_alloc) < prot->sysctl_rmem[0])
                        return 1;

        } else { /* SK_MEM_SEND */
                if (sk->sk_type == SOCK_STREAM) {
                        if (sk->sk_wmem_queued < prot->sysctl_wmem[0])
                                return 1;
                } else if (atomic_read(&sk->sk_wmem_alloc) <
                           prot->sysctl_wmem[0])
                        return 1;
        }

        if (sk_has_memory_pressure(sk)) {
                int alloc;

                if (!sk_under_memory_pressure(sk))
                        return 1;
                alloc = sk_sockets_allocated_read_positive(sk);
                if (sk_prot_mem_limits(sk, 2) > alloc *
                    sk_mem_pages(sk->sk_wmem_queued +
                                 atomic_read(&sk->sk_rmem_alloc) +
                                 sk->sk_forward_alloc))
                        return 1;
        }

suppress_allocation:

        if (kind == SK_MEM_SEND && sk->sk_type == SOCK_STREAM) {
                sk_stream_moderate_sndbuf(sk);

                /* Fail only if socket is _under_ its sndbuf.
                 * In this case we cannot block, so that we have to fail.
                 */
                if (sk->sk_wmem_queued + size >= sk->sk_sndbuf)
                        return 1;
        }

        trace_sock_exceed_buf_limit(sk, prot, allocated);

        sk_memory_allocated_sub(sk, amt);

        if (mem_cgroup_sockets_enabled && sk->sk_memcg)
                mem_cgroup_uncharge_skmem(sk->sk_memcg, amt);

        return 0;
}
EXPORT_SYMBOL(__sk_mem_raise_allocated);
/**
 * __sk_mem_schedule - increase sk_forward_alloc and memory_allocated
 * @sk: socket
 * @size: memory size to allocate
 * @kind: allocation type
 *
 * If kind is SK_MEM_SEND, it means wmem allocation. Otherwise it means
 * rmem allocation. This function assumes that protocols which have
 * memory_pressure use sk_wmem_queued as write buffer accounting.
 */
int __sk_mem_schedule(struct sock *sk, int size, int kind)
{
        int ret, amt = sk_mem_pages(size);

        sk->sk_forward_alloc += amt << SK_MEM_QUANTUM_SHIFT;
        ret = __sk_mem_raise_allocated(sk, size, amt, kind);
        if (!ret)
                sk->sk_forward_alloc -= amt << SK_MEM_QUANTUM_SHIFT;
        return ret;
}
EXPORT_SYMBOL(__sk_mem_schedule);
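
/*
 * Illustrative sketch (not part of this file): protocols normally go
 * through the sk_wmem_schedule()/sk_rmem_schedule() inlines (see
 * include/net/sock.h), which only fall back to __sk_mem_schedule() when
 * sk_forward_alloc cannot already cover the request. Hypothetical
 * receive-side caller:
 *
 *      if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
 *              ... drop the packet, memory accounting refused it ...
 *      }
 *      skb_set_owner_r(skb, sk);  // charges sk_rmem_alloc/forward_alloc
 */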
/**
 * __sk_mem_reduce_allocated - reclaim memory_allocated
 * @sk: socket
 * @amount: number of quanta
 *
 * Similar to __sk_mem_reclaim(), but does not update sk_forward_alloc
 */
void __sk_mem_reduce_allocated(struct sock *sk, int amount)
{
        sk_memory_allocated_sub(sk, amount);

        if (mem_cgroup_sockets_enabled && sk->sk_memcg)
                mem_cgroup_uncharge_skmem(sk->sk_memcg, amount);

        if (sk_under_memory_pressure(sk) &&
            (sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0)))
                sk_leave_memory_pressure(sk);
}
EXPORT_SYMBOL(__sk_mem_reduce_allocated);

/**
 * __sk_mem_reclaim - reclaim sk_forward_alloc and memory_allocated
 * @sk: socket
 * @amount: number of bytes (rounded down to a SK_MEM_QUANTUM multiple)
 */
void __sk_mem_reclaim(struct sock *sk, int amount)
{
        amount >>= SK_MEM_QUANTUM_SHIFT;
        sk->sk_forward_alloc -= amount << SK_MEM_QUANTUM_SHIFT;
        __sk_mem_reduce_allocated(sk, amount);
}
EXPORT_SYMBOL(__sk_mem_reclaim);
int sk_set_peek_off(struct sock *sk, int val)
{
        if (val < 0)
                return -EINVAL;

        sk->sk_peek_off = val;
        return 0;
}
EXPORT_SYMBOL_GPL(sk_set_peek_off);
/*
 * Set of default routines for initialising struct proto_ops when
 * the protocol does not support a particular function. In certain
 * cases where it makes no sense for a protocol to have a "do nothing"
 * function, some default processing is provided.
 */

int sock_no_bind(struct socket *sock, struct sockaddr *saddr, int len)
{
        return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_bind);

int sock_no_connect(struct socket *sock, struct sockaddr *saddr,
                    int len, int flags)
{
        return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_connect);

int sock_no_socketpair(struct socket *sock1, struct socket *sock2)
{
        return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_socketpair);

int sock_no_accept(struct socket *sock, struct socket *newsock, int flags)
{
        return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_accept);

int sock_no_getname(struct socket *sock, struct sockaddr *saddr,
                    int *len, int peer)
{
        return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_getname);

unsigned int sock_no_poll(struct file *file, struct socket *sock, poll_table *pt)
{
        return 0;
}
EXPORT_SYMBOL(sock_no_poll);

int sock_no_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
        return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_ioctl);

int sock_no_listen(struct socket *sock, int backlog)
{
        return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_listen);

int sock_no_shutdown(struct socket *sock, int how)
{
        return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_shutdown);

int sock_no_setsockopt(struct socket *sock, int level, int optname,
                       char __user *optval, unsigned int optlen)
{
        return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_setsockopt);

int sock_no_getsockopt(struct socket *sock, int level, int optname,
                       char __user *optval, int __user *optlen)
{
        return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_getsockopt);

int sock_no_sendmsg(struct socket *sock, struct msghdr *m, size_t len)
{
        return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_sendmsg);

int sock_no_recvmsg(struct socket *sock, struct msghdr *m, size_t len,
                    int flags)
{
        return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_recvmsg);

int sock_no_mmap(struct file *file, struct socket *sock, struct vm_area_struct *vma)
{
        /* Mirror missing mmap method error code */
        return -ENODEV;
}
EXPORT_SYMBOL(sock_no_mmap);

ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset,
                         size_t size, int flags)
{
        ssize_t res;
        struct msghdr msg = {.msg_flags = flags};
        struct kvec iov;
        char *kaddr = kmap(page);

        iov.iov_base = kaddr + offset;
        iov.iov_len = size;
        res = kernel_sendmsg(sock, &msg, &iov, 1, size);
        kunmap(page);
        return res;
}
EXPORT_SYMBOL(sock_no_sendpage);
/*
 * Default Socket Callbacks
 */

static void sock_def_wakeup(struct sock *sk)
{
        struct socket_wq *wq;

        rcu_read_lock();
        wq = rcu_dereference(sk->sk_wq);
        if (skwq_has_sleeper(wq))
                wake_up_interruptible_all(&wq->wait);
        rcu_read_unlock();
}

static void sock_def_error_report(struct sock *sk)
{
        struct socket_wq *wq;

        rcu_read_lock();
        wq = rcu_dereference(sk->sk_wq);
        if (skwq_has_sleeper(wq))
                wake_up_interruptible_poll(&wq->wait, POLLERR);
        sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR);
        rcu_read_unlock();
}

static void sock_def_readable(struct sock *sk)
{
        struct socket_wq *wq;

        rcu_read_lock();
        wq = rcu_dereference(sk->sk_wq);
        if (skwq_has_sleeper(wq))
                wake_up_interruptible_sync_poll(&wq->wait, POLLIN | POLLPRI |
                                                POLLRDNORM | POLLRDBAND);
        sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
        rcu_read_unlock();
}

static void sock_def_write_space(struct sock *sk)
{
        struct socket_wq *wq;

        rcu_read_lock();

        /* Do not wake up a writer until he can make "significant"
         * progress.  --DaveM
         */
        if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) {
                wq = rcu_dereference(sk->sk_wq);
                if (skwq_has_sleeper(wq))
                        wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
                                                POLLWRNORM | POLLWRBAND);

                /* Should agree with poll, otherwise some programs break */
                if (sock_writeable(sk))
                        sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
        }

        rcu_read_unlock();
}

static void sock_def_destruct(struct sock *sk)
{
}
void sk_send_sigurg(struct sock *sk)
{
        if (sk->sk_socket && sk->sk_socket->file)
                if (send_sigurg(&sk->sk_socket->file->f_owner))
                        sk_wake_async(sk, SOCK_WAKE_URG, POLL_PRI);
}
EXPORT_SYMBOL(sk_send_sigurg);

void sk_reset_timer(struct sock *sk, struct timer_list *timer,
                    unsigned long expires)
{
        if (!mod_timer(timer, expires))
                sock_hold(sk);
}
EXPORT_SYMBOL(sk_reset_timer);

void sk_stop_timer(struct sock *sk, struct timer_list *timer)
{
        if (del_timer(timer))
                __sock_put(sk);
}
EXPORT_SYMBOL(sk_stop_timer);
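
/*
 * Illustrative sketch (not part of this file): these helpers keep a socket
 * reference for as long as a timer is pending. sk_reset_timer() takes a
 * reference only when the timer was not already armed (mod_timer() returned
 * 0), and the handler must drop it when it is done. Names below are
 * hypothetical:
 *
 *      sk_reset_timer(sk, &sk->sk_timer, jiffies + MY_INTERVAL);
 *      ...
 *      static void my_timer_handler(unsigned long data)
 *      {
 *              struct sock *sk = (struct sock *)data;
 *
 *              ... do work, possibly sk_reset_timer() again ...
 *              sock_put(sk);  // release the reference the timer held
 *      }
 */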
void sock_init_data(struct socket *sock, struct sock *sk)
{
        skb_queue_head_init(&sk->sk_receive_queue);
        skb_queue_head_init(&sk->sk_write_queue);
        skb_queue_head_init(&sk->sk_error_queue);

        sk->sk_send_head = NULL;

        init_timer(&sk->sk_timer);

        sk->sk_allocation = GFP_KERNEL;
        sk->sk_rcvbuf = sysctl_rmem_default;
        sk->sk_sndbuf = sysctl_wmem_default;
        sk->sk_state = TCP_CLOSE;
        sk_set_socket(sk, sock);

        sock_set_flag(sk, SOCK_ZAPPED);

        if (sock) {
                sk->sk_type = sock->type;
                sk->sk_wq = sock->wq;
                sock->sk = sk;
        } else
                sk->sk_wq = NULL;

        rwlock_init(&sk->sk_callback_lock);
        lockdep_set_class_and_name(&sk->sk_callback_lock,
                        af_callback_keys + sk->sk_family,
                        af_family_clock_key_strings[sk->sk_family]);

        sk->sk_state_change = sock_def_wakeup;
        sk->sk_data_ready = sock_def_readable;
        sk->sk_write_space = sock_def_write_space;
        sk->sk_error_report = sock_def_error_report;
        sk->sk_destruct = sock_def_destruct;

        sk->sk_frag.page = NULL;
        sk->sk_frag.offset = 0;
        sk->sk_peek_off = -1;

        sk->sk_peer_pid = NULL;
        sk->sk_peer_cred = NULL;
        sk->sk_write_pending = 0;
        sk->sk_rcvlowat = 1;
        sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
        sk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT;

        sk->sk_stamp = ktime_set(-1L, 0);

#ifdef CONFIG_NET_RX_BUSY_POLL
        sk->sk_napi_id = 0;
        sk->sk_ll_usec = sysctl_net_busy_read;
#endif

        sk->sk_max_pacing_rate = ~0U;
        sk->sk_pacing_rate = ~0U;
        sk->sk_incoming_cpu = -1;
        /*
         * Before updating sk_refcnt, we must commit prior changes to memory
         * (Documentation/RCU/rculist_nulls.txt for details)
         */
        smp_wmb();
        atomic_set(&sk->sk_refcnt, 1);
        atomic_set(&sk->sk_drops, 0);
}
EXPORT_SYMBOL(sock_init_data);
void lock_sock_nested(struct sock *sk, int subclass)
{
        might_sleep();
        spin_lock_bh(&sk->sk_lock.slock);
        if (sk->sk_lock.owned)
                __lock_sock(sk);
        sk->sk_lock.owned = 1;
        spin_unlock(&sk->sk_lock.slock);
        /*
         * The sk_lock has mutex_lock() semantics here:
         */
        mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_);
        local_bh_enable();
}
EXPORT_SYMBOL(lock_sock_nested);

void release_sock(struct sock *sk)
{
        spin_lock_bh(&sk->sk_lock.slock);
        if (sk->sk_backlog.tail)
                __release_sock(sk);

        /* Warning : release_cb() might need to release sk ownership,
         * ie call sock_release_ownership(sk) before us.
         */
        if (sk->sk_prot->release_cb)
                sk->sk_prot->release_cb(sk);

        sock_release_ownership(sk);
        if (waitqueue_active(&sk->sk_lock.wq))
                wake_up(&sk->sk_lock.wq);
        spin_unlock_bh(&sk->sk_lock.slock);
}
EXPORT_SYMBOL(release_sock);
/**
 * lock_sock_fast - fast version of lock_sock
 * @sk: socket
 *
 * This version should be used for very small sections, where the process
 * won't block.
 *
 * Returns false if the fast path was taken:
 *   sk_lock.slock locked, owned = 0, BH disabled
 *
 * Returns true if the slow path was taken:
 *   sk_lock.slock unlocked, owned = 1, BH enabled
 */
bool lock_sock_fast(struct sock *sk)
{
        might_sleep();
        spin_lock_bh(&sk->sk_lock.slock);

        if (!sk->sk_lock.owned)
                /*
                 * Note : We must disable BH
                 */
                return false;

        __lock_sock(sk);
        sk->sk_lock.owned = 1;
        spin_unlock(&sk->sk_lock.slock);
        /*
         * The sk_lock has mutex_lock() semantics here:
         */
        mutex_acquire(&sk->sk_lock.dep_map, 0, 0, _RET_IP_);
        local_bh_enable();
        return true;
}
EXPORT_SYMBOL(lock_sock_fast);
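
/*
 * Illustrative sketch (not part of this file): callers must remember which
 * path was taken and hand that back to unlock_sock_fast() (see
 * include/net/sock.h), which either drops the spinlock or does a full
 * release_sock(). Hypothetical caller:
 *
 *      bool slow = lock_sock_fast(sk);
 *
 *      ... short critical section, no blocking on the fast path ...
 *      unlock_sock_fast(sk, slow);
 */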
int sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp)
{
        struct timeval tv;

        if (!sock_flag(sk, SOCK_TIMESTAMP))
                sock_enable_timestamp(sk, SOCK_TIMESTAMP);
        tv = ktime_to_timeval(sk->sk_stamp);
        if (tv.tv_sec == -1)
                return -ENOENT;
        if (tv.tv_sec == 0) {
                sk->sk_stamp = ktime_get_real();
                tv = ktime_to_timeval(sk->sk_stamp);
        }
        return copy_to_user(userstamp, &tv, sizeof(tv)) ? -EFAULT : 0;
}
EXPORT_SYMBOL(sock_get_timestamp);

int sock_get_timestampns(struct sock *sk, struct timespec __user *userstamp)
{
        struct timespec ts;

        if (!sock_flag(sk, SOCK_TIMESTAMP))
                sock_enable_timestamp(sk, SOCK_TIMESTAMP);
        ts = ktime_to_timespec(sk->sk_stamp);
        if (ts.tv_sec == -1)
                return -ENOENT;
        if (ts.tv_sec == 0) {
                sk->sk_stamp = ktime_get_real();
                ts = ktime_to_timespec(sk->sk_stamp);
        }
        return copy_to_user(userstamp, &ts, sizeof(ts)) ? -EFAULT : 0;
}
EXPORT_SYMBOL(sock_get_timestampns);

void sock_enable_timestamp(struct sock *sk, int flag)
{
        if (!sock_flag(sk, flag)) {
                unsigned long previous_flags = sk->sk_flags;

                sock_set_flag(sk, flag);
                /*
                 * we just set one of the two flags which require net
                 * time stamping, but time stamping might have been on
                 * already because of the other one
                 */
                if (sock_needs_netstamp(sk) &&
                    !(previous_flags & SK_FLAGS_TIMESTAMP))
                        net_enable_timestamp();
        }
}
int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
                       int level, int type)
{
        struct sock_exterr_skb *serr;
        struct sk_buff *skb;
        int copied, err;

        err = -EAGAIN;
        skb = sock_dequeue_err_skb(sk);
        if (skb == NULL)
                goto out;

        copied = skb->len;
        if (copied > len) {
                msg->msg_flags |= MSG_TRUNC;
                copied = len;
        }
        err = skb_copy_datagram_msg(skb, 0, msg, copied);
        if (err)
                goto out_free_skb;

        sock_recv_timestamp(msg, sk, skb);

        serr = SKB_EXT_ERR(skb);
        put_cmsg(msg, level, type, sizeof(serr->ee), &serr->ee);

        msg->msg_flags |= MSG_ERRQUEUE;
        err = copied;

out_free_skb:
        kfree_skb(skb);
out:
        return err;
}
EXPORT_SYMBOL(sock_recv_errqueue);
/*
 * Get a socket option on a socket.
 *
 * FIX: POSIX 1003.1g is very ambiguous here. It states that
 * asynchronous errors should be reported by getsockopt. We assume
 * this means if you specify SO_ERROR (otherwise what's the point of it).
 */
int sock_common_getsockopt(struct socket *sock, int level, int optname,
                           char __user *optval, int __user *optlen)
{
        struct sock *sk = sock->sk;

        return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL(sock_common_getsockopt);

#ifdef CONFIG_COMPAT
int compat_sock_common_getsockopt(struct socket *sock, int level, int optname,
                                  char __user *optval, int __user *optlen)
{
        struct sock *sk = sock->sk;

        if (sk->sk_prot->compat_getsockopt != NULL)
                return sk->sk_prot->compat_getsockopt(sk, level, optname,
                                                      optval, optlen);
        return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL(compat_sock_common_getsockopt);
#endif

int sock_common_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
                        int flags)
{
        struct sock *sk = sock->sk;
        int addr_len = 0;
        int err;

        err = sk->sk_prot->recvmsg(sk, msg, size, flags & MSG_DONTWAIT,
                                   flags & ~MSG_DONTWAIT, &addr_len);
        if (err >= 0)
                msg->msg_namelen = addr_len;
        return err;
}
EXPORT_SYMBOL(sock_common_recvmsg);

/*
 * Set socket options on an inet socket.
 */
int sock_common_setsockopt(struct socket *sock, int level, int optname,
                           char __user *optval, unsigned int optlen)
{
        struct sock *sk = sock->sk;

        return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL(sock_common_setsockopt);

#ifdef CONFIG_COMPAT
int compat_sock_common_setsockopt(struct socket *sock, int level, int optname,
                                  char __user *optval, unsigned int optlen)
{
        struct sock *sk = sock->sk;

        if (sk->sk_prot->compat_setsockopt != NULL)
                return sk->sk_prot->compat_setsockopt(sk, level, optname,
                                                      optval, optlen);
        return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL(compat_sock_common_setsockopt);
#endif
void sk_common_release(struct sock *sk)
{
        if (sk->sk_prot->destroy)
                sk->sk_prot->destroy(sk);

        /*
         * Observation: when sk_common_release is called, processes have
         * no access to the socket, but the net still has.
         *
         * Step one, detach it from networking:
         *
         * A. Remove from hash tables.
         */

        sk->sk_prot->unhash(sk);

        /*
         * At this point the socket cannot receive new packets, but it is
         * possible that some packets are in flight because some CPU runs
         * the receiver and did a hash table lookup before we unhashed the
         * socket. They will reach the receive queue and will be purged by
         * the socket destructor.
         *
         * Also we still have packets pending on the receive queue and
         * probably our own packets waiting in device queues. sock_destroy
         * will drain the receive queue, but transmitted packets will delay
         * socket destruction until the last reference is released.
         */

        sock_orphan(sk);

        xfrm_sk_free_policy(sk);

        sk_refcnt_debug_release(sk);

        if (sk->sk_frag.page) {
                put_page(sk->sk_frag.page);
                sk->sk_frag.page = NULL;
        }

        sock_put(sk);
}
EXPORT_SYMBOL(sk_common_release);
#ifdef CONFIG_PROC_FS
#define PROTO_INUSE_NR  64      /* should be enough for the first time */
struct prot_inuse {
        int val[PROTO_INUSE_NR];
};

static DECLARE_BITMAP(proto_inuse_idx, PROTO_INUSE_NR);

#ifdef CONFIG_NET_NS
void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
{
        __this_cpu_add(net->core.inuse->val[prot->inuse_idx], val);
}
EXPORT_SYMBOL_GPL(sock_prot_inuse_add);

int sock_prot_inuse_get(struct net *net, struct proto *prot)
{
        int cpu, idx = prot->inuse_idx;
        int res = 0;

        for_each_possible_cpu(cpu)
                res += per_cpu_ptr(net->core.inuse, cpu)->val[idx];

        return res >= 0 ? res : 0;
}
EXPORT_SYMBOL_GPL(sock_prot_inuse_get);

static int __net_init sock_inuse_init_net(struct net *net)
{
        net->core.inuse = alloc_percpu(struct prot_inuse);
        return net->core.inuse ? 0 : -ENOMEM;
}

static void __net_exit sock_inuse_exit_net(struct net *net)
{
        free_percpu(net->core.inuse);
}

static struct pernet_operations net_inuse_ops = {
        .init = sock_inuse_init_net,
        .exit = sock_inuse_exit_net,
};

static __init int net_inuse_init(void)
{
        if (register_pernet_subsys(&net_inuse_ops))
                panic("Cannot initialize net inuse counters");

        return 0;
}

core_initcall(net_inuse_init);
#else
static DEFINE_PER_CPU(struct prot_inuse, prot_inuse);

void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
{
        __this_cpu_add(prot_inuse.val[prot->inuse_idx], val);
}
EXPORT_SYMBOL_GPL(sock_prot_inuse_add);

int sock_prot_inuse_get(struct net *net, struct proto *prot)
{
        int cpu, idx = prot->inuse_idx;
        int res = 0;

        for_each_possible_cpu(cpu)
                res += per_cpu(prot_inuse, cpu).val[idx];

        return res >= 0 ? res : 0;
}
EXPORT_SYMBOL_GPL(sock_prot_inuse_get);
#endif

static void assign_proto_idx(struct proto *prot)
{
        prot->inuse_idx = find_first_zero_bit(proto_inuse_idx, PROTO_INUSE_NR);

        if (unlikely(prot->inuse_idx == PROTO_INUSE_NR - 1)) {
                pr_err("PROTO_INUSE_NR exhausted\n");
                return;
        }

        set_bit(prot->inuse_idx, proto_inuse_idx);
}

static void release_proto_idx(struct proto *prot)
{
        if (prot->inuse_idx != PROTO_INUSE_NR - 1)
                clear_bit(prot->inuse_idx, proto_inuse_idx);
}
#else
static inline void assign_proto_idx(struct proto *prot)
{
}

static inline void release_proto_idx(struct proto *prot)
{
}
#endif
static void req_prot_cleanup(struct request_sock_ops *rsk_prot)
{
        if (!rsk_prot)
                return;
        kfree(rsk_prot->slab_name);
        rsk_prot->slab_name = NULL;
        kmem_cache_destroy(rsk_prot->slab);
        rsk_prot->slab = NULL;
}

static int req_prot_init(const struct proto *prot)
{
        struct request_sock_ops *rsk_prot = prot->rsk_prot;

        if (!rsk_prot)
                return 0;

        rsk_prot->slab_name = kasprintf(GFP_KERNEL, "request_sock_%s",
                                        prot->name);
        if (!rsk_prot->slab_name)
                return -ENOMEM;

        rsk_prot->slab = kmem_cache_create(rsk_prot->slab_name,
                                           rsk_prot->obj_size, 0,
                                           prot->slab_flags, NULL);

        if (!rsk_prot->slab) {
                pr_crit("%s: Can't create request sock SLAB cache!\n",
                        prot->name);
                return -ENOMEM;
        }
        return 0;
}
int proto_register(struct proto *prot, int alloc_slab)
{
        if (alloc_slab) {
                prot->slab = kmem_cache_create(prot->name, prot->obj_size, 0,
                                        SLAB_HWCACHE_ALIGN | prot->slab_flags,
                                        NULL);

                if (prot->slab == NULL) {
                        pr_crit("%s: Can't create sock SLAB cache!\n",
                                prot->name);
                        goto out;
                }

                if (req_prot_init(prot))
                        goto out_free_request_sock_slab;

                if (prot->twsk_prot != NULL) {
                        prot->twsk_prot->twsk_slab_name =
                                kasprintf(GFP_KERNEL, "tw_sock_%s", prot->name);

                        if (prot->twsk_prot->twsk_slab_name == NULL)
                                goto out_free_request_sock_slab;

                        prot->twsk_prot->twsk_slab =
                                kmem_cache_create(prot->twsk_prot->twsk_slab_name,
                                                  prot->twsk_prot->twsk_obj_size,
                                                  0,
                                                  prot->slab_flags,
                                                  NULL);
                        if (prot->twsk_prot->twsk_slab == NULL)
                                goto out_free_timewait_sock_slab_name;
                }
        }

        mutex_lock(&proto_list_mutex);
        list_add(&prot->node, &proto_list);
        assign_proto_idx(prot);
        mutex_unlock(&proto_list_mutex);
        return 0;

out_free_timewait_sock_slab_name:
        kfree(prot->twsk_prot->twsk_slab_name);
out_free_request_sock_slab:
        req_prot_cleanup(prot->rsk_prot);

        kmem_cache_destroy(prot->slab);
        prot->slab = NULL;
out:
        return -ENOBUFS;
}
EXPORT_SYMBOL(proto_register);
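
/*
 * Illustrative sketch (not part of this file): a protocol module registers
 * its struct proto at init time and unregisters it on exit. Names below are
 * hypothetical:
 *
 *      static struct proto my_proto = {
 *              .name     = "MYPROTO",
 *              .owner    = THIS_MODULE,
 *              .obj_size = sizeof(struct my_sock),
 *      };
 *
 *      static int __init my_init(void)
 *      {
 *              return proto_register(&my_proto, 1);  // with a slab cache
 *      }
 *
 *      static void __exit my_exit(void)
 *      {
 *              proto_unregister(&my_proto);
 *      }
 */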
void proto_unregister(struct proto *prot)
{
        mutex_lock(&proto_list_mutex);
        release_proto_idx(prot);
        list_del(&prot->node);
        mutex_unlock(&proto_list_mutex);

        kmem_cache_destroy(prot->slab);
        prot->slab = NULL;

        req_prot_cleanup(prot->rsk_prot);

        if (prot->twsk_prot != NULL && prot->twsk_prot->twsk_slab != NULL) {
                kmem_cache_destroy(prot->twsk_prot->twsk_slab);
                kfree(prot->twsk_prot->twsk_slab_name);
                prot->twsk_prot->twsk_slab = NULL;
        }
}
EXPORT_SYMBOL(proto_unregister);
#ifdef CONFIG_PROC_FS
static void *proto_seq_start(struct seq_file *seq, loff_t *pos)
        __acquires(proto_list_mutex)
{
        mutex_lock(&proto_list_mutex);
        return seq_list_start_head(&proto_list, *pos);
}

static void *proto_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
        return seq_list_next(v, &proto_list, pos);
}

static void proto_seq_stop(struct seq_file *seq, void *v)
        __releases(proto_list_mutex)
{
        mutex_unlock(&proto_list_mutex);
}

static char proto_method_implemented(const void *method)
{
        return method == NULL ? 'n' : 'y';
}

static long sock_prot_memory_allocated(struct proto *proto)
{
        return proto->memory_allocated != NULL ? proto_memory_allocated(proto) : -1L;
}

static char *sock_prot_memory_pressure(struct proto *proto)
{
        return proto->memory_pressure != NULL ?
        proto_memory_pressure(proto) ? "yes" : "no" : "NI";
}

static void proto_seq_printf(struct seq_file *seq, struct proto *proto)
{
        seq_printf(seq, "%-9s %4u %6d %6ld %-3s %6u %-3s %-10s "
                        "%2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c\n",
                   proto->name,
                   proto->obj_size,
                   sock_prot_inuse_get(seq_file_net(seq), proto),
                   sock_prot_memory_allocated(proto),
                   sock_prot_memory_pressure(proto),
                   proto->max_header,
                   proto->slab == NULL ? "no" : "yes",
                   module_name(proto->owner),
                   proto_method_implemented(proto->close),
                   proto_method_implemented(proto->connect),
                   proto_method_implemented(proto->disconnect),
                   proto_method_implemented(proto->accept),
                   proto_method_implemented(proto->ioctl),
                   proto_method_implemented(proto->init),
                   proto_method_implemented(proto->destroy),
                   proto_method_implemented(proto->shutdown),
                   proto_method_implemented(proto->setsockopt),
                   proto_method_implemented(proto->getsockopt),
                   proto_method_implemented(proto->sendmsg),
                   proto_method_implemented(proto->recvmsg),
                   proto_method_implemented(proto->sendpage),
                   proto_method_implemented(proto->bind),
                   proto_method_implemented(proto->backlog_rcv),
                   proto_method_implemented(proto->hash),
                   proto_method_implemented(proto->unhash),
                   proto_method_implemented(proto->get_port),
                   proto_method_implemented(proto->enter_memory_pressure));
}

static int proto_seq_show(struct seq_file *seq, void *v)
{
        if (v == &proto_list)
                seq_printf(seq, "%-9s %-4s %-8s %-6s %-5s %-7s %-4s %-10s %s",
                           "protocol",
                           "size",
                           "sockets",
                           "memory",
                           "press",
                           "maxhdr",
                           "slab",
                           "module",
                           "cl co di ac io in de sh ss gs se re sp bi br ha uh gp em\n");
        else
                proto_seq_printf(seq, list_entry(v, struct proto, node));
        return 0;
}

static const struct seq_operations proto_seq_ops = {
        .start  = proto_seq_start,
        .next   = proto_seq_next,
        .stop   = proto_seq_stop,
        .show   = proto_seq_show,
};

static int proto_seq_open(struct inode *inode, struct file *file)
{
        return seq_open_net(inode, file, &proto_seq_ops,
                            sizeof(struct seq_net_private));
}

static const struct file_operations proto_seq_fops = {
        .owner          = THIS_MODULE,
        .open           = proto_seq_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = seq_release_net,
};

static __net_init int proto_init_net(struct net *net)
{
        if (!proc_create("protocols", S_IRUGO, net->proc_net, &proto_seq_fops))
                return -ENOMEM;

        return 0;
}

static __net_exit void proto_exit_net(struct net *net)
{
        remove_proc_entry("protocols", net->proc_net);
}

static __net_initdata struct pernet_operations proto_net_ops = {
        .init = proto_init_net,
        .exit = proto_exit_net,
};

static int __init proto_init(void)
{
        return register_pernet_subsys(&proto_net_ops);
}

subsys_initcall(proto_init);

#endif /* PROC_FS */