sock.c

/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Generic socket support routines. Memory allocators, socket lock/release
 *		handler for protocols to use and generic option handler.
 *
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Alan Cox, <A.Cox@swansea.ac.uk>
 *
 * Fixes:
 *		Alan Cox	:	Numerous verify_area() problems
 *		Alan Cox	:	Connecting on a connecting socket
 *					now returns an error for tcp.
 *		Alan Cox	:	sock->protocol is set correctly.
 *					and is not sometimes left as 0.
 *		Alan Cox	:	connect handles icmp errors on a
 *					connect properly. Unfortunately there
 *					is a restart syscall nasty there. I
 *					can't match BSD without hacking the C
 *					library. Ideas urgently sought!
 *		Alan Cox	:	Disallow bind() to addresses that are
 *					not ours - especially broadcast ones!!
 *		Alan Cox	:	Socket 1024 _IS_ ok for users. (fencepost)
 *		Alan Cox	:	sock_wfree/sock_rfree don't destroy sockets,
 *					instead they leave that for the DESTROY timer.
 *		Alan Cox	:	Clean up error flag in accept
 *		Alan Cox	:	TCP ack handling is buggy, the DESTROY timer
 *					was buggy. Put a remove_sock() in the handler
 *					for memory when we hit 0. Also altered the timer
 *					code. The ACK stuff can wait and needs major
 *					TCP layer surgery.
 *		Alan Cox	:	Fixed TCP ack bug, removed remove sock
 *					and fixed timer/inet_bh race.
 *		Alan Cox	:	Added zapped flag for TCP
 *		Alan Cox	:	Move kfree_skb into skbuff.c and tidied up surplus code
 *		Alan Cox	:	for new sk_buff allocations wmalloc/rmalloc now call alloc_skb
 *		Alan Cox	:	kfree_s calls now are kfree_skbmem so we can track skb resources
 *		Alan Cox	:	Supports socket option broadcast now as does udp. Packet and raw need fixing.
 *		Alan Cox	:	Added RCVBUF,SNDBUF size setting. It suddenly occurred to me how easy it was so...
 *		Rick Sladkey	:	Relaxed UDP rules for matching packets.
 *		C.E.Hawkins	:	IFF_PROMISC/SIOCGHWADDR support
 *	Pauline Middelink	:	identd support
 *		Alan Cox	:	Fixed connect() taking signals I think.
 *		Alan Cox	:	SO_LINGER supported
 *		Alan Cox	:	Error reporting fixes
 *		Anonymous	:	inet_create tidied up (sk->reuse setting)
 *		Alan Cox	:	inet sockets don't set sk->type!
 *		Alan Cox	:	Split socket option code
 *		Alan Cox	:	Callbacks
 *		Alan Cox	:	Nagle flag for Charles & Johannes stuff
 *		Alex		:	Removed restriction on inet fioctl
 *		Alan Cox	:	Splitting INET from NET core
 *		Alan Cox	:	Fixed bogus SO_TYPE handling in getsockopt()
 *		Adam Caldwell	:	Missing return in SO_DONTROUTE/SO_DEBUG code
 *		Alan Cox	:	Split IP from generic code
 *		Alan Cox	:	New kfree_skbmem()
 *		Alan Cox	:	Make SO_DEBUG superuser only.
 *		Alan Cox	:	Allow anyone to clear SO_DEBUG
 *					(compatibility fix)
 *		Alan Cox	:	Added optimistic memory grabbing for AF_UNIX throughput.
 *		Alan Cox	:	Allocator for a socket is settable.
 *		Alan Cox	:	SO_ERROR includes soft errors.
 *		Alan Cox	:	Allow NULL arguments on some SO_ opts
 *		Alan Cox	:	Generic socket allocation to make hooks
 *					easier (suggested by Craig Metz).
 *		Michael Pall	:	SO_ERROR returns positive errno again
 *		Steve Whitehouse:	Added default destructor to free
 *					protocol private data.
 *		Steve Whitehouse:	Added various other default routines
 *					common to several socket families.
 *		Chris Evans	:	Call suser() check last on F_SETOWN
 *		Jay Schulist	:	Added SO_ATTACH_FILTER and SO_DETACH_FILTER.
 *		Andi Kleen	:	Add sock_kmalloc()/sock_kfree_s()
 *		Andi Kleen	:	Fix write_space callback
 *		Chris Evans	:	Security fixes - signedness again
 *		Arnaldo C. Melo	:	cleanups, use skb_queue_purge
 *
 * To Fix:
 *
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/errqueue.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/poll.h>
#include <linux/tcp.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/user_namespace.h>
#include <linux/static_key.h>
#include <linux/memcontrol.h>
#include <linux/prefetch.h>

#include <asm/uaccess.h>

#include <linux/netdevice.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/request_sock.h>
#include <net/sock.h>
#include <linux/net_tstamp.h>
#include <net/xfrm.h>
#include <linux/ipsec.h>
#include <net/cls_cgroup.h>
#include <net/netprio_cgroup.h>

#include <linux/filter.h>

#include <trace/events/sock.h>

#ifdef CONFIG_INET
#include <net/tcp.h>
#endif

#include <net/busy_poll.h>

static DEFINE_MUTEX(proto_list_mutex);
static LIST_HEAD(proto_list);
/**
 * sk_ns_capable - General socket capability test
 * @sk: Socket to use a capability on or through
 * @user_ns: The user namespace of the capability to use
 * @cap: The capability to use
 *
 * Test to see if the opener of the socket had the capability @cap when
 * the socket was created, and if the current process has it in the user
 * namespace @user_ns.
 */
bool sk_ns_capable(const struct sock *sk,
		   struct user_namespace *user_ns, int cap)
{
	return file_ns_capable(sk->sk_socket->file, user_ns, cap) &&
		ns_capable(user_ns, cap);
}
EXPORT_SYMBOL(sk_ns_capable);

/**
 * sk_capable - Socket global capability test
 * @sk: Socket to use a capability on or through
 * @cap: The global capability to use
 *
 * Test to see if the opener of the socket had the capability @cap when
 * the socket was created, and if the current process has it in all user
 * namespaces.
 */
bool sk_capable(const struct sock *sk, int cap)
{
	return sk_ns_capable(sk, &init_user_ns, cap);
}
EXPORT_SYMBOL(sk_capable);

/**
 * sk_net_capable - Network namespace socket capability test
 * @sk: Socket to use a capability on or through
 * @cap: The capability to use
 *
 * Test to see if the opener of the socket had the capability @cap when
 * the socket was created, and if the current process has it over the
 * network namespace the socket is a member of.
 */
bool sk_net_capable(const struct sock *sk, int cap)
{
	return sk_ns_capable(sk, sock_net(sk)->user_ns, cap);
}
EXPORT_SYMBOL(sk_net_capable);
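
/*
 * Illustrative use (a sketch, not a caller in this file): a protocol's
 * privileged option path could gate an option on the socket's network
 * namespace with the helper above, mirroring how sock_setsockopt() uses
 * ns_capable(sock_net(sk)->user_ns, ...) for SO_MARK below:
 *
 *	if (!sk_net_capable(sk, CAP_NET_ADMIN))
 *		return -EPERM;
 */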
#ifdef CONFIG_MEMCG_KMEM
int mem_cgroup_sockets_init(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
{
	struct proto *proto;
	int ret = 0;

	mutex_lock(&proto_list_mutex);
	list_for_each_entry(proto, &proto_list, node) {
		if (proto->init_cgroup) {
			ret = proto->init_cgroup(memcg, ss);
			if (ret)
				goto out;
		}
	}
	mutex_unlock(&proto_list_mutex);
	return ret;
out:
	list_for_each_entry_continue_reverse(proto, &proto_list, node)
		if (proto->destroy_cgroup)
			proto->destroy_cgroup(memcg);
	mutex_unlock(&proto_list_mutex);
	return ret;
}

void mem_cgroup_sockets_destroy(struct mem_cgroup *memcg)
{
	struct proto *proto;

	mutex_lock(&proto_list_mutex);
	list_for_each_entry_reverse(proto, &proto_list, node)
		if (proto->destroy_cgroup)
			proto->destroy_cgroup(memcg);
	mutex_unlock(&proto_list_mutex);
}
#endif
/*
 * Each address family might have different locking rules, so we have
 * one slock key per address family:
 */
static struct lock_class_key af_family_keys[AF_MAX];
static struct lock_class_key af_family_slock_keys[AF_MAX];

#if defined(CONFIG_MEMCG_KMEM)
struct static_key memcg_socket_limit_enabled;
EXPORT_SYMBOL(memcg_socket_limit_enabled);
#endif

/*
 * Make lock validator output more readable. (we pre-construct these
 * strings build-time, so that runtime initialization of socket
 * locks is fast):
 */
static const char *const af_family_key_strings[AF_MAX+1] = {
	"sk_lock-AF_UNSPEC", "sk_lock-AF_UNIX"     , "sk_lock-AF_INET"     ,
	"sk_lock-AF_AX25"  , "sk_lock-AF_IPX"      , "sk_lock-AF_APPLETALK",
	"sk_lock-AF_NETROM", "sk_lock-AF_BRIDGE"   , "sk_lock-AF_ATMPVC"   ,
	"sk_lock-AF_X25"   , "sk_lock-AF_INET6"    , "sk_lock-AF_ROSE"     ,
	"sk_lock-AF_DECnet", "sk_lock-AF_NETBEUI"  , "sk_lock-AF_SECURITY" ,
	"sk_lock-AF_KEY"   , "sk_lock-AF_NETLINK"  , "sk_lock-AF_PACKET"   ,
	"sk_lock-AF_ASH"   , "sk_lock-AF_ECONET"   , "sk_lock-AF_ATMSVC"   ,
	"sk_lock-AF_RDS"   , "sk_lock-AF_SNA"      , "sk_lock-AF_IRDA"     ,
	"sk_lock-AF_PPPOX" , "sk_lock-AF_WANPIPE"  , "sk_lock-AF_LLC"      ,
	"sk_lock-27"       , "sk_lock-28"          , "sk_lock-AF_CAN"      ,
	"sk_lock-AF_TIPC"  , "sk_lock-AF_BLUETOOTH", "sk_lock-AF_IUCV"     ,
	"sk_lock-AF_RXRPC" , "sk_lock-AF_ISDN"     , "sk_lock-AF_PHONET"   ,
	"sk_lock-AF_IEEE802154", "sk_lock-AF_CAIF" , "sk_lock-AF_ALG"      ,
	"sk_lock-AF_NFC"   , "sk_lock-AF_VSOCK"    , "sk_lock-AF_MAX"
};
static const char *const af_family_slock_key_strings[AF_MAX+1] = {
	"slock-AF_UNSPEC", "slock-AF_UNIX"     , "slock-AF_INET"     ,
	"slock-AF_AX25"  , "slock-AF_IPX"      , "slock-AF_APPLETALK",
	"slock-AF_NETROM", "slock-AF_BRIDGE"   , "slock-AF_ATMPVC"   ,
	"slock-AF_X25"   , "slock-AF_INET6"    , "slock-AF_ROSE"     ,
	"slock-AF_DECnet", "slock-AF_NETBEUI"  , "slock-AF_SECURITY" ,
	"slock-AF_KEY"   , "slock-AF_NETLINK"  , "slock-AF_PACKET"   ,
	"slock-AF_ASH"   , "slock-AF_ECONET"   , "slock-AF_ATMSVC"   ,
	"slock-AF_RDS"   , "slock-AF_SNA"      , "slock-AF_IRDA"     ,
	"slock-AF_PPPOX" , "slock-AF_WANPIPE"  , "slock-AF_LLC"      ,
	"slock-27"       , "slock-28"          , "slock-AF_CAN"      ,
	"slock-AF_TIPC"  , "slock-AF_BLUETOOTH", "slock-AF_IUCV"     ,
	"slock-AF_RXRPC" , "slock-AF_ISDN"     , "slock-AF_PHONET"   ,
	"slock-AF_IEEE802154", "slock-AF_CAIF" , "slock-AF_ALG"      ,
	"slock-AF_NFC"   , "slock-AF_VSOCK"    , "slock-AF_MAX"
};
static const char *const af_family_clock_key_strings[AF_MAX+1] = {
	"clock-AF_UNSPEC", "clock-AF_UNIX"     , "clock-AF_INET"     ,
	"clock-AF_AX25"  , "clock-AF_IPX"      , "clock-AF_APPLETALK",
	"clock-AF_NETROM", "clock-AF_BRIDGE"   , "clock-AF_ATMPVC"   ,
	"clock-AF_X25"   , "clock-AF_INET6"    , "clock-AF_ROSE"     ,
	"clock-AF_DECnet", "clock-AF_NETBEUI"  , "clock-AF_SECURITY" ,
	"clock-AF_KEY"   , "clock-AF_NETLINK"  , "clock-AF_PACKET"   ,
	"clock-AF_ASH"   , "clock-AF_ECONET"   , "clock-AF_ATMSVC"   ,
	"clock-AF_RDS"   , "clock-AF_SNA"      , "clock-AF_IRDA"     ,
	"clock-AF_PPPOX" , "clock-AF_WANPIPE"  , "clock-AF_LLC"      ,
	"clock-27"       , "clock-28"          , "clock-AF_CAN"      ,
	"clock-AF_TIPC"  , "clock-AF_BLUETOOTH", "clock-AF_IUCV"     ,
	"clock-AF_RXRPC" , "clock-AF_ISDN"     , "clock-AF_PHONET"   ,
	"clock-AF_IEEE802154", "clock-AF_CAIF" , "clock-AF_ALG"      ,
	"clock-AF_NFC"   , "clock-AF_VSOCK"    , "clock-AF_MAX"
};

/*
 * sk_callback_lock locking rules are per-address-family,
 * so split the lock classes by using a per-AF key:
 */
static struct lock_class_key af_callback_keys[AF_MAX];

/* Take into consideration the size of the struct sk_buff overhead in the
 * determination of these values, since that is non-constant across
 * platforms.  This makes socket queueing behavior and performance
 * not depend upon such differences.
 */
#define _SK_MEM_PACKETS		256
#define _SK_MEM_OVERHEAD	SKB_TRUESIZE(256)
#define SK_WMEM_MAX		(_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
#define SK_RMEM_MAX		(_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)

/* Run time adjustable parameters. */
__u32 sysctl_wmem_max __read_mostly = SK_WMEM_MAX;
EXPORT_SYMBOL(sysctl_wmem_max);
__u32 sysctl_rmem_max __read_mostly = SK_RMEM_MAX;
EXPORT_SYMBOL(sysctl_rmem_max);
__u32 sysctl_wmem_default __read_mostly = SK_WMEM_MAX;
__u32 sysctl_rmem_default __read_mostly = SK_RMEM_MAX;

/* Maximal space eaten by iovec or ancillary data plus some space */
int sysctl_optmem_max __read_mostly = sizeof(unsigned long)*(2*UIO_MAXIOV+512);
EXPORT_SYMBOL(sysctl_optmem_max);
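
/*
 * For a feel of the numbers (illustrative only; SKB_TRUESIZE depends on
 * struct sizes and cache-line alignment, so the exact figure varies by
 * build): if SKB_TRUESIZE(256) came to roughly 768 bytes, the defaults
 * above would be 768 * 256 = 196608 bytes, i.e. about 192 KiB of default
 * buffer space per socket direction.
 */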
struct static_key memalloc_socks = STATIC_KEY_INIT_FALSE;
EXPORT_SYMBOL_GPL(memalloc_socks);

/**
 * sk_set_memalloc - sets %SOCK_MEMALLOC
 * @sk: socket to set it on
 *
 * Set %SOCK_MEMALLOC on a socket for access to emergency reserves.
 * It's the responsibility of the admin to adjust min_free_kbytes
 * to meet the requirements
 */
void sk_set_memalloc(struct sock *sk)
{
	sock_set_flag(sk, SOCK_MEMALLOC);
	sk->sk_allocation |= __GFP_MEMALLOC;
	static_key_slow_inc(&memalloc_socks);
}
EXPORT_SYMBOL_GPL(sk_set_memalloc);
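
/*
 * Illustrative caller (a sketch, not from this file): a swap-over-network
 * style transport marks its socket so that reclaim-critical traffic may
 * dip into the emergency reserves, and clears the flag again on teardown:
 *
 *	sk_set_memalloc(sock->sk);
 *	... service memory-reclaim I/O on the socket ...
 *	sk_clear_memalloc(sock->sk);
 */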
void sk_clear_memalloc(struct sock *sk)
{
	sock_reset_flag(sk, SOCK_MEMALLOC);
	sk->sk_allocation &= ~__GFP_MEMALLOC;
	static_key_slow_dec(&memalloc_socks);

	/*
	 * SOCK_MEMALLOC is allowed to ignore rmem limits to ensure forward
	 * progress of swapping. However, if SOCK_MEMALLOC is cleared while
	 * it has rmem allocations there is a risk that the user of the
	 * socket cannot make forward progress due to exceeding the rmem
	 * limits. By rights, sk_clear_memalloc() should only be called
	 * on sockets being torn down but warn and reset the accounting if
	 * that assumption breaks.
	 */
	if (WARN_ON(sk->sk_forward_alloc))
		sk_mem_reclaim(sk);
}
EXPORT_SYMBOL_GPL(sk_clear_memalloc);

int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
	int ret;
	unsigned long pflags = current->flags;

	/* these should have been dropped before queueing */
	BUG_ON(!sock_flag(sk, SOCK_MEMALLOC));

	current->flags |= PF_MEMALLOC;
	ret = sk->sk_backlog_rcv(sk, skb);
	tsk_restore_flags(current, pflags, PF_MEMALLOC);

	return ret;
}
EXPORT_SYMBOL(__sk_backlog_rcv);
static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen)
{
	struct timeval tv;

	if (optlen < sizeof(tv))
		return -EINVAL;
	if (copy_from_user(&tv, optval, sizeof(tv)))
		return -EFAULT;
	if (tv.tv_usec < 0 || tv.tv_usec >= USEC_PER_SEC)
		return -EDOM;

	if (tv.tv_sec < 0) {
		static int warned __read_mostly;

		*timeo_p = 0;
		if (warned < 10 && net_ratelimit()) {
			warned++;
			pr_info("%s: `%s' (pid %d) tries to set negative timeout\n",
				__func__, current->comm, task_pid_nr(current));
		}
		return 0;
	}
	*timeo_p = MAX_SCHEDULE_TIMEOUT;
	if (tv.tv_sec == 0 && tv.tv_usec == 0)
		return 0;
	if (tv.tv_sec < (MAX_SCHEDULE_TIMEOUT/HZ - 1))
		*timeo_p = tv.tv_sec*HZ + (tv.tv_usec+(1000000/HZ-1))/(1000000/HZ);
	return 0;
}
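
/*
 * Worked example of the conversion above (illustrative): with HZ=100,
 * a timeout of { .tv_sec = 1, .tv_usec = 500000 } becomes
 * 1*100 + (500000 + 9999)/10000 = 150 jiffies. The rounding term makes
 * any fractional tick (e.g. tv_usec = 1) count as a whole jiffy, so the
 * effective timeout is never shorter than what was requested.
 */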
static void sock_warn_obsolete_bsdism(const char *name)
{
	static int warned;
	static char warncomm[TASK_COMM_LEN];
	if (strcmp(warncomm, current->comm) && warned < 5) {
		strcpy(warncomm, current->comm);
		pr_warn("process `%s' is using obsolete %s SO_BSDCOMPAT\n",
			warncomm, name);
		warned++;
	}
}

#define SK_FLAGS_TIMESTAMP ((1UL << SOCK_TIMESTAMP) | (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE))

static void sock_disable_timestamp(struct sock *sk, unsigned long flags)
{
	if (sk->sk_flags & flags) {
		sk->sk_flags &= ~flags;
		if (!(sk->sk_flags & SK_FLAGS_TIMESTAMP))
			net_disable_timestamp();
	}
}
int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	int err;
	int skb_len;
	unsigned long flags;
	struct sk_buff_head *list = &sk->sk_receive_queue;

	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
		atomic_inc(&sk->sk_drops);
		trace_sock_rcvqueue_full(sk, skb);
		return -ENOMEM;
	}

	err = sk_filter(sk, skb);
	if (err)
		return err;

	if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
		atomic_inc(&sk->sk_drops);
		return -ENOBUFS;
	}

	skb->dev = NULL;
	skb_set_owner_r(skb, sk);

	/* Cache the SKB length before we tack it onto the receive
	 * queue. Once it is added it no longer belongs to us and
	 * may be freed by other threads of control pulling packets
	 * from the queue.
	 */
	skb_len = skb->len;

	/* we escape from rcu protected region, make sure we don't leak
	 * a norefcounted dst
	 */
	skb_dst_force(skb);

	spin_lock_irqsave(&list->lock, flags);
	skb->dropcount = atomic_read(&sk->sk_drops);
	__skb_queue_tail(list, skb);
	spin_unlock_irqrestore(&list->lock, flags);

	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk);
	return 0;
}
EXPORT_SYMBOL(sock_queue_rcv_skb);
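
/*
 * Illustrative caller (a sketch, not from this file): a datagram-style
 * protocol's receive handler hands a matched skb to the owning socket and
 * drops it on failure; "my_proto_rcv" is hypothetical:
 *
 *	static int my_proto_rcv(struct sock *sk, struct sk_buff *skb)
 *	{
 *		if (sock_queue_rcv_skb(sk, skb) < 0) {
 *			kfree_skb(skb);
 *			return NET_RX_DROP;
 *		}
 *		return NET_RX_SUCCESS;
 *	}
 */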
int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
{
	int rc = NET_RX_SUCCESS;

	if (sk_filter(sk, skb))
		goto discard_and_relse;

	skb->dev = NULL;

	if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf)) {
		atomic_inc(&sk->sk_drops);
		goto discard_and_relse;
	}
	if (nested)
		bh_lock_sock_nested(sk);
	else
		bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		/*
		 * trylock + unlock semantics:
		 */
		mutex_acquire(&sk->sk_lock.dep_map, 0, 1, _RET_IP_);

		rc = sk_backlog_rcv(sk, skb);

		mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
	} else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
		bh_unlock_sock(sk);
		atomic_inc(&sk->sk_drops);
		goto discard_and_relse;
	}

	bh_unlock_sock(sk);
out:
	sock_put(sk);
	return rc;
discard_and_relse:
	kfree_skb(skb);
	goto out;
}
EXPORT_SYMBOL(sk_receive_skb);

struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie)
{
	struct dst_entry *dst = __sk_dst_get(sk);

	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
		sk_tx_queue_clear(sk);
		RCU_INIT_POINTER(sk->sk_dst_cache, NULL);
		dst_release(dst);
		return NULL;
	}

	return dst;
}
EXPORT_SYMBOL(__sk_dst_check);

struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie)
{
	struct dst_entry *dst = sk_dst_get(sk);

	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
		sk_dst_reset(sk);
		dst_release(dst);
		return NULL;
	}

	return dst;
}
EXPORT_SYMBOL(sk_dst_check);
static int sock_setbindtodevice(struct sock *sk, char __user *optval,
				int optlen)
{
	int ret = -ENOPROTOOPT;
#ifdef CONFIG_NETDEVICES
	struct net *net = sock_net(sk);
	char devname[IFNAMSIZ];
	int index;

	/* Sorry... */
	ret = -EPERM;
	if (!ns_capable(net->user_ns, CAP_NET_RAW))
		goto out;

	ret = -EINVAL;
	if (optlen < 0)
		goto out;

	/* Bind this socket to a particular device like "eth0",
	 * as specified in the passed interface name. If the
	 * name is "" or the option length is zero the socket
	 * is not bound.
	 */
	if (optlen > IFNAMSIZ - 1)
		optlen = IFNAMSIZ - 1;
	memset(devname, 0, sizeof(devname));

	ret = -EFAULT;
	if (copy_from_user(devname, optval, optlen))
		goto out;

	index = 0;
	if (devname[0] != '\0') {
		struct net_device *dev;

		rcu_read_lock();
		dev = dev_get_by_name_rcu(net, devname);
		if (dev)
			index = dev->ifindex;
		rcu_read_unlock();
		ret = -ENODEV;
		if (!dev)
			goto out;
	}

	lock_sock(sk);
	sk->sk_bound_dev_if = index;
	sk_dst_reset(sk);
	release_sock(sk);

	ret = 0;

out:
#endif

	return ret;
}
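
/*
 * User-space view (illustrative, not part of this file): binding a socket
 * to "eth0" requires CAP_NET_RAW, and an empty name or zero length clears
 * the binding again, per the comment above:
 *
 *	setsockopt(fd, SOL_SOCKET, SO_BINDTODEVICE, "eth0", strlen("eth0"));
 *	setsockopt(fd, SOL_SOCKET, SO_BINDTODEVICE, "", 0);
 */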
static int sock_getbindtodevice(struct sock *sk, char __user *optval,
				int __user *optlen, int len)
{
	int ret = -ENOPROTOOPT;
#ifdef CONFIG_NETDEVICES
	struct net *net = sock_net(sk);
	char devname[IFNAMSIZ];

	if (sk->sk_bound_dev_if == 0) {
		len = 0;
		goto zero;
	}

	ret = -EINVAL;
	if (len < IFNAMSIZ)
		goto out;

	ret = netdev_get_name(net, devname, sk->sk_bound_dev_if);
	if (ret)
		goto out;

	len = strlen(devname) + 1;

	ret = -EFAULT;
	if (copy_to_user(optval, devname, len))
		goto out;

zero:
	ret = -EFAULT;
	if (put_user(len, optlen))
		goto out;

	ret = 0;

out:
#endif

	return ret;
}
static inline void sock_valbool_flag(struct sock *sk, int bit, int valbool)
{
	if (valbool)
		sock_set_flag(sk, bit);
	else
		sock_reset_flag(sk, bit);
}

/*
 *	This is meant for all protocols to use and covers goings on
 *	at the socket level. Everything here is generic.
 */

int sock_setsockopt(struct socket *sock, int level, int optname,
		    char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	int val;
	int valbool;
	struct linger ling;
	int ret = 0;

	/*
	 *	Options without arguments
	 */

	if (optname == SO_BINDTODEVICE)
		return sock_setbindtodevice(sk, optval, optlen);

	if (optlen < sizeof(int))
		return -EINVAL;

	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	valbool = val ? 1 : 0;

	lock_sock(sk);

	switch (optname) {
	case SO_DEBUG:
		if (val && !capable(CAP_NET_ADMIN))
			ret = -EACCES;
		else
			sock_valbool_flag(sk, SOCK_DBG, valbool);
		break;
	case SO_REUSEADDR:
		sk->sk_reuse = (valbool ? SK_CAN_REUSE : SK_NO_REUSE);
		break;
	case SO_REUSEPORT:
		sk->sk_reuseport = valbool;
		break;
	case SO_TYPE:
	case SO_PROTOCOL:
	case SO_DOMAIN:
	case SO_ERROR:
		ret = -ENOPROTOOPT;
		break;
	case SO_DONTROUTE:
		sock_valbool_flag(sk, SOCK_LOCALROUTE, valbool);
		break;
	case SO_BROADCAST:
		sock_valbool_flag(sk, SOCK_BROADCAST, valbool);
		break;
	case SO_SNDBUF:
		/* Don't error on this. BSD doesn't, and if you think
		 * about it, this is right. Otherwise apps have to
		 * play 'guess the biggest size' games. RCVBUF/SNDBUF
		 * are treated in BSD as hints.
		 */
		val = min_t(u32, val, sysctl_wmem_max);
set_sndbuf:
		sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
		sk->sk_sndbuf = max_t(u32, val * 2, SOCK_MIN_SNDBUF);
		/* Wake up sending tasks if we upped the value. */
		sk->sk_write_space(sk);
		break;

	case SO_SNDBUFFORCE:
		if (!capable(CAP_NET_ADMIN)) {
			ret = -EPERM;
			break;
		}
		goto set_sndbuf;

	case SO_RCVBUF:
		/* Don't error on this. BSD doesn't, and if you think
		 * about it, this is right. Otherwise apps have to
		 * play 'guess the biggest size' games. RCVBUF/SNDBUF
		 * are treated in BSD as hints.
		 */
		val = min_t(u32, val, sysctl_rmem_max);
set_rcvbuf:
		sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
		/*
		 * We double it on the way in to account for
		 * "struct sk_buff" etc. overhead. Applications
		 * assume that the SO_RCVBUF setting they make will
		 * allow that much actual data to be received on that
		 * socket.
		 *
		 * Applications are unaware that "struct sk_buff" and
		 * other overheads allocate from the receive buffer
		 * during socket buffer allocation.
		 *
		 * And after considering the possible alternatives,
		 * returning the value we actually used in getsockopt
		 * is the most desirable behavior.
		 */
		sk->sk_rcvbuf = max_t(u32, val * 2, SOCK_MIN_RCVBUF);
		break;

	case SO_RCVBUFFORCE:
		if (!capable(CAP_NET_ADMIN)) {
			ret = -EPERM;
			break;
		}
		goto set_rcvbuf;

	case SO_KEEPALIVE:
#ifdef CONFIG_INET
		if (sk->sk_protocol == IPPROTO_TCP &&
		    sk->sk_type == SOCK_STREAM)
			tcp_set_keepalive(sk, valbool);
#endif
		sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool);
		break;

	case SO_OOBINLINE:
		sock_valbool_flag(sk, SOCK_URGINLINE, valbool);
		break;

	case SO_NO_CHECK:
		sk->sk_no_check_tx = valbool;
		break;

	case SO_PRIORITY:
		if ((val >= 0 && val <= 6) ||
		    ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
			sk->sk_priority = val;
		else
			ret = -EPERM;
		break;

	case SO_LINGER:
		if (optlen < sizeof(ling)) {
			ret = -EINVAL;	/* 1003.1g */
			break;
		}
		if (copy_from_user(&ling, optval, sizeof(ling))) {
			ret = -EFAULT;
			break;
		}
		if (!ling.l_onoff)
			sock_reset_flag(sk, SOCK_LINGER);
		else {
#if (BITS_PER_LONG == 32)
			if ((unsigned int)ling.l_linger >= MAX_SCHEDULE_TIMEOUT/HZ)
				sk->sk_lingertime = MAX_SCHEDULE_TIMEOUT;
			else
#endif
				sk->sk_lingertime = (unsigned int)ling.l_linger * HZ;
			sock_set_flag(sk, SOCK_LINGER);
		}
		break;

	case SO_BSDCOMPAT:
		sock_warn_obsolete_bsdism("setsockopt");
		break;

	case SO_PASSCRED:
		if (valbool)
			set_bit(SOCK_PASSCRED, &sock->flags);
		else
			clear_bit(SOCK_PASSCRED, &sock->flags);
		break;

	case SO_TIMESTAMP:
	case SO_TIMESTAMPNS:
		if (valbool) {
			if (optname == SO_TIMESTAMP)
				sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
			else
				sock_set_flag(sk, SOCK_RCVTSTAMPNS);
			sock_set_flag(sk, SOCK_RCVTSTAMP);
			sock_enable_timestamp(sk, SOCK_TIMESTAMP);
		} else {
			sock_reset_flag(sk, SOCK_RCVTSTAMP);
			sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
		}
		break;

	case SO_TIMESTAMPING:
		if (val & ~SOF_TIMESTAMPING_MASK) {
			ret = -EINVAL;
			break;
		}
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_TX_HARDWARE,
				  val & SOF_TIMESTAMPING_TX_HARDWARE);
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_TX_SOFTWARE,
				  val & SOF_TIMESTAMPING_TX_SOFTWARE);
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_RX_HARDWARE,
				  val & SOF_TIMESTAMPING_RX_HARDWARE);
		if (val & SOF_TIMESTAMPING_RX_SOFTWARE)
			sock_enable_timestamp(sk,
					      SOCK_TIMESTAMPING_RX_SOFTWARE);
		else
			sock_disable_timestamp(sk,
					       (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE));
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_SOFTWARE,
				  val & SOF_TIMESTAMPING_SOFTWARE);
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_SYS_HARDWARE,
				  val & SOF_TIMESTAMPING_SYS_HARDWARE);
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_RAW_HARDWARE,
				  val & SOF_TIMESTAMPING_RAW_HARDWARE);
		break;

	case SO_RCVLOWAT:
		if (val < 0)
			val = INT_MAX;
		sk->sk_rcvlowat = val ? : 1;
		break;

	case SO_RCVTIMEO:
		ret = sock_set_timeout(&sk->sk_rcvtimeo, optval, optlen);
		break;

	case SO_SNDTIMEO:
		ret = sock_set_timeout(&sk->sk_sndtimeo, optval, optlen);
		break;

	case SO_ATTACH_FILTER:
		ret = -EINVAL;
		if (optlen == sizeof(struct sock_fprog)) {
			struct sock_fprog fprog;

			ret = -EFAULT;
			if (copy_from_user(&fprog, optval, sizeof(fprog)))
				break;

			ret = sk_attach_filter(&fprog, sk);
		}
		break;

	case SO_DETACH_FILTER:
		ret = sk_detach_filter(sk);
		break;

	case SO_LOCK_FILTER:
		if (sock_flag(sk, SOCK_FILTER_LOCKED) && !valbool)
			ret = -EPERM;
		else
			sock_valbool_flag(sk, SOCK_FILTER_LOCKED, valbool);
		break;

	case SO_PASSSEC:
		if (valbool)
			set_bit(SOCK_PASSSEC, &sock->flags);
		else
			clear_bit(SOCK_PASSSEC, &sock->flags);
		break;

	case SO_MARK:
		if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
			ret = -EPERM;
		else
			sk->sk_mark = val;
		break;

		/* We implement the SO_SNDLOWAT etc to
		   not be settable (1003.1g 5.3) */
	case SO_RXQ_OVFL:
		sock_valbool_flag(sk, SOCK_RXQ_OVFL, valbool);
		break;

	case SO_WIFI_STATUS:
		sock_valbool_flag(sk, SOCK_WIFI_STATUS, valbool);
		break;

	case SO_PEEK_OFF:
		if (sock->ops->set_peek_off)
			ret = sock->ops->set_peek_off(sk, val);
		else
			ret = -EOPNOTSUPP;
		break;

	case SO_NOFCS:
		sock_valbool_flag(sk, SOCK_NOFCS, valbool);
		break;

	case SO_SELECT_ERR_QUEUE:
		sock_valbool_flag(sk, SOCK_SELECT_ERR_QUEUE, valbool);
		break;

#ifdef CONFIG_NET_RX_BUSY_POLL
	case SO_BUSY_POLL:
		/* allow unprivileged users to decrease the value */
		if ((val > sk->sk_ll_usec) && !capable(CAP_NET_ADMIN))
			ret = -EPERM;
		else {
			if (val < 0)
				ret = -EINVAL;
			else
				sk->sk_ll_usec = val;
		}
		break;
#endif

	case SO_MAX_PACING_RATE:
		sk->sk_max_pacing_rate = val;
		sk->sk_pacing_rate = min(sk->sk_pacing_rate,
					 sk->sk_max_pacing_rate);
		break;

	default:
		ret = -ENOPROTOOPT;
		break;
	}
	release_sock(sk);
	return ret;
}
EXPORT_SYMBOL(sock_setsockopt);
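
/*
 * User-space illustration (not part of this file) of the SO_RCVBUF
 * doubling described above: the value read back is twice what was set,
 * subject to the sysctl_rmem_max cap and the SOCK_MIN_RCVBUF floor:
 *
 *	int val = 65536, out;
 *	socklen_t len = sizeof(out);
 *
 *	setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &val, sizeof(val));
 *	getsockopt(fd, SOL_SOCKET, SO_RCVBUF, &out, &len);
 *	assert(out == 2 * val);
 */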
static void cred_to_ucred(struct pid *pid, const struct cred *cred,
			  struct ucred *ucred)
{
	ucred->pid = pid_vnr(pid);
	ucred->uid = ucred->gid = -1;
	if (cred) {
		struct user_namespace *current_ns = current_user_ns();

		ucred->uid = from_kuid_munged(current_ns, cred->euid);
		ucred->gid = from_kgid_munged(current_ns, cred->egid);
	}
}

int sock_getsockopt(struct socket *sock, int level, int optname,
		    char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;

	union {
		int val;
		struct linger ling;
		struct timeval tm;
	} v;

	int lv = sizeof(int);
	int len;

	if (get_user(len, optlen))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	memset(&v, 0, sizeof(v));

	switch (optname) {
	case SO_DEBUG:
		v.val = sock_flag(sk, SOCK_DBG);
		break;

	case SO_DONTROUTE:
		v.val = sock_flag(sk, SOCK_LOCALROUTE);
		break;

	case SO_BROADCAST:
		v.val = sock_flag(sk, SOCK_BROADCAST);
		break;

	case SO_SNDBUF:
		v.val = sk->sk_sndbuf;
		break;

	case SO_RCVBUF:
		v.val = sk->sk_rcvbuf;
		break;

	case SO_REUSEADDR:
		v.val = sk->sk_reuse;
		break;

	case SO_REUSEPORT:
		v.val = sk->sk_reuseport;
		break;

	case SO_KEEPALIVE:
		v.val = sock_flag(sk, SOCK_KEEPOPEN);
		break;

	case SO_TYPE:
		v.val = sk->sk_type;
		break;

	case SO_PROTOCOL:
		v.val = sk->sk_protocol;
		break;

	case SO_DOMAIN:
		v.val = sk->sk_family;
		break;

	case SO_ERROR:
		v.val = -sock_error(sk);
		if (v.val == 0)
			v.val = xchg(&sk->sk_err_soft, 0);
		break;

	case SO_OOBINLINE:
		v.val = sock_flag(sk, SOCK_URGINLINE);
		break;

	case SO_NO_CHECK:
		v.val = sk->sk_no_check_tx;
		break;

	case SO_PRIORITY:
		v.val = sk->sk_priority;
		break;

	case SO_LINGER:
		lv		= sizeof(v.ling);
		v.ling.l_onoff	= sock_flag(sk, SOCK_LINGER);
		v.ling.l_linger	= sk->sk_lingertime / HZ;
		break;

	case SO_BSDCOMPAT:
		sock_warn_obsolete_bsdism("getsockopt");
		break;

	case SO_TIMESTAMP:
		v.val = sock_flag(sk, SOCK_RCVTSTAMP) &&
				!sock_flag(sk, SOCK_RCVTSTAMPNS);
		break;

	case SO_TIMESTAMPNS:
		v.val = sock_flag(sk, SOCK_RCVTSTAMPNS);
		break;

	case SO_TIMESTAMPING:
		v.val = 0;
		if (sock_flag(sk, SOCK_TIMESTAMPING_TX_HARDWARE))
			v.val |= SOF_TIMESTAMPING_TX_HARDWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_TX_SOFTWARE))
			v.val |= SOF_TIMESTAMPING_TX_SOFTWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_RX_HARDWARE))
			v.val |= SOF_TIMESTAMPING_RX_HARDWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_RX_SOFTWARE))
			v.val |= SOF_TIMESTAMPING_RX_SOFTWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_SOFTWARE))
			v.val |= SOF_TIMESTAMPING_SOFTWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_SYS_HARDWARE))
			v.val |= SOF_TIMESTAMPING_SYS_HARDWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_RAW_HARDWARE))
			v.val |= SOF_TIMESTAMPING_RAW_HARDWARE;
		break;

	case SO_RCVTIMEO:
		lv = sizeof(struct timeval);
		if (sk->sk_rcvtimeo == MAX_SCHEDULE_TIMEOUT) {
			v.tm.tv_sec = 0;
			v.tm.tv_usec = 0;
		} else {
			v.tm.tv_sec = sk->sk_rcvtimeo / HZ;
			v.tm.tv_usec = ((sk->sk_rcvtimeo % HZ) * 1000000) / HZ;
		}
		break;

	case SO_SNDTIMEO:
		lv = sizeof(struct timeval);
		if (sk->sk_sndtimeo == MAX_SCHEDULE_TIMEOUT) {
			v.tm.tv_sec = 0;
			v.tm.tv_usec = 0;
		} else {
			v.tm.tv_sec = sk->sk_sndtimeo / HZ;
			v.tm.tv_usec = ((sk->sk_sndtimeo % HZ) * 1000000) / HZ;
		}
		break;

	case SO_RCVLOWAT:
		v.val = sk->sk_rcvlowat;
		break;

	case SO_SNDLOWAT:
		v.val = 1;
		break;

	case SO_PASSCRED:
		v.val = !!test_bit(SOCK_PASSCRED, &sock->flags);
		break;

	case SO_PEERCRED:
	{
		struct ucred peercred;
		if (len > sizeof(peercred))
			len = sizeof(peercred);
		cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
		if (copy_to_user(optval, &peercred, len))
			return -EFAULT;
		goto lenout;
	}

	case SO_PEERNAME:
	{
		char address[128];

		if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2))
			return -ENOTCONN;
		if (lv < len)
			return -EINVAL;
		if (copy_to_user(optval, address, len))
			return -EFAULT;
		goto lenout;
	}

	/* Dubious BSD thing... Probably nobody even uses it, but
	 * the UNIX standard wants it for whatever reason... -DaveM
	 */
	case SO_ACCEPTCONN:
		v.val = sk->sk_state == TCP_LISTEN;
		break;

	case SO_PASSSEC:
		v.val = !!test_bit(SOCK_PASSSEC, &sock->flags);
		break;

	case SO_PEERSEC:
		return security_socket_getpeersec_stream(sock, optval, optlen, len);

	case SO_MARK:
		v.val = sk->sk_mark;
		break;

	case SO_RXQ_OVFL:
		v.val = sock_flag(sk, SOCK_RXQ_OVFL);
		break;

	case SO_WIFI_STATUS:
		v.val = sock_flag(sk, SOCK_WIFI_STATUS);
		break;

	case SO_PEEK_OFF:
		if (!sock->ops->set_peek_off)
			return -EOPNOTSUPP;

		v.val = sk->sk_peek_off;
		break;

	case SO_NOFCS:
		v.val = sock_flag(sk, SOCK_NOFCS);
		break;

	case SO_BINDTODEVICE:
		return sock_getbindtodevice(sk, optval, optlen, len);

	case SO_GET_FILTER:
		len = sk_get_filter(sk, (struct sock_filter __user *)optval, len);
		if (len < 0)
			return len;

		goto lenout;

	case SO_LOCK_FILTER:
		v.val = sock_flag(sk, SOCK_FILTER_LOCKED);
		break;

	case SO_BPF_EXTENSIONS:
		v.val = bpf_tell_extensions();
		break;

	case SO_SELECT_ERR_QUEUE:
		v.val = sock_flag(sk, SOCK_SELECT_ERR_QUEUE);
		break;

#ifdef CONFIG_NET_RX_BUSY_POLL
	case SO_BUSY_POLL:
		v.val = sk->sk_ll_usec;
		break;
#endif

	case SO_MAX_PACING_RATE:
		v.val = sk->sk_max_pacing_rate;
		break;

	default:
		return -ENOPROTOOPT;
	}

	if (len > lv)
		len = lv;
	if (copy_to_user(optval, &v, len))
		return -EFAULT;
lenout:
	if (put_user(len, optlen))
		return -EFAULT;
	return 0;
}
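
/*
 * Illustrative user-space use of SO_PEERCRED on a connected AF_UNIX
 * socket (a sketch, not part of this file):
 *
 *	struct ucred uc;
 *	socklen_t len = sizeof(uc);
 *
 *	if (getsockopt(fd, SOL_SOCKET, SO_PEERCRED, &uc, &len) == 0)
 *		printf("peer pid=%d uid=%d gid=%d\n", uc.pid, uc.uid, uc.gid);
 */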
/*
 * Initialize an sk_lock.
 *
 * (We also register the sk_lock with the lock validator.)
 */
static inline void sock_lock_init(struct sock *sk)
{
	sock_lock_init_class_and_name(sk,
			af_family_slock_key_strings[sk->sk_family],
			af_family_slock_keys + sk->sk_family,
			af_family_key_strings[sk->sk_family],
			af_family_keys + sk->sk_family);
}

/*
 * Copy all fields from osk to nsk but nsk->sk_refcnt must not change yet,
 * even temporarily, because of RCU lookups. sk_node should also be left as is.
 * We must not copy fields between sk_dontcopy_begin and sk_dontcopy_end
 */
static void sock_copy(struct sock *nsk, const struct sock *osk)
{
#ifdef CONFIG_SECURITY_NETWORK
	void *sptr = nsk->sk_security;
#endif
	memcpy(nsk, osk, offsetof(struct sock, sk_dontcopy_begin));

	memcpy(&nsk->sk_dontcopy_end, &osk->sk_dontcopy_end,
	       osk->sk_prot->obj_size - offsetof(struct sock, sk_dontcopy_end));

#ifdef CONFIG_SECURITY_NETWORK
	nsk->sk_security = sptr;
	security_sk_clone(osk, nsk);
#endif
}

void sk_prot_clear_portaddr_nulls(struct sock *sk, int size)
{
	unsigned long nulls1, nulls2;

	nulls1 = offsetof(struct sock, __sk_common.skc_node.next);
	nulls2 = offsetof(struct sock, __sk_common.skc_portaddr_node.next);
	if (nulls1 > nulls2)
		swap(nulls1, nulls2);

	if (nulls1 != 0)
		memset((char *)sk, 0, nulls1);
	memset((char *)sk + nulls1 + sizeof(void *), 0,
	       nulls2 - nulls1 - sizeof(void *));
	memset((char *)sk + nulls2 + sizeof(void *), 0,
	       size - nulls2 - sizeof(void *));
}
EXPORT_SYMBOL(sk_prot_clear_portaddr_nulls);
static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority,
		int family)
{
	struct sock *sk;
	struct kmem_cache *slab;

	slab = prot->slab;
	if (slab != NULL) {
		sk = kmem_cache_alloc(slab, priority & ~__GFP_ZERO);
		if (!sk)
			return sk;
		if (priority & __GFP_ZERO) {
			if (prot->clear_sk)
				prot->clear_sk(sk, prot->obj_size);
			else
				sk_prot_clear_nulls(sk, prot->obj_size);
		}
	} else
		sk = kmalloc(prot->obj_size, priority);

	if (sk != NULL) {
		kmemcheck_annotate_bitfield(sk, flags);

		if (security_sk_alloc(sk, family, priority))
			goto out_free;

		if (!try_module_get(prot->owner))
			goto out_free_sec;
		sk_tx_queue_clear(sk);
	}

	return sk;

out_free_sec:
	security_sk_free(sk);
out_free:
	if (slab != NULL)
		kmem_cache_free(slab, sk);
	else
		kfree(sk);
	return NULL;
}

static void sk_prot_free(struct proto *prot, struct sock *sk)
{
	struct kmem_cache *slab;
	struct module *owner;

	owner = prot->owner;
	slab = prot->slab;

	security_sk_free(sk);
	if (slab != NULL)
		kmem_cache_free(slab, sk);
	else
		kfree(sk);
	module_put(owner);
}

#if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
void sock_update_netprioidx(struct sock *sk)
{
	if (in_interrupt())
		return;

	sk->sk_cgrp_prioidx = task_netprioidx(current);
}
EXPORT_SYMBOL_GPL(sock_update_netprioidx);
#endif
/**
 *	sk_alloc - All socket objects are allocated here
 *	@net: the applicable net namespace
 *	@family: protocol family
 *	@priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
 *	@prot: struct proto associated with this new sock instance
 */
struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
		      struct proto *prot)
{
	struct sock *sk;

	sk = sk_prot_alloc(prot, priority | __GFP_ZERO, family);
	if (sk) {
		sk->sk_family = family;
		/*
		 * See comment in struct sock definition to understand
		 * why we need sk_prot_creator -acme
		 */
		sk->sk_prot = sk->sk_prot_creator = prot;
		sock_lock_init(sk);
		sock_net_set(sk, get_net(net));
		atomic_set(&sk->sk_wmem_alloc, 1);

		sock_update_classid(sk);
		sock_update_netprioidx(sk);
	}

	return sk;
}
EXPORT_SYMBOL(sk_alloc);

static void __sk_free(struct sock *sk)
{
	struct sk_filter *filter;

	if (sk->sk_destruct)
		sk->sk_destruct(sk);

	filter = rcu_dereference_check(sk->sk_filter,
				       atomic_read(&sk->sk_wmem_alloc) == 0);
	if (filter) {
		sk_filter_uncharge(sk, filter);
		RCU_INIT_POINTER(sk->sk_filter, NULL);
	}

	sock_disable_timestamp(sk, SK_FLAGS_TIMESTAMP);

	if (atomic_read(&sk->sk_omem_alloc))
		pr_debug("%s: optmem leakage (%d bytes) detected\n",
			 __func__, atomic_read(&sk->sk_omem_alloc));

	if (sk->sk_peer_cred)
		put_cred(sk->sk_peer_cred);
	put_pid(sk->sk_peer_pid);
	put_net(sock_net(sk));
	sk_prot_free(sk->sk_prot_creator, sk);
}

void sk_free(struct sock *sk)
{
	/*
	 * We subtract one from sk_wmem_alloc and can tell whether
	 * some packets are still in some tx queue.
	 * If the count is not zero, sock_wfree() will call __sk_free(sk)
	 * later.
	 */
	if (atomic_dec_and_test(&sk->sk_wmem_alloc))
		__sk_free(sk);
}
EXPORT_SYMBOL(sk_free);

/*
 * The last sock_put should drop a reference to sk->sk_net. It has already
 * been dropped in sk_change_net. Taking a reference to a stopping
 * namespace is not an option.
 * Take a reference to the socket to remove it from the hash _alive_ and
 * after that destroy it in the context of init_net.
 */
void sk_release_kernel(struct sock *sk)
{
	if (sk == NULL || sk->sk_socket == NULL)
		return;

	sock_hold(sk);
	sock_release(sk->sk_socket);
	release_net(sock_net(sk));
	sock_net_set(sk, get_net(&init_net));
	sock_put(sk);
}
EXPORT_SYMBOL(sk_release_kernel);
  1255. static void sk_update_clone(const struct sock *sk, struct sock *newsk)
  1256. {
  1257. if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
  1258. sock_update_memcg(newsk);
  1259. }

/**
 * sk_clone_lock - clone a socket, and lock its clone
 * @sk: the socket to clone
 * @priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
 *
 * Caller must unlock socket even in error path (bh_unlock_sock(newsk))
 */
struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
{
	struct sock *newsk;

	newsk = sk_prot_alloc(sk->sk_prot, priority, sk->sk_family);
	if (newsk != NULL) {
		struct sk_filter *filter;

		sock_copy(newsk, sk);

		/* SANITY */
		get_net(sock_net(newsk));
		sk_node_init(&newsk->sk_node);
		sock_lock_init(newsk);
		bh_lock_sock(newsk);
		newsk->sk_backlog.head = newsk->sk_backlog.tail = NULL;
		newsk->sk_backlog.len = 0;

		atomic_set(&newsk->sk_rmem_alloc, 0);
		/*
		 * sk_wmem_alloc set to one (see sk_free() and sock_wfree())
		 */
		atomic_set(&newsk->sk_wmem_alloc, 1);
		atomic_set(&newsk->sk_omem_alloc, 0);
		skb_queue_head_init(&newsk->sk_receive_queue);
		skb_queue_head_init(&newsk->sk_write_queue);
#ifdef CONFIG_NET_DMA
		skb_queue_head_init(&newsk->sk_async_wait_queue);
#endif

		spin_lock_init(&newsk->sk_dst_lock);
		rwlock_init(&newsk->sk_callback_lock);
		lockdep_set_class_and_name(&newsk->sk_callback_lock,
				af_callback_keys + newsk->sk_family,
				af_family_clock_key_strings[newsk->sk_family]);

		newsk->sk_dst_cache = NULL;
		newsk->sk_wmem_queued = 0;
		newsk->sk_forward_alloc = 0;
		newsk->sk_send_head = NULL;
		newsk->sk_userlocks = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;

		sock_reset_flag(newsk, SOCK_DONE);
		skb_queue_head_init(&newsk->sk_error_queue);

		filter = rcu_dereference_protected(newsk->sk_filter, 1);
		if (filter != NULL)
			sk_filter_charge(newsk, filter);

		if (unlikely(xfrm_sk_clone_policy(newsk))) {
			/* It is still a raw copy of the parent, so
			 * invalidate the destructor and do a plain sk_free().
			 */
			newsk->sk_destruct = NULL;
			bh_unlock_sock(newsk);
			sk_free(newsk);
			newsk = NULL;
			goto out;
		}

		newsk->sk_err = 0;
		newsk->sk_priority = 0;
		/*
		 * Before updating sk_refcnt, we must commit prior changes to memory
		 * (Documentation/RCU/rculist_nulls.txt for details)
		 */
		smp_wmb();
		atomic_set(&newsk->sk_refcnt, 2);

		/*
		 * Increment the counter in the same struct proto as the master
		 * sock (sk_refcnt_debug_inc uses newsk->sk_prot->socks, that
		 * is the same as sk->sk_prot->socks, as this field was copied
		 * with memcpy).
		 *
		 * This _changes_ the previous behaviour, where
		 * tcp_create_openreq_child always was incrementing the
		 * equivalent to tcp_prot->socks (inet_sock_nr), so this has
		 * to be taken into account in all callers. -acme
		 */
		sk_refcnt_debug_inc(newsk);
		sk_set_socket(newsk, NULL);
		newsk->sk_wq = NULL;

		sk_update_clone(sk, newsk);

		if (newsk->sk_prot->sockets_allocated)
			sk_sockets_allocated_inc(newsk);

		if (newsk->sk_flags & SK_FLAGS_TIMESTAMP)
			net_enable_timestamp();
	}
out:
	return newsk;
}
EXPORT_SYMBOL_GPL(sk_clone_lock);
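
/*
 * Example (editor's sketch, not part of the original file): a typical
 * caller clones under GFP_ATOMIC and must unlock the clone itself, as
 * the kernel-doc above notes. "my_accept_child" is a hypothetical name.
 */
#if 0
static struct sock *my_accept_child(const struct sock *parent)
{
	struct sock *child = sk_clone_lock(parent, GFP_ATOMIC);

	if (!child)
		return NULL;
	/* ... protocol-specific setup of the clone goes here ... */
	bh_unlock_sock(child);	/* sk_clone_lock() returned it locked */
	return child;
}
#endif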

void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
{
	__sk_dst_set(sk, dst);
	sk->sk_route_caps = dst->dev->features;
	if (sk->sk_route_caps & NETIF_F_GSO)
		sk->sk_route_caps |= NETIF_F_GSO_SOFTWARE;
	sk->sk_route_caps &= ~sk->sk_route_nocaps;
	if (sk_can_gso(sk)) {
		if (dst->header_len) {
			sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
		} else {
			sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
			sk->sk_gso_max_size = dst->dev->gso_max_size;
			sk->sk_gso_max_segs = dst->dev->gso_max_segs;
		}
	}
}
EXPORT_SYMBOL_GPL(sk_setup_caps);
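
/*
 * Example (editor's sketch, not part of the original file): protocols
 * typically call sk_setup_caps() right after attaching a fresh route,
 * e.g. in a connect() path (arguments abbreviated, details vary by
 * protocol):
 *
 *	rt = ip_route_connect(fl4, daddr, saddr, ...);
 *	if (IS_ERR(rt))
 *		return PTR_ERR(rt);
 *	sk_setup_caps(sk, &rt->dst);
 */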

/*
 * Simple resource managers for sockets.
 */

/*
 * Write buffer destructor automatically called from kfree_skb.
 */
void sock_wfree(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	unsigned int len = skb->truesize;

	if (!sock_flag(sk, SOCK_USE_WRITE_QUEUE)) {
		/*
		 * Keep a reference on sk_wmem_alloc, this will be released
		 * after sk_write_space() call
		 */
		atomic_sub(len - 1, &sk->sk_wmem_alloc);
		sk->sk_write_space(sk);
		len = 1;
	}
	/*
	 * if sk_wmem_alloc reaches 0, we must finish what sk_free()
	 * could not do because of in-flight packets
	 */
	if (atomic_sub_and_test(len, &sk->sk_wmem_alloc))
		__sk_free(sk);
}
EXPORT_SYMBOL(sock_wfree);
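
/*
 * Example (editor's sketch, not part of the original file): sock_wfree()
 * becomes an skb's destructor via skb_set_owner_w(), which also charges
 * skb->truesize to sk_wmem_alloc:
 *
 *	skb = alloc_skb(size, GFP_KERNEL);
 *	skb_set_owner_w(skb, sk);	// destructor = sock_wfree
 *	...
 *	kfree_skb(skb);			// runs sock_wfree(), uncharges sk
 */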

void skb_orphan_partial(struct sk_buff *skb)
{
	/* TCP stack sets skb->ooo_okay based on sk_wmem_alloc,
	 * so we do not completely orphan the skb, but transfer all
	 * accounted bytes but one, to avoid unexpected reorders.
	 */
	if (skb->destructor == sock_wfree
#ifdef CONFIG_INET
	    || skb->destructor == tcp_wfree
#endif
		) {
		atomic_sub(skb->truesize - 1, &skb->sk->sk_wmem_alloc);
		skb->truesize = 1;
	} else {
		skb_orphan(skb);
	}
}
EXPORT_SYMBOL(skb_orphan_partial);

/*
 * Read buffer destructor automatically called from kfree_skb.
 */
void sock_rfree(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	unsigned int len = skb->truesize;

	atomic_sub(len, &sk->sk_rmem_alloc);
	sk_mem_uncharge(sk, len);
}
EXPORT_SYMBOL(sock_rfree);

void sock_edemux(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;

#ifdef CONFIG_INET
	if (sk->sk_state == TCP_TIME_WAIT)
		inet_twsk_put(inet_twsk(sk));
	else
#endif
		sock_put(sk);
}
EXPORT_SYMBOL(sock_edemux);

kuid_t sock_i_uid(struct sock *sk)
{
	kuid_t uid;

	read_lock_bh(&sk->sk_callback_lock);
	uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : GLOBAL_ROOT_UID;
	read_unlock_bh(&sk->sk_callback_lock);
	return uid;
}
EXPORT_SYMBOL(sock_i_uid);

unsigned long sock_i_ino(struct sock *sk)
{
	unsigned long ino;

	read_lock_bh(&sk->sk_callback_lock);
	ino = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_ino : 0;
	read_unlock_bh(&sk->sk_callback_lock);
	return ino;
}
EXPORT_SYMBOL(sock_i_ino);

/*
 * Allocate a skb from the socket's send buffer.
 */
struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
			     gfp_t priority)
{
	if (force || atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
		struct sk_buff *skb = alloc_skb(size, priority);
		if (skb) {
			skb_set_owner_w(skb, sk);
			return skb;
		}
	}
	return NULL;
}
EXPORT_SYMBOL(sock_wmalloc);

/*
 * Allocate a memory block from the socket's option memory buffer.
 */
void *sock_kmalloc(struct sock *sk, int size, gfp_t priority)
{
	if ((unsigned int)size <= sysctl_optmem_max &&
	    atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
		void *mem;
		/* First do the add, to avoid the race if kmalloc
		 * might sleep.
		 */
		atomic_add(size, &sk->sk_omem_alloc);
		mem = kmalloc(size, priority);
		if (mem)
			return mem;
		atomic_sub(size, &sk->sk_omem_alloc);
	}
	return NULL;
}
EXPORT_SYMBOL(sock_kmalloc);

/*
 * Free an option memory block.
 */
void sock_kfree_s(struct sock *sk, void *mem, int size)
{
	kfree(mem);
	atomic_sub(size, &sk->sk_omem_alloc);
}
EXPORT_SYMBOL(sock_kfree_s);
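
/*
 * Example (editor's sketch, not part of the original file): option
 * memory must be released with the same size it was charged for, so
 * sk_omem_alloc balances out. "struct my_opt" is a hypothetical
 * structure.
 */
#if 0
struct my_opt {
	int value;
};

static int my_option_roundtrip(struct sock *sk)
{
	struct my_opt *opt = sock_kmalloc(sk, sizeof(*opt), GFP_KERNEL);

	if (!opt)
		return -ENOBUFS;
	/* ... use opt while it is charged to sk_omem_alloc ... */
	sock_kfree_s(sk, opt, sizeof(*opt));
	return 0;
}
#endif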

/* It is almost wait_for_tcp_memory minus release_sock/lock_sock.
 * I think these locks should be removed for datagram sockets.
 */
static long sock_wait_for_wmem(struct sock *sk, long timeo)
{
	DEFINE_WAIT(wait);

	clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
	for (;;) {
		if (!timeo)
			break;
		if (signal_pending(current))
			break;
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
		if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf)
			break;
		if (sk->sk_shutdown & SEND_SHUTDOWN)
			break;
		if (sk->sk_err)
			break;
		timeo = schedule_timeout(timeo);
	}
	finish_wait(sk_sleep(sk), &wait);
	return timeo;
}

/*
 * Generic send/receive buffer handlers
 */

struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
				     unsigned long data_len, int noblock,
				     int *errcode, int max_page_order)
{
	struct sk_buff *skb = NULL;
	unsigned long chunk;
	gfp_t gfp_mask;
	long timeo;
	int err;
	int npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	struct page *page;
	int i;

	err = -EMSGSIZE;
	if (npages > MAX_SKB_FRAGS)
		goto failure;

	timeo = sock_sndtimeo(sk, noblock);
	while (!skb) {
		err = sock_error(sk);
		if (err != 0)
			goto failure;

		err = -EPIPE;
		if (sk->sk_shutdown & SEND_SHUTDOWN)
			goto failure;

		if (atomic_read(&sk->sk_wmem_alloc) >= sk->sk_sndbuf) {
			set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
			set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
			err = -EAGAIN;
			if (!timeo)
				goto failure;
			if (signal_pending(current))
				goto interrupted;
			timeo = sock_wait_for_wmem(sk, timeo);
			continue;
		}

		err = -ENOBUFS;
		gfp_mask = sk->sk_allocation;
		if (gfp_mask & __GFP_WAIT)
			gfp_mask |= __GFP_REPEAT;

		skb = alloc_skb(header_len, gfp_mask);
		if (!skb)
			goto failure;

		skb->truesize += data_len;

		for (i = 0; npages > 0; i++) {
			int order = max_page_order;

			while (order) {
				if (npages >= 1 << order) {
					page = alloc_pages(sk->sk_allocation |
							   __GFP_COMP |
							   __GFP_NOWARN |
							   __GFP_NORETRY,
							   order);
					if (page)
						goto fill_page;
				}
				order--;
			}
			page = alloc_page(sk->sk_allocation);
			if (!page)
				goto failure;
fill_page:
			chunk = min_t(unsigned long, data_len,
				      PAGE_SIZE << order);
			skb_fill_page_desc(skb, i, page, 0, chunk);
			data_len -= chunk;
			npages -= 1 << order;
		}
	}

	skb_set_owner_w(skb, sk);
	return skb;

interrupted:
	err = sock_intr_errno(timeo);
failure:
	kfree_skb(skb);
	*errcode = err;
	return NULL;
}
EXPORT_SYMBOL(sock_alloc_send_pskb);

struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
				    int noblock, int *errcode)
{
	return sock_alloc_send_pskb(sk, size, 0, noblock, errcode, 0);
}
EXPORT_SYMBOL(sock_alloc_send_skb);
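
/*
 * Example (editor's sketch, not part of the original file): a datagram
 * sendmsg() implementation usually allocates its buffer through this
 * helper so that it blocks (or not) according to MSG_DONTWAIT:
 *
 *	skb = sock_alloc_send_skb(sk, hlen + len,
 *				  msg->msg_flags & MSG_DONTWAIT, &err);
 *	if (!skb)
 *		goto out;	// err already holds -EAGAIN, -EPIPE, ...
 */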

/* On 32bit arches, an skb frag is limited to 2^15 */
#define SKB_FRAG_PAGE_ORDER	get_order(32768)

/**
 * skb_page_frag_refill - check that a page_frag contains enough room
 * @sz: minimum size of the fragment we want to get
 * @pfrag: pointer to page_frag
 * @prio: priority for memory allocation
 *
 * Note: While this allocator tries to use high order pages, there is
 * no guarantee that allocations succeed. Therefore, @sz MUST be
 * less than or equal to PAGE_SIZE.
 */
bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t prio)
{
	int order;

	if (pfrag->page) {
		if (atomic_read(&pfrag->page->_count) == 1) {
			pfrag->offset = 0;
			return true;
		}
		if (pfrag->offset + sz <= pfrag->size)
			return true;
		put_page(pfrag->page);
	}

	order = SKB_FRAG_PAGE_ORDER;
	do {
		gfp_t gfp = prio;

		if (order)
			gfp |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY;
		pfrag->page = alloc_pages(gfp, order);
		if (likely(pfrag->page)) {
			pfrag->offset = 0;
			pfrag->size = PAGE_SIZE << order;
			return true;
		}
	} while (--order >= 0);

	return false;
}
EXPORT_SYMBOL(skb_page_frag_refill);

bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag)
{
	if (likely(skb_page_frag_refill(32U, pfrag, sk->sk_allocation)))
		return true;

	sk_enter_memory_pressure(sk);
	sk_stream_moderate_sndbuf(sk);
	return false;
}
EXPORT_SYMBOL(sk_page_frag_refill);
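
/*
 * Example (editor's sketch, not part of the original file): the usual
 * pattern in a stream sendmsg() path; the actual copy and error paths
 * are abbreviated, and "my_copy_chunk" is a hypothetical name.
 */
#if 0
static int my_copy_chunk(struct sock *sk, size_t len)
{
	struct page_frag *pfrag = sk_page_frag(sk);

	if (!sk_page_frag_refill(sk, pfrag))
		return -EAGAIN;	/* caller waits for memory */
	/* copy min(len, pfrag->size - pfrag->offset) bytes into
	 * pfrag->page at pfrag->offset, then advance pfrag->offset.
	 */
	return 0;
}
#endif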

static void __lock_sock(struct sock *sk)
	__releases(&sk->sk_lock.slock)
	__acquires(&sk->sk_lock.slock)
{
	DEFINE_WAIT(wait);

	for (;;) {
		prepare_to_wait_exclusive(&sk->sk_lock.wq, &wait,
					  TASK_UNINTERRUPTIBLE);
		spin_unlock_bh(&sk->sk_lock.slock);
		schedule();
		spin_lock_bh(&sk->sk_lock.slock);
		if (!sock_owned_by_user(sk))
			break;
	}
	finish_wait(&sk->sk_lock.wq, &wait);
}

static void __release_sock(struct sock *sk)
	__releases(&sk->sk_lock.slock)
	__acquires(&sk->sk_lock.slock)
{
	struct sk_buff *skb = sk->sk_backlog.head;

	do {
		sk->sk_backlog.head = sk->sk_backlog.tail = NULL;
		bh_unlock_sock(sk);

		do {
			struct sk_buff *next = skb->next;

			prefetch(next);
			WARN_ON_ONCE(skb_dst_is_noref(skb));
			skb->next = NULL;
			sk_backlog_rcv(sk, skb);

			/*
			 * We are in process context here with softirqs
			 * disabled, use cond_resched_softirq() to preempt.
			 * This is safe to do because we've taken the backlog
			 * queue private:
			 */
			cond_resched_softirq();

			skb = next;
		} while (skb != NULL);

		bh_lock_sock(sk);
	} while ((skb = sk->sk_backlog.head) != NULL);

	/*
	 * Doing the zeroing here guarantees we cannot loop forever
	 * while a wild producer attempts to flood us.
	 */
	sk->sk_backlog.len = 0;
}

/**
 * sk_wait_data - wait for data to arrive at sk_receive_queue
 * @sk:    sock to wait on
 * @timeo: for how long
 *
 * Now socket state including sk->sk_err is changed only under lock,
 * hence we may omit checks after joining wait queue.
 * We check receive queue before schedule() only as optimization;
 * it is very likely that release_sock() added new data.
 */
int sk_wait_data(struct sock *sk, long *timeo)
{
	int rc;
	DEFINE_WAIT(wait);

	prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
	set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
	rc = sk_wait_event(sk, timeo, !skb_queue_empty(&sk->sk_receive_queue));
	clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
	finish_wait(sk_sleep(sk), &wait);
	return rc;
}
EXPORT_SYMBOL(sk_wait_data);
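
/*
 * Example (editor's sketch, not part of the original file): a blocking
 * recvmsg() loop built on sk_wait_data(); the socket lock is held by the
 * caller and is dropped/retaken inside sk_wait_event(). Error-queue and
 * sk_err checks are omitted for brevity; "my_wait_for_data" is a
 * hypothetical name.
 */
#if 0
static int my_wait_for_data(struct sock *sk, int flags)
{
	long timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

	while (skb_queue_empty(&sk->sk_receive_queue)) {
		if (!timeo)
			return -EAGAIN;
		if (signal_pending(current))
			return sock_intr_errno(timeo);
		sk_wait_data(sk, &timeo);
	}
	return 0;
}
#endif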

/**
 * __sk_mem_schedule - increase sk_forward_alloc and memory_allocated
 * @sk:   socket
 * @size: memory size to allocate
 * @kind: allocation type
 *
 * If kind is SK_MEM_SEND, it means wmem allocation. Otherwise it means
 * rmem allocation. This function assumes that protocols which have
 * memory_pressure use sk_wmem_queued as write buffer accounting.
 */
int __sk_mem_schedule(struct sock *sk, int size, int kind)
{
	struct proto *prot = sk->sk_prot;
	int amt = sk_mem_pages(size);
	long allocated;
	int parent_status = UNDER_LIMIT;

	sk->sk_forward_alloc += amt * SK_MEM_QUANTUM;

	allocated = sk_memory_allocated_add(sk, amt, &parent_status);

	/* Under limit. */
	if (parent_status == UNDER_LIMIT &&
	    allocated <= sk_prot_mem_limits(sk, 0)) {
		sk_leave_memory_pressure(sk);
		return 1;
	}

	/* Under pressure. (we or our parents) */
	if ((parent_status > SOFT_LIMIT) ||
	    allocated > sk_prot_mem_limits(sk, 1))
		sk_enter_memory_pressure(sk);

	/* Over hard limit (we or our parents) */
	if ((parent_status == OVER_LIMIT) ||
	    (allocated > sk_prot_mem_limits(sk, 2)))
		goto suppress_allocation;

	/* guarantee minimum buffer size under pressure */
	if (kind == SK_MEM_RECV) {
		if (atomic_read(&sk->sk_rmem_alloc) < prot->sysctl_rmem[0])
			return 1;

	} else { /* SK_MEM_SEND */
		if (sk->sk_type == SOCK_STREAM) {
			if (sk->sk_wmem_queued < prot->sysctl_wmem[0])
				return 1;
		} else if (atomic_read(&sk->sk_wmem_alloc) <
			   prot->sysctl_wmem[0])
			return 1;
	}

	if (sk_has_memory_pressure(sk)) {
		int alloc;

		if (!sk_under_memory_pressure(sk))
			return 1;
		alloc = sk_sockets_allocated_read_positive(sk);
		if (sk_prot_mem_limits(sk, 2) > alloc *
		    sk_mem_pages(sk->sk_wmem_queued +
				 atomic_read(&sk->sk_rmem_alloc) +
				 sk->sk_forward_alloc))
			return 1;
	}

suppress_allocation:

	if (kind == SK_MEM_SEND && sk->sk_type == SOCK_STREAM) {
		sk_stream_moderate_sndbuf(sk);

		/* Fail only if socket is _under_ its sndbuf.
		 * In this case we cannot block, so that we have to fail.
		 */
		if (sk->sk_wmem_queued + size >= sk->sk_sndbuf)
			return 1;
	}

	trace_sock_exceed_buf_limit(sk, prot, allocated);

	/* Alas. Undo changes. */
	sk->sk_forward_alloc -= amt * SK_MEM_QUANTUM;

	sk_memory_allocated_sub(sk, amt);

	return 0;
}
EXPORT_SYMBOL(__sk_mem_schedule);
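
/*
 * Example (editor's sketch, not part of the original file): callers do
 * not normally invoke __sk_mem_schedule() directly; they go through the
 * sk_rmem_schedule()/sk_wmem_schedule() inlines, roughly:
 *
 *	if (!sk_rmem_schedule(sk, skb, skb->truesize))
 *		goto drop;		// global limits refused the charge
 *	skb_set_owner_r(skb, sk);	// charges sk_rmem_alloc/fwd_alloc
 */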

/**
 * __sk_mem_reclaim - reclaim memory_allocated
 * @sk: socket
 */
void __sk_mem_reclaim(struct sock *sk)
{
	sk_memory_allocated_sub(sk,
				sk->sk_forward_alloc >> SK_MEM_QUANTUM_SHIFT);
	sk->sk_forward_alloc &= SK_MEM_QUANTUM - 1;

	if (sk_under_memory_pressure(sk) &&
	    (sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0)))
		sk_leave_memory_pressure(sk);
}
EXPORT_SYMBOL(__sk_mem_reclaim);

/*
 * Set of default routines for initialising struct proto_ops when
 * the protocol does not support a particular function. In certain
 * cases where it makes no sense for a protocol to have a "do nothing"
 * function, some default processing is provided.
 */

int sock_no_bind(struct socket *sock, struct sockaddr *saddr, int len)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_bind);

int sock_no_connect(struct socket *sock, struct sockaddr *saddr,
		    int len, int flags)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_connect);

int sock_no_socketpair(struct socket *sock1, struct socket *sock2)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_socketpair);

int sock_no_accept(struct socket *sock, struct socket *newsock, int flags)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_accept);

int sock_no_getname(struct socket *sock, struct sockaddr *saddr,
		    int *len, int peer)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_getname);

unsigned int sock_no_poll(struct file *file, struct socket *sock, poll_table *pt)
{
	return 0;
}
EXPORT_SYMBOL(sock_no_poll);

int sock_no_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_ioctl);

int sock_no_listen(struct socket *sock, int backlog)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_listen);

int sock_no_shutdown(struct socket *sock, int how)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_shutdown);

int sock_no_setsockopt(struct socket *sock, int level, int optname,
		       char __user *optval, unsigned int optlen)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_setsockopt);

int sock_no_getsockopt(struct socket *sock, int level, int optname,
		       char __user *optval, int __user *optlen)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_getsockopt);

int sock_no_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
		    size_t len)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_sendmsg);

int sock_no_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
		    size_t len, int flags)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_recvmsg);

int sock_no_mmap(struct file *file, struct socket *sock, struct vm_area_struct *vma)
{
	/* Mirror missing mmap method error code */
	return -ENODEV;
}
EXPORT_SYMBOL(sock_no_mmap);

ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags)
{
	ssize_t res;
	struct msghdr msg = {.msg_flags = flags};
	struct kvec iov;
	char *kaddr = kmap(page);

	iov.iov_base = kaddr + offset;
	iov.iov_len = size;
	res = kernel_sendmsg(sock, &msg, &iov, 1, size);
	kunmap(page);
	return res;
}
EXPORT_SYMBOL(sock_no_sendpage);
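
/*
 * Example (editor's sketch, not part of the original file): a
 * connectionless protocol wires these stubs straight into its
 * proto_ops; "my_dgram_ops", PF_MYPROTO and the my_* handlers are all
 * hypothetical, datagram_poll and the sock_no_* stubs are real.
 */
#if 0
static const struct proto_ops my_dgram_ops = {
	.family		= PF_MYPROTO,
	.owner		= THIS_MODULE,
	.release	= my_release,
	.bind		= my_bind,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.getname	= my_getname,
	.poll		= datagram_poll,
	.ioctl		= my_ioctl,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= sock_no_setsockopt,
	.getsockopt	= sock_no_getsockopt,
	.sendmsg	= my_sendmsg,
	.recvmsg	= my_recvmsg,
	.mmap		= sock_no_mmap,
	.sendpage	= sock_no_sendpage,
};
#endif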

/*
 * Default Socket Callbacks
 */

static void sock_def_wakeup(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (wq_has_sleeper(wq))
		wake_up_interruptible_all(&wq->wait);
	rcu_read_unlock();
}

static void sock_def_error_report(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (wq_has_sleeper(wq))
		wake_up_interruptible_poll(&wq->wait, POLLERR);
	sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR);
	rcu_read_unlock();
}

static void sock_def_readable(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (wq_has_sleeper(wq))
		wake_up_interruptible_sync_poll(&wq->wait, POLLIN | POLLPRI |
						POLLRDNORM | POLLRDBAND);
	sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
	rcu_read_unlock();
}

static void sock_def_write_space(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();

	/* Do not wake up a writer until he can make "significant"
	 * progress. --DaveM
	 */
	if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) {
		wq = rcu_dereference(sk->sk_wq);
		if (wq_has_sleeper(wq))
			wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
						POLLWRNORM | POLLWRBAND);

		/* Should agree with poll, otherwise some programs break */
		if (sock_writeable(sk))
			sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
	}

	rcu_read_unlock();
}

static void sock_def_destruct(struct sock *sk)
{
	kfree(sk->sk_protinfo);
}

void sk_send_sigurg(struct sock *sk)
{
	if (sk->sk_socket && sk->sk_socket->file)
		if (send_sigurg(&sk->sk_socket->file->f_owner))
			sk_wake_async(sk, SOCK_WAKE_URG, POLL_PRI);
}
EXPORT_SYMBOL(sk_send_sigurg);

void sk_reset_timer(struct sock *sk, struct timer_list *timer,
		    unsigned long expires)
{
	if (!mod_timer(timer, expires))
		sock_hold(sk);
}
EXPORT_SYMBOL(sk_reset_timer);

void sk_stop_timer(struct sock *sk, struct timer_list *timer)
{
	if (del_timer(timer))
		__sock_put(sk);
}
EXPORT_SYMBOL(sk_stop_timer);
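
/*
 * Example (editor's sketch, not part of the original file): because
 * sk_reset_timer() takes a socket reference when it arms the timer, the
 * handler must drop it when it is done; "my_timer_handler" is a
 * hypothetical name.
 */
#if 0
static void my_timer_handler(unsigned long data)
{
	struct sock *sk = (struct sock *)data;

	bh_lock_sock(sk);
	/* ... timer work, possibly re-arming via sk_reset_timer() ... */
	bh_unlock_sock(sk);
	sock_put(sk);	/* pairs with sock_hold() in sk_reset_timer() */
}
#endif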

void sock_init_data(struct socket *sock, struct sock *sk)
{
	skb_queue_head_init(&sk->sk_receive_queue);
	skb_queue_head_init(&sk->sk_write_queue);
	skb_queue_head_init(&sk->sk_error_queue);
#ifdef CONFIG_NET_DMA
	skb_queue_head_init(&sk->sk_async_wait_queue);
#endif

	sk->sk_send_head = NULL;

	init_timer(&sk->sk_timer);

	sk->sk_allocation = GFP_KERNEL;
	sk->sk_rcvbuf = sysctl_rmem_default;
	sk->sk_sndbuf = sysctl_wmem_default;
	sk->sk_state = TCP_CLOSE;
	sk_set_socket(sk, sock);

	sock_set_flag(sk, SOCK_ZAPPED);

	if (sock) {
		sk->sk_type = sock->type;
		sk->sk_wq = sock->wq;
		sock->sk = sk;
	} else
		sk->sk_wq = NULL;

	spin_lock_init(&sk->sk_dst_lock);
	rwlock_init(&sk->sk_callback_lock);
	lockdep_set_class_and_name(&sk->sk_callback_lock,
			af_callback_keys + sk->sk_family,
			af_family_clock_key_strings[sk->sk_family]);

	sk->sk_state_change = sock_def_wakeup;
	sk->sk_data_ready = sock_def_readable;
	sk->sk_write_space = sock_def_write_space;
	sk->sk_error_report = sock_def_error_report;
	sk->sk_destruct = sock_def_destruct;

	sk->sk_frag.page = NULL;
	sk->sk_frag.offset = 0;
	sk->sk_peek_off = -1;

	sk->sk_peer_pid = NULL;
	sk->sk_peer_cred = NULL;
	sk->sk_write_pending = 0;
	sk->sk_rcvlowat = 1;
	sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
	sk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT;

	sk->sk_stamp = ktime_set(-1L, 0);

#ifdef CONFIG_NET_RX_BUSY_POLL
	sk->sk_napi_id = 0;
	sk->sk_ll_usec = sysctl_net_busy_read;
#endif

	sk->sk_max_pacing_rate = ~0U;
	sk->sk_pacing_rate = ~0U;
	/*
	 * Before updating sk_refcnt, we must commit prior changes to memory
	 * (Documentation/RCU/rculist_nulls.txt for details)
	 */
	smp_wmb();
	atomic_set(&sk->sk_refcnt, 1);
	atomic_set(&sk->sk_drops, 0);
}
EXPORT_SYMBOL(sock_init_data);

void lock_sock_nested(struct sock *sk, int subclass)
{
	might_sleep();
	spin_lock_bh(&sk->sk_lock.slock);
	if (sk->sk_lock.owned)
		__lock_sock(sk);
	sk->sk_lock.owned = 1;
	spin_unlock(&sk->sk_lock.slock);
	/*
	 * The sk_lock has mutex_lock() semantics here:
	 */
	mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_);
	local_bh_enable();
}
EXPORT_SYMBOL(lock_sock_nested);

void release_sock(struct sock *sk)
{
	/*
	 * The sk_lock has mutex_unlock() semantics:
	 */
	mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);

	spin_lock_bh(&sk->sk_lock.slock);
	if (sk->sk_backlog.tail)
		__release_sock(sk);

	/* Warning: release_cb() might need to release sk ownership,
	 * i.e. call sock_release_ownership(sk) before us.
	 */
	if (sk->sk_prot->release_cb)
		sk->sk_prot->release_cb(sk);

	sock_release_ownership(sk);
	if (waitqueue_active(&sk->sk_lock.wq))
		wake_up(&sk->sk_lock.wq);
	spin_unlock_bh(&sk->sk_lock.slock);
}
EXPORT_SYMBOL(release_sock);

/**
 * lock_sock_fast - fast version of lock_sock
 * @sk: socket
 *
 * This version should be used for very small sections, where the process
 * won't block.
 *
 * Returns false if the fast path was taken:
 *   sk_lock.slock locked, owned = 0, BH disabled
 * Returns true if the slow path was taken:
 *   sk_lock.slock unlocked, owned = 1, BH enabled
 */
bool lock_sock_fast(struct sock *sk)
{
	might_sleep();
	spin_lock_bh(&sk->sk_lock.slock);

	if (!sk->sk_lock.owned)
		/*
		 * Note: we return with BH disabled on this fast path.
		 */
		return false;

	__lock_sock(sk);
	sk->sk_lock.owned = 1;
	spin_unlock(&sk->sk_lock.slock);
	/*
	 * The sk_lock has mutex_lock() semantics here:
	 */
	mutex_acquire(&sk->sk_lock.dep_map, 0, 0, _RET_IP_);
	local_bh_enable();
	return true;
}
EXPORT_SYMBOL(lock_sock_fast);
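
/*
 * Example (editor's sketch, not part of the original file):
 * lock_sock_fast() pairs with unlock_sock_fast(), which needs the
 * returned slow-path indication; "my_tiny_critical_section" is a
 * hypothetical name.
 */
#if 0
static void my_tiny_critical_section(struct sock *sk)
{
	bool slow = lock_sock_fast(sk);

	/* ... a few instructions that must not block ... */
	unlock_sock_fast(sk, slow);
}
#endif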

int sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp)
{
	struct timeval tv;

	if (!sock_flag(sk, SOCK_TIMESTAMP))
		sock_enable_timestamp(sk, SOCK_TIMESTAMP);
	tv = ktime_to_timeval(sk->sk_stamp);
	if (tv.tv_sec == -1)
		return -ENOENT;
	if (tv.tv_sec == 0) {
		sk->sk_stamp = ktime_get_real();
		tv = ktime_to_timeval(sk->sk_stamp);
	}
	return copy_to_user(userstamp, &tv, sizeof(tv)) ? -EFAULT : 0;
}
EXPORT_SYMBOL(sock_get_timestamp);

int sock_get_timestampns(struct sock *sk, struct timespec __user *userstamp)
{
	struct timespec ts;

	if (!sock_flag(sk, SOCK_TIMESTAMP))
		sock_enable_timestamp(sk, SOCK_TIMESTAMP);
	ts = ktime_to_timespec(sk->sk_stamp);
	if (ts.tv_sec == -1)
		return -ENOENT;
	if (ts.tv_sec == 0) {
		sk->sk_stamp = ktime_get_real();
		ts = ktime_to_timespec(sk->sk_stamp);
	}
	return copy_to_user(userstamp, &ts, sizeof(ts)) ? -EFAULT : 0;
}
EXPORT_SYMBOL(sock_get_timestampns);

void sock_enable_timestamp(struct sock *sk, int flag)
{
	if (!sock_flag(sk, flag)) {
		unsigned long previous_flags = sk->sk_flags;

		sock_set_flag(sk, flag);
		/*
		 * we just set one of the two flags which require net
		 * time stamping, but time stamping might have been on
		 * already because of the other one
		 */
		if (!(previous_flags & SK_FLAGS_TIMESTAMP))
			net_enable_timestamp();
	}
}

int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
		       int level, int type)
{
	struct sock_exterr_skb *serr;
	struct sk_buff *skb, *skb2;
	int copied, err;

	err = -EAGAIN;
	skb = skb_dequeue(&sk->sk_error_queue);
	if (skb == NULL)
		goto out;

	copied = skb->len;
	if (copied > len) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}
	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
	if (err)
		goto out_free_skb;

	sock_recv_timestamp(msg, sk, skb);

	serr = SKB_EXT_ERR(skb);
	put_cmsg(msg, level, type, sizeof(serr->ee), &serr->ee);

	msg->msg_flags |= MSG_ERRQUEUE;
	err = copied;

	/* Reset and regenerate socket error */
	spin_lock_bh(&sk->sk_error_queue.lock);
	sk->sk_err = 0;
	if ((skb2 = skb_peek(&sk->sk_error_queue)) != NULL) {
		sk->sk_err = SKB_EXT_ERR(skb2)->ee.ee_errno;
		spin_unlock_bh(&sk->sk_error_queue.lock);
		sk->sk_error_report(sk);
	} else
		spin_unlock_bh(&sk->sk_error_queue.lock);

out_free_skb:
	kfree_skb(skb);
out:
	return err;
}
EXPORT_SYMBOL(sock_recv_errqueue);

/*
 * Get a socket option on a socket.
 *
 * FIX: POSIX 1003.1g is very ambiguous here. It states that
 * asynchronous errors should be reported by getsockopt. We assume
 * this means if you specify SO_ERROR (otherwise what's the point of it).
 */
int sock_common_getsockopt(struct socket *sock, int level, int optname,
			   char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;

	return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL(sock_common_getsockopt);

#ifdef CONFIG_COMPAT
int compat_sock_common_getsockopt(struct socket *sock, int level, int optname,
				  char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;

	if (sk->sk_prot->compat_getsockopt != NULL)
		return sk->sk_prot->compat_getsockopt(sk, level, optname,
						      optval, optlen);
	return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL(compat_sock_common_getsockopt);
#endif

int sock_common_recvmsg(struct kiocb *iocb, struct socket *sock,
			struct msghdr *msg, size_t size, int flags)
{
	struct sock *sk = sock->sk;
	int addr_len = 0;
	int err;

	err = sk->sk_prot->recvmsg(iocb, sk, msg, size, flags & MSG_DONTWAIT,
				   flags & ~MSG_DONTWAIT, &addr_len);
	if (err >= 0)
		msg->msg_namelen = addr_len;
	return err;
}
EXPORT_SYMBOL(sock_common_recvmsg);

/*
 * Set socket options on an inet socket.
 */
int sock_common_setsockopt(struct socket *sock, int level, int optname,
			   char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;

	return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL(sock_common_setsockopt);

#ifdef CONFIG_COMPAT
int compat_sock_common_setsockopt(struct socket *sock, int level, int optname,
				  char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;

	if (sk->sk_prot->compat_setsockopt != NULL)
		return sk->sk_prot->compat_setsockopt(sk, level, optname,
						      optval, optlen);
	return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL(compat_sock_common_setsockopt);
#endif

void sk_common_release(struct sock *sk)
{
	if (sk->sk_prot->destroy)
		sk->sk_prot->destroy(sk);

	/*
	 * Observation: when sk_common_release is called, processes have
	 * no access to the socket. But the net still has.
	 *
	 * Step one, detach it from networking:
	 *
	 * A. Remove from hash tables.
	 */

	sk->sk_prot->unhash(sk);

	/*
	 * At this point the socket cannot receive new packets, but it is
	 * possible that some packets are in flight because some CPU runs
	 * the receiver and did a hash table lookup before we unhashed the
	 * socket. They will reach the receive queue and will be purged by
	 * the socket destructor.
	 *
	 * Also we still have packets pending on the receive queue and
	 * probably our own packets waiting in device queues. sock_destroy
	 * will drain the receive queue, but transmitted packets will delay
	 * socket destruction until the last reference is released.
	 */

	sock_orphan(sk);

	xfrm_sk_free_policy(sk);

	sk_refcnt_debug_release(sk);

	if (sk->sk_frag.page) {
		put_page(sk->sk_frag.page);
		sk->sk_frag.page = NULL;
	}

	sock_put(sk);
}
EXPORT_SYMBOL(sk_common_release);

#ifdef CONFIG_PROC_FS
#define PROTO_INUSE_NR	64	/* should be enough for the first time */
struct prot_inuse {
	int val[PROTO_INUSE_NR];
};

static DECLARE_BITMAP(proto_inuse_idx, PROTO_INUSE_NR);

#ifdef CONFIG_NET_NS
void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
{
	__this_cpu_add(net->core.inuse->val[prot->inuse_idx], val);
}
EXPORT_SYMBOL_GPL(sock_prot_inuse_add);

int sock_prot_inuse_get(struct net *net, struct proto *prot)
{
	int cpu, idx = prot->inuse_idx;
	int res = 0;

	for_each_possible_cpu(cpu)
		res += per_cpu_ptr(net->core.inuse, cpu)->val[idx];

	return res >= 0 ? res : 0;
}
EXPORT_SYMBOL_GPL(sock_prot_inuse_get);

static int __net_init sock_inuse_init_net(struct net *net)
{
	net->core.inuse = alloc_percpu(struct prot_inuse);
	return net->core.inuse ? 0 : -ENOMEM;
}

static void __net_exit sock_inuse_exit_net(struct net *net)
{
	free_percpu(net->core.inuse);
}

static struct pernet_operations net_inuse_ops = {
	.init = sock_inuse_init_net,
	.exit = sock_inuse_exit_net,
};

static __init int net_inuse_init(void)
{
	if (register_pernet_subsys(&net_inuse_ops))
		panic("Cannot initialize net inuse counters");

	return 0;
}

core_initcall(net_inuse_init);
#else
static DEFINE_PER_CPU(struct prot_inuse, prot_inuse);

void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
{
	__this_cpu_add(prot_inuse.val[prot->inuse_idx], val);
}
EXPORT_SYMBOL_GPL(sock_prot_inuse_add);

int sock_prot_inuse_get(struct net *net, struct proto *prot)
{
	int cpu, idx = prot->inuse_idx;
	int res = 0;

	for_each_possible_cpu(cpu)
		res += per_cpu(prot_inuse, cpu).val[idx];

	return res >= 0 ? res : 0;
}
EXPORT_SYMBOL_GPL(sock_prot_inuse_get);
#endif

static void assign_proto_idx(struct proto *prot)
{
	prot->inuse_idx = find_first_zero_bit(proto_inuse_idx, PROTO_INUSE_NR);

	if (unlikely(prot->inuse_idx == PROTO_INUSE_NR - 1)) {
		pr_err("PROTO_INUSE_NR exhausted\n");
		return;
	}

	set_bit(prot->inuse_idx, proto_inuse_idx);
}

static void release_proto_idx(struct proto *prot)
{
	if (prot->inuse_idx != PROTO_INUSE_NR - 1)
		clear_bit(prot->inuse_idx, proto_inuse_idx);
}
#else
static inline void assign_proto_idx(struct proto *prot)
{
}

static inline void release_proto_idx(struct proto *prot)
{
}
#endif

int proto_register(struct proto *prot, int alloc_slab)
{
	if (alloc_slab) {
		prot->slab = kmem_cache_create(prot->name, prot->obj_size, 0,
					SLAB_HWCACHE_ALIGN | prot->slab_flags,
					NULL);

		if (prot->slab == NULL) {
			pr_crit("%s: Can't create sock SLAB cache!\n",
				prot->name);
			goto out;
		}

		if (prot->rsk_prot != NULL) {
			prot->rsk_prot->slab_name = kasprintf(GFP_KERNEL, "request_sock_%s", prot->name);
			if (prot->rsk_prot->slab_name == NULL)
				goto out_free_sock_slab;

			prot->rsk_prot->slab = kmem_cache_create(prot->rsk_prot->slab_name,
								 prot->rsk_prot->obj_size, 0,
								 SLAB_HWCACHE_ALIGN, NULL);

			if (prot->rsk_prot->slab == NULL) {
				pr_crit("%s: Can't create request sock SLAB cache!\n",
					prot->name);
				goto out_free_request_sock_slab_name;
			}
		}

		if (prot->twsk_prot != NULL) {
			prot->twsk_prot->twsk_slab_name = kasprintf(GFP_KERNEL, "tw_sock_%s", prot->name);

			if (prot->twsk_prot->twsk_slab_name == NULL)
				goto out_free_request_sock_slab;

			prot->twsk_prot->twsk_slab =
				kmem_cache_create(prot->twsk_prot->twsk_slab_name,
						  prot->twsk_prot->twsk_obj_size,
						  0,
						  SLAB_HWCACHE_ALIGN |
							prot->slab_flags,
						  NULL);
			if (prot->twsk_prot->twsk_slab == NULL)
				goto out_free_timewait_sock_slab_name;
		}
	}

	mutex_lock(&proto_list_mutex);
	list_add(&prot->node, &proto_list);
	assign_proto_idx(prot);
	mutex_unlock(&proto_list_mutex);
	return 0;

out_free_timewait_sock_slab_name:
	kfree(prot->twsk_prot->twsk_slab_name);
out_free_request_sock_slab:
	if (prot->rsk_prot && prot->rsk_prot->slab) {
		kmem_cache_destroy(prot->rsk_prot->slab);
		prot->rsk_prot->slab = NULL;
	}
out_free_request_sock_slab_name:
	if (prot->rsk_prot)
		kfree(prot->rsk_prot->slab_name);
out_free_sock_slab:
	kmem_cache_destroy(prot->slab);
	prot->slab = NULL;
out:
	return -ENOBUFS;
}
EXPORT_SYMBOL(proto_register);
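
/*
 * Example (editor's sketch, not part of the original file): the minimal
 * registration of a slab-backed protocol at module init; "my_proto",
 * "struct my_sock" and the init/exit names are hypothetical, and a real
 * protocol would fill in many more struct proto methods.
 */
#if 0
struct my_sock {
	struct sock sk;		/* must come first */
};

static struct proto my_proto = {
	.name	  = "MYPROTO",
	.owner	  = THIS_MODULE,
	.obj_size = sizeof(struct my_sock),
};

static int __init my_proto_init(void)
{
	return proto_register(&my_proto, 1);	/* 1 => create a slab */
}

static void __exit my_proto_exit(void)
{
	proto_unregister(&my_proto);
}
#endif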

void proto_unregister(struct proto *prot)
{
	mutex_lock(&proto_list_mutex);
	release_proto_idx(prot);
	list_del(&prot->node);
	mutex_unlock(&proto_list_mutex);

	if (prot->slab != NULL) {
		kmem_cache_destroy(prot->slab);
		prot->slab = NULL;
	}

	if (prot->rsk_prot != NULL && prot->rsk_prot->slab != NULL) {
		kmem_cache_destroy(prot->rsk_prot->slab);
		kfree(prot->rsk_prot->slab_name);
		prot->rsk_prot->slab = NULL;
	}

	if (prot->twsk_prot != NULL && prot->twsk_prot->twsk_slab != NULL) {
		kmem_cache_destroy(prot->twsk_prot->twsk_slab);
		kfree(prot->twsk_prot->twsk_slab_name);
		prot->twsk_prot->twsk_slab = NULL;
	}
}
EXPORT_SYMBOL(proto_unregister);

#ifdef CONFIG_PROC_FS
static void *proto_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(proto_list_mutex)
{
	mutex_lock(&proto_list_mutex);
	return seq_list_start_head(&proto_list, *pos);
}

static void *proto_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	return seq_list_next(v, &proto_list, pos);
}

static void proto_seq_stop(struct seq_file *seq, void *v)
	__releases(proto_list_mutex)
{
	mutex_unlock(&proto_list_mutex);
}

static char proto_method_implemented(const void *method)
{
	return method == NULL ? 'n' : 'y';
}

static long sock_prot_memory_allocated(struct proto *proto)
{
	return proto->memory_allocated != NULL ? proto_memory_allocated(proto) : -1L;
}

static char *sock_prot_memory_pressure(struct proto *proto)
{
	return proto->memory_pressure != NULL ?
	proto_memory_pressure(proto) ? "yes" : "no" : "NI";
}

static void proto_seq_printf(struct seq_file *seq, struct proto *proto)
{
	seq_printf(seq, "%-9s %4u %6d %6ld %-3s %6u %-3s %-10s "
			"%2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c\n",
		   proto->name,
		   proto->obj_size,
		   sock_prot_inuse_get(seq_file_net(seq), proto),
		   sock_prot_memory_allocated(proto),
		   sock_prot_memory_pressure(proto),
		   proto->max_header,
		   proto->slab == NULL ? "no" : "yes",
		   module_name(proto->owner),
		   proto_method_implemented(proto->close),
		   proto_method_implemented(proto->connect),
		   proto_method_implemented(proto->disconnect),
		   proto_method_implemented(proto->accept),
		   proto_method_implemented(proto->ioctl),
		   proto_method_implemented(proto->init),
		   proto_method_implemented(proto->destroy),
		   proto_method_implemented(proto->shutdown),
		   proto_method_implemented(proto->setsockopt),
		   proto_method_implemented(proto->getsockopt),
		   proto_method_implemented(proto->sendmsg),
		   proto_method_implemented(proto->recvmsg),
		   proto_method_implemented(proto->sendpage),
		   proto_method_implemented(proto->bind),
		   proto_method_implemented(proto->backlog_rcv),
		   proto_method_implemented(proto->hash),
		   proto_method_implemented(proto->unhash),
		   proto_method_implemented(proto->get_port),
		   proto_method_implemented(proto->enter_memory_pressure));
}

static int proto_seq_show(struct seq_file *seq, void *v)
{
	if (v == &proto_list)
		seq_printf(seq, "%-9s %-4s %-8s %-6s %-5s %-7s %-4s %-10s %s",
			   "protocol",
			   "size",
			   "sockets",
			   "memory",
			   "press",
			   "maxhdr",
			   "slab",
			   "module",
			   "cl co di ac io in de sh ss gs se re sp bi br ha uh gp em\n");
	else
		proto_seq_printf(seq, list_entry(v, struct proto, node));
	return 0;
}

static const struct seq_operations proto_seq_ops = {
	.start  = proto_seq_start,
	.next   = proto_seq_next,
	.stop   = proto_seq_stop,
	.show   = proto_seq_show,
};

static int proto_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &proto_seq_ops,
			    sizeof(struct seq_net_private));
}

static const struct file_operations proto_seq_fops = {
	.owner		= THIS_MODULE,
	.open		= proto_seq_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_net,
};

static __net_init int proto_init_net(struct net *net)
{
	if (!proc_create("protocols", S_IRUGO, net->proc_net, &proto_seq_fops))
		return -ENOMEM;

	return 0;
}

static __net_exit void proto_exit_net(struct net *net)
{
	remove_proc_entry("protocols", net->proc_net);
}

static __net_initdata struct pernet_operations proto_net_ops = {
	.init = proto_init_net,
	.exit = proto_exit_net,
};

static int __init proto_init(void)
{
	return register_pernet_subsys(&proto_net_ops);
}

subsys_initcall(proto_init);

#endif /* PROC_FS */