tcp.c

  1. /*
  2. * INET An implementation of the TCP/IP protocol suite for the LINUX
  3. * operating system. INET is implemented using the BSD Socket
  4. * interface as the means of communication with the user level.
  5. *
  6. * Implementation of the Transmission Control Protocol(TCP).
  7. *
  8. * Authors: Ross Biro
  9. * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
  10. * Mark Evans, <evansmp@uhura.aston.ac.uk>
  11. * Corey Minyard <wf-rch!minyard@relay.EU.net>
  12. * Florian La Roche, <flla@stud.uni-sb.de>
  13. * Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
  14. * Linus Torvalds, <torvalds@cs.helsinki.fi>
  15. * Alan Cox, <gw4pts@gw4pts.ampr.org>
  16. * Matthew Dillon, <dillon@apollo.west.oic.com>
  17. * Arnt Gulbrandsen, <agulbra@nvg.unit.no>
  18. * Jorge Cwik, <jorge@laser.satlink.net>
  19. *
  20. * Fixes:
  21. * Alan Cox : Numerous verify_area() calls
  22. * Alan Cox : Set the ACK bit on a reset
  23. * Alan Cox : Stopped it crashing if it closed while
  24. * sk->inuse=1 and was trying to connect
  25. * (tcp_err()).
  26. * Alan Cox : All icmp error handling was broken
  27. * pointers passed where wrong and the
  28. * socket was looked up backwards. Nobody
  29. * tested any icmp error code obviously.
  30. * Alan Cox : tcp_err() now handled properly. It
  31. * wakes people on errors. poll
  32. * behaves and the icmp error race
  33. * has gone by moving it into sock.c
  34. * Alan Cox : tcp_send_reset() fixed to work for
  35. * everything not just packets for
  36. * unknown sockets.
  37. * Alan Cox : tcp option processing.
  38. * Alan Cox : Reset tweaked (still not 100%) [Had
  39. * syn rule wrong]
  40. * Herp Rosmanith : More reset fixes
  41. * Alan Cox : No longer acks invalid rst frames.
  42. * Acking any kind of RST is right out.
  43. * Alan Cox : Sets an ignore me flag on an rst
  44. * receive otherwise odd bits of prattle
  45. * escape still
  46. * Alan Cox : Fixed another acking RST frame bug.
  47. * Should stop LAN workplace lockups.
  48. * Alan Cox : Some tidyups using the new skb list
  49. * facilities
  50. * Alan Cox : sk->keepopen now seems to work
  51. * Alan Cox : Pulls options out correctly on accepts
  52. * Alan Cox : Fixed assorted sk->rqueue->next errors
  53. * Alan Cox : PSH doesn't end a TCP read. Switched a
  54. * bit to skb ops.
  55. * Alan Cox : Tidied tcp_data to avoid a potential
  56. * nasty.
  57. * Alan Cox : Added some better commenting, as the
  58. * tcp is hard to follow
  59. * Alan Cox : Removed incorrect check for 20 * psh
  60. * Michael O'Reilly : ack < copied bug fix.
  61. * Johannes Stille : Misc tcp fixes (not all in yet).
  62. * Alan Cox : FIN with no memory -> CRASH
  63. * Alan Cox : Added socket option proto entries.
  64. * Also added awareness of them to accept.
  65. * Alan Cox : Added TCP options (SOL_TCP)
  66. * Alan Cox : Switched wakeup calls to callbacks,
  67. * so the kernel can layer network
  68. * sockets.
  69. * Alan Cox : Use ip_tos/ip_ttl settings.
  70. * Alan Cox : Handle FIN (more) properly (we hope).
  71. * Alan Cox : RST frames sent on unsynchronised
  72. * state ack error.
  73. * Alan Cox : Put in missing check for SYN bit.
  74. * Alan Cox : Added tcp_select_window() aka NET2E
  75. * window non shrink trick.
  76. * Alan Cox : Added a couple of small NET2E timer
  77. * fixes
  78. * Charles Hedrick : TCP fixes
  79. * Toomas Tamm : TCP window fixes
  80. * Alan Cox : Small URG fix to rlogin ^C ack fight
  81. * Charles Hedrick : Rewrote most of it to actually work
  82. * Linus : Rewrote tcp_read() and URG handling
  83. * completely
  84. * Gerhard Koerting: Fixed some missing timer handling
  85. * Matthew Dillon : Reworked TCP machine states as per RFC
  86. * Gerhard Koerting: PC/TCP workarounds
  87. * Adam Caldwell : Assorted timer/timing errors
  88. * Matthew Dillon : Fixed another RST bug
  89. * Alan Cox : Move to kernel side addressing changes.
  90. * Alan Cox : Beginning work on TCP fastpathing
  91. * (not yet usable)
  92. * Arnt Gulbrandsen: Turbocharged tcp_check() routine.
  93. * Alan Cox : TCP fast path debugging
  94. * Alan Cox : Window clamping
  95. * Michael Riepe : Bug in tcp_check()
  96. * Matt Dillon : More TCP improvements and RST bug fixes
  97. * Matt Dillon : Yet more small nasties removed from the
  98. * TCP code (Be very nice to this man if
  99. * tcp finally works 100%) 8)
  100. * Alan Cox : BSD accept semantics.
  101. * Alan Cox : Reset on closedown bug.
  102. * Peter De Schrijver : ENOTCONN check missing in tcp_sendto().
  103. * Michael Pall : Handle poll() after URG properly in
  104. * all cases.
  105. * Michael Pall : Undo the last fix in tcp_read_urg()
  106. * (multi URG PUSH broke rlogin).
  107. * Michael Pall : Fix the multi URG PUSH problem in
  108. * tcp_readable(), poll() after URG
  109. * works now.
  110. * Michael Pall : recv(...,MSG_OOB) never blocks in the
  111. * BSD api.
  112. * Alan Cox : Changed the semantics of sk->socket to
  113. * fix a race and a signal problem with
  114. * accept() and async I/O.
  115. * Alan Cox : Relaxed the rules on tcp_sendto().
  116. * Yury Shevchuk : Really fixed accept() blocking problem.
  117. * Craig I. Hagan : Allow for BSD compatible TIME_WAIT for
  118. * clients/servers which listen in on
  119. * fixed ports.
  120. * Alan Cox : Cleaned the above up and shrank it to
  121. * a sensible code size.
  122. * Alan Cox : Self connect lockup fix.
  123. * Alan Cox : No connect to multicast.
  124. * Ross Biro : Close unaccepted children on master
  125. * socket close.
  126. * Alan Cox : Reset tracing code.
  127. * Alan Cox : Spurious resets on shutdown.
  128. * Alan Cox : Giant 15 minute/60 second timer error
  129. * Alan Cox : Small whoops in polling before an
  130. * accept.
  131. * Alan Cox : Kept the state trace facility since
  132. * it's handy for debugging.
  133. * Alan Cox : More reset handler fixes.
  134. * Alan Cox : Started rewriting the code based on
  135. * the RFC's for other useful protocol
  136. * references see: Comer, KA9Q NOS, and
  137. * for a reference on the difference
  138. * between specifications and how BSD
  139. * works see the 4.4lite source.
  140. * A.N.Kuznetsov : Don't time wait on completion of tidy
  141. * close.
  142. * Linus Torvalds : Fin/Shutdown & copied_seq changes.
  143. * Linus Torvalds : Fixed BSD port reuse to work first syn
  144. * Alan Cox : Reimplemented timers as per the RFC
  145. * and using multiple timers for sanity.
  146. * Alan Cox : Small bug fixes, and a lot of new
  147. * comments.
  148. * Alan Cox : Fixed dual reader crash by locking
  149. * the buffers (much like datagram.c)
  150. * Alan Cox : Fixed stuck sockets in probe. A probe
  151. * now gets fed up of retrying without
  152. * (even a no space) answer.
  153. * Alan Cox : Extracted closing code better
  154. * Alan Cox : Fixed the closing state machine to
  155. * resemble the RFC.
  156. * Alan Cox : More 'per spec' fixes.
  157. * Jorge Cwik : Even faster checksumming.
  158. * Alan Cox : tcp_data() doesn't ack illegal PSH
  159. * only frames. At least one pc tcp stack
  160. * generates them.
  161. * Alan Cox : Cache last socket.
  162. * Alan Cox : Per route irtt.
  163. * Matt Day : poll()->select() match BSD precisely on error
  164. * Alan Cox : New buffers
  165. * Marc Tamsky : Various sk->prot->retransmits and
  166. * sk->retransmits misupdating fixed.
  167. * Fixed tcp_write_timeout: stuck close,
  168. * and TCP syn retries gets used now.
  169. * Mark Yarvis : In tcp_read_wakeup(), don't send an
  170. * ack if state is TCP_CLOSED.
  171. * Alan Cox : Look up device on a retransmit - routes may
  172. * change. Doesn't yet cope with MSS shrink right
  173. * but it's a start!
  174. * Marc Tamsky : Closing in closing fixes.
  175. * Mike Shaver : RFC1122 verifications.
  176. * Alan Cox : rcv_saddr errors.
  177. * Alan Cox : Block double connect().
  178. * Alan Cox : Small hooks for enSKIP.
  179. * Alexey Kuznetsov: Path MTU discovery.
  180. * Alan Cox : Support soft errors.
  181. * Alan Cox : Fix MTU discovery pathological case
  182. * when the remote claims no mtu!
  183. * Marc Tamsky : TCP_CLOSE fix.
  184. * Colin (G3TNE) : Send a reset on syn ack replies in
  185. * window but wrong (fixes NT lpd problems)
  186. * Pedro Roque : Better TCP window handling, delayed ack.
  187. * Joerg Reuter : No modification of locked buffers in
  188. * tcp_do_retransmit()
  189. * Eric Schenk : Changed receiver side silly window
  190. * avoidance algorithm to BSD style
  191. * algorithm. This doubles throughput
  192. * against machines running Solaris,
  193. * and seems to result in general
  194. * improvement.
  195. * Stefan Magdalinski : adjusted tcp_readable() to fix FIONREAD
  196. * Willy Konynenberg : Transparent proxying support.
  197. * Mike McLagan : Routing by source
  198. * Keith Owens : Do proper merging with partial SKB's in
  199. * tcp_do_sendmsg to avoid burstiness.
  200. * Eric Schenk : Fix fast close down bug with
  201. * shutdown() followed by close().
  202. * Andi Kleen : Make poll agree with SIGIO
  203. * Salvatore Sanfilippo : Support SO_LINGER with linger == 1 and
  204. * lingertime == 0 (RFC 793 ABORT Call)
  205. * Hirokazu Takahashi : Use copy_from_user() instead of
  206. * csum_and_copy_from_user() if possible.
  207. *
  208. * This program is free software; you can redistribute it and/or
  209. * modify it under the terms of the GNU General Public License
  210. * as published by the Free Software Foundation; either version
  211. * 2 of the License, or (at your option) any later version.
  212. *
  213. * Description of States:
  214. *
  215. * TCP_SYN_SENT sent a connection request, waiting for ack
  216. *
  217. * TCP_SYN_RECV received a connection request, sent ack,
  218. * waiting for final ack in three-way handshake.
  219. *
  220. * TCP_ESTABLISHED connection established
  221. *
  222. * TCP_FIN_WAIT1 our side has shutdown, waiting to complete
  223. * transmission of remaining buffered data
  224. *
  225. * TCP_FIN_WAIT2 all buffered data sent, waiting for remote
  226. * to shutdown
  227. *
  228. * TCP_CLOSING both sides have shutdown but we still have
  229. * data we have to finish sending
  230. *
  231. * TCP_TIME_WAIT timeout to catch resent junk before entering
  232. * closed, can only be entered from FIN_WAIT2
  233. * or CLOSING. Required because the other end
  234. * may not have gotten our last ACK causing it
  235. * to retransmit the data packet (which we ignore)
  236. *
  237. * TCP_CLOSE_WAIT remote side has shutdown and is waiting for
  238. * us to finish writing our data and to shutdown
  239. * (we have to close() to move on to LAST_ACK)
  240. *
  241. * TCP_LAST_ACK our side has shutdown after remote has
  242. * shutdown. There may still be data in our
  243. * buffer that we have to finish sending
  244. *
  245. * TCP_CLOSE socket is finished
  246. */
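/*
 * Userspace context (illustrative sketch, not part of the original file):
 * the states listed above are what userspace observes, e.g. in the
 * tcpi_state field returned by getsockopt(TCP_INFO) or in the "st" column
 * of /proc/net/tcp. A hypothetical query, error handling omitted:
 *
 *	struct tcp_info ti;
 *	socklen_t len = sizeof(ti);
 *	getsockopt(fd, IPPROTO_TCP, TCP_INFO, &ti, &len);
 *	if (ti.tcpi_state == TCP_ESTABLISHED)
 *		connection_is_up = 1;
 */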
  247. #define pr_fmt(fmt) "TCP: " fmt
  248. #include <linux/kernel.h>
  249. #include <linux/module.h>
  250. #include <linux/types.h>
  251. #include <linux/fcntl.h>
  252. #include <linux/poll.h>
  253. #include <linux/inet_diag.h>
  254. #include <linux/init.h>
  255. #include <linux/fs.h>
  256. #include <linux/skbuff.h>
  257. #include <linux/scatterlist.h>
  258. #include <linux/splice.h>
  259. #include <linux/net.h>
  260. #include <linux/socket.h>
  261. #include <linux/random.h>
  262. #include <linux/bootmem.h>
  263. #include <linux/highmem.h>
  264. #include <linux/swap.h>
  265. #include <linux/cache.h>
  266. #include <linux/err.h>
  267. #include <linux/crypto.h>
  268. #include <linux/time.h>
  269. #include <linux/slab.h>
  270. #include <net/icmp.h>
  271. #include <net/inet_common.h>
  272. #include <net/tcp.h>
  273. #include <net/xfrm.h>
  274. #include <net/ip.h>
  275. #include <net/sock.h>
  276. #include <asm/uaccess.h>
  277. #include <asm/ioctls.h>
  278. #include <asm/unaligned.h>
  279. #include <net/busy_poll.h>
  280. int sysctl_tcp_fin_timeout __read_mostly = TCP_FIN_TIMEOUT;
  281. int sysctl_tcp_min_tso_segs __read_mostly = 2;
  282. int sysctl_tcp_autocorking __read_mostly = 1;
  283. struct percpu_counter tcp_orphan_count;
  284. EXPORT_SYMBOL_GPL(tcp_orphan_count);
  285. long sysctl_tcp_mem[3] __read_mostly;
  286. int sysctl_tcp_wmem[3] __read_mostly;
  287. int sysctl_tcp_rmem[3] __read_mostly;
  288. EXPORT_SYMBOL(sysctl_tcp_mem);
  289. EXPORT_SYMBOL(sysctl_tcp_rmem);
  290. EXPORT_SYMBOL(sysctl_tcp_wmem);
  291. atomic_long_t tcp_memory_allocated; /* Current allocated memory. */
  292. EXPORT_SYMBOL(tcp_memory_allocated);
  293. /*
  294. * Current number of TCP sockets.
  295. */
  296. struct percpu_counter tcp_sockets_allocated;
  297. EXPORT_SYMBOL(tcp_sockets_allocated);
  298. /*
  299. * TCP splice context
  300. */
  301. struct tcp_splice_state {
  302. struct pipe_inode_info *pipe;
  303. size_t len;
  304. unsigned int flags;
  305. };
  306. /*
  307. * Pressure flag: try to collapse.
  308. * Technical note: it is used by multiple contexts non atomically.
  309. * All the __sk_mem_schedule() is of this nature: accounting
  310. * is strict, actions are advisory and have some latency.
  311. */
  312. int tcp_memory_pressure __read_mostly;
  313. EXPORT_SYMBOL(tcp_memory_pressure);
  314. void tcp_enter_memory_pressure(struct sock *sk)
  315. {
  316. if (!tcp_memory_pressure) {
  317. NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMEMORYPRESSURES);
  318. tcp_memory_pressure = 1;
  319. }
  320. }
  321. EXPORT_SYMBOL(tcp_enter_memory_pressure);
  322. /* Convert seconds to retransmits based on initial and max timeout */
  323. static u8 secs_to_retrans(int seconds, int timeout, int rto_max)
  324. {
  325. u8 res = 0;
  326. if (seconds > 0) {
  327. int period = timeout;
  328. res = 1;
  329. while (seconds > period && res < 255) {
  330. res++;
  331. timeout <<= 1;
  332. if (timeout > rto_max)
  333. timeout = rto_max;
  334. period += timeout;
  335. }
  336. }
  337. return res;
  338. }
  339. /* Convert retransmits to seconds based on initial and max timeout */
  340. static int retrans_to_secs(u8 retrans, int timeout, int rto_max)
  341. {
  342. int period = 0;
  343. if (retrans > 0) {
  344. period = timeout;
  345. while (--retrans) {
  346. timeout <<= 1;
  347. if (timeout > rto_max)
  348. timeout = rto_max;
  349. period += timeout;
  350. }
  351. }
  352. return period;
  353. }
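/*
 * Worked example (illustrative, not from the original source): with an
 * initial timeout of 1 s and an rto_max of 120 s, secs_to_retrans(10, 1, 120)
 * walks the exponential backoff series 1 + 2 + 4 + 8 = 15 s and returns 4,
 * the smallest retransmit count whose cumulative backoff covers 10 s.
 * The inverse, retrans_to_secs(4, 1, 120), walks the same series and
 * returns 15.
 */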
  354. /* Address-family independent initialization for a tcp_sock.
  355. *
  356. * NOTE: A lot of things are set to zero explicitly by the call to
  357. * sk_alloc(), so they need not be done here.
  358. */
  359. void tcp_init_sock(struct sock *sk)
  360. {
  361. struct inet_connection_sock *icsk = inet_csk(sk);
  362. struct tcp_sock *tp = tcp_sk(sk);
  363. __skb_queue_head_init(&tp->out_of_order_queue);
  364. tcp_init_xmit_timers(sk);
  365. tcp_prequeue_init(tp);
  366. INIT_LIST_HEAD(&tp->tsq_node);
  367. icsk->icsk_rto = TCP_TIMEOUT_INIT;
  368. tp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT);
  369. tp->rtt_min[0].rtt = ~0U;
  370. /* So many TCP implementations out there (incorrectly) count the
  371. * initial SYN frame in their delayed-ACK and congestion control
  372. * algorithms that we must have the following bandaid to talk
  373. * efficiently to them. -DaveM
  374. */
  375. tp->snd_cwnd = TCP_INIT_CWND;
  376. /* See draft-stevens-tcpca-spec-01 for discussion of the
  377. * initialization of these values.
  378. */
  379. tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
  380. tp->snd_cwnd_clamp = ~0;
  381. tp->mss_cache = TCP_MSS_DEFAULT;
  382. u64_stats_init(&tp->syncp);
  383. tp->reordering = sysctl_tcp_reordering;
  384. tcp_enable_early_retrans(tp);
  385. tcp_assign_congestion_control(sk);
  386. tp->tsoffset = 0;
  387. sk->sk_state = TCP_CLOSE;
  388. sk->sk_write_space = sk_stream_write_space;
  389. sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
  390. icsk->icsk_sync_mss = tcp_sync_mss;
  391. sk->sk_sndbuf = sysctl_tcp_wmem[1];
  392. sk->sk_rcvbuf = sysctl_tcp_rmem[1];
  393. local_bh_disable();
  394. if (mem_cgroup_sockets_enabled)
  395. sock_update_memcg(sk);
  396. sk_sockets_allocated_inc(sk);
  397. local_bh_enable();
  398. }
  399. EXPORT_SYMBOL(tcp_init_sock);
  400. static void tcp_tx_timestamp(struct sock *sk, struct sk_buff *skb)
  401. {
  402. if (sk->sk_tsflags) {
  403. struct skb_shared_info *shinfo = skb_shinfo(skb);
  404. sock_tx_timestamp(sk, &shinfo->tx_flags);
  405. if (shinfo->tx_flags & SKBTX_ANY_TSTAMP)
  406. shinfo->tskey = TCP_SKB_CB(skb)->seq + skb->len - 1;
  407. }
  408. }
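/*
 * Userspace context (illustrative sketch, not part of the original file):
 * the flags copied by tcp_tx_timestamp() are requested with the standard
 * SO_TIMESTAMPING socket option from <linux/net_tstamp.h>. Assumed flag
 * combination, error handling omitted:
 *
 *	int val = SOF_TIMESTAMPING_TX_ACK | SOF_TIMESTAMPING_SOFTWARE |
 *		  SOF_TIMESTAMPING_OPT_ID;
 *	setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING, &val, sizeof(val));
 *	send(fd, buf, len, 0);
 *
 * The generated timestamp is read back later from the socket error queue
 * with recvmsg(fd, &msg, MSG_ERRQUEUE).
 */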
  409. /*
  410. * Wait for a TCP event.
  411. *
  412. * Note that we don't need to lock the socket, as the upper poll layers
  413. * take care of normal races (between the test and the event) and we don't
  414. * go look at any of the socket buffers directly.
  415. */
  416. unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
  417. {
  418. unsigned int mask;
  419. struct sock *sk = sock->sk;
  420. const struct tcp_sock *tp = tcp_sk(sk);
  421. int state;
  422. sock_rps_record_flow(sk);
  423. sock_poll_wait(file, sk_sleep(sk), wait);
  424. state = sk_state_load(sk);
  425. if (state == TCP_LISTEN)
  426. return inet_csk_listen_poll(sk);
  427. /* Socket is not locked. We are protected from async events
  428. * by poll logic and correct handling of state changes
  429. * made by other threads is impossible in any case.
  430. */
  431. mask = 0;
  432. /*
  433. * POLLHUP is certainly not done right. But poll() doesn't
  434. * have a notion of HUP in just one direction, and for a
  435. * socket the read side is more interesting.
  436. *
  437. * Some poll() documentation says that POLLHUP is incompatible
  438. * with the POLLOUT/POLLWR flags, so somebody should check this
  439. * all. But careful, it tends to be safer to return too many
  440. * bits than too few, and you can easily break real applications
  441. * if you don't tell them that something has hung up!
  442. *
  443. * Check-me.
  444. *
  445. * Check number 1. POLLHUP is _UNMASKABLE_ event (see UNIX98 and
  446. * our fs/select.c). It means that after we received EOF,
  447. * poll always returns immediately, making impossible poll() on write()
  448. * in state CLOSE_WAIT. One solution is evident --- to set POLLHUP
  449. * if and only if shutdown has been made in both directions.
  450. * Actually, it is interesting to look how Solaris and DUX
  451. * solve this dilemma. I would prefer, if POLLHUP were maskable,
  452. * then we could set it on SND_SHUTDOWN. BTW examples given
  453. * in Stevens' books assume exactly this behaviour, it explains
  454. * why POLLHUP is incompatible with POLLOUT. --ANK
  455. *
  456. * NOTE. Check for TCP_CLOSE is added. The goal is to prevent
  457. * blocking on fresh not-connected or disconnected socket. --ANK
  458. */
  459. if (sk->sk_shutdown == SHUTDOWN_MASK || state == TCP_CLOSE)
  460. mask |= POLLHUP;
  461. if (sk->sk_shutdown & RCV_SHUTDOWN)
  462. mask |= POLLIN | POLLRDNORM | POLLRDHUP;
  463. /* Connected or passive Fast Open socket? */
  464. if (state != TCP_SYN_SENT &&
  465. (state != TCP_SYN_RECV || tp->fastopen_rsk)) {
  466. int target = sock_rcvlowat(sk, 0, INT_MAX);
  467. if (tp->urg_seq == tp->copied_seq &&
  468. !sock_flag(sk, SOCK_URGINLINE) &&
  469. tp->urg_data)
  470. target++;
  471. if (tp->rcv_nxt - tp->copied_seq >= target)
  472. mask |= POLLIN | POLLRDNORM;
  473. if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
  474. if (sk_stream_is_writeable(sk)) {
  475. mask |= POLLOUT | POLLWRNORM;
  476. } else { /* send SIGIO later */
  477. sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
  478. set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
  479. /* Race breaker. If space is freed after
  480. * wspace test but before the flags are set,
  481. * IO signal will be lost. Memory barrier
  482. * pairs with the input side.
  483. */
  484. smp_mb__after_atomic();
  485. if (sk_stream_is_writeable(sk))
  486. mask |= POLLOUT | POLLWRNORM;
  487. }
  488. } else
  489. mask |= POLLOUT | POLLWRNORM;
  490. if (tp->urg_data & TCP_URG_VALID)
  491. mask |= POLLPRI;
  492. }
  493. /* This barrier is coupled with smp_wmb() in tcp_reset() */
  494. smp_rmb();
  495. if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
  496. mask |= POLLERR;
  497. return mask;
  498. }
  499. EXPORT_SYMBOL(tcp_poll);
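/*
 * Userspace context (illustrative sketch, not part of the original file):
 * the mask assembled above is what poll()/epoll callers observe. A
 * hypothetical reader that also asks for POLLRDHUP (needs _GNU_SOURCE) to
 * notice the peer's half-close; handle_error() is a placeholder:
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN | POLLOUT | POLLRDHUP };
 *	if (poll(&pfd, 1, timeout_ms) > 0) {
 *		if (pfd.revents & POLLIN)
 *			read(fd, buf, sizeof(buf));
 *		if (pfd.revents & POLLRDHUP)
 *			peer_closed = 1;
 *		if (pfd.revents & (POLLERR | POLLHUP))
 *			handle_error(fd);
 *	}
 */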
  500. int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
  501. {
  502. struct tcp_sock *tp = tcp_sk(sk);
  503. int answ;
  504. bool slow;
  505. switch (cmd) {
  506. case SIOCINQ:
  507. if (sk->sk_state == TCP_LISTEN)
  508. return -EINVAL;
  509. slow = lock_sock_fast(sk);
  510. if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
  511. answ = 0;
  512. else if (sock_flag(sk, SOCK_URGINLINE) ||
  513. !tp->urg_data ||
  514. before(tp->urg_seq, tp->copied_seq) ||
  515. !before(tp->urg_seq, tp->rcv_nxt)) {
  516. answ = tp->rcv_nxt - tp->copied_seq;
  517. /* Subtract 1, if FIN was received */
  518. if (answ && sock_flag(sk, SOCK_DONE))
  519. answ--;
  520. } else
  521. answ = tp->urg_seq - tp->copied_seq;
  522. unlock_sock_fast(sk, slow);
  523. break;
  524. case SIOCATMARK:
  525. answ = tp->urg_data && tp->urg_seq == tp->copied_seq;
  526. break;
  527. case SIOCOUTQ:
  528. if (sk->sk_state == TCP_LISTEN)
  529. return -EINVAL;
  530. if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
  531. answ = 0;
  532. else
  533. answ = tp->write_seq - tp->snd_una;
  534. break;
  535. case SIOCOUTQNSD:
  536. if (sk->sk_state == TCP_LISTEN)
  537. return -EINVAL;
  538. if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
  539. answ = 0;
  540. else
  541. answ = tp->write_seq - tp->snd_nxt;
  542. break;
  543. default:
  544. return -ENOIOCTLCMD;
  545. }
  546. return put_user(answ, (int __user *)arg);
  547. }
  548. EXPORT_SYMBOL(tcp_ioctl);
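/*
 * Userspace context (illustrative sketch, not part of the original file):
 * the ioctls handled above are the usual way to inspect queue occupancy on
 * a TCP socket; SIOCINQ is the same value as FIONREAD. Error handling
 * omitted:
 *
 *	int unread, unsent;
 *	ioctl(fd, SIOCINQ, &unread);	(rcv_nxt - copied_seq, bytes readable)
 *	ioctl(fd, SIOCOUTQ, &unsent);	(write_seq - snd_una, bytes not yet acked)
 */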
  549. static inline void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb)
  550. {
  551. TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH;
  552. tp->pushed_seq = tp->write_seq;
  553. }
  554. static inline bool forced_push(const struct tcp_sock *tp)
  555. {
  556. return after(tp->write_seq, tp->pushed_seq + (tp->max_window >> 1));
  557. }
  558. static void skb_entail(struct sock *sk, struct sk_buff *skb)
  559. {
  560. struct tcp_sock *tp = tcp_sk(sk);
  561. struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
  562. skb->csum = 0;
  563. tcb->seq = tcb->end_seq = tp->write_seq;
  564. tcb->tcp_flags = TCPHDR_ACK;
  565. tcb->sacked = 0;
  566. __skb_header_release(skb);
  567. tcp_add_write_queue_tail(sk, skb);
  568. sk->sk_wmem_queued += skb->truesize;
  569. sk_mem_charge(sk, skb->truesize);
  570. if (tp->nonagle & TCP_NAGLE_PUSH)
  571. tp->nonagle &= ~TCP_NAGLE_PUSH;
  572. tcp_slow_start_after_idle_check(sk);
  573. }
  574. static inline void tcp_mark_urg(struct tcp_sock *tp, int flags)
  575. {
  576. if (flags & MSG_OOB)
  577. tp->snd_up = tp->write_seq;
  578. }
  579. /* If a not-yet-filled skb is pushed, do not send it if
  580. * we have data packets in Qdisc or NIC queues:
  581. * Because TX completion will happen shortly, it gives a chance
  582. * to coalesce future sendmsg() payload into this skb, without
  583. * need for a timer, and with no latency trade off.
  584. * As packets containing data payload have a bigger truesize
  585. * than pure acks (dataless) packets, the last checks prevent
  586. * autocorking if we only have an ACK in Qdisc/NIC queues,
  587. * or if TX completion was delayed after we processed ACK packet.
  588. */
  589. static bool tcp_should_autocork(struct sock *sk, struct sk_buff *skb,
  590. int size_goal)
  591. {
  592. return skb->len < size_goal &&
  593. sysctl_tcp_autocorking &&
  594. skb != tcp_write_queue_head(sk) &&
  595. atomic_read(&sk->sk_wmem_alloc) > skb->truesize;
  596. }
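/*
 * Administrative note (illustrative, not from the original source): the
 * sysctl_tcp_autocorking knob checked above is exposed to userspace as
 * /proc/sys/net/ipv4/tcp_autocorking (enabled, i.e. 1, by default). A
 * minimal sketch of reading it; error handling omitted:
 *
 *	char v = '1';
 *	int f = open("/proc/sys/net/ipv4/tcp_autocorking", O_RDONLY);
 *	if (f >= 0)
 *		read(f, &v, 1);
 *	autocork_enabled = (v == '1');
 */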
  597. static void tcp_push(struct sock *sk, int flags, int mss_now,
  598. int nonagle, int size_goal)
  599. {
  600. struct tcp_sock *tp = tcp_sk(sk);
  601. struct sk_buff *skb;
  602. if (!tcp_send_head(sk))
  603. return;
  604. skb = tcp_write_queue_tail(sk);
  605. if (!(flags & MSG_MORE) || forced_push(tp))
  606. tcp_mark_push(tp, skb);
  607. tcp_mark_urg(tp, flags);
  608. if (tcp_should_autocork(sk, skb, size_goal)) {
  609. /* avoid atomic op if TSQ_THROTTLED bit is already set */
  610. if (!test_bit(TSQ_THROTTLED, &tp->tsq_flags)) {
  611. NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPAUTOCORKING);
  612. set_bit(TSQ_THROTTLED, &tp->tsq_flags);
  613. }
  614. /* It is possible TX completion already happened
  615. * before we set TSQ_THROTTLED.
  616. */
  617. if (atomic_read(&sk->sk_wmem_alloc) > skb->truesize)
  618. return;
  619. }
  620. if (flags & MSG_MORE)
  621. nonagle = TCP_NAGLE_CORK;
  622. __tcp_push_pending_frames(sk, mss_now, nonagle);
  623. }
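/*
 * Userspace context (illustrative sketch, not part of the original file):
 * the MSG_MORE handling above is the per-call cousin of the TCP_CORK socket
 * option. A hypothetical writer that avoids emitting a small header-only
 * segment:
 *
 *	send(fd, hdr, hdr_len, MSG_MORE);	more data follows, hold back
 *	send(fd, body, body_len, 0);		final piece, push out
 *
 * The same batching can be had with setsockopt(fd, IPPROTO_TCP, TCP_CORK, ...)
 * around a group of writes.
 */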
  624. static int tcp_splice_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb,
  625. unsigned int offset, size_t len)
  626. {
  627. struct tcp_splice_state *tss = rd_desc->arg.data;
  628. int ret;
  629. ret = skb_splice_bits(skb, skb->sk, offset, tss->pipe,
  630. min(rd_desc->count, len), tss->flags,
  631. skb_socket_splice);
  632. if (ret > 0)
  633. rd_desc->count -= ret;
  634. return ret;
  635. }
  636. static int __tcp_splice_read(struct sock *sk, struct tcp_splice_state *tss)
  637. {
  638. /* Store TCP splice context information in read_descriptor_t. */
  639. read_descriptor_t rd_desc = {
  640. .arg.data = tss,
  641. .count = tss->len,
  642. };
  643. return tcp_read_sock(sk, &rd_desc, tcp_splice_data_recv);
  644. }
  645. /**
  646. * tcp_splice_read - splice data from TCP socket to a pipe
  647. * @sock: socket to splice from
  648. * @ppos: position (not valid)
  649. * @pipe: pipe to splice to
  650. * @len: number of bytes to splice
  651. * @flags: splice modifier flags
  652. *
  653. * Description:
  654. * Will read pages from given socket and fill them into a pipe.
  655. *
  656. **/
  657. ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos,
  658. struct pipe_inode_info *pipe, size_t len,
  659. unsigned int flags)
  660. {
  661. struct sock *sk = sock->sk;
  662. struct tcp_splice_state tss = {
  663. .pipe = pipe,
  664. .len = len,
  665. .flags = flags,
  666. };
  667. long timeo;
  668. ssize_t spliced;
  669. int ret;
  670. sock_rps_record_flow(sk);
  671. /*
  672. * We can't seek on a socket input
  673. */
  674. if (unlikely(*ppos))
  675. return -ESPIPE;
  676. ret = spliced = 0;
  677. lock_sock(sk);
  678. timeo = sock_rcvtimeo(sk, sock->file->f_flags & O_NONBLOCK);
  679. while (tss.len) {
  680. ret = __tcp_splice_read(sk, &tss);
  681. if (ret < 0)
  682. break;
  683. else if (!ret) {
  684. if (spliced)
  685. break;
  686. if (sock_flag(sk, SOCK_DONE))
  687. break;
  688. if (sk->sk_err) {
  689. ret = sock_error(sk);
  690. break;
  691. }
  692. if (sk->sk_shutdown & RCV_SHUTDOWN)
  693. break;
  694. if (sk->sk_state == TCP_CLOSE) {
  695. /*
  696. * This occurs when user tries to read
  697. * from never connected socket.
  698. */
  699. if (!sock_flag(sk, SOCK_DONE))
  700. ret = -ENOTCONN;
  701. break;
  702. }
  703. if (!timeo) {
  704. ret = -EAGAIN;
  705. break;
  706. }
  707. sk_wait_data(sk, &timeo, NULL);
  708. if (signal_pending(current)) {
  709. ret = sock_intr_errno(timeo);
  710. break;
  711. }
  712. continue;
  713. }
  714. tss.len -= ret;
  715. spliced += ret;
  716. if (!timeo)
  717. break;
  718. release_sock(sk);
  719. lock_sock(sk);
  720. if (sk->sk_err || sk->sk_state == TCP_CLOSE ||
  721. (sk->sk_shutdown & RCV_SHUTDOWN) ||
  722. signal_pending(current))
  723. break;
  724. }
  725. release_sock(sk);
  726. if (spliced)
  727. return spliced;
  728. return ret;
  729. }
  730. EXPORT_SYMBOL(tcp_splice_read);
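/*
 * Userspace context (illustrative sketch, not part of the original file):
 * this entry point backs splice(2) when the source is a TCP socket. A
 * hypothetical relay from a socket into a pipe and on to a file descriptor;
 * error and short-count handling omitted:
 *
 *	int p[2];
 *	pipe(p);
 *	ssize_t n = splice(sock_fd, NULL, p[1], NULL, 65536, SPLICE_F_MOVE);
 *	if (n > 0)
 *		splice(p[0], NULL, file_fd, NULL, n, SPLICE_F_MOVE);
 */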
  731. struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp,
  732. bool force_schedule)
  733. {
  734. struct sk_buff *skb;
  735. /* The TCP header must be at least 32-bit aligned. */
  736. size = ALIGN(size, 4);
  737. if (unlikely(tcp_under_memory_pressure(sk)))
  738. sk_mem_reclaim_partial(sk);
  739. skb = alloc_skb_fclone(size + sk->sk_prot->max_header, gfp);
  740. if (likely(skb)) {
  741. bool mem_scheduled;
  742. if (force_schedule) {
  743. mem_scheduled = true;
  744. sk_forced_mem_schedule(sk, skb->truesize);
  745. } else {
  746. mem_scheduled = sk_wmem_schedule(sk, skb->truesize);
  747. }
  748. if (likely(mem_scheduled)) {
  749. skb_reserve(skb, sk->sk_prot->max_header);
  750. /*
  751. * Make sure that we have exactly size bytes
  752. * available to the caller, no more, no less.
  753. */
  754. skb->reserved_tailroom = skb->end - skb->tail - size;
  755. return skb;
  756. }
  757. __kfree_skb(skb);
  758. } else {
  759. sk->sk_prot->enter_memory_pressure(sk);
  760. sk_stream_moderate_sndbuf(sk);
  761. }
  762. return NULL;
  763. }
  764. static unsigned int tcp_xmit_size_goal(struct sock *sk, u32 mss_now,
  765. int large_allowed)
  766. {
  767. struct tcp_sock *tp = tcp_sk(sk);
  768. u32 new_size_goal, size_goal;
  769. if (!large_allowed || !sk_can_gso(sk))
  770. return mss_now;
  771. /* Note : tcp_tso_autosize() will eventually split this later */
  772. new_size_goal = sk->sk_gso_max_size - 1 - MAX_TCP_HEADER;
  773. new_size_goal = tcp_bound_to_half_wnd(tp, new_size_goal);
  774. /* We try hard to avoid divides here */
  775. size_goal = tp->gso_segs * mss_now;
  776. if (unlikely(new_size_goal < size_goal ||
  777. new_size_goal >= size_goal + mss_now)) {
  778. tp->gso_segs = min_t(u16, new_size_goal / mss_now,
  779. sk->sk_gso_max_segs);
  780. size_goal = tp->gso_segs * mss_now;
  781. }
  782. return max(size_goal, mss_now);
  783. }
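/*
 * Worked example (illustrative, not from the original source): if mss_now is
 * 1448 and new_size_goal comes out at roughly 65000 bytes, gso_segs becomes
 * min(65000 / 1448, sk_gso_max_segs) = 44 (assuming gso_max_segs is larger),
 * so size_goal = 44 * 1448 = 63712, i.e. the largest whole number of MSS-sized
 * segments below the goal. gso_segs is only recomputed when the goal drifts
 * by at least one MSS, which is the "avoid divides" optimization above.
 */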
  784. static int tcp_send_mss(struct sock *sk, int *size_goal, int flags)
  785. {
  786. int mss_now;
  787. mss_now = tcp_current_mss(sk);
  788. *size_goal = tcp_xmit_size_goal(sk, mss_now, !(flags & MSG_OOB));
  789. return mss_now;
  790. }
  791. static ssize_t do_tcp_sendpages(struct sock *sk, struct page *page, int offset,
  792. size_t size, int flags)
  793. {
  794. struct tcp_sock *tp = tcp_sk(sk);
  795. int mss_now, size_goal;
  796. int err;
  797. ssize_t copied;
  798. long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
  799. /* Wait for a connection to finish. One exception is TCP Fast Open
  800. * (passive side) where data is allowed to be sent before a connection
  801. * is fully established.
  802. */
  803. if (((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) &&
  804. !tcp_passive_fastopen(sk)) {
  805. err = sk_stream_wait_connect(sk, &timeo);
  806. if (err != 0)
  807. goto out_err;
  808. }
  809. sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
  810. mss_now = tcp_send_mss(sk, &size_goal, flags);
  811. copied = 0;
  812. err = -EPIPE;
  813. if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
  814. goto out_err;
  815. while (size > 0) {
  816. struct sk_buff *skb = tcp_write_queue_tail(sk);
  817. int copy, i;
  818. bool can_coalesce;
  819. if (!tcp_send_head(sk) || (copy = size_goal - skb->len) <= 0) {
  820. new_segment:
  821. if (!sk_stream_memory_free(sk))
  822. goto wait_for_sndbuf;
  823. skb = sk_stream_alloc_skb(sk, 0, sk->sk_allocation,
  824. skb_queue_empty(&sk->sk_write_queue));
  825. if (!skb)
  826. goto wait_for_memory;
  827. skb_entail(sk, skb);
  828. copy = size_goal;
  829. }
  830. if (copy > size)
  831. copy = size;
  832. i = skb_shinfo(skb)->nr_frags;
  833. can_coalesce = skb_can_coalesce(skb, i, page, offset);
  834. if (!can_coalesce && i >= MAX_SKB_FRAGS) {
  835. tcp_mark_push(tp, skb);
  836. goto new_segment;
  837. }
  838. if (!sk_wmem_schedule(sk, copy))
  839. goto wait_for_memory;
  840. if (can_coalesce) {
  841. skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
  842. } else {
  843. get_page(page);
  844. skb_fill_page_desc(skb, i, page, offset, copy);
  845. }
  846. skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
  847. skb->len += copy;
  848. skb->data_len += copy;
  849. skb->truesize += copy;
  850. sk->sk_wmem_queued += copy;
  851. sk_mem_charge(sk, copy);
  852. skb->ip_summed = CHECKSUM_PARTIAL;
  853. tp->write_seq += copy;
  854. TCP_SKB_CB(skb)->end_seq += copy;
  855. tcp_skb_pcount_set(skb, 0);
  856. if (!copied)
  857. TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_PSH;
  858. copied += copy;
  859. offset += copy;
  860. size -= copy;
  861. if (!size) {
  862. tcp_tx_timestamp(sk, skb);
  863. goto out;
  864. }
  865. if (skb->len < size_goal || (flags & MSG_OOB))
  866. continue;
  867. if (forced_push(tp)) {
  868. tcp_mark_push(tp, skb);
  869. __tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH);
  870. } else if (skb == tcp_send_head(sk))
  871. tcp_push_one(sk, mss_now);
  872. continue;
  873. wait_for_sndbuf:
  874. set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
  875. wait_for_memory:
  876. tcp_push(sk, flags & ~MSG_MORE, mss_now,
  877. TCP_NAGLE_PUSH, size_goal);
  878. err = sk_stream_wait_memory(sk, &timeo);
  879. if (err != 0)
  880. goto do_error;
  881. mss_now = tcp_send_mss(sk, &size_goal, flags);
  882. }
  883. out:
  884. if (copied && !(flags & MSG_SENDPAGE_NOTLAST))
  885. tcp_push(sk, flags, mss_now, tp->nonagle, size_goal);
  886. return copied;
  887. do_error:
  888. if (copied)
  889. goto out;
  890. out_err:
  891. /* make sure we wake any epoll edge trigger waiter */
  892. if (unlikely(skb_queue_len(&sk->sk_write_queue) == 0 && err == -EAGAIN))
  893. sk->sk_write_space(sk);
  894. return sk_stream_error(sk, flags, err);
  895. }
  896. int tcp_sendpage(struct sock *sk, struct page *page, int offset,
  897. size_t size, int flags)
  898. {
  899. ssize_t res;
  900. if (!(sk->sk_route_caps & NETIF_F_SG) ||
  901. !sk_check_csum_caps(sk))
  902. return sock_no_sendpage(sk->sk_socket, page, offset, size,
  903. flags);
  904. lock_sock(sk);
  905. res = do_tcp_sendpages(sk, page, offset, size, flags);
  906. release_sock(sk);
  907. return res;
  908. }
  909. EXPORT_SYMBOL(tcp_sendpage);
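/*
 * Userspace context (illustrative sketch, not part of the original file):
 * do_tcp_sendpages() is typically reached via sendfile(2) (or splice(2) into
 * a socket), which hands page references to the socket instead of copying
 * through a user buffer. Hypothetical use, with file_size known from fstat()
 * and error handling abbreviated:
 *
 *	off_t off = 0;
 *	while (off < file_size) {
 *		ssize_t n = sendfile(sock_fd, file_fd, &off, file_size - off);
 *		if (n <= 0)
 *			break;
 *	}
 */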
  910. static inline int select_size(const struct sock *sk, bool sg)
  911. {
  912. const struct tcp_sock *tp = tcp_sk(sk);
  913. int tmp = tp->mss_cache;
  914. if (sg) {
  915. if (sk_can_gso(sk)) {
  916. * Small frames won't use a full page:
  917. * Payload will immediately follow tcp header.
  918. */
  919. tmp = SKB_WITH_OVERHEAD(2048 - MAX_TCP_HEADER);
  920. } else {
  921. int pgbreak = SKB_MAX_HEAD(MAX_TCP_HEADER);
  922. if (tmp >= pgbreak &&
  923. tmp <= pgbreak + (MAX_SKB_FRAGS - 1) * PAGE_SIZE)
  924. tmp = pgbreak;
  925. }
  926. }
  927. return tmp;
  928. }
  929. void tcp_free_fastopen_req(struct tcp_sock *tp)
  930. {
  931. if (tp->fastopen_req) {
  932. kfree(tp->fastopen_req);
  933. tp->fastopen_req = NULL;
  934. }
  935. }
  936. static int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg,
  937. int *copied, size_t size)
  938. {
  939. struct tcp_sock *tp = tcp_sk(sk);
  940. int err, flags;
  941. if (!(sysctl_tcp_fastopen & TFO_CLIENT_ENABLE))
  942. return -EOPNOTSUPP;
  943. if (tp->fastopen_req)
  944. return -EALREADY; /* Another Fast Open is in progress */
  945. tp->fastopen_req = kzalloc(sizeof(struct tcp_fastopen_request),
  946. sk->sk_allocation);
  947. if (unlikely(!tp->fastopen_req))
  948. return -ENOBUFS;
  949. tp->fastopen_req->data = msg;
  950. tp->fastopen_req->size = size;
  951. flags = (msg->msg_flags & MSG_DONTWAIT) ? O_NONBLOCK : 0;
  952. err = __inet_stream_connect(sk->sk_socket, msg->msg_name,
  953. msg->msg_namelen, flags);
  954. *copied = tp->fastopen_req->copied;
  955. tcp_free_fastopen_req(tp);
  956. return err;
  957. }
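/*
 * Userspace context (illustrative sketch, not part of the original file):
 * a client enters this path by passing MSG_FASTOPEN to sendto()/sendmsg() on
 * an unconnected socket, so the payload can ride on the SYN (provided the
 * client bit, TFO_CLIENT_ENABLE, is set in net.ipv4.tcp_fastopen).
 * Hypothetical use; addr filled in as for connect(), error handling omitted:
 *
 *	int fd = socket(AF_INET, SOCK_STREAM, 0);
 *	sendto(fd, req, req_len, MSG_FASTOPEN,
 *	       (struct sockaddr *)&addr, sizeof(addr));
 */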
  958. int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
  959. {
  960. struct tcp_sock *tp = tcp_sk(sk);
  961. struct sk_buff *skb;
  962. int flags, err, copied = 0;
  963. int mss_now = 0, size_goal, copied_syn = 0;
  964. bool sg;
  965. long timeo;
  966. lock_sock(sk);
  967. flags = msg->msg_flags;
  968. if (flags & MSG_FASTOPEN) {
  969. err = tcp_sendmsg_fastopen(sk, msg, &copied_syn, size);
  970. if (err == -EINPROGRESS && copied_syn > 0)
  971. goto out;
  972. else if (err)
  973. goto out_err;
  974. }
  975. timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
  976. /* Wait for a connection to finish. One exception is TCP Fast Open
  977. * (passive side) where data is allowed to be sent before a connection
  978. * is fully established.
  979. */
  980. if (((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) &&
  981. !tcp_passive_fastopen(sk)) {
  982. err = sk_stream_wait_connect(sk, &timeo);
  983. if (err != 0)
  984. goto do_error;
  985. }
  986. if (unlikely(tp->repair)) {
  987. if (tp->repair_queue == TCP_RECV_QUEUE) {
  988. copied = tcp_send_rcvq(sk, msg, size);
  989. goto out_nopush;
  990. }
  991. err = -EINVAL;
  992. if (tp->repair_queue == TCP_NO_QUEUE)
  993. goto out_err;
  994. /* 'common' sending to sendq */
  995. }
  996. /* This should be in poll */
  997. sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
  998. mss_now = tcp_send_mss(sk, &size_goal, flags);
  999. /* Ok commence sending. */
  1000. copied = 0;
  1001. err = -EPIPE;
  1002. if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
  1003. goto out_err;
  1004. sg = !!(sk->sk_route_caps & NETIF_F_SG);
  1005. while (msg_data_left(msg)) {
  1006. int copy = 0;
  1007. int max = size_goal;
  1008. skb = tcp_write_queue_tail(sk);
  1009. if (tcp_send_head(sk)) {
  1010. if (skb->ip_summed == CHECKSUM_NONE)
  1011. max = mss_now;
  1012. copy = max - skb->len;
  1013. }
  1014. if (copy <= 0) {
  1015. new_segment:
  1016. /* Allocate new segment. If the interface is SG,
  1017. * allocate skb fitting to single page.
  1018. */
  1019. if (!sk_stream_memory_free(sk))
  1020. goto wait_for_sndbuf;
  1021. skb = sk_stream_alloc_skb(sk,
  1022. select_size(sk, sg),
  1023. sk->sk_allocation,
  1024. skb_queue_empty(&sk->sk_write_queue));
  1025. if (!skb)
  1026. goto wait_for_memory;
  1027. /*
  1028. * Check whether we can use HW checksum.
  1029. */
  1030. if (sk_check_csum_caps(sk))
  1031. skb->ip_summed = CHECKSUM_PARTIAL;
  1032. skb_entail(sk, skb);
  1033. copy = size_goal;
  1034. max = size_goal;
  1035. /* All packets are restored as if they have
  1036. * already been sent. skb_mstamp isn't set to
  1037. * avoid wrong rtt estimation.
  1038. */
  1039. if (tp->repair)
  1040. TCP_SKB_CB(skb)->sacked |= TCPCB_REPAIRED;
  1041. }
  1042. /* Try to append data to the end of skb. */
  1043. if (copy > msg_data_left(msg))
  1044. copy = msg_data_left(msg);
  1045. /* Where to copy to? */
  1046. if (skb_availroom(skb) > 0) {
  1047. /* We have some space in skb head. Superb! */
  1048. copy = min_t(int, copy, skb_availroom(skb));
  1049. err = skb_add_data_nocache(sk, skb, &msg->msg_iter, copy);
  1050. if (err)
  1051. goto do_fault;
  1052. } else {
  1053. bool merge = true;
  1054. int i = skb_shinfo(skb)->nr_frags;
  1055. struct page_frag *pfrag = sk_page_frag(sk);
  1056. if (!sk_page_frag_refill(sk, pfrag))
  1057. goto wait_for_memory;
  1058. if (!skb_can_coalesce(skb, i, pfrag->page,
  1059. pfrag->offset)) {
  1060. if (i == MAX_SKB_FRAGS || !sg) {
  1061. tcp_mark_push(tp, skb);
  1062. goto new_segment;
  1063. }
  1064. merge = false;
  1065. }
  1066. copy = min_t(int, copy, pfrag->size - pfrag->offset);
  1067. if (!sk_wmem_schedule(sk, copy))
  1068. goto wait_for_memory;
  1069. err = skb_copy_to_page_nocache(sk, &msg->msg_iter, skb,
  1070. pfrag->page,
  1071. pfrag->offset,
  1072. copy);
  1073. if (err)
  1074. goto do_error;
  1075. /* Update the skb. */
  1076. if (merge) {
  1077. skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
  1078. } else {
  1079. skb_fill_page_desc(skb, i, pfrag->page,
  1080. pfrag->offset, copy);
  1081. get_page(pfrag->page);
  1082. }
  1083. pfrag->offset += copy;
  1084. }
  1085. if (!copied)
  1086. TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_PSH;
  1087. tp->write_seq += copy;
  1088. TCP_SKB_CB(skb)->end_seq += copy;
  1089. tcp_skb_pcount_set(skb, 0);
  1090. copied += copy;
  1091. if (!msg_data_left(msg)) {
  1092. tcp_tx_timestamp(sk, skb);
  1093. goto out;
  1094. }
  1095. if (skb->len < max || (flags & MSG_OOB) || unlikely(tp->repair))
  1096. continue;
  1097. if (forced_push(tp)) {
  1098. tcp_mark_push(tp, skb);
  1099. __tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH);
  1100. } else if (skb == tcp_send_head(sk))
  1101. tcp_push_one(sk, mss_now);
  1102. continue;
  1103. wait_for_sndbuf:
  1104. set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
  1105. wait_for_memory:
  1106. if (copied)
  1107. tcp_push(sk, flags & ~MSG_MORE, mss_now,
  1108. TCP_NAGLE_PUSH, size_goal);
  1109. err = sk_stream_wait_memory(sk, &timeo);
  1110. if (err != 0)
  1111. goto do_error;
  1112. mss_now = tcp_send_mss(sk, &size_goal, flags);
  1113. }
  1114. out:
  1115. if (copied)
  1116. tcp_push(sk, flags, mss_now, tp->nonagle, size_goal);
  1117. out_nopush:
  1118. release_sock(sk);
  1119. return copied + copied_syn;
  1120. do_fault:
  1121. if (!skb->len) {
  1122. tcp_unlink_write_queue(skb, sk);
  1123. /* It is the one place in all of TCP, except connection
  1124. * reset, where we can be unlinking the send_head.
  1125. */
  1126. tcp_check_send_head(sk, skb);
  1127. sk_wmem_free_skb(sk, skb);
  1128. }
  1129. do_error:
  1130. if (copied + copied_syn)
  1131. goto out;
  1132. out_err:
  1133. err = sk_stream_error(sk, flags, err);
  1134. /* make sure we wake any epoll edge trigger waiter */
  1135. if (unlikely(skb_queue_len(&sk->sk_write_queue) == 0 && err == -EAGAIN))
  1136. sk->sk_write_space(sk);
  1137. release_sock(sk);
  1138. return err;
  1139. }
  1140. EXPORT_SYMBOL(tcp_sendmsg);
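/*
 * Userspace context (illustrative sketch, not part of the original file):
 * tcp_sendmsg() may accept only part of a request (for instance on a
 * non-blocking socket whose send buffer fills), so callers conventionally
 * loop on the returned byte count. Hypothetical writer, with EAGAIN handling
 * left to a poll() loop elsewhere:
 *
 *	size_t done = 0;
 *	while (done < len) {
 *		ssize_t n = send(fd, buf + done, len - done, 0);
 *		if (n < 0) {
 *			if (errno == EINTR)
 *				continue;
 *			break;
 *		}
 *		done += n;
 *	}
 */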
  1141. /*
  1142. * Handle reading urgent data. BSD has very simple semantics for
  1143. * this, no blocking and very strange errors 8)
  1144. */
  1145. static int tcp_recv_urg(struct sock *sk, struct msghdr *msg, int len, int flags)
  1146. {
  1147. struct tcp_sock *tp = tcp_sk(sk);
  1148. /* No URG data to read. */
  1149. if (sock_flag(sk, SOCK_URGINLINE) || !tp->urg_data ||
  1150. tp->urg_data == TCP_URG_READ)
  1151. return -EINVAL; /* Yes this is right ! */
  1152. if (sk->sk_state == TCP_CLOSE && !sock_flag(sk, SOCK_DONE))
  1153. return -ENOTCONN;
  1154. if (tp->urg_data & TCP_URG_VALID) {
  1155. int err = 0;
  1156. char c = tp->urg_data;
  1157. if (!(flags & MSG_PEEK))
  1158. tp->urg_data = TCP_URG_READ;
  1159. /* Read urgent data. */
  1160. msg->msg_flags |= MSG_OOB;
  1161. if (len > 0) {
  1162. if (!(flags & MSG_TRUNC))
  1163. err = memcpy_to_msg(msg, &c, 1);
  1164. len = 1;
  1165. } else
  1166. msg->msg_flags |= MSG_TRUNC;
  1167. return err ? -EFAULT : len;
  1168. }
  1169. if (sk->sk_state == TCP_CLOSE || (sk->sk_shutdown & RCV_SHUTDOWN))
  1170. return 0;
  1171. /* Fixed the recv(..., MSG_OOB) behaviour. BSD docs and
  1172. * the available implementations agree in this case:
  1173. * this call should never block, independent of the
  1174. * blocking state of the socket.
  1175. * Mike <pall@rz.uni-karlsruhe.de>
  1176. */
  1177. return -EAGAIN;
  1178. }
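/*
 * Userspace context (illustrative sketch, not part of the original file):
 * urgent data normally surfaces as a single out-of-band byte read with
 * MSG_OOB, and SIOCATMARK (handled in tcp_ioctl() above) reports whether the
 * read pointer has reached the urgent mark. Hypothetical receiver with
 * SO_OOBINLINE unset; handle_urgent() is a placeholder:
 *
 *	int at_mark = 0;
 *	char oob;
 *	ioctl(fd, SIOCATMARK, &at_mark);
 *	if (recv(fd, &oob, 1, MSG_OOB) == 1)
 *		handle_urgent(oob);
 */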
  1179. static int tcp_peek_sndq(struct sock *sk, struct msghdr *msg, int len)
  1180. {
  1181. struct sk_buff *skb;
  1182. int copied = 0, err = 0;
  1183. /* XXX -- need to support SO_PEEK_OFF */
  1184. skb_queue_walk(&sk->sk_write_queue, skb) {
  1185. err = skb_copy_datagram_msg(skb, 0, msg, skb->len);
  1186. if (err)
  1187. break;
  1188. copied += skb->len;
  1189. }
  1190. return err ?: copied;
  1191. }
  1192. /* Clean up the receive buffer for full frames taken by the user,
  1193. * then send an ACK if necessary. COPIED is the number of bytes
  1194. * tcp_recvmsg has given to the user so far, it speeds up the
  1195. * calculation of whether or not we must ACK for the sake of
  1196. * a window update.
  1197. */
  1198. static void tcp_cleanup_rbuf(struct sock *sk, int copied)
  1199. {
  1200. struct tcp_sock *tp = tcp_sk(sk);
  1201. bool time_to_ack = false;
  1202. struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
  1203. WARN(skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq),
  1204. "cleanup rbuf bug: copied %X seq %X rcvnxt %X\n",
  1205. tp->copied_seq, TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt);
  1206. if (inet_csk_ack_scheduled(sk)) {
  1207. const struct inet_connection_sock *icsk = inet_csk(sk);
  1208. /* Delayed ACKs frequently hit locked sockets during bulk
  1209. * receive. */
  1210. if (icsk->icsk_ack.blocked ||
  1211. /* Once-per-two-segments ACK was not sent by tcp_input.c */
  1212. tp->rcv_nxt - tp->rcv_wup > icsk->icsk_ack.rcv_mss ||
1213. /*
1214. * If this read emptied the read buffer, we send an ACK if the
1215. * connection is not bidirectional, the user drained the
1216. * receive buffer, and there was a small segment
1217. * in the queue.
1218. */
  1219. (copied > 0 &&
  1220. ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED2) ||
  1221. ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED) &&
  1222. !icsk->icsk_ack.pingpong)) &&
  1223. !atomic_read(&sk->sk_rmem_alloc)))
  1224. time_to_ack = true;
  1225. }
  1226. /* We send an ACK if we can now advertise a non-zero window
  1227. * which has been raised "significantly".
  1228. *
  1229. * Even if window raised up to infinity, do not send window open ACK
  1230. * in states, where we will not receive more. It is useless.
  1231. */
  1232. if (copied > 0 && !time_to_ack && !(sk->sk_shutdown & RCV_SHUTDOWN)) {
  1233. __u32 rcv_window_now = tcp_receive_window(tp);
  1234. /* Optimize, __tcp_select_window() is not cheap. */
  1235. if (2*rcv_window_now <= tp->window_clamp) {
  1236. __u32 new_window = __tcp_select_window(sk);
1237. /* Send an ACK now if this read freed lots of space
1238. * in our buffer. Here, new_window is the window we could now
1239. * advertise; we do so if it is not less than the current one.
1240. * "Lots" means "at least twice" here.
1241. */
  1242. if (new_window && new_window >= 2 * rcv_window_now)
  1243. time_to_ack = true;
  1244. }
  1245. }
  1246. if (time_to_ack)
  1247. tcp_send_ack(sk);
  1248. }
  1249. static void tcp_prequeue_process(struct sock *sk)
  1250. {
  1251. struct sk_buff *skb;
  1252. struct tcp_sock *tp = tcp_sk(sk);
  1253. NET_INC_STATS_USER(sock_net(sk), LINUX_MIB_TCPPREQUEUED);
  1254. /* RX process wants to run with disabled BHs, though it is not
  1255. * necessary */
  1256. local_bh_disable();
  1257. while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
  1258. sk_backlog_rcv(sk, skb);
  1259. local_bh_enable();
  1260. /* Clear memory counter. */
  1261. tp->ucopy.memory = 0;
  1262. }
  1263. static struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off)
  1264. {
  1265. struct sk_buff *skb;
  1266. u32 offset;
  1267. while ((skb = skb_peek(&sk->sk_receive_queue)) != NULL) {
  1268. offset = seq - TCP_SKB_CB(skb)->seq;
  1269. if (unlikely(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) {
  1270. pr_err_once("%s: found a SYN, please report !\n", __func__);
  1271. offset--;
  1272. }
  1273. if (offset < skb->len || (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)) {
  1274. *off = offset;
  1275. return skb;
  1276. }
1277. /* This looks weird, but this can happen if TCP collapsing
1278. * split a fat GRO packet while we released the socket lock
1279. * in skb_splice_bits()
1280. */
  1281. sk_eat_skb(sk, skb);
  1282. }
  1283. return NULL;
  1284. }
  1285. /*
  1286. * This routine provides an alternative to tcp_recvmsg() for routines
  1287. * that would like to handle copying from skbuffs directly in 'sendfile'
  1288. * fashion.
  1289. * Note:
  1290. * - It is assumed that the socket was locked by the caller.
  1291. * - The routine does not block.
  1292. * - At present, there is no support for reading OOB data
  1293. * or for 'peeking' the socket using this routine
  1294. * (although both would be easy to implement).
  1295. */
  1296. int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
  1297. sk_read_actor_t recv_actor)
  1298. {
  1299. struct sk_buff *skb;
  1300. struct tcp_sock *tp = tcp_sk(sk);
  1301. u32 seq = tp->copied_seq;
  1302. u32 offset;
  1303. int copied = 0;
  1304. if (sk->sk_state == TCP_LISTEN)
  1305. return -ENOTCONN;
  1306. while ((skb = tcp_recv_skb(sk, seq, &offset)) != NULL) {
  1307. if (offset < skb->len) {
  1308. int used;
  1309. size_t len;
  1310. len = skb->len - offset;
  1311. /* Stop reading if we hit a patch of urgent data */
  1312. if (tp->urg_data) {
  1313. u32 urg_offset = tp->urg_seq - seq;
  1314. if (urg_offset < len)
  1315. len = urg_offset;
  1316. if (!len)
  1317. break;
  1318. }
  1319. used = recv_actor(desc, skb, offset, len);
  1320. if (used <= 0) {
  1321. if (!copied)
  1322. copied = used;
  1323. break;
  1324. } else if (used <= len) {
  1325. seq += used;
  1326. copied += used;
  1327. offset += used;
  1328. }
  1329. /* If recv_actor drops the lock (e.g. TCP splice
  1330. * receive) the skb pointer might be invalid when
  1331. * getting here: tcp_collapse might have deleted it
  1332. * while aggregating skbs from the socket queue.
  1333. */
  1334. skb = tcp_recv_skb(sk, seq - 1, &offset);
  1335. if (!skb)
  1336. break;
  1337. /* TCP coalescing might have appended data to the skb.
  1338. * Try to splice more frags
  1339. */
  1340. if (offset + 1 != skb->len)
  1341. continue;
  1342. }
  1343. if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) {
  1344. sk_eat_skb(sk, skb);
  1345. ++seq;
  1346. break;
  1347. }
  1348. sk_eat_skb(sk, skb);
  1349. if (!desc->count)
  1350. break;
  1351. tp->copied_seq = seq;
  1352. }
  1353. tp->copied_seq = seq;
  1354. tcp_rcv_space_adjust(sk);
  1355. /* Clean up data we have read: This will do ACK frames. */
  1356. if (copied > 0) {
  1357. tcp_recv_skb(sk, seq, &offset);
  1358. tcp_cleanup_rbuf(sk, copied);
  1359. }
  1360. return copied;
  1361. }
  1362. EXPORT_SYMBOL(tcp_read_sock);
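/*
 * A minimal in-kernel sketch of a tcp_read_sock() consumer, assuming the
 * caller already holds the socket lock. my_actor() and my_read() are
 * hypothetical names used only for illustration; the actor may consume less
 * than 'len', and tcp_read_sock() advances copied_seq by whatever it returns.
 */
static int my_actor(read_descriptor_t *desc, struct sk_buff *skb,
		    unsigned int offset, size_t len)
{
	char *dst = desc->arg.data;
	size_t to_copy = min_t(size_t, len, desc->count);

	if (skb_copy_bits(skb, offset, dst + desc->written, to_copy))
		return -EFAULT;

	desc->written += to_copy;
	desc->count -= to_copy;
	return to_copy;
}

static int my_read(struct sock *sk, void *dst, size_t len)
{
	read_descriptor_t desc = {
		.arg.data	= dst,
		.count		= len,
	};

	return tcp_read_sock(sk, &desc, my_actor);	/* caller holds lock_sock(sk) */
}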
  1363. /*
  1364. * This routine copies from a sock struct into the user buffer.
  1365. *
  1366. * Technical note: in 2.3 we work on _locked_ socket, so that
  1367. * tricks with *seq access order and skb->users are not required.
  1368. * Probably, code can be easily improved even more.
  1369. */
  1370. int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
  1371. int flags, int *addr_len)
  1372. {
  1373. struct tcp_sock *tp = tcp_sk(sk);
  1374. int copied = 0;
  1375. u32 peek_seq;
  1376. u32 *seq;
  1377. unsigned long used;
  1378. int err;
  1379. int target; /* Read at least this many bytes */
  1380. long timeo;
  1381. struct task_struct *user_recv = NULL;
  1382. struct sk_buff *skb, *last;
  1383. u32 urg_hole = 0;
  1384. if (unlikely(flags & MSG_ERRQUEUE))
  1385. return inet_recv_error(sk, msg, len, addr_len);
  1386. if (sk_can_busy_loop(sk) && skb_queue_empty(&sk->sk_receive_queue) &&
  1387. (sk->sk_state == TCP_ESTABLISHED))
  1388. sk_busy_loop(sk, nonblock);
  1389. lock_sock(sk);
  1390. err = -ENOTCONN;
  1391. if (sk->sk_state == TCP_LISTEN)
  1392. goto out;
  1393. timeo = sock_rcvtimeo(sk, nonblock);
  1394. /* Urgent data needs to be handled specially. */
  1395. if (flags & MSG_OOB)
  1396. goto recv_urg;
  1397. if (unlikely(tp->repair)) {
  1398. err = -EPERM;
  1399. if (!(flags & MSG_PEEK))
  1400. goto out;
  1401. if (tp->repair_queue == TCP_SEND_QUEUE)
  1402. goto recv_sndq;
  1403. err = -EINVAL;
  1404. if (tp->repair_queue == TCP_NO_QUEUE)
  1405. goto out;
  1406. /* 'common' recv queue MSG_PEEK-ing */
  1407. }
  1408. seq = &tp->copied_seq;
  1409. if (flags & MSG_PEEK) {
  1410. peek_seq = tp->copied_seq;
  1411. seq = &peek_seq;
  1412. }
  1413. target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
  1414. do {
  1415. u32 offset;
  1416. /* Are we at urgent data? Stop if we have read anything or have SIGURG pending. */
  1417. if (tp->urg_data && tp->urg_seq == *seq) {
  1418. if (copied)
  1419. break;
  1420. if (signal_pending(current)) {
  1421. copied = timeo ? sock_intr_errno(timeo) : -EAGAIN;
  1422. break;
  1423. }
  1424. }
  1425. /* Next get a buffer. */
  1426. last = skb_peek_tail(&sk->sk_receive_queue);
  1427. skb_queue_walk(&sk->sk_receive_queue, skb) {
  1428. last = skb;
  1429. /* Now that we have two receive queues this
  1430. * shouldn't happen.
  1431. */
  1432. if (WARN(before(*seq, TCP_SKB_CB(skb)->seq),
  1433. "recvmsg bug: copied %X seq %X rcvnxt %X fl %X\n",
  1434. *seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt,
  1435. flags))
  1436. break;
  1437. offset = *seq - TCP_SKB_CB(skb)->seq;
  1438. if (unlikely(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) {
  1439. pr_err_once("%s: found a SYN, please report !\n", __func__);
  1440. offset--;
  1441. }
  1442. if (offset < skb->len)
  1443. goto found_ok_skb;
  1444. if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
  1445. goto found_fin_ok;
  1446. WARN(!(flags & MSG_PEEK),
  1447. "recvmsg bug 2: copied %X seq %X rcvnxt %X fl %X\n",
  1448. *seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt, flags);
  1449. }
1450. /* Well, if we have backlog, try to process it now. */
  1451. if (copied >= target && !sk->sk_backlog.tail)
  1452. break;
  1453. if (copied) {
  1454. if (sk->sk_err ||
  1455. sk->sk_state == TCP_CLOSE ||
  1456. (sk->sk_shutdown & RCV_SHUTDOWN) ||
  1457. !timeo ||
  1458. signal_pending(current))
  1459. break;
  1460. } else {
  1461. if (sock_flag(sk, SOCK_DONE))
  1462. break;
  1463. if (sk->sk_err) {
  1464. copied = sock_error(sk);
  1465. break;
  1466. }
  1467. if (sk->sk_shutdown & RCV_SHUTDOWN)
  1468. break;
  1469. if (sk->sk_state == TCP_CLOSE) {
  1470. if (!sock_flag(sk, SOCK_DONE)) {
1471. /* This occurs when user tries to read
1472. * from a never-connected socket.
1473. */
  1474. copied = -ENOTCONN;
  1475. break;
  1476. }
  1477. break;
  1478. }
  1479. if (!timeo) {
  1480. copied = -EAGAIN;
  1481. break;
  1482. }
  1483. if (signal_pending(current)) {
  1484. copied = sock_intr_errno(timeo);
  1485. break;
  1486. }
  1487. }
  1488. tcp_cleanup_rbuf(sk, copied);
  1489. if (!sysctl_tcp_low_latency && tp->ucopy.task == user_recv) {
  1490. /* Install new reader */
  1491. if (!user_recv && !(flags & (MSG_TRUNC | MSG_PEEK))) {
  1492. user_recv = current;
  1493. tp->ucopy.task = user_recv;
  1494. tp->ucopy.msg = msg;
  1495. }
  1496. tp->ucopy.len = len;
  1497. WARN_ON(tp->copied_seq != tp->rcv_nxt &&
  1498. !(flags & (MSG_PEEK | MSG_TRUNC)));
  1499. /* Ugly... If prequeue is not empty, we have to
  1500. * process it before releasing socket, otherwise
  1501. * order will be broken at second iteration.
  1502. * More elegant solution is required!!!
  1503. *
  1504. * Look: we have the following (pseudo)queues:
  1505. *
  1506. * 1. packets in flight
  1507. * 2. backlog
  1508. * 3. prequeue
  1509. * 4. receive_queue
  1510. *
  1511. * Each queue can be processed only if the next ones
  1512. * are empty. At this point we have empty receive_queue.
  1513. * But prequeue _can_ be not empty after 2nd iteration,
  1514. * when we jumped to start of loop because backlog
  1515. * processing added something to receive_queue.
  1516. * We cannot release_sock(), because backlog contains
  1517. * packets arrived _after_ prequeued ones.
  1518. *
1519. * In short, the algorithm is clear --- process all
1520. * the queues in order. We could do it more directly,
1521. * requeueing packets from the backlog to the prequeue if it
1522. * is not empty. That is more elegant, but eats cycles,
1523. * unfortunately.
  1524. */
  1525. if (!skb_queue_empty(&tp->ucopy.prequeue))
  1526. goto do_prequeue;
  1527. /* __ Set realtime policy in scheduler __ */
  1528. }
  1529. if (copied >= target) {
  1530. /* Do not sleep, just process backlog. */
  1531. release_sock(sk);
  1532. lock_sock(sk);
  1533. } else {
  1534. sk_wait_data(sk, &timeo, last);
  1535. }
  1536. if (user_recv) {
  1537. int chunk;
  1538. /* __ Restore normal policy in scheduler __ */
  1539. chunk = len - tp->ucopy.len;
  1540. if (chunk != 0) {
  1541. NET_ADD_STATS_USER(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMBACKLOG, chunk);
  1542. len -= chunk;
  1543. copied += chunk;
  1544. }
  1545. if (tp->rcv_nxt == tp->copied_seq &&
  1546. !skb_queue_empty(&tp->ucopy.prequeue)) {
  1547. do_prequeue:
  1548. tcp_prequeue_process(sk);
  1549. chunk = len - tp->ucopy.len;
  1550. if (chunk != 0) {
  1551. NET_ADD_STATS_USER(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
  1552. len -= chunk;
  1553. copied += chunk;
  1554. }
  1555. }
  1556. }
  1557. if ((flags & MSG_PEEK) &&
  1558. (peek_seq - copied - urg_hole != tp->copied_seq)) {
  1559. net_dbg_ratelimited("TCP(%s:%d): Application bug, race in MSG_PEEK\n",
  1560. current->comm,
  1561. task_pid_nr(current));
  1562. peek_seq = tp->copied_seq;
  1563. }
  1564. continue;
  1565. found_ok_skb:
  1566. /* Ok so how much can we use? */
  1567. used = skb->len - offset;
  1568. if (len < used)
  1569. used = len;
  1570. /* Do we have urgent data here? */
  1571. if (tp->urg_data) {
  1572. u32 urg_offset = tp->urg_seq - *seq;
  1573. if (urg_offset < used) {
  1574. if (!urg_offset) {
  1575. if (!sock_flag(sk, SOCK_URGINLINE)) {
  1576. ++*seq;
  1577. urg_hole++;
  1578. offset++;
  1579. used--;
  1580. if (!used)
  1581. goto skip_copy;
  1582. }
  1583. } else
  1584. used = urg_offset;
  1585. }
  1586. }
  1587. if (!(flags & MSG_TRUNC)) {
  1588. err = skb_copy_datagram_msg(skb, offset, msg, used);
  1589. if (err) {
  1590. /* Exception. Bailout! */
  1591. if (!copied)
  1592. copied = -EFAULT;
  1593. break;
  1594. }
  1595. }
  1596. *seq += used;
  1597. copied += used;
  1598. len -= used;
  1599. tcp_rcv_space_adjust(sk);
  1600. skip_copy:
  1601. if (tp->urg_data && after(tp->copied_seq, tp->urg_seq)) {
  1602. tp->urg_data = 0;
  1603. tcp_fast_path_check(sk);
  1604. }
  1605. if (used + offset < skb->len)
  1606. continue;
  1607. if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
  1608. goto found_fin_ok;
  1609. if (!(flags & MSG_PEEK))
  1610. sk_eat_skb(sk, skb);
  1611. continue;
  1612. found_fin_ok:
  1613. /* Process the FIN. */
  1614. ++*seq;
  1615. if (!(flags & MSG_PEEK))
  1616. sk_eat_skb(sk, skb);
  1617. break;
  1618. } while (len > 0);
  1619. if (user_recv) {
  1620. if (!skb_queue_empty(&tp->ucopy.prequeue)) {
  1621. int chunk;
  1622. tp->ucopy.len = copied > 0 ? len : 0;
  1623. tcp_prequeue_process(sk);
  1624. if (copied > 0 && (chunk = len - tp->ucopy.len) != 0) {
  1625. NET_ADD_STATS_USER(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
  1626. len -= chunk;
  1627. copied += chunk;
  1628. }
  1629. }
  1630. tp->ucopy.task = NULL;
  1631. tp->ucopy.len = 0;
  1632. }
  1633. /* According to UNIX98, msg_name/msg_namelen are ignored
  1634. * on connected socket. I was just happy when found this 8) --ANK
  1635. */
  1636. /* Clean up data we have read: This will do ACK frames. */
  1637. tcp_cleanup_rbuf(sk, copied);
  1638. release_sock(sk);
  1639. return copied;
  1640. out:
  1641. release_sock(sk);
  1642. return err;
  1643. recv_urg:
  1644. err = tcp_recv_urg(sk, msg, len, flags);
  1645. goto out;
  1646. recv_sndq:
  1647. err = tcp_peek_sndq(sk, msg, len);
  1648. goto out;
  1649. }
  1650. EXPORT_SYMBOL(tcp_recvmsg);
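/*
 * Userspace sketch of how the 'target' computed from sock_rcvlowat() in
 * tcp_recvmsg() above behaves: MSG_WAITALL raises the wakeup target to the
 * full request, so recv() only returns short on EOF, error or a caught
 * signal, while SO_RCVLOWAT raises the target for ordinary reads.
 * Illustrative only; helper names are hypothetical.
 */
#include <sys/types.h>
#include <sys/socket.h>

static ssize_t read_exact(int fd, void *buf, size_t len)
{
	return recv(fd, buf, len, MSG_WAITALL);
}

static int set_rcv_lowat(int fd, int lowat)
{
	return setsockopt(fd, SOL_SOCKET, SO_RCVLOWAT, &lowat, sizeof(lowat));
}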
  1651. void tcp_set_state(struct sock *sk, int state)
  1652. {
  1653. int oldstate = sk->sk_state;
  1654. switch (state) {
  1655. case TCP_ESTABLISHED:
  1656. if (oldstate != TCP_ESTABLISHED)
  1657. TCP_INC_STATS(sock_net(sk), TCP_MIB_CURRESTAB);
  1658. break;
  1659. case TCP_CLOSE:
  1660. if (oldstate == TCP_CLOSE_WAIT || oldstate == TCP_ESTABLISHED)
  1661. TCP_INC_STATS(sock_net(sk), TCP_MIB_ESTABRESETS);
  1662. sk->sk_prot->unhash(sk);
  1663. if (inet_csk(sk)->icsk_bind_hash &&
  1664. !(sk->sk_userlocks & SOCK_BINDPORT_LOCK))
  1665. inet_put_port(sk);
  1666. /* fall through */
  1667. default:
  1668. if (oldstate == TCP_ESTABLISHED)
  1669. TCP_DEC_STATS(sock_net(sk), TCP_MIB_CURRESTAB);
  1670. }
  1671. /* Change state AFTER socket is unhashed to avoid closed
  1672. * socket sitting in hash tables.
  1673. */
  1674. sk_state_store(sk, state);
  1675. #ifdef STATE_TRACE
  1676. SOCK_DEBUG(sk, "TCP sk=%p, State %s -> %s\n", sk, statename[oldstate], statename[state]);
  1677. #endif
  1678. }
  1679. EXPORT_SYMBOL_GPL(tcp_set_state);
  1680. /*
  1681. * State processing on a close. This implements the state shift for
  1682. * sending our FIN frame. Note that we only send a FIN for some
  1683. * states. A shutdown() may have already sent the FIN, or we may be
  1684. * closed.
  1685. */
  1686. static const unsigned char new_state[16] = {
  1687. /* current state: new state: action: */
  1688. [0 /* (Invalid) */] = TCP_CLOSE,
  1689. [TCP_ESTABLISHED] = TCP_FIN_WAIT1 | TCP_ACTION_FIN,
  1690. [TCP_SYN_SENT] = TCP_CLOSE,
  1691. [TCP_SYN_RECV] = TCP_FIN_WAIT1 | TCP_ACTION_FIN,
  1692. [TCP_FIN_WAIT1] = TCP_FIN_WAIT1,
  1693. [TCP_FIN_WAIT2] = TCP_FIN_WAIT2,
  1694. [TCP_TIME_WAIT] = TCP_CLOSE,
  1695. [TCP_CLOSE] = TCP_CLOSE,
  1696. [TCP_CLOSE_WAIT] = TCP_LAST_ACK | TCP_ACTION_FIN,
  1697. [TCP_LAST_ACK] = TCP_LAST_ACK,
  1698. [TCP_LISTEN] = TCP_CLOSE,
  1699. [TCP_CLOSING] = TCP_CLOSING,
  1700. [TCP_NEW_SYN_RECV] = TCP_CLOSE, /* should not happen ! */
  1701. };
  1702. static int tcp_close_state(struct sock *sk)
  1703. {
  1704. int next = (int)new_state[sk->sk_state];
  1705. int ns = next & TCP_STATE_MASK;
  1706. tcp_set_state(sk, ns);
  1707. return next & TCP_ACTION_FIN;
  1708. }
  1709. /*
  1710. * Shutdown the sending side of a connection. Much like close except
  1711. * that we don't receive shut down or sock_set_flag(sk, SOCK_DEAD).
  1712. */
  1713. void tcp_shutdown(struct sock *sk, int how)
  1714. {
  1715. /* We need to grab some memory, and put together a FIN,
  1716. * and then put it into the queue to be sent.
  1717. * Tim MacKenzie(tym@dibbler.cs.monash.edu.au) 4 Dec '92.
  1718. */
  1719. if (!(how & SEND_SHUTDOWN))
  1720. return;
  1721. /* If we've already sent a FIN, or it's a closed state, skip this. */
  1722. if ((1 << sk->sk_state) &
  1723. (TCPF_ESTABLISHED | TCPF_SYN_SENT |
  1724. TCPF_SYN_RECV | TCPF_CLOSE_WAIT)) {
  1725. /* Clear out any half completed packets. FIN if needed. */
  1726. if (tcp_close_state(sk))
  1727. tcp_send_fin(sk);
  1728. }
  1729. }
  1730. EXPORT_SYMBOL(tcp_shutdown);
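/*
 * Userspace half-close sketch matching tcp_shutdown() above: SHUT_WR queues
 * a FIN (when the state allows one) but the socket can still receive until
 * the peer closes. Illustrative only; fd is an assumed connected socket.
 */
#include <sys/socket.h>
#include <unistd.h>

static void send_request_then_drain(int fd, const char *req, size_t len)
{
	char buf[4096];

	if (write(fd, req, len) < 0)
		return;
	shutdown(fd, SHUT_WR);			/* done sending, FIN goes out */

	while (read(fd, buf, sizeof(buf)) > 0)
		;				/* keep reading until the peer's FIN */
	close(fd);
}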
  1731. bool tcp_check_oom(struct sock *sk, int shift)
  1732. {
  1733. bool too_many_orphans, out_of_socket_memory;
  1734. too_many_orphans = tcp_too_many_orphans(sk, shift);
  1735. out_of_socket_memory = tcp_out_of_memory(sk);
  1736. if (too_many_orphans)
  1737. net_info_ratelimited("too many orphaned sockets\n");
  1738. if (out_of_socket_memory)
  1739. net_info_ratelimited("out of memory -- consider tuning tcp_mem\n");
  1740. return too_many_orphans || out_of_socket_memory;
  1741. }
  1742. void tcp_close(struct sock *sk, long timeout)
  1743. {
  1744. struct sk_buff *skb;
  1745. int data_was_unread = 0;
  1746. int state;
  1747. lock_sock(sk);
  1748. sk->sk_shutdown = SHUTDOWN_MASK;
  1749. if (sk->sk_state == TCP_LISTEN) {
  1750. tcp_set_state(sk, TCP_CLOSE);
  1751. /* Special case. */
  1752. inet_csk_listen_stop(sk);
  1753. goto adjudge_to_death;
  1754. }
  1755. /* We need to flush the recv. buffs. We do this only on the
  1756. * descriptor close, not protocol-sourced closes, because the
  1757. * reader process may not have drained the data yet!
  1758. */
  1759. while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
  1760. u32 len = TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq;
  1761. if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
  1762. len--;
  1763. data_was_unread += len;
  1764. __kfree_skb(skb);
  1765. }
  1766. sk_mem_reclaim(sk);
  1767. /* If socket has been already reset (e.g. in tcp_reset()) - kill it. */
  1768. if (sk->sk_state == TCP_CLOSE)
  1769. goto adjudge_to_death;
  1770. /* As outlined in RFC 2525, section 2.17, we send a RST here because
  1771. * data was lost. To witness the awful effects of the old behavior of
  1772. * always doing a FIN, run an older 2.1.x kernel or 2.0.x, start a bulk
  1773. * GET in an FTP client, suspend the process, wait for the client to
  1774. * advertise a zero window, then kill -9 the FTP client, wheee...
  1775. * Note: timeout is always zero in such a case.
  1776. */
  1777. if (unlikely(tcp_sk(sk)->repair)) {
  1778. sk->sk_prot->disconnect(sk, 0);
  1779. } else if (data_was_unread) {
  1780. /* Unread data was tossed, zap the connection. */
  1781. NET_INC_STATS_USER(sock_net(sk), LINUX_MIB_TCPABORTONCLOSE);
  1782. tcp_set_state(sk, TCP_CLOSE);
  1783. tcp_send_active_reset(sk, sk->sk_allocation);
  1784. } else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
  1785. /* Check zero linger _after_ checking for unread data. */
  1786. sk->sk_prot->disconnect(sk, 0);
  1787. NET_INC_STATS_USER(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
  1788. } else if (tcp_close_state(sk)) {
  1789. /* We FIN if the application ate all the data before
  1790. * zapping the connection.
  1791. */
  1792. /* RED-PEN. Formally speaking, we have broken TCP state
  1793. * machine. State transitions:
  1794. *
  1795. * TCP_ESTABLISHED -> TCP_FIN_WAIT1
  1796. * TCP_SYN_RECV -> TCP_FIN_WAIT1 (forget it, it's impossible)
  1797. * TCP_CLOSE_WAIT -> TCP_LAST_ACK
  1798. *
  1799. * are legal only when FIN has been sent (i.e. in window),
  1800. * rather than queued out of window. Purists blame.
  1801. *
  1802. * F.e. "RFC state" is ESTABLISHED,
  1803. * if Linux state is FIN-WAIT-1, but FIN is still not sent.
  1804. *
1805. * The visible deviations are that sometimes
1806. * we enter the time-wait state when it is not really required
1807. * (harmless), and do not send active resets when they are
1808. * required by the specs (TCP_ESTABLISHED, TCP_CLOSE_WAIT, when
1809. * they look like CLOSING or LAST_ACK to Linux).
1810. * Probably, I missed some more loopholes.
  1811. * --ANK
  1812. * XXX (TFO) - To start off we don't support SYN+ACK+FIN
  1813. * in a single packet! (May consider it later but will
  1814. * probably need API support or TCP_CORK SYN-ACK until
  1815. * data is written and socket is closed.)
  1816. */
  1817. tcp_send_fin(sk);
  1818. }
  1819. sk_stream_wait_close(sk, timeout);
  1820. adjudge_to_death:
  1821. state = sk->sk_state;
  1822. sock_hold(sk);
  1823. sock_orphan(sk);
  1824. /* It is the last release_sock in its life. It will remove backlog. */
  1825. release_sock(sk);
  1826. /* Now socket is owned by kernel and we acquire BH lock
  1827. to finish close. No need to check for user refs.
  1828. */
  1829. local_bh_disable();
  1830. bh_lock_sock(sk);
  1831. WARN_ON(sock_owned_by_user(sk));
  1832. percpu_counter_inc(sk->sk_prot->orphan_count);
  1833. /* Have we already been destroyed by a softirq or backlog? */
  1834. if (state != TCP_CLOSE && sk->sk_state == TCP_CLOSE)
  1835. goto out;
1836. /* This is a (useful) BSD violation of the RFC. There is a
1837. * problem with TCP as specified in that the other end could
1838. * keep a socket open forever with no application left at this end.
  1839. * We use a 1 minute timeout (about the same as BSD) then kill
  1840. * our end. If they send after that then tough - BUT: long enough
  1841. * that we won't make the old 4*rto = almost no time - whoops
  1842. * reset mistake.
  1843. *
  1844. * Nope, it was not mistake. It is really desired behaviour
  1845. * f.e. on http servers, when such sockets are useless, but
  1846. * consume significant resources. Let's do it with special
  1847. * linger2 option. --ANK
  1848. */
  1849. if (sk->sk_state == TCP_FIN_WAIT2) {
  1850. struct tcp_sock *tp = tcp_sk(sk);
  1851. if (tp->linger2 < 0) {
  1852. tcp_set_state(sk, TCP_CLOSE);
  1853. tcp_send_active_reset(sk, GFP_ATOMIC);
  1854. NET_INC_STATS_BH(sock_net(sk),
  1855. LINUX_MIB_TCPABORTONLINGER);
  1856. } else {
  1857. const int tmo = tcp_fin_time(sk);
  1858. if (tmo > TCP_TIMEWAIT_LEN) {
  1859. inet_csk_reset_keepalive_timer(sk,
  1860. tmo - TCP_TIMEWAIT_LEN);
  1861. } else {
  1862. tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
  1863. goto out;
  1864. }
  1865. }
  1866. }
  1867. if (sk->sk_state != TCP_CLOSE) {
  1868. sk_mem_reclaim(sk);
  1869. if (tcp_check_oom(sk, 0)) {
  1870. tcp_set_state(sk, TCP_CLOSE);
  1871. tcp_send_active_reset(sk, GFP_ATOMIC);
  1872. NET_INC_STATS_BH(sock_net(sk),
  1873. LINUX_MIB_TCPABORTONMEMORY);
  1874. }
  1875. }
  1876. if (sk->sk_state == TCP_CLOSE) {
  1877. struct request_sock *req = tcp_sk(sk)->fastopen_rsk;
  1878. /* We could get here with a non-NULL req if the socket is
  1879. * aborted (e.g., closed with unread data) before 3WHS
  1880. * finishes.
  1881. */
  1882. if (req)
  1883. reqsk_fastopen_remove(sk, req, false);
  1884. inet_csk_destroy_sock(sk);
  1885. }
  1886. /* Otherwise, socket is reprieved until protocol close. */
  1887. out:
  1888. bh_unlock_sock(sk);
  1889. local_bh_enable();
  1890. sock_put(sk);
  1891. }
  1892. EXPORT_SYMBOL(tcp_close);
  1893. /* These states need RST on ABORT according to RFC793 */
  1894. static inline bool tcp_need_reset(int state)
  1895. {
  1896. return (1 << state) &
  1897. (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT | TCPF_FIN_WAIT1 |
  1898. TCPF_FIN_WAIT2 | TCPF_SYN_RECV);
  1899. }
  1900. int tcp_disconnect(struct sock *sk, int flags)
  1901. {
  1902. struct inet_sock *inet = inet_sk(sk);
  1903. struct inet_connection_sock *icsk = inet_csk(sk);
  1904. struct tcp_sock *tp = tcp_sk(sk);
  1905. int err = 0;
  1906. int old_state = sk->sk_state;
  1907. if (old_state != TCP_CLOSE)
  1908. tcp_set_state(sk, TCP_CLOSE);
  1909. /* ABORT function of RFC793 */
  1910. if (old_state == TCP_LISTEN) {
  1911. inet_csk_listen_stop(sk);
  1912. } else if (unlikely(tp->repair)) {
  1913. sk->sk_err = ECONNABORTED;
  1914. } else if (tcp_need_reset(old_state) ||
  1915. (tp->snd_nxt != tp->write_seq &&
  1916. (1 << old_state) & (TCPF_CLOSING | TCPF_LAST_ACK))) {
  1917. /* The last check adjusts for discrepancy of Linux wrt. RFC
  1918. * states
  1919. */
  1920. tcp_send_active_reset(sk, gfp_any());
  1921. sk->sk_err = ECONNRESET;
  1922. } else if (old_state == TCP_SYN_SENT)
  1923. sk->sk_err = ECONNRESET;
  1924. tcp_clear_xmit_timers(sk);
  1925. __skb_queue_purge(&sk->sk_receive_queue);
  1926. tcp_write_queue_purge(sk);
  1927. __skb_queue_purge(&tp->out_of_order_queue);
  1928. inet->inet_dport = 0;
  1929. if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
  1930. inet_reset_saddr(sk);
  1931. sk->sk_shutdown = 0;
  1932. sock_reset_flag(sk, SOCK_DONE);
  1933. tp->srtt_us = 0;
  1934. tp->write_seq += tp->max_window + 2;
  1935. if (tp->write_seq == 0)
  1936. tp->write_seq = 1;
  1937. icsk->icsk_backoff = 0;
  1938. tp->snd_cwnd = 2;
  1939. icsk->icsk_probes_out = 0;
  1940. tp->packets_out = 0;
  1941. tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
  1942. tp->snd_cwnd_cnt = 0;
  1943. tp->window_clamp = 0;
  1944. tcp_set_ca_state(sk, TCP_CA_Open);
  1945. tcp_clear_retrans(tp);
  1946. inet_csk_delack_init(sk);
  1947. tcp_init_send_head(sk);
  1948. memset(&tp->rx_opt, 0, sizeof(tp->rx_opt));
  1949. __sk_dst_reset(sk);
  1950. WARN_ON(inet->inet_num && !icsk->icsk_bind_hash);
  1951. sk->sk_error_report(sk);
  1952. return err;
  1953. }
  1954. EXPORT_SYMBOL(tcp_disconnect);
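/*
 * Userspace sketch: for a TCP socket, connect() with an AF_UNSPEC address
 * reaches tcp_disconnect() above via the protocol's disconnect hook,
 * aborting the current connection while keeping the socket usable for a
 * new connect(). Illustrative only; tcp_disconnect_fd() is a hypothetical
 * helper name.
 */
#include <string.h>
#include <sys/socket.h>

static int tcp_disconnect_fd(int fd)
{
	struct sockaddr sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_family = AF_UNSPEC;
	return connect(fd, &sa, sizeof(sa));
}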
  1955. static inline bool tcp_can_repair_sock(const struct sock *sk)
  1956. {
  1957. return ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN) &&
  1958. ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_ESTABLISHED));
  1959. }
  1960. static int tcp_repair_options_est(struct tcp_sock *tp,
  1961. struct tcp_repair_opt __user *optbuf, unsigned int len)
  1962. {
  1963. struct tcp_repair_opt opt;
  1964. while (len >= sizeof(opt)) {
  1965. if (copy_from_user(&opt, optbuf, sizeof(opt)))
  1966. return -EFAULT;
  1967. optbuf++;
  1968. len -= sizeof(opt);
  1969. switch (opt.opt_code) {
  1970. case TCPOPT_MSS:
  1971. tp->rx_opt.mss_clamp = opt.opt_val;
  1972. break;
  1973. case TCPOPT_WINDOW:
  1974. {
  1975. u16 snd_wscale = opt.opt_val & 0xFFFF;
  1976. u16 rcv_wscale = opt.opt_val >> 16;
  1977. if (snd_wscale > 14 || rcv_wscale > 14)
  1978. return -EFBIG;
  1979. tp->rx_opt.snd_wscale = snd_wscale;
  1980. tp->rx_opt.rcv_wscale = rcv_wscale;
  1981. tp->rx_opt.wscale_ok = 1;
  1982. }
  1983. break;
  1984. case TCPOPT_SACK_PERM:
  1985. if (opt.opt_val != 0)
  1986. return -EINVAL;
  1987. tp->rx_opt.sack_ok |= TCP_SACK_SEEN;
  1988. if (sysctl_tcp_fack)
  1989. tcp_enable_fack(tp);
  1990. break;
  1991. case TCPOPT_TIMESTAMP:
  1992. if (opt.opt_val != 0)
  1993. return -EINVAL;
  1994. tp->rx_opt.tstamp_ok = 1;
  1995. break;
  1996. }
  1997. }
  1998. return 0;
  1999. }
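/*
 * Userspace sketch of feeding tcp_repair_options_est() above, e.g. from a
 * checkpoint/restore tool: the socket must already be in repair mode
 * (TCP_REPAIR = 1) and ESTABLISHED. The option kind numbers (2 = MSS,
 * 3 = window scale) follow the TCP option registry and are spelled out here
 * because the uapi headers do not export them; restore_tcp_options() is a
 * hypothetical helper name.
 */
#include <netinet/in.h>
#include <sys/socket.h>
#include <linux/tcp.h>		/* struct tcp_repair_opt, TCP_REPAIR_OPTIONS */

static int restore_tcp_options(int fd, __u16 mss, __u8 snd_ws, __u8 rcv_ws)
{
	struct tcp_repair_opt opts[2] = {
		{ .opt_code = 2, .opt_val = mss },			/* MSS */
		{ .opt_code = 3, .opt_val = snd_ws | (rcv_ws << 16) },	/* window scale */
	};

	return setsockopt(fd, IPPROTO_TCP, TCP_REPAIR_OPTIONS,
			  opts, sizeof(opts));
}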
  2000. /*
  2001. * Socket option code for TCP.
  2002. */
  2003. static int do_tcp_setsockopt(struct sock *sk, int level,
  2004. int optname, char __user *optval, unsigned int optlen)
  2005. {
  2006. struct tcp_sock *tp = tcp_sk(sk);
  2007. struct inet_connection_sock *icsk = inet_csk(sk);
  2008. int val;
  2009. int err = 0;
  2010. /* These are data/string values, all the others are ints */
  2011. switch (optname) {
  2012. case TCP_CONGESTION: {
  2013. char name[TCP_CA_NAME_MAX];
  2014. if (optlen < 1)
  2015. return -EINVAL;
  2016. val = strncpy_from_user(name, optval,
  2017. min_t(long, TCP_CA_NAME_MAX-1, optlen));
  2018. if (val < 0)
  2019. return -EFAULT;
  2020. name[val] = 0;
  2021. lock_sock(sk);
  2022. err = tcp_set_congestion_control(sk, name);
  2023. release_sock(sk);
  2024. return err;
  2025. }
  2026. default:
  2027. /* fallthru */
  2028. break;
  2029. }
  2030. if (optlen < sizeof(int))
  2031. return -EINVAL;
  2032. if (get_user(val, (int __user *)optval))
  2033. return -EFAULT;
  2034. lock_sock(sk);
  2035. switch (optname) {
  2036. case TCP_MAXSEG:
  2037. /* Values greater than interface MTU won't take effect. However
  2038. * at the point when this call is done we typically don't yet
  2039. * know which interface is going to be used */
  2040. if (val < TCP_MIN_MSS || val > MAX_TCP_WINDOW) {
  2041. err = -EINVAL;
  2042. break;
  2043. }
  2044. tp->rx_opt.user_mss = val;
  2045. break;
  2046. case TCP_NODELAY:
  2047. if (val) {
  2048. /* TCP_NODELAY is weaker than TCP_CORK, so that
  2049. * this option on corked socket is remembered, but
  2050. * it is not activated until cork is cleared.
  2051. *
  2052. * However, when TCP_NODELAY is set we make
  2053. * an explicit push, which overrides even TCP_CORK
  2054. * for currently queued segments.
  2055. */
  2056. tp->nonagle |= TCP_NAGLE_OFF|TCP_NAGLE_PUSH;
  2057. tcp_push_pending_frames(sk);
  2058. } else {
  2059. tp->nonagle &= ~TCP_NAGLE_OFF;
  2060. }
  2061. break;
  2062. case TCP_THIN_LINEAR_TIMEOUTS:
  2063. if (val < 0 || val > 1)
  2064. err = -EINVAL;
  2065. else
  2066. tp->thin_lto = val;
  2067. break;
  2068. case TCP_THIN_DUPACK:
  2069. if (val < 0 || val > 1)
  2070. err = -EINVAL;
  2071. else {
  2072. tp->thin_dupack = val;
  2073. if (tp->thin_dupack)
  2074. tcp_disable_early_retrans(tp);
  2075. }
  2076. break;
  2077. case TCP_REPAIR:
  2078. if (!tcp_can_repair_sock(sk))
  2079. err = -EPERM;
  2080. else if (val == 1) {
  2081. tp->repair = 1;
  2082. sk->sk_reuse = SK_FORCE_REUSE;
  2083. tp->repair_queue = TCP_NO_QUEUE;
  2084. } else if (val == 0) {
  2085. tp->repair = 0;
  2086. sk->sk_reuse = SK_NO_REUSE;
  2087. tcp_send_window_probe(sk);
  2088. } else
  2089. err = -EINVAL;
  2090. break;
  2091. case TCP_REPAIR_QUEUE:
  2092. if (!tp->repair)
  2093. err = -EPERM;
  2094. else if (val < TCP_QUEUES_NR)
  2095. tp->repair_queue = val;
  2096. else
  2097. err = -EINVAL;
  2098. break;
  2099. case TCP_QUEUE_SEQ:
  2100. if (sk->sk_state != TCP_CLOSE)
  2101. err = -EPERM;
  2102. else if (tp->repair_queue == TCP_SEND_QUEUE)
  2103. tp->write_seq = val;
  2104. else if (tp->repair_queue == TCP_RECV_QUEUE)
  2105. tp->rcv_nxt = val;
  2106. else
  2107. err = -EINVAL;
  2108. break;
  2109. case TCP_REPAIR_OPTIONS:
  2110. if (!tp->repair)
  2111. err = -EINVAL;
  2112. else if (sk->sk_state == TCP_ESTABLISHED)
  2113. err = tcp_repair_options_est(tp,
  2114. (struct tcp_repair_opt __user *)optval,
  2115. optlen);
  2116. else
  2117. err = -EPERM;
  2118. break;
  2119. case TCP_CORK:
  2120. /* When set indicates to always queue non-full frames.
  2121. * Later the user clears this option and we transmit
  2122. * any pending partial frames in the queue. This is
  2123. * meant to be used alongside sendfile() to get properly
  2124. * filled frames when the user (for example) must write
  2125. * out headers with a write() call first and then use
  2126. * sendfile to send out the data parts.
  2127. *
  2128. * TCP_CORK can be set together with TCP_NODELAY and it is
  2129. * stronger than TCP_NODELAY.
  2130. */
  2131. if (val) {
  2132. tp->nonagle |= TCP_NAGLE_CORK;
  2133. } else {
  2134. tp->nonagle &= ~TCP_NAGLE_CORK;
  2135. if (tp->nonagle&TCP_NAGLE_OFF)
  2136. tp->nonagle |= TCP_NAGLE_PUSH;
  2137. tcp_push_pending_frames(sk);
  2138. }
  2139. break;
  2140. case TCP_KEEPIDLE:
  2141. if (val < 1 || val > MAX_TCP_KEEPIDLE)
  2142. err = -EINVAL;
  2143. else {
  2144. tp->keepalive_time = val * HZ;
  2145. if (sock_flag(sk, SOCK_KEEPOPEN) &&
  2146. !((1 << sk->sk_state) &
  2147. (TCPF_CLOSE | TCPF_LISTEN))) {
  2148. u32 elapsed = keepalive_time_elapsed(tp);
  2149. if (tp->keepalive_time > elapsed)
  2150. elapsed = tp->keepalive_time - elapsed;
  2151. else
  2152. elapsed = 0;
  2153. inet_csk_reset_keepalive_timer(sk, elapsed);
  2154. }
  2155. }
  2156. break;
  2157. case TCP_KEEPINTVL:
  2158. if (val < 1 || val > MAX_TCP_KEEPINTVL)
  2159. err = -EINVAL;
  2160. else
  2161. tp->keepalive_intvl = val * HZ;
  2162. break;
  2163. case TCP_KEEPCNT:
  2164. if (val < 1 || val > MAX_TCP_KEEPCNT)
  2165. err = -EINVAL;
  2166. else
  2167. tp->keepalive_probes = val;
  2168. break;
  2169. case TCP_SYNCNT:
  2170. if (val < 1 || val > MAX_TCP_SYNCNT)
  2171. err = -EINVAL;
  2172. else
  2173. icsk->icsk_syn_retries = val;
  2174. break;
  2175. case TCP_SAVE_SYN:
  2176. if (val < 0 || val > 1)
  2177. err = -EINVAL;
  2178. else
  2179. tp->save_syn = val;
  2180. break;
  2181. case TCP_LINGER2:
  2182. if (val < 0)
  2183. tp->linger2 = -1;
  2184. else if (val > sysctl_tcp_fin_timeout / HZ)
  2185. tp->linger2 = 0;
  2186. else
  2187. tp->linger2 = val * HZ;
  2188. break;
  2189. case TCP_DEFER_ACCEPT:
  2190. /* Translate value in seconds to number of retransmits */
  2191. icsk->icsk_accept_queue.rskq_defer_accept =
  2192. secs_to_retrans(val, TCP_TIMEOUT_INIT / HZ,
  2193. TCP_RTO_MAX / HZ);
  2194. break;
  2195. case TCP_WINDOW_CLAMP:
  2196. if (!val) {
  2197. if (sk->sk_state != TCP_CLOSE) {
  2198. err = -EINVAL;
  2199. break;
  2200. }
  2201. tp->window_clamp = 0;
  2202. } else
  2203. tp->window_clamp = val < SOCK_MIN_RCVBUF / 2 ?
  2204. SOCK_MIN_RCVBUF / 2 : val;
  2205. break;
  2206. case TCP_QUICKACK:
  2207. if (!val) {
  2208. icsk->icsk_ack.pingpong = 1;
  2209. } else {
  2210. icsk->icsk_ack.pingpong = 0;
  2211. if ((1 << sk->sk_state) &
  2212. (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT) &&
  2213. inet_csk_ack_scheduled(sk)) {
  2214. icsk->icsk_ack.pending |= ICSK_ACK_PUSHED;
  2215. tcp_cleanup_rbuf(sk, 1);
  2216. if (!(val & 1))
  2217. icsk->icsk_ack.pingpong = 1;
  2218. }
  2219. }
  2220. break;
  2221. #ifdef CONFIG_TCP_MD5SIG
  2222. case TCP_MD5SIG:
  2223. /* Read the IP->Key mappings from userspace */
  2224. err = tp->af_specific->md5_parse(sk, optval, optlen);
  2225. break;
  2226. #endif
  2227. case TCP_USER_TIMEOUT:
  2228. /* Cap the max time in ms TCP will retry or probe the window
  2229. * before giving up and aborting (ETIMEDOUT) a connection.
  2230. */
  2231. if (val < 0)
  2232. err = -EINVAL;
  2233. else
  2234. icsk->icsk_user_timeout = msecs_to_jiffies(val);
  2235. break;
  2236. case TCP_FASTOPEN:
  2237. if (val >= 0 && ((1 << sk->sk_state) & (TCPF_CLOSE |
  2238. TCPF_LISTEN))) {
  2239. tcp_fastopen_init_key_once(true);
  2240. fastopen_queue_tune(sk, val);
  2241. } else {
  2242. err = -EINVAL;
  2243. }
  2244. break;
  2245. case TCP_TIMESTAMP:
  2246. if (!tp->repair)
  2247. err = -EPERM;
  2248. else
  2249. tp->tsoffset = val - tcp_time_stamp;
  2250. break;
  2251. case TCP_NOTSENT_LOWAT:
  2252. tp->notsent_lowat = val;
  2253. sk->sk_write_space(sk);
  2254. break;
  2255. default:
  2256. err = -ENOPROTOOPT;
  2257. break;
  2258. }
  2259. release_sock(sk);
  2260. return err;
  2261. }
  2262. int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
  2263. unsigned int optlen)
  2264. {
  2265. const struct inet_connection_sock *icsk = inet_csk(sk);
  2266. if (level != SOL_TCP)
  2267. return icsk->icsk_af_ops->setsockopt(sk, level, optname,
  2268. optval, optlen);
  2269. return do_tcp_setsockopt(sk, level, optname, optval, optlen);
  2270. }
  2271. EXPORT_SYMBOL(tcp_setsockopt);
  2272. #ifdef CONFIG_COMPAT
  2273. int compat_tcp_setsockopt(struct sock *sk, int level, int optname,
  2274. char __user *optval, unsigned int optlen)
  2275. {
  2276. if (level != SOL_TCP)
  2277. return inet_csk_compat_setsockopt(sk, level, optname,
  2278. optval, optlen);
  2279. return do_tcp_setsockopt(sk, level, optname, optval, optlen);
  2280. }
  2281. EXPORT_SYMBOL(compat_tcp_setsockopt);
  2282. #endif
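/*
 * Userspace sketch of the TCP_CORK pattern described in do_tcp_setsockopt()
 * above: cork, write the headers, sendfile() the body, then uncork so any
 * partial frame left in the queue is pushed out. Illustrative only; error
 * handling is omitted and send_hdr_and_file() is a hypothetical name.
 */
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/sendfile.h>
#include <sys/socket.h>
#include <unistd.h>

static void send_hdr_and_file(int sock, const char *hdr, size_t hdr_len,
			      int file_fd, size_t file_len)
{
	int on = 1, off = 0;

	setsockopt(sock, IPPROTO_TCP, TCP_CORK, &on, sizeof(on));
	write(sock, hdr, hdr_len);
	sendfile(sock, file_fd, NULL, file_len);
	setsockopt(sock, IPPROTO_TCP, TCP_CORK, &off, sizeof(off));
}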
  2283. /* Return information about state of tcp endpoint in API format. */
  2284. void tcp_get_info(struct sock *sk, struct tcp_info *info)
  2285. {
  2286. const struct tcp_sock *tp = tcp_sk(sk); /* iff sk_type == SOCK_STREAM */
  2287. const struct inet_connection_sock *icsk = inet_csk(sk);
  2288. u32 now = tcp_time_stamp;
  2289. unsigned int start;
  2290. u64 rate64;
  2291. u32 rate;
  2292. memset(info, 0, sizeof(*info));
  2293. if (sk->sk_type != SOCK_STREAM)
  2294. return;
  2295. info->tcpi_state = sk_state_load(sk);
  2296. info->tcpi_ca_state = icsk->icsk_ca_state;
  2297. info->tcpi_retransmits = icsk->icsk_retransmits;
  2298. info->tcpi_probes = icsk->icsk_probes_out;
  2299. info->tcpi_backoff = icsk->icsk_backoff;
  2300. if (tp->rx_opt.tstamp_ok)
  2301. info->tcpi_options |= TCPI_OPT_TIMESTAMPS;
  2302. if (tcp_is_sack(tp))
  2303. info->tcpi_options |= TCPI_OPT_SACK;
  2304. if (tp->rx_opt.wscale_ok) {
  2305. info->tcpi_options |= TCPI_OPT_WSCALE;
  2306. info->tcpi_snd_wscale = tp->rx_opt.snd_wscale;
  2307. info->tcpi_rcv_wscale = tp->rx_opt.rcv_wscale;
  2308. }
  2309. if (tp->ecn_flags & TCP_ECN_OK)
  2310. info->tcpi_options |= TCPI_OPT_ECN;
  2311. if (tp->ecn_flags & TCP_ECN_SEEN)
  2312. info->tcpi_options |= TCPI_OPT_ECN_SEEN;
  2313. if (tp->syn_data_acked)
  2314. info->tcpi_options |= TCPI_OPT_SYN_DATA;
  2315. info->tcpi_rto = jiffies_to_usecs(icsk->icsk_rto);
  2316. info->tcpi_ato = jiffies_to_usecs(icsk->icsk_ack.ato);
  2317. info->tcpi_snd_mss = tp->mss_cache;
  2318. info->tcpi_rcv_mss = icsk->icsk_ack.rcv_mss;
  2319. if (info->tcpi_state == TCP_LISTEN) {
  2320. info->tcpi_unacked = sk->sk_ack_backlog;
  2321. info->tcpi_sacked = sk->sk_max_ack_backlog;
  2322. } else {
  2323. info->tcpi_unacked = tp->packets_out;
  2324. info->tcpi_sacked = tp->sacked_out;
  2325. }
  2326. info->tcpi_lost = tp->lost_out;
  2327. info->tcpi_retrans = tp->retrans_out;
  2328. info->tcpi_fackets = tp->fackets_out;
  2329. info->tcpi_last_data_sent = jiffies_to_msecs(now - tp->lsndtime);
  2330. info->tcpi_last_data_recv = jiffies_to_msecs(now - icsk->icsk_ack.lrcvtime);
  2331. info->tcpi_last_ack_recv = jiffies_to_msecs(now - tp->rcv_tstamp);
  2332. info->tcpi_pmtu = icsk->icsk_pmtu_cookie;
  2333. info->tcpi_rcv_ssthresh = tp->rcv_ssthresh;
  2334. info->tcpi_rtt = tp->srtt_us >> 3;
  2335. info->tcpi_rttvar = tp->mdev_us >> 2;
  2336. info->tcpi_snd_ssthresh = tp->snd_ssthresh;
  2337. info->tcpi_snd_cwnd = tp->snd_cwnd;
  2338. info->tcpi_advmss = tp->advmss;
  2339. info->tcpi_reordering = tp->reordering;
  2340. info->tcpi_rcv_rtt = jiffies_to_usecs(tp->rcv_rtt_est.rtt)>>3;
  2341. info->tcpi_rcv_space = tp->rcvq_space.space;
  2342. info->tcpi_total_retrans = tp->total_retrans;
  2343. rate = READ_ONCE(sk->sk_pacing_rate);
  2344. rate64 = rate != ~0U ? rate : ~0ULL;
  2345. put_unaligned(rate64, &info->tcpi_pacing_rate);
  2346. rate = READ_ONCE(sk->sk_max_pacing_rate);
  2347. rate64 = rate != ~0U ? rate : ~0ULL;
  2348. put_unaligned(rate64, &info->tcpi_max_pacing_rate);
  2349. do {
  2350. start = u64_stats_fetch_begin_irq(&tp->syncp);
  2351. put_unaligned(tp->bytes_acked, &info->tcpi_bytes_acked);
  2352. put_unaligned(tp->bytes_received, &info->tcpi_bytes_received);
  2353. } while (u64_stats_fetch_retry_irq(&tp->syncp, start));
  2354. info->tcpi_segs_out = tp->segs_out;
  2355. info->tcpi_segs_in = tp->segs_in;
  2356. }
  2357. EXPORT_SYMBOL_GPL(tcp_get_info);
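/*
 * Userspace sketch of reading the structure filled in by tcp_get_info()
 * above. The TCP_INFO getsockopt copies at most the length the caller asks
 * for, so older binaries keep working as struct tcp_info grows. Illustrative
 * only; dump_tcp_info() is a hypothetical helper name.
 */
#include <netinet/in.h>
#include <stdio.h>
#include <sys/socket.h>
#include <linux/tcp.h>		/* struct tcp_info, TCP_INFO */

static void dump_tcp_info(int fd)
{
	struct tcp_info info;
	socklen_t len = sizeof(info);

	if (getsockopt(fd, IPPROTO_TCP, TCP_INFO, &info, &len) == 0)
		printf("rtt %uus rttvar %uus cwnd %u retrans %u\n",
		       info.tcpi_rtt, info.tcpi_rttvar,
		       info.tcpi_snd_cwnd, info.tcpi_total_retrans);
}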
  2358. static int do_tcp_getsockopt(struct sock *sk, int level,
  2359. int optname, char __user *optval, int __user *optlen)
  2360. {
  2361. struct inet_connection_sock *icsk = inet_csk(sk);
  2362. struct tcp_sock *tp = tcp_sk(sk);
  2363. int val, len;
  2364. if (get_user(len, optlen))
  2365. return -EFAULT;
  2366. len = min_t(unsigned int, len, sizeof(int));
  2367. if (len < 0)
  2368. return -EINVAL;
  2369. switch (optname) {
  2370. case TCP_MAXSEG:
  2371. val = tp->mss_cache;
  2372. if (!val && ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)))
  2373. val = tp->rx_opt.user_mss;
  2374. if (tp->repair)
  2375. val = tp->rx_opt.mss_clamp;
  2376. break;
  2377. case TCP_NODELAY:
  2378. val = !!(tp->nonagle&TCP_NAGLE_OFF);
  2379. break;
  2380. case TCP_CORK:
  2381. val = !!(tp->nonagle&TCP_NAGLE_CORK);
  2382. break;
  2383. case TCP_KEEPIDLE:
  2384. val = keepalive_time_when(tp) / HZ;
  2385. break;
  2386. case TCP_KEEPINTVL:
  2387. val = keepalive_intvl_when(tp) / HZ;
  2388. break;
  2389. case TCP_KEEPCNT:
  2390. val = keepalive_probes(tp);
  2391. break;
  2392. case TCP_SYNCNT:
  2393. val = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries;
  2394. break;
  2395. case TCP_LINGER2:
  2396. val = tp->linger2;
  2397. if (val >= 0)
  2398. val = (val ? : sysctl_tcp_fin_timeout) / HZ;
  2399. break;
  2400. case TCP_DEFER_ACCEPT:
  2401. val = retrans_to_secs(icsk->icsk_accept_queue.rskq_defer_accept,
  2402. TCP_TIMEOUT_INIT / HZ, TCP_RTO_MAX / HZ);
  2403. break;
  2404. case TCP_WINDOW_CLAMP:
  2405. val = tp->window_clamp;
  2406. break;
  2407. case TCP_INFO: {
  2408. struct tcp_info info;
  2409. if (get_user(len, optlen))
  2410. return -EFAULT;
  2411. tcp_get_info(sk, &info);
  2412. len = min_t(unsigned int, len, sizeof(info));
  2413. if (put_user(len, optlen))
  2414. return -EFAULT;
  2415. if (copy_to_user(optval, &info, len))
  2416. return -EFAULT;
  2417. return 0;
  2418. }
  2419. case TCP_CC_INFO: {
  2420. const struct tcp_congestion_ops *ca_ops;
  2421. union tcp_cc_info info;
  2422. size_t sz = 0;
  2423. int attr;
  2424. if (get_user(len, optlen))
  2425. return -EFAULT;
  2426. ca_ops = icsk->icsk_ca_ops;
  2427. if (ca_ops && ca_ops->get_info)
  2428. sz = ca_ops->get_info(sk, ~0U, &attr, &info);
  2429. len = min_t(unsigned int, len, sz);
  2430. if (put_user(len, optlen))
  2431. return -EFAULT;
  2432. if (copy_to_user(optval, &info, len))
  2433. return -EFAULT;
  2434. return 0;
  2435. }
  2436. case TCP_QUICKACK:
  2437. val = !icsk->icsk_ack.pingpong;
  2438. break;
  2439. case TCP_CONGESTION:
  2440. if (get_user(len, optlen))
  2441. return -EFAULT;
  2442. len = min_t(unsigned int, len, TCP_CA_NAME_MAX);
  2443. if (put_user(len, optlen))
  2444. return -EFAULT;
  2445. if (copy_to_user(optval, icsk->icsk_ca_ops->name, len))
  2446. return -EFAULT;
  2447. return 0;
  2448. case TCP_THIN_LINEAR_TIMEOUTS:
  2449. val = tp->thin_lto;
  2450. break;
  2451. case TCP_THIN_DUPACK:
  2452. val = tp->thin_dupack;
  2453. break;
  2454. case TCP_REPAIR:
  2455. val = tp->repair;
  2456. break;
  2457. case TCP_REPAIR_QUEUE:
  2458. if (tp->repair)
  2459. val = tp->repair_queue;
  2460. else
  2461. return -EINVAL;
  2462. break;
  2463. case TCP_QUEUE_SEQ:
  2464. if (tp->repair_queue == TCP_SEND_QUEUE)
  2465. val = tp->write_seq;
  2466. else if (tp->repair_queue == TCP_RECV_QUEUE)
  2467. val = tp->rcv_nxt;
  2468. else
  2469. return -EINVAL;
  2470. break;
  2471. case TCP_USER_TIMEOUT:
  2472. val = jiffies_to_msecs(icsk->icsk_user_timeout);
  2473. break;
  2474. case TCP_FASTOPEN:
  2475. val = icsk->icsk_accept_queue.fastopenq.max_qlen;
  2476. break;
  2477. case TCP_TIMESTAMP:
  2478. val = tcp_time_stamp + tp->tsoffset;
  2479. break;
  2480. case TCP_NOTSENT_LOWAT:
  2481. val = tp->notsent_lowat;
  2482. break;
  2483. case TCP_SAVE_SYN:
  2484. val = tp->save_syn;
  2485. break;
  2486. case TCP_SAVED_SYN: {
  2487. if (get_user(len, optlen))
  2488. return -EFAULT;
  2489. lock_sock(sk);
  2490. if (tp->saved_syn) {
  2491. if (len < tp->saved_syn[0]) {
  2492. if (put_user(tp->saved_syn[0], optlen)) {
  2493. release_sock(sk);
  2494. return -EFAULT;
  2495. }
  2496. release_sock(sk);
  2497. return -EINVAL;
  2498. }
  2499. len = tp->saved_syn[0];
  2500. if (put_user(len, optlen)) {
  2501. release_sock(sk);
  2502. return -EFAULT;
  2503. }
  2504. if (copy_to_user(optval, tp->saved_syn + 1, len)) {
  2505. release_sock(sk);
  2506. return -EFAULT;
  2507. }
  2508. tcp_saved_syn_free(tp);
  2509. release_sock(sk);
  2510. } else {
  2511. release_sock(sk);
  2512. len = 0;
  2513. if (put_user(len, optlen))
  2514. return -EFAULT;
  2515. }
  2516. return 0;
  2517. }
  2518. default:
  2519. return -ENOPROTOOPT;
  2520. }
  2521. if (put_user(len, optlen))
  2522. return -EFAULT;
  2523. if (copy_to_user(optval, &val, len))
  2524. return -EFAULT;
  2525. return 0;
  2526. }
  2527. int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval,
  2528. int __user *optlen)
  2529. {
  2530. struct inet_connection_sock *icsk = inet_csk(sk);
  2531. if (level != SOL_TCP)
  2532. return icsk->icsk_af_ops->getsockopt(sk, level, optname,
  2533. optval, optlen);
  2534. return do_tcp_getsockopt(sk, level, optname, optval, optlen);
  2535. }
  2536. EXPORT_SYMBOL(tcp_getsockopt);
  2537. #ifdef CONFIG_COMPAT
  2538. int compat_tcp_getsockopt(struct sock *sk, int level, int optname,
  2539. char __user *optval, int __user *optlen)
  2540. {
  2541. if (level != SOL_TCP)
  2542. return inet_csk_compat_getsockopt(sk, level, optname,
  2543. optval, optlen);
  2544. return do_tcp_getsockopt(sk, level, optname, optval, optlen);
  2545. }
  2546. EXPORT_SYMBOL(compat_tcp_getsockopt);
  2547. #endif
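/*
 * Userspace sketch for the TCP_CONGESTION option handled above: the value is
 * the NUL-terminated name of a congestion control module, at most
 * TCP_CA_NAME_MAX (16) bytes. Illustrative only; switch_congestion_control()
 * is a hypothetical helper name.
 */
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>

static void switch_congestion_control(int fd, const char *name)
{
	char cur[16];
	socklen_t len = sizeof(cur);

	if (setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, name, strlen(name)))
		perror("TCP_CONGESTION");
	if (getsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, cur, &len) == 0)
		printf("congestion control now: %s\n", cur);
}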
  2548. #ifdef CONFIG_TCP_MD5SIG
  2549. static DEFINE_PER_CPU(struct tcp_md5sig_pool, tcp_md5sig_pool);
  2550. static DEFINE_MUTEX(tcp_md5sig_mutex);
  2551. static bool tcp_md5sig_pool_populated = false;
  2552. static void __tcp_alloc_md5sig_pool(void)
  2553. {
  2554. int cpu;
  2555. for_each_possible_cpu(cpu) {
  2556. if (!per_cpu(tcp_md5sig_pool, cpu).md5_desc.tfm) {
  2557. struct crypto_hash *hash;
  2558. hash = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
  2559. if (IS_ERR_OR_NULL(hash))
  2560. return;
  2561. per_cpu(tcp_md5sig_pool, cpu).md5_desc.tfm = hash;
  2562. }
  2563. }
  2564. /* before setting tcp_md5sig_pool_populated, we must commit all writes
  2565. * to memory. See smp_rmb() in tcp_get_md5sig_pool()
  2566. */
  2567. smp_wmb();
  2568. tcp_md5sig_pool_populated = true;
  2569. }
  2570. bool tcp_alloc_md5sig_pool(void)
  2571. {
  2572. if (unlikely(!tcp_md5sig_pool_populated)) {
  2573. mutex_lock(&tcp_md5sig_mutex);
  2574. if (!tcp_md5sig_pool_populated)
  2575. __tcp_alloc_md5sig_pool();
  2576. mutex_unlock(&tcp_md5sig_mutex);
  2577. }
  2578. return tcp_md5sig_pool_populated;
  2579. }
  2580. EXPORT_SYMBOL(tcp_alloc_md5sig_pool);
  2581. /**
  2582. * tcp_get_md5sig_pool - get md5sig_pool for this user
  2583. *
2584. * We use a percpu structure, so if we succeed, we exit with preemption
2585. * and BH disabled, to make sure another thread or softirq handler
2586. * won't try to get the same context.
  2587. */
  2588. struct tcp_md5sig_pool *tcp_get_md5sig_pool(void)
  2589. {
  2590. local_bh_disable();
  2591. if (tcp_md5sig_pool_populated) {
  2592. /* coupled with smp_wmb() in __tcp_alloc_md5sig_pool() */
  2593. smp_rmb();
  2594. return this_cpu_ptr(&tcp_md5sig_pool);
  2595. }
  2596. local_bh_enable();
  2597. return NULL;
  2598. }
  2599. EXPORT_SYMBOL(tcp_get_md5sig_pool);
  2600. int tcp_md5_hash_header(struct tcp_md5sig_pool *hp,
  2601. const struct tcphdr *th)
  2602. {
  2603. struct scatterlist sg;
  2604. struct tcphdr hdr;
  2605. int err;
  2606. /* We are not allowed to change tcphdr, make a local copy */
  2607. memcpy(&hdr, th, sizeof(hdr));
  2608. hdr.check = 0;
  2609. /* options aren't included in the hash */
  2610. sg_init_one(&sg, &hdr, sizeof(hdr));
  2611. err = crypto_hash_update(&hp->md5_desc, &sg, sizeof(hdr));
  2612. return err;
  2613. }
  2614. EXPORT_SYMBOL(tcp_md5_hash_header);
  2615. int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *hp,
  2616. const struct sk_buff *skb, unsigned int header_len)
  2617. {
  2618. struct scatterlist sg;
  2619. const struct tcphdr *tp = tcp_hdr(skb);
  2620. struct hash_desc *desc = &hp->md5_desc;
  2621. unsigned int i;
  2622. const unsigned int head_data_len = skb_headlen(skb) > header_len ?
  2623. skb_headlen(skb) - header_len : 0;
  2624. const struct skb_shared_info *shi = skb_shinfo(skb);
  2625. struct sk_buff *frag_iter;
  2626. sg_init_table(&sg, 1);
  2627. sg_set_buf(&sg, ((u8 *) tp) + header_len, head_data_len);
  2628. if (crypto_hash_update(desc, &sg, head_data_len))
  2629. return 1;
  2630. for (i = 0; i < shi->nr_frags; ++i) {
  2631. const struct skb_frag_struct *f = &shi->frags[i];
  2632. unsigned int offset = f->page_offset;
  2633. struct page *page = skb_frag_page(f) + (offset >> PAGE_SHIFT);
  2634. sg_set_page(&sg, page, skb_frag_size(f),
  2635. offset_in_page(offset));
  2636. if (crypto_hash_update(desc, &sg, skb_frag_size(f)))
  2637. return 1;
  2638. }
  2639. skb_walk_frags(skb, frag_iter)
  2640. if (tcp_md5_hash_skb_data(hp, frag_iter, 0))
  2641. return 1;
  2642. return 0;
  2643. }
  2644. EXPORT_SYMBOL(tcp_md5_hash_skb_data);
  2645. int tcp_md5_hash_key(struct tcp_md5sig_pool *hp, const struct tcp_md5sig_key *key)
  2646. {
  2647. struct scatterlist sg;
  2648. sg_init_one(&sg, key->key, key->keylen);
  2649. return crypto_hash_update(&hp->md5_desc, &sg, key->keylen);
  2650. }
  2651. EXPORT_SYMBOL(tcp_md5_hash_key);
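/*
 * Sketch of how the MD5 pool helpers above chain together, loosely modelled
 * on the IPv4/IPv6 signing paths but simplified: the real callers also hash
 * an address-family-specific pseudoheader before the TCP header, which is
 * omitted here. tcp_md5_hash_example() is a hypothetical name used only for
 * illustration.
 */
static int tcp_md5_hash_example(u8 *md5_hash, const struct tcp_md5sig_key *key,
				const struct sk_buff *skb,
				const struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;

	hp = tcp_get_md5sig_pool();	/* returns with BHs disabled */
	if (!hp)
		return 1;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc) ||
	    tcp_md5_hash_header(hp, th) ||
	    tcp_md5_hash_skb_data(hp, skb, th->doff << 2) ||
	    tcp_md5_hash_key(hp, key) ||
	    crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();		/* re-enables BHs */
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
	memset(md5_hash, 0, 16);
	return 1;
}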
  2652. #endif
  2653. void tcp_done(struct sock *sk)
  2654. {
  2655. struct request_sock *req = tcp_sk(sk)->fastopen_rsk;
  2656. if (sk->sk_state == TCP_SYN_SENT || sk->sk_state == TCP_SYN_RECV)
  2657. TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_ATTEMPTFAILS);
  2658. tcp_set_state(sk, TCP_CLOSE);
  2659. tcp_clear_xmit_timers(sk);
  2660. if (req)
  2661. reqsk_fastopen_remove(sk, req, false);
  2662. sk->sk_shutdown = SHUTDOWN_MASK;
  2663. if (!sock_flag(sk, SOCK_DEAD))
  2664. sk->sk_state_change(sk);
  2665. else
  2666. inet_csk_destroy_sock(sk);
  2667. }
  2668. EXPORT_SYMBOL_GPL(tcp_done);
  2669. int tcp_abort(struct sock *sk, int err)
  2670. {
  2671. if (!sk_fullsock(sk)) {
  2672. if (sk->sk_state == TCP_NEW_SYN_RECV) {
  2673. struct request_sock *req = inet_reqsk(sk);
  2674. local_bh_disable();
  2675. inet_csk_reqsk_queue_drop_and_put(req->rsk_listener,
  2676. req);
  2677. local_bh_enable();
  2678. return 0;
  2679. }
  2680. sock_gen_put(sk);
  2681. return -EOPNOTSUPP;
  2682. }
  2683. /* Don't race with userspace socket closes such as tcp_close. */
  2684. lock_sock(sk);
  2685. if (sk->sk_state == TCP_LISTEN) {
  2686. tcp_set_state(sk, TCP_CLOSE);
  2687. inet_csk_listen_stop(sk);
  2688. }
  2689. /* Don't race with BH socket closes such as inet_csk_listen_stop. */
  2690. local_bh_disable();
  2691. bh_lock_sock(sk);
  2692. if (!sock_flag(sk, SOCK_DEAD)) {
  2693. sk->sk_err = err;
  2694. /* This barrier is coupled with smp_rmb() in tcp_poll() */
  2695. smp_wmb();
  2696. sk->sk_error_report(sk);
  2697. if (tcp_need_reset(sk->sk_state))
  2698. tcp_send_active_reset(sk, GFP_ATOMIC);
  2699. tcp_done(sk);
  2700. }
  2701. bh_unlock_sock(sk);
  2702. local_bh_enable();
  2703. release_sock(sk);
  2704. sock_put(sk);
  2705. return 0;
  2706. }
  2707. EXPORT_SYMBOL_GPL(tcp_abort);
  2708. extern struct tcp_congestion_ops tcp_reno;
  2709. static __initdata unsigned long thash_entries;
  2710. static int __init set_thash_entries(char *str)
  2711. {
  2712. ssize_t ret;
  2713. if (!str)
  2714. return 0;
  2715. ret = kstrtoul(str, 0, &thash_entries);
  2716. if (ret)
  2717. return 0;
  2718. return 1;
  2719. }
  2720. __setup("thash_entries=", set_thash_entries);
  2721. static void __init tcp_init_mem(void)
  2722. {
  2723. unsigned long limit = nr_free_buffer_pages() / 16;
  2724. limit = max(limit, 128UL);
  2725. sysctl_tcp_mem[0] = limit / 4 * 3; /* 4.68 % */
  2726. sysctl_tcp_mem[1] = limit; /* 6.25 % */
  2727. sysctl_tcp_mem[2] = sysctl_tcp_mem[0] * 2; /* 9.37 % */
  2728. }
  2729. void __init tcp_init(void)
  2730. {
  2731. unsigned long limit;
  2732. int max_rshare, max_wshare, cnt;
  2733. unsigned int i;
  2734. sock_skb_cb_check_size(sizeof(struct tcp_skb_cb));
  2735. percpu_counter_init(&tcp_sockets_allocated, 0, GFP_KERNEL);
  2736. percpu_counter_init(&tcp_orphan_count, 0, GFP_KERNEL);
  2737. tcp_hashinfo.bind_bucket_cachep =
  2738. kmem_cache_create("tcp_bind_bucket",
  2739. sizeof(struct inet_bind_bucket), 0,
  2740. SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
  2741. /* Size and allocate the main established and bind bucket
  2742. * hash tables.
  2743. *
  2744. * The methodology is similar to that of the buffer cache.
  2745. */
  2746. tcp_hashinfo.ehash =
  2747. alloc_large_system_hash("TCP established",
  2748. sizeof(struct inet_ehash_bucket),
  2749. thash_entries,
  2750. 17, /* one slot per 128 KB of memory */
  2751. 0,
  2752. NULL,
  2753. &tcp_hashinfo.ehash_mask,
  2754. 0,
  2755. thash_entries ? 0 : 512 * 1024);
  2756. for (i = 0; i <= tcp_hashinfo.ehash_mask; i++)
  2757. INIT_HLIST_NULLS_HEAD(&tcp_hashinfo.ehash[i].chain, i);
  2758. if (inet_ehash_locks_alloc(&tcp_hashinfo))
  2759. panic("TCP: failed to alloc ehash_locks");
  2760. tcp_hashinfo.bhash =
  2761. alloc_large_system_hash("TCP bind",
  2762. sizeof(struct inet_bind_hashbucket),
  2763. tcp_hashinfo.ehash_mask + 1,
  2764. 17, /* one slot per 128 KB of memory */
  2765. 0,
  2766. &tcp_hashinfo.bhash_size,
  2767. NULL,
  2768. 0,
  2769. 64 * 1024);
  2770. tcp_hashinfo.bhash_size = 1U << tcp_hashinfo.bhash_size;
  2771. for (i = 0; i < tcp_hashinfo.bhash_size; i++) {
  2772. spin_lock_init(&tcp_hashinfo.bhash[i].lock);
  2773. INIT_HLIST_HEAD(&tcp_hashinfo.bhash[i].chain);
  2774. }
  2775. cnt = tcp_hashinfo.ehash_mask + 1;
  2776. tcp_death_row.sysctl_max_tw_buckets = cnt / 2;
  2777. sysctl_tcp_max_orphans = cnt / 2;
  2778. sysctl_max_syn_backlog = max(128, cnt / 256);
  2779. tcp_init_mem();
  2780. /* Set per-socket limits to no more than 1/128 the pressure threshold */
  2781. limit = nr_free_buffer_pages() << (PAGE_SHIFT - 7);
  2782. max_wshare = min(4UL*1024*1024, limit);
  2783. max_rshare = min(6UL*1024*1024, limit);
  2784. sysctl_tcp_wmem[0] = SK_MEM_QUANTUM;
  2785. sysctl_tcp_wmem[1] = 16*1024;
  2786. sysctl_tcp_wmem[2] = max(64*1024, max_wshare);
  2787. sysctl_tcp_rmem[0] = SK_MEM_QUANTUM;
  2788. sysctl_tcp_rmem[1] = 87380;
  2789. sysctl_tcp_rmem[2] = max(87380, max_rshare);
  2790. pr_info("Hash tables configured (established %u bind %u)\n",
  2791. tcp_hashinfo.ehash_mask + 1, tcp_hashinfo.bhash_size);
  2792. tcp_metrics_init();
  2793. BUG_ON(tcp_register_congestion_control(&tcp_reno) != 0);
  2794. tcp_tasklet_init();
  2795. }