skbuff.c 137 KB

/*
 * Routines having to do with the 'struct sk_buff' memory handlers.
 *
 * Authors:	Alan Cox <alan@lxorguk.ukuu.org.uk>
 *		Florian La Roche <rzsfl@rz.uni-sb.de>
 *
 * Fixes:
 *	Alan Cox	:	Fixed the worst of the load
 *				balancer bugs.
 *	Dave Platt	:	Interrupt stacking fix.
 *	Richard Kooijman:	Timestamp fixes.
 *	Alan Cox	:	Changed buffer format.
 *	Alan Cox	:	destructor hook for AF_UNIX etc.
 *	Linus Torvalds	:	Better skb_clone.
 *	Alan Cox	:	Added skb_copy.
 *	Alan Cox	:	Added all the changed routines Linus
 *				only put in the headers
 *	Ray VanTassle	:	Fixed --skb->lock in free
 *	Alan Cox	:	skb_copy copy arp field
 *	Andi Kleen	:	slabified it.
 *	Robert Olsson	:	Removed skb_head_pool
 *
 * NOTE:
 *	The __skb_ routines should be called with interrupts
 *	disabled, or you better be *real* sure that the operation is atomic
 *	with respect to whatever list is being frobbed (e.g. via lock_sock()
 *	or via disabling bottom half handlers, etc).
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

/*
 * The functions in this file will not compile correctly with gcc 2.4.x
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/slab.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/sctp.h>
#include <linux/netdevice.h>
#ifdef CONFIG_NET_CLS_ACT
#include <net/pkt_sched.h>
#endif
#include <linux/string.h>
#include <linux/skbuff.h>
#include <linux/splice.h>
#include <linux/cache.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/scatterlist.h>
#include <linux/errqueue.h>
#include <linux/prefetch.h>
#include <linux/if_vlan.h>
#include <net/protocol.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <net/xfrm.h>
#include <linux/uaccess.h>
#include <trace/events/skb.h>
#include <linux/highmem.h>
#include <linux/capability.h>
#include <linux/user_namespace.h>

struct kmem_cache *skbuff_head_cache __ro_after_init;
static struct kmem_cache *skbuff_fclone_cache __ro_after_init;
int sysctl_max_skb_frags __read_mostly = MAX_SKB_FRAGS;
EXPORT_SYMBOL(sysctl_max_skb_frags);

/**
 * skb_panic - private function for out-of-line support
 * @skb: buffer
 * @sz: size
 * @addr: address
 * @msg: skb_over_panic or skb_under_panic
 *
 * Out-of-line support for skb_put() and skb_push().
 * Called via the wrapper skb_over_panic() or skb_under_panic().
 * Keep out of line to prevent kernel bloat.
 * __builtin_return_address is not used because it is not always reliable.
 */
static void skb_panic(struct sk_buff *skb, unsigned int sz, void *addr,
                      const char msg[])
{
        pr_emerg("%s: text:%p len:%d put:%d head:%p data:%p tail:%#lx end:%#lx dev:%s\n",
                 msg, addr, skb->len, sz, skb->head, skb->data,
                 (unsigned long)skb->tail, (unsigned long)skb->end,
                 skb->dev ? skb->dev->name : "<NULL>");
        BUG();
}

static void skb_over_panic(struct sk_buff *skb, unsigned int sz, void *addr)
{
        skb_panic(skb, sz, addr, __func__);
}

static void skb_under_panic(struct sk_buff *skb, unsigned int sz, void *addr)
{
        skb_panic(skb, sz, addr, __func__);
}

/*
 * kmalloc_reserve is a wrapper around kmalloc_node_track_caller that tells
 * the caller if emergency pfmemalloc reserves are being used. If it is and
 * the socket is later found to be SOCK_MEMALLOC then PFMEMALLOC reserves
 * may be used. Otherwise, the packet data may be discarded until enough
 * memory is free
 */
#define kmalloc_reserve(size, gfp, node, pfmemalloc) \
        __kmalloc_reserve(size, gfp, node, _RET_IP_, pfmemalloc)

static void *__kmalloc_reserve(size_t size, gfp_t flags, int node,
                               unsigned long ip, bool *pfmemalloc)
{
        void *obj;
        bool ret_pfmemalloc = false;
        /*
         * Try a regular allocation, when that fails and we're not entitled
         * to the reserves, fail.
         */
        obj = kmalloc_node_track_caller(size,
                                        flags | __GFP_NOMEMALLOC | __GFP_NOWARN,
                                        node);
        if (obj || !(gfp_pfmemalloc_allowed(flags)))
                goto out;
        /* Try again but now we are using pfmemalloc reserves */
        ret_pfmemalloc = true;
        obj = kmalloc_node_track_caller(size, flags, node);
out:
        if (pfmemalloc)
                *pfmemalloc = ret_pfmemalloc;
        return obj;
}

/* Allocate a new skbuff. We do this ourselves so we can fill in a few
 * 'private' fields and also do memory statistics to find all the
 * [BEEP] leaks.
 *
 */

/**
 * __alloc_skb - allocate a network buffer
 * @size: size to allocate
 * @gfp_mask: allocation mask
 * @flags: If SKB_ALLOC_FCLONE is set, allocate from fclone cache
 *	instead of head cache and allocate a cloned (child) skb.
 *	If SKB_ALLOC_RX is set, __GFP_MEMALLOC will be used for
 *	allocations in case the data is required for writeback
 * @node: numa node to allocate memory on
 *
 * Allocate a new &sk_buff. The returned buffer has no headroom and a
 * tail room of at least size bytes. The object has a reference count
 * of one. The return is the buffer. On a failure the return is %NULL.
 *
 * Buffers may only be allocated from interrupts using a @gfp_mask of
 * %GFP_ATOMIC.
 */
struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
                            int flags, int node)
{
        struct kmem_cache *cache;
        struct skb_shared_info *shinfo;
        struct sk_buff *skb;
        u8 *data;
        bool pfmemalloc;

        cache = (flags & SKB_ALLOC_FCLONE)
                ? skbuff_fclone_cache : skbuff_head_cache;
        if (sk_memalloc_socks() && (flags & SKB_ALLOC_RX))
                gfp_mask |= __GFP_MEMALLOC;
        /* Get the HEAD */
        skb = kmem_cache_alloc_node(cache, gfp_mask & ~__GFP_DMA, node);
        if (!skb)
                goto out;
        prefetchw(skb);
        /* We do our best to align skb_shared_info on a separate cache
         * line. It usually works because kmalloc(X > SMP_CACHE_BYTES) gives
         * aligned memory blocks, unless SLUB/SLAB debug is enabled.
         * Both skb->head and skb_shared_info are cache line aligned.
         */
        size = SKB_DATA_ALIGN(size);
        size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
        data = kmalloc_reserve(size, gfp_mask, node, &pfmemalloc);
        if (!data)
                goto nodata;
        /* kmalloc(size) might give us more room than requested.
         * Put skb_shared_info exactly at the end of allocated zone,
         * to allow max possible filling before reallocation.
         */
        size = SKB_WITH_OVERHEAD(ksize(data));
        prefetchw(data + size);
        /*
         * Only clear those fields we need to clear, not those that we will
         * actually initialise below. Hence, don't put any more fields after
         * the tail pointer in struct sk_buff!
         */
        memset(skb, 0, offsetof(struct sk_buff, tail));
        /* Account for allocated memory : skb + skb->head */
        skb->truesize = SKB_TRUESIZE(size);
        skb->pfmemalloc = pfmemalloc;
        refcount_set(&skb->users, 1);
        skb->head = data;
        skb->data = data;
        skb_reset_tail_pointer(skb);
        skb->end = skb->tail + size;
        skb->mac_header = (typeof(skb->mac_header))~0U;
        skb->transport_header = (typeof(skb->transport_header))~0U;
        /* make sure we initialize shinfo sequentially */
        shinfo = skb_shinfo(skb);
        memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
        atomic_set(&shinfo->dataref, 1);

        if (flags & SKB_ALLOC_FCLONE) {
                struct sk_buff_fclones *fclones;

                fclones = container_of(skb, struct sk_buff_fclones, skb1);
                skb->fclone = SKB_FCLONE_ORIG;
                refcount_set(&fclones->fclone_ref, 1);
                fclones->skb2.fclone = SKB_FCLONE_CLONE;
        }
out:
        return skb;
nodata:
        kmem_cache_free(cache, skb);
        skb = NULL;
        goto out;
}
EXPORT_SYMBOL(__alloc_skb);
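
/* Illustrative sketch (not part of the original file): callers normally go
 * through the alloc_skb() wrapper rather than __alloc_skb() directly, then
 * reserve headroom and append payload. The names hdr_len, data_len and
 * payload below are placeholders for this example only.
 *
 *	struct sk_buff *skb = alloc_skb(hdr_len + data_len, GFP_ATOMIC);
 *	if (!skb)
 *		return -ENOMEM;
 *	skb_reserve(skb, hdr_len);		// headroom for protocol headers
 *	skb_put_data(skb, payload, data_len);	// append payload at the tail
 */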

/**
 * __build_skb - build a network buffer
 * @data: data buffer provided by caller
 * @frag_size: size of data, or 0 if head was kmalloced
 *
 * Allocate a new &sk_buff. Caller provides space holding head and
 * skb_shared_info. @data must have been allocated by kmalloc() only if
 * @frag_size is 0, otherwise data should come from the page allocator
 * or vmalloc()
 * The return is the new skb buffer.
 * On a failure the return is %NULL, and @data is not freed.
 * Notes :
 * Before IO, driver allocates only data buffer where NIC put incoming frame
 * Driver should add room at head (NET_SKB_PAD) and
 * MUST add room at tail (SKB_DATA_ALIGN(skb_shared_info))
 * After IO, driver calls build_skb(), to allocate sk_buff and populate it
 * before giving packet to stack.
 * RX rings only contain data buffers, not full skbs.
 */
struct sk_buff *__build_skb(void *data, unsigned int frag_size)
{
        struct skb_shared_info *shinfo;
        struct sk_buff *skb;
        unsigned int size = frag_size ? : ksize(data);

        skb = kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC);
        if (!skb)
                return NULL;

        size -= SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

        memset(skb, 0, offsetof(struct sk_buff, tail));
        skb->truesize = SKB_TRUESIZE(size);
        refcount_set(&skb->users, 1);
        skb->head = data;
        skb->data = data;
        skb_reset_tail_pointer(skb);
        skb->end = skb->tail + size;
        skb->mac_header = (typeof(skb->mac_header))~0U;
        skb->transport_header = (typeof(skb->transport_header))~0U;
        /* make sure we initialize shinfo sequentially */
        shinfo = skb_shinfo(skb);
        memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
        atomic_set(&shinfo->dataref, 1);
        return skb;
}

/* build_skb() is wrapper over __build_skb(), that specifically
 * takes care of skb->head and skb->pfmemalloc
 * This means that if @frag_size is not zero, then @data must be backed
 * by a page fragment, not kmalloc() or vmalloc()
 */
struct sk_buff *build_skb(void *data, unsigned int frag_size)
{
        struct sk_buff *skb = __build_skb(data, frag_size);

        if (skb && frag_size) {
                skb->head_frag = 1;
                if (page_is_pfmemalloc(virt_to_head_page(data)))
                        skb->pfmemalloc = 1;
        }
        return skb;
}
EXPORT_SYMBOL(build_skb);
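
/* Illustrative sketch (not part of the original file): the driver RX pattern
 * described in the __build_skb() notes above, assuming the buffer came from a
 * page-fragment allocator. rx_buf and pkt_len are placeholders, and the
 * truesize computation here is an assumption, not a rule taken from this file.
 *
 *	unsigned int truesize = SKB_DATA_ALIGN(NET_SKB_PAD + pkt_len) +
 *				SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 *	struct sk_buff *skb = build_skb(rx_buf, truesize);
 *	if (skb) {
 *		skb_reserve(skb, NET_SKB_PAD);	// headroom added before DMA
 *		skb_put(skb, pkt_len);		// bytes the NIC actually wrote
 *	}
 */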

#define NAPI_SKB_CACHE_SIZE	64

struct napi_alloc_cache {
        struct page_frag_cache page;
        unsigned int skb_count;
        void *skb_cache[NAPI_SKB_CACHE_SIZE];
};

static DEFINE_PER_CPU(struct page_frag_cache, netdev_alloc_cache);
static DEFINE_PER_CPU(struct napi_alloc_cache, napi_alloc_cache);

static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
{
        struct page_frag_cache *nc;
        unsigned long flags;
        void *data;

        local_irq_save(flags);
        nc = this_cpu_ptr(&netdev_alloc_cache);
        data = page_frag_alloc(nc, fragsz, gfp_mask);
        local_irq_restore(flags);
        return data;
}

/**
 * netdev_alloc_frag - allocate a page fragment
 * @fragsz: fragment size
 *
 * Allocates a frag from a page for receive buffer.
 * Uses GFP_ATOMIC allocations.
 */
void *netdev_alloc_frag(unsigned int fragsz)
{
        return __netdev_alloc_frag(fragsz, GFP_ATOMIC);
}
EXPORT_SYMBOL(netdev_alloc_frag);

static void *__napi_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
{
        struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);

        return page_frag_alloc(&nc->page, fragsz, gfp_mask);
}

void *napi_alloc_frag(unsigned int fragsz)
{
        return __napi_alloc_frag(fragsz, GFP_ATOMIC);
}
EXPORT_SYMBOL(napi_alloc_frag);

/**
 * __netdev_alloc_skb - allocate an skbuff for rx on a specific device
 * @dev: network device to receive on
 * @len: length to allocate
 * @gfp_mask: get_free_pages mask, passed to alloc_skb
 *
 * Allocate a new &sk_buff and assign it a usage count of one. The
 * buffer has NET_SKB_PAD headroom built in. Users should allocate
 * the headroom they think they need without accounting for the
 * built in space. The built in space is used for optimisations.
 *
 * %NULL is returned if there is no free memory.
 */
struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int len,
                                   gfp_t gfp_mask)
{
        struct page_frag_cache *nc;
        unsigned long flags;
        struct sk_buff *skb;
        bool pfmemalloc;
        void *data;

        len += NET_SKB_PAD;

        if ((len > SKB_WITH_OVERHEAD(PAGE_SIZE)) ||
            (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) {
                skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE);
                if (!skb)
                        goto skb_fail;
                goto skb_success;
        }

        len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
        len = SKB_DATA_ALIGN(len);

        if (sk_memalloc_socks())
                gfp_mask |= __GFP_MEMALLOC;

        local_irq_save(flags);
        nc = this_cpu_ptr(&netdev_alloc_cache);
        data = page_frag_alloc(nc, len, gfp_mask);
        pfmemalloc = nc->pfmemalloc;
        local_irq_restore(flags);

        if (unlikely(!data))
                return NULL;

        skb = __build_skb(data, len);
        if (unlikely(!skb)) {
                skb_free_frag(data);
                return NULL;
        }

        /* use OR instead of assignment to avoid clearing of bits in mask */
        if (pfmemalloc)
                skb->pfmemalloc = 1;
        skb->head_frag = 1;

skb_success:
        skb_reserve(skb, NET_SKB_PAD);
        skb->dev = dev;

skb_fail:
        return skb;
}
EXPORT_SYMBOL(__netdev_alloc_skb);
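
/* Illustrative sketch (not part of the original file): netdev_alloc_skb()
 * already accounts for NET_SKB_PAD, so a driver asks only for the frame
 * length it expects. dev and frame_len are placeholders.
 *
 *	struct sk_buff *skb = netdev_alloc_skb(dev, frame_len);
 *	if (!skb)
 *		return -ENOMEM;
 *	skb_put(skb, frame_len);	// after the NIC has filled the buffer
 */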

/**
 * __napi_alloc_skb - allocate skbuff for rx in a specific NAPI instance
 * @napi: napi instance this buffer was allocated for
 * @len: length to allocate
 * @gfp_mask: get_free_pages mask, passed to alloc_skb and alloc_pages
 *
 * Allocate a new sk_buff for use in NAPI receive. This buffer will
 * attempt to allocate the head from a special reserved region used
 * only for NAPI Rx allocation. By doing this we can save several
 * CPU cycles by avoiding having to disable and re-enable IRQs.
 *
 * %NULL is returned if there is no free memory.
 */
struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len,
                                 gfp_t gfp_mask)
{
        struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
        struct sk_buff *skb;
        void *data;

        len += NET_SKB_PAD + NET_IP_ALIGN;

        if ((len > SKB_WITH_OVERHEAD(PAGE_SIZE)) ||
            (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) {
                skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE);
                if (!skb)
                        goto skb_fail;
                goto skb_success;
        }

        len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
        len = SKB_DATA_ALIGN(len);

        if (sk_memalloc_socks())
                gfp_mask |= __GFP_MEMALLOC;

        data = page_frag_alloc(&nc->page, len, gfp_mask);
        if (unlikely(!data))
                return NULL;

        skb = __build_skb(data, len);
        if (unlikely(!skb)) {
                skb_free_frag(data);
                return NULL;
        }

        /* use OR instead of assignment to avoid clearing of bits in mask */
        if (nc->page.pfmemalloc)
                skb->pfmemalloc = 1;
        skb->head_frag = 1;

skb_success:
        skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
        skb->dev = napi->dev;

skb_fail:
        return skb;
}
EXPORT_SYMBOL(__napi_alloc_skb);
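
/* Illustrative note (not part of the original file): inside a NAPI poll
 * handler, napi_alloc_skb() is the preferred allocator because it uses the
 * per-CPU NAPI cache without saving/restoring IRQ state. napi and len are
 * placeholders.
 *
 *	struct sk_buff *skb = napi_alloc_skb(napi, len);
 */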

void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
                     int size, unsigned int truesize)
{
        skb_fill_page_desc(skb, i, page, off, size);
        skb->len += size;
        skb->data_len += size;
        skb->truesize += truesize;
}
EXPORT_SYMBOL(skb_add_rx_frag);
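
/* Illustrative sketch (not part of the original file): attaching a received
 * page to an already-allocated skb as fragment 0. page, offset and frag_len
 * are placeholders; truesize is typically the buffer size the driver
 * dedicated to this fragment (PAGE_SIZE here is only an example).
 *
 *	skb_add_rx_frag(skb, 0, page, offset, frag_len, PAGE_SIZE);
 */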

void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size,
                          unsigned int truesize)
{
        skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

        skb_frag_size_add(frag, size);
        skb->len += size;
        skb->data_len += size;
        skb->truesize += truesize;
}
EXPORT_SYMBOL(skb_coalesce_rx_frag);

static void skb_drop_list(struct sk_buff **listp)
{
        kfree_skb_list(*listp);
        *listp = NULL;
}

static inline void skb_drop_fraglist(struct sk_buff *skb)
{
        skb_drop_list(&skb_shinfo(skb)->frag_list);
}

static void skb_clone_fraglist(struct sk_buff *skb)
{
        struct sk_buff *list;

        skb_walk_frags(skb, list)
                skb_get(list);
}

static void skb_free_head(struct sk_buff *skb)
{
        unsigned char *head = skb->head;

        if (skb->head_frag)
                skb_free_frag(head);
        else
                kfree(head);
}

static void skb_release_data(struct sk_buff *skb)
{
        struct skb_shared_info *shinfo = skb_shinfo(skb);
        int i;

        if (skb->cloned &&
            atomic_sub_return(skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1,
                              &shinfo->dataref))
                return;

        for (i = 0; i < shinfo->nr_frags; i++)
                __skb_frag_unref(&shinfo->frags[i]);

        if (shinfo->frag_list)
                kfree_skb_list(shinfo->frag_list);

        skb_zcopy_clear(skb, true);
        skb_free_head(skb);
}

/*
 * Free an skbuff by memory without cleaning the state.
 */
static void kfree_skbmem(struct sk_buff *skb)
{
        struct sk_buff_fclones *fclones;

        switch (skb->fclone) {
        case SKB_FCLONE_UNAVAILABLE:
                kmem_cache_free(skbuff_head_cache, skb);
                return;

        case SKB_FCLONE_ORIG:
                fclones = container_of(skb, struct sk_buff_fclones, skb1);

                /* We usually free the clone (TX completion) before original skb
                 * This test would have no chance to be true for the clone,
                 * while here, branch prediction will be good.
                 */
                if (refcount_read(&fclones->fclone_ref) == 1)
                        goto fastpath;
                break;

        default: /* SKB_FCLONE_CLONE */
                fclones = container_of(skb, struct sk_buff_fclones, skb2);
                break;
        }
        if (!refcount_dec_and_test(&fclones->fclone_ref))
                return;
fastpath:
        kmem_cache_free(skbuff_fclone_cache, fclones);
}

void skb_release_head_state(struct sk_buff *skb)
{
        skb_dst_drop(skb);
        secpath_reset(skb);
        if (skb->destructor) {
                WARN_ON(in_irq());
                skb->destructor(skb);
        }
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
        nf_conntrack_put(skb_nfct(skb));
#endif
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
        nf_bridge_put(skb->nf_bridge);
#endif
}

/* Free everything but the sk_buff shell. */
static void skb_release_all(struct sk_buff *skb)
{
        skb_release_head_state(skb);
        if (likely(skb->head))
                skb_release_data(skb);
}

/**
 * __kfree_skb - private function
 * @skb: buffer
 *
 * Free an sk_buff. Release anything attached to the buffer.
 * Clean the state. This is an internal helper function. Users should
 * always call kfree_skb
 */
void __kfree_skb(struct sk_buff *skb)
{
        skb_release_all(skb);
        kfree_skbmem(skb);
}
EXPORT_SYMBOL(__kfree_skb);

/**
 * kfree_skb - free an sk_buff
 * @skb: buffer to free
 *
 * Drop a reference to the buffer and free it if the usage count has
 * hit zero.
 */
void kfree_skb(struct sk_buff *skb)
{
        if (!skb_unref(skb))
                return;

        trace_kfree_skb(skb, __builtin_return_address(0));
        __kfree_skb(skb);
}
EXPORT_SYMBOL(kfree_skb);

void kfree_skb_list(struct sk_buff *segs)
{
        while (segs) {
                struct sk_buff *next = segs->next;

                kfree_skb(segs);
                segs = next;
        }
}
EXPORT_SYMBOL(kfree_skb_list);

/**
 * skb_tx_error - report an sk_buff xmit error
 * @skb: buffer that triggered an error
 *
 * Report xmit error if a device callback is tracking this skb.
 * skb must be freed afterwards.
 */
void skb_tx_error(struct sk_buff *skb)
{
        skb_zcopy_clear(skb, true);
}
EXPORT_SYMBOL(skb_tx_error);

/**
 * consume_skb - free an skbuff
 * @skb: buffer to free
 *
 * Drop a ref to the buffer and free it if the usage count has hit zero
 * Functions identically to kfree_skb, but kfree_skb assumes that the frame
 * is being dropped after a failure and notes that
 */
void consume_skb(struct sk_buff *skb)
{
        if (!skb_unref(skb))
                return;

        trace_consume_skb(skb);
        __kfree_skb(skb);
}
EXPORT_SYMBOL(consume_skb);
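
/* Illustrative note (not part of the original file): the difference matters
 * mainly for tracing - drop paths use kfree_skb() so drop monitors see the
 * event, while successful completion paths use consume_skb().
 *
 *	if (unlikely(err))
 *		kfree_skb(skb);		// counted as a drop
 *	else
 *		consume_skb(skb);	// normal end of life, not a drop
 */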

/**
 * __consume_stateless_skb - free an skbuff, assuming it is stateless
 * @skb: buffer to free
 *
 * Alike consume_skb(), but this variant assumes that this is the last
 * skb reference and all the head states have been already dropped
 */
void __consume_stateless_skb(struct sk_buff *skb)
{
        trace_consume_skb(skb);
        skb_release_data(skb);
        kfree_skbmem(skb);
}

void __kfree_skb_flush(void)
{
        struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);

        /* flush skb_cache if containing objects */
        if (nc->skb_count) {
                kmem_cache_free_bulk(skbuff_head_cache, nc->skb_count,
                                     nc->skb_cache);
                nc->skb_count = 0;
        }
}

static inline void _kfree_skb_defer(struct sk_buff *skb)
{
        struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);

        /* drop skb->head and call any destructors for packet */
        skb_release_all(skb);

        /* record skb to CPU local list */
        nc->skb_cache[nc->skb_count++] = skb;

#ifdef CONFIG_SLUB
        /* SLUB writes into objects when freeing */
        prefetchw(skb);
#endif

        /* flush skb_cache if it is filled */
        if (unlikely(nc->skb_count == NAPI_SKB_CACHE_SIZE)) {
                kmem_cache_free_bulk(skbuff_head_cache, NAPI_SKB_CACHE_SIZE,
                                     nc->skb_cache);
                nc->skb_count = 0;
        }
}

void __kfree_skb_defer(struct sk_buff *skb)
{
        _kfree_skb_defer(skb);
}

void napi_consume_skb(struct sk_buff *skb, int budget)
{
        if (unlikely(!skb))
                return;

        /* Zero budget indicates a non-NAPI context called us, like netpoll */
        if (unlikely(!budget)) {
                dev_consume_skb_any(skb);
                return;
        }

        if (!skb_unref(skb))
                return;

        /* if reaching here SKB is ready to free */
        trace_consume_skb(skb);

        /* if SKB is a clone, don't handle this case */
        if (skb->fclone != SKB_FCLONE_UNAVAILABLE) {
                __kfree_skb(skb);
                return;
        }

        _kfree_skb_defer(skb);
}
EXPORT_SYMBOL(napi_consume_skb);
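
/* Illustrative sketch (not part of the original file): a TX completion
 * handler running from NAPI poll passes its budget through so that the
 * zero-budget (e.g. netpoll) case falls back to dev_consume_skb_any().
 * example_poll and done_skb are hypothetical names.
 *
 *	static int example_poll(struct napi_struct *napi, int budget)
 *	{
 *		...
 *		napi_consume_skb(done_skb, budget);
 *		...
 *	}
 */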

/* Make sure a field is enclosed inside headers_start/headers_end section */
#define CHECK_SKB_FIELD(field) \
        BUILD_BUG_ON(offsetof(struct sk_buff, field) <          \
                     offsetof(struct sk_buff, headers_start));  \
        BUILD_BUG_ON(offsetof(struct sk_buff, field) >          \
                     offsetof(struct sk_buff, headers_end));    \

static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
{
        new->tstamp             = old->tstamp;
        /* We do not copy old->sk */
        new->dev                = old->dev;
        memcpy(new->cb, old->cb, sizeof(old->cb));
        skb_dst_copy(new, old);
#ifdef CONFIG_XFRM
        new->sp                 = secpath_get(old->sp);
#endif
        __nf_copy(new, old, false);

        /* Note : this field could be in headers_start/headers_end section
         * It is not yet because we do not want to have a 16 bit hole
         */
        new->queue_mapping = old->queue_mapping;

        memcpy(&new->headers_start, &old->headers_start,
               offsetof(struct sk_buff, headers_end) -
               offsetof(struct sk_buff, headers_start));
        CHECK_SKB_FIELD(protocol);
        CHECK_SKB_FIELD(csum);
        CHECK_SKB_FIELD(hash);
        CHECK_SKB_FIELD(priority);
        CHECK_SKB_FIELD(skb_iif);
        CHECK_SKB_FIELD(vlan_proto);
        CHECK_SKB_FIELD(vlan_tci);
        CHECK_SKB_FIELD(transport_header);
        CHECK_SKB_FIELD(network_header);
        CHECK_SKB_FIELD(mac_header);
        CHECK_SKB_FIELD(inner_protocol);
        CHECK_SKB_FIELD(inner_transport_header);
        CHECK_SKB_FIELD(inner_network_header);
        CHECK_SKB_FIELD(inner_mac_header);
        CHECK_SKB_FIELD(mark);
#ifdef CONFIG_NETWORK_SECMARK
        CHECK_SKB_FIELD(secmark);
#endif
#ifdef CONFIG_NET_RX_BUSY_POLL
        CHECK_SKB_FIELD(napi_id);
#endif
#ifdef CONFIG_XPS
        CHECK_SKB_FIELD(sender_cpu);
#endif
#ifdef CONFIG_NET_SCHED
        CHECK_SKB_FIELD(tc_index);
#endif
}

/*
 * You should not add any new code to this function.  Add it to
 * __copy_skb_header above instead.
 */
static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
{
#define C(x) n->x = skb->x

        n->next = n->prev = NULL;
        n->sk = NULL;
        __copy_skb_header(n, skb);

        C(len);
        C(data_len);
        C(mac_len);
        n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len;
        n->cloned = 1;
        n->nohdr = 0;
        n->peeked = 0;
        n->destructor = NULL;
        C(tail);
        C(end);
        C(head);
        C(head_frag);
        C(data);
        C(truesize);
        refcount_set(&n->users, 1);

        atomic_inc(&(skb_shinfo(skb)->dataref));
        skb->cloned = 1;

        return n;
#undef C
}

/**
 * skb_morph - morph one skb into another
 * @dst: the skb to receive the contents
 * @src: the skb to supply the contents
 *
 * This is identical to skb_clone except that the target skb is
 * supplied by the user.
 *
 * The target skb is returned upon exit.
 */
struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src)
{
        skb_release_all(dst);
        return __skb_clone(dst, src);
}
EXPORT_SYMBOL_GPL(skb_morph);

int mm_account_pinned_pages(struct mmpin *mmp, size_t size)
{
        unsigned long max_pg, num_pg, new_pg, old_pg;
        struct user_struct *user;

        if (capable(CAP_IPC_LOCK) || !size)
                return 0;

        num_pg = (size >> PAGE_SHIFT) + 2;      /* worst case */
        max_pg = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
        user = mmp->user ? : current_user();

        do {
                old_pg = atomic_long_read(&user->locked_vm);
                new_pg = old_pg + num_pg;
                if (new_pg > max_pg)
                        return -ENOBUFS;
        } while (atomic_long_cmpxchg(&user->locked_vm, old_pg, new_pg) !=
                 old_pg);

        if (!mmp->user) {
                mmp->user = get_uid(user);
                mmp->num_pg = num_pg;
        } else {
                mmp->num_pg += num_pg;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(mm_account_pinned_pages);

void mm_unaccount_pinned_pages(struct mmpin *mmp)
{
        if (mmp->user) {
                atomic_long_sub(mmp->num_pg, &mmp->user->locked_vm);
                free_uid(mmp->user);
        }
}
EXPORT_SYMBOL_GPL(mm_unaccount_pinned_pages);

struct ubuf_info *sock_zerocopy_alloc(struct sock *sk, size_t size)
{
        struct ubuf_info *uarg;
        struct sk_buff *skb;

        WARN_ON_ONCE(!in_task());

        if (!sock_flag(sk, SOCK_ZEROCOPY))
                return NULL;

        skb = sock_omalloc(sk, 0, GFP_KERNEL);
        if (!skb)
                return NULL;

        BUILD_BUG_ON(sizeof(*uarg) > sizeof(skb->cb));
        uarg = (void *)skb->cb;
        uarg->mmp.user = NULL;

        if (mm_account_pinned_pages(&uarg->mmp, size)) {
                kfree_skb(skb);
                return NULL;
        }

        uarg->callback = sock_zerocopy_callback;
        uarg->id = ((u32)atomic_inc_return(&sk->sk_zckey)) - 1;
        uarg->len = 1;
        uarg->bytelen = size;
        uarg->zerocopy = 1;
        refcount_set(&uarg->refcnt, 1);
        sock_hold(sk);

        return uarg;
}
EXPORT_SYMBOL_GPL(sock_zerocopy_alloc);

static inline struct sk_buff *skb_from_uarg(struct ubuf_info *uarg)
{
        return container_of((void *)uarg, struct sk_buff, cb);
}

struct ubuf_info *sock_zerocopy_realloc(struct sock *sk, size_t size,
                                        struct ubuf_info *uarg)
{
        if (uarg) {
                const u32 byte_limit = 1 << 19;         /* limit to a few TSO */
                u32 bytelen, next;

                /* realloc only when socket is locked (TCP, UDP cork),
                 * so uarg->len and sk_zckey access is serialized
                 */
                if (!sock_owned_by_user(sk)) {
                        WARN_ON_ONCE(1);
                        return NULL;
                }

                bytelen = uarg->bytelen + size;
                if (uarg->len == USHRT_MAX - 1 || bytelen > byte_limit) {
                        /* TCP can create new skb to attach new uarg */
                        if (sk->sk_type == SOCK_STREAM)
                                goto new_alloc;
                        return NULL;
                }

                next = (u32)atomic_read(&sk->sk_zckey);
                if ((u32)(uarg->id + uarg->len) == next) {
                        if (mm_account_pinned_pages(&uarg->mmp, size))
                                return NULL;
                        uarg->len++;
                        uarg->bytelen = bytelen;
                        atomic_set(&sk->sk_zckey, ++next);
                        sock_zerocopy_get(uarg);
                        return uarg;
                }
        }

new_alloc:
        return sock_zerocopy_alloc(sk, size);
}
EXPORT_SYMBOL_GPL(sock_zerocopy_realloc);

static bool skb_zerocopy_notify_extend(struct sk_buff *skb, u32 lo, u16 len)
{
        struct sock_exterr_skb *serr = SKB_EXT_ERR(skb);
        u32 old_lo, old_hi;
        u64 sum_len;

        old_lo = serr->ee.ee_info;
        old_hi = serr->ee.ee_data;
        sum_len = old_hi - old_lo + 1ULL + len;

        if (sum_len >= (1ULL << 32))
                return false;

        if (lo != old_hi + 1)
                return false;

        serr->ee.ee_data += len;
        return true;
}

void sock_zerocopy_callback(struct ubuf_info *uarg, bool success)
{
        struct sk_buff *tail, *skb = skb_from_uarg(uarg);
        struct sock_exterr_skb *serr;
        struct sock *sk = skb->sk;
        struct sk_buff_head *q;
        unsigned long flags;
        u32 lo, hi;
        u16 len;

        mm_unaccount_pinned_pages(&uarg->mmp);

        /* if !len, there was only 1 call, and it was aborted
         * so do not queue a completion notification
         */
        if (!uarg->len || sock_flag(sk, SOCK_DEAD))
                goto release;

        len = uarg->len;
        lo = uarg->id;
        hi = uarg->id + len - 1;

        serr = SKB_EXT_ERR(skb);
        memset(serr, 0, sizeof(*serr));
        serr->ee.ee_errno = 0;
        serr->ee.ee_origin = SO_EE_ORIGIN_ZEROCOPY;
        serr->ee.ee_data = hi;
        serr->ee.ee_info = lo;
        if (!success)
                serr->ee.ee_code |= SO_EE_CODE_ZEROCOPY_COPIED;

        q = &sk->sk_error_queue;
        spin_lock_irqsave(&q->lock, flags);
        tail = skb_peek_tail(q);
        if (!tail || SKB_EXT_ERR(tail)->ee.ee_origin != SO_EE_ORIGIN_ZEROCOPY ||
            !skb_zerocopy_notify_extend(tail, lo, len)) {
                __skb_queue_tail(q, skb);
                skb = NULL;
        }
        spin_unlock_irqrestore(&q->lock, flags);

        sk->sk_error_report(sk);

release:
        consume_skb(skb);
        sock_put(sk);
}
EXPORT_SYMBOL_GPL(sock_zerocopy_callback);
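
/* Illustrative note (not part of the original file): the completion queued
 * above is what a MSG_ZEROCOPY sender reads back from the socket error queue;
 * the notification carries the zerocopy id range [ee_info, ee_data], which
 * skb_zerocopy_notify_extend() coalesces with an existing notification when
 * the ranges are contiguous. Userspace side, roughly:
 *
 *	recvmsg(fd, &msg, MSG_ERRQUEUE);
 *	// cmsg payload: struct sock_extended_err with
 *	// ee_origin == SO_EE_ORIGIN_ZEROCOPY, completed range ee_info..ee_data
 */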
  926. void sock_zerocopy_put(struct ubuf_info *uarg)
  927. {
  928. if (uarg && refcount_dec_and_test(&uarg->refcnt)) {
  929. if (uarg->callback)
  930. uarg->callback(uarg, uarg->zerocopy);
  931. else
  932. consume_skb(skb_from_uarg(uarg));
  933. }
  934. }
  935. EXPORT_SYMBOL_GPL(sock_zerocopy_put);
  936. void sock_zerocopy_put_abort(struct ubuf_info *uarg)
  937. {
  938. if (uarg) {
  939. struct sock *sk = skb_from_uarg(uarg)->sk;
  940. atomic_dec(&sk->sk_zckey);
  941. uarg->len--;
  942. sock_zerocopy_put(uarg);
  943. }
  944. }
  945. EXPORT_SYMBOL_GPL(sock_zerocopy_put_abort);
  946. extern int __zerocopy_sg_from_iter(struct sock *sk, struct sk_buff *skb,
  947. struct iov_iter *from, size_t length);
  948. int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb,
  949. struct msghdr *msg, int len,
  950. struct ubuf_info *uarg)
  951. {
  952. struct ubuf_info *orig_uarg = skb_zcopy(skb);
  953. struct iov_iter orig_iter = msg->msg_iter;
  954. int err, orig_len = skb->len;
  955. /* An skb can only point to one uarg. This edge case happens when
  956. * TCP appends to an skb, but zerocopy_realloc triggered a new alloc.
  957. */
  958. if (orig_uarg && uarg != orig_uarg)
  959. return -EEXIST;
  960. err = __zerocopy_sg_from_iter(sk, skb, &msg->msg_iter, len);
  961. if (err == -EFAULT || (err == -EMSGSIZE && skb->len == orig_len)) {
  962. struct sock *save_sk = skb->sk;
  963. /* Streams do not free skb on error. Reset to prev state. */
  964. msg->msg_iter = orig_iter;
  965. skb->sk = sk;
  966. ___pskb_trim(skb, orig_len);
  967. skb->sk = save_sk;
  968. return err;
  969. }
  970. skb_zcopy_set(skb, uarg);
  971. return skb->len - orig_len;
  972. }
  973. EXPORT_SYMBOL_GPL(skb_zerocopy_iter_stream);
  974. static int skb_zerocopy_clone(struct sk_buff *nskb, struct sk_buff *orig,
  975. gfp_t gfp_mask)
  976. {
  977. if (skb_zcopy(orig)) {
  978. if (skb_zcopy(nskb)) {
  979. /* !gfp_mask callers are verified to !skb_zcopy(nskb) */
  980. if (!gfp_mask) {
  981. WARN_ON_ONCE(1);
  982. return -ENOMEM;
  983. }
  984. if (skb_uarg(nskb) == skb_uarg(orig))
  985. return 0;
  986. if (skb_copy_ubufs(nskb, GFP_ATOMIC))
  987. return -EIO;
  988. }
  989. skb_zcopy_set(nskb, skb_uarg(orig));
  990. }
  991. return 0;
  992. }
  993. /**
  994. * skb_copy_ubufs - copy userspace skb frags buffers to kernel
  995. * @skb: the skb to modify
  996. * @gfp_mask: allocation priority
  997. *
  998. * This must be called on SKBTX_DEV_ZEROCOPY skb.
  999. * It will copy all frags into kernel and drop the reference
  1000. * to userspace pages.
  1001. *
  1002. * If this function is called from an interrupt gfp_mask() must be
  1003. * %GFP_ATOMIC.
  1004. *
  1005. * Returns 0 on success or a negative error code on failure
  1006. * to allocate kernel memory to copy to.
  1007. */
  1008. int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
  1009. {
  1010. int num_frags = skb_shinfo(skb)->nr_frags;
  1011. struct page *page, *head = NULL;
  1012. int i, new_frags;
  1013. u32 d_off;
  1014. if (skb_shared(skb) || skb_unclone(skb, gfp_mask))
  1015. return -EINVAL;
  1016. if (!num_frags)
  1017. goto release;
  1018. new_frags = (__skb_pagelen(skb) + PAGE_SIZE - 1) >> PAGE_SHIFT;
  1019. for (i = 0; i < new_frags; i++) {
  1020. page = alloc_page(gfp_mask);
  1021. if (!page) {
  1022. while (head) {
  1023. struct page *next = (struct page *)page_private(head);
  1024. put_page(head);
  1025. head = next;
  1026. }
  1027. return -ENOMEM;
  1028. }
  1029. set_page_private(page, (unsigned long)head);
  1030. head = page;
  1031. }
  1032. page = head;
  1033. d_off = 0;
  1034. for (i = 0; i < num_frags; i++) {
  1035. skb_frag_t *f = &skb_shinfo(skb)->frags[i];
  1036. u32 p_off, p_len, copied;
  1037. struct page *p;
  1038. u8 *vaddr;
  1039. skb_frag_foreach_page(f, f->page_offset, skb_frag_size(f),
  1040. p, p_off, p_len, copied) {
  1041. u32 copy, done = 0;
  1042. vaddr = kmap_atomic(p);
  1043. while (done < p_len) {
  1044. if (d_off == PAGE_SIZE) {
  1045. d_off = 0;
  1046. page = (struct page *)page_private(page);
  1047. }
  1048. copy = min_t(u32, PAGE_SIZE - d_off, p_len - done);
  1049. memcpy(page_address(page) + d_off,
  1050. vaddr + p_off + done, copy);
  1051. done += copy;
  1052. d_off += copy;
  1053. }
  1054. kunmap_atomic(vaddr);
  1055. }
  1056. }
  1057. /* skb frags release userspace buffers */
  1058. for (i = 0; i < num_frags; i++)
  1059. skb_frag_unref(skb, i);
  1060. /* skb frags point to kernel buffers */
  1061. for (i = 0; i < new_frags - 1; i++) {
  1062. __skb_fill_page_desc(skb, i, head, 0, PAGE_SIZE);
  1063. head = (struct page *)page_private(head);
  1064. }
  1065. __skb_fill_page_desc(skb, new_frags - 1, head, 0, d_off);
  1066. skb_shinfo(skb)->nr_frags = new_frags;
  1067. release:
  1068. skb_zcopy_clear(skb, false);
  1069. return 0;
  1070. }
  1071. EXPORT_SYMBOL_GPL(skb_copy_ubufs);
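/* Editor's addition: a minimal, hypothetical sketch (not part of the original
 * file) of how a path that is about to write into frag pages might first
 * detach userspace (zerocopy) buffers via skb_copy_ubufs().  Assumes the
 * usual skbuff.h context of this file; the helper name is invented.
 */
static int example_make_frags_kernel_owned(struct sk_buff *skb)
{
	/* Only zerocopy skbs reference userspace pages. */
	if (!skb_zcopy(skb))
		return 0;

	/* Copies every frag into freshly allocated kernel pages and drops
	 * the references to the userspace pages; may fail with -ENOMEM
	 * (or -EINVAL if the skb is shared).
	 */
	return skb_copy_ubufs(skb, GFP_ATOMIC);
}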
  1072. /**
  1073. * skb_clone - duplicate an sk_buff
  1074. * @skb: buffer to clone
  1075. * @gfp_mask: allocation priority
  1076. *
  1077. * Duplicate an &sk_buff. The new one is not owned by a socket. Both
  1078. * copies share the same packet data but not structure. The new
  1079. * buffer has a reference count of 1. If the allocation fails the
  1080. * function returns %NULL otherwise the new buffer is returned.
  1081. *
1082. * If this function is called from an interrupt, @gfp_mask must be
1083. * %GFP_ATOMIC.
  1084. */
  1085. struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
  1086. {
  1087. struct sk_buff_fclones *fclones = container_of(skb,
  1088. struct sk_buff_fclones,
  1089. skb1);
  1090. struct sk_buff *n;
  1091. if (skb_orphan_frags(skb, gfp_mask))
  1092. return NULL;
  1093. if (skb->fclone == SKB_FCLONE_ORIG &&
  1094. refcount_read(&fclones->fclone_ref) == 1) {
  1095. n = &fclones->skb2;
  1096. refcount_set(&fclones->fclone_ref, 2);
  1097. } else {
  1098. if (skb_pfmemalloc(skb))
  1099. gfp_mask |= __GFP_MEMALLOC;
  1100. n = kmem_cache_alloc(skbuff_head_cache, gfp_mask);
  1101. if (!n)
  1102. return NULL;
  1103. n->fclone = SKB_FCLONE_UNAVAILABLE;
  1104. }
  1105. return __skb_clone(n, skb);
  1106. }
  1107. EXPORT_SYMBOL(skb_clone);
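/* Editor's addition: a hedged usage sketch of skb_clone() for a mirror/tap
 * style path that needs a second reference to the same payload.  The helper
 * and the example_deliver callback are hypothetical.
 */
static void example_mirror_skb(struct sk_buff *skb,
			       void (*example_deliver)(struct sk_buff *))
{
	struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);

	if (!clone)
		return;		/* allocation failed; just skip the mirror */

	/* The clone shares its data with @skb, so it must be treated as
	 * read-only unless pskb_expand_head()/skb_cow() is used first.
	 */
	example_deliver(clone);
}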
  1108. static void skb_headers_offset_update(struct sk_buff *skb, int off)
  1109. {
  1110. /* Only adjust this if it actually is csum_start rather than csum */
  1111. if (skb->ip_summed == CHECKSUM_PARTIAL)
  1112. skb->csum_start += off;
  1113. /* {transport,network,mac}_header and tail are relative to skb->head */
  1114. skb->transport_header += off;
  1115. skb->network_header += off;
  1116. if (skb_mac_header_was_set(skb))
  1117. skb->mac_header += off;
  1118. skb->inner_transport_header += off;
  1119. skb->inner_network_header += off;
  1120. skb->inner_mac_header += off;
  1121. }
  1122. void skb_copy_header(struct sk_buff *new, const struct sk_buff *old)
  1123. {
  1124. __copy_skb_header(new, old);
  1125. skb_shinfo(new)->gso_size = skb_shinfo(old)->gso_size;
  1126. skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs;
  1127. skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type;
  1128. }
  1129. EXPORT_SYMBOL(skb_copy_header);
  1130. static inline int skb_alloc_rx_flag(const struct sk_buff *skb)
  1131. {
  1132. if (skb_pfmemalloc(skb))
  1133. return SKB_ALLOC_RX;
  1134. return 0;
  1135. }
  1136. /**
  1137. * skb_copy - create private copy of an sk_buff
  1138. * @skb: buffer to copy
  1139. * @gfp_mask: allocation priority
  1140. *
  1141. * Make a copy of both an &sk_buff and its data. This is used when the
  1142. * caller wishes to modify the data and needs a private copy of the
  1143. * data to alter. Returns %NULL on failure or the pointer to the buffer
  1144. * on success. The returned buffer has a reference count of 1.
  1145. *
1146. * As a by-product, this function converts a non-linear &sk_buff into a
1147. * linear one, so the &sk_buff becomes completely private and the caller
1148. * may modify all of the data in the returned buffer. This means the
1149. * function is not recommended for use in circumstances when only the
1150. * header is going to be modified. Use pskb_copy() instead.
  1151. */
  1152. struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask)
  1153. {
  1154. int headerlen = skb_headroom(skb);
  1155. unsigned int size = skb_end_offset(skb) + skb->data_len;
  1156. struct sk_buff *n = __alloc_skb(size, gfp_mask,
  1157. skb_alloc_rx_flag(skb), NUMA_NO_NODE);
  1158. if (!n)
  1159. return NULL;
  1160. /* Set the data pointer */
  1161. skb_reserve(n, headerlen);
  1162. /* Set the tail pointer and length */
  1163. skb_put(n, skb->len);
  1164. BUG_ON(skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len));
  1165. skb_copy_header(n, skb);
  1166. return n;
  1167. }
  1168. EXPORT_SYMBOL(skb_copy);
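/* Editor's addition: an illustrative (hypothetical) helper contrasting
 * skb_copy(), which linearizes and privatizes the whole payload, with
 * pskb_copy(), which privatizes only the header and keeps frags shared.
 */
static struct sk_buff *example_private_copy(struct sk_buff *skb,
					    bool payload_will_change)
{
	if (payload_will_change)
		return skb_copy(skb, GFP_ATOMIC);	/* fully linear, private data */

	return pskb_copy(skb, GFP_ATOMIC);		/* private head, shared frags */
}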
  1169. /**
  1170. * __pskb_copy_fclone - create copy of an sk_buff with private head.
  1171. * @skb: buffer to copy
  1172. * @headroom: headroom of new skb
  1173. * @gfp_mask: allocation priority
  1174. * @fclone: if true allocate the copy of the skb from the fclone
  1175. * cache instead of the head cache; it is recommended to set this
  1176. * to true for the cases where the copy will likely be cloned
  1177. *
1178. * Make a copy of both an &sk_buff and part of its data, located
1179. * in the header. Fragmented data remains shared. This is used when
1180. * the caller wishes to modify only the header of the &sk_buff and needs
1181. * a private copy of the header to alter. Returns %NULL on failure
1182. * or a pointer to the buffer on success.
1183. * The returned buffer has a reference count of 1.
  1184. */
  1185. struct sk_buff *__pskb_copy_fclone(struct sk_buff *skb, int headroom,
  1186. gfp_t gfp_mask, bool fclone)
  1187. {
  1188. unsigned int size = skb_headlen(skb) + headroom;
  1189. int flags = skb_alloc_rx_flag(skb) | (fclone ? SKB_ALLOC_FCLONE : 0);
  1190. struct sk_buff *n = __alloc_skb(size, gfp_mask, flags, NUMA_NO_NODE);
  1191. if (!n)
  1192. goto out;
  1193. /* Set the data pointer */
  1194. skb_reserve(n, headroom);
  1195. /* Set the tail pointer and length */
  1196. skb_put(n, skb_headlen(skb));
  1197. /* Copy the bytes */
  1198. skb_copy_from_linear_data(skb, n->data, n->len);
  1199. n->truesize += skb->data_len;
  1200. n->data_len = skb->data_len;
  1201. n->len = skb->len;
  1202. if (skb_shinfo(skb)->nr_frags) {
  1203. int i;
  1204. if (skb_orphan_frags(skb, gfp_mask) ||
  1205. skb_zerocopy_clone(n, skb, gfp_mask)) {
  1206. kfree_skb(n);
  1207. n = NULL;
  1208. goto out;
  1209. }
  1210. for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
  1211. skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i];
  1212. skb_frag_ref(skb, i);
  1213. }
  1214. skb_shinfo(n)->nr_frags = i;
  1215. }
  1216. if (skb_has_frag_list(skb)) {
  1217. skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list;
  1218. skb_clone_fraglist(n);
  1219. }
  1220. skb_copy_header(n, skb);
  1221. out:
  1222. return n;
  1223. }
  1224. EXPORT_SYMBOL(__pskb_copy_fclone);
  1225. /**
  1226. * pskb_expand_head - reallocate header of &sk_buff
  1227. * @skb: buffer to reallocate
  1228. * @nhead: room to add at head
  1229. * @ntail: room to add at tail
  1230. * @gfp_mask: allocation priority
  1231. *
1232. * Expands (or creates an identical copy, if @nhead and @ntail are zero)
1233. * the header of @skb. The &sk_buff itself is not changed and MUST have
1234. * a reference count of 1. Returns zero on success, or a negative error
1235. * code if expansion failed; in that case the &sk_buff is left unchanged.
  1236. *
  1237. * All the pointers pointing into skb header may change and must be
  1238. * reloaded after call to this function.
  1239. */
  1240. int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
  1241. gfp_t gfp_mask)
  1242. {
  1243. int i, osize = skb_end_offset(skb);
  1244. int size = osize + nhead + ntail;
  1245. long off;
  1246. u8 *data;
  1247. BUG_ON(nhead < 0);
  1248. BUG_ON(skb_shared(skb));
  1249. size = SKB_DATA_ALIGN(size);
  1250. if (skb_pfmemalloc(skb))
  1251. gfp_mask |= __GFP_MEMALLOC;
  1252. data = kmalloc_reserve(size + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
  1253. gfp_mask, NUMA_NO_NODE, NULL);
  1254. if (!data)
  1255. goto nodata;
  1256. size = SKB_WITH_OVERHEAD(ksize(data));
  1257. /* Copy only real data... and, alas, header. This should be
  1258. * optimized for the cases when header is void.
  1259. */
  1260. memcpy(data + nhead, skb->head, skb_tail_pointer(skb) - skb->head);
  1261. memcpy((struct skb_shared_info *)(data + size),
  1262. skb_shinfo(skb),
  1263. offsetof(struct skb_shared_info, frags[skb_shinfo(skb)->nr_frags]));
  1264. /*
  1265. * if shinfo is shared we must drop the old head gracefully, but if it
  1266. * is not we can just drop the old head and let the existing refcount
  1267. * be since all we did is relocate the values
  1268. */
  1269. if (skb_cloned(skb)) {
  1270. if (skb_orphan_frags(skb, gfp_mask))
  1271. goto nofrags;
  1272. if (skb_zcopy(skb))
  1273. refcount_inc(&skb_uarg(skb)->refcnt);
  1274. for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
  1275. skb_frag_ref(skb, i);
  1276. if (skb_has_frag_list(skb))
  1277. skb_clone_fraglist(skb);
  1278. skb_release_data(skb);
  1279. } else {
  1280. skb_free_head(skb);
  1281. }
  1282. off = (data + nhead) - skb->head;
  1283. skb->head = data;
  1284. skb->head_frag = 0;
  1285. skb->data += off;
  1286. #ifdef NET_SKBUFF_DATA_USES_OFFSET
  1287. skb->end = size;
  1288. off = nhead;
  1289. #else
  1290. skb->end = skb->head + size;
  1291. #endif
  1292. skb->tail += off;
  1293. skb_headers_offset_update(skb, nhead);
  1294. skb->cloned = 0;
  1295. skb->hdr_len = 0;
  1296. skb->nohdr = 0;
  1297. atomic_set(&skb_shinfo(skb)->dataref, 1);
  1298. skb_metadata_clear(skb);
  1299. /* It is not generally safe to change skb->truesize.
  1300. * For the moment, we really care of rx path, or
  1301. * when skb is orphaned (not attached to a socket).
  1302. */
  1303. if (!skb->sk || skb->destructor == sock_edemux)
  1304. skb->truesize += size - osize;
  1305. return 0;
  1306. nofrags:
  1307. kfree(data);
  1308. nodata:
  1309. return -ENOMEM;
  1310. }
  1311. EXPORT_SYMBOL(pskb_expand_head);
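/* Editor's addition: a hedged sketch of growing headroom before pushing an
 * extra header.  Real code usually goes through skb_cow_head(), which wraps
 * roughly this check; the helper name and the 8-byte header size are
 * invented, and the skb is assumed to be unshared.
 */
static int example_push_outer_header(struct sk_buff *skb)
{
	const unsigned int hdr_len = 8;		/* hypothetical header size */
	void *hdr;

	if (skb_headroom(skb) < hdr_len || skb_cloned(skb)) {
		/* All pointers into the old head become stale after this. */
		if (pskb_expand_head(skb, SKB_DATA_ALIGN(hdr_len), 0, GFP_ATOMIC))
			return -ENOMEM;
	}

	hdr = skb_push(skb, hdr_len);
	memset(hdr, 0, hdr_len);		/* fill in the real header here */
	return 0;
}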
  1312. /* Make private copy of skb with writable head and some headroom */
  1313. struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom)
  1314. {
  1315. struct sk_buff *skb2;
  1316. int delta = headroom - skb_headroom(skb);
  1317. if (delta <= 0)
  1318. skb2 = pskb_copy(skb, GFP_ATOMIC);
  1319. else {
  1320. skb2 = skb_clone(skb, GFP_ATOMIC);
  1321. if (skb2 && pskb_expand_head(skb2, SKB_DATA_ALIGN(delta), 0,
  1322. GFP_ATOMIC)) {
  1323. kfree_skb(skb2);
  1324. skb2 = NULL;
  1325. }
  1326. }
  1327. return skb2;
  1328. }
  1329. EXPORT_SYMBOL(skb_realloc_headroom);
  1330. /**
  1331. * skb_copy_expand - copy and expand sk_buff
  1332. * @skb: buffer to copy
  1333. * @newheadroom: new free bytes at head
  1334. * @newtailroom: new free bytes at tail
  1335. * @gfp_mask: allocation priority
  1336. *
  1337. * Make a copy of both an &sk_buff and its data and while doing so
  1338. * allocate additional space.
  1339. *
  1340. * This is used when the caller wishes to modify the data and needs a
  1341. * private copy of the data to alter as well as more space for new fields.
  1342. * Returns %NULL on failure or the pointer to the buffer
  1343. * on success. The returned buffer has a reference count of 1.
  1344. *
  1345. * You must pass %GFP_ATOMIC as the allocation priority if this function
  1346. * is called from an interrupt.
  1347. */
  1348. struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
  1349. int newheadroom, int newtailroom,
  1350. gfp_t gfp_mask)
  1351. {
  1352. /*
  1353. * Allocate the copy buffer
  1354. */
  1355. struct sk_buff *n = __alloc_skb(newheadroom + skb->len + newtailroom,
  1356. gfp_mask, skb_alloc_rx_flag(skb),
  1357. NUMA_NO_NODE);
  1358. int oldheadroom = skb_headroom(skb);
  1359. int head_copy_len, head_copy_off;
  1360. if (!n)
  1361. return NULL;
  1362. skb_reserve(n, newheadroom);
  1363. /* Set the tail pointer and length */
  1364. skb_put(n, skb->len);
  1365. head_copy_len = oldheadroom;
  1366. head_copy_off = 0;
  1367. if (newheadroom <= head_copy_len)
  1368. head_copy_len = newheadroom;
  1369. else
  1370. head_copy_off = newheadroom - head_copy_len;
  1371. /* Copy the linear header and data. */
  1372. BUG_ON(skb_copy_bits(skb, -head_copy_len, n->head + head_copy_off,
  1373. skb->len + head_copy_len));
  1374. skb_copy_header(n, skb);
  1375. skb_headers_offset_update(n, newheadroom - oldheadroom);
  1376. return n;
  1377. }
  1378. EXPORT_SYMBOL(skb_copy_expand);
  1379. /**
  1380. * __skb_pad - zero pad the tail of an skb
  1381. * @skb: buffer to pad
  1382. * @pad: space to pad
  1383. * @free_on_error: free buffer on error
  1384. *
  1385. * Ensure that a buffer is followed by a padding area that is zero
  1386. * filled. Used by network drivers which may DMA or transfer data
  1387. * beyond the buffer end onto the wire.
  1388. *
  1389. * May return error in out of memory cases. The skb is freed on error
  1390. * if @free_on_error is true.
  1391. */
  1392. int __skb_pad(struct sk_buff *skb, int pad, bool free_on_error)
  1393. {
  1394. int err;
  1395. int ntail;
1396. /* If the skbuff is non-linear, tailroom is always zero. */
  1397. if (!skb_cloned(skb) && skb_tailroom(skb) >= pad) {
  1398. memset(skb->data+skb->len, 0, pad);
  1399. return 0;
  1400. }
  1401. ntail = skb->data_len + pad - (skb->end - skb->tail);
  1402. if (likely(skb_cloned(skb) || ntail > 0)) {
  1403. err = pskb_expand_head(skb, 0, ntail, GFP_ATOMIC);
  1404. if (unlikely(err))
  1405. goto free_skb;
  1406. }
  1407. /* FIXME: The use of this function with non-linear skb's really needs
  1408. * to be audited.
  1409. */
  1410. err = skb_linearize(skb);
  1411. if (unlikely(err))
  1412. goto free_skb;
  1413. memset(skb->data + skb->len, 0, pad);
  1414. return 0;
  1415. free_skb:
  1416. if (free_on_error)
  1417. kfree_skb(skb);
  1418. return err;
  1419. }
  1420. EXPORT_SYMBOL(__skb_pad);
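/* Editor's addition: a small, hypothetical driver-style sketch padding short
 * frames to the minimum Ethernet length.  skb_put_padto() builds on
 * __skb_pad() and frees the skb on failure, so the caller only drops its own
 * bookkeeping.
 */
static int example_pad_before_xmit(struct sk_buff *skb)
{
	int err;

	/* Ensures skb->len >= ETH_ZLEN, zero-filling the added tail bytes. */
	err = skb_put_padto(skb, ETH_ZLEN);
	if (err)
		return err;	/* skb has already been freed */

	return 0;
}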
  1421. /**
  1422. * pskb_put - add data to the tail of a potentially fragmented buffer
  1423. * @skb: start of the buffer to use
  1424. * @tail: tail fragment of the buffer to use
  1425. * @len: amount of data to add
  1426. *
  1427. * This function extends the used data area of the potentially
  1428. * fragmented buffer. @tail must be the last fragment of @skb -- or
  1429. * @skb itself. If this would exceed the total buffer size the kernel
  1430. * will panic. A pointer to the first byte of the extra data is
  1431. * returned.
  1432. */
  1433. void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len)
  1434. {
  1435. if (tail != skb) {
  1436. skb->data_len += len;
  1437. skb->len += len;
  1438. }
  1439. return skb_put(tail, len);
  1440. }
  1441. EXPORT_SYMBOL_GPL(pskb_put);
  1442. /**
  1443. * skb_put - add data to a buffer
  1444. * @skb: buffer to use
  1445. * @len: amount of data to add
  1446. *
  1447. * This function extends the used data area of the buffer. If this would
  1448. * exceed the total buffer size the kernel will panic. A pointer to the
  1449. * first byte of the extra data is returned.
  1450. */
  1451. void *skb_put(struct sk_buff *skb, unsigned int len)
  1452. {
  1453. void *tmp = skb_tail_pointer(skb);
  1454. SKB_LINEAR_ASSERT(skb);
  1455. skb->tail += len;
  1456. skb->len += len;
  1457. if (unlikely(skb->tail > skb->end))
  1458. skb_over_panic(skb, len, __builtin_return_address(0));
  1459. return tmp;
  1460. }
  1461. EXPORT_SYMBOL(skb_put);
  1462. /**
  1463. * skb_push - add data to the start of a buffer
  1464. * @skb: buffer to use
  1465. * @len: amount of data to add
  1466. *
  1467. * This function extends the used data area of the buffer at the buffer
  1468. * start. If this would exceed the total buffer headroom the kernel will
  1469. * panic. A pointer to the first byte of the extra data is returned.
  1470. */
  1471. void *skb_push(struct sk_buff *skb, unsigned int len)
  1472. {
  1473. skb->data -= len;
  1474. skb->len += len;
  1475. if (unlikely(skb->data<skb->head))
  1476. skb_under_panic(skb, len, __builtin_return_address(0));
  1477. return skb->data;
  1478. }
  1479. EXPORT_SYMBOL(skb_push);
  1480. /**
  1481. * skb_pull - remove data from the start of a buffer
  1482. * @skb: buffer to use
  1483. * @len: amount of data to remove
  1484. *
  1485. * This function removes data from the start of a buffer, returning
  1486. * the memory to the headroom. A pointer to the next data in the buffer
  1487. * is returned. Once the data has been pulled future pushes will overwrite
  1488. * the old data.
  1489. */
  1490. void *skb_pull(struct sk_buff *skb, unsigned int len)
  1491. {
  1492. return skb_pull_inline(skb, len);
  1493. }
  1494. EXPORT_SYMBOL(skb_pull);
  1495. /**
  1496. * skb_trim - remove end from a buffer
  1497. * @skb: buffer to alter
  1498. * @len: new length
  1499. *
  1500. * Cut the length of a buffer down by removing data from the tail. If
  1501. * the buffer is already under the length specified it is not modified.
  1502. * The skb must be linear.
  1503. */
  1504. void skb_trim(struct sk_buff *skb, unsigned int len)
  1505. {
  1506. if (skb->len > len)
  1507. __skb_trim(skb, len);
  1508. }
  1509. EXPORT_SYMBOL(skb_trim);
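/* Editor's addition: a self-contained (hypothetical) sketch showing the four
 * basic data-area operations together.  Sizes and the 0xAB marker byte are
 * arbitrary example values.
 */
static struct sk_buff *example_build_and_strip(void)
{
	struct sk_buff *skb = alloc_skb(64, GFP_KERNEL);
	u8 *p;

	if (!skb)
		return NULL;

	skb_reserve(skb, 16);		/* leave headroom for later pushes */

	p = skb_put(skb, 4);		/* 4 bytes of payload at the tail */
	memset(p, 0, 4);

	p = skb_push(skb, 1);		/* prepend a 1-byte header */
	*p = 0xAB;

	skb_pull(skb, 1);		/* ...and consume it again */
	skb_trim(skb, 2);		/* keep only the first 2 payload bytes */

	return skb;			/* caller frees with kfree_skb() */
}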
  1510. /* Trims skb to length len. It can change skb pointers.
  1511. */
  1512. int ___pskb_trim(struct sk_buff *skb, unsigned int len)
  1513. {
  1514. struct sk_buff **fragp;
  1515. struct sk_buff *frag;
  1516. int offset = skb_headlen(skb);
  1517. int nfrags = skb_shinfo(skb)->nr_frags;
  1518. int i;
  1519. int err;
  1520. if (skb_cloned(skb) &&
  1521. unlikely((err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC))))
  1522. return err;
  1523. i = 0;
  1524. if (offset >= len)
  1525. goto drop_pages;
  1526. for (; i < nfrags; i++) {
  1527. int end = offset + skb_frag_size(&skb_shinfo(skb)->frags[i]);
  1528. if (end < len) {
  1529. offset = end;
  1530. continue;
  1531. }
  1532. skb_frag_size_set(&skb_shinfo(skb)->frags[i++], len - offset);
  1533. drop_pages:
  1534. skb_shinfo(skb)->nr_frags = i;
  1535. for (; i < nfrags; i++)
  1536. skb_frag_unref(skb, i);
  1537. if (skb_has_frag_list(skb))
  1538. skb_drop_fraglist(skb);
  1539. goto done;
  1540. }
  1541. for (fragp = &skb_shinfo(skb)->frag_list; (frag = *fragp);
  1542. fragp = &frag->next) {
  1543. int end = offset + frag->len;
  1544. if (skb_shared(frag)) {
  1545. struct sk_buff *nfrag;
  1546. nfrag = skb_clone(frag, GFP_ATOMIC);
  1547. if (unlikely(!nfrag))
  1548. return -ENOMEM;
  1549. nfrag->next = frag->next;
  1550. consume_skb(frag);
  1551. frag = nfrag;
  1552. *fragp = frag;
  1553. }
  1554. if (end < len) {
  1555. offset = end;
  1556. continue;
  1557. }
  1558. if (end > len &&
  1559. unlikely((err = pskb_trim(frag, len - offset))))
  1560. return err;
  1561. if (frag->next)
  1562. skb_drop_list(&frag->next);
  1563. break;
  1564. }
  1565. done:
  1566. if (len > skb_headlen(skb)) {
  1567. skb->data_len -= skb->len - len;
  1568. skb->len = len;
  1569. } else {
  1570. skb->len = len;
  1571. skb->data_len = 0;
  1572. skb_set_tail_pointer(skb, len);
  1573. }
  1574. if (!skb->sk || skb->destructor == sock_edemux)
  1575. skb_condense(skb);
  1576. return 0;
  1577. }
  1578. EXPORT_SYMBOL(___pskb_trim);
  1579. /* Note : use pskb_trim_rcsum() instead of calling this directly
  1580. */
  1581. int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len)
  1582. {
  1583. if (skb->ip_summed == CHECKSUM_COMPLETE) {
  1584. int delta = skb->len - len;
  1585. skb->csum = csum_sub(skb->csum,
  1586. skb_checksum(skb, len, delta, 0));
  1587. }
  1588. return __pskb_trim(skb, len);
  1589. }
  1590. EXPORT_SYMBOL(pskb_trim_rcsum_slow);
  1591. /**
  1592. * __pskb_pull_tail - advance tail of skb header
  1593. * @skb: buffer to reallocate
  1594. * @delta: number of bytes to advance tail
  1595. *
1596. * This function only makes sense on a fragmented &sk_buff:
1597. * it expands the header, moving its tail forward and copying the
1598. * necessary data from the fragmented part.
  1599. *
  1600. * &sk_buff MUST have reference count of 1.
  1601. *
  1602. * Returns %NULL (and &sk_buff does not change) if pull failed
  1603. * or value of new tail of skb in the case of success.
  1604. *
  1605. * All the pointers pointing into skb header may change and must be
  1606. * reloaded after call to this function.
  1607. */
  1608. /* Moves tail of skb head forward, copying data from fragmented part,
  1609. * when it is necessary.
  1610. * 1. It may fail due to malloc failure.
  1611. * 2. It may change skb pointers.
  1612. *
  1613. * It is pretty complicated. Luckily, it is called only in exceptional cases.
  1614. */
  1615. void *__pskb_pull_tail(struct sk_buff *skb, int delta)
  1616. {
1617. /* If the skb does not have enough free space at the tail, get a new one
1618. * plus 128 bytes for future expansions. If we do have enough
1619. * room at the tail, reallocate without expansion only if the skb is cloned.
  1620. */
  1621. int i, k, eat = (skb->tail + delta) - skb->end;
  1622. if (eat > 0 || skb_cloned(skb)) {
  1623. if (pskb_expand_head(skb, 0, eat > 0 ? eat + 128 : 0,
  1624. GFP_ATOMIC))
  1625. return NULL;
  1626. }
  1627. BUG_ON(skb_copy_bits(skb, skb_headlen(skb),
  1628. skb_tail_pointer(skb), delta));
1629. /* Optimization: no fragments, no reason to pre-estimate the
1630. * size of pulled pages. Superb.
  1631. */
  1632. if (!skb_has_frag_list(skb))
  1633. goto pull_pages;
  1634. /* Estimate size of pulled pages. */
  1635. eat = delta;
  1636. for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
  1637. int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
  1638. if (size >= eat)
  1639. goto pull_pages;
  1640. eat -= size;
  1641. }
1642. /* If we need to update the frag list, we are in trouble.
1643. * Certainly, it is possible to add an offset to the skb data,
1644. * but taking into account that pulling is expected to
1645. * be a very rare operation, it is worth fighting against
1646. * further bloating of the skb head and crucifying ourselves here instead.
1647. * Pure masochism, indeed. 8)8)
  1648. */
  1649. if (eat) {
  1650. struct sk_buff *list = skb_shinfo(skb)->frag_list;
  1651. struct sk_buff *clone = NULL;
  1652. struct sk_buff *insp = NULL;
  1653. do {
  1654. BUG_ON(!list);
  1655. if (list->len <= eat) {
  1656. /* Eaten as whole. */
  1657. eat -= list->len;
  1658. list = list->next;
  1659. insp = list;
  1660. } else {
  1661. /* Eaten partially. */
  1662. if (skb_shared(list)) {
  1663. /* Sucks! We need to fork list. :-( */
  1664. clone = skb_clone(list, GFP_ATOMIC);
  1665. if (!clone)
  1666. return NULL;
  1667. insp = list->next;
  1668. list = clone;
  1669. } else {
  1670. /* This may be pulled without
  1671. * problems. */
  1672. insp = list;
  1673. }
  1674. if (!pskb_pull(list, eat)) {
  1675. kfree_skb(clone);
  1676. return NULL;
  1677. }
  1678. break;
  1679. }
  1680. } while (eat);
  1681. /* Free pulled out fragments. */
  1682. while ((list = skb_shinfo(skb)->frag_list) != insp) {
  1683. skb_shinfo(skb)->frag_list = list->next;
  1684. kfree_skb(list);
  1685. }
  1686. /* And insert new clone at head. */
  1687. if (clone) {
  1688. clone->next = list;
  1689. skb_shinfo(skb)->frag_list = clone;
  1690. }
  1691. }
  1692. /* Success! Now we may commit changes to skb data. */
  1693. pull_pages:
  1694. eat = delta;
  1695. k = 0;
  1696. for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
  1697. int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
  1698. if (size <= eat) {
  1699. skb_frag_unref(skb, i);
  1700. eat -= size;
  1701. } else {
  1702. skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i];
  1703. if (eat) {
  1704. skb_shinfo(skb)->frags[k].page_offset += eat;
  1705. skb_frag_size_sub(&skb_shinfo(skb)->frags[k], eat);
  1706. if (!i)
  1707. goto end;
  1708. eat = 0;
  1709. }
  1710. k++;
  1711. }
  1712. }
  1713. skb_shinfo(skb)->nr_frags = k;
  1714. end:
  1715. skb->tail += delta;
  1716. skb->data_len -= delta;
  1717. if (!skb->data_len)
  1718. skb_zcopy_clear(skb, false);
  1719. return skb_tail_pointer(skb);
  1720. }
  1721. EXPORT_SYMBOL(__pskb_pull_tail);
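/* Editor's addition: in practice __pskb_pull_tail() is usually reached via
 * pskb_may_pull(), which linearizes just enough bytes to read a header
 * safely.  A hedged sketch; struct example_hdr and the helper are invented.
 */
struct example_hdr {
	__be16 type;
	__be16 len;
};

static int example_peek_header(struct sk_buff *skb)
{
	const struct example_hdr *hdr;

	if (!pskb_may_pull(skb, sizeof(*hdr)))
		return -EINVAL;		/* not enough data, or reallocation failed */

	/* skb->data may have moved; re-read it after the pull. */
	hdr = (const struct example_hdr *)skb->data;

	return ntohs(hdr->len);
}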
  1722. /**
  1723. * skb_copy_bits - copy bits from skb to kernel buffer
  1724. * @skb: source skb
  1725. * @offset: offset in source
  1726. * @to: destination buffer
  1727. * @len: number of bytes to copy
  1728. *
  1729. * Copy the specified number of bytes from the source skb to the
  1730. * destination buffer.
  1731. *
  1732. * CAUTION ! :
  1733. * If its prototype is ever changed,
  1734. * check arch/{*}/net/{*}.S files,
  1735. * since it is called from BPF assembly code.
  1736. */
  1737. int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
  1738. {
  1739. int start = skb_headlen(skb);
  1740. struct sk_buff *frag_iter;
  1741. int i, copy;
  1742. if (offset > (int)skb->len - len)
  1743. goto fault;
  1744. /* Copy header. */
  1745. if ((copy = start - offset) > 0) {
  1746. if (copy > len)
  1747. copy = len;
  1748. skb_copy_from_linear_data_offset(skb, offset, to, copy);
  1749. if ((len -= copy) == 0)
  1750. return 0;
  1751. offset += copy;
  1752. to += copy;
  1753. }
  1754. for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
  1755. int end;
  1756. skb_frag_t *f = &skb_shinfo(skb)->frags[i];
  1757. WARN_ON(start > offset + len);
  1758. end = start + skb_frag_size(f);
  1759. if ((copy = end - offset) > 0) {
  1760. u32 p_off, p_len, copied;
  1761. struct page *p;
  1762. u8 *vaddr;
  1763. if (copy > len)
  1764. copy = len;
  1765. skb_frag_foreach_page(f,
  1766. f->page_offset + offset - start,
  1767. copy, p, p_off, p_len, copied) {
  1768. vaddr = kmap_atomic(p);
  1769. memcpy(to + copied, vaddr + p_off, p_len);
  1770. kunmap_atomic(vaddr);
  1771. }
  1772. if ((len -= copy) == 0)
  1773. return 0;
  1774. offset += copy;
  1775. to += copy;
  1776. }
  1777. start = end;
  1778. }
  1779. skb_walk_frags(skb, frag_iter) {
  1780. int end;
  1781. WARN_ON(start > offset + len);
  1782. end = start + frag_iter->len;
  1783. if ((copy = end - offset) > 0) {
  1784. if (copy > len)
  1785. copy = len;
  1786. if (skb_copy_bits(frag_iter, offset - start, to, copy))
  1787. goto fault;
  1788. if ((len -= copy) == 0)
  1789. return 0;
  1790. offset += copy;
  1791. to += copy;
  1792. }
  1793. start = end;
  1794. }
  1795. if (!len)
  1796. return 0;
  1797. fault:
  1798. return -EFAULT;
  1799. }
  1800. EXPORT_SYMBOL(skb_copy_bits);
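/* Editor's addition: a hypothetical helper extracting a region from an skb
 * that may be fragmented, rather than assuming the bytes sit in the linear
 * head.
 */
static int example_read_bytes(const struct sk_buff *skb, int offset,
			      void *buf, int len)
{
	/* Fast path: the requested region lies entirely in the linear area. */
	if (offset + len <= skb_headlen(skb)) {
		memcpy(buf, skb->data + offset, len);
		return 0;
	}

	/* Slow path: walk the frags and the frag list as needed. */
	return skb_copy_bits(skb, offset, buf, len);
}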
  1801. /*
1802. * Callback from splice_to_pipe(); releases pages left at the end of the
1803. * spd in case we errored out while filling the pipe.
  1804. */
  1805. static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i)
  1806. {
  1807. put_page(spd->pages[i]);
  1808. }
  1809. static struct page *linear_to_page(struct page *page, unsigned int *len,
  1810. unsigned int *offset,
  1811. struct sock *sk)
  1812. {
  1813. struct page_frag *pfrag = sk_page_frag(sk);
  1814. if (!sk_page_frag_refill(sk, pfrag))
  1815. return NULL;
  1816. *len = min_t(unsigned int, *len, pfrag->size - pfrag->offset);
  1817. memcpy(page_address(pfrag->page) + pfrag->offset,
  1818. page_address(page) + *offset, *len);
  1819. *offset = pfrag->offset;
  1820. pfrag->offset += *len;
  1821. return pfrag->page;
  1822. }
  1823. static bool spd_can_coalesce(const struct splice_pipe_desc *spd,
  1824. struct page *page,
  1825. unsigned int offset)
  1826. {
  1827. return spd->nr_pages &&
  1828. spd->pages[spd->nr_pages - 1] == page &&
  1829. (spd->partial[spd->nr_pages - 1].offset +
  1830. spd->partial[spd->nr_pages - 1].len == offset);
  1831. }
  1832. /*
  1833. * Fill page/offset/length into spd, if it can hold more pages.
  1834. */
  1835. static bool spd_fill_page(struct splice_pipe_desc *spd,
  1836. struct pipe_inode_info *pipe, struct page *page,
  1837. unsigned int *len, unsigned int offset,
  1838. bool linear,
  1839. struct sock *sk)
  1840. {
  1841. if (unlikely(spd->nr_pages == MAX_SKB_FRAGS))
  1842. return true;
  1843. if (linear) {
  1844. page = linear_to_page(page, len, &offset, sk);
  1845. if (!page)
  1846. return true;
  1847. }
  1848. if (spd_can_coalesce(spd, page, offset)) {
  1849. spd->partial[spd->nr_pages - 1].len += *len;
  1850. return false;
  1851. }
  1852. get_page(page);
  1853. spd->pages[spd->nr_pages] = page;
  1854. spd->partial[spd->nr_pages].len = *len;
  1855. spd->partial[spd->nr_pages].offset = offset;
  1856. spd->nr_pages++;
  1857. return false;
  1858. }
  1859. static bool __splice_segment(struct page *page, unsigned int poff,
  1860. unsigned int plen, unsigned int *off,
  1861. unsigned int *len,
  1862. struct splice_pipe_desc *spd, bool linear,
  1863. struct sock *sk,
  1864. struct pipe_inode_info *pipe)
  1865. {
  1866. if (!*len)
  1867. return true;
  1868. /* skip this segment if already processed */
  1869. if (*off >= plen) {
  1870. *off -= plen;
  1871. return false;
  1872. }
  1873. /* ignore any bits we already processed */
  1874. poff += *off;
  1875. plen -= *off;
  1876. *off = 0;
  1877. do {
  1878. unsigned int flen = min(*len, plen);
  1879. if (spd_fill_page(spd, pipe, page, &flen, poff,
  1880. linear, sk))
  1881. return true;
  1882. poff += flen;
  1883. plen -= flen;
  1884. *len -= flen;
  1885. } while (*len && plen);
  1886. return false;
  1887. }
  1888. /*
  1889. * Map linear and fragment data from the skb to spd. It reports true if the
  1890. * pipe is full or if we already spliced the requested length.
  1891. */
  1892. static bool __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe,
  1893. unsigned int *offset, unsigned int *len,
  1894. struct splice_pipe_desc *spd, struct sock *sk)
  1895. {
  1896. int seg;
  1897. struct sk_buff *iter;
  1898. /* map the linear part :
  1899. * If skb->head_frag is set, this 'linear' part is backed by a
  1900. * fragment, and if the head is not shared with any clones then
  1901. * we can avoid a copy since we own the head portion of this page.
  1902. */
  1903. if (__splice_segment(virt_to_page(skb->data),
  1904. (unsigned long) skb->data & (PAGE_SIZE - 1),
  1905. skb_headlen(skb),
  1906. offset, len, spd,
  1907. skb_head_is_locked(skb),
  1908. sk, pipe))
  1909. return true;
  1910. /*
  1911. * then map the fragments
  1912. */
  1913. for (seg = 0; seg < skb_shinfo(skb)->nr_frags; seg++) {
  1914. const skb_frag_t *f = &skb_shinfo(skb)->frags[seg];
  1915. if (__splice_segment(skb_frag_page(f),
  1916. f->page_offset, skb_frag_size(f),
  1917. offset, len, spd, false, sk, pipe))
  1918. return true;
  1919. }
  1920. skb_walk_frags(skb, iter) {
  1921. if (*offset >= iter->len) {
  1922. *offset -= iter->len;
  1923. continue;
  1924. }
  1925. /* __skb_splice_bits() only fails if the output has no room
  1926. * left, so no point in going over the frag_list for the error
  1927. * case.
  1928. */
  1929. if (__skb_splice_bits(iter, pipe, offset, len, spd, sk))
  1930. return true;
  1931. }
  1932. return false;
  1933. }
  1934. /*
  1935. * Map data from the skb to a pipe. Should handle both the linear part,
  1936. * the fragments, and the frag list.
  1937. */
  1938. int skb_splice_bits(struct sk_buff *skb, struct sock *sk, unsigned int offset,
  1939. struct pipe_inode_info *pipe, unsigned int tlen,
  1940. unsigned int flags)
  1941. {
  1942. struct partial_page partial[MAX_SKB_FRAGS];
  1943. struct page *pages[MAX_SKB_FRAGS];
  1944. struct splice_pipe_desc spd = {
  1945. .pages = pages,
  1946. .partial = partial,
  1947. .nr_pages_max = MAX_SKB_FRAGS,
  1948. .ops = &nosteal_pipe_buf_ops,
  1949. .spd_release = sock_spd_release,
  1950. };
  1951. int ret = 0;
  1952. __skb_splice_bits(skb, pipe, &offset, &tlen, &spd, sk);
  1953. if (spd.nr_pages)
  1954. ret = splice_to_pipe(pipe, &spd);
  1955. return ret;
  1956. }
  1957. EXPORT_SYMBOL_GPL(skb_splice_bits);
  1958. /* Send skb data on a socket. Socket must be locked. */
  1959. int skb_send_sock_locked(struct sock *sk, struct sk_buff *skb, int offset,
  1960. int len)
  1961. {
  1962. unsigned int orig_len = len;
  1963. struct sk_buff *head = skb;
  1964. unsigned short fragidx;
  1965. int slen, ret;
  1966. do_frag_list:
  1967. /* Deal with head data */
  1968. while (offset < skb_headlen(skb) && len) {
  1969. struct kvec kv;
  1970. struct msghdr msg;
  1971. slen = min_t(int, len, skb_headlen(skb) - offset);
  1972. kv.iov_base = skb->data + offset;
  1973. kv.iov_len = slen;
  1974. memset(&msg, 0, sizeof(msg));
  1975. ret = kernel_sendmsg_locked(sk, &msg, &kv, 1, slen);
  1976. if (ret <= 0)
  1977. goto error;
  1978. offset += ret;
  1979. len -= ret;
  1980. }
  1981. /* All the data was skb head? */
  1982. if (!len)
  1983. goto out;
  1984. /* Make offset relative to start of frags */
  1985. offset -= skb_headlen(skb);
  1986. /* Find where we are in frag list */
  1987. for (fragidx = 0; fragidx < skb_shinfo(skb)->nr_frags; fragidx++) {
  1988. skb_frag_t *frag = &skb_shinfo(skb)->frags[fragidx];
  1989. if (offset < frag->size)
  1990. break;
  1991. offset -= frag->size;
  1992. }
  1993. for (; len && fragidx < skb_shinfo(skb)->nr_frags; fragidx++) {
  1994. skb_frag_t *frag = &skb_shinfo(skb)->frags[fragidx];
  1995. slen = min_t(size_t, len, frag->size - offset);
  1996. while (slen) {
  1997. ret = kernel_sendpage_locked(sk, frag->page.p,
  1998. frag->page_offset + offset,
  1999. slen, MSG_DONTWAIT);
  2000. if (ret <= 0)
  2001. goto error;
  2002. len -= ret;
  2003. offset += ret;
  2004. slen -= ret;
  2005. }
  2006. offset = 0;
  2007. }
  2008. if (len) {
  2009. /* Process any frag lists */
  2010. if (skb == head) {
  2011. if (skb_has_frag_list(skb)) {
  2012. skb = skb_shinfo(skb)->frag_list;
  2013. goto do_frag_list;
  2014. }
  2015. } else if (skb->next) {
  2016. skb = skb->next;
  2017. goto do_frag_list;
  2018. }
  2019. }
  2020. out:
  2021. return orig_len - len;
  2022. error:
  2023. return orig_len == len ? ret : orig_len - len;
  2024. }
  2025. EXPORT_SYMBOL_GPL(skb_send_sock_locked);
  2026. /* Send skb data on a socket. */
  2027. int skb_send_sock(struct sock *sk, struct sk_buff *skb, int offset, int len)
  2028. {
  2029. int ret = 0;
  2030. lock_sock(sk);
  2031. ret = skb_send_sock_locked(sk, skb, offset, len);
  2032. release_sock(sk);
  2033. return ret;
  2034. }
  2035. EXPORT_SYMBOL_GPL(skb_send_sock);
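/* Editor's addition: a hedged sketch of pushing an skb's whole payload out
 * over an already-connected kernel socket.  Handling of short sends is left
 * to the caller; the helper name is invented.
 */
static int example_forward_skb(struct sock *sk, struct sk_buff *skb)
{
	int sent = skb_send_sock(sk, skb, 0, skb->len);

	if (sent < 0)
		return sent;		/* hard error from the send path */
	if (sent != skb->len)
		return -EAGAIN;		/* short send; the caller may retry the rest */
	return 0;
}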
  2036. /**
  2037. * skb_store_bits - store bits from kernel buffer to skb
  2038. * @skb: destination buffer
  2039. * @offset: offset in destination
  2040. * @from: source buffer
  2041. * @len: number of bytes to copy
  2042. *
  2043. * Copy the specified number of bytes from the source buffer to the
  2044. * destination skb. This function handles all the messy bits of
  2045. * traversing fragment lists and such.
  2046. */
  2047. int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len)
  2048. {
  2049. int start = skb_headlen(skb);
  2050. struct sk_buff *frag_iter;
  2051. int i, copy;
  2052. if (offset > (int)skb->len - len)
  2053. goto fault;
  2054. if ((copy = start - offset) > 0) {
  2055. if (copy > len)
  2056. copy = len;
  2057. skb_copy_to_linear_data_offset(skb, offset, from, copy);
  2058. if ((len -= copy) == 0)
  2059. return 0;
  2060. offset += copy;
  2061. from += copy;
  2062. }
  2063. for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
  2064. skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
  2065. int end;
  2066. WARN_ON(start > offset + len);
  2067. end = start + skb_frag_size(frag);
  2068. if ((copy = end - offset) > 0) {
  2069. u32 p_off, p_len, copied;
  2070. struct page *p;
  2071. u8 *vaddr;
  2072. if (copy > len)
  2073. copy = len;
  2074. skb_frag_foreach_page(frag,
  2075. frag->page_offset + offset - start,
  2076. copy, p, p_off, p_len, copied) {
  2077. vaddr = kmap_atomic(p);
  2078. memcpy(vaddr + p_off, from + copied, p_len);
  2079. kunmap_atomic(vaddr);
  2080. }
  2081. if ((len -= copy) == 0)
  2082. return 0;
  2083. offset += copy;
  2084. from += copy;
  2085. }
  2086. start = end;
  2087. }
  2088. skb_walk_frags(skb, frag_iter) {
  2089. int end;
  2090. WARN_ON(start > offset + len);
  2091. end = start + frag_iter->len;
  2092. if ((copy = end - offset) > 0) {
  2093. if (copy > len)
  2094. copy = len;
  2095. if (skb_store_bits(frag_iter, offset - start,
  2096. from, copy))
  2097. goto fault;
  2098. if ((len -= copy) == 0)
  2099. return 0;
  2100. offset += copy;
  2101. from += copy;
  2102. }
  2103. start = end;
  2104. }
  2105. if (!len)
  2106. return 0;
  2107. fault:
  2108. return -EFAULT;
  2109. }
  2110. EXPORT_SYMBOL(skb_store_bits);
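/* Editor's addition: skb_store_bits() is the write-side counterpart of
 * skb_copy_bits(); this hypothetical helper rewrites a small region in place
 * regardless of how the skb is fragmented.  The caller is assumed to have
 * made the data writable first (e.g. via skb_ensure_writable()).
 */
static int example_patch_bytes(struct sk_buff *skb, int offset,
			       const void *patch, int len)
{
	if (offset < 0 || offset + len > skb->len)
		return -EINVAL;

	return skb_store_bits(skb, offset, patch, len);
}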
  2111. /* Checksum skb data. */
  2112. __wsum __skb_checksum(const struct sk_buff *skb, int offset, int len,
  2113. __wsum csum, const struct skb_checksum_ops *ops)
  2114. {
  2115. int start = skb_headlen(skb);
  2116. int i, copy = start - offset;
  2117. struct sk_buff *frag_iter;
  2118. int pos = 0;
  2119. /* Checksum header. */
  2120. if (copy > 0) {
  2121. if (copy > len)
  2122. copy = len;
  2123. csum = ops->update(skb->data + offset, copy, csum);
  2124. if ((len -= copy) == 0)
  2125. return csum;
  2126. offset += copy;
  2127. pos = copy;
  2128. }
  2129. for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
  2130. int end;
  2131. skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
  2132. WARN_ON(start > offset + len);
  2133. end = start + skb_frag_size(frag);
  2134. if ((copy = end - offset) > 0) {
  2135. u32 p_off, p_len, copied;
  2136. struct page *p;
  2137. __wsum csum2;
  2138. u8 *vaddr;
  2139. if (copy > len)
  2140. copy = len;
  2141. skb_frag_foreach_page(frag,
  2142. frag->page_offset + offset - start,
  2143. copy, p, p_off, p_len, copied) {
  2144. vaddr = kmap_atomic(p);
  2145. csum2 = ops->update(vaddr + p_off, p_len, 0);
  2146. kunmap_atomic(vaddr);
  2147. csum = ops->combine(csum, csum2, pos, p_len);
  2148. pos += p_len;
  2149. }
  2150. if (!(len -= copy))
  2151. return csum;
  2152. offset += copy;
  2153. }
  2154. start = end;
  2155. }
  2156. skb_walk_frags(skb, frag_iter) {
  2157. int end;
  2158. WARN_ON(start > offset + len);
  2159. end = start + frag_iter->len;
  2160. if ((copy = end - offset) > 0) {
  2161. __wsum csum2;
  2162. if (copy > len)
  2163. copy = len;
  2164. csum2 = __skb_checksum(frag_iter, offset - start,
  2165. copy, 0, ops);
  2166. csum = ops->combine(csum, csum2, pos, copy);
  2167. if ((len -= copy) == 0)
  2168. return csum;
  2169. offset += copy;
  2170. pos += copy;
  2171. }
  2172. start = end;
  2173. }
  2174. BUG_ON(len);
  2175. return csum;
  2176. }
  2177. EXPORT_SYMBOL(__skb_checksum);
  2178. __wsum skb_checksum(const struct sk_buff *skb, int offset,
  2179. int len, __wsum csum)
  2180. {
  2181. const struct skb_checksum_ops ops = {
  2182. .update = csum_partial_ext,
  2183. .combine = csum_block_add_ext,
  2184. };
  2185. return __skb_checksum(skb, offset, len, csum, &ops);
  2186. }
  2187. EXPORT_SYMBOL(skb_checksum);
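/* Editor's addition: a hedged sketch folding the 32-bit partial sum returned
 * by skb_checksum() into a final 16-bit ones'-complement checksum, e.g. over
 * a payload that starts @hdr_len bytes into the packet.  Helper name invented.
 */
static __sum16 example_payload_csum(const struct sk_buff *skb, int hdr_len)
{
	__wsum sum = skb_checksum(skb, hdr_len, skb->len - hdr_len, 0);

	return csum_fold(sum);
}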
  2188. /* Both of above in one bottle. */
  2189. __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
  2190. u8 *to, int len, __wsum csum)
  2191. {
  2192. int start = skb_headlen(skb);
  2193. int i, copy = start - offset;
  2194. struct sk_buff *frag_iter;
  2195. int pos = 0;
  2196. /* Copy header. */
  2197. if (copy > 0) {
  2198. if (copy > len)
  2199. copy = len;
  2200. csum = csum_partial_copy_nocheck(skb->data + offset, to,
  2201. copy, csum);
  2202. if ((len -= copy) == 0)
  2203. return csum;
  2204. offset += copy;
  2205. to += copy;
  2206. pos = copy;
  2207. }
  2208. for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
  2209. int end;
  2210. WARN_ON(start > offset + len);
  2211. end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
  2212. if ((copy = end - offset) > 0) {
  2213. skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
  2214. u32 p_off, p_len, copied;
  2215. struct page *p;
  2216. __wsum csum2;
  2217. u8 *vaddr;
  2218. if (copy > len)
  2219. copy = len;
  2220. skb_frag_foreach_page(frag,
  2221. frag->page_offset + offset - start,
  2222. copy, p, p_off, p_len, copied) {
  2223. vaddr = kmap_atomic(p);
  2224. csum2 = csum_partial_copy_nocheck(vaddr + p_off,
  2225. to + copied,
  2226. p_len, 0);
  2227. kunmap_atomic(vaddr);
  2228. csum = csum_block_add(csum, csum2, pos);
  2229. pos += p_len;
  2230. }
  2231. if (!(len -= copy))
  2232. return csum;
  2233. offset += copy;
  2234. to += copy;
  2235. }
  2236. start = end;
  2237. }
  2238. skb_walk_frags(skb, frag_iter) {
  2239. __wsum csum2;
  2240. int end;
  2241. WARN_ON(start > offset + len);
  2242. end = start + frag_iter->len;
  2243. if ((copy = end - offset) > 0) {
  2244. if (copy > len)
  2245. copy = len;
  2246. csum2 = skb_copy_and_csum_bits(frag_iter,
  2247. offset - start,
  2248. to, copy, 0);
  2249. csum = csum_block_add(csum, csum2, pos);
  2250. if ((len -= copy) == 0)
  2251. return csum;
  2252. offset += copy;
  2253. to += copy;
  2254. pos += copy;
  2255. }
  2256. start = end;
  2257. }
  2258. BUG_ON(len);
  2259. return csum;
  2260. }
  2261. EXPORT_SYMBOL(skb_copy_and_csum_bits);
  2262. static __wsum warn_crc32c_csum_update(const void *buff, int len, __wsum sum)
  2263. {
  2264. net_warn_ratelimited(
  2265. "%s: attempt to compute crc32c without libcrc32c.ko\n",
  2266. __func__);
  2267. return 0;
  2268. }
  2269. static __wsum warn_crc32c_csum_combine(__wsum csum, __wsum csum2,
  2270. int offset, int len)
  2271. {
  2272. net_warn_ratelimited(
  2273. "%s: attempt to compute crc32c without libcrc32c.ko\n",
  2274. __func__);
  2275. return 0;
  2276. }
  2277. static const struct skb_checksum_ops default_crc32c_ops = {
  2278. .update = warn_crc32c_csum_update,
  2279. .combine = warn_crc32c_csum_combine,
  2280. };
  2281. const struct skb_checksum_ops *crc32c_csum_stub __read_mostly =
  2282. &default_crc32c_ops;
  2283. EXPORT_SYMBOL(crc32c_csum_stub);
  2284. /**
  2285. * skb_zerocopy_headlen - Calculate headroom needed for skb_zerocopy()
  2286. * @from: source buffer
  2287. *
  2288. * Calculates the amount of linear headroom needed in the 'to' skb passed
  2289. * into skb_zerocopy().
  2290. */
  2291. unsigned int
  2292. skb_zerocopy_headlen(const struct sk_buff *from)
  2293. {
  2294. unsigned int hlen = 0;
  2295. if (!from->head_frag ||
  2296. skb_headlen(from) < L1_CACHE_BYTES ||
  2297. skb_shinfo(from)->nr_frags >= MAX_SKB_FRAGS)
  2298. hlen = skb_headlen(from);
  2299. if (skb_has_frag_list(from))
  2300. hlen = from->len;
  2301. return hlen;
  2302. }
  2303. EXPORT_SYMBOL_GPL(skb_zerocopy_headlen);
  2304. /**
  2305. * skb_zerocopy - Zero copy skb to skb
  2306. * @to: destination buffer
  2307. * @from: source buffer
  2308. * @len: number of bytes to copy from source buffer
  2309. * @hlen: size of linear headroom in destination buffer
  2310. *
  2311. * Copies up to `len` bytes from `from` to `to` by creating references
  2312. * to the frags in the source buffer.
  2313. *
  2314. * The `hlen` as calculated by skb_zerocopy_headlen() specifies the
  2315. * headroom in the `to` buffer.
  2316. *
  2317. * Return value:
  2318. * 0: everything is OK
  2319. * -ENOMEM: couldn't orphan frags of @from due to lack of memory
  2320. * -EFAULT: skb_copy_bits() found some problem with skb geometry
  2321. */
  2322. int
  2323. skb_zerocopy(struct sk_buff *to, struct sk_buff *from, int len, int hlen)
  2324. {
  2325. int i, j = 0;
  2326. int plen = 0; /* length of skb->head fragment */
  2327. int ret;
  2328. struct page *page;
  2329. unsigned int offset;
  2330. BUG_ON(!from->head_frag && !hlen);
2331. /* don't bother with small payloads */
  2332. if (len <= skb_tailroom(to))
  2333. return skb_copy_bits(from, 0, skb_put(to, len), len);
  2334. if (hlen) {
  2335. ret = skb_copy_bits(from, 0, skb_put(to, hlen), hlen);
  2336. if (unlikely(ret))
  2337. return ret;
  2338. len -= hlen;
  2339. } else {
  2340. plen = min_t(int, skb_headlen(from), len);
  2341. if (plen) {
  2342. page = virt_to_head_page(from->head);
  2343. offset = from->data - (unsigned char *)page_address(page);
  2344. __skb_fill_page_desc(to, 0, page, offset, plen);
  2345. get_page(page);
  2346. j = 1;
  2347. len -= plen;
  2348. }
  2349. }
  2350. to->truesize += len + plen;
  2351. to->len += len + plen;
  2352. to->data_len += len + plen;
  2353. if (unlikely(skb_orphan_frags(from, GFP_ATOMIC))) {
  2354. skb_tx_error(from);
  2355. return -ENOMEM;
  2356. }
  2357. skb_zerocopy_clone(to, from, GFP_ATOMIC);
  2358. for (i = 0; i < skb_shinfo(from)->nr_frags; i++) {
  2359. if (!len)
  2360. break;
  2361. skb_shinfo(to)->frags[j] = skb_shinfo(from)->frags[i];
  2362. skb_shinfo(to)->frags[j].size = min_t(int, skb_shinfo(to)->frags[j].size, len);
  2363. len -= skb_shinfo(to)->frags[j].size;
  2364. skb_frag_ref(to, j);
  2365. j++;
  2366. }
  2367. skb_shinfo(to)->nr_frags = j;
  2368. return 0;
  2369. }
  2370. EXPORT_SYMBOL_GPL(skb_zerocopy);
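/* Editor's addition: the intended pairing of skb_zerocopy_headlen() and
 * skb_zerocopy(), sketched with an invented allocator callback that is
 * assumed to return an skb with @hlen bytes of linear room.
 */
static struct sk_buff *
example_share_frags(struct sk_buff *from,
		    struct sk_buff *(*example_new_skb)(unsigned int hlen))
{
	unsigned int hlen = skb_zerocopy_headlen(from);
	struct sk_buff *to = example_new_skb(hlen);

	if (!to)
		return NULL;

	if (skb_zerocopy(to, from, from->len, hlen)) {
		kfree_skb(to);
		return NULL;
	}
	return to;
}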
  2371. void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to)
  2372. {
  2373. __wsum csum;
  2374. long csstart;
  2375. if (skb->ip_summed == CHECKSUM_PARTIAL)
  2376. csstart = skb_checksum_start_offset(skb);
  2377. else
  2378. csstart = skb_headlen(skb);
  2379. BUG_ON(csstart > skb_headlen(skb));
  2380. skb_copy_from_linear_data(skb, to, csstart);
  2381. csum = 0;
  2382. if (csstart != skb->len)
  2383. csum = skb_copy_and_csum_bits(skb, csstart, to + csstart,
  2384. skb->len - csstart, 0);
  2385. if (skb->ip_summed == CHECKSUM_PARTIAL) {
  2386. long csstuff = csstart + skb->csum_offset;
  2387. *((__sum16 *)(to + csstuff)) = csum_fold(csum);
  2388. }
  2389. }
  2390. EXPORT_SYMBOL(skb_copy_and_csum_dev);
  2391. /**
  2392. * skb_dequeue - remove from the head of the queue
  2393. * @list: list to dequeue from
  2394. *
  2395. * Remove the head of the list. The list lock is taken so the function
  2396. * may be used safely with other locking list functions. The head item is
  2397. * returned or %NULL if the list is empty.
  2398. */
  2399. struct sk_buff *skb_dequeue(struct sk_buff_head *list)
  2400. {
  2401. unsigned long flags;
  2402. struct sk_buff *result;
  2403. spin_lock_irqsave(&list->lock, flags);
  2404. result = __skb_dequeue(list);
  2405. spin_unlock_irqrestore(&list->lock, flags);
  2406. return result;
  2407. }
  2408. EXPORT_SYMBOL(skb_dequeue);
  2409. /**
  2410. * skb_dequeue_tail - remove from the tail of the queue
  2411. * @list: list to dequeue from
  2412. *
  2413. * Remove the tail of the list. The list lock is taken so the function
  2414. * may be used safely with other locking list functions. The tail item is
  2415. * returned or %NULL if the list is empty.
  2416. */
  2417. struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list)
  2418. {
  2419. unsigned long flags;
  2420. struct sk_buff *result;
  2421. spin_lock_irqsave(&list->lock, flags);
  2422. result = __skb_dequeue_tail(list);
  2423. spin_unlock_irqrestore(&list->lock, flags);
  2424. return result;
  2425. }
  2426. EXPORT_SYMBOL(skb_dequeue_tail);
  2427. /**
  2428. * skb_queue_purge - empty a list
  2429. * @list: list to empty
  2430. *
  2431. * Delete all buffers on an &sk_buff list. Each buffer is removed from
  2432. * the list and one reference dropped. This function takes the list
  2433. * lock and is atomic with respect to other list locking functions.
  2434. */
  2435. void skb_queue_purge(struct sk_buff_head *list)
  2436. {
  2437. struct sk_buff *skb;
  2438. while ((skb = skb_dequeue(list)) != NULL)
  2439. kfree_skb(skb);
  2440. }
  2441. EXPORT_SYMBOL(skb_queue_purge);
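/* Editor's addition: a small (hypothetical) sketch of the locked
 * sk_buff_head API used as a FIFO; skb_dequeue() takes the queue lock
 * itself, so this is safe even while producers call skb_queue_tail().
 */
static void example_drain_queue(struct sk_buff_head *queue,
				void (*example_consume)(struct sk_buff *))
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(queue)) != NULL)
		example_consume(skb);
}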
  2442. /**
  2443. * skb_rbtree_purge - empty a skb rbtree
  2444. * @root: root of the rbtree to empty
  2445. *
  2446. * Delete all buffers on an &sk_buff rbtree. Each buffer is removed from
  2447. * the list and one reference dropped. This function does not take
  2448. * any lock. Synchronization should be handled by the caller (e.g., TCP
  2449. * out-of-order queue is protected by the socket lock).
  2450. */
  2451. void skb_rbtree_purge(struct rb_root *root)
  2452. {
  2453. struct rb_node *p = rb_first(root);
  2454. while (p) {
  2455. struct sk_buff *skb = rb_entry(p, struct sk_buff, rbnode);
  2456. p = rb_next(p);
  2457. rb_erase(&skb->rbnode, root);
  2458. kfree_skb(skb);
  2459. }
  2460. }
  2461. /**
  2462. * skb_queue_head - queue a buffer at the list head
  2463. * @list: list to use
  2464. * @newsk: buffer to queue
  2465. *
  2466. * Queue a buffer at the start of the list. This function takes the
2467. * list lock and can be used safely with other locking &sk_buff
2468. * functions.
  2469. *
  2470. * A buffer cannot be placed on two lists at the same time.
  2471. */
  2472. void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
  2473. {
  2474. unsigned long flags;
  2475. spin_lock_irqsave(&list->lock, flags);
  2476. __skb_queue_head(list, newsk);
  2477. spin_unlock_irqrestore(&list->lock, flags);
  2478. }
  2479. EXPORT_SYMBOL(skb_queue_head);
  2480. /**
  2481. * skb_queue_tail - queue a buffer at the list tail
  2482. * @list: list to use
  2483. * @newsk: buffer to queue
  2484. *
  2485. * Queue a buffer at the tail of the list. This function takes the
2486. * list lock and can be used safely with other locking &sk_buff
2487. * functions.
  2488. *
  2489. * A buffer cannot be placed on two lists at the same time.
  2490. */
  2491. void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
  2492. {
  2493. unsigned long flags;
  2494. spin_lock_irqsave(&list->lock, flags);
  2495. __skb_queue_tail(list, newsk);
  2496. spin_unlock_irqrestore(&list->lock, flags);
  2497. }
  2498. EXPORT_SYMBOL(skb_queue_tail);
  2499. /**
  2500. * skb_unlink - remove a buffer from a list
  2501. * @skb: buffer to remove
  2502. * @list: list to use
  2503. *
  2504. * Remove a packet from a list. The list locks are taken and this
2505. * function is atomic with respect to other list locked calls.
  2506. *
  2507. * You must know what list the SKB is on.
  2508. */
  2509. void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
  2510. {
  2511. unsigned long flags;
  2512. spin_lock_irqsave(&list->lock, flags);
  2513. __skb_unlink(skb, list);
  2514. spin_unlock_irqrestore(&list->lock, flags);
  2515. }
  2516. EXPORT_SYMBOL(skb_unlink);
  2517. /**
  2518. * skb_append - append a buffer
  2519. * @old: buffer to insert after
  2520. * @newsk: buffer to insert
  2521. * @list: list to use
  2522. *
  2523. * Place a packet after a given packet in a list. The list locks are taken
  2524. * and this function is atomic with respect to other list locked calls.
  2525. * A buffer cannot be placed on two lists at the same time.
  2526. */
  2527. void skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
  2528. {
  2529. unsigned long flags;
  2530. spin_lock_irqsave(&list->lock, flags);
  2531. __skb_queue_after(list, old, newsk);
  2532. spin_unlock_irqrestore(&list->lock, flags);
  2533. }
  2534. EXPORT_SYMBOL(skb_append);
  2535. /**
  2536. * skb_insert - insert a buffer
  2537. * @old: buffer to insert before
  2538. * @newsk: buffer to insert
  2539. * @list: list to use
  2540. *
  2541. * Place a packet before a given packet in a list. The list locks are
  2542. * taken and this function is atomic with respect to other list locked
  2543. * calls.
  2544. *
  2545. * A buffer cannot be placed on two lists at the same time.
  2546. */
  2547. void skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
  2548. {
  2549. unsigned long flags;
  2550. spin_lock_irqsave(&list->lock, flags);
  2551. __skb_insert(newsk, old->prev, old, list);
  2552. spin_unlock_irqrestore(&list->lock, flags);
  2553. }
  2554. EXPORT_SYMBOL(skb_insert);
  2555. static inline void skb_split_inside_header(struct sk_buff *skb,
  2556. struct sk_buff* skb1,
  2557. const u32 len, const int pos)
  2558. {
  2559. int i;
  2560. skb_copy_from_linear_data_offset(skb, len, skb_put(skb1, pos - len),
  2561. pos - len);
  2562. /* And move data appendix as is. */
  2563. for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
  2564. skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i];
  2565. skb_shinfo(skb1)->nr_frags = skb_shinfo(skb)->nr_frags;
  2566. skb_shinfo(skb)->nr_frags = 0;
  2567. skb1->data_len = skb->data_len;
  2568. skb1->len += skb1->data_len;
  2569. skb->data_len = 0;
  2570. skb->len = len;
  2571. skb_set_tail_pointer(skb, len);
  2572. }
  2573. static inline void skb_split_no_header(struct sk_buff *skb,
  2574. struct sk_buff* skb1,
  2575. const u32 len, int pos)
  2576. {
  2577. int i, k = 0;
  2578. const int nfrags = skb_shinfo(skb)->nr_frags;
  2579. skb_shinfo(skb)->nr_frags = 0;
  2580. skb1->len = skb1->data_len = skb->len - len;
  2581. skb->len = len;
  2582. skb->data_len = len - pos;
  2583. for (i = 0; i < nfrags; i++) {
  2584. int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
  2585. if (pos + size > len) {
  2586. skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i];
  2587. if (pos < len) {
2588. /* Split the frag.
2589. * We have two options in this case:
2590. * 1. Move the whole frag to the second
2591. * part, if it is possible. For example,
2592. * this approach is mandatory for TUX,
2593. * where splitting is expensive.
2594. * 2. Split accurately; this is what we do.
  2595. */
  2596. skb_frag_ref(skb, i);
  2597. skb_shinfo(skb1)->frags[0].page_offset += len - pos;
  2598. skb_frag_size_sub(&skb_shinfo(skb1)->frags[0], len - pos);
  2599. skb_frag_size_set(&skb_shinfo(skb)->frags[i], len - pos);
  2600. skb_shinfo(skb)->nr_frags++;
  2601. }
  2602. k++;
  2603. } else
  2604. skb_shinfo(skb)->nr_frags++;
  2605. pos += size;
  2606. }
  2607. skb_shinfo(skb1)->nr_frags = k;
  2608. }
  2609. /**
  2610. * skb_split - Split fragmented skb to two parts at length len.
  2611. * @skb: the buffer to split
  2612. * @skb1: the buffer to receive the second part
  2613. * @len: new length for skb
  2614. */
  2615. void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len)
  2616. {
  2617. int pos = skb_headlen(skb);
  2618. skb_shinfo(skb1)->tx_flags |= skb_shinfo(skb)->tx_flags &
  2619. SKBTX_SHARED_FRAG;
  2620. skb_zerocopy_clone(skb1, skb, 0);
  2621. if (len < pos) /* Split line is inside header. */
  2622. skb_split_inside_header(skb, skb1, len, pos);
  2623. else /* Second chunk has no header, nothing to copy. */
  2624. skb_split_no_header(skb, skb1, len, pos);
  2625. }
  2626. EXPORT_SYMBOL(skb_split);
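/* Editor's addition: a hedged sketch of splitting an over-long segment at
 * @mss bytes, in the style of TCP segmentation code.  The allocation of the
 * second buffer is simplified; the helper name is invented.
 */
static struct sk_buff *example_split_at(struct sk_buff *skb, u32 mss)
{
	struct sk_buff *rest;

	if (skb->len <= mss)
		return NULL;			/* nothing to split off */

	rest = alloc_skb(skb_headlen(skb), GFP_ATOMIC);
	if (!rest)
		return NULL;

	/* After this, @skb holds the first @mss bytes and @rest the tail. */
	skb_split(skb, rest, mss);
	return rest;
}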
  2627. /* Shifting from/to a cloned skb is a no-go.
  2628. *
  2629. * Caller cannot keep skb_shinfo related pointers past calling here!
  2630. */
  2631. static int skb_prepare_for_shift(struct sk_buff *skb)
  2632. {
  2633. return skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
  2634. }
  2635. /**
  2636. * skb_shift - Shifts paged data partially from skb to another
  2637. * @tgt: buffer into which tail data gets added
  2638. * @skb: buffer from which the paged data comes from
  2639. * @shiftlen: shift up to this many bytes
  2640. *
  2641. * Attempts to shift up to shiftlen worth of bytes, which may be less than
2642. * the length of the skb, from skb to tgt. Returns the number of bytes shifted.
  2643. * It's up to caller to free skb if everything was shifted.
  2644. *
  2645. * If @tgt runs out of frags, the whole operation is aborted.
  2646. *
2647. * The skb may contain nothing but paged data, while tgt is allowed
2648. * to contain non-paged (linear) data as well.
  2649. *
  2650. * TODO: full sized shift could be optimized but that would need
  2651. * specialized skb free'er to handle frags without up-to-date nr_frags.
  2652. */
  2653. int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen)
  2654. {
  2655. int from, to, merge, todo;
  2656. struct skb_frag_struct *fragfrom, *fragto;
  2657. BUG_ON(shiftlen > skb->len);
  2658. if (skb_headlen(skb))
  2659. return 0;
  2660. if (skb_zcopy(tgt) || skb_zcopy(skb))
  2661. return 0;
  2662. todo = shiftlen;
  2663. from = 0;
  2664. to = skb_shinfo(tgt)->nr_frags;
  2665. fragfrom = &skb_shinfo(skb)->frags[from];
  2666. /* Actual merge is delayed until the point when we know we can
  2667. * commit all, so that we don't have to undo partial changes
  2668. */
  2669. if (!to ||
  2670. !skb_can_coalesce(tgt, to, skb_frag_page(fragfrom),
  2671. fragfrom->page_offset)) {
  2672. merge = -1;
  2673. } else {
  2674. merge = to - 1;
  2675. todo -= skb_frag_size(fragfrom);
  2676. if (todo < 0) {
  2677. if (skb_prepare_for_shift(skb) ||
  2678. skb_prepare_for_shift(tgt))
  2679. return 0;
  2680. /* All previous frag pointers might be stale! */
  2681. fragfrom = &skb_shinfo(skb)->frags[from];
  2682. fragto = &skb_shinfo(tgt)->frags[merge];
  2683. skb_frag_size_add(fragto, shiftlen);
  2684. skb_frag_size_sub(fragfrom, shiftlen);
  2685. fragfrom->page_offset += shiftlen;
  2686. goto onlymerged;
  2687. }
  2688. from++;
  2689. }
  2690. /* Skip full, not-fitting skb to avoid expensive operations */
  2691. if ((shiftlen == skb->len) &&
  2692. (skb_shinfo(skb)->nr_frags - from) > (MAX_SKB_FRAGS - to))
  2693. return 0;
  2694. if (skb_prepare_for_shift(skb) || skb_prepare_for_shift(tgt))
  2695. return 0;
  2696. while ((todo > 0) && (from < skb_shinfo(skb)->nr_frags)) {
  2697. if (to == MAX_SKB_FRAGS)
  2698. return 0;
  2699. fragfrom = &skb_shinfo(skb)->frags[from];
  2700. fragto = &skb_shinfo(tgt)->frags[to];
  2701. if (todo >= skb_frag_size(fragfrom)) {
  2702. *fragto = *fragfrom;
  2703. todo -= skb_frag_size(fragfrom);
  2704. from++;
  2705. to++;
  2706. } else {
  2707. __skb_frag_ref(fragfrom);
  2708. fragto->page = fragfrom->page;
  2709. fragto->page_offset = fragfrom->page_offset;
  2710. skb_frag_size_set(fragto, todo);
  2711. fragfrom->page_offset += todo;
  2712. skb_frag_size_sub(fragfrom, todo);
  2713. todo = 0;
  2714. to++;
  2715. break;
  2716. }
  2717. }
  2718. /* Ready to "commit" this state change to tgt */
  2719. skb_shinfo(tgt)->nr_frags = to;
  2720. if (merge >= 0) {
  2721. fragfrom = &skb_shinfo(skb)->frags[0];
  2722. fragto = &skb_shinfo(tgt)->frags[merge];
  2723. skb_frag_size_add(fragto, skb_frag_size(fragfrom));
  2724. __skb_frag_unref(fragfrom);
  2725. }
  2726. /* Reposition in the original skb */
  2727. to = 0;
  2728. while (from < skb_shinfo(skb)->nr_frags)
  2729. skb_shinfo(skb)->frags[to++] = skb_shinfo(skb)->frags[from++];
  2730. skb_shinfo(skb)->nr_frags = to;
  2731. BUG_ON(todo > 0 && !skb_shinfo(skb)->nr_frags);
  2732. onlymerged:
  2733. /* Most likely the tgt won't ever need its checksum anymore, skb on
  2734. * the other hand might need it if it needs to be resent
  2735. */
  2736. tgt->ip_summed = CHECKSUM_PARTIAL;
  2737. skb->ip_summed = CHECKSUM_PARTIAL;
2738. /* TODO: is there really no helper for adjusting these counters on both skbs? */
  2739. skb->len -= shiftlen;
  2740. skb->data_len -= shiftlen;
  2741. skb->truesize -= shiftlen;
  2742. tgt->len += shiftlen;
  2743. tgt->data_len += shiftlen;
  2744. tgt->truesize += shiftlen;
  2745. return shiftlen;
  2746. }
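/*
 * Example (illustrative sketch, not from the original file): try to fold up
 * to @want bytes of paged data from @skb into the preceding buffer @prev,
 * the way a retransmit-queue collapser might. The names are hypothetical.
 * skb_shift() only accepts a source skb without linear data, and a fully
 * emptied @skb is still the caller's to free.
 */
static bool example_collapse_into_prev(struct sk_buff *prev,
				       struct sk_buff *skb, int want)
{
	int shifted;

	if (skb_headlen(skb))			/* paged data only, see above */
		return false;

	shifted = skb_shift(prev, skb, min_t(int, want, skb->len));
	return shifted && !skb->len;		/* true when skb is now empty */
}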
  2747. /**
  2748. * skb_prepare_seq_read - Prepare a sequential read of skb data
  2749. * @skb: the buffer to read
  2750. * @from: lower offset of data to be read
  2751. * @to: upper offset of data to be read
  2752. * @st: state variable
  2753. *
  2754. * Initializes the specified state variable. Must be called before
  2755. * invoking skb_seq_read() for the first time.
  2756. */
  2757. void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from,
  2758. unsigned int to, struct skb_seq_state *st)
  2759. {
  2760. st->lower_offset = from;
  2761. st->upper_offset = to;
  2762. st->root_skb = st->cur_skb = skb;
  2763. st->frag_idx = st->stepped_offset = 0;
  2764. st->frag_data = NULL;
  2765. }
  2766. EXPORT_SYMBOL(skb_prepare_seq_read);
  2767. /**
  2768. * skb_seq_read - Sequentially read skb data
  2769. * @consumed: number of bytes consumed by the caller so far
  2770. * @data: destination pointer for data to be returned
  2771. * @st: state variable
  2772. *
  2773. * Reads a block of skb data at @consumed relative to the
  2774. * lower offset specified to skb_prepare_seq_read(). Assigns
  2775. * the head of the data block to @data and returns the length
  2776. * of the block or 0 if the end of the skb data or the upper
  2777. * offset has been reached.
  2778. *
  2779. * The caller is not required to consume all of the data
  2780. * returned, i.e. @consumed is typically set to the number
  2781. * of bytes already consumed and the next call to
  2782. * skb_seq_read() will return the remaining part of the block.
  2783. *
2784. * Note 1: The size of each block of data returned can be arbitrary;
2785. * this limitation is the cost of zerocopy sequential
2786. * reads of potentially non-linear data.
  2787. *
  2788. * Note 2: Fragment lists within fragments are not implemented
  2789. * at the moment, state->root_skb could be replaced with
  2790. * a stack for this purpose.
  2791. */
  2792. unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
  2793. struct skb_seq_state *st)
  2794. {
  2795. unsigned int block_limit, abs_offset = consumed + st->lower_offset;
  2796. skb_frag_t *frag;
  2797. if (unlikely(abs_offset >= st->upper_offset)) {
  2798. if (st->frag_data) {
  2799. kunmap_atomic(st->frag_data);
  2800. st->frag_data = NULL;
  2801. }
  2802. return 0;
  2803. }
  2804. next_skb:
  2805. block_limit = skb_headlen(st->cur_skb) + st->stepped_offset;
  2806. if (abs_offset < block_limit && !st->frag_data) {
  2807. *data = st->cur_skb->data + (abs_offset - st->stepped_offset);
  2808. return block_limit - abs_offset;
  2809. }
  2810. if (st->frag_idx == 0 && !st->frag_data)
  2811. st->stepped_offset += skb_headlen(st->cur_skb);
  2812. while (st->frag_idx < skb_shinfo(st->cur_skb)->nr_frags) {
  2813. frag = &skb_shinfo(st->cur_skb)->frags[st->frag_idx];
  2814. block_limit = skb_frag_size(frag) + st->stepped_offset;
  2815. if (abs_offset < block_limit) {
  2816. if (!st->frag_data)
  2817. st->frag_data = kmap_atomic(skb_frag_page(frag));
  2818. *data = (u8 *) st->frag_data + frag->page_offset +
  2819. (abs_offset - st->stepped_offset);
  2820. return block_limit - abs_offset;
  2821. }
  2822. if (st->frag_data) {
  2823. kunmap_atomic(st->frag_data);
  2824. st->frag_data = NULL;
  2825. }
  2826. st->frag_idx++;
  2827. st->stepped_offset += skb_frag_size(frag);
  2828. }
  2829. if (st->frag_data) {
  2830. kunmap_atomic(st->frag_data);
  2831. st->frag_data = NULL;
  2832. }
  2833. if (st->root_skb == st->cur_skb && skb_has_frag_list(st->root_skb)) {
  2834. st->cur_skb = skb_shinfo(st->root_skb)->frag_list;
  2835. st->frag_idx = 0;
  2836. goto next_skb;
  2837. } else if (st->cur_skb->next) {
  2838. st->cur_skb = st->cur_skb->next;
  2839. st->frag_idx = 0;
  2840. goto next_skb;
  2841. }
  2842. return 0;
  2843. }
  2844. EXPORT_SYMBOL(skb_seq_read);
  2845. /**
  2846. * skb_abort_seq_read - Abort a sequential read of skb data
  2847. * @st: state variable
  2848. *
2849. * Must be called if the sequential read was abandoned before
2850. * skb_seq_read() returned 0.
  2851. */
  2852. void skb_abort_seq_read(struct skb_seq_state *st)
  2853. {
  2854. if (st->frag_data)
  2855. kunmap_atomic(st->frag_data);
  2856. }
  2857. EXPORT_SYMBOL(skb_abort_seq_read);
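/*
 * Example (illustrative sketch, not from the original file): walk every byte
 * of an skb with the sequential-read API and hand each block to a
 * hypothetical example_consume() callback. Because the loop runs until
 * skb_seq_read() returns 0, no skb_abort_seq_read() is needed; an early exit
 * from the loop would require it.
 */
static void example_walk_skb(struct sk_buff *skb,
			     void (*example_consume)(const u8 *data,
						     unsigned int len))
{
	struct skb_seq_state st;
	unsigned int consumed = 0, len;
	const u8 *data;

	skb_prepare_seq_read(skb, 0, skb->len, &st);
	while ((len = skb_seq_read(consumed, &data, &st)) != 0) {
		example_consume(data, len);
		consumed += len;
	}
}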
  2858. #define TS_SKB_CB(state) ((struct skb_seq_state *) &((state)->cb))
  2859. static unsigned int skb_ts_get_next_block(unsigned int offset, const u8 **text,
  2860. struct ts_config *conf,
  2861. struct ts_state *state)
  2862. {
  2863. return skb_seq_read(offset, text, TS_SKB_CB(state));
  2864. }
  2865. static void skb_ts_finish(struct ts_config *conf, struct ts_state *state)
  2866. {
  2867. skb_abort_seq_read(TS_SKB_CB(state));
  2868. }
  2869. /**
  2870. * skb_find_text - Find a text pattern in skb data
  2871. * @skb: the buffer to look in
  2872. * @from: search offset
  2873. * @to: search limit
  2874. * @config: textsearch configuration
  2875. *
  2876. * Finds a pattern in the skb data according to the specified
  2877. * textsearch configuration. Use textsearch_next() to retrieve
  2878. * subsequent occurrences of the pattern. Returns the offset
  2879. * to the first occurrence or UINT_MAX if no match was found.
  2880. */
  2881. unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
  2882. unsigned int to, struct ts_config *config)
  2883. {
  2884. struct ts_state state;
  2885. unsigned int ret;
  2886. config->get_next_block = skb_ts_get_next_block;
  2887. config->finish = skb_ts_finish;
  2888. skb_prepare_seq_read(skb, from, to, TS_SKB_CB(&state));
  2889. ret = textsearch_find(config, &state);
  2890. return (ret <= to - from ? ret : UINT_MAX);
  2891. }
  2892. EXPORT_SYMBOL(skb_find_text);
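/*
 * Example (illustrative sketch, not from the original file): check whether
 * @skb contains a fixed byte pattern anywhere in its payload. The "kmp"
 * algorithm choice and GFP_KERNEL (process context assumed) are arbitrary
 * for the sketch.
 */
static bool example_skb_contains(struct sk_buff *skb,
				 const void *pattern, unsigned int patlen)
{
	struct ts_config *conf;
	unsigned int pos;

	conf = textsearch_prepare("kmp", pattern, patlen, GFP_KERNEL,
				  TS_AUTOLOAD);
	if (IS_ERR(conf))
		return false;

	pos = skb_find_text(skb, 0, skb->len, conf);
	textsearch_destroy(conf);
	return pos != UINT_MAX;
}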
  2893. /**
  2894. * skb_append_datato_frags - append the user data to a skb
  2895. * @sk: sock structure
  2896. * @skb: skb structure to be appended with user data.
2897. * @getfrag: callback function used to get the user data
  2898. * @from: pointer to user message iov
  2899. * @length: length of the iov message
  2900. *
2901. * Description: This procedure appends the user data to the fragment part
2902. * of the skb. If any page allocation fails, it returns -ENOMEM.
  2903. */
  2904. int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
  2905. int (*getfrag)(void *from, char *to, int offset,
  2906. int len, int odd, struct sk_buff *skb),
  2907. void *from, int length)
  2908. {
  2909. int frg_cnt = skb_shinfo(skb)->nr_frags;
  2910. int copy;
  2911. int offset = 0;
  2912. int ret;
  2913. struct page_frag *pfrag = &current->task_frag;
  2914. do {
  2915. /* Return error if we don't have space for new frag */
  2916. if (frg_cnt >= MAX_SKB_FRAGS)
  2917. return -EMSGSIZE;
  2918. if (!sk_page_frag_refill(sk, pfrag))
  2919. return -ENOMEM;
  2920. /* copy the user data to page */
  2921. copy = min_t(int, length, pfrag->size - pfrag->offset);
  2922. ret = getfrag(from, page_address(pfrag->page) + pfrag->offset,
  2923. offset, copy, 0, skb);
  2924. if (ret < 0)
  2925. return -EFAULT;
  2926. /* copy was successful so update the size parameters */
  2927. skb_fill_page_desc(skb, frg_cnt, pfrag->page, pfrag->offset,
  2928. copy);
  2929. frg_cnt++;
  2930. pfrag->offset += copy;
  2931. get_page(pfrag->page);
  2932. skb->truesize += copy;
  2933. refcount_add(copy, &sk->sk_wmem_alloc);
  2934. skb->len += copy;
  2935. skb->data_len += copy;
  2936. offset += copy;
  2937. length -= copy;
  2938. } while (length > 0);
  2939. return 0;
  2940. }
  2941. EXPORT_SYMBOL(skb_append_datato_frags);
  2942. int skb_append_pagefrags(struct sk_buff *skb, struct page *page,
  2943. int offset, size_t size)
  2944. {
  2945. int i = skb_shinfo(skb)->nr_frags;
  2946. if (skb_can_coalesce(skb, i, page, offset)) {
  2947. skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], size);
  2948. } else if (i < MAX_SKB_FRAGS) {
  2949. get_page(page);
  2950. skb_fill_page_desc(skb, i, page, offset, size);
  2951. } else {
  2952. return -EMSGSIZE;
  2953. }
  2954. return 0;
  2955. }
  2956. EXPORT_SYMBOL_GPL(skb_append_pagefrags);
  2957. /**
  2958. * skb_pull_rcsum - pull skb and update receive checksum
  2959. * @skb: buffer to update
  2960. * @len: length of data pulled
  2961. *
  2962. * This function performs an skb_pull on the packet and updates
  2963. * the CHECKSUM_COMPLETE checksum. It should be used on
  2964. * receive path processing instead of skb_pull unless you know
  2965. * that the checksum difference is zero (e.g., a valid IP header)
  2966. * or you are setting ip_summed to CHECKSUM_NONE.
  2967. */
  2968. void *skb_pull_rcsum(struct sk_buff *skb, unsigned int len)
  2969. {
  2970. unsigned char *data = skb->data;
  2971. BUG_ON(len > skb->len);
  2972. __skb_pull(skb, len);
  2973. skb_postpull_rcsum(skb, data, len);
  2974. return skb->data;
  2975. }
  2976. EXPORT_SYMBOL_GPL(skb_pull_rcsum);
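/*
 * Example (illustrative sketch, not from the original file): strip a
 * hypothetical 8-byte encapsulation header on the receive path while
 * keeping a CHECKSUM_COMPLETE value consistent.
 */
static int example_decap(struct sk_buff *skb)
{
	const unsigned int hdrlen = 8;		/* made-up header size */

	if (!pskb_may_pull(skb, hdrlen))
		return -EINVAL;

	skb_pull_rcsum(skb, hdrlen);		/* pull + fix up skb->csum */
	skb_reset_network_header(skb);
	return 0;
}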
  2977. static inline skb_frag_t skb_head_frag_to_page_desc(struct sk_buff *frag_skb)
  2978. {
  2979. skb_frag_t head_frag;
  2980. struct page *page;
  2981. page = virt_to_head_page(frag_skb->head);
  2982. head_frag.page.p = page;
  2983. head_frag.page_offset = frag_skb->data -
  2984. (unsigned char *)page_address(page);
  2985. head_frag.size = skb_headlen(frag_skb);
  2986. return head_frag;
  2987. }
  2988. /**
  2989. * skb_segment - Perform protocol segmentation on skb.
  2990. * @head_skb: buffer to segment
  2991. * @features: features for the output path (see dev->features)
  2992. *
  2993. * This function performs segmentation on the given skb. It returns
  2994. * a pointer to the first in a list of new skbs for the segments.
  2995. * In case of error it returns ERR_PTR(err).
  2996. */
  2997. struct sk_buff *skb_segment(struct sk_buff *head_skb,
  2998. netdev_features_t features)
  2999. {
  3000. struct sk_buff *segs = NULL;
  3001. struct sk_buff *tail = NULL;
  3002. struct sk_buff *list_skb = skb_shinfo(head_skb)->frag_list;
  3003. skb_frag_t *frag = skb_shinfo(head_skb)->frags;
  3004. unsigned int mss = skb_shinfo(head_skb)->gso_size;
  3005. unsigned int doffset = head_skb->data - skb_mac_header(head_skb);
  3006. struct sk_buff *frag_skb = head_skb;
  3007. unsigned int offset = doffset;
  3008. unsigned int tnl_hlen = skb_tnl_header_len(head_skb);
  3009. unsigned int partial_segs = 0;
  3010. unsigned int headroom;
  3011. unsigned int len = head_skb->len;
  3012. __be16 proto;
  3013. bool csum, sg;
  3014. int nfrags = skb_shinfo(head_skb)->nr_frags;
  3015. int err = -ENOMEM;
  3016. int i = 0;
  3017. int pos;
  3018. int dummy;
  3019. __skb_push(head_skb, doffset);
  3020. proto = skb_network_protocol(head_skb, &dummy);
  3021. if (unlikely(!proto))
  3022. return ERR_PTR(-EINVAL);
  3023. sg = !!(features & NETIF_F_SG);
  3024. csum = !!can_checksum_protocol(features, proto);
  3025. if (sg && csum && (mss != GSO_BY_FRAGS)) {
  3026. if (!(features & NETIF_F_GSO_PARTIAL)) {
  3027. struct sk_buff *iter;
  3028. unsigned int frag_len;
  3029. if (!list_skb ||
  3030. !net_gso_ok(features, skb_shinfo(head_skb)->gso_type))
  3031. goto normal;
  3032. /* If we get here then all the required
  3033. * GSO features except frag_list are supported.
  3034. * Try to split the SKB to multiple GSO SKBs
  3035. * with no frag_list.
  3036. * Currently we can do that only when the buffers don't
  3037. * have a linear part and all the buffers except
  3038. * the last are of the same length.
  3039. */
  3040. frag_len = list_skb->len;
  3041. skb_walk_frags(head_skb, iter) {
  3042. if (frag_len != iter->len && iter->next)
  3043. goto normal;
  3044. if (skb_headlen(iter) && !iter->head_frag)
  3045. goto normal;
  3046. len -= iter->len;
  3047. }
  3048. if (len != frag_len)
  3049. goto normal;
  3050. }
  3051. /* GSO partial only requires that we trim off any excess that
  3052. * doesn't fit into an MSS sized block, so take care of that
  3053. * now.
  3054. */
  3055. partial_segs = len / mss;
  3056. if (partial_segs > 1)
  3057. mss *= partial_segs;
  3058. else
  3059. partial_segs = 0;
  3060. }
  3061. normal:
  3062. headroom = skb_headroom(head_skb);
  3063. pos = skb_headlen(head_skb);
  3064. do {
  3065. struct sk_buff *nskb;
  3066. skb_frag_t *nskb_frag;
  3067. int hsize;
  3068. int size;
  3069. if (unlikely(mss == GSO_BY_FRAGS)) {
  3070. len = list_skb->len;
  3071. } else {
  3072. len = head_skb->len - offset;
  3073. if (len > mss)
  3074. len = mss;
  3075. }
  3076. hsize = skb_headlen(head_skb) - offset;
  3077. if (hsize < 0)
  3078. hsize = 0;
  3079. if (hsize > len || !sg)
  3080. hsize = len;
  3081. if (!hsize && i >= nfrags && skb_headlen(list_skb) &&
  3082. (skb_headlen(list_skb) == len || sg)) {
  3083. BUG_ON(skb_headlen(list_skb) > len);
  3084. i = 0;
  3085. nfrags = skb_shinfo(list_skb)->nr_frags;
  3086. frag = skb_shinfo(list_skb)->frags;
  3087. frag_skb = list_skb;
  3088. pos += skb_headlen(list_skb);
  3089. while (pos < offset + len) {
  3090. BUG_ON(i >= nfrags);
  3091. size = skb_frag_size(frag);
  3092. if (pos + size > offset + len)
  3093. break;
  3094. i++;
  3095. pos += size;
  3096. frag++;
  3097. }
  3098. nskb = skb_clone(list_skb, GFP_ATOMIC);
  3099. list_skb = list_skb->next;
  3100. if (unlikely(!nskb))
  3101. goto err;
  3102. if (unlikely(pskb_trim(nskb, len))) {
  3103. kfree_skb(nskb);
  3104. goto err;
  3105. }
  3106. hsize = skb_end_offset(nskb);
  3107. if (skb_cow_head(nskb, doffset + headroom)) {
  3108. kfree_skb(nskb);
  3109. goto err;
  3110. }
  3111. nskb->truesize += skb_end_offset(nskb) - hsize;
  3112. skb_release_head_state(nskb);
  3113. __skb_push(nskb, doffset);
  3114. } else {
  3115. nskb = __alloc_skb(hsize + doffset + headroom,
  3116. GFP_ATOMIC, skb_alloc_rx_flag(head_skb),
  3117. NUMA_NO_NODE);
  3118. if (unlikely(!nskb))
  3119. goto err;
  3120. skb_reserve(nskb, headroom);
  3121. __skb_put(nskb, doffset);
  3122. }
  3123. if (segs)
  3124. tail->next = nskb;
  3125. else
  3126. segs = nskb;
  3127. tail = nskb;
  3128. __copy_skb_header(nskb, head_skb);
  3129. skb_headers_offset_update(nskb, skb_headroom(nskb) - headroom);
  3130. skb_reset_mac_len(nskb);
  3131. skb_copy_from_linear_data_offset(head_skb, -tnl_hlen,
  3132. nskb->data - tnl_hlen,
  3133. doffset + tnl_hlen);
  3134. if (nskb->len == len + doffset)
  3135. goto perform_csum_check;
  3136. if (!sg) {
  3137. if (!nskb->remcsum_offload)
  3138. nskb->ip_summed = CHECKSUM_NONE;
  3139. SKB_GSO_CB(nskb)->csum =
  3140. skb_copy_and_csum_bits(head_skb, offset,
  3141. skb_put(nskb, len),
  3142. len, 0);
  3143. SKB_GSO_CB(nskb)->csum_start =
  3144. skb_headroom(nskb) + doffset;
  3145. continue;
  3146. }
  3147. nskb_frag = skb_shinfo(nskb)->frags;
  3148. skb_copy_from_linear_data_offset(head_skb, offset,
  3149. skb_put(nskb, hsize), hsize);
  3150. skb_shinfo(nskb)->tx_flags |= skb_shinfo(head_skb)->tx_flags &
  3151. SKBTX_SHARED_FRAG;
  3152. if (skb_orphan_frags(frag_skb, GFP_ATOMIC) ||
  3153. skb_zerocopy_clone(nskb, frag_skb, GFP_ATOMIC))
  3154. goto err;
  3155. while (pos < offset + len) {
  3156. if (i >= nfrags) {
  3157. i = 0;
  3158. nfrags = skb_shinfo(list_skb)->nr_frags;
  3159. frag = skb_shinfo(list_skb)->frags;
  3160. frag_skb = list_skb;
  3161. if (!skb_headlen(list_skb)) {
  3162. BUG_ON(!nfrags);
  3163. } else {
  3164. BUG_ON(!list_skb->head_frag);
  3165. /* to make room for head_frag. */
  3166. i--;
  3167. frag--;
  3168. }
  3169. if (skb_orphan_frags(frag_skb, GFP_ATOMIC) ||
  3170. skb_zerocopy_clone(nskb, frag_skb,
  3171. GFP_ATOMIC))
  3172. goto err;
  3173. list_skb = list_skb->next;
  3174. }
  3175. if (unlikely(skb_shinfo(nskb)->nr_frags >=
  3176. MAX_SKB_FRAGS)) {
  3177. net_warn_ratelimited(
  3178. "skb_segment: too many frags: %u %u\n",
  3179. pos, mss);
  3180. goto err;
  3181. }
  3182. *nskb_frag = (i < 0) ? skb_head_frag_to_page_desc(frag_skb) : *frag;
  3183. __skb_frag_ref(nskb_frag);
  3184. size = skb_frag_size(nskb_frag);
  3185. if (pos < offset) {
  3186. nskb_frag->page_offset += offset - pos;
  3187. skb_frag_size_sub(nskb_frag, offset - pos);
  3188. }
  3189. skb_shinfo(nskb)->nr_frags++;
  3190. if (pos + size <= offset + len) {
  3191. i++;
  3192. frag++;
  3193. pos += size;
  3194. } else {
  3195. skb_frag_size_sub(nskb_frag, pos + size - (offset + len));
  3196. goto skip_fraglist;
  3197. }
  3198. nskb_frag++;
  3199. }
  3200. skip_fraglist:
  3201. nskb->data_len = len - hsize;
  3202. nskb->len += nskb->data_len;
  3203. nskb->truesize += nskb->data_len;
  3204. perform_csum_check:
  3205. if (!csum) {
  3206. if (skb_has_shared_frag(nskb)) {
  3207. err = __skb_linearize(nskb);
  3208. if (err)
  3209. goto err;
  3210. }
  3211. if (!nskb->remcsum_offload)
  3212. nskb->ip_summed = CHECKSUM_NONE;
  3213. SKB_GSO_CB(nskb)->csum =
  3214. skb_checksum(nskb, doffset,
  3215. nskb->len - doffset, 0);
  3216. SKB_GSO_CB(nskb)->csum_start =
  3217. skb_headroom(nskb) + doffset;
  3218. }
  3219. } while ((offset += len) < head_skb->len);
  3220. /* Some callers want to get the end of the list.
  3221. * Put it in segs->prev to avoid walking the list.
  3222. * (see validate_xmit_skb_list() for example)
  3223. */
  3224. segs->prev = tail;
  3225. if (partial_segs) {
  3226. struct sk_buff *iter;
  3227. int type = skb_shinfo(head_skb)->gso_type;
  3228. unsigned short gso_size = skb_shinfo(head_skb)->gso_size;
  3229. /* Update type to add partial and then remove dodgy if set */
  3230. type |= (features & NETIF_F_GSO_PARTIAL) / NETIF_F_GSO_PARTIAL * SKB_GSO_PARTIAL;
  3231. type &= ~SKB_GSO_DODGY;
  3232. /* Update GSO info and prepare to start updating headers on
  3233. * our way back down the stack of protocols.
  3234. */
  3235. for (iter = segs; iter; iter = iter->next) {
  3236. skb_shinfo(iter)->gso_size = gso_size;
  3237. skb_shinfo(iter)->gso_segs = partial_segs;
  3238. skb_shinfo(iter)->gso_type = type;
  3239. SKB_GSO_CB(iter)->data_offset = skb_headroom(iter) + doffset;
  3240. }
  3241. if (tail->len - doffset <= gso_size)
  3242. skb_shinfo(tail)->gso_size = 0;
  3243. else if (tail != segs)
  3244. skb_shinfo(tail)->gso_segs = DIV_ROUND_UP(tail->len - doffset, gso_size);
  3245. }
3246. /* The following permits correct backpressure for protocols
3247. * using skb_set_owner_w().
3248. * The idea is to transfer ownership from head_skb to the last segment.
  3249. */
  3250. if (head_skb->destructor == sock_wfree) {
  3251. swap(tail->truesize, head_skb->truesize);
  3252. swap(tail->destructor, head_skb->destructor);
  3253. swap(tail->sk, head_skb->sk);
  3254. }
  3255. return segs;
  3256. err:
  3257. kfree_skb_list(segs);
  3258. return ERR_PTR(err);
  3259. }
  3260. EXPORT_SYMBOL_GPL(skb_segment);
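/*
 * Example (illustrative sketch, not from the original file): software-
 * segment a GSO skb and transmit each resulting segment through a
 * hypothetical example_xmit_one() that takes ownership of its argument.
 * The original skb is consumed here once segmentation has succeeded.
 */
static int example_segment_and_xmit(struct sk_buff *skb,
				    netdev_features_t features,
				    int (*example_xmit_one)(struct sk_buff *))
{
	struct sk_buff *segs, *seg;

	segs = skb_segment(skb, features);
	if (IS_ERR(segs))
		return PTR_ERR(segs);

	consume_skb(skb);			/* original no longer needed */

	while (segs) {
		seg = segs;
		segs = segs->next;
		seg->next = NULL;
		example_xmit_one(seg);
	}
	return 0;
}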
  3261. int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
  3262. {
  3263. struct skb_shared_info *pinfo, *skbinfo = skb_shinfo(skb);
  3264. unsigned int offset = skb_gro_offset(skb);
  3265. unsigned int headlen = skb_headlen(skb);
  3266. unsigned int len = skb_gro_len(skb);
  3267. struct sk_buff *lp, *p = *head;
  3268. unsigned int delta_truesize;
  3269. if (unlikely(p->len + len >= 65536))
  3270. return -E2BIG;
  3271. lp = NAPI_GRO_CB(p)->last;
  3272. pinfo = skb_shinfo(lp);
  3273. if (headlen <= offset) {
  3274. skb_frag_t *frag;
  3275. skb_frag_t *frag2;
  3276. int i = skbinfo->nr_frags;
  3277. int nr_frags = pinfo->nr_frags + i;
  3278. if (nr_frags > MAX_SKB_FRAGS)
  3279. goto merge;
  3280. offset -= headlen;
  3281. pinfo->nr_frags = nr_frags;
  3282. skbinfo->nr_frags = 0;
  3283. frag = pinfo->frags + nr_frags;
  3284. frag2 = skbinfo->frags + i;
  3285. do {
  3286. *--frag = *--frag2;
  3287. } while (--i);
  3288. frag->page_offset += offset;
  3289. skb_frag_size_sub(frag, offset);
  3290. /* all fragments truesize : remove (head size + sk_buff) */
  3291. delta_truesize = skb->truesize -
  3292. SKB_TRUESIZE(skb_end_offset(skb));
  3293. skb->truesize -= skb->data_len;
  3294. skb->len -= skb->data_len;
  3295. skb->data_len = 0;
  3296. NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE;
  3297. goto done;
  3298. } else if (skb->head_frag) {
  3299. int nr_frags = pinfo->nr_frags;
  3300. skb_frag_t *frag = pinfo->frags + nr_frags;
  3301. struct page *page = virt_to_head_page(skb->head);
  3302. unsigned int first_size = headlen - offset;
  3303. unsigned int first_offset;
  3304. if (nr_frags + 1 + skbinfo->nr_frags > MAX_SKB_FRAGS)
  3305. goto merge;
  3306. first_offset = skb->data -
  3307. (unsigned char *)page_address(page) +
  3308. offset;
  3309. pinfo->nr_frags = nr_frags + 1 + skbinfo->nr_frags;
  3310. frag->page.p = page;
  3311. frag->page_offset = first_offset;
  3312. skb_frag_size_set(frag, first_size);
  3313. memcpy(frag + 1, skbinfo->frags, sizeof(*frag) * skbinfo->nr_frags);
3314. /* We don't need to clear skbinfo->nr_frags here */
  3315. delta_truesize = skb->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff));
  3316. NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE_STOLEN_HEAD;
  3317. goto done;
  3318. }
  3319. merge:
  3320. delta_truesize = skb->truesize;
  3321. if (offset > headlen) {
  3322. unsigned int eat = offset - headlen;
  3323. skbinfo->frags[0].page_offset += eat;
  3324. skb_frag_size_sub(&skbinfo->frags[0], eat);
  3325. skb->data_len -= eat;
  3326. skb->len -= eat;
  3327. offset = headlen;
  3328. }
  3329. __skb_pull(skb, offset);
  3330. if (NAPI_GRO_CB(p)->last == p)
  3331. skb_shinfo(p)->frag_list = skb;
  3332. else
  3333. NAPI_GRO_CB(p)->last->next = skb;
  3334. NAPI_GRO_CB(p)->last = skb;
  3335. __skb_header_release(skb);
  3336. lp = p;
  3337. done:
  3338. NAPI_GRO_CB(p)->count++;
  3339. p->data_len += len;
  3340. p->truesize += delta_truesize;
  3341. p->len += len;
  3342. if (lp != p) {
  3343. lp->data_len += len;
  3344. lp->truesize += delta_truesize;
  3345. lp->len += len;
  3346. }
  3347. NAPI_GRO_CB(skb)->same_flow = 1;
  3348. return 0;
  3349. }
  3350. EXPORT_SYMBOL_GPL(skb_gro_receive);
  3351. void __init skb_init(void)
  3352. {
  3353. skbuff_head_cache = kmem_cache_create_usercopy("skbuff_head_cache",
  3354. sizeof(struct sk_buff),
  3355. 0,
  3356. SLAB_HWCACHE_ALIGN|SLAB_PANIC,
  3357. offsetof(struct sk_buff, cb),
  3358. sizeof_field(struct sk_buff, cb),
  3359. NULL);
  3360. skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache",
  3361. sizeof(struct sk_buff_fclones),
  3362. 0,
  3363. SLAB_HWCACHE_ALIGN|SLAB_PANIC,
  3364. NULL);
  3365. }
  3366. static int
  3367. __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len,
  3368. unsigned int recursion_level)
  3369. {
  3370. int start = skb_headlen(skb);
  3371. int i, copy = start - offset;
  3372. struct sk_buff *frag_iter;
  3373. int elt = 0;
  3374. if (unlikely(recursion_level >= 24))
  3375. return -EMSGSIZE;
  3376. if (copy > 0) {
  3377. if (copy > len)
  3378. copy = len;
  3379. sg_set_buf(sg, skb->data + offset, copy);
  3380. elt++;
  3381. if ((len -= copy) == 0)
  3382. return elt;
  3383. offset += copy;
  3384. }
  3385. for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
  3386. int end;
  3387. WARN_ON(start > offset + len);
  3388. end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
  3389. if ((copy = end - offset) > 0) {
  3390. skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
  3391. if (unlikely(elt && sg_is_last(&sg[elt - 1])))
  3392. return -EMSGSIZE;
  3393. if (copy > len)
  3394. copy = len;
  3395. sg_set_page(&sg[elt], skb_frag_page(frag), copy,
  3396. frag->page_offset+offset-start);
  3397. elt++;
  3398. if (!(len -= copy))
  3399. return elt;
  3400. offset += copy;
  3401. }
  3402. start = end;
  3403. }
  3404. skb_walk_frags(skb, frag_iter) {
  3405. int end, ret;
  3406. WARN_ON(start > offset + len);
  3407. end = start + frag_iter->len;
  3408. if ((copy = end - offset) > 0) {
  3409. if (unlikely(elt && sg_is_last(&sg[elt - 1])))
  3410. return -EMSGSIZE;
  3411. if (copy > len)
  3412. copy = len;
  3413. ret = __skb_to_sgvec(frag_iter, sg+elt, offset - start,
  3414. copy, recursion_level + 1);
  3415. if (unlikely(ret < 0))
  3416. return ret;
  3417. elt += ret;
  3418. if ((len -= copy) == 0)
  3419. return elt;
  3420. offset += copy;
  3421. }
  3422. start = end;
  3423. }
  3424. BUG_ON(len);
  3425. return elt;
  3426. }
  3427. /**
  3428. * skb_to_sgvec - Fill a scatter-gather list from a socket buffer
  3429. * @skb: Socket buffer containing the buffers to be mapped
  3430. * @sg: The scatter-gather list to map into
  3431. * @offset: The offset into the buffer's contents to start mapping
  3432. * @len: Length of buffer space to be mapped
  3433. *
  3434. * Fill the specified scatter-gather list with mappings/pointers into a
  3435. * region of the buffer space attached to a socket buffer. Returns either
  3436. * the number of scatterlist items used, or -EMSGSIZE if the contents
  3437. * could not fit.
  3438. */
  3439. int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
  3440. {
  3441. int nsg = __skb_to_sgvec(skb, sg, offset, len, 0);
  3442. if (nsg <= 0)
  3443. return nsg;
  3444. sg_mark_end(&sg[nsg - 1]);
  3445. return nsg;
  3446. }
  3447. EXPORT_SYMBOL_GPL(skb_to_sgvec);
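/*
 * Example (illustrative sketch, not from the original file): map the whole
 * payload of a non-fraglist skb into an on-stack scatterlist.
 * MAX_SKB_FRAGS + 2 entries cover the linear part plus every page fragment;
 * real callers often size the table from skb_cow_data() instead (see the
 * sketch after skb_cow_data() below).
 */
static int example_map_skb(struct sk_buff *skb)
{
	struct scatterlist sg[MAX_SKB_FRAGS + 2];
	int nsg;

	sg_init_table(sg, ARRAY_SIZE(sg));
	nsg = skb_to_sgvec(skb, sg, 0, skb->len);
	if (nsg < 0)
		return nsg;

	/* ... hand sg[0..nsg-1] to DMA mapping or the crypto API ... */
	return nsg;
}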
3448. /* As compared with skb_to_sgvec, skb_to_sgvec_nomark only maps the skb to the given
3449. * sglist without marking the sg entry that contains the last skb data as the end.
3450. * So the caller can manipulate the sg list at will when adding new data after
3451. * the first call, without calling sg_unmark_end to extend the sg list.
  3452. *
  3453. * Scenario to use skb_to_sgvec_nomark:
  3454. * 1. sg_init_table
  3455. * 2. skb_to_sgvec_nomark(payload1)
  3456. * 3. skb_to_sgvec_nomark(payload2)
  3457. *
  3458. * This is equivalent to:
  3459. * 1. sg_init_table
  3460. * 2. skb_to_sgvec(payload1)
  3461. * 3. sg_unmark_end
  3462. * 4. skb_to_sgvec(payload2)
  3463. *
3464. * When mapping multiple payloads conditionally, skb_to_sgvec_nomark
3465. * is preferable.
  3466. */
  3467. int skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg,
  3468. int offset, int len)
  3469. {
  3470. return __skb_to_sgvec(skb, sg, offset, len, 0);
  3471. }
  3472. EXPORT_SYMBOL_GPL(skb_to_sgvec_nomark);
  3473. /**
  3474. * skb_cow_data - Check that a socket buffer's data buffers are writable
  3475. * @skb: The socket buffer to check.
  3476. * @tailbits: Amount of trailing space to be added
  3477. * @trailer: Returned pointer to the skb where the @tailbits space begins
  3478. *
  3479. * Make sure that the data buffers attached to a socket buffer are
  3480. * writable. If they are not, private copies are made of the data buffers
  3481. * and the socket buffer is set to use these instead.
  3482. *
  3483. * If @tailbits is given, make sure that there is space to write @tailbits
  3484. * bytes of data beyond current end of socket buffer. @trailer will be
  3485. * set to point to the skb in which this space begins.
  3486. *
  3487. * The number of scatterlist elements required to completely map the
  3488. * COW'd and extended socket buffer will be returned.
  3489. */
  3490. int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
  3491. {
  3492. int copyflag;
  3493. int elt;
  3494. struct sk_buff *skb1, **skb_p;
  3495. /* If skb is cloned or its head is paged, reallocate
  3496. * head pulling out all the pages (pages are considered not writable
  3497. * at the moment even if they are anonymous).
  3498. */
  3499. if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) &&
  3500. __pskb_pull_tail(skb, skb_pagelen(skb)-skb_headlen(skb)) == NULL)
  3501. return -ENOMEM;
3502. /* Easy case. Most packets will go this way. */
  3503. if (!skb_has_frag_list(skb)) {
3504. /* A bit of trouble: not enough space for the trailer.
3505. * This should not happen when the stack is tuned to generate
3506. * good frames. On a miss we reallocate and reserve even more
3507. * space; 128 bytes is fair. */
  3508. if (skb_tailroom(skb) < tailbits &&
  3509. pskb_expand_head(skb, 0, tailbits-skb_tailroom(skb)+128, GFP_ATOMIC))
  3510. return -ENOMEM;
  3511. /* Voila! */
  3512. *trailer = skb;
  3513. return 1;
  3514. }
3515. /* Misery. We are in trouble; time to mince the fragments... */
  3516. elt = 1;
  3517. skb_p = &skb_shinfo(skb)->frag_list;
  3518. copyflag = 0;
  3519. while ((skb1 = *skb_p) != NULL) {
  3520. int ntail = 0;
  3521. /* The fragment is partially pulled by someone,
  3522. * this can happen on input. Copy it and everything
  3523. * after it. */
  3524. if (skb_shared(skb1))
  3525. copyflag = 1;
  3526. /* If the skb is the last, worry about trailer. */
  3527. if (skb1->next == NULL && tailbits) {
  3528. if (skb_shinfo(skb1)->nr_frags ||
  3529. skb_has_frag_list(skb1) ||
  3530. skb_tailroom(skb1) < tailbits)
  3531. ntail = tailbits + 128;
  3532. }
  3533. if (copyflag ||
  3534. skb_cloned(skb1) ||
  3535. ntail ||
  3536. skb_shinfo(skb1)->nr_frags ||
  3537. skb_has_frag_list(skb1)) {
  3538. struct sk_buff *skb2;
3539. /* No luck, we are stuck with copying... */
  3540. if (ntail == 0)
  3541. skb2 = skb_copy(skb1, GFP_ATOMIC);
  3542. else
  3543. skb2 = skb_copy_expand(skb1,
  3544. skb_headroom(skb1),
  3545. ntail,
  3546. GFP_ATOMIC);
  3547. if (unlikely(skb2 == NULL))
  3548. return -ENOMEM;
  3549. if (skb1->sk)
  3550. skb_set_owner_w(skb2, skb1->sk);
  3551. /* Looking around. Are we still alive?
  3552. * OK, link new skb, drop old one */
  3553. skb2->next = skb1->next;
  3554. *skb_p = skb2;
  3555. kfree_skb(skb1);
  3556. skb1 = skb2;
  3557. }
  3558. elt++;
  3559. *trailer = skb1;
  3560. skb_p = &skb1->next;
  3561. }
  3562. return elt;
  3563. }
  3564. EXPORT_SYMBOL_GPL(skb_cow_data);
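/*
 * Example (illustrative sketch, not from the original file): the usual
 * IPsec-style pairing of skb_cow_data() and skb_to_sgvec(). The return
 * value of skb_cow_data() sizes the scatterlist once the buffer is
 * writable; the 16-byte trailer and the GFP_ATOMIC allocation are
 * assumptions for the sketch.
 */
static int example_make_writable_and_map(struct sk_buff *skb)
{
	struct sk_buff *trailer;
	struct scatterlist *sg;
	int nfrags, nsg;

	nfrags = skb_cow_data(skb, 16, &trailer);
	if (nfrags < 0)
		return nfrags;

	sg = kmalloc_array(nfrags, sizeof(*sg), GFP_ATOMIC);
	if (!sg)
		return -ENOMEM;

	sg_init_table(sg, nfrags);
	nsg = skb_to_sgvec(skb, sg, 0, skb->len);
	if (nsg < 0) {
		kfree(sg);
		return nsg;
	}

	/* ... run the transform over sg[0..nsg-1] here ... */
	kfree(sg);
	return 0;
}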
  3565. static void sock_rmem_free(struct sk_buff *skb)
  3566. {
  3567. struct sock *sk = skb->sk;
  3568. atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
  3569. }
  3570. static void skb_set_err_queue(struct sk_buff *skb)
  3571. {
  3572. /* pkt_type of skbs received on local sockets is never PACKET_OUTGOING.
  3573. * So, it is safe to (mis)use it to mark skbs on the error queue.
  3574. */
  3575. skb->pkt_type = PACKET_OUTGOING;
  3576. BUILD_BUG_ON(PACKET_OUTGOING == 0);
  3577. }
  3578. /*
3579. * Note: We don't mem charge error packets (no sk_forward_alloc changes)
  3580. */
  3581. int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
  3582. {
  3583. if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
  3584. (unsigned int)sk->sk_rcvbuf)
  3585. return -ENOMEM;
  3586. skb_orphan(skb);
  3587. skb->sk = sk;
  3588. skb->destructor = sock_rmem_free;
  3589. atomic_add(skb->truesize, &sk->sk_rmem_alloc);
  3590. skb_set_err_queue(skb);
  3591. /* before exiting rcu section, make sure dst is refcounted */
  3592. skb_dst_force(skb);
  3593. skb_queue_tail(&sk->sk_error_queue, skb);
  3594. if (!sock_flag(sk, SOCK_DEAD))
  3595. sk->sk_error_report(sk);
  3596. return 0;
  3597. }
  3598. EXPORT_SYMBOL(sock_queue_err_skb);
  3599. static bool is_icmp_err_skb(const struct sk_buff *skb)
  3600. {
  3601. return skb && (SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_ICMP ||
  3602. SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_ICMP6);
  3603. }
  3604. struct sk_buff *sock_dequeue_err_skb(struct sock *sk)
  3605. {
  3606. struct sk_buff_head *q = &sk->sk_error_queue;
  3607. struct sk_buff *skb, *skb_next = NULL;
  3608. bool icmp_next = false;
  3609. unsigned long flags;
  3610. spin_lock_irqsave(&q->lock, flags);
  3611. skb = __skb_dequeue(q);
  3612. if (skb && (skb_next = skb_peek(q))) {
  3613. icmp_next = is_icmp_err_skb(skb_next);
  3614. if (icmp_next)
  3615. sk->sk_err = SKB_EXT_ERR(skb_next)->ee.ee_origin;
  3616. }
  3617. spin_unlock_irqrestore(&q->lock, flags);
  3618. if (is_icmp_err_skb(skb) && !icmp_next)
  3619. sk->sk_err = 0;
  3620. if (skb_next)
  3621. sk->sk_error_report(sk);
  3622. return skb;
  3623. }
  3624. EXPORT_SYMBOL(sock_dequeue_err_skb);
  3625. /**
  3626. * skb_clone_sk - create clone of skb, and take reference to socket
  3627. * @skb: the skb to clone
  3628. *
  3629. * This function creates a clone of a buffer that holds a reference on
  3630. * sk_refcnt. Buffers created via this function are meant to be
  3631. * returned using sock_queue_err_skb, or free via kfree_skb.
  3632. *
  3633. * When passing buffers allocated with this function to sock_queue_err_skb
  3634. * it is necessary to wrap the call with sock_hold/sock_put in order to
  3635. * prevent the socket from being released prior to being enqueued on
  3636. * the sk_error_queue.
  3637. */
  3638. struct sk_buff *skb_clone_sk(struct sk_buff *skb)
  3639. {
  3640. struct sock *sk = skb->sk;
  3641. struct sk_buff *clone;
  3642. if (!sk || !refcount_inc_not_zero(&sk->sk_refcnt))
  3643. return NULL;
  3644. clone = skb_clone(skb, GFP_ATOMIC);
  3645. if (!clone) {
  3646. sock_put(sk);
  3647. return NULL;
  3648. }
  3649. clone->sk = sk;
  3650. clone->destructor = sock_efree;
  3651. return clone;
  3652. }
  3653. EXPORT_SYMBOL(skb_clone_sk);
  3654. static void __skb_complete_tx_timestamp(struct sk_buff *skb,
  3655. struct sock *sk,
  3656. int tstype,
  3657. bool opt_stats)
  3658. {
  3659. struct sock_exterr_skb *serr;
  3660. int err;
  3661. BUILD_BUG_ON(sizeof(struct sock_exterr_skb) > sizeof(skb->cb));
  3662. serr = SKB_EXT_ERR(skb);
  3663. memset(serr, 0, sizeof(*serr));
  3664. serr->ee.ee_errno = ENOMSG;
  3665. serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING;
  3666. serr->ee.ee_info = tstype;
  3667. serr->opt_stats = opt_stats;
  3668. serr->header.h4.iif = skb->dev ? skb->dev->ifindex : 0;
  3669. if (sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID) {
  3670. serr->ee.ee_data = skb_shinfo(skb)->tskey;
  3671. if (sk->sk_protocol == IPPROTO_TCP &&
  3672. sk->sk_type == SOCK_STREAM)
  3673. serr->ee.ee_data -= sk->sk_tskey;
  3674. }
  3675. err = sock_queue_err_skb(sk, skb);
  3676. if (err)
  3677. kfree_skb(skb);
  3678. }
  3679. static bool skb_may_tx_timestamp(struct sock *sk, bool tsonly)
  3680. {
  3681. bool ret;
  3682. if (likely(sysctl_tstamp_allow_data || tsonly))
  3683. return true;
  3684. read_lock_bh(&sk->sk_callback_lock);
  3685. ret = sk->sk_socket && sk->sk_socket->file &&
  3686. file_ns_capable(sk->sk_socket->file, &init_user_ns, CAP_NET_RAW);
  3687. read_unlock_bh(&sk->sk_callback_lock);
  3688. return ret;
  3689. }
  3690. void skb_complete_tx_timestamp(struct sk_buff *skb,
  3691. struct skb_shared_hwtstamps *hwtstamps)
  3692. {
  3693. struct sock *sk = skb->sk;
  3694. if (!skb_may_tx_timestamp(sk, false))
  3695. goto err;
  3696. /* Take a reference to prevent skb_orphan() from freeing the socket,
  3697. * but only if the socket refcount is not zero.
  3698. */
  3699. if (likely(refcount_inc_not_zero(&sk->sk_refcnt))) {
  3700. *skb_hwtstamps(skb) = *hwtstamps;
  3701. __skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND, false);
  3702. sock_put(sk);
  3703. return;
  3704. }
  3705. err:
  3706. kfree_skb(skb);
  3707. }
  3708. EXPORT_SYMBOL_GPL(skb_complete_tx_timestamp);
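/*
 * Example (illustrative sketch, not from the original file): the common
 * driver pattern for hardware TX timestamps. At transmit time the skb is
 * cloned with skb_clone_sk(); when the hardware later reports the
 * timestamp, the stashed clone is completed with
 * skb_complete_tx_timestamp(). Both example_* names are hypothetical.
 */
static struct sk_buff *example_start_hwtstamp(struct sk_buff *skb)
{
	struct sk_buff *clone = NULL;

	if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
		clone = skb_clone_sk(skb);
		if (clone)
			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
	}
	return clone;				/* stash until completion */
}

static void example_finish_hwtstamp(struct sk_buff *clone, u64 ns)
{
	struct skb_shared_hwtstamps hwts = {
		.hwtstamp = ns_to_ktime(ns),
	};

	/* Consumes the clone: queued on the error queue or freed. */
	skb_complete_tx_timestamp(clone, &hwts);
}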
  3709. void __skb_tstamp_tx(struct sk_buff *orig_skb,
  3710. struct skb_shared_hwtstamps *hwtstamps,
  3711. struct sock *sk, int tstype)
  3712. {
  3713. struct sk_buff *skb;
  3714. bool tsonly, opt_stats = false;
  3715. if (!sk)
  3716. return;
  3717. if (!hwtstamps && !(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_TX_SWHW) &&
  3718. skb_shinfo(orig_skb)->tx_flags & SKBTX_IN_PROGRESS)
  3719. return;
  3720. tsonly = sk->sk_tsflags & SOF_TIMESTAMPING_OPT_TSONLY;
  3721. if (!skb_may_tx_timestamp(sk, tsonly))
  3722. return;
  3723. if (tsonly) {
  3724. #ifdef CONFIG_INET
  3725. if ((sk->sk_tsflags & SOF_TIMESTAMPING_OPT_STATS) &&
  3726. sk->sk_protocol == IPPROTO_TCP &&
  3727. sk->sk_type == SOCK_STREAM) {
  3728. skb = tcp_get_timestamping_opt_stats(sk);
  3729. opt_stats = true;
  3730. } else
  3731. #endif
  3732. skb = alloc_skb(0, GFP_ATOMIC);
  3733. } else {
  3734. skb = skb_clone(orig_skb, GFP_ATOMIC);
  3735. }
  3736. if (!skb)
  3737. return;
  3738. if (tsonly) {
  3739. skb_shinfo(skb)->tx_flags |= skb_shinfo(orig_skb)->tx_flags &
  3740. SKBTX_ANY_TSTAMP;
  3741. skb_shinfo(skb)->tskey = skb_shinfo(orig_skb)->tskey;
  3742. }
  3743. if (hwtstamps)
  3744. *skb_hwtstamps(skb) = *hwtstamps;
  3745. else
  3746. skb->tstamp = ktime_get_real();
  3747. __skb_complete_tx_timestamp(skb, sk, tstype, opt_stats);
  3748. }
  3749. EXPORT_SYMBOL_GPL(__skb_tstamp_tx);
  3750. void skb_tstamp_tx(struct sk_buff *orig_skb,
  3751. struct skb_shared_hwtstamps *hwtstamps)
  3752. {
  3753. return __skb_tstamp_tx(orig_skb, hwtstamps, orig_skb->sk,
  3754. SCM_TSTAMP_SND);
  3755. }
  3756. EXPORT_SYMBOL_GPL(skb_tstamp_tx);
  3757. void skb_complete_wifi_ack(struct sk_buff *skb, bool acked)
  3758. {
  3759. struct sock *sk = skb->sk;
  3760. struct sock_exterr_skb *serr;
  3761. int err = 1;
  3762. skb->wifi_acked_valid = 1;
  3763. skb->wifi_acked = acked;
  3764. serr = SKB_EXT_ERR(skb);
  3765. memset(serr, 0, sizeof(*serr));
  3766. serr->ee.ee_errno = ENOMSG;
  3767. serr->ee.ee_origin = SO_EE_ORIGIN_TXSTATUS;
  3768. /* Take a reference to prevent skb_orphan() from freeing the socket,
  3769. * but only if the socket refcount is not zero.
  3770. */
  3771. if (likely(refcount_inc_not_zero(&sk->sk_refcnt))) {
  3772. err = sock_queue_err_skb(sk, skb);
  3773. sock_put(sk);
  3774. }
  3775. if (err)
  3776. kfree_skb(skb);
  3777. }
  3778. EXPORT_SYMBOL_GPL(skb_complete_wifi_ack);
  3779. /**
  3780. * skb_partial_csum_set - set up and verify partial csum values for packet
  3781. * @skb: the skb to set
  3782. * @start: the number of bytes after skb->data to start checksumming.
  3783. * @off: the offset from start to place the checksum.
  3784. *
  3785. * For untrusted partially-checksummed packets, we need to make sure the values
  3786. * for skb->csum_start and skb->csum_offset are valid so we don't oops.
  3787. *
  3788. * This function checks and sets those values and skb->ip_summed: if this
  3789. * returns false you should drop the packet.
  3790. */
  3791. bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off)
  3792. {
  3793. if (unlikely(start > skb_headlen(skb)) ||
  3794. unlikely((int)start + off > skb_headlen(skb) - 2)) {
  3795. net_warn_ratelimited("bad partial csum: csum=%u/%u len=%u\n",
  3796. start, off, skb_headlen(skb));
  3797. return false;
  3798. }
  3799. skb->ip_summed = CHECKSUM_PARTIAL;
  3800. skb->csum_start = skb_headroom(skb) + start;
  3801. skb->csum_offset = off;
  3802. skb_set_transport_header(skb, start);
  3803. return true;
  3804. }
  3805. EXPORT_SYMBOL_GPL(skb_partial_csum_set);
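/*
 * Example (illustrative sketch, not from the original file): validate
 * checksum metadata supplied by an untrusted source (virtio-style) before
 * trusting it; csum_start/csum_offset come from a hypothetical device
 * header.
 */
static int example_set_partial_csum(struct sk_buff *skb,
				    u16 csum_start, u16 csum_offset)
{
	if (!skb_partial_csum_set(skb, csum_start, csum_offset))
		return -EINVAL;			/* caller drops the packet */

	/* skb->ip_summed is now CHECKSUM_PARTIAL with checked offsets. */
	return 0;
}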
  3806. static int skb_maybe_pull_tail(struct sk_buff *skb, unsigned int len,
  3807. unsigned int max)
  3808. {
  3809. if (skb_headlen(skb) >= len)
  3810. return 0;
  3811. /* If we need to pullup then pullup to the max, so we
  3812. * won't need to do it again.
  3813. */
  3814. if (max > skb->len)
  3815. max = skb->len;
  3816. if (__pskb_pull_tail(skb, max - skb_headlen(skb)) == NULL)
  3817. return -ENOMEM;
  3818. if (skb_headlen(skb) < len)
  3819. return -EPROTO;
  3820. return 0;
  3821. }
  3822. #define MAX_TCP_HDR_LEN (15 * 4)
  3823. static __sum16 *skb_checksum_setup_ip(struct sk_buff *skb,
  3824. typeof(IPPROTO_IP) proto,
  3825. unsigned int off)
  3826. {
  3827. switch (proto) {
  3828. int err;
  3829. case IPPROTO_TCP:
  3830. err = skb_maybe_pull_tail(skb, off + sizeof(struct tcphdr),
  3831. off + MAX_TCP_HDR_LEN);
  3832. if (!err && !skb_partial_csum_set(skb, off,
  3833. offsetof(struct tcphdr,
  3834. check)))
  3835. err = -EPROTO;
  3836. return err ? ERR_PTR(err) : &tcp_hdr(skb)->check;
  3837. case IPPROTO_UDP:
  3838. err = skb_maybe_pull_tail(skb, off + sizeof(struct udphdr),
  3839. off + sizeof(struct udphdr));
  3840. if (!err && !skb_partial_csum_set(skb, off,
  3841. offsetof(struct udphdr,
  3842. check)))
  3843. err = -EPROTO;
  3844. return err ? ERR_PTR(err) : &udp_hdr(skb)->check;
  3845. }
  3846. return ERR_PTR(-EPROTO);
  3847. }
  3848. /* This value should be large enough to cover a tagged ethernet header plus
  3849. * maximally sized IP and TCP or UDP headers.
  3850. */
  3851. #define MAX_IP_HDR_LEN 128
  3852. static int skb_checksum_setup_ipv4(struct sk_buff *skb, bool recalculate)
  3853. {
  3854. unsigned int off;
  3855. bool fragment;
  3856. __sum16 *csum;
  3857. int err;
  3858. fragment = false;
  3859. err = skb_maybe_pull_tail(skb,
  3860. sizeof(struct iphdr),
  3861. MAX_IP_HDR_LEN);
  3862. if (err < 0)
  3863. goto out;
  3864. if (ip_hdr(skb)->frag_off & htons(IP_OFFSET | IP_MF))
  3865. fragment = true;
  3866. off = ip_hdrlen(skb);
  3867. err = -EPROTO;
  3868. if (fragment)
  3869. goto out;
  3870. csum = skb_checksum_setup_ip(skb, ip_hdr(skb)->protocol, off);
  3871. if (IS_ERR(csum))
  3872. return PTR_ERR(csum);
  3873. if (recalculate)
  3874. *csum = ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
  3875. ip_hdr(skb)->daddr,
  3876. skb->len - off,
  3877. ip_hdr(skb)->protocol, 0);
  3878. err = 0;
  3879. out:
  3880. return err;
  3881. }
  3882. /* This value should be large enough to cover a tagged ethernet header plus
  3883. * an IPv6 header, all options, and a maximal TCP or UDP header.
  3884. */
  3885. #define MAX_IPV6_HDR_LEN 256
  3886. #define OPT_HDR(type, skb, off) \
  3887. (type *)(skb_network_header(skb) + (off))
  3888. static int skb_checksum_setup_ipv6(struct sk_buff *skb, bool recalculate)
  3889. {
  3890. int err;
  3891. u8 nexthdr;
  3892. unsigned int off;
  3893. unsigned int len;
  3894. bool fragment;
  3895. bool done;
  3896. __sum16 *csum;
  3897. fragment = false;
  3898. done = false;
  3899. off = sizeof(struct ipv6hdr);
  3900. err = skb_maybe_pull_tail(skb, off, MAX_IPV6_HDR_LEN);
  3901. if (err < 0)
  3902. goto out;
  3903. nexthdr = ipv6_hdr(skb)->nexthdr;
  3904. len = sizeof(struct ipv6hdr) + ntohs(ipv6_hdr(skb)->payload_len);
  3905. while (off <= len && !done) {
  3906. switch (nexthdr) {
  3907. case IPPROTO_DSTOPTS:
  3908. case IPPROTO_HOPOPTS:
  3909. case IPPROTO_ROUTING: {
  3910. struct ipv6_opt_hdr *hp;
  3911. err = skb_maybe_pull_tail(skb,
  3912. off +
  3913. sizeof(struct ipv6_opt_hdr),
  3914. MAX_IPV6_HDR_LEN);
  3915. if (err < 0)
  3916. goto out;
  3917. hp = OPT_HDR(struct ipv6_opt_hdr, skb, off);
  3918. nexthdr = hp->nexthdr;
  3919. off += ipv6_optlen(hp);
  3920. break;
  3921. }
  3922. case IPPROTO_AH: {
  3923. struct ip_auth_hdr *hp;
  3924. err = skb_maybe_pull_tail(skb,
  3925. off +
  3926. sizeof(struct ip_auth_hdr),
  3927. MAX_IPV6_HDR_LEN);
  3928. if (err < 0)
  3929. goto out;
  3930. hp = OPT_HDR(struct ip_auth_hdr, skb, off);
  3931. nexthdr = hp->nexthdr;
  3932. off += ipv6_authlen(hp);
  3933. break;
  3934. }
  3935. case IPPROTO_FRAGMENT: {
  3936. struct frag_hdr *hp;
  3937. err = skb_maybe_pull_tail(skb,
  3938. off +
  3939. sizeof(struct frag_hdr),
  3940. MAX_IPV6_HDR_LEN);
  3941. if (err < 0)
  3942. goto out;
  3943. hp = OPT_HDR(struct frag_hdr, skb, off);
  3944. if (hp->frag_off & htons(IP6_OFFSET | IP6_MF))
  3945. fragment = true;
  3946. nexthdr = hp->nexthdr;
  3947. off += sizeof(struct frag_hdr);
  3948. break;
  3949. }
  3950. default:
  3951. done = true;
  3952. break;
  3953. }
  3954. }
  3955. err = -EPROTO;
  3956. if (!done || fragment)
  3957. goto out;
  3958. csum = skb_checksum_setup_ip(skb, nexthdr, off);
  3959. if (IS_ERR(csum))
  3960. return PTR_ERR(csum);
  3961. if (recalculate)
  3962. *csum = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
  3963. &ipv6_hdr(skb)->daddr,
  3964. skb->len - off, nexthdr, 0);
  3965. err = 0;
  3966. out:
  3967. return err;
  3968. }
  3969. /**
  3970. * skb_checksum_setup - set up partial checksum offset
  3971. * @skb: the skb to set up
  3972. * @recalculate: if true the pseudo-header checksum will be recalculated
  3973. */
  3974. int skb_checksum_setup(struct sk_buff *skb, bool recalculate)
  3975. {
  3976. int err;
  3977. switch (skb->protocol) {
  3978. case htons(ETH_P_IP):
  3979. err = skb_checksum_setup_ipv4(skb, recalculate);
  3980. break;
  3981. case htons(ETH_P_IPV6):
  3982. err = skb_checksum_setup_ipv6(skb, recalculate);
  3983. break;
  3984. default:
  3985. err = -EPROTO;
  3986. break;
  3987. }
  3988. return err;
  3989. }
  3990. EXPORT_SYMBOL(skb_checksum_setup);
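/*
 * Example (illustrative sketch, not from the original file): a backend
 * receive path (e.g. a virtual NIC) repairing partial-checksum state on a
 * guest-supplied packet before injecting it into the stack; dropping on
 * failure is an assumption of the sketch.
 */
static int example_fixup_guest_csum(struct sk_buff *skb)
{
	int err = 0;

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		err = skb_checksum_setup(skb, true);
	if (err)
		kfree_skb(skb);			/* malformed headers */
	return err;
}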
  3991. /**
  3992. * skb_checksum_maybe_trim - maybe trims the given skb
  3993. * @skb: the skb to check
  3994. * @transport_len: the data length beyond the network header
  3995. *
  3996. * Checks whether the given skb has data beyond the given transport length.
  3997. * If so, returns a cloned skb trimmed to this transport length.
  3998. * Otherwise returns the provided skb. Returns NULL in error cases
  3999. * (e.g. transport_len exceeds skb length or out-of-memory).
  4000. *
  4001. * Caller needs to set the skb transport header and free any returned skb if it
  4002. * differs from the provided skb.
  4003. */
  4004. static struct sk_buff *skb_checksum_maybe_trim(struct sk_buff *skb,
  4005. unsigned int transport_len)
  4006. {
  4007. struct sk_buff *skb_chk;
  4008. unsigned int len = skb_transport_offset(skb) + transport_len;
  4009. int ret;
  4010. if (skb->len < len)
  4011. return NULL;
  4012. else if (skb->len == len)
  4013. return skb;
  4014. skb_chk = skb_clone(skb, GFP_ATOMIC);
  4015. if (!skb_chk)
  4016. return NULL;
  4017. ret = pskb_trim_rcsum(skb_chk, len);
  4018. if (ret) {
  4019. kfree_skb(skb_chk);
  4020. return NULL;
  4021. }
  4022. return skb_chk;
  4023. }
  4024. /**
  4025. * skb_checksum_trimmed - validate checksum of an skb
  4026. * @skb: the skb to check
  4027. * @transport_len: the data length beyond the network header
  4028. * @skb_chkf: checksum function to use
  4029. *
  4030. * Applies the given checksum function skb_chkf to the provided skb.
  4031. * Returns a checked and maybe trimmed skb. Returns NULL on error.
  4032. *
  4033. * If the skb has data beyond the given transport length, then a
  4034. * trimmed & cloned skb is checked and returned.
  4035. *
  4036. * Caller needs to set the skb transport header and free any returned skb if it
  4037. * differs from the provided skb.
  4038. */
  4039. struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb,
  4040. unsigned int transport_len,
  4041. __sum16(*skb_chkf)(struct sk_buff *skb))
  4042. {
  4043. struct sk_buff *skb_chk;
  4044. unsigned int offset = skb_transport_offset(skb);
  4045. __sum16 ret;
  4046. skb_chk = skb_checksum_maybe_trim(skb, transport_len);
  4047. if (!skb_chk)
  4048. goto err;
  4049. if (!pskb_may_pull(skb_chk, offset))
  4050. goto err;
  4051. skb_pull_rcsum(skb_chk, offset);
  4052. ret = skb_chkf(skb_chk);
  4053. skb_push_rcsum(skb_chk, offset);
  4054. if (ret)
  4055. goto err;
  4056. return skb_chk;
  4057. err:
  4058. if (skb_chk && skb_chk != skb)
  4059. kfree_skb(skb_chk);
  4060. return NULL;
  4061. }
  4062. EXPORT_SYMBOL(skb_checksum_trimmed);
  4063. void __skb_warn_lro_forwarding(const struct sk_buff *skb)
  4064. {
  4065. net_warn_ratelimited("%s: received packets cannot be forwarded while LRO is enabled\n",
  4066. skb->dev->name);
  4067. }
  4068. EXPORT_SYMBOL(__skb_warn_lro_forwarding);
  4069. void kfree_skb_partial(struct sk_buff *skb, bool head_stolen)
  4070. {
  4071. if (head_stolen) {
  4072. skb_release_head_state(skb);
  4073. kmem_cache_free(skbuff_head_cache, skb);
  4074. } else {
  4075. __kfree_skb(skb);
  4076. }
  4077. }
  4078. EXPORT_SYMBOL(kfree_skb_partial);
  4079. /**
  4080. * skb_try_coalesce - try to merge skb to prior one
  4081. * @to: prior buffer
  4082. * @from: buffer to add
  4083. * @fragstolen: pointer to boolean
  4084. * @delta_truesize: how much more was allocated than was requested
  4085. */
  4086. bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
  4087. bool *fragstolen, int *delta_truesize)
  4088. {
  4089. struct skb_shared_info *to_shinfo, *from_shinfo;
  4090. int i, delta, len = from->len;
  4091. *fragstolen = false;
  4092. if (skb_cloned(to))
  4093. return false;
  4094. if (len <= skb_tailroom(to)) {
  4095. if (len)
  4096. BUG_ON(skb_copy_bits(from, 0, skb_put(to, len), len));
  4097. *delta_truesize = 0;
  4098. return true;
  4099. }
  4100. to_shinfo = skb_shinfo(to);
  4101. from_shinfo = skb_shinfo(from);
  4102. if (to_shinfo->frag_list || from_shinfo->frag_list)
  4103. return false;
  4104. if (skb_zcopy(to) || skb_zcopy(from))
  4105. return false;
  4106. if (skb_headlen(from) != 0) {
  4107. struct page *page;
  4108. unsigned int offset;
  4109. if (to_shinfo->nr_frags +
  4110. from_shinfo->nr_frags >= MAX_SKB_FRAGS)
  4111. return false;
  4112. if (skb_head_is_locked(from))
  4113. return false;
  4114. delta = from->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff));
  4115. page = virt_to_head_page(from->head);
  4116. offset = from->data - (unsigned char *)page_address(page);
  4117. skb_fill_page_desc(to, to_shinfo->nr_frags,
  4118. page, offset, skb_headlen(from));
  4119. *fragstolen = true;
  4120. } else {
  4121. if (to_shinfo->nr_frags +
  4122. from_shinfo->nr_frags > MAX_SKB_FRAGS)
  4123. return false;
  4124. delta = from->truesize - SKB_TRUESIZE(skb_end_offset(from));
  4125. }
  4126. WARN_ON_ONCE(delta < len);
  4127. memcpy(to_shinfo->frags + to_shinfo->nr_frags,
  4128. from_shinfo->frags,
  4129. from_shinfo->nr_frags * sizeof(skb_frag_t));
  4130. to_shinfo->nr_frags += from_shinfo->nr_frags;
  4131. if (!skb_cloned(from))
  4132. from_shinfo->nr_frags = 0;
  4133. /* if the skb is not cloned this does nothing
  4134. * since we set nr_frags to 0.
  4135. */
  4136. for (i = 0; i < from_shinfo->nr_frags; i++)
  4137. __skb_frag_ref(&from_shinfo->frags[i]);
  4138. to->truesize += delta;
  4139. to->len += len;
  4140. to->data_len += len;
  4141. *delta_truesize = delta;
  4142. return true;
  4143. }
  4144. EXPORT_SYMBOL(skb_try_coalesce);
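/*
 * Example (illustrative sketch, not from the original file): the usual
 * receive-queue pattern around skb_try_coalesce(), in the style of TCP
 * appending an in-order segment to the tail skb. On success the source skb
 * is released with kfree_skb_partial(), which knows whether its head was
 * stolen.
 */
static bool example_queue_coalesce(struct sock *sk, struct sk_buff *tail,
				   struct sk_buff *skb)
{
	bool fragstolen;
	int delta;

	if (!skb_try_coalesce(tail, skb, &fragstolen, &delta))
		return false;			/* caller queues skb separately */

	atomic_add(delta, &sk->sk_rmem_alloc);
	sk_mem_charge(sk, delta);
	kfree_skb_partial(skb, fragstolen);
	return true;
}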
  4145. /**
  4146. * skb_scrub_packet - scrub an skb
  4147. *
  4148. * @skb: buffer to clean
  4149. * @xnet: packet is crossing netns
  4150. *
4151. * skb_scrub_packet can be used after encapsulating or decapsulating a packet
4152. * into/from a tunnel. Some information has to be cleared during these
  4153. * operations.
  4154. * skb_scrub_packet can also be used to clean a skb before injecting it in
  4155. * another namespace (@xnet == true). We have to clear all information in the
  4156. * skb that could impact namespace isolation.
  4157. */
  4158. void skb_scrub_packet(struct sk_buff *skb, bool xnet)
  4159. {
  4160. skb->tstamp = 0;
  4161. skb->pkt_type = PACKET_HOST;
  4162. skb->skb_iif = 0;
  4163. skb->ignore_df = 0;
  4164. skb_dst_drop(skb);
  4165. secpath_reset(skb);
  4166. nf_reset(skb);
  4167. nf_reset_trace(skb);
  4168. if (!xnet)
  4169. return;
  4170. ipvs_reset(skb);
  4171. skb_orphan(skb);
  4172. skb->mark = 0;
  4173. }
  4174. EXPORT_SYMBOL_GPL(skb_scrub_packet);
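/*
 * Example (illustrative sketch, not from the original file): scrub a packet
 * before handing it to another device, clearing the namespace-sensitive
 * state only when the destination really lives in a different netns.
 */
static void example_forward_prepare(struct sk_buff *skb,
				    struct net_device *dst_dev)
{
	bool xnet = !net_eq(dev_net(skb->dev), dev_net(dst_dev));

	skb_scrub_packet(skb, xnet);
	skb->dev = dst_dev;
}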
  4175. /**
  4176. * skb_gso_transport_seglen - Return length of individual segments of a gso packet
  4177. *
  4178. * @skb: GSO skb
  4179. *
  4180. * skb_gso_transport_seglen is used to determine the real size of the
  4181. * individual segments, including Layer4 headers (TCP/UDP).
  4182. *
  4183. * The MAC/L2 or network (IP, IPv6) headers are not accounted for.
  4184. */
  4185. static unsigned int skb_gso_transport_seglen(const struct sk_buff *skb)
  4186. {
  4187. const struct skb_shared_info *shinfo = skb_shinfo(skb);
  4188. unsigned int thlen = 0;
  4189. if (skb->encapsulation) {
  4190. thlen = skb_inner_transport_header(skb) -
  4191. skb_transport_header(skb);
  4192. if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
  4193. thlen += inner_tcp_hdrlen(skb);
  4194. } else if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) {
  4195. thlen = tcp_hdrlen(skb);
  4196. } else if (unlikely(skb_is_gso_sctp(skb))) {
  4197. thlen = sizeof(struct sctphdr);
  4198. } else if (shinfo->gso_type & SKB_GSO_UDP_L4) {
  4199. thlen = sizeof(struct udphdr);
  4200. }
  4201. /* UFO sets gso_size to the size of the fragmentation
  4202. * payload, i.e. the size of the L4 (UDP) header is already
  4203. * accounted for.
  4204. */
  4205. return thlen + shinfo->gso_size;
  4206. }
/**
 * skb_gso_network_seglen - Return length of individual segments of a gso packet
 *
 * @skb: GSO skb
 *
 * skb_gso_network_seglen is used to determine the real size of the
 * individual segments, including Layer3 (IP, IPv6) and L4 headers (TCP/UDP).
 *
 * The MAC/L2 header is not accounted for.
 */
static unsigned int skb_gso_network_seglen(const struct sk_buff *skb)
{
	unsigned int hdr_len = skb_transport_header(skb) -
			       skb_network_header(skb);

	return hdr_len + skb_gso_transport_seglen(skb);
}
/**
 * skb_gso_mac_seglen - Return length of individual segments of a gso packet
 *
 * @skb: GSO skb
 *
 * skb_gso_mac_seglen is used to determine the real size of the
 * individual segments, including MAC/L2, Layer3 (IP, IPv6) and L4
 * headers (TCP/UDP).
 */
static unsigned int skb_gso_mac_seglen(const struct sk_buff *skb)
{
	unsigned int hdr_len = skb_transport_header(skb) - skb_mac_header(skb);

	return hdr_len + skb_gso_transport_seglen(skb);
}
/**
 * skb_gso_size_check - check the skb size, considering GSO_BY_FRAGS
 *
 * There are a couple of instances where we have a GSO skb, and we
 * want to determine what size it would be after it is segmented.
 *
 * We might want to check:
 * - L3+L4+payload size (e.g. IP forwarding)
 * - L2+L3+L4+payload size (e.g. sanity check before passing to driver)
 *
 * This is a helper to do that correctly considering GSO_BY_FRAGS.
 *
 * @skb: GSO skb
 *
 * @seg_len: The segmented length (from skb_gso_*_seglen). In the
 *           GSO_BY_FRAGS case this will be [header sizes + GSO_BY_FRAGS].
 *
 * @max_len: The maximum permissible length.
 *
 * Returns true if the segmented length <= max length.
 */
static inline bool skb_gso_size_check(const struct sk_buff *skb,
				      unsigned int seg_len,
				      unsigned int max_len)
{
	const struct skb_shared_info *shinfo = skb_shinfo(skb);
	const struct sk_buff *iter;

	if (shinfo->gso_size != GSO_BY_FRAGS)
		return seg_len <= max_len;

	/* Undo this so we can re-use header sizes */
	seg_len -= GSO_BY_FRAGS;

	skb_walk_frags(skb, iter) {
		if (seg_len + skb_headlen(iter) > max_len)
			return false;
	}

	return true;
}
/**
 * skb_gso_validate_network_len - Will a split GSO skb fit into a given MTU?
 *
 * @skb: GSO skb
 * @mtu: MTU to validate against
 *
 * skb_gso_validate_network_len validates if a given skb will fit a
 * wanted MTU once split. It considers L3 headers, L4 headers, and the
 * payload.
 */
bool skb_gso_validate_network_len(const struct sk_buff *skb, unsigned int mtu)
{
	return skb_gso_size_check(skb, skb_gso_network_seglen(skb), mtu);
}
EXPORT_SYMBOL_GPL(skb_gso_validate_network_len);
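
/* Usage sketch (illustrative only): IP forwarding decides whether a GSO skb
 * exceeds the route MTU roughly like ip_exceeds_mtu() does:
 *
 *	if (skb_is_gso(skb) && !skb_gso_validate_network_len(skb, mtu))
 *		goto send_frag_needed;
 *
 * "send_frag_needed" stands in for the caller's error path (e.g. sending an
 * ICMP "fragmentation needed" message).
 */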
/**
 * skb_gso_validate_mac_len - Will a split GSO skb fit in a given length?
 *
 * @skb: GSO skb
 * @len: length to validate against
 *
 * skb_gso_validate_mac_len validates if a given skb will fit a wanted
 * length once split, including L2, L3 and L4 headers and the payload.
 */
bool skb_gso_validate_mac_len(const struct sk_buff *skb, unsigned int len)
{
	return skb_gso_size_check(skb, skb_gso_mac_seglen(skb), len);
}
EXPORT_SYMBOL_GPL(skb_gso_validate_mac_len);
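
/* Usage sketch (illustrative only): a qdisc with a hard per-packet byte limit
 * can check whether every segment of a GSO skb would stay under that limit,
 * in the spirit of sch_tbf:
 *
 *	if (qdisc_pkt_len(skb) > q->max_size &&
 *	    !(skb_is_gso(skb) && skb_gso_validate_mac_len(skb, q->max_size)))
 *		return qdisc_drop(skb, sch, to_free);
 *
 * "q->max_size", "sch" and "to_free" are the (hypothetical) qdisc's own state.
 */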
static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb)
{
	int mac_len;

	if (skb_cow(skb, skb_headroom(skb)) < 0) {
		kfree_skb(skb);
		return NULL;
	}

	mac_len = skb->data - skb_mac_header(skb);
	if (likely(mac_len > VLAN_HLEN + ETH_TLEN)) {
		memmove(skb_mac_header(skb) + VLAN_HLEN, skb_mac_header(skb),
			mac_len - VLAN_HLEN - ETH_TLEN);
	}
	skb->mac_header += VLAN_HLEN;
	return skb;
}

struct sk_buff *skb_vlan_untag(struct sk_buff *skb)
{
	struct vlan_hdr *vhdr;
	u16 vlan_tci;

	if (unlikely(skb_vlan_tag_present(skb))) {
		/* vlan_tci is already set-up so leave this for another time */
		return skb;
	}

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		goto err_free;

	if (unlikely(!pskb_may_pull(skb, VLAN_HLEN)))
		goto err_free;

	vhdr = (struct vlan_hdr *)skb->data;
	vlan_tci = ntohs(vhdr->h_vlan_TCI);
	__vlan_hwaccel_put_tag(skb, skb->protocol, vlan_tci);

	skb_pull_rcsum(skb, VLAN_HLEN);
	vlan_set_encap_proto(skb, vhdr);

	skb = skb_reorder_vlan_header(skb);
	if (unlikely(!skb))
		goto err_free;

	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);
	skb_reset_mac_len(skb);

	return skb;

err_free:
	kfree_skb(skb);
	return NULL;
}
EXPORT_SYMBOL(skb_vlan_untag);
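
/* Usage sketch (illustrative only): the core receive path moves an in-band
 * 802.1Q header into the hwaccel tag before protocol demux, roughly as
 * __netif_receive_skb_core() does:
 *
 *	if (eth_type_vlan(skb->protocol)) {
 *		skb = skb_vlan_untag(skb);
 *		if (unlikely(!skb))
 *			goto out;
 *	}
 */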
int skb_ensure_writable(struct sk_buff *skb, int write_len)
{
	if (!pskb_may_pull(skb, write_len))
		return -ENOMEM;

	if (!skb_cloned(skb) || skb_clone_writable(skb, write_len))
		return 0;

	return pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
}
EXPORT_SYMBOL(skb_ensure_writable);
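
/* Usage sketch (illustrative only): callers about to modify headers in place
 * (e.g. NAT helpers or BPF) first make the first write_len bytes private and
 * linear:
 *
 *	struct iphdr *iph;
 *	int err;
 *
 *	err = skb_ensure_writable(skb, ETH_HLEN + sizeof(struct iphdr));
 *	if (err)
 *		return err;
 *	iph = ip_hdr(skb);
 *	csum_replace4(&iph->check, iph->saddr, new_saddr);
 *	iph->saddr = new_saddr;
 *
 * "new_saddr" is a hypothetical replacement address; csum_replace4() is the
 * usual incremental checksum helper.
 */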
/* Remove the VLAN header from the packet and update the csum accordingly.
 * Expects an skb with no hardware-accelerated tag (!skb_vlan_tag_present())
 * and a VLAN tag in the payload.
 */
int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci)
{
	struct vlan_hdr *vhdr;
	int offset = skb->data - skb_mac_header(skb);
	int err;

	if (WARN_ONCE(offset,
		      "__skb_vlan_pop got skb with skb->data not at mac header (offset %d)\n",
		      offset)) {
		return -EINVAL;
	}

	err = skb_ensure_writable(skb, VLAN_ETH_HLEN);
	if (unlikely(err))
		return err;

	skb_postpull_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN);

	vhdr = (struct vlan_hdr *)(skb->data + ETH_HLEN);
	*vlan_tci = ntohs(vhdr->h_vlan_TCI);

	memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN);
	__skb_pull(skb, VLAN_HLEN);

	vlan_set_encap_proto(skb, vhdr);
	skb->mac_header += VLAN_HLEN;

	if (skb_network_offset(skb) < ETH_HLEN)
		skb_set_network_header(skb, ETH_HLEN);

	skb_reset_mac_len(skb);

	return err;
}
EXPORT_SYMBOL(__skb_vlan_pop);
/* Pop a vlan tag either from hwaccel or from payload.
 * Expects skb->data at mac header.
 */
int skb_vlan_pop(struct sk_buff *skb)
{
	u16 vlan_tci;
	__be16 vlan_proto;
	int err;

	if (likely(skb_vlan_tag_present(skb))) {
		skb->vlan_tci = 0;
	} else {
		if (unlikely(!eth_type_vlan(skb->protocol)))
			return 0;

		err = __skb_vlan_pop(skb, &vlan_tci);
		if (err)
			return err;
	}
	/* move next vlan tag to hw accel tag */
	if (likely(!eth_type_vlan(skb->protocol)))
		return 0;

	vlan_proto = skb->protocol;
	err = __skb_vlan_pop(skb, &vlan_tci);
	if (unlikely(err))
		return err;

	__vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci);
	return 0;
}
EXPORT_SYMBOL(skb_vlan_pop);
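
/* Usage sketch (illustrative only): an action that strips the outermost VLAN
 * tag, in the spirit of act_vlan or openvswitch, simply calls
 *
 *	err = skb_vlan_pop(skb);
 *	if (err)
 *		goto drop;
 *
 * with skb->data still pointing at the mac header, as noted above.
 */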
/* Push a vlan tag either into hwaccel or into payload (if hwaccel tag present).
 * Expects skb->data at mac header.
 */
int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci)
{
	if (skb_vlan_tag_present(skb)) {
		int offset = skb->data - skb_mac_header(skb);
		int err;

		if (WARN_ONCE(offset,
			      "skb_vlan_push got skb with skb->data not at mac header (offset %d)\n",
			      offset)) {
			return -EINVAL;
		}

		err = __vlan_insert_tag(skb, skb->vlan_proto,
					skb_vlan_tag_get(skb));
		if (err)
			return err;

		skb->protocol = skb->vlan_proto;
		skb->mac_len += VLAN_HLEN;

		skb_postpush_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN);
	}
	__vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci);
	return 0;
}
EXPORT_SYMBOL(skb_vlan_push);
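
/* Usage sketch (illustrative only): tagging a packet with 802.1Q VID 10 before
 * transmission might look like
 *
 *	err = skb_vlan_push(skb, htons(ETH_P_8021Q), 10);
 *	if (err)
 *		goto drop;
 *
 * Any previously present hwaccel tag is pushed into the payload first, as
 * described above.
 */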
/**
 * alloc_skb_with_frags - allocate skb with page frags
 *
 * @header_len: size of linear part
 * @data_len: needed length in frags
 * @max_page_order: max page order desired.
 * @errcode: pointer to error code if any
 * @gfp_mask: allocation mask
 *
 * This can be used to allocate a paged skb, given a maximal order for frags.
 */
struct sk_buff *alloc_skb_with_frags(unsigned long header_len,
				     unsigned long data_len,
				     int max_page_order,
				     int *errcode,
				     gfp_t gfp_mask)
{
	int npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	unsigned long chunk;
	struct sk_buff *skb;
	struct page *page;
	gfp_t gfp_head;
	int i;

	*errcode = -EMSGSIZE;
	/* Note this test could be relaxed, if we succeed to allocate
	 * high order pages...
	 */
	if (npages > MAX_SKB_FRAGS)
		return NULL;

	gfp_head = gfp_mask;
	if (gfp_head & __GFP_DIRECT_RECLAIM)
		gfp_head |= __GFP_RETRY_MAYFAIL;

	*errcode = -ENOBUFS;
	skb = alloc_skb(header_len, gfp_head);
	if (!skb)
		return NULL;

	skb->truesize += npages << PAGE_SHIFT;

	for (i = 0; npages > 0; i++) {
		int order = max_page_order;

		while (order) {
			if (npages >= 1 << order) {
				page = alloc_pages((gfp_mask & ~__GFP_DIRECT_RECLAIM) |
						   __GFP_COMP |
						   __GFP_NOWARN |
						   __GFP_NORETRY,
						   order);
				if (page)
					goto fill_page;
				/* Do not retry other high order allocations */
				order = 1;
				max_page_order = 0;
			}
			order--;
		}
		page = alloc_page(gfp_mask);
		if (!page)
			goto failure;
fill_page:
		chunk = min_t(unsigned long, data_len,
			      PAGE_SIZE << order);
		skb_fill_page_desc(skb, i, page, 0, chunk);
		data_len -= chunk;
		npages -= 1 << order;
	}
	return skb;

failure:
	kfree_skb(skb);
	return NULL;
}
EXPORT_SYMBOL(alloc_skb_with_frags);
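
/* Usage sketch (illustrative only): allocating a packet with a small linear
 * header and the payload held in page frags, in the style of
 * sock_alloc_send_pskb():
 *
 *	int err;
 *	struct sk_buff *skb;
 *
 *	skb = alloc_skb_with_frags(header_len, payload_len,
 *				   PAGE_ALLOC_COSTLY_ORDER, &err, GFP_KERNEL);
 *	if (!skb)
 *		return ERR_PTR(err);
 *	skb_reserve(skb, headroom);
 *
 * "header_len", "payload_len" and "headroom" are caller-chosen sizes.
 */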
/* carve out the first off bytes from skb when off < headlen */
static int pskb_carve_inside_header(struct sk_buff *skb, const u32 off,
				    const int headlen, gfp_t gfp_mask)
{
	int i;
	int size = skb_end_offset(skb);
	int new_hlen = headlen - off;
	u8 *data;

	size = SKB_DATA_ALIGN(size);

	if (skb_pfmemalloc(skb))
		gfp_mask |= __GFP_MEMALLOC;
	data = kmalloc_reserve(size +
			       SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
			       gfp_mask, NUMA_NO_NODE, NULL);
	if (!data)
		return -ENOMEM;

	size = SKB_WITH_OVERHEAD(ksize(data));

	/* Copy real data, and all frags */
	skb_copy_from_linear_data_offset(skb, off, data, new_hlen);
	skb->len -= off;

	memcpy((struct skb_shared_info *)(data + size),
	       skb_shinfo(skb),
	       offsetof(struct skb_shared_info,
			frags[skb_shinfo(skb)->nr_frags]));
	if (skb_cloned(skb)) {
		/* drop the old head gracefully */
		if (skb_orphan_frags(skb, gfp_mask)) {
			kfree(data);
			return -ENOMEM;
		}
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
			skb_frag_ref(skb, i);
		if (skb_has_frag_list(skb))
			skb_clone_fraglist(skb);
		skb_release_data(skb);
	} else {
		/* we can reuse the existing refcount - all we did was
		 * relocate values
		 */
		skb_free_head(skb);
	}

	skb->head = data;
	skb->data = data;
	skb->head_frag = 0;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
	skb->end = size;
#else
	skb->end = skb->head + size;
#endif
	skb_set_tail_pointer(skb, skb_headlen(skb));
	skb_headers_offset_update(skb, 0);
	skb->cloned = 0;
	skb->hdr_len = 0;
	skb->nohdr = 0;
	atomic_set(&skb_shinfo(skb)->dataref, 1);

	return 0;
}
static int pskb_carve(struct sk_buff *skb, const u32 off, gfp_t gfp);

/* carve out the first eat bytes from skb's frag_list. May recurse into
 * pskb_carve()
 */
static int pskb_carve_frag_list(struct sk_buff *skb,
				struct skb_shared_info *shinfo, int eat,
				gfp_t gfp_mask)
{
	struct sk_buff *list = shinfo->frag_list;
	struct sk_buff *clone = NULL;
	struct sk_buff *insp = NULL;

	do {
		if (!list) {
			pr_err("Not enough bytes to eat. Want %d\n", eat);
			return -EFAULT;
		}
		if (list->len <= eat) {
			/* Eaten as whole. */
			eat -= list->len;
			list = list->next;
			insp = list;
		} else {
			/* Eaten partially. */
			if (skb_shared(list)) {
				clone = skb_clone(list, gfp_mask);
				if (!clone)
					return -ENOMEM;
				insp = list->next;
				list = clone;
			} else {
				/* This may be pulled without problems. */
				insp = list;
			}
			if (pskb_carve(list, eat, gfp_mask) < 0) {
				kfree_skb(clone);
				return -ENOMEM;
			}
			break;
		}
	} while (eat);

	/* Free pulled out fragments. */
	while ((list = shinfo->frag_list) != insp) {
		shinfo->frag_list = list->next;
		kfree_skb(list);
	}
	/* And insert new clone at head. */
	if (clone) {
		clone->next = list;
		shinfo->frag_list = clone;
	}
	return 0;
}
/* carve off first len bytes from skb. Split line (off) is in the
 * non-linear part of skb
 */
static int pskb_carve_inside_nonlinear(struct sk_buff *skb, const u32 off,
				       int pos, gfp_t gfp_mask)
{
	int i, k = 0;
	int size = skb_end_offset(skb);
	u8 *data;
	const int nfrags = skb_shinfo(skb)->nr_frags;
	struct skb_shared_info *shinfo;

	size = SKB_DATA_ALIGN(size);

	if (skb_pfmemalloc(skb))
		gfp_mask |= __GFP_MEMALLOC;
	data = kmalloc_reserve(size +
			       SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
			       gfp_mask, NUMA_NO_NODE, NULL);
	if (!data)
		return -ENOMEM;

	size = SKB_WITH_OVERHEAD(ksize(data));

	memcpy((struct skb_shared_info *)(data + size),
	       skb_shinfo(skb), offsetof(struct skb_shared_info,
					 frags[skb_shinfo(skb)->nr_frags]));
	if (skb_orphan_frags(skb, gfp_mask)) {
		kfree(data);
		return -ENOMEM;
	}
	shinfo = (struct skb_shared_info *)(data + size);
	for (i = 0; i < nfrags; i++) {
		int fsize = skb_frag_size(&skb_shinfo(skb)->frags[i]);

		if (pos + fsize > off) {
			shinfo->frags[k] = skb_shinfo(skb)->frags[i];

			if (pos < off) {
				/* Split frag.
				 * We have two variants in this case:
				 * 1. Move all the frag to the second
				 *    part, if it is possible. F.e.
				 *    this approach is mandatory for TUX,
				 *    where splitting is expensive.
				 * 2. Split it accurately. We do the latter.
				 */
				shinfo->frags[0].page_offset += off - pos;
				skb_frag_size_sub(&shinfo->frags[0], off - pos);
			}
			skb_frag_ref(skb, i);
			k++;
		}
		pos += fsize;
	}
	shinfo->nr_frags = k;
	if (skb_has_frag_list(skb))
		skb_clone_fraglist(skb);

	if (k == 0) {
		/* split line is in frag list */
		pskb_carve_frag_list(skb, shinfo, off - pos, gfp_mask);
	}
	skb_release_data(skb);

	skb->head = data;
	skb->head_frag = 0;
	skb->data = data;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
	skb->end = size;
#else
	skb->end = skb->head + size;
#endif
	skb_reset_tail_pointer(skb);
	skb_headers_offset_update(skb, 0);
	skb->cloned = 0;
	skb->hdr_len = 0;
	skb->nohdr = 0;
	skb->len -= off;
	skb->data_len = skb->len;
	atomic_set(&skb_shinfo(skb)->dataref, 1);
	return 0;
}
/* remove len bytes from the beginning of the skb */
static int pskb_carve(struct sk_buff *skb, const u32 len, gfp_t gfp)
{
	int headlen = skb_headlen(skb);

	if (len < headlen)
		return pskb_carve_inside_header(skb, len, headlen, gfp);
	else
		return pskb_carve_inside_nonlinear(skb, len, headlen, gfp);
}

/* Extract to_copy bytes starting at off from skb, and return this in
 * a new skb
 */
struct sk_buff *pskb_extract(struct sk_buff *skb, int off,
			     int to_copy, gfp_t gfp)
{
	struct sk_buff *clone = skb_clone(skb, gfp);

	if (!clone)
		return NULL;

	if (pskb_carve(clone, off, gfp) < 0 ||
	    pskb_trim(clone, to_copy)) {
		kfree_skb(clone);
		return NULL;
	}
	return clone;
}
EXPORT_SYMBOL(pskb_extract);
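
/* Usage sketch (illustrative only): carving a sub-record out of a larger
 * receive skb without copying the payload (page frags are shared by
 * reference):
 *
 *	struct sk_buff *part;
 *
 *	part = pskb_extract(skb, record_off, record_len, GFP_ATOMIC);
 *	if (!part)
 *		goto drop;
 *
 * "record_off" and "record_len" are hypothetical offsets describing the bytes
 * the caller wants in the new skb; the original skb is left untouched.
 */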
/**
 * skb_condense - try to get rid of fragments/frag_list if possible
 * @skb: buffer
 *
 * Can be used to save memory before skb is added to a busy queue.
 * If packet has bytes in frags and enough tail room in skb->head,
 * pull all of them, so that we can free the frags right now and adjust
 * truesize.
 * Notes:
 *	We do not reallocate skb->head thus cannot fail.
 *	Caller must re-evaluate skb->truesize if needed.
 */
void skb_condense(struct sk_buff *skb)
{
	if (skb->data_len) {
		if (skb->data_len > skb->end - skb->tail ||
		    skb_cloned(skb))
			return;

		/* Nice, we can free page frag(s) right now */
		__pskb_pull_tail(skb, skb->data_len);
	}
	/* At this point, skb->truesize might be overestimated,
	 * because skb had a fragment, and fragments do not tell
	 * their truesize.
	 * When we pulled its content into skb->head, fragment
	 * was freed, but __pskb_pull_tail() could not possibly
	 * adjust skb->truesize, not knowing the frag truesize.
	 */
	skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
}
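
/* Usage sketch (illustrative only): before parking an skb on a long-lived
 * queue (e.g. a socket backlog), shrink it and refresh the memory accounting,
 * much like tcp_add_backlog() does:
 *
 *	skb_condense(skb);
 *	limit = READ_ONCE(sk->sk_rcvbuf) + READ_ONCE(sk->sk_sndbuf);
 *	if (unlikely(sk_add_backlog(sk, skb, limit)))
 *		goto drop;
 *
 * The "limit" computation above only stands in for the caller's own policy;
 * the point is that skb->truesize is re-read after the call, as the kernel-doc
 * above requires.
 */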