/*
 * Copyright (C) 2012 Alexander Block.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/bsearch.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/sort.h>
#include <linux/mount.h>
#include <linux/xattr.h>
#include <linux/posix_acl_xattr.h>
#include <linux/radix-tree.h>
#include <linux/vmalloc.h>
#include <linux/string.h>

#include "send.h"
#include "backref.h"
#include "hash.h"
#include "locking.h"
#include "disk-io.h"
#include "btrfs_inode.h"
#include "transaction.h"

static int g_verbose = 0;

#define verbose_printk(...) if (g_verbose) printk(__VA_ARGS__)

/*
 * A fs_path is a helper to dynamically build path names with unknown size.
 * It reallocates the internal buffer on demand.
 * It allows fast adding of path elements on the right side (normal path) and
 * fast adding to the left side (reversed path). A reversed path can also be
 * unreversed if needed.
 */
struct fs_path {
	union {
		struct {
			char *start;
			char *end;

			char *buf;
			unsigned short buf_len:15;
			unsigned short reversed:1;
			char inline_buf[];
		};
		/*
		 * Average path length does not exceed 200 bytes, we'll have
		 * better packing in the slab and a higher chance to satisfy
		 * an allocation later during send.
		 */
		char pad[256];
	};
};
#define FS_PATH_INLINE_SIZE \
	(sizeof(struct fs_path) - offsetof(struct fs_path, inline_buf))
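
/*
 * Illustrative sketch (not part of the original code): typical fs_path
 * usage with the helpers defined below. Short names stay in inline_buf;
 * fs_path_add() grows the buffer on demand.
 */
#if 0
static int fs_path_example(void)
{
	struct fs_path *p;
	int ret;

	p = fs_path_alloc();
	if (!p)
		return -ENOMEM;
	ret = fs_path_add(p, "dir", 3);		/* p->start is "dir" */
	if (!ret)
		ret = fs_path_add(p, "file", 4);	/* "dir/file" */
	fs_path_free(p);
	return ret;
}
#endif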

/* reused for each extent */
struct clone_root {
	struct btrfs_root *root;
	u64 ino;
	u64 offset;

	u64 found_refs;
};

#define SEND_CTX_MAX_NAME_CACHE_SIZE 128
#define SEND_CTX_NAME_CACHE_CLEAN_SIZE (SEND_CTX_MAX_NAME_CACHE_SIZE * 2)

struct send_ctx {
	struct file *send_filp;
	loff_t send_off;
	char *send_buf;
	u32 send_size;
	u32 send_max_size;
	u64 total_send_size;
	u64 cmd_send_size[BTRFS_SEND_C_MAX + 1];
	u64 flags;	/* 'flags' member of btrfs_ioctl_send_args is u64 */

	struct btrfs_root *send_root;
	struct btrfs_root *parent_root;
	struct clone_root *clone_roots;
	int clone_roots_cnt;

	/* current state of the compare_tree call */
	struct btrfs_path *left_path;
	struct btrfs_path *right_path;
	struct btrfs_key *cmp_key;

	/*
	 * Info about the currently processed inode. In case of deleted
	 * inodes, these are the values from the deleted inode.
	 */
	u64 cur_ino;
	u64 cur_inode_gen;
	int cur_inode_new;
	int cur_inode_new_gen;
	int cur_inode_deleted;
	u64 cur_inode_size;
	u64 cur_inode_mode;
	u64 cur_inode_rdev;
	u64 cur_inode_last_extent;

	u64 send_progress;

	struct list_head new_refs;
	struct list_head deleted_refs;

	struct radix_tree_root name_cache;
	struct list_head name_cache_list;
	int name_cache_size;

	struct file_ra_state ra;

	char *read_buf;

	/*
	 * We process inodes in increasing order, so if before an
	 * incremental send we reverse the parent/child relationship of
	 * directories such that a directory with a lower inode number was
	 * the parent of a directory with a higher inode number, and the one
	 * becoming the new parent got renamed too, we can't rename/move the
	 * directory with the lower inode number when we finish processing
	 * it - we must process the directory with the higher inode number
	 * first, then rename/move it and then rename/move the directory
	 * with the lower inode number. Example follows.
	 *
	 * Tree state when the first send was performed:
	 *
	 * .
	 * |-- a                        (ino 257)
	 *     |-- b                    (ino 258)
	 *         |
	 *         |
	 *         |-- c                (ino 259)
	 *         |   |-- d            (ino 260)
	 *         |
	 *         |-- c2               (ino 261)
	 *
	 * Tree state when the second (incremental) send is performed:
	 *
	 * .
	 * |-- a                        (ino 257)
	 *     |-- b                    (ino 258)
	 *         |-- c2               (ino 261)
	 *             |-- d2           (ino 260)
	 *                 |-- cc       (ino 259)
	 *
	 * The sequence of steps that led to the second state was:
	 *
	 * mv /a/b/c/d /a/b/c2/d2
	 * mv /a/b/c /a/b/c2/d2/cc
	 *
	 * "c" has the lower inode number, but we can't move it (2nd mv
	 * operation) before we move "d", which has the higher inode number.
	 *
	 * So we just memorize which move/rename operations must be performed
	 * later when their respective parent is processed and moved/renamed.
	 */

	/* Indexed by parent directory inode number. */
	struct rb_root pending_dir_moves;

	/*
	 * Reverse index, indexed by the inode number of a directory that
	 * is waiting for the move/rename of its immediate parent before its
	 * own move/rename can be performed.
	 */
	struct rb_root waiting_dir_moves;

	/*
	 * A directory that is going to be rm'ed might have a child directory
	 * which is in the pending directory moves index above. In this case,
	 * the directory can only be removed after the move/rename of its child
	 * is performed. Example:
	 *
	 * Parent snapshot:
	 *
	 * .                        (ino 256)
	 * |-- a/                   (ino 257)
	 *     |-- b/               (ino 258)
	 *         |-- c/           (ino 259)
	 *         |   |-- x/       (ino 260)
	 *         |
	 *         |-- y/           (ino 261)
	 *
	 * Send snapshot:
	 *
	 * .                        (ino 256)
	 * |-- a/                   (ino 257)
	 *     |-- b/               (ino 258)
	 *         |-- YY/          (ino 261)
	 *              |-- x/      (ino 260)
	 *
	 * Sequence of steps that led to the send snapshot:
	 * rm -f /a/b/c/foo.txt
	 * mv /a/b/y /a/b/YY
	 * mv /a/b/c/x /a/b/YY
	 * rmdir /a/b/c
	 *
	 * When the child is processed, its move/rename is delayed until its
	 * parent is processed (as explained above), but all other operations
	 * like update utimes, chown, chgrp, etc, are performed and the paths
	 * that it uses for those operations must use the orphanized name of
	 * its parent (the directory we're going to rm later), so we need to
	 * memorize that name.
	 *
	 * Indexed by the inode number of the directory to be deleted.
	 */
	struct rb_root orphan_dirs;
};

struct pending_dir_move {
	struct rb_node node;
	struct list_head list;
	u64 parent_ino;
	u64 ino;
	u64 gen;
	bool is_orphan;
	struct list_head update_refs;
};

struct waiting_dir_move {
	struct rb_node node;
	u64 ino;
	/*
	 * There might be some directory that could not be removed because it
	 * was waiting for this directory inode to be moved first. Therefore
	 * after this directory is moved, we can try to rmdir the inode
	 * rmdir_ino.
	 */
	u64 rmdir_ino;
	bool orphanized;
};

struct orphan_dir_info {
	struct rb_node node;
	u64 ino;
	u64 gen;
};

struct name_cache_entry {
	struct list_head list;
	/*
	 * radix_tree has only 32bit entries but we need to handle 64bit inums.
	 * We use the lower 32bit of the 64bit inum to store it in the tree. If
	 * more than one inum would fall into the same entry, we use radix_list
	 * to store the additional entries. radix_list is also used to store
	 * entries where two entries have the same inum but different
	 * generations.
	 */
	struct list_head radix_list;
	u64 ino;
	u64 gen;
	u64 parent_ino;
	u64 parent_gen;
	int ret;
	int need_later_update;
	int name_len;
	char name[];
};
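
/*
 * Illustrative sketch (not part of the original code, and a simplifying
 * assumption): the radix tree key for a name cache entry is derived from
 * the lower 32 bits of the inode number, so different 64-bit inums can
 * collide on one key and end up together on a radix_list.
 */
#if 0
static unsigned long name_cache_key_example(u64 ino)
{
	return (unsigned long)(ino & 0xffffffffULL);
}
#endif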

static int is_waiting_for_move(struct send_ctx *sctx, u64 ino);

static struct waiting_dir_move *
get_waiting_dir_move(struct send_ctx *sctx, u64 ino);

static int is_waiting_for_rm(struct send_ctx *sctx, u64 dir_ino);

static int need_send_hole(struct send_ctx *sctx)
{
	return (sctx->parent_root && !sctx->cur_inode_new &&
		!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted &&
		S_ISREG(sctx->cur_inode_mode));
}

static void fs_path_reset(struct fs_path *p)
{
	if (p->reversed) {
		p->start = p->buf + p->buf_len - 1;
		p->end = p->start;
		*p->start = 0;
	} else {
		p->start = p->buf;
		p->end = p->start;
		*p->start = 0;
	}
}

static struct fs_path *fs_path_alloc(void)
{
	struct fs_path *p;

	p = kmalloc(sizeof(*p), GFP_NOFS);
	if (!p)
		return NULL;
	p->reversed = 0;
	p->buf = p->inline_buf;
	p->buf_len = FS_PATH_INLINE_SIZE;
	fs_path_reset(p);
	return p;
}

static struct fs_path *fs_path_alloc_reversed(void)
{
	struct fs_path *p;

	p = fs_path_alloc();
	if (!p)
		return NULL;
	p->reversed = 1;
	fs_path_reset(p);
	return p;
}

static void fs_path_free(struct fs_path *p)
{
	if (!p)
		return;
	if (p->buf != p->inline_buf)
		kfree(p->buf);
	kfree(p);
}

static int fs_path_len(struct fs_path *p)
{
	return p->end - p->start;
}

static int fs_path_ensure_buf(struct fs_path *p, int len)
{
	char *tmp_buf;
	int path_len;
	int old_buf_len;

	len++;
	if (p->buf_len >= len)
		return 0;

	if (len > PATH_MAX) {
		WARN_ON(1);
		return -ENOMEM;
	}

	path_len = p->end - p->start;
	old_buf_len = p->buf_len;

	/*
	 * First time the inline_buf does not suffice
	 */
	if (p->buf == p->inline_buf) {
		tmp_buf = kmalloc(len, GFP_NOFS);
		if (tmp_buf)
			memcpy(tmp_buf, p->buf, old_buf_len);
	} else {
		tmp_buf = krealloc(p->buf, len, GFP_NOFS);
	}
	if (!tmp_buf)
		return -ENOMEM;
	p->buf = tmp_buf;
	/*
	 * The real size of the buffer is bigger; this lets the fast path
	 * trigger most of the time.
	 */
	p->buf_len = ksize(p->buf);

	if (p->reversed) {
		tmp_buf = p->buf + old_buf_len - path_len - 1;
		p->end = p->buf + p->buf_len - 1;
		p->start = p->end - path_len;
		memmove(p->start, tmp_buf, path_len + 1);
	} else {
		p->start = p->buf;
		p->end = p->start + path_len;
	}
	return 0;
}

static int fs_path_prepare_for_add(struct fs_path *p, int name_len,
				   char **prepared)
{
	int ret;
	int new_len;

	new_len = p->end - p->start + name_len;
	if (p->start != p->end)
		new_len++;
	ret = fs_path_ensure_buf(p, new_len);
	if (ret < 0)
		goto out;

	if (p->reversed) {
		if (p->start != p->end)
			*--p->start = '/';
		p->start -= name_len;
		*prepared = p->start;
	} else {
		if (p->start != p->end)
			*p->end++ = '/';
		*prepared = p->end;
		p->end += name_len;
		*p->end = 0;
	}

out:
	return ret;
}

static int fs_path_add(struct fs_path *p, const char *name, int name_len)
{
	int ret;
	char *prepared;

	ret = fs_path_prepare_for_add(p, name_len, &prepared);
	if (ret < 0)
		goto out;
	memcpy(prepared, name, name_len);

out:
	return ret;
}

static int fs_path_add_path(struct fs_path *p, struct fs_path *p2)
{
	int ret;
	char *prepared;

	ret = fs_path_prepare_for_add(p, p2->end - p2->start, &prepared);
	if (ret < 0)
		goto out;
	memcpy(prepared, p2->start, p2->end - p2->start);

out:
	return ret;
}

static int fs_path_add_from_extent_buffer(struct fs_path *p,
					  struct extent_buffer *eb,
					  unsigned long off, int len)
{
	int ret;
	char *prepared;

	ret = fs_path_prepare_for_add(p, len, &prepared);
	if (ret < 0)
		goto out;

	read_extent_buffer(eb, prepared, off, len);

out:
	return ret;
}

static int fs_path_copy(struct fs_path *p, struct fs_path *from)
{
	int ret;

	p->reversed = from->reversed;
	fs_path_reset(p);

	ret = fs_path_add_path(p, from);

	return ret;
}

static void fs_path_unreverse(struct fs_path *p)
{
	char *tmp;
	int len;

	if (!p->reversed)
		return;

	tmp = p->start;
	len = p->end - p->start;
	p->start = p->buf;
	p->end = p->start + len;
	memmove(p->start, tmp, len + 1);

	p->reversed = 0;
}
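
/*
 * Illustrative sketch (not part of the original code): building a path
 * from leaf to root with a reversed fs_path. Components are prepended and
 * the buffer is unreversed once at the end.
 */
#if 0
static int reversed_path_example(void)
{
	struct fs_path *p;
	int ret;

	p = fs_path_alloc_reversed();
	if (!p)
		return -ENOMEM;
	ret = fs_path_add(p, "file", 4);	/* "file" */
	if (!ret)
		ret = fs_path_add(p, "dir", 3);	/* "dir/file" */
	if (!ret)
		fs_path_unreverse(p);	/* move to buf start for normal use */
	fs_path_free(p);
	return ret;
}
#endif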

static struct btrfs_path *alloc_path_for_send(void)
{
	struct btrfs_path *path;

	path = btrfs_alloc_path();
	if (!path)
		return NULL;
	path->search_commit_root = 1;
	path->skip_locking = 1;
	path->need_commit_sem = 1;
	return path;
}

static int write_buf(struct file *filp, const void *buf, u32 len, loff_t *off)
{
	int ret;
	mm_segment_t old_fs;
	u32 pos = 0;

	old_fs = get_fs();
	set_fs(KERNEL_DS);

	while (pos < len) {
		ret = vfs_write(filp, (__force const char __user *)buf + pos,
				len - pos, off);
		/* TODO handle that correctly */
		/*if (ret == -ERESTARTSYS) {
			continue;
		}*/
		if (ret < 0)
			goto out;
		if (ret == 0) {
			ret = -EIO;
			goto out;
		}
		pos += ret;
	}

	ret = 0;

out:
	set_fs(old_fs);
	return ret;
}

static int tlv_put(struct send_ctx *sctx, u16 attr, const void *data, int len)
{
	struct btrfs_tlv_header *hdr;
	int total_len = sizeof(*hdr) + len;
	int left = sctx->send_max_size - sctx->send_size;

	if (unlikely(left < total_len))
		return -EOVERFLOW;

	hdr = (struct btrfs_tlv_header *) (sctx->send_buf + sctx->send_size);
	hdr->tlv_type = cpu_to_le16(attr);
	hdr->tlv_len = cpu_to_le16(len);
	memcpy(hdr + 1, data, len);
	sctx->send_size += total_len;

	return 0;
}

#define TLV_PUT_DEFINE_INT(bits) \
	static int tlv_put_u##bits(struct send_ctx *sctx,		\
				   u##bits attr, u##bits value)		\
	{								\
		__le##bits __tmp = cpu_to_le##bits(value);		\
		return tlv_put(sctx, attr, &__tmp, sizeof(__tmp));	\
	}

TLV_PUT_DEFINE_INT(64)
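
/*
 * Illustrative sketch (not part of the original code): each attribute is
 * serialized by tlv_put() as a little-endian btrfs_tlv_header immediately
 * followed by the payload, so a u64 attribute consumes
 * sizeof(struct btrfs_tlv_header) + 8 bytes of send_buf.
 */
#if 0
static int tlv_example(struct send_ctx *sctx)
{
	/* BTRFS_SEND_A_SIZE is just an arbitrary attribute for the example */
	return tlv_put_u64(sctx, BTRFS_SEND_A_SIZE, 4096);
}
#endif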

static int tlv_put_string(struct send_ctx *sctx, u16 attr,
			  const char *str, int len)
{
	if (len == -1)
		len = strlen(str);
	return tlv_put(sctx, attr, str, len);
}

static int tlv_put_uuid(struct send_ctx *sctx, u16 attr,
			const u8 *uuid)
{
	return tlv_put(sctx, attr, uuid, BTRFS_UUID_SIZE);
}

static int tlv_put_btrfs_timespec(struct send_ctx *sctx, u16 attr,
				  struct extent_buffer *eb,
				  struct btrfs_timespec *ts)
{
	struct btrfs_timespec bts;

	read_extent_buffer(eb, &bts, (unsigned long)ts, sizeof(bts));
	return tlv_put(sctx, attr, &bts, sizeof(bts));
}

#define TLV_PUT(sctx, attrtype, attrlen, data) \
	do { \
		ret = tlv_put(sctx, attrtype, attrlen, data); \
		if (ret < 0) \
			goto tlv_put_failure; \
	} while (0)

#define TLV_PUT_INT(sctx, attrtype, bits, value) \
	do { \
		ret = tlv_put_u##bits(sctx, attrtype, value); \
		if (ret < 0) \
			goto tlv_put_failure; \
	} while (0)

#define TLV_PUT_U8(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 8, data)
#define TLV_PUT_U16(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 16, data)
#define TLV_PUT_U32(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 32, data)
#define TLV_PUT_U64(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 64, data)
#define TLV_PUT_STRING(sctx, attrtype, str, len) \
	do { \
		ret = tlv_put_string(sctx, attrtype, str, len); \
		if (ret < 0) \
			goto tlv_put_failure; \
	} while (0)
#define TLV_PUT_PATH(sctx, attrtype, p) \
	do { \
		ret = tlv_put_string(sctx, attrtype, p->start, \
				     p->end - p->start); \
		if (ret < 0) \
			goto tlv_put_failure; \
	} while (0)
#define TLV_PUT_UUID(sctx, attrtype, uuid) \
	do { \
		ret = tlv_put_uuid(sctx, attrtype, uuid); \
		if (ret < 0) \
			goto tlv_put_failure; \
	} while (0)
#define TLV_PUT_BTRFS_TIMESPEC(sctx, attrtype, eb, ts) \
	do { \
		ret = tlv_put_btrfs_timespec(sctx, attrtype, eb, ts); \
		if (ret < 0) \
			goto tlv_put_failure; \
	} while (0)

static int send_header(struct send_ctx *sctx)
{
	struct btrfs_stream_header hdr;

	strcpy(hdr.magic, BTRFS_SEND_STREAM_MAGIC);
	hdr.version = cpu_to_le32(BTRFS_SEND_STREAM_VERSION);

	return write_buf(sctx->send_filp, &hdr, sizeof(hdr),
			 &sctx->send_off);
}

/*
 * For each command/item we want to send to userspace, we call this function.
 */
static int begin_cmd(struct send_ctx *sctx, int cmd)
{
	struct btrfs_cmd_header *hdr;

	if (WARN_ON(!sctx->send_buf))
		return -EINVAL;

	BUG_ON(sctx->send_size);

	sctx->send_size += sizeof(*hdr);
	hdr = (struct btrfs_cmd_header *)sctx->send_buf;
	hdr->cmd = cpu_to_le16(cmd);

	return 0;
}

static int send_cmd(struct send_ctx *sctx)
{
	int ret;
	struct btrfs_cmd_header *hdr;
	u32 crc;

	hdr = (struct btrfs_cmd_header *)sctx->send_buf;
	hdr->len = cpu_to_le32(sctx->send_size - sizeof(*hdr));
	hdr->crc = 0;

	crc = btrfs_crc32c(0, (unsigned char *)sctx->send_buf, sctx->send_size);
	hdr->crc = cpu_to_le32(crc);

	ret = write_buf(sctx->send_filp, sctx->send_buf, sctx->send_size,
			&sctx->send_off);

	sctx->total_send_size += sctx->send_size;
	sctx->cmd_send_size[le16_to_cpu(hdr->cmd)] += sctx->send_size;
	sctx->send_size = 0;

	return ret;
}
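
/*
 * Wire format sketch (not part of the original code): what send_cmd()
 * writes is
 *
 *	a packed struct btrfs_cmd_header (__le32 len, __le16 cmd, __le32 crc)
 *	followed by len bytes of TLV attributes,
 *
 * where crc is a crc32c over the whole command, computed with the crc
 * field zeroed, exactly as done above.
 */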

/*
 * Sends a move instruction to user space
 */
static int send_rename(struct send_ctx *sctx,
		       struct fs_path *from, struct fs_path *to)
{
	int ret;

	verbose_printk("btrfs: send_rename %s -> %s\n", from->start, to->start);

	ret = begin_cmd(sctx, BTRFS_SEND_C_RENAME);
	if (ret < 0)
		goto out;

	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, from);
	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH_TO, to);

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	return ret;
}

/*
 * Sends a link instruction to user space
 */
static int send_link(struct send_ctx *sctx,
		     struct fs_path *path, struct fs_path *lnk)
{
	int ret;

	verbose_printk("btrfs: send_link %s -> %s\n", path->start, lnk->start);

	ret = begin_cmd(sctx, BTRFS_SEND_C_LINK);
	if (ret < 0)
		goto out;

	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH_LINK, lnk);

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	return ret;
}

/*
 * Sends an unlink instruction to user space
 */
static int send_unlink(struct send_ctx *sctx, struct fs_path *path)
{
	int ret;

	verbose_printk("btrfs: send_unlink %s\n", path->start);

	ret = begin_cmd(sctx, BTRFS_SEND_C_UNLINK);
	if (ret < 0)
		goto out;

	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	return ret;
}

/*
 * Sends a rmdir instruction to user space
 */
static int send_rmdir(struct send_ctx *sctx, struct fs_path *path)
{
	int ret;

	verbose_printk("btrfs: send_rmdir %s\n", path->start);

	ret = begin_cmd(sctx, BTRFS_SEND_C_RMDIR);
	if (ret < 0)
		goto out;

	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	return ret;
}

/*
 * Helper function to retrieve some fields from an inode item.
 */
static int __get_inode_info(struct btrfs_root *root, struct btrfs_path *path,
			    u64 ino, u64 *size, u64 *gen, u64 *mode, u64 *uid,
			    u64 *gid, u64 *rdev)
{
	int ret;
	struct btrfs_inode_item *ii;
	struct btrfs_key key;

	key.objectid = ino;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret) {
		if (ret > 0)
			ret = -ENOENT;
		return ret;
	}

	ii = btrfs_item_ptr(path->nodes[0], path->slots[0],
			    struct btrfs_inode_item);
	if (size)
		*size = btrfs_inode_size(path->nodes[0], ii);
	if (gen)
		*gen = btrfs_inode_generation(path->nodes[0], ii);
	if (mode)
		*mode = btrfs_inode_mode(path->nodes[0], ii);
	if (uid)
		*uid = btrfs_inode_uid(path->nodes[0], ii);
	if (gid)
		*gid = btrfs_inode_gid(path->nodes[0], ii);
	if (rdev)
		*rdev = btrfs_inode_rdev(path->nodes[0], ii);

	return ret;
}

static int get_inode_info(struct btrfs_root *root,
			  u64 ino, u64 *size, u64 *gen,
			  u64 *mode, u64 *uid, u64 *gid,
			  u64 *rdev)
{
	struct btrfs_path *path;
	int ret;

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;
	ret = __get_inode_info(root, path, ino, size, gen, mode, uid, gid,
			       rdev);
	btrfs_free_path(path);
	return ret;
}
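
/*
 * Illustrative sketch (not part of the original code): callers that need
 * only some of the inode fields pass NULL for the rest, e.g. fetching just
 * the generation of an inode:
 */
#if 0
static int get_inode_gen_example(struct btrfs_root *root, u64 ino, u64 *gen)
{
	return get_inode_info(root, ino, NULL, gen, NULL, NULL, NULL, NULL);
}
#endif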

typedef int (*iterate_inode_ref_t)(int num, u64 dir, int index,
				   struct fs_path *p,
				   void *ctx);

/*
 * Helper function to iterate the entries in ONE btrfs_inode_ref or
 * btrfs_inode_extref.
 * The iterate callback may return a non-zero value to stop iteration. This can
 * be a negative value for error codes or 1 to simply stop it.
 *
 * path must point to the INODE_REF or INODE_EXTREF when called.
 */
static int iterate_inode_ref(struct btrfs_root *root, struct btrfs_path *path,
			     struct btrfs_key *found_key, int resolve,
			     iterate_inode_ref_t iterate, void *ctx)
{
	struct extent_buffer *eb = path->nodes[0];
	struct btrfs_item *item;
	struct btrfs_inode_ref *iref;
	struct btrfs_inode_extref *extref;
	struct btrfs_path *tmp_path;
	struct fs_path *p;
	u32 cur = 0;
	u32 total;
	int slot = path->slots[0];
	u32 name_len;
	char *start;
	int ret = 0;
	int num = 0;
	int index;
	u64 dir;
	unsigned long name_off;
	unsigned long elem_size;
	unsigned long ptr;

	p = fs_path_alloc_reversed();
	if (!p)
		return -ENOMEM;

	tmp_path = alloc_path_for_send();
	if (!tmp_path) {
		fs_path_free(p);
		return -ENOMEM;
	}

	if (found_key->type == BTRFS_INODE_REF_KEY) {
		ptr = (unsigned long)btrfs_item_ptr(eb, slot,
						    struct btrfs_inode_ref);
		item = btrfs_item_nr(slot);
		total = btrfs_item_size(eb, item);
		elem_size = sizeof(*iref);
	} else {
		ptr = btrfs_item_ptr_offset(eb, slot);
		total = btrfs_item_size_nr(eb, slot);
		elem_size = sizeof(*extref);
	}

	while (cur < total) {
		fs_path_reset(p);

		if (found_key->type == BTRFS_INODE_REF_KEY) {
			iref = (struct btrfs_inode_ref *)(ptr + cur);
			name_len = btrfs_inode_ref_name_len(eb, iref);
			name_off = (unsigned long)(iref + 1);
			index = btrfs_inode_ref_index(eb, iref);
			dir = found_key->offset;
		} else {
			extref = (struct btrfs_inode_extref *)(ptr + cur);
			name_len = btrfs_inode_extref_name_len(eb, extref);
			name_off = (unsigned long)&extref->name;
			index = btrfs_inode_extref_index(eb, extref);
			dir = btrfs_inode_extref_parent(eb, extref);
		}

		if (resolve) {
			start = btrfs_ref_to_path(root, tmp_path, name_len,
						  name_off, eb, dir,
						  p->buf, p->buf_len);
			if (IS_ERR(start)) {
				ret = PTR_ERR(start);
				goto out;
			}
			if (start < p->buf) {
				/* overflow, try again with a larger buffer */
				ret = fs_path_ensure_buf(p,
						p->buf_len + p->buf - start);
				if (ret < 0)
					goto out;
				start = btrfs_ref_to_path(root, tmp_path,
							  name_len, name_off,
							  eb, dir,
							  p->buf, p->buf_len);
				if (IS_ERR(start)) {
					ret = PTR_ERR(start);
					goto out;
				}
				BUG_ON(start < p->buf);
			}
			p->start = start;
		} else {
			ret = fs_path_add_from_extent_buffer(p, eb, name_off,
							     name_len);
			if (ret < 0)
				goto out;
		}

		cur += elem_size + name_len;
		ret = iterate(num, dir, index, p, ctx);
		if (ret)
			goto out;
		num++;
	}

out:
	btrfs_free_path(tmp_path);
	fs_path_free(p);
	return ret;
}
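
/*
 * Illustrative sketch (not part of the original code): a minimal
 * iterate_inode_ref_t callback. Returning 0 continues the iteration, a
 * positive value stops it and a negative value aborts with an error;
 * __copy_first_ref() further below uses the "stop after the first ref"
 * variant.
 */
#if 0
static int count_refs_example(int num, u64 dir, int index, struct fs_path *p,
			      void *ctx)
{
	u64 *count = ctx;

	(*count)++;
	return 0;
}
#endif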

typedef int (*iterate_dir_item_t)(int num, struct btrfs_key *di_key,
				  const char *name, int name_len,
				  const char *data, int data_len,
				  u8 type, void *ctx);

/*
 * Helper function to iterate the entries in ONE btrfs_dir_item.
 * The iterate callback may return a non-zero value to stop iteration. This can
 * be a negative value for error codes or 1 to simply stop it.
 *
 * path must point to the dir item when called.
 */
static int iterate_dir_item(struct btrfs_root *root, struct btrfs_path *path,
			    struct btrfs_key *found_key,
			    iterate_dir_item_t iterate, void *ctx)
{
	int ret = 0;
	struct extent_buffer *eb;
	struct btrfs_item *item;
	struct btrfs_dir_item *di;
	struct btrfs_key di_key;
	char *buf = NULL;
	int buf_len;
	u32 name_len;
	u32 data_len;
	u32 cur;
	u32 len;
	u32 total;
	int slot;
	int num;
	u8 type;

	/*
	 * Start with a small buffer (1 page). If later we end up needing more
	 * space, which can happen for xattrs on a fs with a leaf size greater
	 * than the page size, attempt to increase the buffer. Typically xattr
	 * values are small.
	 */
	buf_len = PATH_MAX;
	buf = kmalloc(buf_len, GFP_NOFS);
	if (!buf) {
		ret = -ENOMEM;
		goto out;
	}

	eb = path->nodes[0];
	slot = path->slots[0];
	item = btrfs_item_nr(slot);
	di = btrfs_item_ptr(eb, slot, struct btrfs_dir_item);
	cur = 0;
	len = 0;
	total = btrfs_item_size(eb, item);

	num = 0;
	while (cur < total) {
		name_len = btrfs_dir_name_len(eb, di);
		data_len = btrfs_dir_data_len(eb, di);
		type = btrfs_dir_type(eb, di);
		btrfs_dir_item_key_to_cpu(eb, di, &di_key);

		if (type == BTRFS_FT_XATTR) {
			if (name_len > XATTR_NAME_MAX) {
				ret = -ENAMETOOLONG;
				goto out;
			}
			if (name_len + data_len > BTRFS_MAX_XATTR_SIZE(root)) {
				ret = -E2BIG;
				goto out;
			}
		} else {
			/*
			 * Path too long
			 */
			if (name_len + data_len > PATH_MAX) {
				ret = -ENAMETOOLONG;
				goto out;
			}
		}

		if (name_len + data_len > buf_len) {
			buf_len = name_len + data_len;
			if (is_vmalloc_addr(buf)) {
				vfree(buf);
				buf = NULL;
			} else {
				char *tmp = krealloc(buf, buf_len,
						GFP_NOFS | __GFP_NOWARN);

				if (!tmp)
					kfree(buf);
				buf = tmp;
			}
			if (!buf) {
				buf = vmalloc(buf_len);
				if (!buf) {
					ret = -ENOMEM;
					goto out;
				}
			}
		}

		read_extent_buffer(eb, buf, (unsigned long)(di + 1),
				   name_len + data_len);

		len = sizeof(*di) + name_len + data_len;
		di = (struct btrfs_dir_item *)((char *)di + len);
		cur += len;

		ret = iterate(num, &di_key, buf, name_len, buf + name_len,
			      data_len, type, ctx);
		if (ret < 0)
			goto out;
		if (ret) {
			ret = 0;
			goto out;
		}

		num++;
	}

out:
	kvfree(buf);
	return ret;
}
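
/*
 * Illustrative sketch (not part of the original code): a minimal
 * iterate_dir_item_t callback that looks for one xattr by name and stops
 * the iteration once it is found.
 */
#if 0
static int find_xattr_example(int num, struct btrfs_key *di_key,
			      const char *name, int name_len,
			      const char *data, int data_len,
			      u8 type, void *ctx)
{
	const char *wanted = ctx;

	if (type == BTRFS_FT_XATTR && name_len == strlen(wanted) &&
	    !memcmp(name, wanted, name_len))
		return 1;	/* positive return value stops the iteration */
	return 0;
}
#endif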

static int __copy_first_ref(int num, u64 dir, int index,
			    struct fs_path *p, void *ctx)
{
	int ret;
	struct fs_path *pt = ctx;

	ret = fs_path_copy(pt, p);
	if (ret < 0)
		return ret;

	/* we want the first only */
	return 1;
}

/*
 * Retrieve the first path of an inode. If an inode has more than one
 * ref/hardlink, this is ignored.
 */
static int get_inode_path(struct btrfs_root *root,
			  u64 ino, struct fs_path *path)
{
	int ret;
	struct btrfs_key key, found_key;
	struct btrfs_path *p;

	p = alloc_path_for_send();
	if (!p)
		return -ENOMEM;

	fs_path_reset(path);

	key.objectid = ino;
	key.type = BTRFS_INODE_REF_KEY;
	key.offset = 0;

	ret = btrfs_search_slot_for_read(root, &key, p, 1, 0);
	if (ret < 0)
		goto out;
	if (ret) {
		ret = 1;
		goto out;
	}
	btrfs_item_key_to_cpu(p->nodes[0], &found_key, p->slots[0]);
	if (found_key.objectid != ino ||
	    (found_key.type != BTRFS_INODE_REF_KEY &&
	     found_key.type != BTRFS_INODE_EXTREF_KEY)) {
		ret = -ENOENT;
		goto out;
	}

	ret = iterate_inode_ref(root, p, &found_key, 1,
				__copy_first_ref, path);
	if (ret < 0)
		goto out;
	ret = 0;

out:
	btrfs_free_path(p);
	return ret;
}

struct backref_ctx {
	struct send_ctx *sctx;

	struct btrfs_path *path;
	/* number of total found references */
	u64 found;

	/*
	 * Used for clones found in send_root. Clones found behind
	 * cur_objectid and cur_offset are not considered as allowed clones.
	 */
	u64 cur_objectid;
	u64 cur_offset;

	/* may be truncated in case it's the last extent in a file */
	u64 extent_len;

	/* Just to check for bugs in backref resolving */
	int found_itself;
};

static int __clone_root_cmp_bsearch(const void *key, const void *elt)
{
	u64 root = (u64)(uintptr_t)key;
	struct clone_root *cr = (struct clone_root *)elt;

	if (root < cr->root->objectid)
		return -1;
	if (root > cr->root->objectid)
		return 1;
	return 0;
}

static int __clone_root_cmp_sort(const void *e1, const void *e2)
{
	struct clone_root *cr1 = (struct clone_root *)e1;
	struct clone_root *cr2 = (struct clone_root *)e2;

	if (cr1->root->objectid < cr2->root->objectid)
		return -1;
	if (cr1->root->objectid > cr2->root->objectid)
		return 1;
	return 0;
}
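
/*
 * Illustrative sketch (not part of the original code): clone_roots is
 * sorted once with __clone_root_cmp_sort() so that each backref can be
 * matched against the accepted clone sources with bsearch(), as
 * __iterate_backrefs() does below.
 */
#if 0
static struct clone_root *lookup_clone_root(struct send_ctx *sctx, u64 root)
{
	sort(sctx->clone_roots, sctx->clone_roots_cnt,
	     sizeof(struct clone_root), __clone_root_cmp_sort, NULL);
	return bsearch((void *)(uintptr_t)root, sctx->clone_roots,
		       sctx->clone_roots_cnt, sizeof(struct clone_root),
		       __clone_root_cmp_bsearch);
}
#endif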

/*
 * Called for every backref that is found for the current extent.
 * Results are collected in sctx->clone_roots->ino/offset/found_refs
 */
static int __iterate_backrefs(u64 ino, u64 offset, u64 root, void *ctx_)
{
	struct backref_ctx *bctx = ctx_;
	struct clone_root *found;
	int ret;
	u64 i_size;

	/* First check if the root is in the list of accepted clone sources */
	found = bsearch((void *)(uintptr_t)root, bctx->sctx->clone_roots,
			bctx->sctx->clone_roots_cnt,
			sizeof(struct clone_root),
			__clone_root_cmp_bsearch);
	if (!found)
		return 0;

	if (found->root == bctx->sctx->send_root &&
	    ino == bctx->cur_objectid &&
	    offset == bctx->cur_offset) {
		bctx->found_itself = 1;
	}

	/*
	 * There are inodes that have extents that lie behind their i_size.
	 * Don't accept clones from these extents.
	 */
	ret = __get_inode_info(found->root, bctx->path, ino, &i_size, NULL, NULL,
			       NULL, NULL, NULL);
	btrfs_release_path(bctx->path);
	if (ret < 0)
		return ret;

	if (offset + bctx->extent_len > i_size)
		return 0;

	/*
	 * Make sure we don't consider clones from send_root that are
	 * behind the current inode/offset.
	 */
	if (found->root == bctx->sctx->send_root) {
		/*
		 * TODO for the moment we don't accept clones from the inode
		 * that is currently being sent. We may change this when
		 * BTRFS_IOC_CLONE_RANGE supports cloning from and to the same
		 * file.
		 */
		if (ino >= bctx->cur_objectid)
			return 0;
#if 0
		if (ino > bctx->cur_objectid)
			return 0;
		if (offset + bctx->extent_len > bctx->cur_offset)
			return 0;
#endif
	}

	bctx->found++;
	found->found_refs++;
	if (ino < found->ino) {
		found->ino = ino;
		found->offset = offset;
	} else if (found->ino == ino) {
		/*
		 * Same extent found more than once in the same file.
		 */
		if (found->offset > offset + bctx->extent_len)
			found->offset = offset;
	}

	return 0;
}
/*
 * Given an inode, offset and extent item, it finds a good clone for a clone
 * instruction. Returns -ENOENT when none could be found. The function makes
 * sure that the returned clone is usable at the point where sending is at the
 * moment. This means, that no clones are accepted which lie behind the current
 * inode+offset.
 *
 * path must point to the extent item when called.
 */
static int find_extent_clone(struct send_ctx *sctx,
			     struct btrfs_path *path,
			     u64 ino, u64 data_offset,
			     u64 ino_size,
			     struct clone_root **found)
{
	int ret;
	int extent_type;
	u64 logical;
	u64 disk_byte;
	u64 num_bytes;
	u64 extent_item_pos;
	u64 flags = 0;
	struct btrfs_file_extent_item *fi;
	struct extent_buffer *eb = path->nodes[0];
	struct backref_ctx *backref_ctx = NULL;
	struct clone_root *cur_clone_root;
	struct btrfs_key found_key;
	struct btrfs_path *tmp_path;
	int compressed;
	u32 i;

	tmp_path = alloc_path_for_send();
	if (!tmp_path)
		return -ENOMEM;

	/* We only use this path under the commit sem */
	tmp_path->need_commit_sem = 0;

	backref_ctx = kmalloc(sizeof(*backref_ctx), GFP_NOFS);
	if (!backref_ctx) {
		ret = -ENOMEM;
		goto out;
	}

	backref_ctx->path = tmp_path;

	if (data_offset >= ino_size) {
		/*
		 * There may be extents that lie behind the file's size.
		 * I at least had this in combination with snapshotting while
		 * writing large files.
		 */
		ret = 0;
		goto out;
	}

	fi = btrfs_item_ptr(eb, path->slots[0],
			    struct btrfs_file_extent_item);
	extent_type = btrfs_file_extent_type(eb, fi);
	if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
		ret = -ENOENT;
		goto out;
	}
	compressed = btrfs_file_extent_compression(eb, fi);

	num_bytes = btrfs_file_extent_num_bytes(eb, fi);
	disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
	if (disk_byte == 0) {
		ret = -ENOENT;
		goto out;
	}
	logical = disk_byte + btrfs_file_extent_offset(eb, fi);

	down_read(&sctx->send_root->fs_info->commit_root_sem);
	ret = extent_from_logical(sctx->send_root->fs_info, disk_byte, tmp_path,
				  &found_key, &flags);
	up_read(&sctx->send_root->fs_info->commit_root_sem);
	btrfs_release_path(tmp_path);

	if (ret < 0)
		goto out;
	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		ret = -EIO;
		goto out;
	}

	/*
	 * Setup the clone roots.
	 */
	for (i = 0; i < sctx->clone_roots_cnt; i++) {
		cur_clone_root = sctx->clone_roots + i;
		cur_clone_root->ino = (u64)-1;
		cur_clone_root->offset = 0;
		cur_clone_root->found_refs = 0;
	}

	backref_ctx->sctx = sctx;
	backref_ctx->found = 0;
	backref_ctx->cur_objectid = ino;
	backref_ctx->cur_offset = data_offset;
	backref_ctx->found_itself = 0;
	backref_ctx->extent_len = num_bytes;

	/*
	 * The last extent of a file may be too large due to page alignment.
	 * We need to adjust extent_len in this case so that the checks in
	 * __iterate_backrefs work.
	 */
	if (data_offset + num_bytes >= ino_size)
		backref_ctx->extent_len = ino_size - data_offset;

	/*
	 * Now collect all backrefs.
	 */
	if (compressed == BTRFS_COMPRESS_NONE)
		extent_item_pos = logical - found_key.objectid;
	else
		extent_item_pos = 0;
	ret = iterate_extent_inodes(sctx->send_root->fs_info,
				    found_key.objectid, extent_item_pos, 1,
				    __iterate_backrefs, backref_ctx);

	if (ret < 0)
		goto out;

	if (!backref_ctx->found_itself) {
		/* found a bug in backref code? */
		ret = -EIO;
		btrfs_err(sctx->send_root->fs_info, "did not find backref in "
				"send_root. inode=%llu, offset=%llu, "
				"disk_byte=%llu found extent=%llu",
				ino, data_offset, disk_byte, found_key.objectid);
		goto out;
	}

	verbose_printk(KERN_DEBUG "btrfs: find_extent_clone: data_offset=%llu, "
		"ino=%llu, "
		"num_bytes=%llu, logical=%llu\n",
		data_offset, ino, num_bytes, logical);

	if (!backref_ctx->found)
		verbose_printk("btrfs: no clones found\n");

	cur_clone_root = NULL;
	for (i = 0; i < sctx->clone_roots_cnt; i++) {
		if (sctx->clone_roots[i].found_refs) {
			if (!cur_clone_root)
				cur_clone_root = sctx->clone_roots + i;
			else if (sctx->clone_roots[i].root == sctx->send_root)
				/* prefer clones from send_root over others */
				cur_clone_root = sctx->clone_roots + i;
		}
	}

	if (cur_clone_root) {
		if (compressed != BTRFS_COMPRESS_NONE) {
			/*
			 * Offsets given by iterate_extent_inodes() are relative
			 * to the start of the extent, we need to add logical
			 * offset from the file extent item.
			 * (See why at backref.c:check_extent_in_eb())
			 */
			cur_clone_root->offset += btrfs_file_extent_offset(eb,
									   fi);
		}
		*found = cur_clone_root;
		ret = 0;
	} else {
		ret = -ENOENT;
	}

out:
	btrfs_free_path(tmp_path);
	kfree(backref_ctx);
	return ret;
}

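/*
 * Summary note (added for clarity, not in the original file): the clone
 * search above proceeds in three steps:
 *
 *   1. Read the file extent item and map its disk bytenr to the extent
 *      tree key via extent_from_logical().
 *   2. Walk every backref of that extent with iterate_extent_inodes(),
 *      letting __iterate_backrefs() record, per allowed clone root, the
 *      lowest inode/offset that references the same bytes.
 *   3. Pick a winner, preferring send_root so the receiver can clone from
 *      data it has already written earlier in this very stream.
 */
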
static int read_symlink(struct btrfs_root *root,
			u64 ino,
			struct fs_path *dest)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_file_extent_item *ei;
	u8 type;
	u8 compression;
	unsigned long off;
	int len;

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	key.objectid = ino;
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = 0;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	BUG_ON(ret);

	ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
			    struct btrfs_file_extent_item);
	type = btrfs_file_extent_type(path->nodes[0], ei);
	compression = btrfs_file_extent_compression(path->nodes[0], ei);
	BUG_ON(type != BTRFS_FILE_EXTENT_INLINE);
	BUG_ON(compression);

	off = btrfs_file_extent_inline_start(ei);
	len = btrfs_file_extent_inline_len(path->nodes[0], path->slots[0], ei);

	ret = fs_path_add_from_extent_buffer(dest, path->nodes[0], off, len);

out:
	btrfs_free_path(path);
	return ret;
}

/*
 * Helper function to generate a file name that is unique in the root of
 * send_root and parent_root. This is used to generate names for orphan inodes.
 */
static int gen_unique_name(struct send_ctx *sctx,
			   u64 ino, u64 gen,
			   struct fs_path *dest)
{
	int ret = 0;
	struct btrfs_path *path;
	struct btrfs_dir_item *di;
	char tmp[64];
	int len;
	u64 idx = 0;

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	while (1) {
		len = snprintf(tmp, sizeof(tmp), "o%llu-%llu-%llu",
			       ino, gen, idx);
		ASSERT(len < sizeof(tmp));

		di = btrfs_lookup_dir_item(NULL, sctx->send_root,
					   path, BTRFS_FIRST_FREE_OBJECTID,
					   tmp, strlen(tmp), 0);
		btrfs_release_path(path);
		if (IS_ERR(di)) {
			ret = PTR_ERR(di);
			goto out;
		}
		if (di) {
			/* not unique, try again */
			idx++;
			continue;
		}

		if (!sctx->parent_root) {
			/* unique */
			ret = 0;
			break;
		}

		di = btrfs_lookup_dir_item(NULL, sctx->parent_root,
					   path, BTRFS_FIRST_FREE_OBJECTID,
					   tmp, strlen(tmp), 0);
		btrfs_release_path(path);
		if (IS_ERR(di)) {
			ret = PTR_ERR(di);
			goto out;
		}
		if (di) {
			/* not unique, try again */
			idx++;
			continue;
		}
		/* unique */
		break;
	}

	ret = fs_path_add(dest, tmp, strlen(tmp));

out:
	btrfs_free_path(path);
	return ret;
}

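/*
 * Example (added for clarity): for inode 261 with generation 5 the loop
 * above first tries the name "o261-5-0"; if a dir item with that name
 * already exists in the top dir of either root, it retries with
 * "o261-5-1", "o261-5-2", ... until the name is free in both roots.
 */
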
enum inode_state {
	inode_state_no_change,
	inode_state_will_create,
	inode_state_did_create,
	inode_state_will_delete,
	inode_state_did_delete,
};

static int get_cur_inode_state(struct send_ctx *sctx, u64 ino, u64 gen)
{
	int ret;
	int left_ret;
	int right_ret;
	u64 left_gen;
	u64 right_gen;

	ret = get_inode_info(sctx->send_root, ino, NULL, &left_gen, NULL, NULL,
			     NULL, NULL);
	if (ret < 0 && ret != -ENOENT)
		goto out;
	left_ret = ret;

	if (!sctx->parent_root) {
		right_ret = -ENOENT;
	} else {
		ret = get_inode_info(sctx->parent_root, ino, NULL, &right_gen,
				     NULL, NULL, NULL, NULL);
		if (ret < 0 && ret != -ENOENT)
			goto out;
		right_ret = ret;
	}

	if (!left_ret && !right_ret) {
		if (left_gen == gen && right_gen == gen) {
			ret = inode_state_no_change;
		} else if (left_gen == gen) {
			if (ino < sctx->send_progress)
				ret = inode_state_did_create;
			else
				ret = inode_state_will_create;
		} else if (right_gen == gen) {
			if (ino < sctx->send_progress)
				ret = inode_state_did_delete;
			else
				ret = inode_state_will_delete;
		} else {
			ret = -ENOENT;
		}
	} else if (!left_ret) {
		if (left_gen == gen) {
			if (ino < sctx->send_progress)
				ret = inode_state_did_create;
			else
				ret = inode_state_will_create;
		} else {
			ret = -ENOENT;
		}
	} else if (!right_ret) {
		if (right_gen == gen) {
			if (ino < sctx->send_progress)
				ret = inode_state_did_delete;
			else
				ret = inode_state_will_delete;
		} else {
			ret = -ENOENT;
		}
	} else {
		ret = -ENOENT;
	}

out:
	return ret;
}

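/*
 * Decision table (added for clarity) for the function above. "left" means
 * the inode exists in send_root with a matching generation, "right" means
 * the same for parent_root, and "processed" means ino < send_progress:
 *
 *	left  right  processed  -> result
 *	yes   yes    -          -> inode_state_no_change
 *	yes   no     yes        -> inode_state_did_create
 *	yes   no     no         -> inode_state_will_create
 *	no    yes    yes        -> inode_state_did_delete
 *	no    yes    no         -> inode_state_will_delete
 *	no    no     -          -> -ENOENT
 */
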
static int is_inode_existent(struct send_ctx *sctx, u64 ino, u64 gen)
{
	int ret;

	ret = get_cur_inode_state(sctx, ino, gen);
	if (ret < 0)
		goto out;

	if (ret == inode_state_no_change ||
	    ret == inode_state_did_create ||
	    ret == inode_state_will_delete)
		ret = 1;
	else
		ret = 0;

out:
	return ret;
}

/*
 * Helper function to lookup a dir item in a dir.
 */
static int lookup_dir_item_inode(struct btrfs_root *root,
				 u64 dir, const char *name, int name_len,
				 u64 *found_inode,
				 u8 *found_type)
{
	int ret = 0;
	struct btrfs_dir_item *di;
	struct btrfs_key key;
	struct btrfs_path *path;

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	di = btrfs_lookup_dir_item(NULL, root, path,
				   dir, name, name_len, 0);
	if (!di) {
		ret = -ENOENT;
		goto out;
	}
	if (IS_ERR(di)) {
		ret = PTR_ERR(di);
		goto out;
	}
	btrfs_dir_item_key_to_cpu(path->nodes[0], di, &key);
	if (key.type == BTRFS_ROOT_ITEM_KEY) {
		ret = -ENOENT;
		goto out;
	}
	*found_inode = key.objectid;
	*found_type = btrfs_dir_type(path->nodes[0], di);

out:
	btrfs_free_path(path);
	return ret;
}

/*
 * Looks up the first btrfs_inode_ref of a given ino. It returns the parent dir,
 * generation of the parent dir and the name of the dir entry.
 */
static int get_first_ref(struct btrfs_root *root, u64 ino,
			 u64 *dir, u64 *dir_gen, struct fs_path *name)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_path *path;
	int len;
	u64 parent_dir;

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	key.objectid = ino;
	key.type = BTRFS_INODE_REF_KEY;
	key.offset = 0;

	ret = btrfs_search_slot_for_read(root, &key, path, 1, 0);
	if (ret < 0)
		goto out;
	if (!ret)
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
	if (ret || found_key.objectid != ino ||
	    (found_key.type != BTRFS_INODE_REF_KEY &&
	     found_key.type != BTRFS_INODE_EXTREF_KEY)) {
		ret = -ENOENT;
		goto out;
	}

	if (found_key.type == BTRFS_INODE_REF_KEY) {
		struct btrfs_inode_ref *iref;
		iref = btrfs_item_ptr(path->nodes[0], path->slots[0],
				      struct btrfs_inode_ref);
		len = btrfs_inode_ref_name_len(path->nodes[0], iref);
		ret = fs_path_add_from_extent_buffer(name, path->nodes[0],
						     (unsigned long)(iref + 1),
						     len);
		parent_dir = found_key.offset;
	} else {
		struct btrfs_inode_extref *extref;
		extref = btrfs_item_ptr(path->nodes[0], path->slots[0],
					struct btrfs_inode_extref);
		len = btrfs_inode_extref_name_len(path->nodes[0], extref);
		ret = fs_path_add_from_extent_buffer(name, path->nodes[0],
					(unsigned long)&extref->name, len);
		parent_dir = btrfs_inode_extref_parent(path->nodes[0], extref);
	}
	if (ret < 0)
		goto out;
	btrfs_release_path(path);

	if (dir_gen) {
		ret = get_inode_info(root, parent_dir, NULL, dir_gen, NULL,
				     NULL, NULL, NULL);
		if (ret < 0)
			goto out;
	}

	*dir = parent_dir;

out:
	btrfs_free_path(path);
	return ret;
}

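/*
 * Background note (added for clarity): with a BTRFS_INODE_REF_KEY item the
 * parent directory's inode number is the key offset and the name is stored
 * right after the btrfs_inode_ref header, which is why the code above reads
 * it from (iref + 1). BTRFS_INODE_EXTREF_KEY items are used when an inode
 * has too many hard links for regular ref items to fit; there the key offset
 * is a name hash, so the parent has to be read out of the item itself with
 * btrfs_inode_extref_parent().
 */
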
static int is_first_ref(struct btrfs_root *root,
			u64 ino, u64 dir,
			const char *name, int name_len)
{
	int ret;
	struct fs_path *tmp_name;
	u64 tmp_dir;

	tmp_name = fs_path_alloc();
	if (!tmp_name)
		return -ENOMEM;

	ret = get_first_ref(root, ino, &tmp_dir, NULL, tmp_name);
	if (ret < 0)
		goto out;

	if (dir != tmp_dir || name_len != fs_path_len(tmp_name)) {
		ret = 0;
		goto out;
	}

	ret = !memcmp(tmp_name->start, name, name_len);

out:
	fs_path_free(tmp_name);
	return ret;
}

/*
 * Used by process_recorded_refs to determine if a new ref would overwrite an
 * already existing ref. In case it detects an overwrite, it returns the
 * inode/gen in who_ino/who_gen.
 * When an overwrite is detected, process_recorded_refs does proper orphanizing
 * to make sure later references to the overwritten inode are possible.
 * Orphanizing is however only required for the first ref of an inode.
 * process_recorded_refs does an additional is_first_ref check to see if
 * orphanizing is really required.
 */
static int will_overwrite_ref(struct send_ctx *sctx, u64 dir, u64 dir_gen,
			      const char *name, int name_len,
			      u64 *who_ino, u64 *who_gen)
{
	int ret = 0;
	u64 gen;
	u64 other_inode = 0;
	u8 other_type = 0;

	if (!sctx->parent_root)
		goto out;

	ret = is_inode_existent(sctx, dir, dir_gen);
	if (ret <= 0)
		goto out;

	/*
	 * If we have a parent root we need to verify that the parent dir was
	 * not deleted and then re-created; if it was, then we have no overwrite
	 * and we can just unlink this entry.
	 */
	if (sctx->parent_root) {
		ret = get_inode_info(sctx->parent_root, dir, NULL, &gen, NULL,
				     NULL, NULL, NULL);
		if (ret < 0 && ret != -ENOENT)
			goto out;
		if (ret) {
			ret = 0;
			goto out;
		}
		if (gen != dir_gen)
			goto out;
	}

	ret = lookup_dir_item_inode(sctx->parent_root, dir, name, name_len,
				    &other_inode, &other_type);
	if (ret < 0 && ret != -ENOENT)
		goto out;
	if (ret) {
		ret = 0;
		goto out;
	}

	/*
	 * Check if the overwritten ref was already processed. If yes, the ref
	 * was already unlinked/moved, so we can safely assume that we will not
	 * overwrite anything at this point in time.
	 */
	if (other_inode > sctx->send_progress) {
		ret = get_inode_info(sctx->parent_root, other_inode, NULL,
				     who_gen, NULL, NULL, NULL, NULL);
		if (ret < 0)
			goto out;

		ret = 1;
		*who_ino = other_inode;
	} else {
		ret = 0;
	}

out:
	return ret;
}

/*
 * Checks if the ref was overwritten by an already processed inode. This is
 * used by __get_cur_name_and_parent to find out if the ref was orphanized and
 * thus the orphan name needs to be used.
 * process_recorded_refs also uses it to avoid unlinking of refs that were
 * overwritten.
 */
static int did_overwrite_ref(struct send_ctx *sctx,
			     u64 dir, u64 dir_gen,
			     u64 ino, u64 ino_gen,
			     const char *name, int name_len)
{
	int ret = 0;
	u64 gen;
	u64 ow_inode;
	u8 other_type;

	if (!sctx->parent_root)
		goto out;

	ret = is_inode_existent(sctx, dir, dir_gen);
	if (ret <= 0)
		goto out;

	/* check if the ref was overwritten by another ref */
	ret = lookup_dir_item_inode(sctx->send_root, dir, name, name_len,
				    &ow_inode, &other_type);
	if (ret < 0 && ret != -ENOENT)
		goto out;
	if (ret) {
		/* was never and will never be overwritten */
		ret = 0;
		goto out;
	}

	ret = get_inode_info(sctx->send_root, ow_inode, NULL, &gen, NULL, NULL,
			     NULL, NULL);
	if (ret < 0)
		goto out;

	if (ow_inode == ino && gen == ino_gen) {
		ret = 0;
		goto out;
	}

	/*
	 * We know that it is or will be overwritten. Check this now.
	 * The current inode being processed might have been the one that caused
	 * inode 'ino' to be orphanized, therefore ow_inode can actually be the
	 * same as sctx->send_progress.
	 */
	if (ow_inode <= sctx->send_progress)
		ret = 1;
	else
		ret = 0;

out:
	return ret;
}

/*
 * Same as did_overwrite_ref, but also checks if it is the first ref of an inode
 * that got overwritten. This is used by process_recorded_refs to determine
 * if it has to use the path as returned by get_cur_path or the orphan name.
 */
static int did_overwrite_first_ref(struct send_ctx *sctx, u64 ino, u64 gen)
{
	int ret = 0;
	struct fs_path *name = NULL;
	u64 dir;
	u64 dir_gen;

	if (!sctx->parent_root)
		goto out;

	name = fs_path_alloc();
	if (!name)
		return -ENOMEM;

	ret = get_first_ref(sctx->parent_root, ino, &dir, &dir_gen, name);
	if (ret < 0)
		goto out;

	ret = did_overwrite_ref(sctx, dir, dir_gen, ino, gen,
				name->start, fs_path_len(name));

out:
	fs_path_free(name);
	return ret;
}

/*
 * Insert a name cache entry. On 32bit kernels the radix tree index is 32bit,
 * so we need to do some special handling in case we have clashes. This function
 * takes care of this with the help of name_cache_entry::radix_list.
 * In case of error, nce is kfreed.
 */
static int name_cache_insert(struct send_ctx *sctx,
			     struct name_cache_entry *nce)
{
	int ret = 0;
	struct list_head *nce_head;

	nce_head = radix_tree_lookup(&sctx->name_cache,
				     (unsigned long)nce->ino);
	if (!nce_head) {
		nce_head = kmalloc(sizeof(*nce_head), GFP_NOFS);
		if (!nce_head) {
			kfree(nce);
			return -ENOMEM;
		}
		INIT_LIST_HEAD(nce_head);

		ret = radix_tree_insert(&sctx->name_cache, nce->ino, nce_head);
		if (ret < 0) {
			kfree(nce_head);
			kfree(nce);
			return ret;
		}
	}
	list_add_tail(&nce->radix_list, nce_head);
	list_add_tail(&nce->list, &sctx->name_cache_list);
	sctx->name_cache_size++;

	return ret;
}

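/*
 * Example (added for clarity): on a 32 bit kernel the radix tree index is an
 * unsigned long, so the 64 bit inode numbers 0x100000001 and 0x1 both
 * truncate to index 1. Both entries then hang off the same nce_head list
 * inserted above, and name_cache_search() disambiguates them by comparing
 * the full cur->ino and cur->gen of each list member.
 */
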
static void name_cache_delete(struct send_ctx *sctx,
			      struct name_cache_entry *nce)
{
	struct list_head *nce_head;

	nce_head = radix_tree_lookup(&sctx->name_cache,
				     (unsigned long)nce->ino);
	if (!nce_head) {
		btrfs_err(sctx->send_root->fs_info,
	      "name_cache_delete lookup failed ino %llu cache size %d, leaking memory",
			  nce->ino, sctx->name_cache_size);
	}

	list_del(&nce->radix_list);
	list_del(&nce->list);
	sctx->name_cache_size--;

	/*
	 * We may not get to the final release of nce_head if the lookup fails
	 */
	if (nce_head && list_empty(nce_head)) {
		radix_tree_delete(&sctx->name_cache, (unsigned long)nce->ino);
		kfree(nce_head);
	}
}

static struct name_cache_entry *name_cache_search(struct send_ctx *sctx,
						  u64 ino, u64 gen)
{
	struct list_head *nce_head;
	struct name_cache_entry *cur;

	nce_head = radix_tree_lookup(&sctx->name_cache, (unsigned long)ino);
	if (!nce_head)
		return NULL;

	list_for_each_entry(cur, nce_head, radix_list) {
		if (cur->ino == ino && cur->gen == gen)
			return cur;
	}
	return NULL;
}

/*
 * Removes the entry from the list and adds it back to the end. This marks the
 * entry as recently used so that name_cache_clean_unused does not remove it.
 */
static void name_cache_used(struct send_ctx *sctx, struct name_cache_entry *nce)
{
	list_del(&nce->list);
	list_add_tail(&nce->list, &sctx->name_cache_list);
}

/*
 * Remove some entries from the beginning of name_cache_list.
 */
static void name_cache_clean_unused(struct send_ctx *sctx)
{
	struct name_cache_entry *nce;

	if (sctx->name_cache_size < SEND_CTX_NAME_CACHE_CLEAN_SIZE)
		return;

	while (sctx->name_cache_size > SEND_CTX_MAX_NAME_CACHE_SIZE) {
		nce = list_entry(sctx->name_cache_list.next,
				 struct name_cache_entry, list);
		name_cache_delete(sctx, nce);
		kfree(nce);
	}
}

static void name_cache_free(struct send_ctx *sctx)
{
	struct name_cache_entry *nce;

	while (!list_empty(&sctx->name_cache_list)) {
		nce = list_entry(sctx->name_cache_list.next,
				 struct name_cache_entry, list);
		name_cache_delete(sctx, nce);
		kfree(nce);
	}
}

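/*
 * Note (added for clarity): name_cache_list doubles as an LRU. Entries are
 * appended on insert and moved back to the tail by name_cache_used(), so
 * name_cache_clean_unused() can evict from the head, i.e. the least recently
 * used entries, until the cache is back under SEND_CTX_MAX_NAME_CACHE_SIZE.
 */
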
/*
 * Used by get_cur_path for each ref up to the root.
 * Returns 0 if it succeeded.
 * Returns 1 if the inode is not existent or got overwritten. In that case, the
 * name is an orphan name. This instructs get_cur_path to stop iterating. If 1
 * is returned, parent_ino/parent_gen are not guaranteed to be valid.
 * Returns <0 in case of error.
 */
static int __get_cur_name_and_parent(struct send_ctx *sctx,
				     u64 ino, u64 gen,
				     u64 *parent_ino,
				     u64 *parent_gen,
				     struct fs_path *dest)
{
	int ret;
	int nce_ret;
	struct name_cache_entry *nce = NULL;

	/*
	 * First check if we already did a call to this function with the same
	 * ino/gen. If yes, check if the cache entry is still up-to-date. If yes
	 * return the cached result.
	 */
	nce = name_cache_search(sctx, ino, gen);
	if (nce) {
		if (ino < sctx->send_progress && nce->need_later_update) {
			name_cache_delete(sctx, nce);
			kfree(nce);
			nce = NULL;
		} else {
			name_cache_used(sctx, nce);
			*parent_ino = nce->parent_ino;
			*parent_gen = nce->parent_gen;
			ret = fs_path_add(dest, nce->name, nce->name_len);
			if (ret < 0)
				goto out;
			ret = nce->ret;
			goto out;
		}
	}

	/*
	 * If the inode is not existent yet, add the orphan name and return 1.
	 * This should only happen for the parent dir that we determine in
	 * __record_new_ref
	 */
	ret = is_inode_existent(sctx, ino, gen);
	if (ret < 0)
		goto out;

	if (!ret) {
		ret = gen_unique_name(sctx, ino, gen, dest);
		if (ret < 0)
			goto out;
		ret = 1;
		goto out_cache;
	}

	/*
	 * Depending on whether the inode was already processed or not, use
	 * send_root or parent_root for ref lookup.
	 */
	if (ino < sctx->send_progress)
		ret = get_first_ref(sctx->send_root, ino,
				    parent_ino, parent_gen, dest);
	else
		ret = get_first_ref(sctx->parent_root, ino,
				    parent_ino, parent_gen, dest);
	if (ret < 0)
		goto out;

	/*
	 * Check if the ref was overwritten by an inode's ref that was processed
	 * earlier. If yes, treat as orphan and return 1.
	 */
	ret = did_overwrite_ref(sctx, *parent_ino, *parent_gen, ino, gen,
				dest->start, dest->end - dest->start);
	if (ret < 0)
		goto out;
	if (ret) {
		fs_path_reset(dest);
		ret = gen_unique_name(sctx, ino, gen, dest);
		if (ret < 0)
			goto out;
		ret = 1;
	}

out_cache:
	/*
	 * Store the result of the lookup in the name cache.
	 */
	nce = kmalloc(sizeof(*nce) + fs_path_len(dest) + 1, GFP_NOFS);
	if (!nce) {
		ret = -ENOMEM;
		goto out;
	}

	nce->ino = ino;
	nce->gen = gen;
	nce->parent_ino = *parent_ino;
	nce->parent_gen = *parent_gen;
	nce->name_len = fs_path_len(dest);
	nce->ret = ret;
	strcpy(nce->name, dest->start);

	if (ino < sctx->send_progress)
		nce->need_later_update = 0;
	else
		nce->need_later_update = 1;

	nce_ret = name_cache_insert(sctx, nce);
	if (nce_ret < 0)
		ret = nce_ret;
	name_cache_clean_unused(sctx);

out:
	return ret;
}

/*
 * Magic happens here. This function returns the first ref to an inode as it
 * would look like while receiving the stream at this point in time.
 * We walk the path up to the root. For every inode in between, we check if it
 * was already processed/sent. If yes, we continue with the parent as found
 * in send_root. If not, we continue with the parent as found in parent_root.
 * If we encounter an inode that was deleted at this point in time, we use the
 * inode's "orphan" name instead of the real name and stop. Same with new inodes
 * that were not created yet and overwritten inodes/refs.
 *
 * When do we have orphan inodes:
 * 1. When an inode is freshly created and thus no valid refs are available yet
 * 2. When a directory lost all its refs (deleted) but still has dir items
 *    inside which were not processed yet (pending for move/delete). If anyone
 *    tried to get the path to the dir items, it would get a path inside that
 *    orphan directory.
 * 3. When an inode is moved around or gets new links, it may overwrite the ref
 *    of an unprocessed inode. If in that case the first ref would be
 *    overwritten, the overwritten inode gets "orphanized". Later when we
 *    process this overwritten inode, it is restored at a new place by moving
 *    the orphan inode.
 *
 * sctx->send_progress tells this function at which point in time receiving
 * would be.
 */
static int get_cur_path(struct send_ctx *sctx, u64 ino, u64 gen,
			struct fs_path *dest)
{
	int ret = 0;
	struct fs_path *name = NULL;
	u64 parent_inode = 0;
	u64 parent_gen = 0;
	int stop = 0;

	name = fs_path_alloc();
	if (!name) {
		ret = -ENOMEM;
		goto out;
	}

	dest->reversed = 1;
	fs_path_reset(dest);

	while (!stop && ino != BTRFS_FIRST_FREE_OBJECTID) {
		struct waiting_dir_move *wdm;

		fs_path_reset(name);

		if (is_waiting_for_rm(sctx, ino)) {
			ret = gen_unique_name(sctx, ino, gen, name);
			if (ret < 0)
				goto out;
			ret = fs_path_add_path(dest, name);
			break;
		}

		wdm = get_waiting_dir_move(sctx, ino);
		if (wdm && wdm->orphanized) {
			ret = gen_unique_name(sctx, ino, gen, name);
			stop = 1;
		} else if (wdm) {
			ret = get_first_ref(sctx->parent_root, ino,
					    &parent_inode, &parent_gen, name);
		} else {
			ret = __get_cur_name_and_parent(sctx, ino, gen,
							&parent_inode,
							&parent_gen, name);
			if (ret)
				stop = 1;
		}

		if (ret < 0)
			goto out;

		ret = fs_path_add_path(dest, name);
		if (ret < 0)
			goto out;

		ino = parent_inode;
		gen = parent_gen;
	}

out:
	fs_path_free(name);
	if (!ret)
		fs_path_unreverse(dest);
	return ret;
}

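/*
 * Example (added for clarity): because dest->reversed is set, the loop above
 * appends path components leaf-first. For an inode at "a/b/c" it adds "c",
 * then "b", then "a", and the final fs_path_unreverse() turns the buffer
 * into the forward path "a/b/c".
 */
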
/*
 * Sends a BTRFS_SEND_C_SUBVOL command/item to userspace
 */
static int send_subvol_begin(struct send_ctx *sctx)
{
	int ret;
	struct btrfs_root *send_root = sctx->send_root;
	struct btrfs_root *parent_root = sctx->parent_root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_root_ref *ref;
	struct extent_buffer *leaf;
	char *name = NULL;
	int namelen;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	name = kmalloc(BTRFS_PATH_NAME_MAX, GFP_NOFS);
	if (!name) {
		btrfs_free_path(path);
		return -ENOMEM;
	}

	key.objectid = send_root->objectid;
	key.type = BTRFS_ROOT_BACKREF_KEY;
	key.offset = 0;

	ret = btrfs_search_slot_for_read(send_root->fs_info->tree_root,
					 &key, path, 1, 0);
	if (ret < 0)
		goto out;
	if (ret) {
		ret = -ENOENT;
		goto out;
	}

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	if (key.type != BTRFS_ROOT_BACKREF_KEY ||
	    key.objectid != send_root->objectid) {
		ret = -ENOENT;
		goto out;
	}
	ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref);
	namelen = btrfs_root_ref_name_len(leaf, ref);
	read_extent_buffer(leaf, name, (unsigned long)(ref + 1), namelen);
	btrfs_release_path(path);

	if (parent_root) {
		ret = begin_cmd(sctx, BTRFS_SEND_C_SNAPSHOT);
		if (ret < 0)
			goto out;
	} else {
		ret = begin_cmd(sctx, BTRFS_SEND_C_SUBVOL);
		if (ret < 0)
			goto out;
	}

	TLV_PUT_STRING(sctx, BTRFS_SEND_A_PATH, name, namelen);
	TLV_PUT_UUID(sctx, BTRFS_SEND_A_UUID,
		     sctx->send_root->root_item.uuid);
	TLV_PUT_U64(sctx, BTRFS_SEND_A_CTRANSID,
		    le64_to_cpu(sctx->send_root->root_item.ctransid));
	if (parent_root) {
		TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID,
			     sctx->parent_root->root_item.uuid);
		TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_CTRANSID,
			    le64_to_cpu(sctx->parent_root->root_item.ctransid));
	}

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	btrfs_free_path(path);
	kfree(name);
	return ret;
}

static int send_truncate(struct send_ctx *sctx, u64 ino, u64 gen, u64 size)
{
	int ret = 0;
	struct fs_path *p;

	verbose_printk("btrfs: send_truncate %llu size=%llu\n", ino, size);

	p = fs_path_alloc();
	if (!p)
		return -ENOMEM;

	ret = begin_cmd(sctx, BTRFS_SEND_C_TRUNCATE);
	if (ret < 0)
		goto out;

	ret = get_cur_path(sctx, ino, gen, p);
	if (ret < 0)
		goto out;
	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
	TLV_PUT_U64(sctx, BTRFS_SEND_A_SIZE, size);

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	fs_path_free(p);
	return ret;
}

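/*
 * Note (added for clarity): send_truncate() shows the command pattern that
 * the following send_xxx() helpers all repeat: begin_cmd() starts a stream
 * command, the TLV_PUT_* macros attach typed attributes (and jump to
 * tlv_put_failure on error, which is why that label exists even when it
 * looks unused), and send_cmd() finalizes the command header and writes the
 * command to the stream.
 */
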
static int send_chmod(struct send_ctx *sctx, u64 ino, u64 gen, u64 mode)
{
	int ret = 0;
	struct fs_path *p;

	verbose_printk("btrfs: send_chmod %llu mode=%llu\n", ino, mode);

	p = fs_path_alloc();
	if (!p)
		return -ENOMEM;

	ret = begin_cmd(sctx, BTRFS_SEND_C_CHMOD);
	if (ret < 0)
		goto out;

	ret = get_cur_path(sctx, ino, gen, p);
	if (ret < 0)
		goto out;
	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
	TLV_PUT_U64(sctx, BTRFS_SEND_A_MODE, mode & 07777);

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	fs_path_free(p);
	return ret;
}

static int send_chown(struct send_ctx *sctx, u64 ino, u64 gen, u64 uid, u64 gid)
{
	int ret = 0;
	struct fs_path *p;

	verbose_printk("btrfs: send_chown %llu uid=%llu, gid=%llu\n", ino, uid, gid);

	p = fs_path_alloc();
	if (!p)
		return -ENOMEM;

	ret = begin_cmd(sctx, BTRFS_SEND_C_CHOWN);
	if (ret < 0)
		goto out;

	ret = get_cur_path(sctx, ino, gen, p);
	if (ret < 0)
		goto out;
	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
	TLV_PUT_U64(sctx, BTRFS_SEND_A_UID, uid);
	TLV_PUT_U64(sctx, BTRFS_SEND_A_GID, gid);

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	fs_path_free(p);
	return ret;
}

static int send_utimes(struct send_ctx *sctx, u64 ino, u64 gen)
{
	int ret = 0;
	struct fs_path *p = NULL;
	struct btrfs_inode_item *ii;
	struct btrfs_path *path = NULL;
	struct extent_buffer *eb;
	struct btrfs_key key;
	int slot;

	verbose_printk("btrfs: send_utimes %llu\n", ino);

	p = fs_path_alloc();
	if (!p)
		return -ENOMEM;

	path = alloc_path_for_send();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	key.objectid = ino;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;
	ret = btrfs_search_slot(NULL, sctx->send_root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	eb = path->nodes[0];
	slot = path->slots[0];
	ii = btrfs_item_ptr(eb, slot, struct btrfs_inode_item);

	ret = begin_cmd(sctx, BTRFS_SEND_C_UTIMES);
	if (ret < 0)
		goto out;

	ret = get_cur_path(sctx, ino, gen, p);
	if (ret < 0)
		goto out;
	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
	TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_ATIME, eb, &ii->atime);
	TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_MTIME, eb, &ii->mtime);
	TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_CTIME, eb, &ii->ctime);
	/* TODO Add otime support when the otime patches get into upstream */

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	fs_path_free(p);
	btrfs_free_path(path);
	return ret;
}

/*
 * Sends a BTRFS_SEND_C_MKXXX or SYMLINK command to user space. We don't have
 * a valid path yet because we did not process the refs yet. So, the inode
 * is created as orphan.
 */
static int send_create_inode(struct send_ctx *sctx, u64 ino)
{
	int ret = 0;
	struct fs_path *p;
	int cmd;
	u64 gen;
	u64 mode;
	u64 rdev;

	verbose_printk("btrfs: send_create_inode %llu\n", ino);

	p = fs_path_alloc();
	if (!p)
		return -ENOMEM;

	if (ino != sctx->cur_ino) {
		ret = get_inode_info(sctx->send_root, ino, NULL, &gen, &mode,
				     NULL, NULL, &rdev);
		if (ret < 0)
			goto out;
	} else {
		gen = sctx->cur_inode_gen;
		mode = sctx->cur_inode_mode;
		rdev = sctx->cur_inode_rdev;
	}

	if (S_ISREG(mode)) {
		cmd = BTRFS_SEND_C_MKFILE;
	} else if (S_ISDIR(mode)) {
		cmd = BTRFS_SEND_C_MKDIR;
	} else if (S_ISLNK(mode)) {
		cmd = BTRFS_SEND_C_SYMLINK;
	} else if (S_ISCHR(mode) || S_ISBLK(mode)) {
		cmd = BTRFS_SEND_C_MKNOD;
	} else if (S_ISFIFO(mode)) {
		cmd = BTRFS_SEND_C_MKFIFO;
	} else if (S_ISSOCK(mode)) {
		cmd = BTRFS_SEND_C_MKSOCK;
	} else {
		printk(KERN_WARNING "btrfs: unexpected inode type %o",
				(int)(mode & S_IFMT));
		ret = -ENOTSUPP;
		goto out;
	}

	ret = begin_cmd(sctx, cmd);
	if (ret < 0)
		goto out;

	ret = gen_unique_name(sctx, ino, gen, p);
	if (ret < 0)
		goto out;

	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
	TLV_PUT_U64(sctx, BTRFS_SEND_A_INO, ino);

	if (S_ISLNK(mode)) {
		fs_path_reset(p);
		ret = read_symlink(sctx->send_root, ino, p);
		if (ret < 0)
			goto out;
		TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH_LINK, p);
	} else if (S_ISCHR(mode) || S_ISBLK(mode) ||
		   S_ISFIFO(mode) || S_ISSOCK(mode)) {
		TLV_PUT_U64(sctx, BTRFS_SEND_A_RDEV, new_encode_dev(rdev));
		TLV_PUT_U64(sctx, BTRFS_SEND_A_MODE, mode);
	}

	ret = send_cmd(sctx);
	if (ret < 0)
		goto out;

tlv_put_failure:
out:
	fs_path_free(p);
	return ret;
}

/*
 * We need some special handling for inodes that get processed before the parent
 * directory got created. See process_recorded_refs for details.
 * This function does the check if we already created the dir out of order.
 */
static int did_create_dir(struct send_ctx *sctx, u64 dir)
{
	int ret = 0;
	struct btrfs_path *path = NULL;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_key di_key;
	struct extent_buffer *eb;
	struct btrfs_dir_item *di;
	int slot;

	path = alloc_path_for_send();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	key.objectid = dir;
	key.type = BTRFS_DIR_INDEX_KEY;
	key.offset = 0;
	ret = btrfs_search_slot(NULL, sctx->send_root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	while (1) {
		eb = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(eb)) {
			ret = btrfs_next_leaf(sctx->send_root, path);
			if (ret < 0) {
				goto out;
			} else if (ret > 0) {
				ret = 0;
				break;
			}
			continue;
		}

		btrfs_item_key_to_cpu(eb, &found_key, slot);
		if (found_key.objectid != key.objectid ||
		    found_key.type != key.type) {
			ret = 0;
			goto out;
		}

		di = btrfs_item_ptr(eb, slot, struct btrfs_dir_item);
		btrfs_dir_item_key_to_cpu(eb, di, &di_key);

		if (di_key.type != BTRFS_ROOT_ITEM_KEY &&
		    di_key.objectid < sctx->send_progress) {
			ret = 1;
			goto out;
		}

		path->slots[0]++;
	}

out:
	btrfs_free_path(path);
	return ret;
}

/*
 * Only creates the inode if it is:
 * 1. Not a directory
 * 2. Or a directory which was not created already due to out of order
 *    directories. See did_create_dir and process_recorded_refs for details.
 */
static int send_create_inode_if_needed(struct send_ctx *sctx)
{
	int ret;

	if (S_ISDIR(sctx->cur_inode_mode)) {
		ret = did_create_dir(sctx, sctx->cur_ino);
		if (ret < 0)
			goto out;
		if (ret) {
			ret = 0;
			goto out;
		}
	}

	ret = send_create_inode(sctx, sctx->cur_ino);
	if (ret < 0)
		goto out;

out:
	return ret;
}

struct recorded_ref {
	struct list_head list;
	char *dir_path;
	char *name;
	struct fs_path *full_path;
	u64 dir;
	u64 dir_gen;
	int dir_path_len;
	int name_len;
};

/*
 * We need to process new refs before deleted refs, but compare_tree gives us
 * everything mixed. So we first record all refs and later process them.
 * This function is a helper to record one ref.
 */
static int __record_ref(struct list_head *head, u64 dir,
			u64 dir_gen, struct fs_path *path)
{
	struct recorded_ref *ref;

	ref = kmalloc(sizeof(*ref), GFP_NOFS);
	if (!ref)
		return -ENOMEM;

	ref->dir = dir;
	ref->dir_gen = dir_gen;
	ref->full_path = path;

	ref->name = (char *)kbasename(ref->full_path->start);
	ref->name_len = ref->full_path->end - ref->name;
	ref->dir_path = ref->full_path->start;
	if (ref->name == ref->full_path->start)
		ref->dir_path_len = 0;
	else
		ref->dir_path_len = ref->full_path->end -
				ref->full_path->start - 1 - ref->name_len;

	list_add_tail(&ref->list, head);
	return 0;
}

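/*
 * Example (added for clarity): for a full_path of "a/b/file" the fields
 * above become name = "file", name_len = 4, dir_path = "a/b" and
 * dir_path_len = 3; for a top level "file" the name points at the start of
 * the buffer and dir_path_len is 0.
 */
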
static int dup_ref(struct recorded_ref *ref, struct list_head *list)
{
	struct recorded_ref *new;

	new = kmalloc(sizeof(*ref), GFP_NOFS);
	if (!new)
		return -ENOMEM;

	new->dir = ref->dir;
	new->dir_gen = ref->dir_gen;
	new->full_path = NULL;
	INIT_LIST_HEAD(&new->list);
	list_add_tail(&new->list, list);
	return 0;
}

static void __free_recorded_refs(struct list_head *head)
{
	struct recorded_ref *cur;

	while (!list_empty(head)) {
		cur = list_entry(head->next, struct recorded_ref, list);
		fs_path_free(cur->full_path);
		list_del(&cur->list);
		kfree(cur);
	}
}

static void free_recorded_refs(struct send_ctx *sctx)
{
	__free_recorded_refs(&sctx->new_refs);
	__free_recorded_refs(&sctx->deleted_refs);
}

/*
 * Renames/moves a file/dir to its orphan name. Used when the first
 * ref of an unprocessed inode gets overwritten and for all non empty
 * directories.
 */
static int orphanize_inode(struct send_ctx *sctx, u64 ino, u64 gen,
			   struct fs_path *path)
{
	int ret;
	struct fs_path *orphan;

	orphan = fs_path_alloc();
	if (!orphan)
		return -ENOMEM;

	ret = gen_unique_name(sctx, ino, gen, orphan);
	if (ret < 0)
		goto out;

	ret = send_rename(sctx, path, orphan);

out:
	fs_path_free(orphan);
	return ret;
}

static struct orphan_dir_info *
add_orphan_dir_info(struct send_ctx *sctx, u64 dir_ino)
{
	struct rb_node **p = &sctx->orphan_dirs.rb_node;
	struct rb_node *parent = NULL;
	struct orphan_dir_info *entry, *odi;

	odi = kmalloc(sizeof(*odi), GFP_NOFS);
	if (!odi)
		return ERR_PTR(-ENOMEM);
	odi->ino = dir_ino;
	odi->gen = 0;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct orphan_dir_info, node);
		if (dir_ino < entry->ino) {
			p = &(*p)->rb_left;
		} else if (dir_ino > entry->ino) {
			p = &(*p)->rb_right;
		} else {
			kfree(odi);
			return entry;
		}
	}

	rb_link_node(&odi->node, parent, p);
	rb_insert_color(&odi->node, &sctx->orphan_dirs);
	return odi;
}

static struct orphan_dir_info *
get_orphan_dir_info(struct send_ctx *sctx, u64 dir_ino)
{
	struct rb_node *n = sctx->orphan_dirs.rb_node;
	struct orphan_dir_info *entry;

	while (n) {
		entry = rb_entry(n, struct orphan_dir_info, node);
		if (dir_ino < entry->ino)
			n = n->rb_left;
		else if (dir_ino > entry->ino)
			n = n->rb_right;
		else
			return entry;
	}
	return NULL;
}

static int is_waiting_for_rm(struct send_ctx *sctx, u64 dir_ino)
{
	struct orphan_dir_info *odi = get_orphan_dir_info(sctx, dir_ino);

	return odi != NULL;
}

static void free_orphan_dir_info(struct send_ctx *sctx,
				 struct orphan_dir_info *odi)
{
	if (!odi)
		return;
	rb_erase(&odi->node, &sctx->orphan_dirs);
	kfree(odi);
}

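/*
 * Note (added for clarity): add_orphan_dir_info()/get_orphan_dir_info()
 * follow the standard kernel rbtree idiom, which this file repeats for
 * waiting_dir_moves and pending_dir_moves as well: walk the rb_node pointers
 * to find the insertion slot (returning the existing entry on a key match),
 * then rb_link_node() + rb_insert_color() to splice the new node in and
 * rebalance the tree.
 */
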
/*
 * Returns 1 if a directory can be removed at this point in time.
 * We check this by iterating all dir items and checking if the inode behind
 * the dir item was already processed.
 */
static int can_rmdir(struct send_ctx *sctx, u64 dir, u64 dir_gen,
		     u64 send_progress)
{
	int ret = 0;
	struct btrfs_root *root = sctx->parent_root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_key loc;
	struct btrfs_dir_item *di;

	/*
	 * Don't try to rmdir the top/root subvolume dir.
	 */
	if (dir == BTRFS_FIRST_FREE_OBJECTID)
		return 0;

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	key.objectid = dir;
	key.type = BTRFS_DIR_INDEX_KEY;
	key.offset = 0;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	while (1) {
		struct waiting_dir_move *dm;

		if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto out;
			else if (ret > 0)
				break;
			continue;
		}
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		if (found_key.objectid != key.objectid ||
		    found_key.type != key.type)
			break;

		di = btrfs_item_ptr(path->nodes[0], path->slots[0],
				    struct btrfs_dir_item);
		btrfs_dir_item_key_to_cpu(path->nodes[0], di, &loc);

		dm = get_waiting_dir_move(sctx, loc.objectid);
		if (dm) {
			struct orphan_dir_info *odi;

			odi = add_orphan_dir_info(sctx, dir);
			if (IS_ERR(odi)) {
				ret = PTR_ERR(odi);
				goto out;
			}
			odi->gen = dir_gen;

			dm->rmdir_ino = dir;
			ret = 0;
			goto out;
		}

		if (loc.objectid > send_progress) {
			ret = 0;
			goto out;
		}

		path->slots[0]++;
	}

	ret = 1;

out:
	btrfs_free_path(path);
	return ret;
}

static int is_waiting_for_move(struct send_ctx *sctx, u64 ino)
{
	struct waiting_dir_move *entry = get_waiting_dir_move(sctx, ino);

	return entry != NULL;
}

static int add_waiting_dir_move(struct send_ctx *sctx, u64 ino, bool orphanized)
{
	struct rb_node **p = &sctx->waiting_dir_moves.rb_node;
	struct rb_node *parent = NULL;
	struct waiting_dir_move *entry, *dm;

	dm = kmalloc(sizeof(*dm), GFP_NOFS);
	if (!dm)
		return -ENOMEM;
	dm->ino = ino;
	dm->rmdir_ino = 0;
	dm->orphanized = orphanized;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct waiting_dir_move, node);
		if (ino < entry->ino) {
			p = &(*p)->rb_left;
		} else if (ino > entry->ino) {
			p = &(*p)->rb_right;
		} else {
			kfree(dm);
			return -EEXIST;
		}
	}

	rb_link_node(&dm->node, parent, p);
	rb_insert_color(&dm->node, &sctx->waiting_dir_moves);
	return 0;
}

static struct waiting_dir_move *
get_waiting_dir_move(struct send_ctx *sctx, u64 ino)
{
	struct rb_node *n = sctx->waiting_dir_moves.rb_node;
	struct waiting_dir_move *entry;

	while (n) {
		entry = rb_entry(n, struct waiting_dir_move, node);
		if (ino < entry->ino)
			n = n->rb_left;
		else if (ino > entry->ino)
			n = n->rb_right;
		else
			return entry;
	}
	return NULL;
}

static void free_waiting_dir_move(struct send_ctx *sctx,
				  struct waiting_dir_move *dm)
{
	if (!dm)
		return;
	rb_erase(&dm->node, &sctx->waiting_dir_moves);
	kfree(dm);
}

static int add_pending_dir_move(struct send_ctx *sctx,
				u64 ino,
				u64 ino_gen,
				u64 parent_ino,
				struct list_head *new_refs,
				struct list_head *deleted_refs,
				const bool is_orphan)
{
	struct rb_node **p = &sctx->pending_dir_moves.rb_node;
	struct rb_node *parent = NULL;
	struct pending_dir_move *entry = NULL, *pm;
	struct recorded_ref *cur;
	int exists = 0;
	int ret;

	pm = kmalloc(sizeof(*pm), GFP_NOFS);
	if (!pm)
		return -ENOMEM;
	pm->parent_ino = parent_ino;
	pm->ino = ino;
	pm->gen = ino_gen;
	pm->is_orphan = is_orphan;
	INIT_LIST_HEAD(&pm->list);
	INIT_LIST_HEAD(&pm->update_refs);
	RB_CLEAR_NODE(&pm->node);

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct pending_dir_move, node);
		if (parent_ino < entry->parent_ino) {
			p = &(*p)->rb_left;
		} else if (parent_ino > entry->parent_ino) {
			p = &(*p)->rb_right;
		} else {
			exists = 1;
			break;
		}
	}

	list_for_each_entry(cur, deleted_refs, list) {
		ret = dup_ref(cur, &pm->update_refs);
		if (ret < 0)
			goto out;
	}
	list_for_each_entry(cur, new_refs, list) {
		ret = dup_ref(cur, &pm->update_refs);
		if (ret < 0)
			goto out;
	}

	ret = add_waiting_dir_move(sctx, pm->ino, is_orphan);
	if (ret)
		goto out;

	if (exists) {
		list_add_tail(&pm->list, &entry->list);
	} else {
		rb_link_node(&pm->node, parent, p);
		rb_insert_color(&pm->node, &sctx->pending_dir_moves);
	}
	ret = 0;
out:
	if (ret) {
		__free_recorded_refs(&pm->update_refs);
		kfree(pm);
	}
	return ret;
}

static struct pending_dir_move *get_pending_dir_moves(struct send_ctx *sctx,
						      u64 parent_ino)
{
	struct rb_node *n = sctx->pending_dir_moves.rb_node;
	struct pending_dir_move *entry;

	while (n) {
		entry = rb_entry(n, struct pending_dir_move, node);
		if (parent_ino < entry->parent_ino)
			n = n->rb_left;
		else if (parent_ino > entry->parent_ino)
			n = n->rb_right;
		else
			return entry;
	}
	return NULL;
}

static int apply_dir_move(struct send_ctx *sctx, struct pending_dir_move *pm)
{
	struct fs_path *from_path = NULL;
	struct fs_path *to_path = NULL;
	struct fs_path *name = NULL;
	u64 orig_progress = sctx->send_progress;
	struct recorded_ref *cur;
	u64 parent_ino, parent_gen;
	struct waiting_dir_move *dm = NULL;
	u64 rmdir_ino = 0;
	int ret;

	name = fs_path_alloc();
	from_path = fs_path_alloc();
	if (!name || !from_path) {
		ret = -ENOMEM;
		goto out;
	}

	dm = get_waiting_dir_move(sctx, pm->ino);
	ASSERT(dm);
	rmdir_ino = dm->rmdir_ino;
	free_waiting_dir_move(sctx, dm);

	if (pm->is_orphan) {
		ret = gen_unique_name(sctx, pm->ino,
				      pm->gen, from_path);
	} else {
		ret = get_first_ref(sctx->parent_root, pm->ino,
				    &parent_ino, &parent_gen, name);
		if (ret < 0)
			goto out;
		ret = get_cur_path(sctx, parent_ino, parent_gen,
				   from_path);
		if (ret < 0)
			goto out;
		ret = fs_path_add_path(from_path, name);
	}
	if (ret < 0)
		goto out;

	sctx->send_progress = sctx->cur_ino + 1;
	fs_path_reset(name);
	to_path = name;
	name = NULL;
	ret = get_cur_path(sctx, pm->ino, pm->gen, to_path);
	if (ret < 0)
		goto out;

	ret = send_rename(sctx, from_path, to_path);
	if (ret < 0)
		goto out;

	if (rmdir_ino) {
		struct orphan_dir_info *odi;

		odi = get_orphan_dir_info(sctx, rmdir_ino);
		if (!odi) {
			/* already deleted */
			goto finish;
		}
		ret = can_rmdir(sctx, rmdir_ino, odi->gen, sctx->cur_ino + 1);
		if (ret < 0)
			goto out;
		if (!ret)
			goto finish;

		name = fs_path_alloc();
		if (!name) {
			ret = -ENOMEM;
			goto out;
		}
		ret = get_cur_path(sctx, rmdir_ino, odi->gen, name);
		if (ret < 0)
			goto out;
		ret = send_rmdir(sctx, name);
		if (ret < 0)
			goto out;
		free_orphan_dir_info(sctx, odi);
	}

finish:
	ret = send_utimes(sctx, pm->ino, pm->gen);
	if (ret < 0)
		goto out;

	/*
	 * After rename/move, need to update the utimes of both new parent(s)
	 * and old parent(s).
	 */
	list_for_each_entry(cur, &pm->update_refs, list) {
		if (cur->dir == rmdir_ino)
			continue;
		ret = send_utimes(sctx, cur->dir, cur->dir_gen);
		if (ret < 0)
			goto out;
	}

out:
	fs_path_free(name);
	fs_path_free(from_path);
	fs_path_free(to_path);
	sctx->send_progress = orig_progress;

	return ret;
}

static void free_pending_move(struct send_ctx *sctx, struct pending_dir_move *m)
{
	if (!list_empty(&m->list))
		list_del(&m->list);
	if (!RB_EMPTY_NODE(&m->node))
		rb_erase(&m->node, &sctx->pending_dir_moves);
	__free_recorded_refs(&m->update_refs);
	kfree(m);
}

static void tail_append_pending_moves(struct pending_dir_move *moves,
				      struct list_head *stack)
{
	if (list_empty(&moves->list)) {
		list_add_tail(&moves->list, stack);
	} else {
		LIST_HEAD(list);

		list_splice_init(&moves->list, &list);
		list_add_tail(&moves->list, stack);
		list_splice_tail(&list, stack);
	}
}

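/*
 * Example (added for clarity): tail_append_pending_moves() flattens one
 * rbtree entry and its chained siblings onto the stack in order. If entry A
 * (for some parent inode) chains siblings B and C through A->list, the
 * splice dance above pushes A, B, C in that order; apply_children_dir_moves()
 * then appends each applied move's own pending children to the tail, so the
 * pending tree is walked level by level.
 */
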
static int apply_children_dir_moves(struct send_ctx *sctx)
{
	struct pending_dir_move *pm;
	struct list_head stack;
	u64 parent_ino = sctx->cur_ino;
	int ret = 0;

	pm = get_pending_dir_moves(sctx, parent_ino);
	if (!pm)
		return 0;

	INIT_LIST_HEAD(&stack);
	tail_append_pending_moves(pm, &stack);

	while (!list_empty(&stack)) {
		pm = list_first_entry(&stack, struct pending_dir_move, list);
		parent_ino = pm->ino;
		ret = apply_dir_move(sctx, pm);
		free_pending_move(sctx, pm);
		if (ret)
			goto out;
		pm = get_pending_dir_moves(sctx, parent_ino);
		if (pm)
			tail_append_pending_moves(pm, &stack);
	}
	return 0;

out:
	while (!list_empty(&stack)) {
		pm = list_first_entry(&stack, struct pending_dir_move, list);
		free_pending_move(sctx, pm);
	}
	return ret;
}

/*
 * We might need to delay a directory rename even when no ancestor directory
 * (in the send root) with a higher inode number than ours (sctx->cur_ino) was
 * renamed. This happens when we rename a directory to the old name (the name
 * in the parent root) of some other unrelated directory that got its rename
 * delayed due to some ancestor with higher number that got renamed.
 *
 * Example:
 *
 * Parent snapshot:
 * .                                       (ino 256)
 * |---- a/                                (ino 257)
 * |     |---- file                        (ino 260)
 * |
 * |---- b/                                (ino 258)
 * |---- c/                                (ino 259)
 *
 * Send snapshot:
 * .                                       (ino 256)
 * |---- a/                                (ino 258)
 * |---- x/                                (ino 259)
 *       |---- y/                          (ino 257)
 *             |----- file                 (ino 260)
 *
 * Here we can not rename 258 from 'b' to 'a' without the rename of inode 257
 * from 'a' to 'x/y' happening first, which in turn depends on the rename of
 * inode 259 from 'c' to 'x'. So the order of rename commands the send stream
 * must issue is:
 *
 * 1 - rename 259 from 'c' to 'x'
 * 2 - rename 257 from 'a' to 'x/y'
 * 3 - rename 258 from 'b' to 'a'
 *
 * Returns 1 if the rename of sctx->cur_ino needs to be delayed, 0 if it can
 * be done right away and < 0 on error.
 */
static int wait_for_dest_dir_move(struct send_ctx *sctx,
				  struct recorded_ref *parent_ref,
				  const bool is_orphan)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_key di_key;
	struct btrfs_dir_item *di;
	u64 left_gen;
	u64 right_gen;
	int ret = 0;

	if (RB_EMPTY_ROOT(&sctx->waiting_dir_moves))
		return 0;

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	key.objectid = parent_ref->dir;
	key.type = BTRFS_DIR_ITEM_KEY;
	key.offset = btrfs_name_hash(parent_ref->name, parent_ref->name_len);

	ret = btrfs_search_slot(NULL, sctx->parent_root, &key, path, 0, 0);
	if (ret < 0) {
		goto out;
	} else if (ret > 0) {
		ret = 0;
		goto out;
	}

	di = btrfs_match_dir_item_name(sctx->parent_root, path,
				       parent_ref->name, parent_ref->name_len);
	if (!di) {
		ret = 0;
		goto out;
	}
	/*
	 * di_key.objectid has the number of the inode that has a dentry in the
	 * parent directory with the same name that sctx->cur_ino is being
	 * renamed to. We need to check if that inode is in the send root as
	 * well and if it is currently marked as an inode with a pending
	 * rename. If it is, we need to delay the rename of sctx->cur_ino as
	 * well, so that it happens after that other inode is renamed.
	 */
	btrfs_dir_item_key_to_cpu(path->nodes[0], di, &di_key);
	if (di_key.type != BTRFS_INODE_ITEM_KEY) {
		ret = 0;
		goto out;
	}

	ret = get_inode_info(sctx->parent_root, di_key.objectid, NULL,
			     &left_gen, NULL, NULL, NULL, NULL);
	if (ret < 0)
		goto out;
	ret = get_inode_info(sctx->send_root, di_key.objectid, NULL,
			     &right_gen, NULL, NULL, NULL, NULL);
	if (ret < 0) {
		if (ret == -ENOENT)
			ret = 0;
		goto out;
	}

	/* Different inode, no need to delay the rename of sctx->cur_ino */
	if (right_gen != left_gen) {
		ret = 0;
		goto out;
	}

	if (is_waiting_for_move(sctx, di_key.objectid)) {
		ret = add_pending_dir_move(sctx,
					   sctx->cur_ino,
					   sctx->cur_inode_gen,
					   di_key.objectid,
					   &sctx->new_refs,
					   &sctx->deleted_refs,
					   is_orphan);
		if (!ret)
			ret = 1;
	}
out:
	btrfs_free_path(path);
	return ret;
}

/*
 * Check if inode ino1 is an ancestor of inode ino2 in the given root.
 * Return 1 if true, 0 if false and < 0 on error.
 */
static int is_ancestor(struct btrfs_root *root,
		       const u64 ino1,
		       const u64 ino1_gen,
		       const u64 ino2,
		       struct fs_path *fs_path)
{
	u64 ino = ino2;

	while (ino > BTRFS_FIRST_FREE_OBJECTID) {
		int ret;
		u64 parent;
		u64 parent_gen;

		fs_path_reset(fs_path);
		ret = get_first_ref(root, ino, &parent, &parent_gen, fs_path);
		if (ret < 0) {
			if (ret == -ENOENT && ino == ino2)
				ret = 0;
			return ret;
		}
		if (parent == ino1)
			return parent_gen == ino1_gen ? 1 : 0;
		ino = parent;
	}
	return 0;
}
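
/*
 * Check if the rename/move of the current directory must wait until some
 * ancestor's rename/move has been issued. Returns 1 if the current inode's
 * rename was queued as a pending move (to be issued later), 0 if it can be
 * issued right away, and < 0 on error.
 */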
static int wait_for_parent_move(struct send_ctx *sctx,
				struct recorded_ref *parent_ref,
				const bool is_orphan)
{
	int ret = 0;
	u64 ino = parent_ref->dir;
	u64 parent_ino_before, parent_ino_after;
	struct fs_path *path_before = NULL;
	struct fs_path *path_after = NULL;
	int len1, len2;

	path_after = fs_path_alloc();
	path_before = fs_path_alloc();
	if (!path_after || !path_before) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * Our current directory inode may not yet be renamed/moved because some
	 * ancestor (immediate or not) has to be renamed/moved first. So find if
	 * such ancestor exists and make sure our own rename/move happens after
	 * that ancestor is processed to avoid path build infinite loops (done
	 * at get_cur_path()).
	 */
	while (ino > BTRFS_FIRST_FREE_OBJECTID) {
		if (is_waiting_for_move(sctx, ino)) {
			/*
			 * If the current inode is an ancestor of ino in the
			 * parent root, we need to delay the rename of the
			 * current inode, otherwise don't delay the rename
			 * because we can end up with a circular dependency
			 * of renames, resulting in some directories never
			 * getting the respective rename operations issued in
			 * the send stream or getting into infinite path build
			 * loops.
			 */
			ret = is_ancestor(sctx->parent_root,
					  sctx->cur_ino, sctx->cur_inode_gen,
					  ino, path_before);
			break;
		}

		fs_path_reset(path_before);
		fs_path_reset(path_after);

		ret = get_first_ref(sctx->send_root, ino, &parent_ino_after,
				    NULL, path_after);
		if (ret < 0)
			goto out;
		ret = get_first_ref(sctx->parent_root, ino, &parent_ino_before,
				    NULL, path_before);
		if (ret < 0 && ret != -ENOENT) {
			goto out;
		} else if (ret == -ENOENT) {
			ret = 0;
			break;
		}

		len1 = fs_path_len(path_before);
		len2 = fs_path_len(path_after);
		if (ino > sctx->cur_ino &&
		    (parent_ino_before != parent_ino_after || len1 != len2 ||
		     memcmp(path_before->start, path_after->start, len1))) {
			ret = 1;
			break;
		}

		ino = parent_ino_after;
	}

out:
	fs_path_free(path_before);
	fs_path_free(path_after);

	if (ret == 1) {
		ret = add_pending_dir_move(sctx,
					   sctx->cur_ino,
					   sctx->cur_inode_gen,
					   ino,
					   &sctx->new_refs,
					   &sctx->deleted_refs,
					   is_orphan);
		if (!ret)
			ret = 1;
	}

	return ret;
}

/*
 * This does all the move/link/unlink/rmdir magic.
 */
static int process_recorded_refs(struct send_ctx *sctx, int *pending_move)
{
	int ret = 0;
	struct recorded_ref *cur;
	struct recorded_ref *cur2;
	struct list_head check_dirs;
	struct fs_path *valid_path = NULL;
	u64 ow_inode = 0;
	u64 ow_gen;
	int did_overwrite = 0;
	int is_orphan = 0;
	u64 last_dir_ino_rm = 0;
	bool can_rename = true;

	verbose_printk("btrfs: process_recorded_refs %llu\n", sctx->cur_ino);

	/*
	 * This should never happen as the root dir always has the same ref
	 * which is always '..'
	 */
	BUG_ON(sctx->cur_ino <= BTRFS_FIRST_FREE_OBJECTID);
	INIT_LIST_HEAD(&check_dirs);

	valid_path = fs_path_alloc();
	if (!valid_path) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * First, check if the first ref of the current inode was overwritten
	 * before. If yes, we know that the current inode was already orphanized
	 * and thus use the orphan name. If not, we can use get_cur_path to
	 * get the path of the first ref as it would look like while receiving
	 * at this point in time.
	 * New inodes are always orphan at the beginning, so force to use the
	 * orphan name in this case.
	 * The first ref is stored in valid_path and will be updated if it
	 * gets moved around.
	 */
	if (!sctx->cur_inode_new) {
		ret = did_overwrite_first_ref(sctx, sctx->cur_ino,
				sctx->cur_inode_gen);
		if (ret < 0)
			goto out;
		if (ret)
			did_overwrite = 1;
	}
	if (sctx->cur_inode_new || did_overwrite) {
		ret = gen_unique_name(sctx, sctx->cur_ino,
				sctx->cur_inode_gen, valid_path);
		if (ret < 0)
			goto out;
		is_orphan = 1;
	} else {
		ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen,
				valid_path);
		if (ret < 0)
			goto out;
	}

	list_for_each_entry(cur, &sctx->new_refs, list) {
		/*
		 * We may have refs where the parent directory does not exist
		 * yet. This happens if the parent directory's inum is higher
		 * than the current inum. To handle this case, we create the
		 * parent directory out of order. But we need to check if this
		 * did already happen before due to other refs in the same dir.
		 */
		ret = get_cur_inode_state(sctx, cur->dir, cur->dir_gen);
		if (ret < 0)
			goto out;
		if (ret == inode_state_will_create) {
			ret = 0;
			/*
			 * First check if any of the current inodes refs did
			 * already create the dir.
			 */
			list_for_each_entry(cur2, &sctx->new_refs, list) {
				if (cur == cur2)
					break;
				if (cur2->dir == cur->dir) {
					ret = 1;
					break;
				}
			}

			/*
			 * If that did not happen, check if a previous inode
			 * did already create the dir.
			 */
			if (!ret)
				ret = did_create_dir(sctx, cur->dir);
			if (ret < 0)
				goto out;
			if (!ret) {
				ret = send_create_inode(sctx, cur->dir);
				if (ret < 0)
					goto out;
			}
		}

		/*
		 * Check if this new ref would overwrite the first ref of
		 * another unprocessed inode. If yes, orphanize the
		 * overwritten inode. If we find an overwritten ref that is
		 * not the first ref, simply unlink it.
		 */
		ret = will_overwrite_ref(sctx, cur->dir, cur->dir_gen,
				cur->name, cur->name_len,
				&ow_inode, &ow_gen);
		if (ret < 0)
			goto out;
		if (ret) {
			ret = is_first_ref(sctx->parent_root,
					   ow_inode, cur->dir, cur->name,
					   cur->name_len);
			if (ret < 0)
				goto out;
			if (ret) {
				struct name_cache_entry *nce;

				ret = orphanize_inode(sctx, ow_inode, ow_gen,
						cur->full_path);
				if (ret < 0)
					goto out;
				/*
				 * Make sure we clear our orphanized inode's
				 * name from the name cache. This is because the
				 * inode ow_inode might be an ancestor of some
				 * other inode that will be orphanized as well
				 * later and has an inode number greater than
				 * sctx->send_progress. We need to prevent
				 * future name lookups from using the old name
				 * and get instead the orphan name.
				 */
				nce = name_cache_search(sctx, ow_inode, ow_gen);
				if (nce) {
					name_cache_delete(sctx, nce);
					kfree(nce);
				}
			} else {
				ret = send_unlink(sctx, cur->full_path);
				if (ret < 0)
					goto out;
			}
		}

		if (S_ISDIR(sctx->cur_inode_mode) && sctx->parent_root) {
			ret = wait_for_dest_dir_move(sctx, cur, is_orphan);
			if (ret < 0)
				goto out;
			if (ret == 1) {
				can_rename = false;
				*pending_move = 1;
			}
		}

		if (S_ISDIR(sctx->cur_inode_mode) && sctx->parent_root &&
		    can_rename) {
			ret = wait_for_parent_move(sctx, cur, is_orphan);
			if (ret < 0)
				goto out;
			if (ret == 1) {
				can_rename = false;
				*pending_move = 1;
			}
		}

		/*
		 * link/move the ref to the new place. If we have an orphan
		 * inode, move it and update valid_path. If not, link or move
		 * it depending on the inode mode.
		 */
		if (is_orphan && can_rename) {
			ret = send_rename(sctx, valid_path, cur->full_path);
			if (ret < 0)
				goto out;
			is_orphan = 0;
			ret = fs_path_copy(valid_path, cur->full_path);
			if (ret < 0)
				goto out;
		} else if (can_rename) {
			if (S_ISDIR(sctx->cur_inode_mode)) {
				/*
				 * Dirs can't be linked, so move it. For moved
				 * dirs, we always have one new and one deleted
				 * ref. The deleted ref is ignored later.
				 */
				ret = send_rename(sctx, valid_path,
						  cur->full_path);
				if (!ret)
					ret = fs_path_copy(valid_path,
							   cur->full_path);
				if (ret < 0)
					goto out;
			} else {
				ret = send_link(sctx, cur->full_path,
						valid_path);
				if (ret < 0)
					goto out;
			}
		}
		ret = dup_ref(cur, &check_dirs);
		if (ret < 0)
			goto out;
	}

	if (S_ISDIR(sctx->cur_inode_mode) && sctx->cur_inode_deleted) {
		/*
		 * Check if we can already rmdir the directory. If not,
		 * orphanize it. For every dir item inside that gets deleted
		 * later, we do this check again and rmdir it then if possible.
		 * See the use of check_dirs for more details.
		 */
		ret = can_rmdir(sctx, sctx->cur_ino, sctx->cur_inode_gen,
				sctx->cur_ino);
		if (ret < 0)
			goto out;
		if (ret) {
			ret = send_rmdir(sctx, valid_path);
			if (ret < 0)
				goto out;
		} else if (!is_orphan) {
			ret = orphanize_inode(sctx, sctx->cur_ino,
					sctx->cur_inode_gen, valid_path);
			if (ret < 0)
				goto out;
			is_orphan = 1;
		}

		list_for_each_entry(cur, &sctx->deleted_refs, list) {
			ret = dup_ref(cur, &check_dirs);
			if (ret < 0)
				goto out;
		}
	} else if (S_ISDIR(sctx->cur_inode_mode) &&
		   !list_empty(&sctx->deleted_refs)) {
		/*
		 * We have a moved dir. Add the old parent to check_dirs
		 */
		cur = list_entry(sctx->deleted_refs.next, struct recorded_ref,
				list);
		ret = dup_ref(cur, &check_dirs);
		if (ret < 0)
			goto out;
	} else if (!S_ISDIR(sctx->cur_inode_mode)) {
		/*
		 * We have a non dir inode. Go through all deleted refs and
		 * unlink them if they were not already overwritten by other
		 * inodes.
		 */
		list_for_each_entry(cur, &sctx->deleted_refs, list) {
			ret = did_overwrite_ref(sctx, cur->dir, cur->dir_gen,
					sctx->cur_ino, sctx->cur_inode_gen,
					cur->name, cur->name_len);
			if (ret < 0)
				goto out;
			if (!ret) {
				ret = send_unlink(sctx, cur->full_path);
				if (ret < 0)
					goto out;
			}
			ret = dup_ref(cur, &check_dirs);
			if (ret < 0)
				goto out;
		}
		/*
		 * If the inode is still orphan, unlink the orphan. This may
		 * happen when a previous inode did overwrite the first ref
		 * of this inode and no new refs were added for the current
		 * inode. Unlinking does not mean that the inode is deleted in
		 * all cases. There may still be links to this inode in other
		 * places.
		 */
		if (is_orphan) {
			ret = send_unlink(sctx, valid_path);
			if (ret < 0)
				goto out;
		}
	}

	/*
	 * We did collect all parent dirs where cur_inode was once located. We
	 * now go through all these dirs and check if they are pending for
	 * deletion and if it's finally possible to perform the rmdir now.
	 * We also update the inode stats of the parent dirs here.
	 */
	list_for_each_entry(cur, &check_dirs, list) {
		/*
		 * In case we had refs into dirs that were not processed yet,
		 * we don't need to do the utime and rmdir logic for these dirs.
		 * The dir will be processed later.
		 */
		if (cur->dir > sctx->cur_ino)
			continue;

		ret = get_cur_inode_state(sctx, cur->dir, cur->dir_gen);
		if (ret < 0)
			goto out;

		if (ret == inode_state_did_create ||
		    ret == inode_state_no_change) {
			/* TODO delayed utimes */
			ret = send_utimes(sctx, cur->dir, cur->dir_gen);
			if (ret < 0)
				goto out;
		} else if (ret == inode_state_did_delete &&
			   cur->dir != last_dir_ino_rm) {
			ret = can_rmdir(sctx, cur->dir, cur->dir_gen,
					sctx->cur_ino);
			if (ret < 0)
				goto out;
			if (ret) {
				ret = get_cur_path(sctx, cur->dir,
						   cur->dir_gen, valid_path);
				if (ret < 0)
					goto out;
				ret = send_rmdir(sctx, valid_path);
				if (ret < 0)
					goto out;
				last_dir_ino_rm = cur->dir;
			}
		}
	}

	ret = 0;

out:
	__free_recorded_refs(&check_dirs);
	free_recorded_refs(sctx);
	fs_path_free(valid_path);
	return ret;
}
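
/*
 * Record a single ref: build its full path (the current path of the parent
 * directory plus the ref's name) and append it to the given list of new or
 * deleted refs.
 */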
static int record_ref(struct btrfs_root *root, int num, u64 dir, int index,
		      struct fs_path *name, void *ctx, struct list_head *refs)
{
	int ret = 0;
	struct send_ctx *sctx = ctx;
	struct fs_path *p;
	u64 gen;

	p = fs_path_alloc();
	if (!p)
		return -ENOMEM;

	ret = get_inode_info(root, dir, NULL, &gen, NULL, NULL,
			NULL, NULL);
	if (ret < 0)
		goto out;

	ret = get_cur_path(sctx, dir, gen, p);
	if (ret < 0)
		goto out;
	ret = fs_path_add_path(p, name);
	if (ret < 0)
		goto out;

	ret = __record_ref(refs, dir, gen, p);

out:
	if (ret)
		fs_path_free(p);
	return ret;
}

static int __record_new_ref(int num, u64 dir, int index,
			    struct fs_path *name,
			    void *ctx)
{
	struct send_ctx *sctx = ctx;
	return record_ref(sctx->send_root, num, dir, index, name,
			  ctx, &sctx->new_refs);
}

static int __record_deleted_ref(int num, u64 dir, int index,
				struct fs_path *name,
				void *ctx)
{
	struct send_ctx *sctx = ctx;
	return record_ref(sctx->parent_root, num, dir, index, name,
			  ctx, &sctx->deleted_refs);
}

static int record_new_ref(struct send_ctx *sctx)
{
	int ret;

	ret = iterate_inode_ref(sctx->send_root, sctx->left_path,
				sctx->cmp_key, 0, __record_new_ref, sctx);
	if (ret < 0)
		goto out;
	ret = 0;

out:
	return ret;
}

static int record_deleted_ref(struct send_ctx *sctx)
{
	int ret;

	ret = iterate_inode_ref(sctx->parent_root, sctx->right_path,
				sctx->cmp_key, 0, __record_deleted_ref, sctx);
	if (ret < 0)
		goto out;
	ret = 0;

out:
	return ret;
}
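
/*
 * Helpers to look up a specific (dir, name) ref among an inode's ref items
 * in a given root. find_iref() returns the index of the matching ref or
 * -ENOENT if no such ref exists there; this is what lets the "changed ref"
 * callbacks below classify a ref as new, deleted or unchanged.
 */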
struct find_ref_ctx {
	u64 dir;
	u64 dir_gen;
	struct btrfs_root *root;
	struct fs_path *name;
	int found_idx;
};

static int __find_iref(int num, u64 dir, int index,
		       struct fs_path *name,
		       void *ctx_)
{
	struct find_ref_ctx *ctx = ctx_;
	u64 dir_gen;
	int ret;

	if (dir == ctx->dir && fs_path_len(name) == fs_path_len(ctx->name) &&
	    strncmp(name->start, ctx->name->start, fs_path_len(name)) == 0) {
		/*
		 * To avoid doing extra lookups we'll only do this if everything
		 * else matches.
		 */
		ret = get_inode_info(ctx->root, dir, NULL, &dir_gen, NULL,
				     NULL, NULL, NULL);
		if (ret)
			return ret;
		if (dir_gen != ctx->dir_gen)
			return 0;
		ctx->found_idx = num;
		return 1;
	}
	return 0;
}

static int find_iref(struct btrfs_root *root,
		     struct btrfs_path *path,
		     struct btrfs_key *key,
		     u64 dir, u64 dir_gen, struct fs_path *name)
{
	int ret;
	struct find_ref_ctx ctx;

	ctx.dir = dir;
	ctx.name = name;
	ctx.dir_gen = dir_gen;
	ctx.found_idx = -1;
	ctx.root = root;

	ret = iterate_inode_ref(root, path, key, 0, __find_iref, &ctx);
	if (ret < 0)
		return ret;

	if (ctx.found_idx == -1)
		return -ENOENT;

	return ctx.found_idx;
}

static int __record_changed_new_ref(int num, u64 dir, int index,
				    struct fs_path *name,
				    void *ctx)
{
	u64 dir_gen;
	int ret;
	struct send_ctx *sctx = ctx;

	ret = get_inode_info(sctx->send_root, dir, NULL, &dir_gen, NULL,
			     NULL, NULL, NULL);
	if (ret)
		return ret;

	ret = find_iref(sctx->parent_root, sctx->right_path,
			sctx->cmp_key, dir, dir_gen, name);
	if (ret == -ENOENT)
		ret = __record_new_ref(num, dir, index, name, sctx);
	else if (ret > 0)
		ret = 0;

	return ret;
}

static int __record_changed_deleted_ref(int num, u64 dir, int index,
					struct fs_path *name,
					void *ctx)
{
	u64 dir_gen;
	int ret;
	struct send_ctx *sctx = ctx;

	ret = get_inode_info(sctx->parent_root, dir, NULL, &dir_gen, NULL,
			     NULL, NULL, NULL);
	if (ret)
		return ret;

	ret = find_iref(sctx->send_root, sctx->left_path, sctx->cmp_key,
			dir, dir_gen, name);
	if (ret == -ENOENT)
		ret = __record_deleted_ref(num, dir, index, name, sctx);
	else if (ret > 0)
		ret = 0;

	return ret;
}

static int record_changed_ref(struct send_ctx *sctx)
{
	int ret = 0;

	ret = iterate_inode_ref(sctx->send_root, sctx->left_path,
			sctx->cmp_key, 0, __record_changed_new_ref, sctx);
	if (ret < 0)
		goto out;
	ret = iterate_inode_ref(sctx->parent_root, sctx->right_path,
			sctx->cmp_key, 0, __record_changed_deleted_ref, sctx);
	if (ret < 0)
		goto out;
	ret = 0;

out:
	return ret;
}

/*
 * Record and process all refs at once. Needed when an inode changes the
 * generation number, which means that it was deleted and recreated.
 */
static int process_all_refs(struct send_ctx *sctx,
			    enum btrfs_compare_tree_result cmd)
{
	int ret;
	struct btrfs_root *root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *eb;
	int slot;
	iterate_inode_ref_t cb;
	int pending_move = 0;

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	if (cmd == BTRFS_COMPARE_TREE_NEW) {
		root = sctx->send_root;
		cb = __record_new_ref;
	} else if (cmd == BTRFS_COMPARE_TREE_DELETED) {
		root = sctx->parent_root;
		cb = __record_deleted_ref;
	} else {
		btrfs_err(sctx->send_root->fs_info,
				"Wrong command %d in process_all_refs", cmd);
		ret = -EINVAL;
		goto out;
	}

	key.objectid = sctx->cmp_key->objectid;
	key.type = BTRFS_INODE_REF_KEY;
	key.offset = 0;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	while (1) {
		eb = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(eb)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto out;
			else if (ret > 0)
				break;
			continue;
		}

		btrfs_item_key_to_cpu(eb, &found_key, slot);

		if (found_key.objectid != key.objectid ||
		    (found_key.type != BTRFS_INODE_REF_KEY &&
		     found_key.type != BTRFS_INODE_EXTREF_KEY))
			break;

		ret = iterate_inode_ref(root, path, &found_key, 0, cb, sctx);
		if (ret < 0)
			goto out;

		path->slots[0]++;
	}
	btrfs_release_path(path);

	ret = process_recorded_refs(sctx, &pending_move);
	/* Only applicable to an incremental send. */
	ASSERT(pending_move == 0);

out:
	btrfs_free_path(path);
	return ret;
}

static int send_set_xattr(struct send_ctx *sctx,
			  struct fs_path *path,
			  const char *name, int name_len,
			  const char *data, int data_len)
{
	int ret = 0;

	ret = begin_cmd(sctx, BTRFS_SEND_C_SET_XATTR);
	if (ret < 0)
		goto out;

	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
	TLV_PUT_STRING(sctx, BTRFS_SEND_A_XATTR_NAME, name, name_len);
	TLV_PUT(sctx, BTRFS_SEND_A_XATTR_DATA, data, data_len);

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	return ret;
}

static int send_remove_xattr(struct send_ctx *sctx,
			     struct fs_path *path,
			     const char *name, int name_len)
{
	int ret = 0;

	ret = begin_cmd(sctx, BTRFS_SEND_C_REMOVE_XATTR);
	if (ret < 0)
		goto out;

	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
	TLV_PUT_STRING(sctx, BTRFS_SEND_A_XATTR_NAME, name, name_len);

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	return ret;
}

static int __process_new_xattr(int num, struct btrfs_key *di_key,
			       const char *name, int name_len,
			       const char *data, int data_len,
			       u8 type, void *ctx)
{
	int ret;
	struct send_ctx *sctx = ctx;
	struct fs_path *p;
	posix_acl_xattr_header dummy_acl;

	p = fs_path_alloc();
	if (!p)
		return -ENOMEM;

	/*
	 * This hack is needed because empty acls are stored as zero byte
	 * data in xattrs. The problem with that is that receiving these zero
	 * byte acls will fail later. To fix this, we send a dummy acl list
	 * that only contains the version number and no entries.
	 */
	if (!strncmp(name, XATTR_NAME_POSIX_ACL_ACCESS, name_len) ||
	    !strncmp(name, XATTR_NAME_POSIX_ACL_DEFAULT, name_len)) {
		if (data_len == 0) {
			dummy_acl.a_version =
					cpu_to_le32(POSIX_ACL_XATTR_VERSION);
			data = (char *)&dummy_acl;
			data_len = sizeof(dummy_acl);
		}
	}

	ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
	if (ret < 0)
		goto out;

	ret = send_set_xattr(sctx, p, name, name_len, data, data_len);

out:
	fs_path_free(p);
	return ret;
}

static int __process_deleted_xattr(int num, struct btrfs_key *di_key,
				   const char *name, int name_len,
				   const char *data, int data_len,
				   u8 type, void *ctx)
{
	int ret;
	struct send_ctx *sctx = ctx;
	struct fs_path *p;

	p = fs_path_alloc();
	if (!p)
		return -ENOMEM;

	ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
	if (ret < 0)
		goto out;

	ret = send_remove_xattr(sctx, p, name, name_len);

out:
	fs_path_free(p);
	return ret;
}

static int process_new_xattr(struct send_ctx *sctx)
{
	int ret = 0;

	ret = iterate_dir_item(sctx->send_root, sctx->left_path,
			       sctx->cmp_key, __process_new_xattr, sctx);

	return ret;
}

static int process_deleted_xattr(struct send_ctx *sctx)
{
	int ret;

	ret = iterate_dir_item(sctx->parent_root, sctx->right_path,
			       sctx->cmp_key, __process_deleted_xattr, sctx);

	return ret;
}
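
/*
 * Helpers to look up an xattr by name in a dir item of a given root.
 * find_xattr() returns the xattr's index and optionally a duplicated copy
 * of its data, or -ENOENT if no xattr with that name exists there.
 */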
struct find_xattr_ctx {
	const char *name;
	int name_len;
	int found_idx;
	char *found_data;
	int found_data_len;
};

static int __find_xattr(int num, struct btrfs_key *di_key,
			const char *name, int name_len,
			const char *data, int data_len,
			u8 type, void *vctx)
{
	struct find_xattr_ctx *ctx = vctx;

	if (name_len == ctx->name_len &&
	    strncmp(name, ctx->name, name_len) == 0) {
		ctx->found_idx = num;
		ctx->found_data_len = data_len;
		ctx->found_data = kmemdup(data, data_len, GFP_NOFS);
		if (!ctx->found_data)
			return -ENOMEM;
		return 1;
	}
	return 0;
}

static int find_xattr(struct btrfs_root *root,
		      struct btrfs_path *path,
		      struct btrfs_key *key,
		      const char *name, int name_len,
		      char **data, int *data_len)
{
	int ret;
	struct find_xattr_ctx ctx;

	ctx.name = name;
	ctx.name_len = name_len;
	ctx.found_idx = -1;
	ctx.found_data = NULL;
	ctx.found_data_len = 0;

	ret = iterate_dir_item(root, path, key, __find_xattr, &ctx);
	if (ret < 0)
		return ret;

	if (ctx.found_idx == -1)
		return -ENOENT;
	if (data) {
		*data = ctx.found_data;
		*data_len = ctx.found_data_len;
	} else {
		kfree(ctx.found_data);
	}
	return ctx.found_idx;
}

static int __process_changed_new_xattr(int num, struct btrfs_key *di_key,
				       const char *name, int name_len,
				       const char *data, int data_len,
				       u8 type, void *ctx)
{
	int ret;
	struct send_ctx *sctx = ctx;
	char *found_data = NULL;
	int found_data_len = 0;

	ret = find_xattr(sctx->parent_root, sctx->right_path,
			 sctx->cmp_key, name, name_len, &found_data,
			 &found_data_len);
	if (ret == -ENOENT) {
		ret = __process_new_xattr(num, di_key, name, name_len, data,
				data_len, type, ctx);
	} else if (ret >= 0) {
		if (data_len != found_data_len ||
		    memcmp(data, found_data, data_len)) {
			ret = __process_new_xattr(num, di_key, name, name_len,
					data, data_len, type, ctx);
		} else {
			ret = 0;
		}
	}

	kfree(found_data);
	return ret;
}

static int __process_changed_deleted_xattr(int num, struct btrfs_key *di_key,
					   const char *name, int name_len,
					   const char *data, int data_len,
					   u8 type, void *ctx)
{
	int ret;
	struct send_ctx *sctx = ctx;

	ret = find_xattr(sctx->send_root, sctx->left_path, sctx->cmp_key,
			 name, name_len, NULL, NULL);
	if (ret == -ENOENT)
		ret = __process_deleted_xattr(num, di_key, name, name_len, data,
				data_len, type, ctx);
	else if (ret >= 0)
		ret = 0;

	return ret;
}

static int process_changed_xattr(struct send_ctx *sctx)
{
	int ret = 0;

	ret = iterate_dir_item(sctx->send_root, sctx->left_path,
			sctx->cmp_key, __process_changed_new_xattr, sctx);
	if (ret < 0)
		goto out;
	ret = iterate_dir_item(sctx->parent_root, sctx->right_path,
			sctx->cmp_key, __process_changed_deleted_xattr, sctx);

out:
	return ret;
}

static int process_all_new_xattrs(struct send_ctx *sctx)
{
	int ret;
	struct btrfs_root *root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *eb;
	int slot;

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	root = sctx->send_root;

	key.objectid = sctx->cmp_key->objectid;
	key.type = BTRFS_XATTR_ITEM_KEY;
	key.offset = 0;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	while (1) {
		eb = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(eb)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0) {
				goto out;
			} else if (ret > 0) {
				ret = 0;
				break;
			}
			continue;
		}

		btrfs_item_key_to_cpu(eb, &found_key, slot);
		if (found_key.objectid != key.objectid ||
		    found_key.type != key.type) {
			ret = 0;
			goto out;
		}

		ret = iterate_dir_item(root, path, &found_key,
				       __process_new_xattr, sctx);
		if (ret < 0)
			goto out;

		path->slots[0]++;
	}

out:
	btrfs_free_path(path);
	return ret;
}
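
/*
 * Read up to @len bytes at file offset @offset of the current inode into
 * sctx->read_buf, going through the page cache (with readahead). Returns
 * the number of bytes read, which may be less than @len when the range
 * extends beyond i_size, or a negative error.
 */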
static ssize_t fill_read_buf(struct send_ctx *sctx, u64 offset, u32 len)
{
	struct btrfs_root *root = sctx->send_root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct inode *inode;
	struct page *page;
	char *addr;
	struct btrfs_key key;
	pgoff_t index = offset >> PAGE_CACHE_SHIFT;
	pgoff_t last_index;
	unsigned pg_offset = offset & ~PAGE_CACHE_MASK;
	ssize_t ret = 0;

	key.objectid = sctx->cur_ino;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;

	inode = btrfs_iget(fs_info->sb, &key, root, NULL);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	/* Clamp the read length to i_size. */
	if (offset + len > i_size_read(inode)) {
		if (offset > i_size_read(inode))
			len = 0;
		else
			len = i_size_read(inode) - offset;
	}
	if (len == 0)
		goto out;

	last_index = (offset + len - 1) >> PAGE_CACHE_SHIFT;

	/* initial readahead */
	memset(&sctx->ra, 0, sizeof(struct file_ra_state));
	file_ra_state_init(&sctx->ra, inode->i_mapping);
	btrfs_force_ra(inode->i_mapping, &sctx->ra, NULL, index,
		       last_index - index + 1);

	while (index <= last_index) {
		unsigned cur_len = min_t(unsigned, len,
					 PAGE_CACHE_SIZE - pg_offset);
		page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
		if (!page) {
			ret = -ENOMEM;
			break;
		}

		if (!PageUptodate(page)) {
			btrfs_readpage(NULL, page);
			lock_page(page);
			if (!PageUptodate(page)) {
				unlock_page(page);
				page_cache_release(page);
				ret = -EIO;
				break;
			}
		}

		addr = kmap(page);
		memcpy(sctx->read_buf + ret, addr + pg_offset, cur_len);
		kunmap(page);

		unlock_page(page);
		page_cache_release(page);
		index++;
		pg_offset = 0;
		len -= cur_len;
		ret += cur_len;
	}
out:
	iput(inode);
	return ret;
}

/*
 * Read some bytes from the current inode/file and send a write command to
 * user space.
 */
static int send_write(struct send_ctx *sctx, u64 offset, u32 len)
{
	int ret = 0;
	struct fs_path *p;
	ssize_t num_read = 0;

	p = fs_path_alloc();
	if (!p)
		return -ENOMEM;

	verbose_printk("btrfs: send_write offset=%llu, len=%d\n", offset, len);

	num_read = fill_read_buf(sctx, offset, len);
	if (num_read <= 0) {
		if (num_read < 0)
			ret = num_read;
		goto out;
	}

	ret = begin_cmd(sctx, BTRFS_SEND_C_WRITE);
	if (ret < 0)
		goto out;

	ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
	if (ret < 0)
		goto out;

	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
	TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
	TLV_PUT(sctx, BTRFS_SEND_A_DATA, sctx->read_buf, num_read);

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	fs_path_free(p);
	if (ret < 0)
		return ret;
	return num_read;
}

/*
 * Send a clone command to user space.
 */
static int send_clone(struct send_ctx *sctx,
		      u64 offset, u32 len,
		      struct clone_root *clone_root)
{
	int ret = 0;
	struct fs_path *p;
	u64 gen;

	verbose_printk("btrfs: send_clone offset=%llu, len=%d, clone_root=%llu, "
		       "clone_inode=%llu, clone_offset=%llu\n", offset, len,
		       clone_root->root->objectid, clone_root->ino,
		       clone_root->offset);

	p = fs_path_alloc();
	if (!p)
		return -ENOMEM;

	ret = begin_cmd(sctx, BTRFS_SEND_C_CLONE);
	if (ret < 0)
		goto out;

	ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
	if (ret < 0)
		goto out;

	TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
	TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_LEN, len);
	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);

	if (clone_root->root == sctx->send_root) {
		ret = get_inode_info(sctx->send_root, clone_root->ino, NULL,
				&gen, NULL, NULL, NULL, NULL);
		if (ret < 0)
			goto out;
		ret = get_cur_path(sctx, clone_root->ino, gen, p);
	} else {
		ret = get_inode_path(clone_root->root, clone_root->ino, p);
	}
	if (ret < 0)
		goto out;

	TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID,
		     clone_root->root->root_item.uuid);
	TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_CTRANSID,
		    le64_to_cpu(clone_root->root->root_item.ctransid));
	TLV_PUT_PATH(sctx, BTRFS_SEND_A_CLONE_PATH, p);
	TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_OFFSET,
		    clone_root->offset);

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	fs_path_free(p);
	return ret;
}

/*
 * Send an update extent command to user space.
 */
static int send_update_extent(struct send_ctx *sctx,
			      u64 offset, u32 len)
{
	int ret = 0;
	struct fs_path *p;

	p = fs_path_alloc();
	if (!p)
		return -ENOMEM;

	ret = begin_cmd(sctx, BTRFS_SEND_C_UPDATE_EXTENT);
	if (ret < 0)
		goto out;

	ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
	if (ret < 0)
		goto out;

	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
	TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
	TLV_PUT_U64(sctx, BTRFS_SEND_A_SIZE, len);

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	fs_path_free(p);
	return ret;
}
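
/*
 * Emit a hole as a series of write commands filled with zeroes, from the
 * end of the last extent sent (sctx->cur_inode_last_extent) up to @end, in
 * chunks of at most BTRFS_SEND_READ_SIZE. The stream format at this
 * revision has no dedicated hole-punch command, so explicit zero writes
 * are used to clear stale data on the receiving side.
 */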
static int send_hole(struct send_ctx *sctx, u64 end)
{
	struct fs_path *p = NULL;
	u64 offset = sctx->cur_inode_last_extent;
	u64 len;
	int ret = 0;

	p = fs_path_alloc();
	if (!p)
		return -ENOMEM;
	ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
	if (ret < 0)
		goto tlv_put_failure;
	memset(sctx->read_buf, 0, BTRFS_SEND_READ_SIZE);
	while (offset < end) {
		len = min_t(u64, end - offset, BTRFS_SEND_READ_SIZE);

		ret = begin_cmd(sctx, BTRFS_SEND_C_WRITE);
		if (ret < 0)
			break;
		TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
		TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
		TLV_PUT(sctx, BTRFS_SEND_A_DATA, sctx->read_buf, len);
		ret = send_cmd(sctx);
		if (ret < 0)
			break;
		offset += len;
	}
tlv_put_failure:
	fs_path_free(p);
	return ret;
}
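
/*
 * Send the data of one file extent item, picking the cheapest
 * representation: a clone command when a suitable clone source was found
 * and the range is block aligned, a plain update-extent command when the
 * stream omits file data (BTRFS_SEND_FLAG_NO_FILE_DATA), or a series of
 * write commands of at most BTRFS_SEND_READ_SIZE bytes otherwise.
 */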
static int send_write_or_clone(struct send_ctx *sctx,
			       struct btrfs_path *path,
			       struct btrfs_key *key,
			       struct clone_root *clone_root)
{
	int ret = 0;
	struct btrfs_file_extent_item *ei;
	u64 offset = key->offset;
	u64 pos = 0;
	u64 len;
	u32 l;
	u8 type;
	u64 bs = sctx->send_root->fs_info->sb->s_blocksize;

	ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
			struct btrfs_file_extent_item);
	type = btrfs_file_extent_type(path->nodes[0], ei);
	if (type == BTRFS_FILE_EXTENT_INLINE) {
		len = btrfs_file_extent_inline_len(path->nodes[0],
						   path->slots[0], ei);
		/*
		 * it is possible the inline item won't cover the whole page,
		 * but there may be items after this page. Make
		 * sure to send the whole thing
		 */
		len = PAGE_CACHE_ALIGN(len);
	} else {
		len = btrfs_file_extent_num_bytes(path->nodes[0], ei);
	}

	if (offset + len > sctx->cur_inode_size)
		len = sctx->cur_inode_size - offset;
	if (len == 0) {
		ret = 0;
		goto out;
	}

	if (clone_root && IS_ALIGNED(offset + len, bs)) {
		ret = send_clone(sctx, offset, len, clone_root);
	} else if (sctx->flags & BTRFS_SEND_FLAG_NO_FILE_DATA) {
		ret = send_update_extent(sctx, offset, len);
	} else {
		while (pos < len) {
			l = len - pos;
			if (l > BTRFS_SEND_READ_SIZE)
				l = BTRFS_SEND_READ_SIZE;
			ret = send_write(sctx, pos + offset, l);
			if (ret < 0)
				goto out;
			if (!ret)
				break;
			pos += ret;
		}
		ret = 0;
	}
out:
	return ret;
}

static int is_extent_unchanged(struct send_ctx *sctx,
			       struct btrfs_path *left_path,
			       struct btrfs_key *ekey)
{
	int ret = 0;
	struct btrfs_key key;
	struct btrfs_path *path = NULL;
	struct extent_buffer *eb;
	int slot;
	struct btrfs_key found_key;
	struct btrfs_file_extent_item *ei;
	u64 left_disknr;
	u64 right_disknr;
	u64 left_offset;
	u64 right_offset;
	u64 left_offset_fixed;
	u64 left_len;
	u64 right_len;
	u64 left_gen;
	u64 right_gen;
	u8 left_type;
	u8 right_type;

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	eb = left_path->nodes[0];
	slot = left_path->slots[0];
	ei = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
	left_type = btrfs_file_extent_type(eb, ei);

	if (left_type != BTRFS_FILE_EXTENT_REG) {
		ret = 0;
		goto out;
	}
	left_disknr = btrfs_file_extent_disk_bytenr(eb, ei);
	left_len = btrfs_file_extent_num_bytes(eb, ei);
	left_offset = btrfs_file_extent_offset(eb, ei);
	left_gen = btrfs_file_extent_generation(eb, ei);

	/*
	 * Following comments will refer to these graphics. L is the left
	 * extent which we are checking at the moment. 1-8 are the right
	 * extents that we iterate.
	 *
	 *       |-----L-----|
	 * |-1-|-2a-|-3-|-4-|-5-|-6-|
	 *
	 *       |-----L-----|
	 * |--1--|-2b-|...(same as above)
	 *
	 * Alternative situation. Happens on files where extents got split.
	 *       |-----L-----|
	 * |-----------7-----------|-6-|
	 *
	 * Alternative situation. Happens on files which got larger.
	 *       |-----L-----|
	 * |-8-|
	 * Nothing follows after 8.
	 */

	key.objectid = ekey->objectid;
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = ekey->offset;
	ret = btrfs_search_slot_for_read(sctx->parent_root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret) {
		ret = 0;
		goto out;
	}

	/*
	 * Handle special case where the right side has no extents at all.
	 */
	eb = path->nodes[0];
	slot = path->slots[0];
	btrfs_item_key_to_cpu(eb, &found_key, slot);
	if (found_key.objectid != key.objectid ||
	    found_key.type != key.type) {
		/* If we're a hole then just pretend nothing changed */
		ret = (left_disknr) ? 0 : 1;
		goto out;
	}

	/*
	 * We're now on 2a, 2b or 7.
	 */
	key = found_key;
	while (key.offset < ekey->offset + left_len) {
		ei = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
		right_type = btrfs_file_extent_type(eb, ei);
		if (right_type != BTRFS_FILE_EXTENT_REG) {
			ret = 0;
			goto out;
		}

		right_disknr = btrfs_file_extent_disk_bytenr(eb, ei);
		right_len = btrfs_file_extent_num_bytes(eb, ei);
		right_offset = btrfs_file_extent_offset(eb, ei);
		right_gen = btrfs_file_extent_generation(eb, ei);

		/*
		 * Are we at extent 8? If yes, we know the extent is changed.
		 * This may only happen on the first iteration.
		 */
		if (found_key.offset + right_len <= ekey->offset) {
			/* If we're a hole just pretend nothing changed */
			ret = (left_disknr) ? 0 : 1;
			goto out;
		}

		left_offset_fixed = left_offset;
		if (key.offset < ekey->offset) {
			/* Fix the right offset for 2a and 7. */
			right_offset += ekey->offset - key.offset;
		} else {
			/* Fix the left offset for all behind 2a and 2b */
			left_offset_fixed += key.offset - ekey->offset;
		}

		/*
		 * Check if we have the same extent.
		 */
		if (left_disknr != right_disknr ||
		    left_offset_fixed != right_offset ||
		    left_gen != right_gen) {
			ret = 0;
			goto out;
		}

		/*
		 * Go to the next extent.
		 */
		ret = btrfs_next_item(sctx->parent_root, path);
		if (ret < 0)
			goto out;
		if (!ret) {
			eb = path->nodes[0];
			slot = path->slots[0];
			btrfs_item_key_to_cpu(eb, &found_key, slot);
		}
		if (ret || found_key.objectid != key.objectid ||
		    found_key.type != key.type) {
			key.offset += right_len;
			break;
		}
		if (found_key.offset != key.offset + right_len) {
			ret = 0;
			goto out;
		}
		key = found_key;
	}

	/*
	 * We're now behind the left extent (treat as unchanged) or at the end
	 * of the right side (treat as changed).
	 */
	if (key.offset >= ekey->offset + left_len)
		ret = 1;
	else
		ret = 0;

out:
	btrfs_free_path(path);
	return ret;
}
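
/*
 * Find the end offset of the last extent at or before @offset for the
 * current inode in the send root and store it in
 * sctx->cur_inode_last_extent, so hole detection knows where previously
 * processed data ends.
 */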
static int get_last_extent(struct send_ctx *sctx, u64 offset)
{
	struct btrfs_path *path;
	struct btrfs_root *root = sctx->send_root;
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	u64 extent_end;
	u8 type;
	int ret;

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	sctx->cur_inode_last_extent = 0;

	key.objectid = sctx->cur_ino;
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = offset;
	ret = btrfs_search_slot_for_read(root, &key, path, 0, 1);
	if (ret < 0)
		goto out;
	ret = 0;
	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
	if (key.objectid != sctx->cur_ino || key.type != BTRFS_EXTENT_DATA_KEY)
		goto out;

	fi = btrfs_item_ptr(path->nodes[0], path->slots[0],
			    struct btrfs_file_extent_item);
	type = btrfs_file_extent_type(path->nodes[0], fi);
	if (type == BTRFS_FILE_EXTENT_INLINE) {
		u64 size = btrfs_file_extent_inline_len(path->nodes[0],
							path->slots[0], fi);
		extent_end = ALIGN(key.offset + size,
				   sctx->send_root->sectorsize);
	} else {
		extent_end = key.offset +
			btrfs_file_extent_num_bytes(path->nodes[0], fi);
	}
	sctx->cur_inode_last_extent = extent_end;
out:
	btrfs_free_path(path);
	return ret;
}
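
/*
 * If there is a gap between the end of the last processed extent and the
 * start of the extent at @key, the file has a hole there: emit zero writes
 * for it (when the stream needs them, see need_send_hole()) and advance
 * cur_inode_last_extent past the current extent.
 */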
static int maybe_send_hole(struct send_ctx *sctx, struct btrfs_path *path,
			   struct btrfs_key *key)
{
	struct btrfs_file_extent_item *fi;
	u64 extent_end;
	u8 type;
	int ret = 0;

	if (sctx->cur_ino != key->objectid || !need_send_hole(sctx))
		return 0;

	if (sctx->cur_inode_last_extent == (u64)-1) {
		ret = get_last_extent(sctx, key->offset - 1);
		if (ret)
			return ret;
	}

	fi = btrfs_item_ptr(path->nodes[0], path->slots[0],
			    struct btrfs_file_extent_item);
	type = btrfs_file_extent_type(path->nodes[0], fi);
	if (type == BTRFS_FILE_EXTENT_INLINE) {
		u64 size = btrfs_file_extent_inline_len(path->nodes[0],
							path->slots[0], fi);
		extent_end = ALIGN(key->offset + size,
				   sctx->send_root->sectorsize);
	} else {
		extent_end = key->offset +
			btrfs_file_extent_num_bytes(path->nodes[0], fi);
	}

	if (path->slots[0] == 0 &&
	    sctx->cur_inode_last_extent < key->offset) {
		/*
		 * We might have skipped entire leafs that contained only
		 * file extent items for our current inode. These leafs have
		 * a generation number smaller (older) than the one in the
		 * current leaf and the leaf our last extent came from, and
		 * are located between these 2 leafs.
		 */
		ret = get_last_extent(sctx, key->offset - 1);
		if (ret)
			return ret;
	}

	if (sctx->cur_inode_last_extent < key->offset)
		ret = send_hole(sctx, key->offset);
	sctx->cur_inode_last_extent = extent_end;
	return ret;
}

static int process_extent(struct send_ctx *sctx,
			  struct btrfs_path *path,
			  struct btrfs_key *key)
{
	struct clone_root *found_clone = NULL;
	int ret = 0;

	if (S_ISLNK(sctx->cur_inode_mode))
		return 0;

	if (sctx->parent_root && !sctx->cur_inode_new) {
		ret = is_extent_unchanged(sctx, path, key);
		if (ret < 0)
			goto out;
		if (ret) {
			ret = 0;
			goto out_hole;
		}
	} else {
		struct btrfs_file_extent_item *ei;
		u8 type;

		ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
				    struct btrfs_file_extent_item);
		type = btrfs_file_extent_type(path->nodes[0], ei);
		if (type == BTRFS_FILE_EXTENT_PREALLOC ||
		    type == BTRFS_FILE_EXTENT_REG) {
			/*
			 * The send spec does not have a prealloc command yet,
			 * so just leave a hole for prealloc'ed extents until
			 * we have enough commands queued up to justify rev'ing
			 * the send spec.
			 */
			if (type == BTRFS_FILE_EXTENT_PREALLOC) {
				ret = 0;
				goto out;
			}

			/* Have a hole, just skip it. */
			if (btrfs_file_extent_disk_bytenr(path->nodes[0], ei) == 0) {
				ret = 0;
				goto out;
			}
		}
	}

	ret = find_extent_clone(sctx, path, key->objectid, key->offset,
			sctx->cur_inode_size, &found_clone);
	if (ret != -ENOENT && ret < 0)
		goto out;

	ret = send_write_or_clone(sctx, path, key, found_clone);
	if (ret)
		goto out;
out_hole:
	ret = maybe_send_hole(sctx, path, key);
out:
	return ret;
}

static int process_all_extents(struct send_ctx *sctx)
{
	int ret;
	struct btrfs_root *root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *eb;
	int slot;

	root = sctx->send_root;
	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	key.objectid = sctx->cmp_key->objectid;
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = 0;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	while (1) {
		eb = path->nodes[0];
		slot = path->slots[0];

		if (slot >= btrfs_header_nritems(eb)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0) {
				goto out;
			} else if (ret > 0) {
				ret = 0;
				break;
			}
			continue;
		}

		btrfs_item_key_to_cpu(eb, &found_key, slot);

		if (found_key.objectid != key.objectid ||
		    found_key.type != key.type) {
			ret = 0;
			goto out;
		}

		ret = process_extent(sctx, path, &found_key);
		if (ret < 0)
			goto out;

		path->slots[0]++;
	}

out:
	btrfs_free_path(path);
	return ret;
}
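
/*
 * Process the refs recorded for the current inode, but only once all of its
 * ref items have been seen (or we are past them in the tree comparison),
 * and only if something was actually recorded.
 */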
static int process_recorded_refs_if_needed(struct send_ctx *sctx, int at_end,
					   int *pending_move,
					   int *refs_processed)
{
	int ret = 0;

	if (sctx->cur_ino == 0)
		goto out;
	if (!at_end && sctx->cur_ino == sctx->cmp_key->objectid &&
	    sctx->cmp_key->type <= BTRFS_INODE_EXTREF_KEY)
		goto out;
	if (list_empty(&sctx->new_refs) && list_empty(&sctx->deleted_refs))
		goto out;

	ret = process_recorded_refs(sctx, pending_move);
	if (ret < 0)
		goto out;

	*refs_processed = 1;
out:
	return ret;
}
  4440. static int finish_inode_if_needed(struct send_ctx *sctx, int at_end)
  4441. {
  4442. int ret = 0;
  4443. u64 left_mode;
	u64 left_uid;
	u64 left_gid;
	u64 right_mode;
	u64 right_uid;
	u64 right_gid;
	int need_chmod = 0;
	int need_chown = 0;
	int pending_move = 0;
	int refs_processed = 0;

	ret = process_recorded_refs_if_needed(sctx, at_end, &pending_move,
					      &refs_processed);
	if (ret < 0)
		goto out;

	/*
	 * We have processed the refs and thus need to advance send_progress.
	 * Now, calls to get_cur_xxx will take the updated refs of the current
	 * inode into account.
	 *
	 * On the other hand, if our current inode is a directory and couldn't
	 * be moved/renamed because its parent was renamed/moved too and it has
	 * a higher inode number, we can only move/rename our current inode
	 * after we moved/renamed its parent. Therefore in this case operate on
	 * the old path (pre move/rename) of our current inode, and the
	 * move/rename will be performed later.
	 */
	if (refs_processed && !pending_move)
		sctx->send_progress = sctx->cur_ino + 1;

	if (sctx->cur_ino == 0 || sctx->cur_inode_deleted)
		goto out;
	if (!at_end && sctx->cmp_key->objectid == sctx->cur_ino)
		goto out;

	ret = get_inode_info(sctx->send_root, sctx->cur_ino, NULL, NULL,
			&left_mode, &left_uid, &left_gid, NULL);
	if (ret < 0)
		goto out;

	if (!sctx->parent_root || sctx->cur_inode_new) {
		need_chown = 1;
		if (!S_ISLNK(sctx->cur_inode_mode))
			need_chmod = 1;
	} else {
		ret = get_inode_info(sctx->parent_root, sctx->cur_ino,
				NULL, NULL, &right_mode, &right_uid,
				&right_gid, NULL);
		if (ret < 0)
			goto out;

		if (left_uid != right_uid || left_gid != right_gid)
			need_chown = 1;
		if (!S_ISLNK(sctx->cur_inode_mode) && left_mode != right_mode)
			need_chmod = 1;
	}

	if (S_ISREG(sctx->cur_inode_mode)) {
		if (need_send_hole(sctx)) {
			if (sctx->cur_inode_last_extent == (u64)-1 ||
			    sctx->cur_inode_last_extent <
			    sctx->cur_inode_size) {
				ret = get_last_extent(sctx, (u64)-1);
				if (ret)
					goto out;
			}
			if (sctx->cur_inode_last_extent <
			    sctx->cur_inode_size) {
				ret = send_hole(sctx, sctx->cur_inode_size);
				if (ret)
					goto out;
			}
		}
		ret = send_truncate(sctx, sctx->cur_ino, sctx->cur_inode_gen,
				sctx->cur_inode_size);
		if (ret < 0)
			goto out;
	}

	if (need_chown) {
		ret = send_chown(sctx, sctx->cur_ino, sctx->cur_inode_gen,
				left_uid, left_gid);
		if (ret < 0)
			goto out;
	}
	if (need_chmod) {
		ret = send_chmod(sctx, sctx->cur_ino, sctx->cur_inode_gen,
				left_mode);
		if (ret < 0)
			goto out;
	}

	/*
	 * If other directory inodes depended on our current directory
	 * inode's move/rename, now do their move/rename operations.
	 */
	if (!is_waiting_for_move(sctx, sctx->cur_ino)) {
		ret = apply_children_dir_moves(sctx);
		if (ret)
			goto out;
		/*
		 * Need to send that every time, no matter if it actually
		 * changed between the two trees as we have done changes to
		 * the inode before. If our inode is a directory and it's
		 * waiting to be moved/renamed, we will send its utimes when
		 * it's moved/renamed, therefore we don't need to do it here.
		 */
		sctx->send_progress = sctx->cur_ino + 1;
		ret = send_utimes(sctx, sctx->cur_ino, sctx->cur_inode_gen);
		if (ret < 0)
			goto out;
	}

out:
	return ret;
}
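
/*
 * For illustration: when a regular file grew and changed owner between the
 * two snapshots, the function above would typically emit commands into the
 * stream in this order (names, sizes and ids below are hypothetical):
 *
 *	truncate foo size=8192		<- send_truncate(), regular files only
 *	chown    foo uid=1000 gid=1000	<- send_chown(), only if uid/gid differ
 *	chmod    foo mode=0644		<- send_chmod(), only if mode differs
 *	utimes   foo			<- send_utimes(), always last
 *
 * utimes must come last because every earlier command touches the inode's
 * timestamps on the receiving side.
 */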
static int changed_inode(struct send_ctx *sctx,
			 enum btrfs_compare_tree_result result)
{
	int ret = 0;
	struct btrfs_key *key = sctx->cmp_key;
	struct btrfs_inode_item *left_ii = NULL;
	struct btrfs_inode_item *right_ii = NULL;
	u64 left_gen = 0;
	u64 right_gen = 0;

	sctx->cur_ino = key->objectid;
	sctx->cur_inode_new_gen = 0;
	sctx->cur_inode_last_extent = (u64)-1;

	/*
	 * Set send_progress to current inode. This will tell all get_cur_xxx
	 * functions that the current inode's refs are not updated yet. Later,
	 * when process_recorded_refs is finished, it is set to cur_ino + 1.
	 */
	sctx->send_progress = sctx->cur_ino;

	if (result == BTRFS_COMPARE_TREE_NEW ||
	    result == BTRFS_COMPARE_TREE_CHANGED) {
		left_ii = btrfs_item_ptr(sctx->left_path->nodes[0],
				sctx->left_path->slots[0],
				struct btrfs_inode_item);
		left_gen = btrfs_inode_generation(sctx->left_path->nodes[0],
				left_ii);
	} else {
		right_ii = btrfs_item_ptr(sctx->right_path->nodes[0],
				sctx->right_path->slots[0],
				struct btrfs_inode_item);
		right_gen = btrfs_inode_generation(sctx->right_path->nodes[0],
				right_ii);
	}

	if (result == BTRFS_COMPARE_TREE_CHANGED) {
		right_ii = btrfs_item_ptr(sctx->right_path->nodes[0],
				sctx->right_path->slots[0],
				struct btrfs_inode_item);
		right_gen = btrfs_inode_generation(sctx->right_path->nodes[0],
				right_ii);

		/*
		 * The cur_ino = root dir case is special here. We can't treat
		 * the inode as deleted+reused because it would generate a
		 * stream that tries to delete/mkdir the root dir.
		 */
		if (left_gen != right_gen &&
		    sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID)
			sctx->cur_inode_new_gen = 1;
	}

	if (result == BTRFS_COMPARE_TREE_NEW) {
		sctx->cur_inode_gen = left_gen;
		sctx->cur_inode_new = 1;
		sctx->cur_inode_deleted = 0;
		sctx->cur_inode_size = btrfs_inode_size(
				sctx->left_path->nodes[0], left_ii);
		sctx->cur_inode_mode = btrfs_inode_mode(
				sctx->left_path->nodes[0], left_ii);
		sctx->cur_inode_rdev = btrfs_inode_rdev(
				sctx->left_path->nodes[0], left_ii);
		if (sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID)
			ret = send_create_inode_if_needed(sctx);
	} else if (result == BTRFS_COMPARE_TREE_DELETED) {
		sctx->cur_inode_gen = right_gen;
		sctx->cur_inode_new = 0;
		sctx->cur_inode_deleted = 1;
		sctx->cur_inode_size = btrfs_inode_size(
				sctx->right_path->nodes[0], right_ii);
		sctx->cur_inode_mode = btrfs_inode_mode(
				sctx->right_path->nodes[0], right_ii);
	} else if (result == BTRFS_COMPARE_TREE_CHANGED) {
		/*
		 * We need to do some special handling in case the inode was
		 * reported as changed with a changed generation number. This
		 * means that the original inode was deleted and a new inode
		 * reused the same inum. So we have to treat the old inode as
		 * deleted and the new one as new.
		 */
		if (sctx->cur_inode_new_gen) {
			/*
			 * First, process the inode as if it was deleted.
			 */
			sctx->cur_inode_gen = right_gen;
			sctx->cur_inode_new = 0;
			sctx->cur_inode_deleted = 1;
			sctx->cur_inode_size = btrfs_inode_size(
					sctx->right_path->nodes[0], right_ii);
			sctx->cur_inode_mode = btrfs_inode_mode(
					sctx->right_path->nodes[0], right_ii);
			ret = process_all_refs(sctx,
					BTRFS_COMPARE_TREE_DELETED);
			if (ret < 0)
				goto out;

			/*
			 * Now process the inode as if it was new.
			 */
			sctx->cur_inode_gen = left_gen;
			sctx->cur_inode_new = 1;
			sctx->cur_inode_deleted = 0;
			sctx->cur_inode_size = btrfs_inode_size(
					sctx->left_path->nodes[0], left_ii);
			sctx->cur_inode_mode = btrfs_inode_mode(
					sctx->left_path->nodes[0], left_ii);
			sctx->cur_inode_rdev = btrfs_inode_rdev(
					sctx->left_path->nodes[0], left_ii);
			ret = send_create_inode_if_needed(sctx);
			if (ret < 0)
				goto out;

			ret = process_all_refs(sctx, BTRFS_COMPARE_TREE_NEW);
			if (ret < 0)
				goto out;
			/*
			 * Advance send_progress now as we did not get into
			 * process_recorded_refs_if_needed in the new_gen case.
			 */
			sctx->send_progress = sctx->cur_ino + 1;

			/*
			 * Now process all extents and xattrs of the inode as if
			 * they were all new.
			 */
			ret = process_all_extents(sctx);
			if (ret < 0)
				goto out;
			ret = process_all_new_xattrs(sctx);
			if (ret < 0)
				goto out;
		} else {
			sctx->cur_inode_gen = left_gen;
			sctx->cur_inode_new = 0;
			sctx->cur_inode_new_gen = 0;
			sctx->cur_inode_deleted = 0;
			sctx->cur_inode_size = btrfs_inode_size(
					sctx->left_path->nodes[0], left_ii);
			sctx->cur_inode_mode = btrfs_inode_mode(
					sctx->left_path->nodes[0], left_ii);
		}
	}

out:
	return ret;
}
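
/*
 * Example for the new_gen case above (hypothetical numbers): if the parent
 * snapshot had a file "foo" with inum 260 and generation 10, and in the
 * send snapshot an unrelated file "bar" ended up with the same inum 260
 * but generation 12, compare_trees reports a single changed inode item.
 * The code above splits that into "delete foo" followed by "create bar",
 * and then replays all refs, extents and xattrs of the new inode from
 * scratch, since none of the old inode's state can be reused.
 */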
/*
 * We have to process new refs before deleted refs, but compare_trees gives us
 * the new and deleted refs mixed. To fix this, we record the new/deleted refs
 * first and later process them in process_recorded_refs.
 * For the cur_inode_new_gen case, we skip recording completely because
 * changed_inode has already initiated processing of refs. The reason for this
 * is that in this case, compare_trees actually compares the refs of 2
 * different inodes. To fix this, process_all_refs is used in changed_inode to
 * handle all refs of the right tree as deleted and all refs of the left tree
 * as new.
 */
static int changed_ref(struct send_ctx *sctx,
		       enum btrfs_compare_tree_result result)
{
	int ret = 0;

	BUG_ON(sctx->cur_ino != sctx->cmp_key->objectid);

	if (!sctx->cur_inode_new_gen &&
	    sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID) {
		if (result == BTRFS_COMPARE_TREE_NEW)
			ret = record_new_ref(sctx);
		else if (result == BTRFS_COMPARE_TREE_DELETED)
			ret = record_deleted_ref(sctx);
		else if (result == BTRFS_COMPARE_TREE_CHANGED)
			ret = record_changed_ref(sctx);
	}

	return ret;
}
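
/*
 * An illustrative case for the ordering requirement (hypothetical names):
 * if "a" was renamed to "b" between the snapshots, compare_trees emits the
 * deleted ref for "a" and the new ref for "b" in key order, which may put
 * the deletion first. Recording both and letting process_recorded_refs
 * handle new refs before deleted ones means the receiver can link the new
 * name (or perform a rename) before the old name goes away, so the inode
 * never becomes unreachable mid-stream.
 */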
/*
 * Process new/deleted/changed xattrs. We skip processing in the
 * cur_inode_new_gen case because changed_inode has already initiated
 * processing of xattrs. The reason is the same as in changed_ref.
 */
static int changed_xattr(struct send_ctx *sctx,
			 enum btrfs_compare_tree_result result)
{
	int ret = 0;

	BUG_ON(sctx->cur_ino != sctx->cmp_key->objectid);

	if (!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted) {
		if (result == BTRFS_COMPARE_TREE_NEW)
			ret = process_new_xattr(sctx);
		else if (result == BTRFS_COMPARE_TREE_DELETED)
			ret = process_deleted_xattr(sctx);
		else if (result == BTRFS_COMPARE_TREE_CHANGED)
			ret = process_changed_xattr(sctx);
	}

	return ret;
}
/*
 * Process new/deleted/changed extents. We skip processing in the
 * cur_inode_new_gen case because changed_inode has already initiated
 * processing of extents. The reason is the same as in changed_ref.
 */
static int changed_extent(struct send_ctx *sctx,
			  enum btrfs_compare_tree_result result)
{
	int ret = 0;

	BUG_ON(sctx->cur_ino != sctx->cmp_key->objectid);

	if (!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted) {
		if (result != BTRFS_COMPARE_TREE_DELETED)
			ret = process_extent(sctx, sctx->left_path,
					sctx->cmp_key);
	}

	return ret;
}
static int dir_changed(struct send_ctx *sctx, u64 dir)
{
	u64 orig_gen, new_gen;
	int ret;

	ret = get_inode_info(sctx->send_root, dir, NULL, &new_gen, NULL, NULL,
			     NULL, NULL);
	if (ret)
		return ret;

	ret = get_inode_info(sctx->parent_root, dir, NULL, &orig_gen, NULL,
			     NULL, NULL, NULL);
	if (ret)
		return ret;

	return (orig_gen != new_gen) ? 1 : 0;
}
static int compare_refs(struct send_ctx *sctx, struct btrfs_path *path,
			struct btrfs_key *key)
{
	struct btrfs_inode_extref *extref;
	struct extent_buffer *leaf;
	u64 dirid = 0, last_dirid = 0;
	unsigned long ptr;
	u32 item_size;
	u32 cur_offset = 0;
	int ref_name_len;
	int ret = 0;

	/* Easy case, just check this one dirid */
	if (key->type == BTRFS_INODE_REF_KEY) {
		dirid = key->offset;

		ret = dir_changed(sctx, dirid);
		goto out;
	}

	leaf = path->nodes[0];
	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
	ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
	while (cur_offset < item_size) {
		extref = (struct btrfs_inode_extref *)(ptr +
						       cur_offset);
		dirid = btrfs_inode_extref_parent(leaf, extref);
		ref_name_len = btrfs_inode_extref_name_len(leaf, extref);
		cur_offset += ref_name_len + sizeof(*extref);
		if (dirid == last_dirid)
			continue;
		ret = dir_changed(sctx, dirid);
		if (ret)
			break;
		last_dirid = dirid;
	}
out:
	return ret;
}
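
/*
 * The loop above walks the packed extref payload of a single item. As a
 * sketch of the on-disk layout (per struct btrfs_inode_extref), each entry
 * looks like:
 *
 *	parent objectid (8) | index (8) | name_len (2) | name (name_len bytes)
 *
 * which is why the cursor advances by sizeof(*extref) + ref_name_len per
 * entry, and why consecutive entries with the same parent dirid can skip
 * the dir_changed() lookup.
 */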
/*
 * Updates compare-related fields in sctx and simply forwards to the actual
 * changed_xxx functions.
 */
static int changed_cb(struct btrfs_root *left_root,
		      struct btrfs_root *right_root,
		      struct btrfs_path *left_path,
		      struct btrfs_path *right_path,
		      struct btrfs_key *key,
		      enum btrfs_compare_tree_result result,
		      void *ctx)
{
	int ret = 0;
	struct send_ctx *sctx = ctx;

	if (result == BTRFS_COMPARE_TREE_SAME) {
		if (key->type == BTRFS_INODE_REF_KEY ||
		    key->type == BTRFS_INODE_EXTREF_KEY) {
			ret = compare_refs(sctx, left_path, key);
			if (!ret)
				return 0;
			if (ret < 0)
				return ret;
		} else if (key->type == BTRFS_EXTENT_DATA_KEY) {
			return maybe_send_hole(sctx, left_path, key);
		} else {
			return 0;
		}
		result = BTRFS_COMPARE_TREE_CHANGED;
		ret = 0;
	}

	sctx->left_path = left_path;
	sctx->right_path = right_path;
	sctx->cmp_key = key;

	ret = finish_inode_if_needed(sctx, 0);
	if (ret < 0)
		goto out;

	/* Ignore non-FS objects */
	if (key->objectid == BTRFS_FREE_INO_OBJECTID ||
	    key->objectid == BTRFS_FREE_SPACE_OBJECTID)
		goto out;

	if (key->type == BTRFS_INODE_ITEM_KEY)
		ret = changed_inode(sctx, result);
	else if (key->type == BTRFS_INODE_REF_KEY ||
		 key->type == BTRFS_INODE_EXTREF_KEY)
		ret = changed_ref(sctx, result);
	else if (key->type == BTRFS_XATTR_ITEM_KEY)
		ret = changed_xattr(sctx, result);
	else if (key->type == BTRFS_EXTENT_DATA_KEY)
		ret = changed_extent(sctx, result);

out:
	return ret;
}
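
/*
 * Summarizing the dispatch above by key type:
 *
 *	BTRFS_INODE_ITEM_KEY	-> changed_inode()
 *	BTRFS_INODE_REF_KEY	-> changed_ref()
 *	BTRFS_INODE_EXTREF_KEY	-> changed_ref()
 *	BTRFS_XATTR_ITEM_KEY	-> changed_xattr()
 *	BTRFS_EXTENT_DATA_KEY	-> changed_extent()
 *
 * "Same" results are normally ignored, except for refs whose parent
 * directory may have been recreated and for extent items that may still
 * require hole punching.
 */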
static int full_send_tree(struct send_ctx *sctx)
{
	int ret;
	struct btrfs_root *send_root = sctx->send_root;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_path *path;
	struct extent_buffer *eb;
	int slot;

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_FIRST_FREE_OBJECTID;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;

	ret = btrfs_search_slot_for_read(send_root, &key, path, 1, 0);
	if (ret < 0)
		goto out;
	if (ret)
		goto out_finish;

	while (1) {
		eb = path->nodes[0];
		slot = path->slots[0];
		btrfs_item_key_to_cpu(eb, &found_key, slot);

		ret = changed_cb(send_root, NULL, path, NULL,
				&found_key, BTRFS_COMPARE_TREE_NEW, sctx);
		if (ret < 0)
			goto out;

		key.objectid = found_key.objectid;
		key.type = found_key.type;
		key.offset = found_key.offset + 1;

		ret = btrfs_next_item(send_root, path);
		if (ret < 0)
			goto out;
		if (ret) {
			ret = 0;
			break;
		}
	}

out_finish:
	ret = finish_inode_if_needed(sctx, 1);

out:
	btrfs_free_path(path);
	return ret;
}
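
/*
 * In other words, a full send behaves like an incremental send against an
 * empty parent: every item of the subvolume, starting at
 * BTRFS_FIRST_FREE_OBJECTID, is fed to changed_cb() in key order as
 * BTRFS_COMPARE_TREE_NEW, with no right tree involved at all.
 */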
static int send_subvol(struct send_ctx *sctx)
{
	int ret;

	if (!(sctx->flags & BTRFS_SEND_FLAG_OMIT_STREAM_HEADER)) {
		ret = send_header(sctx);
		if (ret < 0)
			goto out;
	}

	ret = send_subvol_begin(sctx);
	if (ret < 0)
		goto out;

	if (sctx->parent_root) {
		ret = btrfs_compare_trees(sctx->send_root, sctx->parent_root,
				changed_cb, sctx);
		if (ret < 0)
			goto out;
		ret = finish_inode_if_needed(sctx, 1);
		if (ret < 0)
			goto out;
	} else {
		ret = full_send_tree(sctx);
		if (ret < 0)
			goto out;
	}

out:
	free_recorded_refs(sctx);
	return ret;
}
/*
 * If orphan cleanup did remove any orphans from a root, it means the tree
 * was modified and therefore the commit root is not the same as the current
 * root anymore. This is a problem, because send uses the commit root and
 * therefore can see inode items that don't exist in the current root anymore,
 * and for example make calls to btrfs_iget, which will do tree lookups based
 * on the current root and not on the commit root. Those lookups will fail,
 * returning a -ESTALE error, and making send fail with that error. So make
 * sure a send does not see any orphans we have just removed, and that it will
 * see the same inodes regardless of whether a transaction commit happened
 * before it started (meaning that the commit root will be the same as the
 * current root) or not.
 */
static int ensure_commit_roots_uptodate(struct send_ctx *sctx)
{
	int i;
	struct btrfs_trans_handle *trans = NULL;

again:
	if (sctx->parent_root &&
	    sctx->parent_root->node != sctx->parent_root->commit_root)
		goto commit_trans;

	for (i = 0; i < sctx->clone_roots_cnt; i++)
		if (sctx->clone_roots[i].root->node !=
		    sctx->clone_roots[i].root->commit_root)
			goto commit_trans;

	if (trans)
		return btrfs_end_transaction(trans, sctx->send_root);

	return 0;

commit_trans:
	/* Use any root, all fs roots will get their commit roots updated. */
	if (!trans) {
		trans = btrfs_join_transaction(sctx->send_root);
		if (IS_ERR(trans))
			return PTR_ERR(trans);
		goto again;
	}

	return btrfs_commit_transaction(trans, sctx->send_root);
}
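
/*
 * A walk-through of the control flow above: the first pass runs without a
 * transaction handle; if any root is found dirty (node != commit_root), a
 * transaction is joined and the checks are re-run. On the second pass, a
 * still-dirty root leads to btrfs_commit_transaction(), which brings all
 * commit roots up to date, while finding everything clean (because another
 * task committed in the meantime) just ends the joined transaction.
 */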
static void btrfs_root_dec_send_in_progress(struct btrfs_root *root)
{
	spin_lock(&root->root_item_lock);
	root->send_in_progress--;
	/*
	 * Not much left to do, we don't know why it's unbalanced and
	 * can't blindly reset it to 0.
	 */
	if (root->send_in_progress < 0)
		btrfs_err(root->fs_info,
			  "send_in_progress unbalanced %d root %llu",
			  root->send_in_progress, root->root_key.objectid);
	spin_unlock(&root->root_item_lock);
}
long btrfs_ioctl_send(struct file *mnt_file, void __user *arg_)
{
	int ret = 0;
	struct btrfs_root *send_root;
	struct btrfs_root *clone_root;
	struct btrfs_fs_info *fs_info;
	struct btrfs_ioctl_send_args *arg = NULL;
	struct btrfs_key key;
	struct send_ctx *sctx = NULL;
	u32 i;
	u64 *clone_sources_tmp = NULL;
	int clone_sources_to_rollback = 0;
	int sort_clone_roots = 0;
	int index;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	send_root = BTRFS_I(file_inode(mnt_file))->root;
	fs_info = send_root->fs_info;

	/*
	 * The subvolume must remain read-only during send, protect against
	 * making it RW. This also protects against deletion.
	 */
	spin_lock(&send_root->root_item_lock);
	send_root->send_in_progress++;
	spin_unlock(&send_root->root_item_lock);

	/*
	 * This is done when we lookup the root, it should already be complete
	 * by the time we get here.
	 */
	WARN_ON(send_root->orphan_cleanup_state != ORPHAN_CLEANUP_DONE);

	/*
	 * Userspace tools do the checks and warn the user if it's
	 * not RO.
	 */
	if (!btrfs_root_readonly(send_root)) {
		ret = -EPERM;
		goto out;
	}

	arg = memdup_user(arg_, sizeof(*arg));
	if (IS_ERR(arg)) {
		ret = PTR_ERR(arg);
		arg = NULL;
		goto out;
	}

	if (!access_ok(VERIFY_READ, arg->clone_sources,
			sizeof(*arg->clone_sources) *
			arg->clone_sources_count)) {
		ret = -EFAULT;
		goto out;
	}

	if (arg->flags & ~BTRFS_SEND_FLAG_MASK) {
		ret = -EINVAL;
		goto out;
	}

	sctx = kzalloc(sizeof(struct send_ctx), GFP_NOFS);
	if (!sctx) {
		ret = -ENOMEM;
		goto out;
	}

	INIT_LIST_HEAD(&sctx->new_refs);
	INIT_LIST_HEAD(&sctx->deleted_refs);
	INIT_RADIX_TREE(&sctx->name_cache, GFP_NOFS);
	INIT_LIST_HEAD(&sctx->name_cache_list);

	sctx->flags = arg->flags;

	sctx->send_filp = fget(arg->send_fd);
	if (!sctx->send_filp) {
		ret = -EBADF;
		goto out;
	}

	sctx->send_root = send_root;
	/*
	 * Unlikely but possible, if the subvolume is marked for deletion but
	 * is slow to remove the directory entry, send can still be started.
	 */
	if (btrfs_root_dead(sctx->send_root)) {
		ret = -EPERM;
		goto out;
	}

	sctx->clone_roots_cnt = arg->clone_sources_count;

	sctx->send_max_size = BTRFS_SEND_BUF_SIZE;
	sctx->send_buf = vmalloc(sctx->send_max_size);
	if (!sctx->send_buf) {
		ret = -ENOMEM;
		goto out;
	}

	sctx->read_buf = vmalloc(BTRFS_SEND_READ_SIZE);
	if (!sctx->read_buf) {
		ret = -ENOMEM;
		goto out;
	}

	sctx->pending_dir_moves = RB_ROOT;
	sctx->waiting_dir_moves = RB_ROOT;
	sctx->orphan_dirs = RB_ROOT;

	sctx->clone_roots = vzalloc(sizeof(struct clone_root) *
			(arg->clone_sources_count + 1));
	if (!sctx->clone_roots) {
		ret = -ENOMEM;
		goto out;
	}

	if (arg->clone_sources_count) {
		clone_sources_tmp = vmalloc(arg->clone_sources_count *
				sizeof(*arg->clone_sources));
		if (!clone_sources_tmp) {
			ret = -ENOMEM;
			goto out;
		}

		ret = copy_from_user(clone_sources_tmp, arg->clone_sources,
				arg->clone_sources_count *
				sizeof(*arg->clone_sources));
		if (ret) {
			ret = -EFAULT;
			goto out;
		}

		for (i = 0; i < arg->clone_sources_count; i++) {
			key.objectid = clone_sources_tmp[i];
			key.type = BTRFS_ROOT_ITEM_KEY;
			key.offset = (u64)-1;

			index = srcu_read_lock(&fs_info->subvol_srcu);

			clone_root = btrfs_read_fs_root_no_name(fs_info, &key);
			if (IS_ERR(clone_root)) {
				srcu_read_unlock(&fs_info->subvol_srcu, index);
				ret = PTR_ERR(clone_root);
				goto out;
			}
			spin_lock(&clone_root->root_item_lock);
			if (!btrfs_root_readonly(clone_root) ||
			    btrfs_root_dead(clone_root)) {
				spin_unlock(&clone_root->root_item_lock);
				srcu_read_unlock(&fs_info->subvol_srcu, index);
				ret = -EPERM;
				goto out;
			}
			clone_root->send_in_progress++;
			spin_unlock(&clone_root->root_item_lock);
			srcu_read_unlock(&fs_info->subvol_srcu, index);

			sctx->clone_roots[i].root = clone_root;
			clone_sources_to_rollback = i + 1;
		}
		vfree(clone_sources_tmp);
		clone_sources_tmp = NULL;
	}

	if (arg->parent_root) {
		key.objectid = arg->parent_root;
		key.type = BTRFS_ROOT_ITEM_KEY;
		key.offset = (u64)-1;

		index = srcu_read_lock(&fs_info->subvol_srcu);

		sctx->parent_root = btrfs_read_fs_root_no_name(fs_info, &key);
		if (IS_ERR(sctx->parent_root)) {
			srcu_read_unlock(&fs_info->subvol_srcu, index);
			ret = PTR_ERR(sctx->parent_root);
			goto out;
		}

		spin_lock(&sctx->parent_root->root_item_lock);
		sctx->parent_root->send_in_progress++;
		if (!btrfs_root_readonly(sctx->parent_root) ||
		    btrfs_root_dead(sctx->parent_root)) {
			spin_unlock(&sctx->parent_root->root_item_lock);
			srcu_read_unlock(&fs_info->subvol_srcu, index);
			ret = -EPERM;
			goto out;
		}
		spin_unlock(&sctx->parent_root->root_item_lock);

		srcu_read_unlock(&fs_info->subvol_srcu, index);
	}

	/*
	 * Clones from send_root are allowed, but only if the clone source
	 * is behind the current send position. This is checked while searching
	 * for possible clone sources.
	 */
	sctx->clone_roots[sctx->clone_roots_cnt++].root = sctx->send_root;

	/* We do a bsearch later */
	sort(sctx->clone_roots, sctx->clone_roots_cnt,
			sizeof(*sctx->clone_roots), __clone_root_cmp_sort,
			NULL);
	sort_clone_roots = 1;

	ret = ensure_commit_roots_uptodate(sctx);
	if (ret)
		goto out;

	current->journal_info = BTRFS_SEND_TRANS_STUB;
	ret = send_subvol(sctx);
	current->journal_info = NULL;
	if (ret < 0)
		goto out;

	if (!(sctx->flags & BTRFS_SEND_FLAG_OMIT_END_CMD)) {
		ret = begin_cmd(sctx, BTRFS_SEND_C_END);
		if (ret < 0)
			goto out;
		ret = send_cmd(sctx);
		if (ret < 0)
			goto out;
	}

out:
	WARN_ON(sctx && !ret && !RB_EMPTY_ROOT(&sctx->pending_dir_moves));
	while (sctx && !RB_EMPTY_ROOT(&sctx->pending_dir_moves)) {
		struct rb_node *n;
		struct pending_dir_move *pm;

		n = rb_first(&sctx->pending_dir_moves);
		pm = rb_entry(n, struct pending_dir_move, node);
		while (!list_empty(&pm->list)) {
			struct pending_dir_move *pm2;

			pm2 = list_first_entry(&pm->list,
					       struct pending_dir_move, list);
			free_pending_move(sctx, pm2);
		}
		free_pending_move(sctx, pm);
	}

	WARN_ON(sctx && !ret && !RB_EMPTY_ROOT(&sctx->waiting_dir_moves));
	while (sctx && !RB_EMPTY_ROOT(&sctx->waiting_dir_moves)) {
		struct rb_node *n;
		struct waiting_dir_move *dm;

		n = rb_first(&sctx->waiting_dir_moves);
		dm = rb_entry(n, struct waiting_dir_move, node);
		rb_erase(&dm->node, &sctx->waiting_dir_moves);
		kfree(dm);
	}

	WARN_ON(sctx && !ret && !RB_EMPTY_ROOT(&sctx->orphan_dirs));
	while (sctx && !RB_EMPTY_ROOT(&sctx->orphan_dirs)) {
		struct rb_node *n;
		struct orphan_dir_info *odi;

		n = rb_first(&sctx->orphan_dirs);
		odi = rb_entry(n, struct orphan_dir_info, node);
		free_orphan_dir_info(sctx, odi);
	}

	if (sort_clone_roots) {
		for (i = 0; i < sctx->clone_roots_cnt; i++)
			btrfs_root_dec_send_in_progress(
					sctx->clone_roots[i].root);
	} else {
		for (i = 0; sctx && i < clone_sources_to_rollback; i++)
			btrfs_root_dec_send_in_progress(
					sctx->clone_roots[i].root);

		btrfs_root_dec_send_in_progress(send_root);
	}
	if (sctx && !IS_ERR_OR_NULL(sctx->parent_root))
		btrfs_root_dec_send_in_progress(sctx->parent_root);

	kfree(arg);
	vfree(clone_sources_tmp);

	if (sctx) {
		if (sctx->send_filp)
			fput(sctx->send_filp);

		vfree(sctx->clone_roots);
		vfree(sctx->send_buf);
		vfree(sctx->read_buf);

		name_cache_free(sctx);

		kfree(sctx);
	}

	return ret;
}
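
/*
 * For reference, a minimal userspace invocation of this ioctl might look
 * like the sketch below (error handling omitted; "path" and "out_fd" are
 * hypothetical). The subvolume at "path" must be read-only, and the stream
 * is written to out_fd, e.g. a pipe or a file. This mirrors what
 * "btrfs send" does at its core.
 *
 *	struct btrfs_ioctl_send_args args;
 *
 *	memset(&args, 0, sizeof(args));
 *	args.send_fd = out_fd;			// where the stream is written
 *	args.clone_sources = NULL;		// no extra clone sources
 *	args.clone_sources_count = 0;
 *	args.parent_root = 0;			// 0 => full send, no parent
 *	args.flags = 0;
 *
 *	int subvol_fd = open(path, O_RDONLY);	// fd on the subvolume root
 *	ioctl(subvol_fd, BTRFS_IOC_SEND, &args);
 */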