send.c

/*
 * Copyright (C) 2012 Alexander Block.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/bsearch.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/sort.h>
#include <linux/mount.h>
#include <linux/xattr.h>
#include <linux/posix_acl_xattr.h>
#include <linux/radix-tree.h>
#include <linux/vmalloc.h>
#include <linux/string.h>

#include "send.h"
#include "backref.h"
#include "hash.h"
#include "locking.h"
#include "disk-io.h"
#include "btrfs_inode.h"
#include "transaction.h"

static int g_verbose = 0;

#define verbose_printk(...) if (g_verbose) printk(__VA_ARGS__)

/*
 * A fs_path is a helper to dynamically build path names with unknown size.
 * It reallocates the internal buffer on demand.
 * It allows fast adding of path elements on the right side (normal path) and
 * fast adding to the left side (reversed path). A reversed path can also be
 * unreversed if needed.
 */
struct fs_path {
	union {
		struct {
			char *start;
			char *end;

			char *buf;
			unsigned short buf_len:15;
			unsigned short reversed:1;
			char inline_buf[];
		};
		/*
		 * Average path length does not exceed 200 bytes, we'll have
		 * better packing in the slab and higher chance to satisfy
		 * an allocation later during send.
		 */
		char pad[256];
	};
};
#define FS_PATH_INLINE_SIZE \
		(sizeof(struct fs_path) - offsetof(struct fs_path, inline_buf))
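
/*
 * Because of the 256 byte pad above, sizeof(struct fs_path) is exactly 256,
 * so FS_PATH_INLINE_SIZE is whatever of those 256 bytes is left after the
 * pointer and bitfield members (roughly 230 bytes on a 64bit build). Paths
 * up to that length never need a separate buffer allocation.
 */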

/* reused for each extent */
struct clone_root {
	struct btrfs_root *root;
	u64 ino;
	u64 offset;

	u64 found_refs;
};

#define SEND_CTX_MAX_NAME_CACHE_SIZE 128
#define SEND_CTX_NAME_CACHE_CLEAN_SIZE (SEND_CTX_MAX_NAME_CACHE_SIZE * 2)

struct send_ctx {
	struct file *send_filp;
	loff_t send_off;
	char *send_buf;
	u32 send_size;
	u32 send_max_size;
	u64 total_send_size;
	u64 cmd_send_size[BTRFS_SEND_C_MAX + 1];
	u64 flags;	/* 'flags' member of btrfs_ioctl_send_args is u64 */

	struct btrfs_root *send_root;
	struct btrfs_root *parent_root;
	struct clone_root *clone_roots;
	int clone_roots_cnt;

	/* current state of the compare_tree call */
	struct btrfs_path *left_path;
	struct btrfs_path *right_path;
	struct btrfs_key *cmp_key;

	/*
	 * Info about the currently processed inode. In case of deleted inodes,
	 * these are the values from the deleted inode.
	 */
	u64 cur_ino;
	u64 cur_inode_gen;
	int cur_inode_new;
	int cur_inode_new_gen;
	int cur_inode_deleted;
	u64 cur_inode_size;
	u64 cur_inode_mode;
	u64 cur_inode_rdev;
	u64 cur_inode_last_extent;

	u64 send_progress;

	struct list_head new_refs;
	struct list_head deleted_refs;

	struct radix_tree_root name_cache;
	struct list_head name_cache_list;
	int name_cache_size;

	struct file_ra_state ra;

	char *read_buf;

	/*
	 * We process inodes in increasing order, so if before an
	 * incremental send we reverse the parent/child relationship of
	 * directories such that a directory with a lower inode number was
	 * the parent of a directory with a higher inode number, and the one
	 * becoming the new parent got renamed too, we can't rename/move the
	 * directory with the lower inode number when we finish processing
	 * it - we must process the directory with the higher inode number
	 * first, then rename/move it and then rename/move the directory with
	 * the lower inode number. Example follows.
	 *
	 * Tree state when the first send was performed:
	 *
	 * .
	 * |-- a                   (ino 257)
	 *     |-- b               (ino 258)
	 *         |
	 *         |
	 *         |-- c           (ino 259)
	 *         |   |-- d       (ino 260)
	 *         |
	 *         |-- c2          (ino 261)
	 *
	 * Tree state when the second (incremental) send is performed:
	 *
	 * .
	 * |-- a                   (ino 257)
	 *     |-- b               (ino 258)
	 *         |-- c2          (ino 261)
	 *             |-- d2      (ino 260)
	 *                 |-- cc  (ino 259)
	 *
	 * The sequence of steps that led to the second state was:
	 *
	 * mv /a/b/c/d /a/b/c2/d2
	 * mv /a/b/c /a/b/c2/d2/cc
	 *
	 * "c" has the lower inode number, but we can't move it (2nd mv
	 * operation) before we move "d", which has the higher inode number.
	 *
	 * So we just memorize which move/rename operations must be performed
	 * later when their respective parent is processed and moved/renamed.
	 */

	/* Indexed by parent directory inode number. */
	struct rb_root pending_dir_moves;

	/*
	 * Reverse index, indexed by the inode number of a directory that
	 * is waiting for the move/rename of its immediate parent before its
	 * own move/rename can be performed.
	 */
	struct rb_root waiting_dir_moves;

	/*
	 * A directory that is going to be rm'ed might have a child directory
	 * which is in the pending directory moves index above. In this case,
	 * the directory can only be removed after the move/rename of its child
	 * is performed. Example:
	 *
	 * Parent snapshot:
	 *
	 * .                        (ino 256)
	 * |-- a/                   (ino 257)
	 *     |-- b/               (ino 258)
	 *         |-- c/           (ino 259)
	 *         |   |-- x/       (ino 260)
	 *         |
	 *         |-- y/           (ino 261)
	 *
	 * Send snapshot:
	 *
	 * .                        (ino 256)
	 * |-- a/                   (ino 257)
	 *     |-- b/               (ino 258)
	 *         |-- YY/          (ino 261)
	 *              |-- x/      (ino 260)
	 *
	 * Sequence of steps that led to the send snapshot:
	 * rm -f /a/b/c/foo.txt
	 * mv /a/b/y /a/b/YY
	 * mv /a/b/c/x /a/b/YY
	 * rmdir /a/b/c
	 *
	 * When the child is processed, its move/rename is delayed until its
	 * parent is processed (as explained above), but all other operations
	 * like update utimes, chown, chgrp, etc, are performed and the paths
	 * that it uses for those operations must use the orphanized name of
	 * its parent (the directory we're going to rm later), so we need to
	 * memorize that name.
	 *
	 * Indexed by the inode number of the directory to be deleted.
	 */
	struct rb_root orphan_dirs;
};

struct pending_dir_move {
	struct rb_node node;
	struct list_head list;
	u64 parent_ino;
	u64 ino;
	u64 gen;
	bool is_orphan;
	struct list_head update_refs;
};

struct waiting_dir_move {
	struct rb_node node;
	u64 ino;
	/*
	 * There might be some directory that could not be removed because it
	 * was waiting for this directory inode to be moved first. Therefore
	 * after this directory is moved, we can try to rmdir the inode
	 * rmdir_ino.
	 */
	u64 rmdir_ino;
	bool orphanized;
};

struct orphan_dir_info {
	struct rb_node node;
	u64 ino;
	u64 gen;
};

struct name_cache_entry {
	struct list_head list;
	/*
	 * radix_tree has only 32bit entries but we need to handle 64bit inums.
	 * We use the lower 32bit of the 64bit inum to store it in the tree. If
	 * more than one inum would fall into the same entry, we use radix_list
	 * to store the additional entries. radix_list is also used to store
	 * entries where two entries have the same inum but different
	 * generations.
	 */
	struct list_head radix_list;
	u64 ino;
	u64 gen;
	u64 parent_ino;
	u64 parent_gen;
	int ret;
	int need_later_update;
	int name_len;
	char name[];
};
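
/*
 * Illustrative sketch (not part of the original file, compiled out): how a
 * lookup against the cache described above works. Only the lower 32 bits of
 * the inum act as the radix tree key (the cast to unsigned long truncates on
 * 32bit builds); each tree slot holds a list head whose radix_list members
 * disambiguate colliding inums and generations. The helper name
 * example_name_cache_search() is made up; the real cache helpers appear
 * further down in this file.
 */
#if 0
static struct name_cache_entry *example_name_cache_search(struct send_ctx *sctx,
							  u64 ino, u64 gen)
{
	struct list_head *nce_head;
	struct name_cache_entry *cur;

	nce_head = radix_tree_lookup(&sctx->name_cache, (unsigned long)ino);
	if (!nce_head)
		return NULL;

	/* walk the collision list to match both inum and generation */
	list_for_each_entry(cur, nce_head, radix_list)
		if (cur->ino == ino && cur->gen == gen)
			return cur;
	return NULL;
}
#endif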

static int is_waiting_for_move(struct send_ctx *sctx, u64 ino);

static struct waiting_dir_move *
get_waiting_dir_move(struct send_ctx *sctx, u64 ino);

static int is_waiting_for_rm(struct send_ctx *sctx, u64 dir_ino);

static int need_send_hole(struct send_ctx *sctx)
{
	return (sctx->parent_root && !sctx->cur_inode_new &&
		!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted &&
		S_ISREG(sctx->cur_inode_mode));
}

static void fs_path_reset(struct fs_path *p)
{
	if (p->reversed) {
		p->start = p->buf + p->buf_len - 1;
		p->end = p->start;
		*p->start = 0;
	} else {
		p->start = p->buf;
		p->end = p->start;
		*p->start = 0;
	}
}

static struct fs_path *fs_path_alloc(void)
{
	struct fs_path *p;

	p = kmalloc(sizeof(*p), GFP_NOFS);
	if (!p)
		return NULL;
	p->reversed = 0;
	p->buf = p->inline_buf;
	p->buf_len = FS_PATH_INLINE_SIZE;
	fs_path_reset(p);
	return p;
}

static struct fs_path *fs_path_alloc_reversed(void)
{
	struct fs_path *p;

	p = fs_path_alloc();
	if (!p)
		return NULL;
	p->reversed = 1;
	fs_path_reset(p);
	return p;
}

static void fs_path_free(struct fs_path *p)
{
	if (!p)
		return;
	if (p->buf != p->inline_buf)
		kfree(p->buf);
	kfree(p);
}

static int fs_path_len(struct fs_path *p)
{
	return p->end - p->start;
}

static int fs_path_ensure_buf(struct fs_path *p, int len)
{
	char *tmp_buf;
	int path_len;
	int old_buf_len;

	len++;
	if (p->buf_len >= len)
		return 0;

	if (len > PATH_MAX) {
		WARN_ON(1);
		return -ENOMEM;
	}

	path_len = p->end - p->start;
	old_buf_len = p->buf_len;

	/*
	 * First time the inline_buf does not suffice
	 */
	if (p->buf == p->inline_buf) {
		tmp_buf = kmalloc(len, GFP_NOFS);
		if (tmp_buf)
			memcpy(tmp_buf, p->buf, old_buf_len);
	} else {
		tmp_buf = krealloc(p->buf, len, GFP_NOFS);
	}
	if (!tmp_buf)
		return -ENOMEM;
	p->buf = tmp_buf;
	/*
	 * The real size of the buffer is bigger; this lets the fast path
	 * above succeed most of the time.
	 */
	p->buf_len = ksize(p->buf);

	if (p->reversed) {
		tmp_buf = p->buf + old_buf_len - path_len - 1;
		p->end = p->buf + p->buf_len - 1;
		p->start = p->end - path_len;
		memmove(p->start, tmp_buf, path_len + 1);
	} else {
		p->start = p->buf;
		p->end = p->start + path_len;
	}
	return 0;
}

static int fs_path_prepare_for_add(struct fs_path *p, int name_len,
				   char **prepared)
{
	int ret;
	int new_len;

	new_len = p->end - p->start + name_len;
	if (p->start != p->end)
		new_len++;
	ret = fs_path_ensure_buf(p, new_len);
	if (ret < 0)
		goto out;

	if (p->reversed) {
		if (p->start != p->end)
			*--p->start = '/';
		p->start -= name_len;
		*prepared = p->start;
	} else {
		if (p->start != p->end)
			*p->end++ = '/';
		*prepared = p->end;
		p->end += name_len;
		*p->end = 0;
	}

out:
	return ret;
}

static int fs_path_add(struct fs_path *p, const char *name, int name_len)
{
	int ret;
	char *prepared;

	ret = fs_path_prepare_for_add(p, name_len, &prepared);
	if (ret < 0)
		goto out;
	memcpy(prepared, name, name_len);

out:
	return ret;
}

static int fs_path_add_path(struct fs_path *p, struct fs_path *p2)
{
	int ret;
	char *prepared;

	ret = fs_path_prepare_for_add(p, p2->end - p2->start, &prepared);
	if (ret < 0)
		goto out;
	memcpy(prepared, p2->start, p2->end - p2->start);

out:
	return ret;
}

static int fs_path_add_from_extent_buffer(struct fs_path *p,
					  struct extent_buffer *eb,
					  unsigned long off, int len)
{
	int ret;
	char *prepared;

	ret = fs_path_prepare_for_add(p, len, &prepared);
	if (ret < 0)
		goto out;

	read_extent_buffer(eb, prepared, off, len);

out:
	return ret;
}

static int fs_path_copy(struct fs_path *p, struct fs_path *from)
{
	int ret;

	p->reversed = from->reversed;
	fs_path_reset(p);

	ret = fs_path_add_path(p, from);

	return ret;
}

static void fs_path_unreverse(struct fs_path *p)
{
	char *tmp;
	int len;

	if (!p->reversed)
		return;

	tmp = p->start;
	len = p->end - p->start;
	p->start = p->buf;
	p->end = p->start + len;
	memmove(p->start, tmp, len + 1);
	p->reversed = 0;
}
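
/*
 * Usage sketch (illustrative only, compiled out): send builds paths bottom-up
 * while walking inode refs, so it uses a reversed fs_path, prepends each
 * ancestor name on the left and unreverses the result once the walk is done.
 * The function below is made up for illustration.
 */
#if 0
static int example_fs_path_usage(void)
{
	struct fs_path *p;
	int ret;

	p = fs_path_alloc_reversed();
	if (!p)
		return -ENOMEM;
	ret = fs_path_add(p, "file", 4);	/* leaf name first */
	if (ret < 0)
		goto out;
	ret = fs_path_add(p, "dir", 3);		/* then its parent */
	if (ret < 0)
		goto out;
	fs_path_unreverse(p);
	/* p->start now points at "dir/file" */
out:
	fs_path_free(p);
	return ret;
}
#endif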

static struct btrfs_path *alloc_path_for_send(void)
{
	struct btrfs_path *path;

	path = btrfs_alloc_path();
	if (!path)
		return NULL;
	path->search_commit_root = 1;
	path->skip_locking = 1;
	path->need_commit_sem = 1;
	return path;
}
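
/*
 * Note: write_buf() below writes from a kernel buffer through vfs_write(),
 * which expects a user space pointer; the temporary set_fs(KERNEL_DS) switch
 * lifts the address space check for the duration of the write.
 */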
static int write_buf(struct file *filp, const void *buf, u32 len, loff_t *off)
{
	int ret;
	mm_segment_t old_fs;
	u32 pos = 0;

	old_fs = get_fs();
	set_fs(KERNEL_DS);

	while (pos < len) {
		ret = vfs_write(filp, (__force const char __user *)buf + pos,
				len - pos, off);
		/* TODO handle that correctly */
		/*if (ret == -ERESTARTSYS) {
			continue;
		}*/
		if (ret < 0)
			goto out;
		if (ret == 0) {
			ret = -EIO;
			goto out;
		}
		pos += ret;
	}

	ret = 0;

out:
	set_fs(old_fs);
	return ret;
}

static int tlv_put(struct send_ctx *sctx, u16 attr, const void *data, int len)
{
	struct btrfs_tlv_header *hdr;
	int total_len = sizeof(*hdr) + len;
	int left = sctx->send_max_size - sctx->send_size;

	if (unlikely(left < total_len))
		return -EOVERFLOW;

	hdr = (struct btrfs_tlv_header *) (sctx->send_buf + sctx->send_size);
	hdr->tlv_type = cpu_to_le16(attr);
	hdr->tlv_len = cpu_to_le16(len);
	memcpy(hdr + 1, data, len);
	sctx->send_size += total_len;

	return 0;
}
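
/*
 * Wire format produced by tlv_put() (all fields little-endian, matching
 * struct btrfs_tlv_header in send.h):
 *
 *	u16 tlv_type	attribute id (one of BTRFS_SEND_A_*)
 *	u16 tlv_len	number of data bytes that follow
 *	u8  data[tlv_len]
 *
 * Attributes are packed back to back into sctx->send_buf right after the
 * command header; send_cmd() later fills in the total length and crc.
 */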

#define TLV_PUT_DEFINE_INT(bits) \
	static int tlv_put_u##bits(struct send_ctx *sctx,		\
			u##bits attr, u##bits value)			\
	{								\
		__le##bits __tmp = cpu_to_le##bits(value);		\
		return tlv_put(sctx, attr, &__tmp, sizeof(__tmp));	\
	}

TLV_PUT_DEFINE_INT(64)

static int tlv_put_string(struct send_ctx *sctx, u16 attr,
			  const char *str, int len)
{
	if (len == -1)
		len = strlen(str);
	return tlv_put(sctx, attr, str, len);
}

static int tlv_put_uuid(struct send_ctx *sctx, u16 attr,
			const u8 *uuid)
{
	return tlv_put(sctx, attr, uuid, BTRFS_UUID_SIZE);
}

static int tlv_put_btrfs_timespec(struct send_ctx *sctx, u16 attr,
				  struct extent_buffer *eb,
				  struct btrfs_timespec *ts)
{
	struct btrfs_timespec bts;

	read_extent_buffer(eb, &bts, (unsigned long)ts, sizeof(bts));
	return tlv_put(sctx, attr, &bts, sizeof(bts));
}


#define TLV_PUT(sctx, attrtype, attrlen, data) \
	do { \
		ret = tlv_put(sctx, attrtype, attrlen, data); \
		if (ret < 0) \
			goto tlv_put_failure; \
	} while (0)

#define TLV_PUT_INT(sctx, attrtype, bits, value) \
	do { \
		ret = tlv_put_u##bits(sctx, attrtype, value); \
		if (ret < 0) \
			goto tlv_put_failure; \
	} while (0)

#define TLV_PUT_U8(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 8, data)
#define TLV_PUT_U16(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 16, data)
#define TLV_PUT_U32(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 32, data)
#define TLV_PUT_U64(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 64, data)
#define TLV_PUT_STRING(sctx, attrtype, str, len) \
	do { \
		ret = tlv_put_string(sctx, attrtype, str, len); \
		if (ret < 0) \
			goto tlv_put_failure; \
	} while (0)
#define TLV_PUT_PATH(sctx, attrtype, p) \
	do { \
		ret = tlv_put_string(sctx, attrtype, p->start, \
				     p->end - p->start); \
		if (ret < 0) \
			goto tlv_put_failure; \
	} while (0)
#define TLV_PUT_UUID(sctx, attrtype, uuid) \
	do { \
		ret = tlv_put_uuid(sctx, attrtype, uuid); \
		if (ret < 0) \
			goto tlv_put_failure; \
	} while (0)
#define TLV_PUT_BTRFS_TIMESPEC(sctx, attrtype, eb, ts) \
	do { \
		ret = tlv_put_btrfs_timespec(sctx, attrtype, eb, ts); \
		if (ret < 0) \
			goto tlv_put_failure; \
	} while (0)

static int send_header(struct send_ctx *sctx)
{
	struct btrfs_stream_header hdr;

	strcpy(hdr.magic, BTRFS_SEND_STREAM_MAGIC);
	hdr.version = cpu_to_le32(BTRFS_SEND_STREAM_VERSION);

	return write_buf(sctx->send_filp, &hdr, sizeof(hdr),
					&sctx->send_off);
}

/*
 * For each command/item we want to send to userspace, we call this function.
 */
static int begin_cmd(struct send_ctx *sctx, int cmd)
{
	struct btrfs_cmd_header *hdr;

	if (WARN_ON(!sctx->send_buf))
		return -EINVAL;

	BUG_ON(sctx->send_size);

	sctx->send_size += sizeof(*hdr);
	hdr = (struct btrfs_cmd_header *)sctx->send_buf;
	hdr->cmd = cpu_to_le16(cmd);

	return 0;
}

static int send_cmd(struct send_ctx *sctx)
{
	int ret;
	struct btrfs_cmd_header *hdr;
	u32 crc;

	hdr = (struct btrfs_cmd_header *)sctx->send_buf;
	hdr->len = cpu_to_le32(sctx->send_size - sizeof(*hdr));
	hdr->crc = 0;

	crc = btrfs_crc32c(0, (unsigned char *)sctx->send_buf, sctx->send_size);
	hdr->crc = cpu_to_le32(crc);

	ret = write_buf(sctx->send_filp, sctx->send_buf, sctx->send_size,
					&sctx->send_off);

	sctx->total_send_size += sctx->send_size;
	sctx->cmd_send_size[le16_to_cpu(hdr->cmd)] += sctx->send_size;
	sctx->send_size = 0;

	return ret;
}
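
/*
 * Resulting command framing (struct btrfs_cmd_header, little-endian):
 *
 *	u32 len		size of the attribute data, excluding this header
 *	u16 cmd		command id (one of BTRFS_SEND_C_*)
 *	u32 crc		crc32c over the whole command, computed with the
 *			crc field itself zeroed
 *
 * A full stream is the btrfs_stream_header from send_header() followed by a
 * sequence of such commands.
 */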

/*
 * Sends a move instruction to user space
 */
static int send_rename(struct send_ctx *sctx,
		     struct fs_path *from, struct fs_path *to)
{
	int ret;

verbose_printk("btrfs: send_rename %s -> %s\n", from->start, to->start);

	ret = begin_cmd(sctx, BTRFS_SEND_C_RENAME);
	if (ret < 0)
		goto out;

	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, from);
	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH_TO, to);

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	return ret;
}

/*
 * Sends a link instruction to user space
 */
static int send_link(struct send_ctx *sctx,
		     struct fs_path *path, struct fs_path *lnk)
{
	int ret;

verbose_printk("btrfs: send_link %s -> %s\n", path->start, lnk->start);

	ret = begin_cmd(sctx, BTRFS_SEND_C_LINK);
	if (ret < 0)
		goto out;

	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH_LINK, lnk);

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	return ret;
}

/*
 * Sends an unlink instruction to user space
 */
static int send_unlink(struct send_ctx *sctx, struct fs_path *path)
{
	int ret;

verbose_printk("btrfs: send_unlink %s\n", path->start);

	ret = begin_cmd(sctx, BTRFS_SEND_C_UNLINK);
	if (ret < 0)
		goto out;

	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	return ret;
}

/*
 * Sends a rmdir instruction to user space
 */
static int send_rmdir(struct send_ctx *sctx, struct fs_path *path)
{
	int ret;

verbose_printk("btrfs: send_rmdir %s\n", path->start);

	ret = begin_cmd(sctx, BTRFS_SEND_C_RMDIR);
	if (ret < 0)
		goto out;

	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	return ret;
}

/*
 * Helper function to retrieve some fields from an inode item.
 */
static int __get_inode_info(struct btrfs_root *root, struct btrfs_path *path,
			  u64 ino, u64 *size, u64 *gen, u64 *mode, u64 *uid,
			  u64 *gid, u64 *rdev)
{
	int ret;
	struct btrfs_inode_item *ii;
	struct btrfs_key key;

	key.objectid = ino;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret) {
		if (ret > 0)
			ret = -ENOENT;
		return ret;
	}

	ii = btrfs_item_ptr(path->nodes[0], path->slots[0],
			struct btrfs_inode_item);
	if (size)
		*size = btrfs_inode_size(path->nodes[0], ii);
	if (gen)
		*gen = btrfs_inode_generation(path->nodes[0], ii);
	if (mode)
		*mode = btrfs_inode_mode(path->nodes[0], ii);
	if (uid)
		*uid = btrfs_inode_uid(path->nodes[0], ii);
	if (gid)
		*gid = btrfs_inode_gid(path->nodes[0], ii);
	if (rdev)
		*rdev = btrfs_inode_rdev(path->nodes[0], ii);

	return ret;
}

static int get_inode_info(struct btrfs_root *root,
			  u64 ino, u64 *size, u64 *gen,
			  u64 *mode, u64 *uid, u64 *gid,
			  u64 *rdev)
{
	struct btrfs_path *path;
	int ret;

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;
	ret = __get_inode_info(root, path, ino, size, gen, mode, uid, gid,
			       rdev);
	btrfs_free_path(path);
	return ret;
}

typedef int (*iterate_inode_ref_t)(int num, u64 dir, int index,
				   struct fs_path *p,
				   void *ctx);

/*
 * Helper function to iterate the entries in ONE btrfs_inode_ref or
 * btrfs_inode_extref.
 * The iterate callback may return a non-zero value to stop iteration. This can
 * be a negative value for error codes or 1 to simply stop it.
 *
 * path must point to the INODE_REF or INODE_EXTREF when called.
 */
static int iterate_inode_ref(struct btrfs_root *root, struct btrfs_path *path,
			     struct btrfs_key *found_key, int resolve,
			     iterate_inode_ref_t iterate, void *ctx)
{
	struct extent_buffer *eb = path->nodes[0];
	struct btrfs_item *item;
	struct btrfs_inode_ref *iref;
	struct btrfs_inode_extref *extref;
	struct btrfs_path *tmp_path;
	struct fs_path *p;
	u32 cur = 0;
	u32 total;
	int slot = path->slots[0];
	u32 name_len;
	char *start;
	int ret = 0;
	int num = 0;
	int index;
	u64 dir;
	unsigned long name_off;
	unsigned long elem_size;
	unsigned long ptr;

	p = fs_path_alloc_reversed();
	if (!p)
		return -ENOMEM;

	tmp_path = alloc_path_for_send();
	if (!tmp_path) {
		fs_path_free(p);
		return -ENOMEM;
	}


	if (found_key->type == BTRFS_INODE_REF_KEY) {
		ptr = (unsigned long)btrfs_item_ptr(eb, slot,
						    struct btrfs_inode_ref);
		item = btrfs_item_nr(slot);
		total = btrfs_item_size(eb, item);
		elem_size = sizeof(*iref);
	} else {
		ptr = btrfs_item_ptr_offset(eb, slot);
		total = btrfs_item_size_nr(eb, slot);
		elem_size = sizeof(*extref);
	}

	while (cur < total) {
		fs_path_reset(p);

		if (found_key->type == BTRFS_INODE_REF_KEY) {
			iref = (struct btrfs_inode_ref *)(ptr + cur);
			name_len = btrfs_inode_ref_name_len(eb, iref);
			name_off = (unsigned long)(iref + 1);
			index = btrfs_inode_ref_index(eb, iref);
			dir = found_key->offset;
		} else {
			extref = (struct btrfs_inode_extref *)(ptr + cur);
			name_len = btrfs_inode_extref_name_len(eb, extref);
			name_off = (unsigned long)&extref->name;
			index = btrfs_inode_extref_index(eb, extref);
			dir = btrfs_inode_extref_parent(eb, extref);
		}

		if (resolve) {
			start = btrfs_ref_to_path(root, tmp_path, name_len,
						  name_off, eb, dir,
						  p->buf, p->buf_len);
			if (IS_ERR(start)) {
				ret = PTR_ERR(start);
				goto out;
			}
			if (start < p->buf) {
				/* overflow, try again with a larger buffer */
				ret = fs_path_ensure_buf(p,
						p->buf_len + p->buf - start);
				if (ret < 0)
					goto out;
				start = btrfs_ref_to_path(root, tmp_path,
							  name_len, name_off,
							  eb, dir,
							  p->buf, p->buf_len);
				if (IS_ERR(start)) {
					ret = PTR_ERR(start);
					goto out;
				}
				BUG_ON(start < p->buf);
			}
			p->start = start;
		} else {
			ret = fs_path_add_from_extent_buffer(p, eb, name_off,
							     name_len);
			if (ret < 0)
				goto out;
		}

		cur += elem_size + name_len;
		ret = iterate(num, dir, index, p, ctx);
		if (ret)
			goto out;
		num++;
	}

out:
	btrfs_free_path(tmp_path);
	fs_path_free(p);
	return ret;
}

typedef int (*iterate_dir_item_t)(int num, struct btrfs_key *di_key,
				  const char *name, int name_len,
				  const char *data, int data_len,
				  u8 type, void *ctx);

/*
 * Helper function to iterate the entries in ONE btrfs_dir_item.
 * The iterate callback may return a non-zero value to stop iteration. This can
 * be a negative value for error codes or 1 to simply stop it.
 *
 * path must point to the dir item when called.
 */
static int iterate_dir_item(struct btrfs_root *root, struct btrfs_path *path,
			    struct btrfs_key *found_key,
			    iterate_dir_item_t iterate, void *ctx)
{
	int ret = 0;
	struct extent_buffer *eb;
	struct btrfs_item *item;
	struct btrfs_dir_item *di;
	struct btrfs_key di_key;
	char *buf = NULL;
	int buf_len;
	u32 name_len;
	u32 data_len;
	u32 cur;
	u32 len;
	u32 total;
	int slot;
	int num;
	u8 type;

	/*
	 * Start with a small buffer (1 page). If later we end up needing more
	 * space, which can happen for xattrs on a fs with a leaf size greater
	 * than the page size, attempt to increase the buffer. Typically xattr
	 * values are small.
	 */
	buf_len = PATH_MAX;
	buf = kmalloc(buf_len, GFP_NOFS);
	if (!buf) {
		ret = -ENOMEM;
		goto out;
	}

	eb = path->nodes[0];
	slot = path->slots[0];
	item = btrfs_item_nr(slot);
	di = btrfs_item_ptr(eb, slot, struct btrfs_dir_item);
	cur = 0;
	len = 0;
	total = btrfs_item_size(eb, item);

	num = 0;
	while (cur < total) {
		name_len = btrfs_dir_name_len(eb, di);
		data_len = btrfs_dir_data_len(eb, di);
		type = btrfs_dir_type(eb, di);
		btrfs_dir_item_key_to_cpu(eb, di, &di_key);

		if (type == BTRFS_FT_XATTR) {
			if (name_len > XATTR_NAME_MAX) {
				ret = -ENAMETOOLONG;
				goto out;
			}
			if (name_len + data_len > BTRFS_MAX_XATTR_SIZE(root)) {
				ret = -E2BIG;
				goto out;
			}
		} else {
			/*
			 * Path too long
			 */
			if (name_len + data_len > PATH_MAX) {
				ret = -ENAMETOOLONG;
				goto out;
			}
		}

		if (name_len + data_len > buf_len) {
			buf_len = name_len + data_len;
			if (is_vmalloc_addr(buf)) {
				vfree(buf);
				buf = NULL;
			} else {
				char *tmp = krealloc(buf, buf_len,
						GFP_NOFS | __GFP_NOWARN);

				if (!tmp)
					kfree(buf);
				buf = tmp;
			}
			if (!buf) {
				buf = vmalloc(buf_len);
				if (!buf) {
					ret = -ENOMEM;
					goto out;
				}
			}
		}

		read_extent_buffer(eb, buf, (unsigned long)(di + 1),
				name_len + data_len);

		len = sizeof(*di) + name_len + data_len;
		di = (struct btrfs_dir_item *)((char *)di + len);
		cur += len;

		ret = iterate(num, &di_key, buf, name_len, buf + name_len,
				data_len, type, ctx);
		if (ret < 0)
			goto out;
		if (ret) {
			ret = 0;
			goto out;
		}

		num++;
	}

out:
	kvfree(buf);
	return ret;
}

static int __copy_first_ref(int num, u64 dir, int index,
			    struct fs_path *p, void *ctx)
{
	int ret;
	struct fs_path *pt = ctx;

	ret = fs_path_copy(pt, p);
	if (ret < 0)
		return ret;

	/* we want the first only */
	return 1;
}

/*
 * Retrieve the first path of an inode. If an inode has more than one
 * ref/hardlink, this is ignored.
 */
static int get_inode_path(struct btrfs_root *root,
			  u64 ino, struct fs_path *path)
{
	int ret;
	struct btrfs_key key, found_key;
	struct btrfs_path *p;

	p = alloc_path_for_send();
	if (!p)
		return -ENOMEM;

	fs_path_reset(path);

	key.objectid = ino;
	key.type = BTRFS_INODE_REF_KEY;
	key.offset = 0;

	ret = btrfs_search_slot_for_read(root, &key, p, 1, 0);
	if (ret < 0)
		goto out;
	if (ret) {
		ret = 1;
		goto out;
	}
	btrfs_item_key_to_cpu(p->nodes[0], &found_key, p->slots[0]);
	if (found_key.objectid != ino ||
	    (found_key.type != BTRFS_INODE_REF_KEY &&
	     found_key.type != BTRFS_INODE_EXTREF_KEY)) {
		ret = -ENOENT;
		goto out;
	}

	ret = iterate_inode_ref(root, p, &found_key, 1,
				__copy_first_ref, path);
	if (ret < 0)
		goto out;
	ret = 0;

out:
	btrfs_free_path(p);
	return ret;
}

struct backref_ctx {
	struct send_ctx *sctx;

	struct btrfs_path *path;
	/* number of total found references */
	u64 found;

	/*
	 * used for clones found in send_root. Clones found behind cur_objectid
	 * and cur_offset are not considered as allowed clones.
	 */
	u64 cur_objectid;
	u64 cur_offset;

	/* may be truncated in case it's the last extent in a file */
	u64 extent_len;

	/* data offset in the file extent item */
	u64 data_offset;

	/* Just to check for bugs in backref resolving */
	int found_itself;
};

static int __clone_root_cmp_bsearch(const void *key, const void *elt)
{
	u64 root = (u64)(uintptr_t)key;
	struct clone_root *cr = (struct clone_root *)elt;

	if (root < cr->root->objectid)
		return -1;
	if (root > cr->root->objectid)
		return 1;
	return 0;
}

static int __clone_root_cmp_sort(const void *e1, const void *e2)
{
	struct clone_root *cr1 = (struct clone_root *)e1;
	struct clone_root *cr2 = (struct clone_root *)e2;

	if (cr1->root->objectid < cr2->root->objectid)
		return -1;
	if (cr1->root->objectid > cr2->root->objectid)
		return 1;
	return 0;
}

/*
 * Called for every backref that is found for the current extent.
 * Results are collected in sctx->clone_roots->ino/offset/found_refs
 */
static int __iterate_backrefs(u64 ino, u64 offset, u64 root, void *ctx_)
{
	struct backref_ctx *bctx = ctx_;
	struct clone_root *found;
	int ret;
	u64 i_size;

	/* First check if the root is in the list of accepted clone sources */
	found = bsearch((void *)(uintptr_t)root, bctx->sctx->clone_roots,
			bctx->sctx->clone_roots_cnt,
			sizeof(struct clone_root),
			__clone_root_cmp_bsearch);
	if (!found)
		return 0;

	if (found->root == bctx->sctx->send_root &&
	    ino == bctx->cur_objectid &&
	    offset == bctx->cur_offset) {
		bctx->found_itself = 1;
	}

	/*
	 * There are inodes that have extents that lie behind their i_size.
	 * Don't accept clones from these extents.
	 */
	ret = __get_inode_info(found->root, bctx->path, ino, &i_size, NULL, NULL,
			       NULL, NULL, NULL);
	btrfs_release_path(bctx->path);
	if (ret < 0)
		return ret;

	if (offset + bctx->data_offset + bctx->extent_len > i_size)
		return 0;

	/*
	 * Make sure we don't consider clones from send_root that are
	 * behind the current inode/offset.
	 */
	if (found->root == bctx->sctx->send_root) {
		/*
		 * TODO for the moment we don't accept clones from the inode
		 * that is currently being sent. We may change this when
		 * BTRFS_IOC_CLONE_RANGE supports cloning from and to the same
		 * file.
		 */
		if (ino >= bctx->cur_objectid)
			return 0;
#if 0
		if (ino > bctx->cur_objectid)
			return 0;
		if (offset + bctx->extent_len > bctx->cur_offset)
			return 0;
#endif
	}

	bctx->found++;
	found->found_refs++;
	if (ino < found->ino) {
		found->ino = ino;
		found->offset = offset;
	} else if (found->ino == ino) {
		/*
		 * same extent found more than once in the same file.
		 */
		if (found->offset > offset + bctx->extent_len)
			found->offset = offset;
	}

	return 0;
}
  1084. /*
  1085. * Given an inode, offset and extent item, it finds a good clone for a clone
  1086. * instruction. Returns -ENOENT when none could be found. The function makes
  1087. * sure that the returned clone is usable at the point where sending is at the
  1088. * moment. This means, that no clones are accepted which lie behind the current
  1089. * inode+offset.
  1090. *
  1091. * path must point to the extent item when called.
  1092. */
  1093. static int find_extent_clone(struct send_ctx *sctx,
  1094. struct btrfs_path *path,
  1095. u64 ino, u64 data_offset,
  1096. u64 ino_size,
  1097. struct clone_root **found)
  1098. {
  1099. int ret;
  1100. int extent_type;
  1101. u64 logical;
  1102. u64 disk_byte;
  1103. u64 num_bytes;
  1104. u64 extent_item_pos;
  1105. u64 flags = 0;
  1106. struct btrfs_file_extent_item *fi;
  1107. struct extent_buffer *eb = path->nodes[0];
  1108. struct backref_ctx *backref_ctx = NULL;
  1109. struct clone_root *cur_clone_root;
  1110. struct btrfs_key found_key;
  1111. struct btrfs_path *tmp_path;
  1112. int compressed;
  1113. u32 i;
  1114. tmp_path = alloc_path_for_send();
  1115. if (!tmp_path)
  1116. return -ENOMEM;
  1117. /* We only use this path under the commit sem */
  1118. tmp_path->need_commit_sem = 0;
  1119. backref_ctx = kmalloc(sizeof(*backref_ctx), GFP_NOFS);
  1120. if (!backref_ctx) {
  1121. ret = -ENOMEM;
  1122. goto out;
  1123. }
  1124. backref_ctx->path = tmp_path;
  1125. if (data_offset >= ino_size) {
  1126. /*
  1127. * There may be extents that lie behind the file's size.
  1128. * I at least had this in combination with snapshotting while
  1129. * writing large files.
  1130. */
  1131. ret = 0;
  1132. goto out;
  1133. }
  1134. fi = btrfs_item_ptr(eb, path->slots[0],
  1135. struct btrfs_file_extent_item);
  1136. extent_type = btrfs_file_extent_type(eb, fi);
  1137. if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
  1138. ret = -ENOENT;
  1139. goto out;
  1140. }
  1141. compressed = btrfs_file_extent_compression(eb, fi);
  1142. num_bytes = btrfs_file_extent_num_bytes(eb, fi);
  1143. disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
  1144. if (disk_byte == 0) {
  1145. ret = -ENOENT;
  1146. goto out;
  1147. }
  1148. logical = disk_byte + btrfs_file_extent_offset(eb, fi);
  1149. down_read(&sctx->send_root->fs_info->commit_root_sem);
  1150. ret = extent_from_logical(sctx->send_root->fs_info, disk_byte, tmp_path,
  1151. &found_key, &flags);
  1152. up_read(&sctx->send_root->fs_info->commit_root_sem);
  1153. btrfs_release_path(tmp_path);
  1154. if (ret < 0)
  1155. goto out;
  1156. if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
  1157. ret = -EIO;
  1158. goto out;
  1159. }
  1160. /*
  1161. * Setup the clone roots.
  1162. */
  1163. for (i = 0; i < sctx->clone_roots_cnt; i++) {
  1164. cur_clone_root = sctx->clone_roots + i;
  1165. cur_clone_root->ino = (u64)-1;
  1166. cur_clone_root->offset = 0;
  1167. cur_clone_root->found_refs = 0;
  1168. }
  1169. backref_ctx->sctx = sctx;
  1170. backref_ctx->found = 0;
  1171. backref_ctx->cur_objectid = ino;
  1172. backref_ctx->cur_offset = data_offset;
  1173. backref_ctx->found_itself = 0;
  1174. backref_ctx->extent_len = num_bytes;
  1175. /*
  1176. * For non-compressed extents iterate_extent_inodes() gives us extent
  1177. * offsets that already take into account the data offset, but not for
  1178. * compressed extents, since the offset is logical and not relative to
  1179. * the physical extent locations. We must take this into account to
  1180. * avoid sending clone offsets that go beyond the source file's size,
  1181. * which would result in the clone ioctl failing with -EINVAL on the
  1182. * receiving end.
  1183. */
  1184. if (compressed == BTRFS_COMPRESS_NONE)
  1185. backref_ctx->data_offset = 0;
  1186. else
  1187. backref_ctx->data_offset = btrfs_file_extent_offset(eb, fi);
  1188. /*
  1189. * The last extent of a file may be too large due to page alignment.
  1190. * We need to adjust extent_len in this case so that the checks in
  1191. * __iterate_backrefs work.
  1192. */
  1193. if (data_offset + num_bytes >= ino_size)
  1194. backref_ctx->extent_len = ino_size - data_offset;
  1195. /*
  1196. * Now collect all backrefs.
  1197. */
  1198. if (compressed == BTRFS_COMPRESS_NONE)
  1199. extent_item_pos = logical - found_key.objectid;
  1200. else
  1201. extent_item_pos = 0;
  1202. ret = iterate_extent_inodes(sctx->send_root->fs_info,
  1203. found_key.objectid, extent_item_pos, 1,
  1204. __iterate_backrefs, backref_ctx);
  1205. if (ret < 0)
  1206. goto out;
  1207. if (!backref_ctx->found_itself) {
  1208. /* found a bug in backref code? */
  1209. ret = -EIO;
  1210. btrfs_err(sctx->send_root->fs_info, "did not find backref in "
  1211. "send_root. inode=%llu, offset=%llu, "
  1212. "disk_byte=%llu found extent=%llu",
  1213. ino, data_offset, disk_byte, found_key.objectid);
  1214. goto out;
  1215. }
  1216. verbose_printk(KERN_DEBUG "btrfs: find_extent_clone: data_offset=%llu, "
  1217. "ino=%llu, "
  1218. "num_bytes=%llu, logical=%llu\n",
  1219. data_offset, ino, num_bytes, logical);
  1220. if (!backref_ctx->found)
  1221. verbose_printk("btrfs: no clones found\n");
  1222. cur_clone_root = NULL;
  1223. for (i = 0; i < sctx->clone_roots_cnt; i++) {
  1224. if (sctx->clone_roots[i].found_refs) {
  1225. if (!cur_clone_root)
  1226. cur_clone_root = sctx->clone_roots + i;
  1227. else if (sctx->clone_roots[i].root == sctx->send_root)
  1228. /* prefer clones from send_root over others */
  1229. cur_clone_root = sctx->clone_roots + i;
  1230. }
  1231. }
  1232. if (cur_clone_root) {
  1233. *found = cur_clone_root;
  1234. ret = 0;
  1235. } else {
  1236. ret = -ENOENT;
  1237. }
  1238. out:
  1239. btrfs_free_path(tmp_path);
  1240. kfree(backref_ctx);
  1241. return ret;
  1242. }
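/*
 * Read the target of a symlink into dest. As the BUG_ONs below assert,
 * symlink targets are stored as a single inline, uncompressed extent at
 * file offset 0.
 */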
static int read_symlink(struct btrfs_root *root,
			u64 ino,
			struct fs_path *dest)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_file_extent_item *ei;
	u8 type;
	u8 compression;
	unsigned long off;
	int len;

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	key.objectid = ino;
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = 0;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	BUG_ON(ret);

	ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
			    struct btrfs_file_extent_item);
	type = btrfs_file_extent_type(path->nodes[0], ei);
	compression = btrfs_file_extent_compression(path->nodes[0], ei);
	BUG_ON(type != BTRFS_FILE_EXTENT_INLINE);
	BUG_ON(compression);

	off = btrfs_file_extent_inline_start(ei);
	len = btrfs_file_extent_inline_len(path->nodes[0], path->slots[0], ei);

	ret = fs_path_add_from_extent_buffer(dest, path->nodes[0], off, len);

out:
	btrfs_free_path(path);
	return ret;
}

/*
 * Helper function to generate a file name that is unique in the root of
 * send_root and parent_root. This is used to generate names for orphan inodes.
 */
static int gen_unique_name(struct send_ctx *sctx,
			   u64 ino, u64 gen,
			   struct fs_path *dest)
{
	int ret = 0;
	struct btrfs_path *path;
	struct btrfs_dir_item *di;
	char tmp[64];
	int len;
	u64 idx = 0;

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	while (1) {
		len = snprintf(tmp, sizeof(tmp), "o%llu-%llu-%llu",
				ino, gen, idx);
		ASSERT(len < sizeof(tmp));

		di = btrfs_lookup_dir_item(NULL, sctx->send_root,
				path, BTRFS_FIRST_FREE_OBJECTID,
				tmp, strlen(tmp), 0);
		btrfs_release_path(path);
		if (IS_ERR(di)) {
			ret = PTR_ERR(di);
			goto out;
		}
		if (di) {
			/* not unique, try again */
			idx++;
			continue;
		}

		if (!sctx->parent_root) {
			/* unique */
			ret = 0;
			break;
		}

		di = btrfs_lookup_dir_item(NULL, sctx->parent_root,
				path, BTRFS_FIRST_FREE_OBJECTID,
				tmp, strlen(tmp), 0);
		btrfs_release_path(path);
		if (IS_ERR(di)) {
			ret = PTR_ERR(di);
			goto out;
		}
		if (di) {
			/* not unique, try again */
			idx++;
			continue;
		}
		/* unique */
		break;
	}

	ret = fs_path_add(dest, tmp, strlen(tmp));

out:
	btrfs_free_path(path);
	return ret;
}

enum inode_state {
	inode_state_no_change,
	inode_state_will_create,
	inode_state_did_create,
	inode_state_will_delete,
	inode_state_did_delete,
};

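/*
 * Determine the state of an inode relative to both trees: present only in
 * the send root (created), only in the parent root (deleted), or in both
 * with a matching generation (no change). sctx->send_progress decides
 * whether the create/delete was already processed ("did") or is still
 * pending ("will").
 */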
static int get_cur_inode_state(struct send_ctx *sctx, u64 ino, u64 gen)
{
	int ret;
	int left_ret;
	int right_ret;
	u64 left_gen;
	u64 right_gen;

	ret = get_inode_info(sctx->send_root, ino, NULL, &left_gen, NULL, NULL,
			NULL, NULL);
	if (ret < 0 && ret != -ENOENT)
		goto out;
	left_ret = ret;

	if (!sctx->parent_root) {
		right_ret = -ENOENT;
	} else {
		ret = get_inode_info(sctx->parent_root, ino, NULL, &right_gen,
				NULL, NULL, NULL, NULL);
		if (ret < 0 && ret != -ENOENT)
			goto out;
		right_ret = ret;
	}

	if (!left_ret && !right_ret) {
		if (left_gen == gen && right_gen == gen) {
			ret = inode_state_no_change;
		} else if (left_gen == gen) {
			if (ino < sctx->send_progress)
				ret = inode_state_did_create;
			else
				ret = inode_state_will_create;
		} else if (right_gen == gen) {
			if (ino < sctx->send_progress)
				ret = inode_state_did_delete;
			else
				ret = inode_state_will_delete;
		} else {
			ret = -ENOENT;
		}
	} else if (!left_ret) {
		if (left_gen == gen) {
			if (ino < sctx->send_progress)
				ret = inode_state_did_create;
			else
				ret = inode_state_will_create;
		} else {
			ret = -ENOENT;
		}
	} else if (!right_ret) {
		if (right_gen == gen) {
			if (ino < sctx->send_progress)
				ret = inode_state_did_delete;
			else
				ret = inode_state_will_delete;
		} else {
			ret = -ENOENT;
		}
	} else {
		ret = -ENOENT;
	}

out:
	return ret;
}

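/*
 * Returns 1 if the inode exists from the receiver's point of view at the
 * current point of the stream, 0 if it does not, < 0 on error.
 */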
static int is_inode_existent(struct send_ctx *sctx, u64 ino, u64 gen)
{
	int ret;

	ret = get_cur_inode_state(sctx, ino, gen);
	if (ret < 0)
		goto out;

	if (ret == inode_state_no_change ||
	    ret == inode_state_did_create ||
	    ret == inode_state_will_delete)
		ret = 1;
	else
		ret = 0;

out:
	return ret;
}

/*
 * Helper function to lookup a dir item in a dir.
 */
static int lookup_dir_item_inode(struct btrfs_root *root,
				 u64 dir, const char *name, int name_len,
				 u64 *found_inode,
				 u8 *found_type)
{
	int ret = 0;
	struct btrfs_dir_item *di;
	struct btrfs_key key;
	struct btrfs_path *path;

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	di = btrfs_lookup_dir_item(NULL, root, path,
			dir, name, name_len, 0);
	if (!di) {
		ret = -ENOENT;
		goto out;
	}
	if (IS_ERR(di)) {
		ret = PTR_ERR(di);
		goto out;
	}
	btrfs_dir_item_key_to_cpu(path->nodes[0], di, &key);
	if (key.type == BTRFS_ROOT_ITEM_KEY) {
		ret = -ENOENT;
		goto out;
	}
	*found_inode = key.objectid;
	*found_type = btrfs_dir_type(path->nodes[0], di);

out:
	btrfs_free_path(path);
	return ret;
}

/*
 * Looks up the first btrfs_inode_ref of a given ino. It returns the parent dir,
 * generation of the parent dir and the name of the dir entry.
 */
static int get_first_ref(struct btrfs_root *root, u64 ino,
			 u64 *dir, u64 *dir_gen, struct fs_path *name)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_path *path;
	int len;
	u64 parent_dir;

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	key.objectid = ino;
	key.type = BTRFS_INODE_REF_KEY;
	key.offset = 0;

	ret = btrfs_search_slot_for_read(root, &key, path, 1, 0);
	if (ret < 0)
		goto out;
	if (!ret)
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				path->slots[0]);
	if (ret || found_key.objectid != ino ||
	    (found_key.type != BTRFS_INODE_REF_KEY &&
	     found_key.type != BTRFS_INODE_EXTREF_KEY)) {
		ret = -ENOENT;
		goto out;
	}

	if (found_key.type == BTRFS_INODE_REF_KEY) {
		struct btrfs_inode_ref *iref;
		iref = btrfs_item_ptr(path->nodes[0], path->slots[0],
				      struct btrfs_inode_ref);
		len = btrfs_inode_ref_name_len(path->nodes[0], iref);
		ret = fs_path_add_from_extent_buffer(name, path->nodes[0],
						     (unsigned long)(iref + 1),
						     len);
		parent_dir = found_key.offset;
	} else {
		struct btrfs_inode_extref *extref;
		extref = btrfs_item_ptr(path->nodes[0], path->slots[0],
					struct btrfs_inode_extref);
		len = btrfs_inode_extref_name_len(path->nodes[0], extref);
		ret = fs_path_add_from_extent_buffer(name, path->nodes[0],
					(unsigned long)&extref->name, len);
		parent_dir = btrfs_inode_extref_parent(path->nodes[0], extref);
	}
	if (ret < 0)
		goto out;
	btrfs_release_path(path);

	if (dir_gen) {
		ret = get_inode_info(root, parent_dir, NULL, dir_gen, NULL,
				     NULL, NULL, NULL);
		if (ret < 0)
			goto out;
	}

	*dir = parent_dir;

out:
	btrfs_free_path(path);
	return ret;
}

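/*
 * Returns 1 if dir/name is the first ref of the given inode, 0 if not and
 * < 0 on error.
 */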
static int is_first_ref(struct btrfs_root *root,
			u64 ino, u64 dir,
			const char *name, int name_len)
{
	int ret;
	struct fs_path *tmp_name;
	u64 tmp_dir;

	tmp_name = fs_path_alloc();
	if (!tmp_name)
		return -ENOMEM;

	ret = get_first_ref(root, ino, &tmp_dir, NULL, tmp_name);
	if (ret < 0)
		goto out;

	if (dir != tmp_dir || name_len != fs_path_len(tmp_name)) {
		ret = 0;
		goto out;
	}

	ret = !memcmp(tmp_name->start, name, name_len);

out:
	fs_path_free(tmp_name);
	return ret;
}

/*
 * Used by process_recorded_refs to determine if a new ref would overwrite an
 * already existing ref. In case it detects an overwrite, it returns the
 * inode/gen in who_ino/who_gen.
 * When an overwrite is detected, process_recorded_refs does proper orphanizing
 * to make sure later references to the overwritten inode are possible.
 * Orphanizing is however only required for the first ref of an inode.
 * process_recorded_refs does an additional is_first_ref check to see if
 * orphanizing is really required.
 */
static int will_overwrite_ref(struct send_ctx *sctx, u64 dir, u64 dir_gen,
			      const char *name, int name_len,
			      u64 *who_ino, u64 *who_gen)
{
	int ret = 0;
	u64 gen;
	u64 other_inode = 0;
	u8 other_type = 0;

	if (!sctx->parent_root)
		goto out;

	ret = is_inode_existent(sctx, dir, dir_gen);
	if (ret <= 0)
		goto out;

	/*
	 * If we have a parent root we need to verify that the parent dir was
	 * not deleted and then re-created, if it was then we have no overwrite
	 * and we can just unlink this entry.
	 */
	if (sctx->parent_root) {
		ret = get_inode_info(sctx->parent_root, dir, NULL, &gen, NULL,
				     NULL, NULL, NULL);
		if (ret < 0 && ret != -ENOENT)
			goto out;
		if (ret) {
			ret = 0;
			goto out;
		}
		if (gen != dir_gen)
			goto out;
	}

	ret = lookup_dir_item_inode(sctx->parent_root, dir, name, name_len,
			&other_inode, &other_type);
	if (ret < 0 && ret != -ENOENT)
		goto out;
	if (ret) {
		ret = 0;
		goto out;
	}

	/*
	 * Check if the overwritten ref was already processed. If yes, the ref
	 * was already unlinked/moved, so we can safely assume that we will not
	 * overwrite anything at this point in time.
	 */
	if (other_inode > sctx->send_progress) {
		ret = get_inode_info(sctx->parent_root, other_inode, NULL,
				who_gen, NULL, NULL, NULL, NULL);
		if (ret < 0)
			goto out;

		ret = 1;
		*who_ino = other_inode;
	} else {
		ret = 0;
	}

out:
	return ret;
}

/*
 * Checks if the ref was overwritten by an already processed inode. This is
 * used by __get_cur_name_and_parent to find out if the ref was orphanized and
 * thus the orphan name needs to be used.
 * process_recorded_refs also uses it to avoid unlinking of refs that were
 * overwritten.
 */
static int did_overwrite_ref(struct send_ctx *sctx,
			    u64 dir, u64 dir_gen,
			    u64 ino, u64 ino_gen,
			    const char *name, int name_len)
{
	int ret = 0;
	u64 gen;
	u64 ow_inode;
	u8 other_type;

	if (!sctx->parent_root)
		goto out;

	ret = is_inode_existent(sctx, dir, dir_gen);
	if (ret <= 0)
		goto out;

	/* check if the ref was overwritten by another ref */
	ret = lookup_dir_item_inode(sctx->send_root, dir, name, name_len,
			&ow_inode, &other_type);
	if (ret < 0 && ret != -ENOENT)
		goto out;
	if (ret) {
		/* was never and will never be overwritten */
		ret = 0;
		goto out;
	}

	ret = get_inode_info(sctx->send_root, ow_inode, NULL, &gen, NULL, NULL,
			NULL, NULL);
	if (ret < 0)
		goto out;

	if (ow_inode == ino && gen == ino_gen) {
		ret = 0;
		goto out;
	}

	/*
	 * We know that it is or will be overwritten. Check this now.
	 * The current inode being processed might have been the one that caused
	 * inode 'ino' to be orphanized, therefore check if ow_inode matches
	 * the current inode being processed.
	 */
	if ((ow_inode < sctx->send_progress) ||
	    (ino != sctx->cur_ino && ow_inode == sctx->cur_ino &&
	     gen == sctx->cur_inode_gen))
		ret = 1;
	else
		ret = 0;

out:
	return ret;
}

/*
 * Same as did_overwrite_ref, but also checks if it is the first ref of an inode
 * that got overwritten. This is used by process_recorded_refs to determine
 * if it has to use the path as returned by get_cur_path or the orphan name.
 */
static int did_overwrite_first_ref(struct send_ctx *sctx, u64 ino, u64 gen)
{
	int ret = 0;
	struct fs_path *name = NULL;
	u64 dir;
	u64 dir_gen;

	if (!sctx->parent_root)
		goto out;

	name = fs_path_alloc();
	if (!name)
		return -ENOMEM;

	ret = get_first_ref(sctx->parent_root, ino, &dir, &dir_gen, name);
	if (ret < 0)
		goto out;

	ret = did_overwrite_ref(sctx, dir, dir_gen, ino, gen,
			name->start, fs_path_len(name));

out:
	fs_path_free(name);
	return ret;
}

/*
 * Insert a name cache entry. On 32bit kernels the radix tree index is 32bit,
 * so we need to do some special handling in case we have clashes. This function
 * takes care of this with the help of name_cache_entry::radix_list.
 * In case of error, nce is kfreed.
 */
static int name_cache_insert(struct send_ctx *sctx,
			     struct name_cache_entry *nce)
{
	int ret = 0;
	struct list_head *nce_head;

	nce_head = radix_tree_lookup(&sctx->name_cache,
			(unsigned long)nce->ino);
	if (!nce_head) {
		nce_head = kmalloc(sizeof(*nce_head), GFP_NOFS);
		if (!nce_head) {
			kfree(nce);
			return -ENOMEM;
		}
		INIT_LIST_HEAD(nce_head);

		ret = radix_tree_insert(&sctx->name_cache, nce->ino, nce_head);
		if (ret < 0) {
			kfree(nce_head);
			kfree(nce);
			return ret;
		}
	}
	list_add_tail(&nce->radix_list, nce_head);
	list_add_tail(&nce->list, &sctx->name_cache_list);
	sctx->name_cache_size++;

	return ret;
}

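/*
 * Remove a name cache entry from both its radix tree bucket and the LRU
 * list, freeing the bucket once it becomes empty.
 */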
static void name_cache_delete(struct send_ctx *sctx,
			      struct name_cache_entry *nce)
{
	struct list_head *nce_head;

	nce_head = radix_tree_lookup(&sctx->name_cache,
			(unsigned long)nce->ino);
	if (!nce_head) {
		btrfs_err(sctx->send_root->fs_info,
	      "name_cache_delete lookup failed ino %llu cache size %d, leaking memory",
			nce->ino, sctx->name_cache_size);
	}

	list_del(&nce->radix_list);
	list_del(&nce->list);
	sctx->name_cache_size--;

	/*
	 * We may not get to the final release of nce_head if the lookup fails
	 */
	if (nce_head && list_empty(nce_head)) {
		radix_tree_delete(&sctx->name_cache, (unsigned long)nce->ino);
		kfree(nce_head);
	}
}

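/*
 * Look up a cached name by ino/gen, walking the per-ino bucket list to
 * resolve 32bit radix tree index clashes.
 */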
static struct name_cache_entry *name_cache_search(struct send_ctx *sctx,
						  u64 ino, u64 gen)
{
	struct list_head *nce_head;
	struct name_cache_entry *cur;

	nce_head = radix_tree_lookup(&sctx->name_cache, (unsigned long)ino);
	if (!nce_head)
		return NULL;

	list_for_each_entry(cur, nce_head, radix_list) {
		if (cur->ino == ino && cur->gen == gen)
			return cur;
	}
	return NULL;
}

/*
 * Removes the entry from the list and adds it back to the end. This marks the
 * entry as recently used so that name_cache_clean_unused does not remove it.
 */
static void name_cache_used(struct send_ctx *sctx, struct name_cache_entry *nce)
{
	list_del(&nce->list);
	list_add_tail(&nce->list, &sctx->name_cache_list);
}

/*
 * Remove some entries from the beginning of name_cache_list.
 */
static void name_cache_clean_unused(struct send_ctx *sctx)
{
	struct name_cache_entry *nce;

	if (sctx->name_cache_size < SEND_CTX_NAME_CACHE_CLEAN_SIZE)
		return;

	while (sctx->name_cache_size > SEND_CTX_MAX_NAME_CACHE_SIZE) {
		nce = list_entry(sctx->name_cache_list.next,
				struct name_cache_entry, list);
		name_cache_delete(sctx, nce);
		kfree(nce);
	}
}

static void name_cache_free(struct send_ctx *sctx)
{
	struct name_cache_entry *nce;

	while (!list_empty(&sctx->name_cache_list)) {
		nce = list_entry(sctx->name_cache_list.next,
				struct name_cache_entry, list);
		name_cache_delete(sctx, nce);
		kfree(nce);
	}
}

/*
 * Used by get_cur_path for each ref up to the root.
 * Returns 0 if it succeeded.
 * Returns 1 if the inode does not exist or got overwritten. In that case, the
 * name is an orphan name. This instructs get_cur_path to stop iterating. If 1
 * is returned, parent_ino/parent_gen are not guaranteed to be valid.
 * Returns <0 in case of error.
 */
static int __get_cur_name_and_parent(struct send_ctx *sctx,
				     u64 ino, u64 gen,
				     u64 *parent_ino,
				     u64 *parent_gen,
				     struct fs_path *dest)
{
	int ret;
	int nce_ret;
	struct name_cache_entry *nce = NULL;

	/*
	 * First check if we already did a call to this function with the same
	 * ino/gen. If yes, check if the cache entry is still up-to-date. If yes
	 * return the cached result.
	 */
	nce = name_cache_search(sctx, ino, gen);
	if (nce) {
		if (ino < sctx->send_progress && nce->need_later_update) {
			name_cache_delete(sctx, nce);
			kfree(nce);
			nce = NULL;
		} else {
			name_cache_used(sctx, nce);
			*parent_ino = nce->parent_ino;
			*parent_gen = nce->parent_gen;
			ret = fs_path_add(dest, nce->name, nce->name_len);
			if (ret < 0)
				goto out;
			ret = nce->ret;
			goto out;
		}
	}

	/*
	 * If the inode does not exist yet, add the orphan name and return 1.
	 * This should only happen for the parent dir that we determine in
	 * __record_new_ref
	 */
	ret = is_inode_existent(sctx, ino, gen);
	if (ret < 0)
		goto out;

	if (!ret) {
		ret = gen_unique_name(sctx, ino, gen, dest);
		if (ret < 0)
			goto out;
		ret = 1;
		goto out_cache;
	}

	/*
	 * Depending on whether the inode was already processed or not, use
	 * send_root or parent_root for ref lookup.
	 */
	if (ino < sctx->send_progress)
		ret = get_first_ref(sctx->send_root, ino,
				    parent_ino, parent_gen, dest);
	else
		ret = get_first_ref(sctx->parent_root, ino,
				    parent_ino, parent_gen, dest);
	if (ret < 0)
		goto out;

	/*
	 * Check if the ref was overwritten by an inode's ref that was processed
	 * earlier. If yes, treat as orphan and return 1.
	 */
	ret = did_overwrite_ref(sctx, *parent_ino, *parent_gen, ino, gen,
			dest->start, dest->end - dest->start);
	if (ret < 0)
		goto out;
	if (ret) {
		fs_path_reset(dest);
		ret = gen_unique_name(sctx, ino, gen, dest);
		if (ret < 0)
			goto out;
		ret = 1;
	}

out_cache:
	/*
	 * Store the result of the lookup in the name cache.
	 */
	nce = kmalloc(sizeof(*nce) + fs_path_len(dest) + 1, GFP_NOFS);
	if (!nce) {
		ret = -ENOMEM;
		goto out;
	}

	nce->ino = ino;
	nce->gen = gen;
	nce->parent_ino = *parent_ino;
	nce->parent_gen = *parent_gen;
	nce->name_len = fs_path_len(dest);
	nce->ret = ret;
	strcpy(nce->name, dest->start);

	if (ino < sctx->send_progress)
		nce->need_later_update = 0;
	else
		nce->need_later_update = 1;

	nce_ret = name_cache_insert(sctx, nce);
	if (nce_ret < 0)
		ret = nce_ret;
	name_cache_clean_unused(sctx);

out:
	return ret;
}

/*
 * Magic happens here. This function returns the first ref to an inode as it
 * would look like while receiving the stream at this point in time.
 * We walk the path up to the root. For every inode in between, we check if it
 * was already processed/sent. If yes, we continue with the parent as found
 * in send_root. If not, we continue with the parent as found in parent_root.
 * If we encounter an inode that was deleted at this point in time, we use the
 * inode's "orphan" name instead of the real name and stop. Same with new inodes
 * that were not created yet and overwritten inodes/refs.
 *
 * When do we have orphan inodes:
 * 1. When an inode is freshly created and thus no valid refs are available yet
 * 2. When a directory lost all its refs (deleted) but still has dir items
 *    inside which were not processed yet (pending for move/delete). If anyone
 *    tried to get the path to the dir items, it would get a path inside that
 *    orphan directory.
 * 3. When an inode is moved around or gets new links, it may overwrite the ref
 *    of an unprocessed inode. If in that case the first ref would be
 *    overwritten, the overwritten inode gets "orphanized". Later when we
 *    process this overwritten inode, it is restored at a new place by moving
 *    the orphan inode.
 *
 * sctx->send_progress tells this function at which point in time receiving
 * would be.
 */
static int get_cur_path(struct send_ctx *sctx, u64 ino, u64 gen,
			struct fs_path *dest)
{
	int ret = 0;
	struct fs_path *name = NULL;
	u64 parent_inode = 0;
	u64 parent_gen = 0;
	int stop = 0;

	name = fs_path_alloc();
	if (!name) {
		ret = -ENOMEM;
		goto out;
	}

	dest->reversed = 1;
	fs_path_reset(dest);

	while (!stop && ino != BTRFS_FIRST_FREE_OBJECTID) {
		struct waiting_dir_move *wdm;

		fs_path_reset(name);

		if (is_waiting_for_rm(sctx, ino)) {
			ret = gen_unique_name(sctx, ino, gen, name);
			if (ret < 0)
				goto out;
			ret = fs_path_add_path(dest, name);
			break;
		}

		wdm = get_waiting_dir_move(sctx, ino);
		if (wdm && wdm->orphanized) {
			ret = gen_unique_name(sctx, ino, gen, name);
			stop = 1;
		} else if (wdm) {
			ret = get_first_ref(sctx->parent_root, ino,
					    &parent_inode, &parent_gen, name);
		} else {
			ret = __get_cur_name_and_parent(sctx, ino, gen,
							&parent_inode,
							&parent_gen, name);
			if (ret)
				stop = 1;
		}

		if (ret < 0)
			goto out;

		ret = fs_path_add_path(dest, name);
		if (ret < 0)
			goto out;

		ino = parent_inode;
		gen = parent_gen;
	}

out:
	fs_path_free(name);
	if (!ret)
		fs_path_unreverse(dest);
	return ret;
}

/*
 * Sends a BTRFS_SEND_C_SUBVOL command/item to userspace
 */
static int send_subvol_begin(struct send_ctx *sctx)
{
	int ret;
	struct btrfs_root *send_root = sctx->send_root;
	struct btrfs_root *parent_root = sctx->parent_root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_root_ref *ref;
	struct extent_buffer *leaf;
	char *name = NULL;
	int namelen;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	name = kmalloc(BTRFS_PATH_NAME_MAX, GFP_NOFS);
	if (!name) {
		btrfs_free_path(path);
		return -ENOMEM;
	}

	key.objectid = send_root->objectid;
	key.type = BTRFS_ROOT_BACKREF_KEY;
	key.offset = 0;

	ret = btrfs_search_slot_for_read(send_root->fs_info->tree_root,
				&key, path, 1, 0);
	if (ret < 0)
		goto out;
	if (ret) {
		ret = -ENOENT;
		goto out;
	}

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	if (key.type != BTRFS_ROOT_BACKREF_KEY ||
	    key.objectid != send_root->objectid) {
		ret = -ENOENT;
		goto out;
	}
	ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref);
	namelen = btrfs_root_ref_name_len(leaf, ref);
	read_extent_buffer(leaf, name, (unsigned long)(ref + 1), namelen);
	btrfs_release_path(path);

	if (parent_root) {
		ret = begin_cmd(sctx, BTRFS_SEND_C_SNAPSHOT);
		if (ret < 0)
			goto out;
	} else {
		ret = begin_cmd(sctx, BTRFS_SEND_C_SUBVOL);
		if (ret < 0)
			goto out;
	}

	TLV_PUT_STRING(sctx, BTRFS_SEND_A_PATH, name, namelen);

	if (!btrfs_is_empty_uuid(sctx->send_root->root_item.received_uuid))
		TLV_PUT_UUID(sctx, BTRFS_SEND_A_UUID,
			    sctx->send_root->root_item.received_uuid);
	else
		TLV_PUT_UUID(sctx, BTRFS_SEND_A_UUID,
			    sctx->send_root->root_item.uuid);

	TLV_PUT_U64(sctx, BTRFS_SEND_A_CTRANSID,
		    le64_to_cpu(sctx->send_root->root_item.ctransid));
	if (parent_root) {
		if (!btrfs_is_empty_uuid(parent_root->root_item.received_uuid))
			TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID,
				     parent_root->root_item.received_uuid);
		else
			TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID,
				     parent_root->root_item.uuid);
		TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_CTRANSID,
			    le64_to_cpu(sctx->parent_root->root_item.ctransid));
	}

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	btrfs_free_path(path);
	kfree(name);
	return ret;
}

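/*
 * Send a truncate command to set the file size of the given inode on the
 * receiving side.
 */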
static int send_truncate(struct send_ctx *sctx, u64 ino, u64 gen, u64 size)
{
	int ret = 0;
	struct fs_path *p;

	verbose_printk("btrfs: send_truncate %llu size=%llu\n", ino, size);

	p = fs_path_alloc();
	if (!p)
		return -ENOMEM;

	ret = begin_cmd(sctx, BTRFS_SEND_C_TRUNCATE);
	if (ret < 0)
		goto out;

	ret = get_cur_path(sctx, ino, gen, p);
	if (ret < 0)
		goto out;
	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
	TLV_PUT_U64(sctx, BTRFS_SEND_A_SIZE, size);

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	fs_path_free(p);
	return ret;
}

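/*
 * Send a chmod command; only the permission bits (mode & 07777) are
 * transmitted.
 */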
static int send_chmod(struct send_ctx *sctx, u64 ino, u64 gen, u64 mode)
{
	int ret = 0;
	struct fs_path *p;

	verbose_printk("btrfs: send_chmod %llu mode=%llu\n", ino, mode);

	p = fs_path_alloc();
	if (!p)
		return -ENOMEM;

	ret = begin_cmd(sctx, BTRFS_SEND_C_CHMOD);
	if (ret < 0)
		goto out;

	ret = get_cur_path(sctx, ino, gen, p);
	if (ret < 0)
		goto out;
	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
	TLV_PUT_U64(sctx, BTRFS_SEND_A_MODE, mode & 07777);

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	fs_path_free(p);
	return ret;
}

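/*
 * Send a chown command carrying the uid and gid for the given inode.
 */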
static int send_chown(struct send_ctx *sctx, u64 ino, u64 gen, u64 uid, u64 gid)
{
	int ret = 0;
	struct fs_path *p;

	verbose_printk("btrfs: send_chown %llu uid=%llu, gid=%llu\n", ino, uid, gid);

	p = fs_path_alloc();
	if (!p)
		return -ENOMEM;

	ret = begin_cmd(sctx, BTRFS_SEND_C_CHOWN);
	if (ret < 0)
		goto out;

	ret = get_cur_path(sctx, ino, gen, p);
	if (ret < 0)
		goto out;
	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
	TLV_PUT_U64(sctx, BTRFS_SEND_A_UID, uid);
	TLV_PUT_U64(sctx, BTRFS_SEND_A_GID, gid);

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	fs_path_free(p);
	return ret;
}

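/*
 * Send a utimes command with the atime/mtime/ctime read from the inode item
 * in the send root.
 */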
static int send_utimes(struct send_ctx *sctx, u64 ino, u64 gen)
{
	int ret = 0;
	struct fs_path *p = NULL;
	struct btrfs_inode_item *ii;
	struct btrfs_path *path = NULL;
	struct extent_buffer *eb;
	struct btrfs_key key;
	int slot;

	verbose_printk("btrfs: send_utimes %llu\n", ino);

	p = fs_path_alloc();
	if (!p)
		return -ENOMEM;

	path = alloc_path_for_send();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	key.objectid = ino;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;
	ret = btrfs_search_slot(NULL, sctx->send_root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	eb = path->nodes[0];
	slot = path->slots[0];
	ii = btrfs_item_ptr(eb, slot, struct btrfs_inode_item);

	ret = begin_cmd(sctx, BTRFS_SEND_C_UTIMES);
	if (ret < 0)
		goto out;

	ret = get_cur_path(sctx, ino, gen, p);
	if (ret < 0)
		goto out;
	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
	TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_ATIME, eb, &ii->atime);
	TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_MTIME, eb, &ii->mtime);
	TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_CTIME, eb, &ii->ctime);
	/* TODO Add otime support when the otime patches get into upstream */

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	fs_path_free(p);
	btrfs_free_path(path);
	return ret;
}

/*
 * Sends a BTRFS_SEND_C_MKXXX or SYMLINK command to user space. We don't have
 * a valid path yet because we did not process the refs yet. So, the inode
 * is created as orphan.
 */
static int send_create_inode(struct send_ctx *sctx, u64 ino)
{
	int ret = 0;
	struct fs_path *p;
	int cmd;
	u64 gen;
	u64 mode;
	u64 rdev;

	verbose_printk("btrfs: send_create_inode %llu\n", ino);

	p = fs_path_alloc();
	if (!p)
		return -ENOMEM;

	if (ino != sctx->cur_ino) {
		ret = get_inode_info(sctx->send_root, ino, NULL, &gen, &mode,
				     NULL, NULL, &rdev);
		if (ret < 0)
			goto out;
	} else {
		gen = sctx->cur_inode_gen;
		mode = sctx->cur_inode_mode;
		rdev = sctx->cur_inode_rdev;
	}

	if (S_ISREG(mode)) {
		cmd = BTRFS_SEND_C_MKFILE;
	} else if (S_ISDIR(mode)) {
		cmd = BTRFS_SEND_C_MKDIR;
	} else if (S_ISLNK(mode)) {
		cmd = BTRFS_SEND_C_SYMLINK;
	} else if (S_ISCHR(mode) || S_ISBLK(mode)) {
		cmd = BTRFS_SEND_C_MKNOD;
	} else if (S_ISFIFO(mode)) {
		cmd = BTRFS_SEND_C_MKFIFO;
	} else if (S_ISSOCK(mode)) {
		cmd = BTRFS_SEND_C_MKSOCK;
	} else {
		btrfs_warn(sctx->send_root->fs_info, "unexpected inode type %o",
				(int)(mode & S_IFMT));
		ret = -ENOTSUPP;
		goto out;
	}

	ret = begin_cmd(sctx, cmd);
	if (ret < 0)
		goto out;

	ret = gen_unique_name(sctx, ino, gen, p);
	if (ret < 0)
		goto out;

	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
	TLV_PUT_U64(sctx, BTRFS_SEND_A_INO, ino);

	if (S_ISLNK(mode)) {
		fs_path_reset(p);
		ret = read_symlink(sctx->send_root, ino, p);
		if (ret < 0)
			goto out;
		TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH_LINK, p);
	} else if (S_ISCHR(mode) || S_ISBLK(mode) ||
		   S_ISFIFO(mode) || S_ISSOCK(mode)) {
		TLV_PUT_U64(sctx, BTRFS_SEND_A_RDEV, new_encode_dev(rdev));
		TLV_PUT_U64(sctx, BTRFS_SEND_A_MODE, mode);
	}

	ret = send_cmd(sctx);
	if (ret < 0)
		goto out;

tlv_put_failure:
out:
	fs_path_free(p);
	return ret;
}

/*
 * We need some special handling for inodes that get processed before the parent
 * directory got created. See process_recorded_refs for details.
 * This function does the check if we already created the dir out of order.
 */
static int did_create_dir(struct send_ctx *sctx, u64 dir)
{
	int ret = 0;
	struct btrfs_path *path = NULL;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_key di_key;
	struct extent_buffer *eb;
	struct btrfs_dir_item *di;
	int slot;

	path = alloc_path_for_send();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	key.objectid = dir;
	key.type = BTRFS_DIR_INDEX_KEY;
	key.offset = 0;
	ret = btrfs_search_slot(NULL, sctx->send_root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	while (1) {
		eb = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(eb)) {
			ret = btrfs_next_leaf(sctx->send_root, path);
			if (ret < 0) {
				goto out;
			} else if (ret > 0) {
				ret = 0;
				break;
			}
			continue;
		}

		btrfs_item_key_to_cpu(eb, &found_key, slot);
		if (found_key.objectid != key.objectid ||
		    found_key.type != key.type) {
			ret = 0;
			goto out;
		}

		di = btrfs_item_ptr(eb, slot, struct btrfs_dir_item);
		btrfs_dir_item_key_to_cpu(eb, di, &di_key);

		if (di_key.type != BTRFS_ROOT_ITEM_KEY &&
		    di_key.objectid < sctx->send_progress) {
			ret = 1;
			goto out;
		}

		path->slots[0]++;
	}

out:
	btrfs_free_path(path);
	return ret;
}

/*
 * Only creates the inode if it is:
 * 1. Not a directory
 * 2. Or a directory which was not created already due to out of order
 *    directories. See did_create_dir and process_recorded_refs for details.
 */
static int send_create_inode_if_needed(struct send_ctx *sctx)
{
	int ret;

	if (S_ISDIR(sctx->cur_inode_mode)) {
		ret = did_create_dir(sctx, sctx->cur_ino);
		if (ret < 0)
			goto out;
		if (ret) {
			ret = 0;
			goto out;
		}
	}

	ret = send_create_inode(sctx, sctx->cur_ino);
	if (ret < 0)
		goto out;

out:
	return ret;
}

struct recorded_ref {
	struct list_head list;
	char *dir_path;
	char *name;
	struct fs_path *full_path;
	u64 dir;
	u64 dir_gen;
	int dir_path_len;
	int name_len;
};

/*
 * We need to process new refs before deleted refs, but compare_tree gives us
 * everything mixed. So we first record all refs and later process them.
 * This function is a helper to record one ref.
 */
static int __record_ref(struct list_head *head, u64 dir,
		      u64 dir_gen, struct fs_path *path)
{
	struct recorded_ref *ref;

	ref = kmalloc(sizeof(*ref), GFP_NOFS);
	if (!ref)
		return -ENOMEM;

	ref->dir = dir;
	ref->dir_gen = dir_gen;
	ref->full_path = path;

	ref->name = (char *)kbasename(ref->full_path->start);
	ref->name_len = ref->full_path->end - ref->name;
	ref->dir_path = ref->full_path->start;
	if (ref->name == ref->full_path->start)
		ref->dir_path_len = 0;
	else
		ref->dir_path_len = ref->full_path->end -
				ref->full_path->start - 1 - ref->name_len;

	list_add_tail(&ref->list, head);
	return 0;
}

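/*
 * Duplicate a recorded ref onto another list. Only dir and dir_gen are
 * copied; the path fields are left empty since the copies are only used for
 * later utimes updates of the parent directories.
 */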
static int dup_ref(struct recorded_ref *ref, struct list_head *list)
{
	struct recorded_ref *new;

	new = kmalloc(sizeof(*ref), GFP_NOFS);
	if (!new)
		return -ENOMEM;

	new->dir = ref->dir;
	new->dir_gen = ref->dir_gen;
	new->full_path = NULL;
	INIT_LIST_HEAD(&new->list);
	list_add_tail(&new->list, list);
	return 0;
}

static void __free_recorded_refs(struct list_head *head)
{
	struct recorded_ref *cur;

	while (!list_empty(head)) {
		cur = list_entry(head->next, struct recorded_ref, list);
		fs_path_free(cur->full_path);
		list_del(&cur->list);
		kfree(cur);
	}
}

static void free_recorded_refs(struct send_ctx *sctx)
{
	__free_recorded_refs(&sctx->new_refs);
	__free_recorded_refs(&sctx->deleted_refs);
}

/*
 * Renames/moves a file/dir to its orphan name. Used when the first
 * ref of an unprocessed inode gets overwritten and for all non-empty
 * directories.
 */
static int orphanize_inode(struct send_ctx *sctx, u64 ino, u64 gen,
			  struct fs_path *path)
{
	int ret;
	struct fs_path *orphan;

	orphan = fs_path_alloc();
	if (!orphan)
		return -ENOMEM;

	ret = gen_unique_name(sctx, ino, gen, orphan);
	if (ret < 0)
		goto out;

	ret = send_rename(sctx, path, orphan);

out:
	fs_path_free(orphan);
	return ret;
}

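/*
 * Insert an orphan_dir_info entry for dir_ino into the orphan_dirs rbtree,
 * or return the existing entry if there already is one.
 */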
static struct orphan_dir_info *
add_orphan_dir_info(struct send_ctx *sctx, u64 dir_ino)
{
	struct rb_node **p = &sctx->orphan_dirs.rb_node;
	struct rb_node *parent = NULL;
	struct orphan_dir_info *entry, *odi;

	odi = kmalloc(sizeof(*odi), GFP_NOFS);
	if (!odi)
		return ERR_PTR(-ENOMEM);
	odi->ino = dir_ino;
	odi->gen = 0;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct orphan_dir_info, node);
		if (dir_ino < entry->ino) {
			p = &(*p)->rb_left;
		} else if (dir_ino > entry->ino) {
			p = &(*p)->rb_right;
		} else {
			kfree(odi);
			return entry;
		}
	}

	rb_link_node(&odi->node, parent, p);
	rb_insert_color(&odi->node, &sctx->orphan_dirs);
	return odi;
}

static struct orphan_dir_info *
get_orphan_dir_info(struct send_ctx *sctx, u64 dir_ino)
{
	struct rb_node *n = sctx->orphan_dirs.rb_node;
	struct orphan_dir_info *entry;

	while (n) {
		entry = rb_entry(n, struct orphan_dir_info, node);
		if (dir_ino < entry->ino)
			n = n->rb_left;
		else if (dir_ino > entry->ino)
			n = n->rb_right;
		else
			return entry;
	}
	return NULL;
}

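/*
 * Returns 1 if the directory is still waiting for its rmdir to be sent,
 * i.e. it has an entry in the orphan_dirs rbtree.
 */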
static int is_waiting_for_rm(struct send_ctx *sctx, u64 dir_ino)
{
	struct orphan_dir_info *odi = get_orphan_dir_info(sctx, dir_ino);

	return odi != NULL;
}

static void free_orphan_dir_info(struct send_ctx *sctx,
				 struct orphan_dir_info *odi)
{
	if (!odi)
		return;
	rb_erase(&odi->node, &sctx->orphan_dirs);
	kfree(odi);
}

/*
 * Returns 1 if a directory can be removed at this point in time.
 * We check this by iterating all dir items and checking if the inode behind
 * the dir item was already processed.
 */
static int can_rmdir(struct send_ctx *sctx, u64 dir, u64 dir_gen,
		     u64 send_progress)
{
	int ret = 0;
	struct btrfs_root *root = sctx->parent_root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_key loc;
	struct btrfs_dir_item *di;

	/*
	 * Don't try to rmdir the top/root subvolume dir.
	 */
	if (dir == BTRFS_FIRST_FREE_OBJECTID)
		return 0;

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	key.objectid = dir;
	key.type = BTRFS_DIR_INDEX_KEY;
	key.offset = 0;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	while (1) {
		struct waiting_dir_move *dm;

		if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto out;
			else if (ret > 0)
				break;
			continue;
		}
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		if (found_key.objectid != key.objectid ||
		    found_key.type != key.type)
			break;

		di = btrfs_item_ptr(path->nodes[0], path->slots[0],
				struct btrfs_dir_item);
		btrfs_dir_item_key_to_cpu(path->nodes[0], di, &loc);

		dm = get_waiting_dir_move(sctx, loc.objectid);
		if (dm) {
			struct orphan_dir_info *odi;

			odi = add_orphan_dir_info(sctx, dir);
			if (IS_ERR(odi)) {
				ret = PTR_ERR(odi);
				goto out;
			}
			odi->gen = dir_gen;
			dm->rmdir_ino = dir;
			ret = 0;
			goto out;
		}

		if (loc.objectid > send_progress) {
			ret = 0;
			goto out;
		}

		path->slots[0]++;
	}

	ret = 1;

out:
	btrfs_free_path(path);
	return ret;
}

static int is_waiting_for_move(struct send_ctx *sctx, u64 ino)
{
	struct waiting_dir_move *entry = get_waiting_dir_move(sctx, ino);

	return entry != NULL;
}

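/*
 * Record that directory inode 'ino' must not be renamed yet. Returns
 * -EEXIST if an entry for it is already present in the rbtree.
 */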
static int add_waiting_dir_move(struct send_ctx *sctx, u64 ino, bool orphanized)
{
	struct rb_node **p = &sctx->waiting_dir_moves.rb_node;
	struct rb_node *parent = NULL;
	struct waiting_dir_move *entry, *dm;

	dm = kmalloc(sizeof(*dm), GFP_NOFS);
	if (!dm)
		return -ENOMEM;
	dm->ino = ino;
	dm->rmdir_ino = 0;
	dm->orphanized = orphanized;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct waiting_dir_move, node);
		if (ino < entry->ino) {
			p = &(*p)->rb_left;
		} else if (ino > entry->ino) {
			p = &(*p)->rb_right;
		} else {
			kfree(dm);
			return -EEXIST;
		}
	}

	rb_link_node(&dm->node, parent, p);
	rb_insert_color(&dm->node, &sctx->waiting_dir_moves);
	return 0;
}

static struct waiting_dir_move *
get_waiting_dir_move(struct send_ctx *sctx, u64 ino)
{
	struct rb_node *n = sctx->waiting_dir_moves.rb_node;
	struct waiting_dir_move *entry;

	while (n) {
		entry = rb_entry(n, struct waiting_dir_move, node);
		if (ino < entry->ino)
			n = n->rb_left;
		else if (ino > entry->ino)
			n = n->rb_right;
		else
			return entry;
	}
	return NULL;
}

static void free_waiting_dir_move(struct send_ctx *sctx,
				  struct waiting_dir_move *dm)
{
	if (!dm)
		return;
	rb_erase(&dm->node, &sctx->waiting_dir_moves);
	kfree(dm);
}

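/*
 * Queue a directory move that cannot be performed yet. Pending moves are
 * keyed by the parent inode; moves that share a parent are chained on the
 * existing entry's list. The refs are duplicated so the parents' utimes can
 * be updated once the move is finally applied.
 */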
static int add_pending_dir_move(struct send_ctx *sctx,
				u64 ino,
				u64 ino_gen,
				u64 parent_ino,
				struct list_head *new_refs,
				struct list_head *deleted_refs,
				const bool is_orphan)
{
	struct rb_node **p = &sctx->pending_dir_moves.rb_node;
	struct rb_node *parent = NULL;
	struct pending_dir_move *entry = NULL, *pm;
	struct recorded_ref *cur;
	int exists = 0;
	int ret;

	pm = kmalloc(sizeof(*pm), GFP_NOFS);
	if (!pm)
		return -ENOMEM;
	pm->parent_ino = parent_ino;
	pm->ino = ino;
	pm->gen = ino_gen;
	pm->is_orphan = is_orphan;
	INIT_LIST_HEAD(&pm->list);
	INIT_LIST_HEAD(&pm->update_refs);
	RB_CLEAR_NODE(&pm->node);

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct pending_dir_move, node);
		if (parent_ino < entry->parent_ino) {
			p = &(*p)->rb_left;
		} else if (parent_ino > entry->parent_ino) {
			p = &(*p)->rb_right;
		} else {
			exists = 1;
			break;
		}
	}

	list_for_each_entry(cur, deleted_refs, list) {
		ret = dup_ref(cur, &pm->update_refs);
		if (ret < 0)
			goto out;
	}
	list_for_each_entry(cur, new_refs, list) {
		ret = dup_ref(cur, &pm->update_refs);
		if (ret < 0)
			goto out;
	}

	ret = add_waiting_dir_move(sctx, pm->ino, is_orphan);
	if (ret)
		goto out;

	if (exists) {
		list_add_tail(&pm->list, &entry->list);
	} else {
		rb_link_node(&pm->node, parent, p);
		rb_insert_color(&pm->node, &sctx->pending_dir_moves);
	}
	ret = 0;
out:
	if (ret) {
		__free_recorded_refs(&pm->update_refs);
		kfree(pm);
	}
	return ret;
}

static struct pending_dir_move *get_pending_dir_moves(struct send_ctx *sctx,
						      u64 parent_ino)
{
	struct rb_node *n = sctx->pending_dir_moves.rb_node;
	struct pending_dir_move *entry;

	while (n) {
		entry = rb_entry(n, struct pending_dir_move, node);
		if (parent_ino < entry->parent_ino)
			n = n->rb_left;
		else if (parent_ino > entry->parent_ino)
			n = n->rb_right;
		else
			return entry;
	}
	return NULL;
}

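/*
 * Perform a previously delayed directory move: rename the inode from its old
 * (or orphan) location to its final path, send a pending rmdir of the old
 * parent if one became possible, and update the utimes of all affected
 * parent directories.
 */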
static int apply_dir_move(struct send_ctx *sctx, struct pending_dir_move *pm)
{
	struct fs_path *from_path = NULL;
	struct fs_path *to_path = NULL;
	struct fs_path *name = NULL;
	u64 orig_progress = sctx->send_progress;
	struct recorded_ref *cur;
	u64 parent_ino, parent_gen;
	struct waiting_dir_move *dm = NULL;
	u64 rmdir_ino = 0;
	int ret;

	name = fs_path_alloc();
	from_path = fs_path_alloc();
	if (!name || !from_path) {
		ret = -ENOMEM;
		goto out;
	}

	dm = get_waiting_dir_move(sctx, pm->ino);
	ASSERT(dm);
	rmdir_ino = dm->rmdir_ino;
	free_waiting_dir_move(sctx, dm);

	if (pm->is_orphan) {
		ret = gen_unique_name(sctx, pm->ino,
				      pm->gen, from_path);
	} else {
		ret = get_first_ref(sctx->parent_root, pm->ino,
				    &parent_ino, &parent_gen, name);
		if (ret < 0)
			goto out;
		ret = get_cur_path(sctx, parent_ino, parent_gen,
				   from_path);
		if (ret < 0)
			goto out;
		ret = fs_path_add_path(from_path, name);
	}
	if (ret < 0)
		goto out;

	sctx->send_progress = sctx->cur_ino + 1;
	fs_path_reset(name);
	to_path = name;
	name = NULL;
	ret = get_cur_path(sctx, pm->ino, pm->gen, to_path);
	if (ret < 0)
		goto out;

	ret = send_rename(sctx, from_path, to_path);
	if (ret < 0)
		goto out;

	if (rmdir_ino) {
		struct orphan_dir_info *odi;

		odi = get_orphan_dir_info(sctx, rmdir_ino);
		if (!odi) {
			/* already deleted */
			goto finish;
		}
		ret = can_rmdir(sctx, rmdir_ino, odi->gen, sctx->cur_ino + 1);
		if (ret < 0)
			goto out;
		if (!ret)
			goto finish;

		name = fs_path_alloc();
		if (!name) {
			ret = -ENOMEM;
			goto out;
		}
		ret = get_cur_path(sctx, rmdir_ino, odi->gen, name);
		if (ret < 0)
			goto out;
		ret = send_rmdir(sctx, name);
		if (ret < 0)
			goto out;
		free_orphan_dir_info(sctx, odi);
	}

finish:
	ret = send_utimes(sctx, pm->ino, pm->gen);
	if (ret < 0)
		goto out;

	/*
	 * After rename/move, need to update the utimes of both new parent(s)
	 * and old parent(s).
	 */
	list_for_each_entry(cur, &pm->update_refs, list) {
		if (cur->dir == rmdir_ino)
			continue;
		ret = send_utimes(sctx, cur->dir, cur->dir_gen);
		if (ret < 0)
			goto out;
	}

out:
	fs_path_free(name);
	fs_path_free(from_path);
	fs_path_free(to_path);
	sctx->send_progress = orig_progress;

	return ret;
}

static void free_pending_move(struct send_ctx *sctx, struct pending_dir_move *m)
{
	if (!list_empty(&m->list))
		list_del(&m->list);
	if (!RB_EMPTY_NODE(&m->node))
		rb_erase(&m->node, &sctx->pending_dir_moves);
	__free_recorded_refs(&m->update_refs);
	kfree(m);
}

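/*
 * Append a pending move (and any moves already chained to it) to the end of
 * the stack, preserving their relative order.
 */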
static void tail_append_pending_moves(struct pending_dir_move *moves,
				      struct list_head *stack)
{
	if (list_empty(&moves->list)) {
		list_add_tail(&moves->list, stack);
	} else {
		LIST_HEAD(list);
		list_splice_init(&moves->list, &list);
		list_add_tail(&moves->list, stack);
		list_splice_tail(&list, stack);
	}
}

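/*
 * Now that the current inode (sctx->cur_ino) has been processed, apply all
 * directory moves/renames that were delayed because they depended on it.
 * Applying one move may in turn unblock moves of directories that were
 * waiting for it, so walk the dependency chains with a work stack.
 */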
static int apply_children_dir_moves(struct send_ctx *sctx)
{
	struct pending_dir_move *pm;
	struct list_head stack;
	u64 parent_ino = sctx->cur_ino;
	int ret = 0;

	pm = get_pending_dir_moves(sctx, parent_ino);
	if (!pm)
		return 0;

	INIT_LIST_HEAD(&stack);
	tail_append_pending_moves(pm, &stack);

	while (!list_empty(&stack)) {
		pm = list_first_entry(&stack, struct pending_dir_move, list);
		parent_ino = pm->ino;
		ret = apply_dir_move(sctx, pm);
		free_pending_move(sctx, pm);
		if (ret)
			goto out;
		pm = get_pending_dir_moves(sctx, parent_ino);
		if (pm)
			tail_append_pending_moves(pm, &stack);
	}
	return 0;

out:
	while (!list_empty(&stack)) {
		pm = list_first_entry(&stack, struct pending_dir_move, list);
		free_pending_move(sctx, pm);
	}
	return ret;
}

/*
 * We might need to delay a directory rename even when no ancestor directory
 * (in the send root) with a higher inode number than ours (sctx->cur_ino) was
 * renamed. This happens when we rename a directory to the old name (the name
 * in the parent root) of some other unrelated directory that got its rename
 * delayed due to some ancestor with higher number that got renamed.
 *
 * Example:
 *
 * Parent snapshot:
 * .                               (ino 256)
 * |---- a/                        (ino 257)
 * |     |---- file                (ino 260)
 * |
 * |---- b/                        (ino 258)
 * |---- c/                        (ino 259)
 *
 * Send snapshot:
 * .                               (ino 256)
 * |---- a/                        (ino 258)
 * |---- x/                        (ino 259)
 *       |---- y/                  (ino 257)
 *             |----- file         (ino 260)
 *
 * Here we can not rename 258 from 'b' to 'a' without the rename of inode 257
 * from 'a' to 'x/y' happening first, which in turn depends on the rename of
 * inode 259 from 'c' to 'x'. So the order of rename commands the send stream
 * must issue is:
 *
 * 1 - rename 259 from 'c' to 'x'
 * 2 - rename 257 from 'a' to 'x/y'
 * 3 - rename 258 from 'b' to 'a'
 *
 * Returns 1 if the rename of sctx->cur_ino needs to be delayed, 0 if it can
 * be done right away and < 0 on error.
 */
static int wait_for_dest_dir_move(struct send_ctx *sctx,
				  struct recorded_ref *parent_ref,
				  const bool is_orphan)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_key di_key;
	struct btrfs_dir_item *di;
	u64 left_gen;
	u64 right_gen;
	int ret = 0;

	if (RB_EMPTY_ROOT(&sctx->waiting_dir_moves))
		return 0;

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	key.objectid = parent_ref->dir;
	key.type = BTRFS_DIR_ITEM_KEY;
	key.offset = btrfs_name_hash(parent_ref->name, parent_ref->name_len);

	ret = btrfs_search_slot(NULL, sctx->parent_root, &key, path, 0, 0);
	if (ret < 0) {
		goto out;
	} else if (ret > 0) {
		ret = 0;
		goto out;
	}

	di = btrfs_match_dir_item_name(sctx->parent_root, path,
				       parent_ref->name, parent_ref->name_len);
	if (!di) {
		ret = 0;
		goto out;
	}
	/*
	 * di_key.objectid has the number of the inode that has a dentry in the
	 * parent directory with the same name that sctx->cur_ino is being
	 * renamed to. We need to check if that inode is in the send root as
	 * well and if it is currently marked as an inode with a pending rename,
	 * if it is, we need to delay the rename of sctx->cur_ino as well, so
	 * that it happens after that other inode is renamed.
	 */
	btrfs_dir_item_key_to_cpu(path->nodes[0], di, &di_key);
	if (di_key.type != BTRFS_INODE_ITEM_KEY) {
		ret = 0;
		goto out;
	}

	ret = get_inode_info(sctx->parent_root, di_key.objectid, NULL,
			     &left_gen, NULL, NULL, NULL, NULL);
	if (ret < 0)
		goto out;
	ret = get_inode_info(sctx->send_root, di_key.objectid, NULL,
			     &right_gen, NULL, NULL, NULL, NULL);
	if (ret < 0) {
		if (ret == -ENOENT)
			ret = 0;
		goto out;
	}

	/* Different inode, no need to delay the rename of sctx->cur_ino */
	if (right_gen != left_gen) {
		ret = 0;
		goto out;
	}

	if (is_waiting_for_move(sctx, di_key.objectid)) {
		ret = add_pending_dir_move(sctx,
					   sctx->cur_ino,
					   sctx->cur_inode_gen,
					   di_key.objectid,
					   &sctx->new_refs,
					   &sctx->deleted_refs,
					   is_orphan);
		if (!ret)
			ret = 1;
	}
out:
	btrfs_free_path(path);
	return ret;
}

/*
 * Check if inode ino1 is an ancestor of inode ino2 in the given root.
 * Return 1 if true, 0 if false and < 0 on error.
 */
static int is_ancestor(struct btrfs_root *root,
		       const u64 ino1,
		       const u64 ino1_gen,
		       const u64 ino2,
		       struct fs_path *fs_path)
{
	u64 ino = ino2;

	while (ino > BTRFS_FIRST_FREE_OBJECTID) {
		int ret;
		u64 parent;
		u64 parent_gen;

		fs_path_reset(fs_path);
		ret = get_first_ref(root, ino, &parent, &parent_gen, fs_path);
		if (ret < 0) {
			if (ret == -ENOENT && ino == ino2)
				ret = 0;
			return ret;
		}
		if (parent == ino1)
			return parent_gen == ino1_gen ? 1 : 0;
		ino = parent;
	}
	return 0;
}

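/*
 * Returns 1 if the rename of sctx->cur_ino must be delayed because an
 * ancestor directory (in the parent snapshot) is itself waiting to be
 * renamed/moved, 0 if the rename can be issued now, and < 0 on error.
 * When 1 is returned, the rename has been queued with add_pending_dir_move().
 */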
static int wait_for_parent_move(struct send_ctx *sctx,
				struct recorded_ref *parent_ref,
				const bool is_orphan)
{
	int ret = 0;
	u64 ino = parent_ref->dir;
	u64 parent_ino_before, parent_ino_after;
	struct fs_path *path_before = NULL;
	struct fs_path *path_after = NULL;
	int len1, len2;

	path_after = fs_path_alloc();
	path_before = fs_path_alloc();
	if (!path_after || !path_before) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * Our current directory inode may not yet be renamed/moved because some
	 * ancestor (immediate or not) has to be renamed/moved first. So find if
	 * such ancestor exists and make sure our own rename/move happens after
	 * that ancestor is processed to avoid path build infinite loops (done
	 * at get_cur_path()).
	 */
	while (ino > BTRFS_FIRST_FREE_OBJECTID) {
		if (is_waiting_for_move(sctx, ino)) {
			/*
			 * If the current inode is an ancestor of ino in the
			 * parent root, we need to delay the rename of the
			 * current inode, otherwise don't delay the rename
			 * because we can end up with a circular dependency
			 * of renames, resulting in some directories never
			 * getting the respective rename operations issued in
			 * the send stream or getting into infinite path build
			 * loops.
			 */
			ret = is_ancestor(sctx->parent_root,
					  sctx->cur_ino, sctx->cur_inode_gen,
					  ino, path_before);
			break;
		}

		fs_path_reset(path_before);
		fs_path_reset(path_after);

		ret = get_first_ref(sctx->send_root, ino, &parent_ino_after,
				    NULL, path_after);
		if (ret < 0)
			goto out;
		ret = get_first_ref(sctx->parent_root, ino, &parent_ino_before,
				    NULL, path_before);
		if (ret < 0 && ret != -ENOENT) {
			goto out;
		} else if (ret == -ENOENT) {
			ret = 0;
			break;
		}

		len1 = fs_path_len(path_before);
		len2 = fs_path_len(path_after);
		if (ino > sctx->cur_ino &&
		    (parent_ino_before != parent_ino_after || len1 != len2 ||
		     memcmp(path_before->start, path_after->start, len1))) {
			ret = 1;
			break;
		}

		ino = parent_ino_after;
	}

out:
	fs_path_free(path_before);
	fs_path_free(path_after);

	if (ret == 1) {
		ret = add_pending_dir_move(sctx,
					   sctx->cur_ino,
					   sctx->cur_inode_gen,
					   ino,
					   &sctx->new_refs,
					   &sctx->deleted_refs,
					   is_orphan);
		if (!ret)
			ret = 1;
	}

	return ret;
}

/*
 * This does all the move/link/unlink/rmdir magic.
 */
static int process_recorded_refs(struct send_ctx *sctx, int *pending_move)
{
	int ret = 0;
	struct recorded_ref *cur;
	struct recorded_ref *cur2;
	struct list_head check_dirs;
	struct fs_path *valid_path = NULL;
	u64 ow_inode = 0;
	u64 ow_gen;
	int did_overwrite = 0;
	int is_orphan = 0;
	u64 last_dir_ino_rm = 0;
	bool can_rename = true;

	verbose_printk("btrfs: process_recorded_refs %llu\n", sctx->cur_ino);

	/*
	 * This should never happen as the root dir always has the same ref
	 * which is always '..'
	 */
	BUG_ON(sctx->cur_ino <= BTRFS_FIRST_FREE_OBJECTID);
	INIT_LIST_HEAD(&check_dirs);

	valid_path = fs_path_alloc();
	if (!valid_path) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * First, check if the first ref of the current inode was overwritten
	 * before. If yes, we know that the current inode was already orphanized
	 * and thus use the orphan name. If not, we can use get_cur_path to
	 * get the path of the first ref as it would look like while receiving
	 * at this point in time.
	 * New inodes are always orphan at the beginning, so force to use the
	 * orphan name in this case.
	 * The first ref is stored in valid_path and will be updated if it
	 * gets moved around.
	 */
	if (!sctx->cur_inode_new) {
		ret = did_overwrite_first_ref(sctx, sctx->cur_ino,
				sctx->cur_inode_gen);
		if (ret < 0)
			goto out;
		if (ret)
			did_overwrite = 1;
	}
	if (sctx->cur_inode_new || did_overwrite) {
		ret = gen_unique_name(sctx, sctx->cur_ino,
				sctx->cur_inode_gen, valid_path);
		if (ret < 0)
			goto out;
		is_orphan = 1;
	} else {
		ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen,
				valid_path);
		if (ret < 0)
			goto out;
	}

	list_for_each_entry(cur, &sctx->new_refs, list) {
		/*
		 * We may have refs where the parent directory does not exist
		 * yet. This happens if the parent directory's inum is higher
		 * than the current inum. To handle this case, we create the
		 * parent directory out of order. But we need to check if this
		 * did already happen before due to other refs in the same dir.
		 */
		ret = get_cur_inode_state(sctx, cur->dir, cur->dir_gen);
		if (ret < 0)
			goto out;
		if (ret == inode_state_will_create) {
			ret = 0;
			/*
			 * First check if any of the current inodes refs did
			 * already create the dir.
			 */
			list_for_each_entry(cur2, &sctx->new_refs, list) {
				if (cur == cur2)
					break;
				if (cur2->dir == cur->dir) {
					ret = 1;
					break;
				}
			}

			/*
			 * If that did not happen, check if a previous inode
			 * did already create the dir.
			 */
			if (!ret)
				ret = did_create_dir(sctx, cur->dir);
			if (ret < 0)
				goto out;
			if (!ret) {
				ret = send_create_inode(sctx, cur->dir);
				if (ret < 0)
					goto out;
			}
		}

		/*
		 * Check if this new ref would overwrite the first ref of
		 * another unprocessed inode. If yes, orphanize the
		 * overwritten inode. If we find an overwritten ref that is
		 * not the first ref, simply unlink it.
		 */
		ret = will_overwrite_ref(sctx, cur->dir, cur->dir_gen,
				cur->name, cur->name_len,
				&ow_inode, &ow_gen);
		if (ret < 0)
			goto out;
		if (ret) {
			ret = is_first_ref(sctx->parent_root,
					   ow_inode, cur->dir, cur->name,
					   cur->name_len);
			if (ret < 0)
				goto out;
			if (ret) {
				struct name_cache_entry *nce;

				ret = orphanize_inode(sctx, ow_inode, ow_gen,
						cur->full_path);
				if (ret < 0)
					goto out;
				/*
				 * Make sure we clear our orphanized inode's
				 * name from the name cache. This is because the
				 * inode ow_inode might be an ancestor of some
				 * other inode that will be orphanized as well
				 * later and has an inode number greater than
				 * sctx->send_progress. We need to prevent
				 * future name lookups from using the old name
				 * and get instead the orphan name.
				 */
				nce = name_cache_search(sctx, ow_inode, ow_gen);
				if (nce) {
					name_cache_delete(sctx, nce);
					kfree(nce);
				}
			} else {
				ret = send_unlink(sctx, cur->full_path);
				if (ret < 0)
					goto out;
			}
		}

		if (S_ISDIR(sctx->cur_inode_mode) && sctx->parent_root) {
			ret = wait_for_dest_dir_move(sctx, cur, is_orphan);
			if (ret < 0)
				goto out;
			if (ret == 1) {
				can_rename = false;
				*pending_move = 1;
			}
		}

		if (S_ISDIR(sctx->cur_inode_mode) && sctx->parent_root &&
		    can_rename) {
			ret = wait_for_parent_move(sctx, cur, is_orphan);
			if (ret < 0)
				goto out;
			if (ret == 1) {
				can_rename = false;
				*pending_move = 1;
			}
		}

		/*
		 * link/move the ref to the new place. If we have an orphan
		 * inode, move it and update valid_path. If not, link or move
		 * it depending on the inode mode.
		 */
		if (is_orphan && can_rename) {
			ret = send_rename(sctx, valid_path, cur->full_path);
			if (ret < 0)
				goto out;
			is_orphan = 0;
			ret = fs_path_copy(valid_path, cur->full_path);
			if (ret < 0)
				goto out;
		} else if (can_rename) {
			if (S_ISDIR(sctx->cur_inode_mode)) {
				/*
				 * Dirs can't be linked, so move it. For moved
				 * dirs, we always have one new and one deleted
				 * ref. The deleted ref is ignored later.
				 */
				ret = send_rename(sctx, valid_path,
						  cur->full_path);
				if (!ret)
					ret = fs_path_copy(valid_path,
							   cur->full_path);
				if (ret < 0)
					goto out;
			} else {
				ret = send_link(sctx, cur->full_path,
						valid_path);
				if (ret < 0)
					goto out;
			}
		}
		ret = dup_ref(cur, &check_dirs);
		if (ret < 0)
			goto out;
	}

	if (S_ISDIR(sctx->cur_inode_mode) && sctx->cur_inode_deleted) {
		/*
		 * Check if we can already rmdir the directory. If not,
		 * orphanize it. For every dir item inside that gets deleted
		 * later, we do this check again and rmdir it then if possible.
		 * See the use of check_dirs for more details.
		 */
		ret = can_rmdir(sctx, sctx->cur_ino, sctx->cur_inode_gen,
				sctx->cur_ino);
		if (ret < 0)
			goto out;
		if (ret) {
			ret = send_rmdir(sctx, valid_path);
			if (ret < 0)
				goto out;
		} else if (!is_orphan) {
			ret = orphanize_inode(sctx, sctx->cur_ino,
					sctx->cur_inode_gen, valid_path);
			if (ret < 0)
				goto out;
			is_orphan = 1;
		}

		list_for_each_entry(cur, &sctx->deleted_refs, list) {
			ret = dup_ref(cur, &check_dirs);
			if (ret < 0)
				goto out;
		}
	} else if (S_ISDIR(sctx->cur_inode_mode) &&
		   !list_empty(&sctx->deleted_refs)) {
		/*
		 * We have a moved dir. Add the old parent to check_dirs
		 */
		cur = list_entry(sctx->deleted_refs.next, struct recorded_ref,
				list);
		ret = dup_ref(cur, &check_dirs);
		if (ret < 0)
			goto out;
	} else if (!S_ISDIR(sctx->cur_inode_mode)) {
		/*
		 * We have a non dir inode. Go through all deleted refs and
		 * unlink them if they were not already overwritten by other
		 * inodes.
		 */
		list_for_each_entry(cur, &sctx->deleted_refs, list) {
			ret = did_overwrite_ref(sctx, cur->dir, cur->dir_gen,
					sctx->cur_ino, sctx->cur_inode_gen,
					cur->name, cur->name_len);
			if (ret < 0)
				goto out;
			if (!ret) {
				ret = send_unlink(sctx, cur->full_path);
				if (ret < 0)
					goto out;
			}
			ret = dup_ref(cur, &check_dirs);
			if (ret < 0)
				goto out;
		}
		/*
		 * If the inode is still orphan, unlink the orphan. This may
		 * happen when a previous inode did overwrite the first ref
		 * of this inode and no new refs were added for the current
		 * inode. Unlinking does not mean that the inode is deleted in
		 * all cases. There may still be links to this inode in other
		 * places.
		 */
		if (is_orphan) {
			ret = send_unlink(sctx, valid_path);
			if (ret < 0)
				goto out;
		}
	}

	/*
	 * We did collect all parent dirs where cur_inode was once located. We
	 * now go through all these dirs and check if they are pending for
	 * deletion and if it's finally possible to perform the rmdir now.
	 * We also update the inode stats of the parent dirs here.
	 */
	list_for_each_entry(cur, &check_dirs, list) {
		/*
		 * In case we had refs into dirs that were not processed yet,
		 * we don't need to do the utime and rmdir logic for these dirs.
		 * The dir will be processed later.
		 */
		if (cur->dir > sctx->cur_ino)
			continue;

		ret = get_cur_inode_state(sctx, cur->dir, cur->dir_gen);
		if (ret < 0)
			goto out;

		if (ret == inode_state_did_create ||
		    ret == inode_state_no_change) {
			/* TODO delayed utimes */
			ret = send_utimes(sctx, cur->dir, cur->dir_gen);
			if (ret < 0)
				goto out;
		} else if (ret == inode_state_did_delete &&
			   cur->dir != last_dir_ino_rm) {
			ret = can_rmdir(sctx, cur->dir, cur->dir_gen,
					sctx->cur_ino);
			if (ret < 0)
				goto out;
			if (ret) {
				ret = get_cur_path(sctx, cur->dir,
						   cur->dir_gen, valid_path);
				if (ret < 0)
					goto out;
				ret = send_rmdir(sctx, valid_path);
				if (ret < 0)
					goto out;
				last_dir_ino_rm = cur->dir;
			}
		}
	}

	ret = 0;

out:
	__free_recorded_refs(&check_dirs);
	free_recorded_refs(sctx);
	fs_path_free(valid_path);
	return ret;
}

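/*
 * Helper for the new/deleted ref callbacks below: resolve the current path of
 * the parent directory 'dir', append 'name' to it and record the full path in
 * the given refs list.
 */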
static int record_ref(struct btrfs_root *root, int num, u64 dir, int index,
		      struct fs_path *name, void *ctx, struct list_head *refs)
{
	int ret = 0;
	struct send_ctx *sctx = ctx;
	struct fs_path *p;
	u64 gen;

	p = fs_path_alloc();
	if (!p)
		return -ENOMEM;

	ret = get_inode_info(root, dir, NULL, &gen, NULL, NULL,
			NULL, NULL);
	if (ret < 0)
		goto out;

	ret = get_cur_path(sctx, dir, gen, p);
	if (ret < 0)
		goto out;
	ret = fs_path_add_path(p, name);
	if (ret < 0)
		goto out;

	ret = __record_ref(refs, dir, gen, p);

out:
	if (ret)
		fs_path_free(p);
	return ret;
}

static int __record_new_ref(int num, u64 dir, int index,
			    struct fs_path *name,
			    void *ctx)
{
	struct send_ctx *sctx = ctx;
	return record_ref(sctx->send_root, num, dir, index, name,
			  ctx, &sctx->new_refs);
}

static int __record_deleted_ref(int num, u64 dir, int index,
				struct fs_path *name,
				void *ctx)
{
	struct send_ctx *sctx = ctx;
	return record_ref(sctx->parent_root, num, dir, index, name,
			  ctx, &sctx->deleted_refs);
}

static int record_new_ref(struct send_ctx *sctx)
{
	int ret;

	ret = iterate_inode_ref(sctx->send_root, sctx->left_path,
				sctx->cmp_key, 0, __record_new_ref, sctx);
	if (ret < 0)
		goto out;
	ret = 0;

out:
	return ret;
}

static int record_deleted_ref(struct send_ctx *sctx)
{
	int ret;

	ret = iterate_inode_ref(sctx->parent_root, sctx->right_path,
				sctx->cmp_key, 0, __record_deleted_ref, sctx);
	if (ret < 0)
		goto out;
	ret = 0;

out:
	return ret;
}

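/*
 * Context and callback used with iterate_inode_ref() to find a specific
 * (dir, dir_gen, name) reference among an inode's refs.
 */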
struct find_ref_ctx {
	u64 dir;
	u64 dir_gen;
	struct btrfs_root *root;
	struct fs_path *name;
	int found_idx;
};

static int __find_iref(int num, u64 dir, int index,
		       struct fs_path *name,
		       void *ctx_)
{
	struct find_ref_ctx *ctx = ctx_;
	u64 dir_gen;
	int ret;

	if (dir == ctx->dir && fs_path_len(name) == fs_path_len(ctx->name) &&
	    strncmp(name->start, ctx->name->start, fs_path_len(name)) == 0) {
		/*
		 * To avoid doing extra lookups we'll only do this if everything
		 * else matches.
		 */
		ret = get_inode_info(ctx->root, dir, NULL, &dir_gen, NULL,
				     NULL, NULL, NULL);
		if (ret)
			return ret;
		if (dir_gen != ctx->dir_gen)
			return 0;
		ctx->found_idx = num;
		return 1;
	}
	return 0;
}

static int find_iref(struct btrfs_root *root,
		     struct btrfs_path *path,
		     struct btrfs_key *key,
		     u64 dir, u64 dir_gen, struct fs_path *name)
{
	int ret;
	struct find_ref_ctx ctx;

	ctx.dir = dir;
	ctx.name = name;
	ctx.dir_gen = dir_gen;
	ctx.found_idx = -1;
	ctx.root = root;

	ret = iterate_inode_ref(root, path, key, 0, __find_iref, &ctx);
	if (ret < 0)
		return ret;

	if (ctx.found_idx == -1)
		return -ENOENT;

	return ctx.found_idx;
}

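/*
 * For changed inode refs, a ref is new only if it does not exist in the
 * parent snapshot, and deleted only if it does not exist in the send
 * snapshot. The two callbacks below filter accordingly.
 */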
static int __record_changed_new_ref(int num, u64 dir, int index,
				    struct fs_path *name,
				    void *ctx)
{
	u64 dir_gen;
	int ret;
	struct send_ctx *sctx = ctx;

	ret = get_inode_info(sctx->send_root, dir, NULL, &dir_gen, NULL,
			     NULL, NULL, NULL);
	if (ret)
		return ret;

	ret = find_iref(sctx->parent_root, sctx->right_path,
			sctx->cmp_key, dir, dir_gen, name);
	if (ret == -ENOENT)
		ret = __record_new_ref(num, dir, index, name, sctx);
	else if (ret > 0)
		ret = 0;

	return ret;
}

static int __record_changed_deleted_ref(int num, u64 dir, int index,
					struct fs_path *name,
					void *ctx)
{
	u64 dir_gen;
	int ret;
	struct send_ctx *sctx = ctx;

	ret = get_inode_info(sctx->parent_root, dir, NULL, &dir_gen, NULL,
			     NULL, NULL, NULL);
	if (ret)
		return ret;

	ret = find_iref(sctx->send_root, sctx->left_path, sctx->cmp_key,
			dir, dir_gen, name);
	if (ret == -ENOENT)
		ret = __record_deleted_ref(num, dir, index, name, sctx);
	else if (ret > 0)
		ret = 0;

	return ret;
}

static int record_changed_ref(struct send_ctx *sctx)
{
	int ret = 0;

	ret = iterate_inode_ref(sctx->send_root, sctx->left_path,
			sctx->cmp_key, 0, __record_changed_new_ref, sctx);
	if (ret < 0)
		goto out;
	ret = iterate_inode_ref(sctx->parent_root, sctx->right_path,
			sctx->cmp_key, 0, __record_changed_deleted_ref, sctx);
	if (ret < 0)
		goto out;
	ret = 0;

out:
	return ret;
}

/*
 * Record and process all refs at once. Needed when an inode changes the
 * generation number, which means that it was deleted and recreated.
 */
static int process_all_refs(struct send_ctx *sctx,
			    enum btrfs_compare_tree_result cmd)
{
	int ret;
	struct btrfs_root *root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *eb;
	int slot;
	iterate_inode_ref_t cb;
	int pending_move = 0;

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	if (cmd == BTRFS_COMPARE_TREE_NEW) {
		root = sctx->send_root;
		cb = __record_new_ref;
	} else if (cmd == BTRFS_COMPARE_TREE_DELETED) {
		root = sctx->parent_root;
		cb = __record_deleted_ref;
	} else {
		btrfs_err(sctx->send_root->fs_info,
				"Wrong command %d in process_all_refs", cmd);
		ret = -EINVAL;
		goto out;
	}

	key.objectid = sctx->cmp_key->objectid;
	key.type = BTRFS_INODE_REF_KEY;
	key.offset = 0;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	while (1) {
		eb = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(eb)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto out;
			else if (ret > 0)
				break;
			continue;
		}

		btrfs_item_key_to_cpu(eb, &found_key, slot);

		if (found_key.objectid != key.objectid ||
		    (found_key.type != BTRFS_INODE_REF_KEY &&
		     found_key.type != BTRFS_INODE_EXTREF_KEY))
			break;

		ret = iterate_inode_ref(root, path, &found_key, 0, cb, sctx);
		if (ret < 0)
			goto out;

		path->slots[0]++;
	}
	btrfs_release_path(path);

	ret = process_recorded_refs(sctx, &pending_move);
	/* Only applicable to an incremental send. */
	ASSERT(pending_move == 0);

out:
	btrfs_free_path(path);
	return ret;
}

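/*
 * Emit a SET_XATTR/REMOVE_XATTR command into the send stream for the given
 * path. The TLV_PUT* macros jump to tlv_put_failure when attribute data
 * cannot be appended to the send buffer.
 */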
static int send_set_xattr(struct send_ctx *sctx,
			  struct fs_path *path,
			  const char *name, int name_len,
			  const char *data, int data_len)
{
	int ret = 0;

	ret = begin_cmd(sctx, BTRFS_SEND_C_SET_XATTR);
	if (ret < 0)
		goto out;

	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
	TLV_PUT_STRING(sctx, BTRFS_SEND_A_XATTR_NAME, name, name_len);
	TLV_PUT(sctx, BTRFS_SEND_A_XATTR_DATA, data, data_len);

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	return ret;
}

static int send_remove_xattr(struct send_ctx *sctx,
			     struct fs_path *path,
			     const char *name, int name_len)
{
	int ret = 0;

	ret = begin_cmd(sctx, BTRFS_SEND_C_REMOVE_XATTR);
	if (ret < 0)
		goto out;

	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
	TLV_PUT_STRING(sctx, BTRFS_SEND_A_XATTR_NAME, name, name_len);

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	return ret;
}

static int __process_new_xattr(int num, struct btrfs_key *di_key,
			       const char *name, int name_len,
			       const char *data, int data_len,
			       u8 type, void *ctx)
{
	int ret;
	struct send_ctx *sctx = ctx;
	struct fs_path *p;
	posix_acl_xattr_header dummy_acl;

	p = fs_path_alloc();
	if (!p)
		return -ENOMEM;

	/*
	 * This hack is needed because empty acls are stored as zero byte
	 * data in xattrs. Problem with that is, that receiving these zero byte
	 * acls will fail later. To fix this, we send a dummy acl list that
	 * only contains the version number and no entries.
	 */
	if (!strncmp(name, XATTR_NAME_POSIX_ACL_ACCESS, name_len) ||
	    !strncmp(name, XATTR_NAME_POSIX_ACL_DEFAULT, name_len)) {
		if (data_len == 0) {
			dummy_acl.a_version =
					cpu_to_le32(POSIX_ACL_XATTR_VERSION);
			data = (char *)&dummy_acl;
			data_len = sizeof(dummy_acl);
		}
	}

	ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
	if (ret < 0)
		goto out;

	ret = send_set_xattr(sctx, p, name, name_len, data, data_len);

out:
	fs_path_free(p);
	return ret;
}

static int __process_deleted_xattr(int num, struct btrfs_key *di_key,
				   const char *name, int name_len,
				   const char *data, int data_len,
				   u8 type, void *ctx)
{
	int ret;
	struct send_ctx *sctx = ctx;
	struct fs_path *p;

	p = fs_path_alloc();
	if (!p)
		return -ENOMEM;

	ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
	if (ret < 0)
		goto out;

	ret = send_remove_xattr(sctx, p, name, name_len);

out:
	fs_path_free(p);
	return ret;
}

static int process_new_xattr(struct send_ctx *sctx)
{
	int ret = 0;

	ret = iterate_dir_item(sctx->send_root, sctx->left_path,
			       sctx->cmp_key, __process_new_xattr, sctx);

	return ret;
}

static int process_deleted_xattr(struct send_ctx *sctx)
{
	int ret;

	ret = iterate_dir_item(sctx->parent_root, sctx->right_path,
			       sctx->cmp_key, __process_deleted_xattr, sctx);

	return ret;
}

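/*
 * Context and helpers to look up an xattr by name among the dir items at a
 * given key. find_xattr() returns the item index and, if 'data' is non-NULL,
 * a copy of the xattr value that the caller must kfree().
 */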
struct find_xattr_ctx {
	const char *name;
	int name_len;
	int found_idx;
	char *found_data;
	int found_data_len;
};

static int __find_xattr(int num, struct btrfs_key *di_key,
			const char *name, int name_len,
			const char *data, int data_len,
			u8 type, void *vctx)
{
	struct find_xattr_ctx *ctx = vctx;

	if (name_len == ctx->name_len &&
	    strncmp(name, ctx->name, name_len) == 0) {
		ctx->found_idx = num;
		ctx->found_data_len = data_len;
		ctx->found_data = kmemdup(data, data_len, GFP_NOFS);
		if (!ctx->found_data)
			return -ENOMEM;
		return 1;
	}
	return 0;
}

static int find_xattr(struct btrfs_root *root,
		      struct btrfs_path *path,
		      struct btrfs_key *key,
		      const char *name, int name_len,
		      char **data, int *data_len)
{
	int ret;
	struct find_xattr_ctx ctx;

	ctx.name = name;
	ctx.name_len = name_len;
	ctx.found_idx = -1;
	ctx.found_data = NULL;
	ctx.found_data_len = 0;

	ret = iterate_dir_item(root, path, key, __find_xattr, &ctx);
	if (ret < 0)
		return ret;

	if (ctx.found_idx == -1)
		return -ENOENT;
	if (data) {
		*data = ctx.found_data;
		*data_len = ctx.found_data_len;
	} else {
		kfree(ctx.found_data);
	}
	return ctx.found_idx;
}

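/*
 * For changed xattrs, set an xattr only if it is new or its value differs
 * from the parent snapshot, and remove it only if it no longer exists in
 * the send snapshot.
 */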
static int __process_changed_new_xattr(int num, struct btrfs_key *di_key,
				       const char *name, int name_len,
				       const char *data, int data_len,
				       u8 type, void *ctx)
{
	int ret;
	struct send_ctx *sctx = ctx;
	char *found_data = NULL;
	int found_data_len = 0;

	ret = find_xattr(sctx->parent_root, sctx->right_path,
			 sctx->cmp_key, name, name_len, &found_data,
			 &found_data_len);
	if (ret == -ENOENT) {
		ret = __process_new_xattr(num, di_key, name, name_len, data,
				data_len, type, ctx);
	} else if (ret >= 0) {
		if (data_len != found_data_len ||
		    memcmp(data, found_data, data_len)) {
			ret = __process_new_xattr(num, di_key, name, name_len,
					data, data_len, type, ctx);
		} else {
			ret = 0;
		}
	}

	kfree(found_data);
	return ret;
}

static int __process_changed_deleted_xattr(int num, struct btrfs_key *di_key,
					   const char *name, int name_len,
					   const char *data, int data_len,
					   u8 type, void *ctx)
{
	int ret;
	struct send_ctx *sctx = ctx;

	ret = find_xattr(sctx->send_root, sctx->left_path, sctx->cmp_key,
			 name, name_len, NULL, NULL);
	if (ret == -ENOENT)
		ret = __process_deleted_xattr(num, di_key, name, name_len, data,
				data_len, type, ctx);
	else if (ret >= 0)
		ret = 0;

	return ret;
}

static int process_changed_xattr(struct send_ctx *sctx)
{
	int ret = 0;

	ret = iterate_dir_item(sctx->send_root, sctx->left_path,
			       sctx->cmp_key, __process_changed_new_xattr, sctx);
	if (ret < 0)
		goto out;
	ret = iterate_dir_item(sctx->parent_root, sctx->right_path,
			       sctx->cmp_key, __process_changed_deleted_xattr, sctx);

out:
	return ret;
}

static int process_all_new_xattrs(struct send_ctx *sctx)
{
	int ret;
	struct btrfs_root *root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *eb;
	int slot;

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	root = sctx->send_root;

	key.objectid = sctx->cmp_key->objectid;
	key.type = BTRFS_XATTR_ITEM_KEY;
	key.offset = 0;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	while (1) {
		eb = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(eb)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0) {
				goto out;
			} else if (ret > 0) {
				ret = 0;
				break;
			}
			continue;
		}

		btrfs_item_key_to_cpu(eb, &found_key, slot);
		if (found_key.objectid != key.objectid ||
		    found_key.type != key.type) {
			ret = 0;
			goto out;
		}

		ret = iterate_dir_item(root, path, &found_key,
				       __process_new_xattr, sctx);
		if (ret < 0)
			goto out;

		path->slots[0]++;
	}

out:
	btrfs_free_path(path);
	return ret;
}

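/*
 * Read up to 'len' bytes of file data at 'offset' from the current inode
 * into sctx->read_buf via the page cache, returning the number of bytes
 * read or a negative error.
 */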
static ssize_t fill_read_buf(struct send_ctx *sctx, u64 offset, u32 len)
{
	struct btrfs_root *root = sctx->send_root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct inode *inode;
	struct page *page;
	char *addr;
	struct btrfs_key key;
	pgoff_t index = offset >> PAGE_CACHE_SHIFT;
	pgoff_t last_index;
	unsigned pg_offset = offset & ~PAGE_CACHE_MASK;
	ssize_t ret = 0;

	key.objectid = sctx->cur_ino;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;

	inode = btrfs_iget(fs_info->sb, &key, root, NULL);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	/* Clamp the read to the inode's size. */
	if (offset + len > i_size_read(inode)) {
		if (offset > i_size_read(inode))
			len = 0;
		else
			len = i_size_read(inode) - offset;
	}
	if (len == 0)
		goto out;

	last_index = (offset + len - 1) >> PAGE_CACHE_SHIFT;

	/* initial readahead */
	memset(&sctx->ra, 0, sizeof(struct file_ra_state));
	file_ra_state_init(&sctx->ra, inode->i_mapping);
	btrfs_force_ra(inode->i_mapping, &sctx->ra, NULL, index,
		       last_index - index + 1);

	while (index <= last_index) {
		unsigned cur_len = min_t(unsigned, len,
					 PAGE_CACHE_SIZE - pg_offset);
		page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
		if (!page) {
			ret = -ENOMEM;
			break;
		}

		if (!PageUptodate(page)) {
			btrfs_readpage(NULL, page);
			lock_page(page);
			if (!PageUptodate(page)) {
				unlock_page(page);
				page_cache_release(page);
				ret = -EIO;
				break;
			}
		}

		addr = kmap(page);
		memcpy(sctx->read_buf + ret, addr + pg_offset, cur_len);
		kunmap(page);
		unlock_page(page);
		page_cache_release(page);
		index++;
		pg_offset = 0;
		len -= cur_len;
		ret += cur_len;
	}
out:
	iput(inode);
	return ret;
}

/*
 * Read some bytes from the current inode/file and send a write command to
 * user space.
 */
static int send_write(struct send_ctx *sctx, u64 offset, u32 len)
{
	int ret = 0;
	struct fs_path *p;
	ssize_t num_read = 0;

	p = fs_path_alloc();
	if (!p)
		return -ENOMEM;

	verbose_printk("btrfs: send_write offset=%llu, len=%d\n", offset, len);

	num_read = fill_read_buf(sctx, offset, len);
	if (num_read <= 0) {
		if (num_read < 0)
			ret = num_read;
		goto out;
	}

	ret = begin_cmd(sctx, BTRFS_SEND_C_WRITE);
	if (ret < 0)
		goto out;

	ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
	if (ret < 0)
		goto out;

	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
	TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
	TLV_PUT(sctx, BTRFS_SEND_A_DATA, sctx->read_buf, num_read);

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	fs_path_free(p);
	if (ret < 0)
		return ret;
	return num_read;
}

/*
 * Send a clone command to user space.
 */
static int send_clone(struct send_ctx *sctx,
		      u64 offset, u32 len,
		      struct clone_root *clone_root)
{
	int ret = 0;
	struct fs_path *p;
	u64 gen;

	verbose_printk("btrfs: send_clone offset=%llu, len=%d, clone_root=%llu, "
		       "clone_inode=%llu, clone_offset=%llu\n", offset, len,
		       clone_root->root->objectid, clone_root->ino,
		       clone_root->offset);

	p = fs_path_alloc();
	if (!p)
		return -ENOMEM;

	ret = begin_cmd(sctx, BTRFS_SEND_C_CLONE);
	if (ret < 0)
		goto out;

	ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
	if (ret < 0)
		goto out;

	TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
	TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_LEN, len);
	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);

	if (clone_root->root == sctx->send_root) {
		ret = get_inode_info(sctx->send_root, clone_root->ino, NULL,
				&gen, NULL, NULL, NULL, NULL);
		if (ret < 0)
			goto out;
		ret = get_cur_path(sctx, clone_root->ino, gen, p);
	} else {
		ret = get_inode_path(clone_root->root, clone_root->ino, p);
	}
	if (ret < 0)
		goto out;

	/*
	 * If the parent we're using has a received_uuid set then use that as
	 * our clone source as that is what we will look for when doing a
	 * receive.
	 *
	 * This covers the case that we create a snapshot off of a received
	 * subvolume and then use that as the parent and try to receive on a
	 * different host.
	 */
	if (!btrfs_is_empty_uuid(clone_root->root->root_item.received_uuid))
		TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID,
			     clone_root->root->root_item.received_uuid);
	else
		TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID,
			     clone_root->root->root_item.uuid);
	TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_CTRANSID,
		    le64_to_cpu(clone_root->root->root_item.ctransid));
	TLV_PUT_PATH(sctx, BTRFS_SEND_A_CLONE_PATH, p);
	TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_OFFSET,
		    clone_root->offset);

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	fs_path_free(p);
	return ret;
}

/*
 * Send an update extent command to user space.
 */
static int send_update_extent(struct send_ctx *sctx,
			      u64 offset, u32 len)
{
	int ret = 0;
	struct fs_path *p;

	p = fs_path_alloc();
	if (!p)
		return -ENOMEM;

	ret = begin_cmd(sctx, BTRFS_SEND_C_UPDATE_EXTENT);
	if (ret < 0)
		goto out;

	ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
	if (ret < 0)
		goto out;

	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
	TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
	TLV_PUT_U64(sctx, BTRFS_SEND_A_SIZE, len);

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	fs_path_free(p);
	return ret;
}

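/*
 * Punch a hole by sending zero-filled write commands from the end of the
 * last processed extent (sctx->cur_inode_last_extent) up to 'end'.
 */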
static int send_hole(struct send_ctx *sctx, u64 end)
{
	struct fs_path *p = NULL;
	u64 offset = sctx->cur_inode_last_extent;
	u64 len;
	int ret = 0;

	p = fs_path_alloc();
	if (!p)
		return -ENOMEM;
	ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
	if (ret < 0)
		goto tlv_put_failure;
	memset(sctx->read_buf, 0, BTRFS_SEND_READ_SIZE);
	while (offset < end) {
		len = min_t(u64, end - offset, BTRFS_SEND_READ_SIZE);

		ret = begin_cmd(sctx, BTRFS_SEND_C_WRITE);
		if (ret < 0)
			break;
		TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
		TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
		TLV_PUT(sctx, BTRFS_SEND_A_DATA, sctx->read_buf, len);
		ret = send_cmd(sctx);
		if (ret < 0)
			break;
		offset += len;
	}
tlv_put_failure:
	fs_path_free(p);
	return ret;
}

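/*
 * Send the data of a file range as a series of write commands of at most
 * BTRFS_SEND_READ_SIZE bytes each, or as a single update extent command
 * when the stream carries no file data (BTRFS_SEND_FLAG_NO_FILE_DATA).
 */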
static int send_extent_data(struct send_ctx *sctx,
			    const u64 offset,
			    const u64 len)
{
	u64 sent = 0;

	if (sctx->flags & BTRFS_SEND_FLAG_NO_FILE_DATA)
		return send_update_extent(sctx, offset, len);

	while (sent < len) {
		u64 size = len - sent;
		int ret;

		if (size > BTRFS_SEND_READ_SIZE)
			size = BTRFS_SEND_READ_SIZE;
		ret = send_write(sctx, offset + sent, size);
		if (ret < 0)
			return ret;
		if (!ret)
			break;
		sent += ret;
	}
	return 0;
}

static int clone_range(struct send_ctx *sctx,
		       struct clone_root *clone_root,
		       const u64 disk_byte,
		       u64 data_offset,
		       u64 offset,
		       u64 len)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	int ret;

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	/*
	 * We can't send a clone operation for the entire range if we find
	 * extent items in the respective range in the source file that
	 * refer to different extents or if we find holes.
	 * So check for that and do a mix of clone and regular write/copy
	 * operations if needed.
	 *
	 * Example:
	 *
	 * mkfs.btrfs -f /dev/sda
	 * mount /dev/sda /mnt
	 * xfs_io -f -c "pwrite -S 0xaa 0K 100K" /mnt/foo
	 * cp --reflink=always /mnt/foo /mnt/bar
	 * xfs_io -c "pwrite -S 0xbb 50K 50K" /mnt/foo
	 * btrfs subvolume snapshot -r /mnt /mnt/snap
	 *
	 * If when we send the snapshot and we are processing file bar (which
	 * has a higher inode number than foo) we blindly send a clone operation
	 * for the [0, 100K[ range from foo to bar, the receiver ends up getting
	 * a file bar that matches the content of file foo - iow, doesn't match
	 * the content from bar in the original filesystem.
	 */
	key.objectid = clone_root->ino;
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = clone_root->offset;
	ret = btrfs_search_slot(NULL, clone_root->root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret > 0 && path->slots[0] > 0) {
		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0] - 1);
		if (key.objectid == clone_root->ino &&
		    key.type == BTRFS_EXTENT_DATA_KEY)
			path->slots[0]--;
	}

	while (true) {
		struct extent_buffer *leaf = path->nodes[0];
		int slot = path->slots[0];
		struct btrfs_file_extent_item *ei;
		u8 type;
		u64 ext_len;
		u64 clone_len;

		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(clone_root->root, path);
			if (ret < 0)
				goto out;
			else if (ret > 0)
				break;
			continue;
		}

		btrfs_item_key_to_cpu(leaf, &key, slot);

		/*
		 * We might have an implicit trailing hole (NO_HOLES feature
		 * enabled). We deal with it after leaving this loop.
		 */
		if (key.objectid != clone_root->ino ||
		    key.type != BTRFS_EXTENT_DATA_KEY)
			break;

		ei = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
		type = btrfs_file_extent_type(leaf, ei);
		if (type == BTRFS_FILE_EXTENT_INLINE) {
			ext_len = btrfs_file_extent_inline_len(leaf, slot, ei);
			ext_len = PAGE_CACHE_ALIGN(ext_len);
		} else {
			ext_len = btrfs_file_extent_num_bytes(leaf, ei);
		}

		if (key.offset + ext_len <= clone_root->offset)
			goto next;

		if (key.offset > clone_root->offset) {
			/* Implicit hole, NO_HOLES feature enabled. */
			u64 hole_len = key.offset - clone_root->offset;

			if (hole_len > len)
				hole_len = len;
			ret = send_extent_data(sctx, offset, hole_len);
			if (ret < 0)
				goto out;

			len -= hole_len;
			if (len == 0)
				break;
			offset += hole_len;
			clone_root->offset += hole_len;
			data_offset += hole_len;
		}

		if (key.offset >= clone_root->offset + len)
			break;

		clone_len = min_t(u64, ext_len, len);

		if (btrfs_file_extent_disk_bytenr(leaf, ei) == disk_byte &&
		    btrfs_file_extent_offset(leaf, ei) == data_offset)
			ret = send_clone(sctx, offset, clone_len, clone_root);
		else
			ret = send_extent_data(sctx, offset, clone_len);

		if (ret < 0)
			goto out;

		len -= clone_len;
		if (len == 0)
			break;
		offset += clone_len;
		clone_root->offset += clone_len;
		data_offset += clone_len;
next:
		path->slots[0]++;
	}

	if (len > 0)
		ret = send_extent_data(sctx, offset, len);
	else
		ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}

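/*
 * Send the data of the file extent item at 'key' either as clone commands
 * (when a suitable clone source was found and the range is block aligned)
 * or as plain write commands.
 */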
static int send_write_or_clone(struct send_ctx *sctx,
			       struct btrfs_path *path,
			       struct btrfs_key *key,
			       struct clone_root *clone_root)
{
	int ret = 0;
	struct btrfs_file_extent_item *ei;
	u64 offset = key->offset;
	u64 len;
	u8 type;
	u64 bs = sctx->send_root->fs_info->sb->s_blocksize;

	ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
			struct btrfs_file_extent_item);
	type = btrfs_file_extent_type(path->nodes[0], ei);
	if (type == BTRFS_FILE_EXTENT_INLINE) {
		len = btrfs_file_extent_inline_len(path->nodes[0],
						   path->slots[0], ei);
		/*
		 * it is possible the inline item won't cover the whole page,
		 * but there may be items after this page. Make
		 * sure to send the whole thing
		 */
		len = PAGE_CACHE_ALIGN(len);
	} else {
		len = btrfs_file_extent_num_bytes(path->nodes[0], ei);
	}

	if (offset + len > sctx->cur_inode_size)
		len = sctx->cur_inode_size - offset;
	if (len == 0) {
		ret = 0;
		goto out;
	}

	if (clone_root && IS_ALIGNED(offset + len, bs)) {
		u64 disk_byte;
		u64 data_offset;

		disk_byte = btrfs_file_extent_disk_bytenr(path->nodes[0], ei);
		data_offset = btrfs_file_extent_offset(path->nodes[0], ei);
		ret = clone_range(sctx, clone_root, disk_byte, data_offset,
				  offset, len);
	} else {
		ret = send_extent_data(sctx, offset, len);
	}
out:
	return ret;
}

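/*
 * Check if the file extent item at 'ekey' in the send snapshot refers to the
 * exact same data (disk extent, offsets and generation) as the overlapping
 * extent items in the parent snapshot. Returns 1 if unchanged, 0 if changed
 * and < 0 on error.
 */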
static int is_extent_unchanged(struct send_ctx *sctx,
			       struct btrfs_path *left_path,
			       struct btrfs_key *ekey)
{
	int ret = 0;
	struct btrfs_key key;
	struct btrfs_path *path = NULL;
	struct extent_buffer *eb;
	int slot;
	struct btrfs_key found_key;
	struct btrfs_file_extent_item *ei;
	u64 left_disknr;
	u64 right_disknr;
	u64 left_offset;
	u64 right_offset;
	u64 left_offset_fixed;
	u64 left_len;
	u64 right_len;
	u64 left_gen;
	u64 right_gen;
	u8 left_type;
	u8 right_type;

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	eb = left_path->nodes[0];
	slot = left_path->slots[0];
	ei = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
	left_type = btrfs_file_extent_type(eb, ei);

	if (left_type != BTRFS_FILE_EXTENT_REG) {
		ret = 0;
		goto out;
	}
	left_disknr = btrfs_file_extent_disk_bytenr(eb, ei);
	left_len = btrfs_file_extent_num_bytes(eb, ei);
	left_offset = btrfs_file_extent_offset(eb, ei);
	left_gen = btrfs_file_extent_generation(eb, ei);

	/*
	 * Following comments will refer to these graphics. L is the left
	 * extents which we are checking at the moment. 1-8 are the right
	 * extents that we iterate.
	 *
	 *       |-----L-----|
	 * |-1-|-2a-|-3-|-4-|-5-|-6-|
	 *
	 *       |-----L-----|
	 * |--1--|-2b-|...(same as above)
	 *
	 * Alternative situation. Happens on files where extents got split.
	 *       |-----L-----|
	 * |-----------7-----------|-6-|
	 *
	 * Alternative situation. Happens on files which got larger.
	 *       |-----L-----|
	 * |-8-|
	 * Nothing follows after 8.
	 */
	key.objectid = ekey->objectid;
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = ekey->offset;
	ret = btrfs_search_slot_for_read(sctx->parent_root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret) {
		ret = 0;
		goto out;
	}

	/*
	 * Handle special case where the right side has no extents at all.
	 */
	eb = path->nodes[0];
	slot = path->slots[0];
	btrfs_item_key_to_cpu(eb, &found_key, slot);
	if (found_key.objectid != key.objectid ||
	    found_key.type != key.type) {
		/* If we're a hole then just pretend nothing changed */
		ret = (left_disknr) ? 0 : 1;
		goto out;
	}

	/*
	 * We're now on 2a, 2b or 7.
	 */
	key = found_key;
	while (key.offset < ekey->offset + left_len) {
		ei = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
		right_type = btrfs_file_extent_type(eb, ei);
		if (right_type != BTRFS_FILE_EXTENT_REG) {
			ret = 0;
			goto out;
		}

		right_disknr = btrfs_file_extent_disk_bytenr(eb, ei);
		right_len = btrfs_file_extent_num_bytes(eb, ei);
		right_offset = btrfs_file_extent_offset(eb, ei);
		right_gen = btrfs_file_extent_generation(eb, ei);

		/*
		 * Are we at extent 8? If yes, we know the extent is changed.
		 * This may only happen on the first iteration.
		 */
		if (found_key.offset + right_len <= ekey->offset) {
			/* If we're a hole just pretend nothing changed */
			ret = (left_disknr) ? 0 : 1;
			goto out;
		}

		left_offset_fixed = left_offset;
		if (key.offset < ekey->offset) {
			/* Fix the right offset for 2a and 7. */
			right_offset += ekey->offset - key.offset;
		} else {
			/* Fix the left offset for all behind 2a and 2b */
			left_offset_fixed += key.offset - ekey->offset;
		}

		/*
		 * Check if we have the same extent.
		 */
		if (left_disknr != right_disknr ||
		    left_offset_fixed != right_offset ||
		    left_gen != right_gen) {
			ret = 0;
			goto out;
		}

		/*
		 * Go to the next extent.
		 */
		ret = btrfs_next_item(sctx->parent_root, path);
		if (ret < 0)
			goto out;
		if (!ret) {
			eb = path->nodes[0];
			slot = path->slots[0];
			btrfs_item_key_to_cpu(eb, &found_key, slot);
		}
		if (ret || found_key.objectid != key.objectid ||
		    found_key.type != key.type) {
			key.offset += right_len;
			break;
		}
		if (found_key.offset != key.offset + right_len) {
			ret = 0;
			goto out;
		}
		key = found_key;
	}

	/*
	 * We're now behind the left extent (treat as unchanged) or at the end
	 * of the right side (treat as changed).
	 */
	if (key.offset >= ekey->offset + left_len)
		ret = 1;
	else
		ret = 0;

out:
	btrfs_free_path(path);
	return ret;
}

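/*
 * Set sctx->cur_inode_last_extent to the end offset of the file extent item
 * of the current inode found at or before 'offset' in the send root.
 */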
static int get_last_extent(struct send_ctx *sctx, u64 offset)
{
	struct btrfs_path *path;
	struct btrfs_root *root = sctx->send_root;
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	u64 extent_end;
	u8 type;
	int ret;

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	sctx->cur_inode_last_extent = 0;

	key.objectid = sctx->cur_ino;
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = offset;
	ret = btrfs_search_slot_for_read(root, &key, path, 0, 1);
	if (ret < 0)
		goto out;
	ret = 0;
	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
	if (key.objectid != sctx->cur_ino || key.type != BTRFS_EXTENT_DATA_KEY)
		goto out;

	fi = btrfs_item_ptr(path->nodes[0], path->slots[0],
			    struct btrfs_file_extent_item);
	type = btrfs_file_extent_type(path->nodes[0], fi);
	if (type == BTRFS_FILE_EXTENT_INLINE) {
		u64 size = btrfs_file_extent_inline_len(path->nodes[0],
							path->slots[0], fi);
		extent_end = ALIGN(key.offset + size,
				   sctx->send_root->sectorsize);
	} else {
		extent_end = key.offset +
			btrfs_file_extent_num_bytes(path->nodes[0], fi);
	}
	sctx->cur_inode_last_extent = extent_end;
out:
	btrfs_free_path(path);
	return ret;
}

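/*
 * If there is a gap between the end of the last processed extent and the
 * start of the extent item at 'key', send zero-filled writes to cover the
 * hole and remember the new last extent end.
 */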
static int maybe_send_hole(struct send_ctx *sctx, struct btrfs_path *path,
			   struct btrfs_key *key)
{
	struct btrfs_file_extent_item *fi;
	u64 extent_end;
	u8 type;
	int ret = 0;

	if (sctx->cur_ino != key->objectid || !need_send_hole(sctx))
		return 0;

	if (sctx->cur_inode_last_extent == (u64)-1) {
		ret = get_last_extent(sctx, key->offset - 1);
		if (ret)
			return ret;
	}

	fi = btrfs_item_ptr(path->nodes[0], path->slots[0],
			    struct btrfs_file_extent_item);
	type = btrfs_file_extent_type(path->nodes[0], fi);
	if (type == BTRFS_FILE_EXTENT_INLINE) {
		u64 size = btrfs_file_extent_inline_len(path->nodes[0],
							path->slots[0], fi);
		extent_end = ALIGN(key->offset + size,
				   sctx->send_root->sectorsize);
	} else {
		extent_end = key->offset +
			btrfs_file_extent_num_bytes(path->nodes[0], fi);
	}

	if (path->slots[0] == 0 &&
	    sctx->cur_inode_last_extent < key->offset) {
		/*
		 * We might have skipped entire leafs that contained only
		 * file extent items for our current inode. These leafs have
		 * a generation number smaller (older) than the one in the
		 * current leaf and the leaf our last extent came from, and
		 * are located between these 2 leafs.
		 */
		ret = get_last_extent(sctx, key->offset - 1);
		if (ret)
			return ret;
	}

	if (sctx->cur_inode_last_extent < key->offset)
		ret = send_hole(sctx, key->offset);
	sctx->cur_inode_last_extent = extent_end;
	return ret;
}

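/*
 * Process a single file extent item of the current inode: skip it if it is
 * unchanged relative to the parent snapshot, a hole or a preallocated
 * extent, otherwise send its data as clone or write commands, filling any
 * preceding hole first.
 */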
static int process_extent(struct send_ctx *sctx,
			  struct btrfs_path *path,
			  struct btrfs_key *key)
{
	struct clone_root *found_clone = NULL;
	int ret = 0;

	if (S_ISLNK(sctx->cur_inode_mode))
		return 0;

	if (sctx->parent_root && !sctx->cur_inode_new) {
		ret = is_extent_unchanged(sctx, path, key);
		if (ret < 0)
			goto out;
		if (ret) {
			ret = 0;
			goto out_hole;
		}
	} else {
		struct btrfs_file_extent_item *ei;
		u8 type;

		ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
				    struct btrfs_file_extent_item);
		type = btrfs_file_extent_type(path->nodes[0], ei);
		if (type == BTRFS_FILE_EXTENT_PREALLOC ||
		    type == BTRFS_FILE_EXTENT_REG) {
			/*
			 * The send spec does not have a prealloc command yet,
			 * so just leave a hole for prealloc'ed extents until
			 * we have enough commands queued up to justify rev'ing
			 * the send spec.
			 */
			if (type == BTRFS_FILE_EXTENT_PREALLOC) {
				ret = 0;
				goto out;
			}

			/* Have a hole, just skip it. */
			if (btrfs_file_extent_disk_bytenr(path->nodes[0], ei) == 0) {
				ret = 0;
				goto out;
			}
		}
	}

	ret = find_extent_clone(sctx, path, key->objectid, key->offset,
				sctx->cur_inode_size, &found_clone);
	if (ret != -ENOENT && ret < 0)
		goto out;

	ret = send_write_or_clone(sctx, path, key, found_clone);
	if (ret)
		goto out;
out_hole:
	ret = maybe_send_hole(sctx, path, key);
out:
	return ret;
}

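/*
 * Iterate over all file extent items of the current inode in the send root
 * and process each one, used when the whole inode has to be sent as if it
 * were new.
 */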
static int process_all_extents(struct send_ctx *sctx)
{
	int ret;
	struct btrfs_root *root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *eb;
	int slot;

	root = sctx->send_root;
	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	key.objectid = sctx->cmp_key->objectid;
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = 0;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	while (1) {
		eb = path->nodes[0];
		slot = path->slots[0];

		if (slot >= btrfs_header_nritems(eb)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0) {
				goto out;
			} else if (ret > 0) {
				ret = 0;
				break;
			}
			continue;
		}

		btrfs_item_key_to_cpu(eb, &found_key, slot);

		if (found_key.objectid != key.objectid ||
		    found_key.type != key.type) {
			ret = 0;
			goto out;
		}

		ret = process_extent(sctx, path, &found_key);
		if (ret < 0)
			goto out;

		path->slots[0]++;
	}

out:
	btrfs_free_path(path);
	return ret;
}

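/*
 * Process the refs recorded for the current inode, but only once all of its
 * ref items have been seen or the end of the inode has been reached.
 */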
static int process_recorded_refs_if_needed(struct send_ctx *sctx, int at_end,
					   int *pending_move,
					   int *refs_processed)
{
	int ret = 0;

	if (sctx->cur_ino == 0)
		goto out;
	if (!at_end && sctx->cur_ino == sctx->cmp_key->objectid &&
	    sctx->cmp_key->type <= BTRFS_INODE_EXTREF_KEY)
		goto out;
	if (list_empty(&sctx->new_refs) && list_empty(&sctx->deleted_refs))
		goto out;

	ret = process_recorded_refs(sctx, pending_move);
	if (ret < 0)
		goto out;

	*refs_processed = 1;
out:
	return ret;
}

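/*
 * Finish off the current inode: process its recorded refs, send any pending
 * hole and a truncate for regular files, and emit chown/chmod/utimes
 * commands as needed. Called either when the tree comparison moved past the
 * inode's items or, with at_end set, when the whole comparison is done.
 */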
static int finish_inode_if_needed(struct send_ctx *sctx, int at_end)
{
	int ret = 0;
	u64 left_mode;
	u64 left_uid;
	u64 left_gid;
	u64 right_mode;
	u64 right_uid;
	u64 right_gid;
	int need_chmod = 0;
	int need_chown = 0;
	int pending_move = 0;
	int refs_processed = 0;

	ret = process_recorded_refs_if_needed(sctx, at_end, &pending_move,
					      &refs_processed);
	if (ret < 0)
		goto out;

	/*
	 * We have processed the refs and thus need to advance send_progress.
	 * Now, calls to get_cur_xxx will take the updated refs of the current
	 * inode into account.
	 *
	 * On the other hand, if our current inode is a directory and couldn't
	 * be moved/renamed because its parent was renamed/moved too and it has
	 * a higher inode number, we can only move/rename our current inode
	 * after we moved/renamed its parent. Therefore in this case operate on
	 * the old path (pre move/rename) of our current inode, and the
	 * move/rename will be performed later.
	 */
	if (refs_processed && !pending_move)
		sctx->send_progress = sctx->cur_ino + 1;

	if (sctx->cur_ino == 0 || sctx->cur_inode_deleted)
		goto out;
	if (!at_end && sctx->cmp_key->objectid == sctx->cur_ino)
		goto out;

	ret = get_inode_info(sctx->send_root, sctx->cur_ino, NULL, NULL,
			&left_mode, &left_uid, &left_gid, NULL);
	if (ret < 0)
		goto out;

	if (!sctx->parent_root || sctx->cur_inode_new) {
		need_chown = 1;
		if (!S_ISLNK(sctx->cur_inode_mode))
			need_chmod = 1;
	} else {
		ret = get_inode_info(sctx->parent_root, sctx->cur_ino,
				NULL, NULL, &right_mode, &right_uid,
				&right_gid, NULL);
		if (ret < 0)
			goto out;

		if (left_uid != right_uid || left_gid != right_gid)
			need_chown = 1;
		if (!S_ISLNK(sctx->cur_inode_mode) && left_mode != right_mode)
			need_chmod = 1;
	}

	if (S_ISREG(sctx->cur_inode_mode)) {
		if (need_send_hole(sctx)) {
			if (sctx->cur_inode_last_extent == (u64)-1 ||
			    sctx->cur_inode_last_extent <
			    sctx->cur_inode_size) {
				ret = get_last_extent(sctx, (u64)-1);
				if (ret)
					goto out;
			}
			if (sctx->cur_inode_last_extent <
			    sctx->cur_inode_size) {
				ret = send_hole(sctx, sctx->cur_inode_size);
				if (ret)
					goto out;
			}
		}
		ret = send_truncate(sctx, sctx->cur_ino, sctx->cur_inode_gen,
				sctx->cur_inode_size);
		if (ret < 0)
			goto out;
	}

	if (need_chown) {
		ret = send_chown(sctx, sctx->cur_ino, sctx->cur_inode_gen,
				left_uid, left_gid);
		if (ret < 0)
			goto out;
	}
	if (need_chmod) {
		ret = send_chmod(sctx, sctx->cur_ino, sctx->cur_inode_gen,
				left_mode);
		if (ret < 0)
			goto out;
	}

	/*
	 * If other directory inodes depended on our current directory
	 * inode's move/rename, now do their move/rename operations.
	 */
	if (!is_waiting_for_move(sctx, sctx->cur_ino)) {
		ret = apply_children_dir_moves(sctx);
		if (ret)
			goto out;
		/*
		 * Need to send that every time, no matter if it actually
		 * changed between the two trees as we have done changes to
		 * the inode before. If our inode is a directory and it's
		 * waiting to be moved/renamed, we will send its utimes when
		 * it's moved/renamed, therefore we don't need to do it here.
		 */
		sctx->send_progress = sctx->cur_ino + 1;
		ret = send_utimes(sctx, sctx->cur_ino, sctx->cur_inode_gen);
		if (ret < 0)
			goto out;
	}

out:
	return ret;
}

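/*
 * Handle a new, deleted or changed inode item reported by the tree
 * comparison and set up the cur_inode_* state in sctx. If the inode number
 * was deleted and reused (the generation changed), the old inode is
 * processed as deleted and the new one as new right away, including all of
 * its refs, extents and xattrs, because the left and right items then
 * describe two different inodes.
 */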
static int changed_inode(struct send_ctx *sctx,
			 enum btrfs_compare_tree_result result)
{
	int ret = 0;
	struct btrfs_key *key = sctx->cmp_key;
	struct btrfs_inode_item *left_ii = NULL;
	struct btrfs_inode_item *right_ii = NULL;
	u64 left_gen = 0;
	u64 right_gen = 0;

	sctx->cur_ino = key->objectid;
	sctx->cur_inode_new_gen = 0;
	sctx->cur_inode_last_extent = (u64)-1;

	/*
	 * Set send_progress to current inode. This will tell all get_cur_xxx
	 * functions that the current inode's refs are not updated yet. Later,
	 * when process_recorded_refs is finished, it is set to cur_ino + 1.
	 */
	sctx->send_progress = sctx->cur_ino;

	if (result == BTRFS_COMPARE_TREE_NEW ||
	    result == BTRFS_COMPARE_TREE_CHANGED) {
		left_ii = btrfs_item_ptr(sctx->left_path->nodes[0],
				sctx->left_path->slots[0],
				struct btrfs_inode_item);
		left_gen = btrfs_inode_generation(sctx->left_path->nodes[0],
				left_ii);
	} else {
		right_ii = btrfs_item_ptr(sctx->right_path->nodes[0],
				sctx->right_path->slots[0],
				struct btrfs_inode_item);
		right_gen = btrfs_inode_generation(sctx->right_path->nodes[0],
				right_ii);
	}
	if (result == BTRFS_COMPARE_TREE_CHANGED) {
		right_ii = btrfs_item_ptr(sctx->right_path->nodes[0],
				sctx->right_path->slots[0],
				struct btrfs_inode_item);
		right_gen = btrfs_inode_generation(sctx->right_path->nodes[0],
				right_ii);

		/*
		 * The cur_ino = root dir case is special here. We can't treat
		 * the inode as deleted+reused because it would generate a
		 * stream that tries to delete/mkdir the root dir.
		 */
		if (left_gen != right_gen &&
		    sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID)
			sctx->cur_inode_new_gen = 1;
	}

	if (result == BTRFS_COMPARE_TREE_NEW) {
		sctx->cur_inode_gen = left_gen;
		sctx->cur_inode_new = 1;
		sctx->cur_inode_deleted = 0;
		sctx->cur_inode_size = btrfs_inode_size(
				sctx->left_path->nodes[0], left_ii);
		sctx->cur_inode_mode = btrfs_inode_mode(
				sctx->left_path->nodes[0], left_ii);
		sctx->cur_inode_rdev = btrfs_inode_rdev(
				sctx->left_path->nodes[0], left_ii);
		if (sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID)
			ret = send_create_inode_if_needed(sctx);
	} else if (result == BTRFS_COMPARE_TREE_DELETED) {
		sctx->cur_inode_gen = right_gen;
		sctx->cur_inode_new = 0;
		sctx->cur_inode_deleted = 1;
		sctx->cur_inode_size = btrfs_inode_size(
				sctx->right_path->nodes[0], right_ii);
		sctx->cur_inode_mode = btrfs_inode_mode(
				sctx->right_path->nodes[0], right_ii);
	} else if (result == BTRFS_COMPARE_TREE_CHANGED) {
		/*
		 * We need to do some special handling in case the inode was
		 * reported as changed with a changed generation number. This
		 * means that the original inode was deleted and new inode
		 * reused the same inum. So we have to treat the old inode as
		 * deleted and the new one as new.
		 */
		if (sctx->cur_inode_new_gen) {
			/*
			 * First, process the inode as if it was deleted.
			 */
			sctx->cur_inode_gen = right_gen;
			sctx->cur_inode_new = 0;
			sctx->cur_inode_deleted = 1;
			sctx->cur_inode_size = btrfs_inode_size(
					sctx->right_path->nodes[0], right_ii);
			sctx->cur_inode_mode = btrfs_inode_mode(
					sctx->right_path->nodes[0], right_ii);
			ret = process_all_refs(sctx,
					BTRFS_COMPARE_TREE_DELETED);
			if (ret < 0)
				goto out;

			/*
			 * Now process the inode as if it was new.
			 */
			sctx->cur_inode_gen = left_gen;
			sctx->cur_inode_new = 1;
			sctx->cur_inode_deleted = 0;
			sctx->cur_inode_size = btrfs_inode_size(
					sctx->left_path->nodes[0], left_ii);
			sctx->cur_inode_mode = btrfs_inode_mode(
					sctx->left_path->nodes[0], left_ii);
			sctx->cur_inode_rdev = btrfs_inode_rdev(
					sctx->left_path->nodes[0], left_ii);
			ret = send_create_inode_if_needed(sctx);
			if (ret < 0)
				goto out;

			ret = process_all_refs(sctx, BTRFS_COMPARE_TREE_NEW);
			if (ret < 0)
				goto out;
			/*
			 * Advance send_progress now as we did not get into
			 * process_recorded_refs_if_needed in the new_gen case.
			 */
			sctx->send_progress = sctx->cur_ino + 1;

			/*
			 * Now process all extents and xattrs of the inode as if
			 * they were all new.
			 */
			ret = process_all_extents(sctx);
			if (ret < 0)
				goto out;
			ret = process_all_new_xattrs(sctx);
			if (ret < 0)
				goto out;
		} else {
			sctx->cur_inode_gen = left_gen;
			sctx->cur_inode_new = 0;
			sctx->cur_inode_new_gen = 0;
			sctx->cur_inode_deleted = 0;
			sctx->cur_inode_size = btrfs_inode_size(
					sctx->left_path->nodes[0], left_ii);
			sctx->cur_inode_mode = btrfs_inode_mode(
					sctx->left_path->nodes[0], left_ii);
		}
	}

out:
	return ret;
}

/*
 * We have to process new refs before deleted refs, but compare_trees gives us
 * the new and deleted refs mixed. To fix this, we record the new/deleted refs
 * first and later process them in process_recorded_refs.
 * For the cur_inode_new_gen case, we skip recording completely because
 * changed_inode already initiated the processing of refs. The reason is that
 * in this case compare_tree actually compares the refs of 2 different
 * inodes. To fix this, process_all_refs is used in changed_inode to handle all
 * refs of the right tree as deleted and all refs of the left tree as new.
 */
static int changed_ref(struct send_ctx *sctx,
		       enum btrfs_compare_tree_result result)
{
	int ret = 0;

	BUG_ON(sctx->cur_ino != sctx->cmp_key->objectid);

	if (!sctx->cur_inode_new_gen &&
	    sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID) {
		if (result == BTRFS_COMPARE_TREE_NEW)
			ret = record_new_ref(sctx);
		else if (result == BTRFS_COMPARE_TREE_DELETED)
			ret = record_deleted_ref(sctx);
		else if (result == BTRFS_COMPARE_TREE_CHANGED)
			ret = record_changed_ref(sctx);
	}

	return ret;
}

/*
 * Process new/deleted/changed xattrs. We skip processing in the
 * cur_inode_new_gen case because changed_inode already initiated the
 * processing of xattrs. The reason is the same as in changed_ref.
 */
static int changed_xattr(struct send_ctx *sctx,
			 enum btrfs_compare_tree_result result)
{
	int ret = 0;

	BUG_ON(sctx->cur_ino != sctx->cmp_key->objectid);

	if (!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted) {
		if (result == BTRFS_COMPARE_TREE_NEW)
			ret = process_new_xattr(sctx);
		else if (result == BTRFS_COMPARE_TREE_DELETED)
			ret = process_deleted_xattr(sctx);
		else if (result == BTRFS_COMPARE_TREE_CHANGED)
			ret = process_changed_xattr(sctx);
	}

	return ret;
}

/*
 * Process new/deleted/changed extents. We skip processing in the
 * cur_inode_new_gen case because changed_inode already initiated the
 * processing of extents. The reason is the same as in changed_ref.
 */
static int changed_extent(struct send_ctx *sctx,
			  enum btrfs_compare_tree_result result)
{
	int ret = 0;

	BUG_ON(sctx->cur_ino != sctx->cmp_key->objectid);

	if (!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted) {
		if (result != BTRFS_COMPARE_TREE_DELETED)
			ret = process_extent(sctx, sctx->left_path,
					sctx->cmp_key);
	}

	return ret;
}

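/*
 * Return 1 if the generation of inode @dir differs between the send and
 * parent roots (i.e. the directory was deleted and recreated), 0 if it is
 * the same, or a negative error.
 */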
static int dir_changed(struct send_ctx *sctx, u64 dir)
{
	u64 orig_gen, new_gen;
	int ret;

	ret = get_inode_info(sctx->send_root, dir, NULL, &new_gen, NULL, NULL,
			     NULL, NULL);
	if (ret)
		return ret;

	ret = get_inode_info(sctx->parent_root, dir, NULL, &orig_gen, NULL,
			     NULL, NULL, NULL);
	if (ret)
		return ret;

	return (orig_gen != new_gen) ? 1 : 0;
}

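/*
 * For a ref item that compare_trees reported as identical in both trees,
 * check whether any of the parent directories it references changed
 * generation; if one did, the ref still needs to be reprocessed.
 */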
static int compare_refs(struct send_ctx *sctx, struct btrfs_path *path,
			struct btrfs_key *key)
{
	struct btrfs_inode_extref *extref;
	struct extent_buffer *leaf;
	u64 dirid = 0, last_dirid = 0;
	unsigned long ptr;
	u32 item_size;
	u32 cur_offset = 0;
	int ref_name_len;
	int ret = 0;

	/* Easy case, just check this one dirid */
	if (key->type == BTRFS_INODE_REF_KEY) {
		dirid = key->offset;

		ret = dir_changed(sctx, dirid);
		goto out;
	}

	leaf = path->nodes[0];
	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
	ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
	while (cur_offset < item_size) {
		extref = (struct btrfs_inode_extref *)(ptr +
						       cur_offset);
		dirid = btrfs_inode_extref_parent(leaf, extref);
		ref_name_len = btrfs_inode_extref_name_len(leaf, extref);
		cur_offset += ref_name_len + sizeof(*extref);
		if (dirid == last_dirid)
			continue;
		ret = dir_changed(sctx, dirid);
		if (ret)
			break;
		last_dirid = dirid;
	}
out:
	return ret;
}

/*
 * Updates compare related fields in sctx and simply forwards to the actual
 * changed_xxx functions.
 */
static int changed_cb(struct btrfs_root *left_root,
		      struct btrfs_root *right_root,
		      struct btrfs_path *left_path,
		      struct btrfs_path *right_path,
		      struct btrfs_key *key,
		      enum btrfs_compare_tree_result result,
		      void *ctx)
{
	int ret = 0;
	struct send_ctx *sctx = ctx;

	if (result == BTRFS_COMPARE_TREE_SAME) {
		if (key->type == BTRFS_INODE_REF_KEY ||
		    key->type == BTRFS_INODE_EXTREF_KEY) {
			ret = compare_refs(sctx, left_path, key);
			if (!ret)
				return 0;
			if (ret < 0)
				return ret;
		} else if (key->type == BTRFS_EXTENT_DATA_KEY) {
			return maybe_send_hole(sctx, left_path, key);
		} else {
			return 0;
		}
		result = BTRFS_COMPARE_TREE_CHANGED;
		ret = 0;
	}

	sctx->left_path = left_path;
	sctx->right_path = right_path;
	sctx->cmp_key = key;

	ret = finish_inode_if_needed(sctx, 0);
	if (ret < 0)
		goto out;

	/* Ignore non-FS objects */
	if (key->objectid == BTRFS_FREE_INO_OBJECTID ||
	    key->objectid == BTRFS_FREE_SPACE_OBJECTID)
		goto out;

	if (key->type == BTRFS_INODE_ITEM_KEY)
		ret = changed_inode(sctx, result);
	else if (key->type == BTRFS_INODE_REF_KEY ||
		 key->type == BTRFS_INODE_EXTREF_KEY)
		ret = changed_ref(sctx, result);
	else if (key->type == BTRFS_XATTR_ITEM_KEY)
		ret = changed_xattr(sctx, result);
	else if (key->type == BTRFS_EXTENT_DATA_KEY)
		ret = changed_extent(sctx, result);

out:
	return ret;
}

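/*
 * Walk all items of the send root in key order and feed each one to
 * changed_cb as BTRFS_COMPARE_TREE_NEW. This implements a full,
 * non-incremental send, where there is no parent root to diff against.
 */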
static int full_send_tree(struct send_ctx *sctx)
{
	int ret;
	struct btrfs_root *send_root = sctx->send_root;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_path *path;
	struct extent_buffer *eb;
	int slot;

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_FIRST_FREE_OBJECTID;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;

	ret = btrfs_search_slot_for_read(send_root, &key, path, 1, 0);
	if (ret < 0)
		goto out;
	if (ret)
		goto out_finish;

	while (1) {
		eb = path->nodes[0];
		slot = path->slots[0];
		btrfs_item_key_to_cpu(eb, &found_key, slot);

		ret = changed_cb(send_root, NULL, path, NULL,
				 &found_key, BTRFS_COMPARE_TREE_NEW, sctx);
		if (ret < 0)
			goto out;

		key.objectid = found_key.objectid;
		key.type = found_key.type;
		key.offset = found_key.offset + 1;

		ret = btrfs_next_item(send_root, path);
		if (ret < 0)
			goto out;
		if (ret) {
			ret = 0;
			break;
		}
	}

out_finish:
	ret = finish_inode_if_needed(sctx, 1);

out:
	btrfs_free_path(path);
	return ret;
}

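/*
 * Send one subvolume: emit the stream header and the subvol/snapshot begin
 * command, then either diff against the parent root or walk the full tree.
 */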
static int send_subvol(struct send_ctx *sctx)
{
	int ret;

	if (!(sctx->flags & BTRFS_SEND_FLAG_OMIT_STREAM_HEADER)) {
		ret = send_header(sctx);
		if (ret < 0)
			goto out;
	}

	ret = send_subvol_begin(sctx);
	if (ret < 0)
		goto out;

	if (sctx->parent_root) {
		ret = btrfs_compare_trees(sctx->send_root, sctx->parent_root,
				changed_cb, sctx);
		if (ret < 0)
			goto out;
		ret = finish_inode_if_needed(sctx, 1);
		if (ret < 0)
			goto out;
	} else {
		ret = full_send_tree(sctx);
		if (ret < 0)
			goto out;
	}

out:
	free_recorded_refs(sctx);
	return ret;
}

/*
 * If orphan cleanup did remove any orphans from a root, it means the tree
 * was modified and therefore the commit root is not the same as the current
 * root anymore. This is a problem, because send uses the commit root and
 * therefore can see inode items that don't exist in the current root anymore,
 * and for example make calls to btrfs_iget, which will do tree lookups based
 * on the current root and not on the commit root. Those lookups will fail,
 * returning a -ESTALE error, and making send fail with that error. So make
 * sure a send does not see any orphans we have just removed, and that it will
 * see the same inodes regardless of whether a transaction commit happened
 * before it started (meaning that the commit root will be the same as the
 * current root) or not.
 */
static int ensure_commit_roots_uptodate(struct send_ctx *sctx)
{
	int i;
	struct btrfs_trans_handle *trans = NULL;

again:
	if (sctx->parent_root &&
	    sctx->parent_root->node != sctx->parent_root->commit_root)
		goto commit_trans;

	for (i = 0; i < sctx->clone_roots_cnt; i++)
		if (sctx->clone_roots[i].root->node !=
		    sctx->clone_roots[i].root->commit_root)
			goto commit_trans;

	if (trans)
		return btrfs_end_transaction(trans, sctx->send_root);

	return 0;

commit_trans:
	/* Use any root, all fs roots will get their commit roots updated. */
	if (!trans) {
		trans = btrfs_join_transaction(sctx->send_root);
		if (IS_ERR(trans))
			return PTR_ERR(trans);
		goto again;
	}

	return btrfs_commit_transaction(trans, sctx->send_root);
}

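/*
 * Drop one send reference from @root, warning if the counter ever goes
 * negative.
 */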
static void btrfs_root_dec_send_in_progress(struct btrfs_root *root)
{
	spin_lock(&root->root_item_lock);
	root->send_in_progress--;
	/*
	 * Not much left to do, we don't know why it's unbalanced and
	 * can't blindly reset it to 0.
	 */
	if (root->send_in_progress < 0)
		btrfs_err(root->fs_info,
			"send_in_progress unbalanced %d root %llu",
			root->send_in_progress, root->root_key.objectid);
	spin_unlock(&root->root_item_lock);
}

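/*
 * Entry point of the BTRFS_IOC_SEND ioctl: validate the user arguments, pin
 * the send, parent and clone roots read-only for the duration of the send,
 * set up the send context and its buffers, run the send and tear everything
 * down again.
 */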
long btrfs_ioctl_send(struct file *mnt_file, void __user *arg_)
{
	int ret = 0;
	struct btrfs_root *send_root;
	struct btrfs_root *clone_root;
	struct btrfs_fs_info *fs_info;
	struct btrfs_ioctl_send_args *arg = NULL;
	struct btrfs_key key;
	struct send_ctx *sctx = NULL;
	u32 i;
	u64 *clone_sources_tmp = NULL;
	int clone_sources_to_rollback = 0;
	int sort_clone_roots = 0;
	int index;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	send_root = BTRFS_I(file_inode(mnt_file))->root;
	fs_info = send_root->fs_info;

	/*
	 * The subvolume must remain read-only during send, protect against
	 * making it RW. This also protects against deletion.
	 */
	spin_lock(&send_root->root_item_lock);
	send_root->send_in_progress++;
	spin_unlock(&send_root->root_item_lock);

	/*
	 * This is done when we lookup the root, it should already be complete
	 * by the time we get here.
	 */
	WARN_ON(send_root->orphan_cleanup_state != ORPHAN_CLEANUP_DONE);

	/*
	 * Userspace tools do the checks and warn the user if it's
	 * not RO.
	 */
	if (!btrfs_root_readonly(send_root)) {
		ret = -EPERM;
		goto out;
	}

	arg = memdup_user(arg_, sizeof(*arg));
	if (IS_ERR(arg)) {
		ret = PTR_ERR(arg);
		arg = NULL;
		goto out;
	}

	if (!access_ok(VERIFY_READ, arg->clone_sources,
			sizeof(*arg->clone_sources) *
			arg->clone_sources_count)) {
		ret = -EFAULT;
		goto out;
	}

	if (arg->flags & ~BTRFS_SEND_FLAG_MASK) {
		ret = -EINVAL;
		goto out;
	}

	sctx = kzalloc(sizeof(struct send_ctx), GFP_NOFS);
	if (!sctx) {
		ret = -ENOMEM;
		goto out;
	}

	INIT_LIST_HEAD(&sctx->new_refs);
	INIT_LIST_HEAD(&sctx->deleted_refs);
	INIT_RADIX_TREE(&sctx->name_cache, GFP_NOFS);
	INIT_LIST_HEAD(&sctx->name_cache_list);

	sctx->flags = arg->flags;

	sctx->send_filp = fget(arg->send_fd);
	if (!sctx->send_filp) {
		ret = -EBADF;
		goto out;
	}

	sctx->send_root = send_root;
	/*
	 * Unlikely but possible, if the subvolume is marked for deletion but
	 * is slow to remove the directory entry, send can still be started.
	 */
	if (btrfs_root_dead(sctx->send_root)) {
		ret = -EPERM;
		goto out;
	}

	sctx->clone_roots_cnt = arg->clone_sources_count;

	sctx->send_max_size = BTRFS_SEND_BUF_SIZE;
	sctx->send_buf = vmalloc(sctx->send_max_size);
	if (!sctx->send_buf) {
		ret = -ENOMEM;
		goto out;
	}

	sctx->read_buf = vmalloc(BTRFS_SEND_READ_SIZE);
	if (!sctx->read_buf) {
		ret = -ENOMEM;
		goto out;
	}

	sctx->pending_dir_moves = RB_ROOT;
	sctx->waiting_dir_moves = RB_ROOT;
	sctx->orphan_dirs = RB_ROOT;

	sctx->clone_roots = vzalloc(sizeof(struct clone_root) *
			(arg->clone_sources_count + 1));
	if (!sctx->clone_roots) {
		ret = -ENOMEM;
		goto out;
	}

	if (arg->clone_sources_count) {
		clone_sources_tmp = vmalloc(arg->clone_sources_count *
				sizeof(*arg->clone_sources));
		if (!clone_sources_tmp) {
			ret = -ENOMEM;
			goto out;
		}

		ret = copy_from_user(clone_sources_tmp, arg->clone_sources,
				arg->clone_sources_count *
				sizeof(*arg->clone_sources));
		if (ret) {
			ret = -EFAULT;
			goto out;
		}

		for (i = 0; i < arg->clone_sources_count; i++) {
			key.objectid = clone_sources_tmp[i];
			key.type = BTRFS_ROOT_ITEM_KEY;
			key.offset = (u64)-1;

			index = srcu_read_lock(&fs_info->subvol_srcu);

			clone_root = btrfs_read_fs_root_no_name(fs_info, &key);
			if (IS_ERR(clone_root)) {
				srcu_read_unlock(&fs_info->subvol_srcu, index);
				ret = PTR_ERR(clone_root);
				goto out;
			}
			spin_lock(&clone_root->root_item_lock);
			if (!btrfs_root_readonly(clone_root) ||
			    btrfs_root_dead(clone_root)) {
				spin_unlock(&clone_root->root_item_lock);
				srcu_read_unlock(&fs_info->subvol_srcu, index);
				ret = -EPERM;
				goto out;
			}
			clone_root->send_in_progress++;
			spin_unlock(&clone_root->root_item_lock);
			srcu_read_unlock(&fs_info->subvol_srcu, index);

			sctx->clone_roots[i].root = clone_root;
			clone_sources_to_rollback = i + 1;
		}
		vfree(clone_sources_tmp);
		clone_sources_tmp = NULL;
	}

	if (arg->parent_root) {
		key.objectid = arg->parent_root;
		key.type = BTRFS_ROOT_ITEM_KEY;
		key.offset = (u64)-1;

		index = srcu_read_lock(&fs_info->subvol_srcu);

		sctx->parent_root = btrfs_read_fs_root_no_name(fs_info, &key);
		if (IS_ERR(sctx->parent_root)) {
			srcu_read_unlock(&fs_info->subvol_srcu, index);
			ret = PTR_ERR(sctx->parent_root);
			goto out;
		}

		spin_lock(&sctx->parent_root->root_item_lock);
		sctx->parent_root->send_in_progress++;
		if (!btrfs_root_readonly(sctx->parent_root) ||
		    btrfs_root_dead(sctx->parent_root)) {
			spin_unlock(&sctx->parent_root->root_item_lock);
			srcu_read_unlock(&fs_info->subvol_srcu, index);
			ret = -EPERM;
			goto out;
		}
		spin_unlock(&sctx->parent_root->root_item_lock);

		srcu_read_unlock(&fs_info->subvol_srcu, index);
	}

	/*
	 * Clones from send_root are allowed, but only if the clone source
	 * is behind the current send position. This is checked while searching
	 * for possible clone sources.
	 */
	sctx->clone_roots[sctx->clone_roots_cnt++].root = sctx->send_root;

	/* We do a bsearch later */
	sort(sctx->clone_roots, sctx->clone_roots_cnt,
			sizeof(*sctx->clone_roots), __clone_root_cmp_sort,
			NULL);
	sort_clone_roots = 1;

	ret = ensure_commit_roots_uptodate(sctx);
	if (ret)
		goto out;

	current->journal_info = BTRFS_SEND_TRANS_STUB;
	ret = send_subvol(sctx);
	current->journal_info = NULL;
	if (ret < 0)
		goto out;

	if (!(sctx->flags & BTRFS_SEND_FLAG_OMIT_END_CMD)) {
		ret = begin_cmd(sctx, BTRFS_SEND_C_END);
		if (ret < 0)
			goto out;
		ret = send_cmd(sctx);
		if (ret < 0)
			goto out;
	}

out:
	WARN_ON(sctx && !ret && !RB_EMPTY_ROOT(&sctx->pending_dir_moves));
	while (sctx && !RB_EMPTY_ROOT(&sctx->pending_dir_moves)) {
		struct rb_node *n;
		struct pending_dir_move *pm;

		n = rb_first(&sctx->pending_dir_moves);
		pm = rb_entry(n, struct pending_dir_move, node);
		while (!list_empty(&pm->list)) {
			struct pending_dir_move *pm2;

			pm2 = list_first_entry(&pm->list,
					       struct pending_dir_move, list);
			free_pending_move(sctx, pm2);
		}
		free_pending_move(sctx, pm);
	}

	WARN_ON(sctx && !ret && !RB_EMPTY_ROOT(&sctx->waiting_dir_moves));
	while (sctx && !RB_EMPTY_ROOT(&sctx->waiting_dir_moves)) {
		struct rb_node *n;
		struct waiting_dir_move *dm;

		n = rb_first(&sctx->waiting_dir_moves);
		dm = rb_entry(n, struct waiting_dir_move, node);
		rb_erase(&dm->node, &sctx->waiting_dir_moves);
		kfree(dm);
	}

	WARN_ON(sctx && !ret && !RB_EMPTY_ROOT(&sctx->orphan_dirs));
	while (sctx && !RB_EMPTY_ROOT(&sctx->orphan_dirs)) {
		struct rb_node *n;
		struct orphan_dir_info *odi;

		n = rb_first(&sctx->orphan_dirs);
		odi = rb_entry(n, struct orphan_dir_info, node);
		free_orphan_dir_info(sctx, odi);
	}

	if (sort_clone_roots) {
		for (i = 0; i < sctx->clone_roots_cnt; i++)
			btrfs_root_dec_send_in_progress(
					sctx->clone_roots[i].root);
	} else {
		for (i = 0; sctx && i < clone_sources_to_rollback; i++)
			btrfs_root_dec_send_in_progress(
					sctx->clone_roots[i].root);

		btrfs_root_dec_send_in_progress(send_root);
	}
	if (sctx && !IS_ERR_OR_NULL(sctx->parent_root))
		btrfs_root_dec_send_in_progress(sctx->parent_root);

	kfree(arg);
	vfree(clone_sources_tmp);

	if (sctx) {
		if (sctx->send_filp)
			fput(sctx->send_filp);

		vfree(sctx->clone_roots);
		vfree(sctx->send_buf);
		vfree(sctx->read_buf);

		name_cache_free(sctx);

		kfree(sctx);
	}

	return ret;
}