extent-tree.c

/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/rcupdate.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/percpu_counter.h>
#include "hash.h"
#include "tree-log.h"
#include "disk-io.h"
#include "print-tree.h"
#include "volumes.h"
#include "raid56.h"
#include "locking.h"
#include "free-space-cache.h"
#include "free-space-tree.h"
#include "math.h"
#include "sysfs.h"
#include "qgroup.h"

#undef SCRAMBLE_DELAYED_REFS
/*
 * control flags for do_chunk_alloc's force field
 *
 * CHUNK_ALLOC_NO_FORCE means to only allocate a chunk
 * if we really need one.
 *
 * CHUNK_ALLOC_LIMITED means to only try and allocate one
 * if we have very few chunks already allocated.  This is
 * used as part of the clustering code to help make sure
 * we have a good pool of storage to cluster in, without
 * filling the FS with empty chunks.
 *
 * CHUNK_ALLOC_FORCE means it must try to allocate one.
 */
enum {
        CHUNK_ALLOC_NO_FORCE = 0,
        CHUNK_ALLOC_LIMITED = 1,
        CHUNK_ALLOC_FORCE = 2,
};
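/*
 * Illustrative caller sketch (not from this file): ordinary flush paths
 * pass CHUNK_ALLOC_NO_FORCE, the allocator retries with
 * CHUNK_ALLOC_LIMITED after a failed clustering pass, and callers that
 * need a chunk unconditionally force one, roughly:
 *
 *        ret = do_chunk_alloc(trans, extent_root,
 *                             btrfs_get_alloc_profile(extent_root, 0),
 *                             CHUNK_ALLOC_FORCE);
 */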
static int update_block_group(struct btrfs_trans_handle *trans,
                              struct btrfs_root *root, u64 bytenr,
                              u64 num_bytes, int alloc);
static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
                               struct btrfs_root *root,
                               struct btrfs_delayed_ref_node *node, u64 parent,
                               u64 root_objectid, u64 owner_objectid,
                               u64 owner_offset, int refs_to_drop,
                               struct btrfs_delayed_extent_op *extra_op);
static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
                                    struct extent_buffer *leaf,
                                    struct btrfs_extent_item *ei);
static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
                                      struct btrfs_root *root,
                                      u64 parent, u64 root_objectid,
                                      u64 flags, u64 owner, u64 offset,
                                      struct btrfs_key *ins, int ref_mod);
static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
                                     struct btrfs_root *root,
                                     u64 parent, u64 root_objectid,
                                     u64 flags, struct btrfs_disk_key *key,
                                     int level, struct btrfs_key *ins);
static int do_chunk_alloc(struct btrfs_trans_handle *trans,
                          struct btrfs_root *extent_root, u64 flags,
                          int force);
static int find_next_key(struct btrfs_path *path, int level,
                         struct btrfs_key *key);
static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
                            int dump_block_groups);
static int btrfs_add_reserved_bytes(struct btrfs_block_group_cache *cache,
                                    u64 ram_bytes, u64 num_bytes, int delalloc);
static int btrfs_free_reserved_bytes(struct btrfs_block_group_cache *cache,
                                     u64 num_bytes, int delalloc);
static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
                               u64 num_bytes);
int btrfs_pin_extent(struct btrfs_root *root,
                     u64 bytenr, u64 num_bytes, int reserved);
static int __reserve_metadata_bytes(struct btrfs_root *root,
                                    struct btrfs_space_info *space_info,
                                    u64 orig_bytes,
                                    enum btrfs_reserve_flush_enum flush);
static void space_info_add_new_bytes(struct btrfs_fs_info *fs_info,
                                     struct btrfs_space_info *space_info,
                                     u64 num_bytes);
static void space_info_add_old_bytes(struct btrfs_fs_info *fs_info,
                                     struct btrfs_space_info *space_info,
                                     u64 num_bytes);
static noinline int
block_group_cache_done(struct btrfs_block_group_cache *cache)
{
        smp_mb();
        return cache->cached == BTRFS_CACHE_FINISHED ||
                cache->cached == BTRFS_CACHE_ERROR;
}

static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
{
        return (cache->flags & bits) == bits;
}
void btrfs_get_block_group(struct btrfs_block_group_cache *cache)
{
        atomic_inc(&cache->count);
}

void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
{
        if (atomic_dec_and_test(&cache->count)) {
                WARN_ON(cache->pinned > 0);
                WARN_ON(cache->reserved > 0);
                kfree(cache->free_space_ctl);
                kfree(cache);
        }
}
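/*
 * Usage sketch (hypothetical caller): every lookup that hands out a
 * block group must be paired with a put, e.g.:
 *
 *        cache = btrfs_lookup_block_group(fs_info, bytenr);
 *        if (cache) {
 *                ... use cache ...
 *                btrfs_put_block_group(cache);
 *        }
 *
 * The final put frees the structure, so the pointer must not be
 * touched after btrfs_put_block_group() returns.
 */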
/*
 * this adds the block group to the fs_info rb tree for the block group
 * cache
 */
static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
                                struct btrfs_block_group_cache *block_group)
{
        struct rb_node **p;
        struct rb_node *parent = NULL;
        struct btrfs_block_group_cache *cache;

        spin_lock(&info->block_group_cache_lock);
        p = &info->block_group_cache_tree.rb_node;

        while (*p) {
                parent = *p;
                cache = rb_entry(parent, struct btrfs_block_group_cache,
                                 cache_node);
                if (block_group->key.objectid < cache->key.objectid) {
                        p = &(*p)->rb_left;
                } else if (block_group->key.objectid > cache->key.objectid) {
                        p = &(*p)->rb_right;
                } else {
                        spin_unlock(&info->block_group_cache_lock);
                        return -EEXIST;
                }
        }

        rb_link_node(&block_group->cache_node, parent, p);
        rb_insert_color(&block_group->cache_node,
                        &info->block_group_cache_tree);

        if (info->first_logical_byte > block_group->key.objectid)
                info->first_logical_byte = block_group->key.objectid;

        spin_unlock(&info->block_group_cache_lock);

        return 0;
}
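/*
 * Note: the tree is ordered by key.objectid, i.e. the logical start
 * address of each block group, so -EEXIST above means two block groups
 * claim the same start offset, which would indicate a corrupted extent
 * tree rather than a normal caller error.
 */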
/*
 * This will return the block group at or after bytenr if contains is 0, else
 * it will return the block group that contains the bytenr
 */
static struct btrfs_block_group_cache *
block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
                              int contains)
{
        struct btrfs_block_group_cache *cache, *ret = NULL;
        struct rb_node *n;
        u64 end, start;

        spin_lock(&info->block_group_cache_lock);
        n = info->block_group_cache_tree.rb_node;

        while (n) {
                cache = rb_entry(n, struct btrfs_block_group_cache,
                                 cache_node);
                end = cache->key.objectid + cache->key.offset - 1;
                start = cache->key.objectid;

                if (bytenr < start) {
                        if (!contains && (!ret || start < ret->key.objectid))
                                ret = cache;
                        n = n->rb_left;
                } else if (bytenr > start) {
                        if (contains && bytenr <= end) {
                                ret = cache;
                                break;
                        }
                        n = n->rb_right;
                } else {
                        ret = cache;
                        break;
                }
        }
        if (ret) {
                btrfs_get_block_group(ret);
                if (bytenr == 0 && info->first_logical_byte > ret->key.objectid)
                        info->first_logical_byte = ret->key.objectid;
        }
        spin_unlock(&info->block_group_cache_lock);

        return ret;
}
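/*
 * Worked example (hypothetical layout): with block groups covering
 * [1M, 129M) and [256M, 384M), a search for bytenr = 200M returns the
 * 256M group when contains == 0 (the first group at or after bytenr)
 * and NULL when contains == 1, since no group actually covers 200M.
 * Either way the returned group carries an extra reference that the
 * caller must drop with btrfs_put_block_group().
 */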
static int add_excluded_extent(struct btrfs_root *root,
                               u64 start, u64 num_bytes)
{
        u64 end = start + num_bytes - 1;
        set_extent_bits(&root->fs_info->freed_extents[0],
                        start, end, EXTENT_UPTODATE);
        set_extent_bits(&root->fs_info->freed_extents[1],
                        start, end, EXTENT_UPTODATE);
        return 0;
}

static void free_excluded_extents(struct btrfs_root *root,
                                  struct btrfs_block_group_cache *cache)
{
        u64 start, end;

        start = cache->key.objectid;
        end = start + cache->key.offset - 1;

        clear_extent_bits(&root->fs_info->freed_extents[0],
                          start, end, EXTENT_UPTODATE);
        clear_extent_bits(&root->fs_info->freed_extents[1],
                          start, end, EXTENT_UPTODATE);
}
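/*
 * Note (background, not stated in this file): freed_extents[] is a pair
 * of trees because fs_info->pinned_extents points at one of the two and
 * the pair is swapped at transaction commit; marking an excluded range
 * in both trees keeps it excluded regardless of which tree is currently
 * acting as pinned_extents.
 */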
static int exclude_super_stripes(struct btrfs_root *root,
                                 struct btrfs_block_group_cache *cache)
{
        u64 bytenr;
        u64 *logical;
        int stripe_len;
        int i, nr, ret;

        if (cache->key.objectid < BTRFS_SUPER_INFO_OFFSET) {
                stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->key.objectid;
                cache->bytes_super += stripe_len;
                ret = add_excluded_extent(root, cache->key.objectid,
                                          stripe_len);
                if (ret)
                        return ret;
        }

        for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
                bytenr = btrfs_sb_offset(i);
                ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
                                       cache->key.objectid, bytenr,
                                       0, &logical, &nr, &stripe_len);
                if (ret)
                        return ret;

                while (nr--) {
                        u64 start, len;

                        if (logical[nr] > cache->key.objectid +
                            cache->key.offset)
                                continue;

                        if (logical[nr] + stripe_len <= cache->key.objectid)
                                continue;

                        start = logical[nr];
                        if (start < cache->key.objectid) {
                                start = cache->key.objectid;
                                len = (logical[nr] + stripe_len) - start;
                        } else {
                                len = min_t(u64, stripe_len,
                                            cache->key.objectid +
                                            cache->key.offset - start);
                        }

                        cache->bytes_super += len;
                        ret = add_excluded_extent(root, start, len);
                        if (ret) {
                                kfree(logical);
                                return ret;
                        }
                }

                kfree(logical);
        }
        return 0;
}
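/*
 * Note (background, not stated in this file): superblock mirrors live at
 * fixed device offsets (64KiB, 64MiB, 256GiB); btrfs_rmap_block() maps
 * each mirror back to the logical ranges that cover it, and any overlap
 * with this block group is counted in bytes_super and excluded so the
 * free space code never hands those bytes out.
 */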
static struct btrfs_caching_control *
get_caching_control(struct btrfs_block_group_cache *cache)
{
        struct btrfs_caching_control *ctl;

        spin_lock(&cache->lock);
        if (!cache->caching_ctl) {
                spin_unlock(&cache->lock);
                return NULL;
        }

        ctl = cache->caching_ctl;
        atomic_inc(&ctl->count);
        spin_unlock(&cache->lock);
        return ctl;
}

static void put_caching_control(struct btrfs_caching_control *ctl)
{
        if (atomic_dec_and_test(&ctl->count))
                kfree(ctl);
}
#ifdef CONFIG_BTRFS_DEBUG
static void fragment_free_space(struct btrfs_root *root,
                                struct btrfs_block_group_cache *block_group)
{
        u64 start = block_group->key.objectid;
        u64 len = block_group->key.offset;
        u64 chunk = block_group->flags & BTRFS_BLOCK_GROUP_METADATA ?
                root->nodesize : root->sectorsize;
        u64 step = chunk << 1;

        while (len > chunk) {
                btrfs_remove_free_space(block_group, start, chunk);
                start += step;
                if (len < step)
                        len = 0;
                else
                        len -= step;
        }
}
#endif
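/*
 * Sketch of the effect (debug builds only): with chunk = 16K the loop
 * removes [start, start + 16K), skips the next 16K, removes the 16K
 * after that, and so on, leaving the block group's free space
 * deliberately checkerboarded so fragmentation handling in the
 * allocator can be exercised.
 */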
/*
 * This is only called by cache_block_group(). Since we could have freed
 * extents, we need to check pinned_extents for any extents that can't be
 * used yet, because their free space will be released as soon as the
 * transaction commits.
 */
u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
		       struct btrfs_fs_info *info, u64 start, u64 end)
{
	u64 extent_start, extent_end, size, total_added = 0;
	int ret;

	while (start < end) {
		ret = find_first_extent_bit(info->pinned_extents, start,
					    &extent_start, &extent_end,
					    EXTENT_DIRTY | EXTENT_UPTODATE,
					    NULL);
		if (ret)
			break;

		if (extent_start <= start) {
			start = extent_end + 1;
		} else if (extent_start > start && extent_start < end) {
			size = extent_start - start;
			total_added += size;
			ret = btrfs_add_free_space(block_group, start,
						   size);
			BUG_ON(ret); /* -ENOMEM or logic error */
			start = extent_end + 1;
		} else {
			break;
		}
	}

	if (start < end) {
		size = end - start;
		total_added += size;
		ret = btrfs_add_free_space(block_group, start, size);
		BUG_ON(ret); /* -ENOMEM or logic error */
	}

	return total_added;
}
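
/*
 * Worked example (illustrative, values made up): caching a block group
 * spanning [X, X + 1G) while a 128K extent at X + 64M is still pinned
 * (its bits in pinned_extents cover [X + 64M, X + 64M + 128K - 1]).
 * add_new_free_space(bg, info, X, X + 1G) then adds two free ranges,
 * [X, X + 64M) and [X + 64M + 128K, X + 1G), and returns 1G - 128K;
 * the pinned 128K becomes usable only after the transaction commits.
 */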
static int load_extent_tree_free(struct btrfs_caching_control *caching_ctl)
{
	struct btrfs_block_group_cache *block_group;
	struct btrfs_fs_info *fs_info;
	struct btrfs_root *extent_root;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	u64 total_found = 0;
	u64 last = 0;
	u32 nritems;
	int ret;
	bool wakeup = true;

	block_group = caching_ctl->block_group;
	fs_info = block_group->fs_info;
	extent_root = fs_info->extent_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);

#ifdef CONFIG_BTRFS_DEBUG
	/*
	 * If we're fragmenting we don't want to make anybody think we can
	 * allocate from this block group until we've had a chance to fragment
	 * the free space.
	 */
	if (btrfs_should_fragment_free_space(extent_root, block_group))
		wakeup = false;
#endif
	/*
	 * We don't want to deadlock with somebody trying to allocate a new
	 * extent for the extent root while also trying to search the extent
	 * root to add free space. So we skip locking and search the commit
	 * root, since it's read-only.
	 */
	path->skip_locking = 1;
	path->search_commit_root = 1;
	path->reada = READA_FORWARD;

	key.objectid = last;
	key.offset = 0;
	key.type = BTRFS_EXTENT_ITEM_KEY;

next:
	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	leaf = path->nodes[0];
	nritems = btrfs_header_nritems(leaf);

	while (1) {
		if (btrfs_fs_closing(fs_info) > 1) {
			last = (u64)-1;
			break;
		}

		if (path->slots[0] < nritems) {
			btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		} else {
			ret = find_next_key(path, 0, &key);
			if (ret)
				break;

			if (need_resched() ||
			    rwsem_is_contended(&fs_info->commit_root_sem)) {
				if (wakeup)
					caching_ctl->progress = last;
				btrfs_release_path(path);
				up_read(&fs_info->commit_root_sem);
				mutex_unlock(&caching_ctl->mutex);
				cond_resched();
				mutex_lock(&caching_ctl->mutex);
				down_read(&fs_info->commit_root_sem);
				goto next;
			}

			ret = btrfs_next_leaf(extent_root, path);
			if (ret < 0)
				goto out;
			if (ret)
				break;
			leaf = path->nodes[0];
			nritems = btrfs_header_nritems(leaf);
			continue;
		}

		if (key.objectid < last) {
			key.objectid = last;
			key.offset = 0;
			key.type = BTRFS_EXTENT_ITEM_KEY;

			if (wakeup)
				caching_ctl->progress = last;
			btrfs_release_path(path);
			goto next;
		}

		if (key.objectid < block_group->key.objectid) {
			path->slots[0]++;
			continue;
		}

		if (key.objectid >= block_group->key.objectid +
		    block_group->key.offset)
			break;

		if (key.type == BTRFS_EXTENT_ITEM_KEY ||
		    key.type == BTRFS_METADATA_ITEM_KEY) {
			total_found += add_new_free_space(block_group,
							  fs_info, last,
							  key.objectid);
			if (key.type == BTRFS_METADATA_ITEM_KEY)
				last = key.objectid +
					fs_info->tree_root->nodesize;
			else
				last = key.objectid + key.offset;

			if (total_found > CACHING_CTL_WAKE_UP) {
				total_found = 0;
				if (wakeup)
					wake_up(&caching_ctl->wait);
			}
		}
		path->slots[0]++;
	}
	ret = 0;

	total_found += add_new_free_space(block_group, fs_info, last,
					  block_group->key.objectid +
					  block_group->key.offset);
	caching_ctl->progress = (u64)-1;

out:
	btrfs_free_path(path);
	return ret;
}
static noinline void caching_thread(struct btrfs_work *work)
{
	struct btrfs_block_group_cache *block_group;
	struct btrfs_fs_info *fs_info;
	struct btrfs_caching_control *caching_ctl;
	struct btrfs_root *extent_root;
	int ret;

	caching_ctl = container_of(work, struct btrfs_caching_control, work);
	block_group = caching_ctl->block_group;
	fs_info = block_group->fs_info;
	extent_root = fs_info->extent_root;

	mutex_lock(&caching_ctl->mutex);
	down_read(&fs_info->commit_root_sem);

	if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE))
		ret = load_free_space_tree(caching_ctl);
	else
		ret = load_extent_tree_free(caching_ctl);

	spin_lock(&block_group->lock);
	block_group->caching_ctl = NULL;
	block_group->cached = ret ? BTRFS_CACHE_ERROR : BTRFS_CACHE_FINISHED;
	spin_unlock(&block_group->lock);

#ifdef CONFIG_BTRFS_DEBUG
	if (btrfs_should_fragment_free_space(extent_root, block_group)) {
		u64 bytes_used;

		spin_lock(&block_group->space_info->lock);
		spin_lock(&block_group->lock);
		bytes_used = block_group->key.offset -
			btrfs_block_group_used(&block_group->item);
		block_group->space_info->bytes_used += bytes_used >> 1;
		spin_unlock(&block_group->lock);
		spin_unlock(&block_group->space_info->lock);
		fragment_free_space(extent_root, block_group);
	}
#endif

	caching_ctl->progress = (u64)-1;

	up_read(&fs_info->commit_root_sem);
	free_excluded_extents(fs_info->extent_root, block_group);
	mutex_unlock(&caching_ctl->mutex);

	wake_up(&caching_ctl->wait);

	put_caching_control(caching_ctl);
	btrfs_put_block_group(block_group);
}
static int cache_block_group(struct btrfs_block_group_cache *cache,
			     int load_cache_only)
{
	DEFINE_WAIT(wait);
	struct btrfs_fs_info *fs_info = cache->fs_info;
	struct btrfs_caching_control *caching_ctl;
	int ret = 0;

	caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
	if (!caching_ctl)
		return -ENOMEM;

	INIT_LIST_HEAD(&caching_ctl->list);
	mutex_init(&caching_ctl->mutex);
	init_waitqueue_head(&caching_ctl->wait);
	caching_ctl->block_group = cache;
	caching_ctl->progress = cache->key.objectid;
	atomic_set(&caching_ctl->count, 1);
	btrfs_init_work(&caching_ctl->work, btrfs_cache_helper,
			caching_thread, NULL, NULL);

	spin_lock(&cache->lock);
	/*
	 * This should be a rare occasion, but it can happen when one thread
	 * starts to load the space cache info and then some other thread
	 * starts a transaction commit which tries to do an allocation while
	 * the first thread is still loading. The previous loop should have
	 * kept us from choosing this block group, but if we've moved to the
	 * state where we will wait on caching block groups we need to first
	 * check if we're doing a fast load here, so we can wait for it to
	 * finish; otherwise we could end up allocating from a block group
	 * whose cache gets evicted for one reason or another.
	 */
	while (cache->cached == BTRFS_CACHE_FAST) {
		struct btrfs_caching_control *ctl;

		ctl = cache->caching_ctl;
		atomic_inc(&ctl->count);
		prepare_to_wait(&ctl->wait, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock(&cache->lock);

		schedule();

		finish_wait(&ctl->wait, &wait);
		put_caching_control(ctl);
		spin_lock(&cache->lock);
	}

	if (cache->cached != BTRFS_CACHE_NO) {
		spin_unlock(&cache->lock);
		kfree(caching_ctl);
		return 0;
	}
	WARN_ON(cache->caching_ctl);
	cache->caching_ctl = caching_ctl;
	cache->cached = BTRFS_CACHE_FAST;
	spin_unlock(&cache->lock);

	if (fs_info->mount_opt & BTRFS_MOUNT_SPACE_CACHE) {
		mutex_lock(&caching_ctl->mutex);
		ret = load_free_space_cache(fs_info, cache);

		spin_lock(&cache->lock);
		if (ret == 1) {
			cache->caching_ctl = NULL;
			cache->cached = BTRFS_CACHE_FINISHED;
			cache->last_byte_to_unpin = (u64)-1;
			caching_ctl->progress = (u64)-1;
		} else {
			if (load_cache_only) {
				cache->caching_ctl = NULL;
				cache->cached = BTRFS_CACHE_NO;
			} else {
				cache->cached = BTRFS_CACHE_STARTED;
				cache->has_caching_ctl = 1;
			}
		}
		spin_unlock(&cache->lock);
#ifdef CONFIG_BTRFS_DEBUG
		if (ret == 1 &&
		    btrfs_should_fragment_free_space(fs_info->extent_root,
						     cache)) {
			u64 bytes_used;

			spin_lock(&cache->space_info->lock);
			spin_lock(&cache->lock);
			bytes_used = cache->key.offset -
				btrfs_block_group_used(&cache->item);
			cache->space_info->bytes_used += bytes_used >> 1;
			spin_unlock(&cache->lock);
			spin_unlock(&cache->space_info->lock);
			fragment_free_space(fs_info->extent_root, cache);
		}
#endif
		mutex_unlock(&caching_ctl->mutex);

		wake_up(&caching_ctl->wait);
		if (ret == 1) {
			put_caching_control(caching_ctl);
			free_excluded_extents(fs_info->extent_root, cache);
			return 0;
		}
	} else {
		/*
		 * We're either using the free space tree or no caching at all.
		 * Set cached to the appropriate value and wake up any waiters.
		 */
		spin_lock(&cache->lock);
		if (load_cache_only) {
			cache->caching_ctl = NULL;
			cache->cached = BTRFS_CACHE_NO;
		} else {
			cache->cached = BTRFS_CACHE_STARTED;
			cache->has_caching_ctl = 1;
		}
		spin_unlock(&cache->lock);
		wake_up(&caching_ctl->wait);
	}

	if (load_cache_only) {
		put_caching_control(caching_ctl);
		return 0;
	}

	down_write(&fs_info->commit_root_sem);
	atomic_inc(&caching_ctl->count);
	list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
	up_write(&fs_info->commit_root_sem);

	btrfs_get_block_group(cache);

	btrfs_queue_work(fs_info->caching_workers, &caching_ctl->work);

	return ret;
}
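
/*
 * Illustrative usage (an assumption about callers, not taken from this
 * file): paths that merely want the free space cache primed pass
 * load_cache_only == 1 and give up if the fast load can't finish, while
 * allocation paths pass 0 so the full caching thread gets queued:
 *
 *	ret = cache_block_group(cache, 1);	- fast load attempt only
 *	ret = cache_block_group(cache, 0);	- queue caching_thread()
 */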
/*
 * Return the block group that starts at or after bytenr.
 */
static struct btrfs_block_group_cache *
btrfs_lookup_first_block_group(struct btrfs_fs_info *info, u64 bytenr)
{
	return block_group_cache_tree_search(info, bytenr, 0);
}

/*
 * Return the block group that contains the given bytenr.
 */
struct btrfs_block_group_cache *btrfs_lookup_block_group(
						 struct btrfs_fs_info *info,
						 u64 bytenr)
{
	return block_group_cache_tree_search(info, bytenr, 1);
}

static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
						  u64 flags)
{
	struct list_head *head = &info->space_info;
	struct btrfs_space_info *found;

	flags &= BTRFS_BLOCK_GROUP_TYPE_MASK;

	rcu_read_lock();
	list_for_each_entry_rcu(found, head, list) {
		if (found->flags & flags) {
			rcu_read_unlock();
			return found;
		}
	}
	rcu_read_unlock();
	return NULL;
}

/*
 * After adding space to the filesystem, we need to clear the full flags
 * on all the space infos.
 */
void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
{
	struct list_head *head = &info->space_info;
	struct btrfs_space_info *found;

	rcu_read_lock();
	list_for_each_entry_rcu(found, head, list)
		found->full = 0;
	rcu_read_unlock();
}
/*
 * Simple helper to search for an existing data extent at a given offset.
 * Returns 0 if an extent item keyed exactly (start, EXTENT_ITEM, len)
 * exists, a positive value if it does not (btrfs_search_slot() semantics),
 * and a negative value on error.
 */
int btrfs_lookup_data_extent(struct btrfs_root *root, u64 start, u64 len)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_path *path;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = start;
	key.offset = len;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, path,
				0, 0);
	btrfs_free_path(path);
	return ret;
}
/*
 * Helper function to look up the reference count and flags of a tree block.
 *
 * The head node for a delayed ref is used to store the sum of all the
 * reference count modifications queued up in the rbtree. The head node
 * may also store the extent flags to set. This way you can check to see
 * what the reference count and extent flags would be once all of the
 * queued delayed refs are processed.
 */
int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root, u64 bytenr,
			     u64 offset, int metadata, u64 *refs, u64 *flags)
{
	struct btrfs_delayed_ref_head *head;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_path *path;
	struct btrfs_extent_item *ei;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	u32 item_size;
	u64 num_refs;
	u64 extent_flags;
	int ret;

	/*
	 * If we don't have skinny metadata, don't bother doing anything
	 * different.
	 */
	if (metadata && !btrfs_fs_incompat(root->fs_info, SKINNY_METADATA)) {
		offset = root->nodesize;
		metadata = 0;
	}

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	if (!trans) {
		path->skip_locking = 1;
		path->search_commit_root = 1;
	}

search_again:
	key.objectid = bytenr;
	key.offset = offset;
	if (metadata)
		key.type = BTRFS_METADATA_ITEM_KEY;
	else
		key.type = BTRFS_EXTENT_ITEM_KEY;

	ret = btrfs_search_slot(trans, root->fs_info->extent_root,
				&key, path, 0, 0);
	if (ret < 0)
		goto out_free;

	if (ret > 0 && metadata && key.type == BTRFS_METADATA_ITEM_KEY) {
		if (path->slots[0]) {
			path->slots[0]--;
			btrfs_item_key_to_cpu(path->nodes[0], &key,
					      path->slots[0]);
			if (key.objectid == bytenr &&
			    key.type == BTRFS_EXTENT_ITEM_KEY &&
			    key.offset == root->nodesize)
				ret = 0;
		}
	}

	if (ret == 0) {
		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		if (item_size >= sizeof(*ei)) {
			ei = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_extent_item);
			num_refs = btrfs_extent_refs(leaf, ei);
			extent_flags = btrfs_extent_flags(leaf, ei);
		} else {
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
			struct btrfs_extent_item_v0 *ei0;
			BUG_ON(item_size != sizeof(*ei0));
			ei0 = btrfs_item_ptr(leaf, path->slots[0],
					     struct btrfs_extent_item_v0);
			num_refs = btrfs_extent_refs_v0(leaf, ei0);
			/* FIXME: this isn't correct for data */
			extent_flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
#else
			BUG();
#endif
		}
		BUG_ON(num_refs == 0);
	} else {
		num_refs = 0;
		extent_flags = 0;
		ret = 0;
	}

	if (!trans)
		goto out;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);
	head = btrfs_find_delayed_ref_head(trans, bytenr);
	if (head) {
		if (!mutex_trylock(&head->mutex)) {
			atomic_inc(&head->node.refs);
			spin_unlock(&delayed_refs->lock);

			btrfs_release_path(path);

			/*
			 * Mutex was contended, block until it's released and
			 * try again.
			 */
			mutex_lock(&head->mutex);
			mutex_unlock(&head->mutex);
			btrfs_put_delayed_ref(&head->node);
			goto search_again;
		}
		spin_lock(&head->lock);
		if (head->extent_op && head->extent_op->update_flags)
			extent_flags |= head->extent_op->flags_to_set;
		else
			BUG_ON(num_refs == 0);

		num_refs += head->node.ref_mod;
		spin_unlock(&head->lock);
		mutex_unlock(&head->mutex);
	}
	spin_unlock(&delayed_refs->lock);
out:
	WARN_ON(num_refs == 0);
	if (refs)
		*refs = num_refs;
	if (flags)
		*flags = extent_flags;
out_free:
	btrfs_free_path(path);
	return ret;
}
/*
 * Back reference rules. Back refs have three main goals:
 *
 * 1) differentiate between all holders of references to an extent so that
 *    when a reference is dropped we can make sure it was a valid reference
 *    before freeing the extent.
 *
 * 2) Provide enough information to quickly find the holders of an extent
 *    if we notice a given block is corrupted or bad.
 *
 * 3) Make it easy to migrate blocks for FS shrinking or storage pool
 *    maintenance. This is actually the same as #2, but with a slightly
 *    different use case.
 *
 * There are two kinds of back refs. Implicit back refs are optimized for
 * pointers in non-shared tree blocks. For a given pointer in a block, back
 * refs of this kind provide information about the block's owner tree and
 * the pointer's key. This information allows us to find the block by
 * b-tree searching. Full back refs are for pointers in tree blocks not
 * referenced by their owner trees; the location of the tree block is
 * recorded in the back refs. Full back refs are actually generic and could
 * be used in all the cases where implicit back refs are used. Their major
 * shortcoming is overhead: every time a tree block gets COWed, we have to
 * update the back ref entries for all pointers in it.
 *
 * For a newly allocated tree block, we use implicit back refs for the
 * pointers in it. This means most tree related operations only involve
 * implicit back refs. For a tree block created in an old transaction, the
 * only way to drop a reference to it is to COW it. So we can detect the
 * event that a tree block loses its owner tree's reference and do the
 * back refs conversion.
 *
 * When a tree block is COWed through a tree, there are four cases:
 *
 * The reference count of the block is one and the tree is the block's
 * owner tree. Nothing to do in this case.
 *
 * The reference count of the block is one and the tree is not the
 * block's owner tree. In this case, full back refs are used for the
 * pointers in the block. Remove these full back refs and add implicit
 * back refs for every pointer in the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * the block's owner tree. In this case, implicit back refs are used for
 * the pointers in the block. Add full back refs for every pointer in the
 * block and increase the lower level extents' reference counts. The
 * original implicit back refs carry over to the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * not the block's owner tree. Add implicit back refs for every pointer in
 * the new block and increase the lower level extents' reference counts.
 *
 * Back Reference Key composing:
 *
 * The key objectid corresponds to the first byte in the extent, and the
 * key type is used to differentiate between types of back refs. The
 * meaning of the key offset differs between types of back refs.
 *
 * File extents can be referenced by:
 *
 * - multiple snapshots, subvolumes, or different generations in one subvol
 * - different files inside a single subvolume
 * - different offsets inside a file (bookend extents in file.c)
 *
 * The extent ref structure for the implicit back refs has fields for:
 *
 * - objectid of the subvolume root
 * - objectid of the file holding the reference
 * - original offset in the file
 * - how many bookend extents
 *
 * The key offset for the implicit back refs is the hash of the first
 * three fields.
 *
 * The extent ref structure for the full back refs has a field for:
 *
 * - number of pointers in the tree leaf
 *
 * The key offset for the full back refs is the first byte of the tree
 * leaf holding the pointers.
 *
 * When a file extent is allocated, implicit back refs are used and the
 * fields are filled in:
 *
 *     (root_key.objectid, inode objectid, offset in file, 1)
 *
 * When a file extent is removed by file truncation, we find the
 * corresponding implicit back refs and check the following fields:
 *
 *     (btrfs_header_owner(leaf), inode objectid, offset in file)
 *
 * Btree extents can be referenced by:
 *
 * - Different subvolumes
 *
 * Both the implicit back refs and the full back refs for tree blocks
 * consist only of a key. The key offset for the implicit back refs is the
 * objectid of the block's owner tree. The key offset for the full back
 * refs is the first byte of the parent block.
 *
 * When implicit back refs are used, information about the lowest key and
 * level of the tree block is required. This information is stored in the
 * tree block info structure.
 */
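
/*
 * Worked example (illustrative only; the numbers are made up and only the
 * key layout matters): a data extent at bytenr 13631488, written through
 * inode 257 at file offset 0 in subvolume tree 5, gets an implicit back
 * ref keyed
 *
 *	(13631488, BTRFS_EXTENT_DATA_REF_KEY,
 *	 hash_extent_data_ref(5, 257, 0))
 *
 * If the leaf holding that file extent pointer later stops being
 * referenced by its owner tree, references taken through that leaf are
 * instead recorded as full back refs keyed
 *
 *	(13631488, BTRFS_SHARED_DATA_REF_KEY, <bytenr of that leaf>)
 */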
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
static int convert_extent_item_v0(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct btrfs_path *path,
				  u64 owner, u32 extra_size)
{
	struct btrfs_extent_item *item;
	struct btrfs_extent_item_v0 *ei0;
	struct btrfs_extent_ref_v0 *ref0;
	struct btrfs_tree_block_info *bi;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	u32 new_size = sizeof(*item);
	u64 refs;
	int ret;

	leaf = path->nodes[0];
	BUG_ON(btrfs_item_size_nr(leaf, path->slots[0]) != sizeof(*ei0));

	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	ei0 = btrfs_item_ptr(leaf, path->slots[0],
			     struct btrfs_extent_item_v0);
	refs = btrfs_extent_refs_v0(leaf, ei0);

	if (owner == (u64)-1) {
		while (1) {
			if (path->slots[0] >= btrfs_header_nritems(leaf)) {
				ret = btrfs_next_leaf(root, path);
				if (ret < 0)
					return ret;
				BUG_ON(ret > 0); /* Corruption */
				leaf = path->nodes[0];
			}
			btrfs_item_key_to_cpu(leaf, &found_key,
					      path->slots[0]);
			BUG_ON(key.objectid != found_key.objectid);
			if (found_key.type != BTRFS_EXTENT_REF_V0_KEY) {
				path->slots[0]++;
				continue;
			}
			ref0 = btrfs_item_ptr(leaf, path->slots[0],
					      struct btrfs_extent_ref_v0);
			owner = btrfs_ref_objectid_v0(leaf, ref0);
			break;
		}
	}
	btrfs_release_path(path);

	if (owner < BTRFS_FIRST_FREE_OBJECTID)
		new_size += sizeof(*bi);

	new_size -= sizeof(*ei0);
	ret = btrfs_search_slot(trans, root, &key, path,
				new_size + extra_size, 1);
	if (ret < 0)
		return ret;
	BUG_ON(ret); /* Corruption */

	btrfs_extend_item(root, path, new_size);

	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	btrfs_set_extent_refs(leaf, item, refs);
	/* FIXME: get real generation */
	btrfs_set_extent_generation(leaf, item, 0);
	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		btrfs_set_extent_flags(leaf, item,
				       BTRFS_EXTENT_FLAG_TREE_BLOCK |
				       BTRFS_BLOCK_FLAG_FULL_BACKREF);
		bi = (struct btrfs_tree_block_info *)(item + 1);
		/* FIXME: get first key of the block */
		memset_extent_buffer(leaf, 0, (unsigned long)bi, sizeof(*bi));
		btrfs_set_tree_block_level(leaf, bi, (int)owner);
	} else {
		btrfs_set_extent_flags(leaf, item, BTRFS_EXTENT_FLAG_DATA);
	}
	btrfs_mark_buffer_dirty(leaf);
	return 0;
}
#endif
static u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset)
{
	u32 high_crc = ~(u32)0;
	u32 low_crc = ~(u32)0;
	__le64 lenum;

	lenum = cpu_to_le64(root_objectid);
	high_crc = btrfs_crc32c(high_crc, &lenum, sizeof(lenum));
	lenum = cpu_to_le64(owner);
	low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));
	lenum = cpu_to_le64(offset);
	low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));

	return ((u64)high_crc << 31) ^ (u64)low_crc;
}
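
/*
 * Illustrative sketch (not from the original file; the helper name is
 * hypothetical): composing the search key for the two kinds of data back
 * refs described in the comment above. lookup_extent_data_ref() and
 * insert_extent_data_ref() below open-code exactly this.
 */
static void __maybe_unused data_ref_key_sketch(struct btrfs_key *key,
					       u64 bytenr, u64 parent,
					       u64 root_objectid, u64 owner,
					       u64 offset)
{
	key->objectid = bytenr;	/* first byte of the extent */
	if (parent) {
		/* full back ref: key offset is the parent leaf's bytenr */
		key->type = BTRFS_SHARED_DATA_REF_KEY;
		key->offset = parent;
	} else {
		/* implicit back ref: key offset hashes (root, inode, offset) */
		key->type = BTRFS_EXTENT_DATA_REF_KEY;
		key->offset = hash_extent_data_ref(root_objectid, owner,
						   offset);
	}
}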
static u64 hash_extent_data_ref_item(struct extent_buffer *leaf,
				     struct btrfs_extent_data_ref *ref)
{
	return hash_extent_data_ref(btrfs_extent_data_ref_root(leaf, ref),
				    btrfs_extent_data_ref_objectid(leaf, ref),
				    btrfs_extent_data_ref_offset(leaf, ref));
}

static int match_extent_data_ref(struct extent_buffer *leaf,
				 struct btrfs_extent_data_ref *ref,
				 u64 root_objectid, u64 owner, u64 offset)
{
	if (btrfs_extent_data_ref_root(leaf, ref) != root_objectid ||
	    btrfs_extent_data_ref_objectid(leaf, ref) != owner ||
	    btrfs_extent_data_ref_offset(leaf, ref) != offset)
		return 0;
	return 1;
}

static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct btrfs_path *path,
					   u64 bytenr, u64 parent,
					   u64 root_objectid,
					   u64 owner, u64 offset)
{
	struct btrfs_key key;
	struct btrfs_extent_data_ref *ref;
	struct extent_buffer *leaf;
	u32 nritems;
	int ret;
	int recow;
	int err = -ENOENT;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_DATA_REF_KEY;
		key.offset = parent;
	} else {
		key.type = BTRFS_EXTENT_DATA_REF_KEY;
		key.offset = hash_extent_data_ref(root_objectid,
						  owner, offset);
	}
again:
	recow = 0;
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0) {
		err = ret;
		goto fail;
	}

	if (parent) {
		if (!ret)
			return 0;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
		key.type = BTRFS_EXTENT_REF_V0_KEY;
		btrfs_release_path(path);
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret < 0) {
			err = ret;
			goto fail;
		}
		if (!ret)
			return 0;
#endif
		goto fail;
	}

	leaf = path->nodes[0];
	nritems = btrfs_header_nritems(leaf);
	while (1) {
		if (path->slots[0] >= nritems) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				err = ret;
			if (ret)
				goto fail;

			leaf = path->nodes[0];
			nritems = btrfs_header_nritems(leaf);
			recow = 1;
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid != bytenr ||
		    key.type != BTRFS_EXTENT_DATA_REF_KEY)
			goto fail;

		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_data_ref);

		if (match_extent_data_ref(leaf, ref, root_objectid,
					  owner, offset)) {
			if (recow) {
				btrfs_release_path(path);
				goto again;
			}
			err = 0;
			break;
		}
		path->slots[0]++;
	}
fail:
	return err;
}
static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct btrfs_path *path,
					   u64 bytenr, u64 parent,
					   u64 root_objectid, u64 owner,
					   u64 offset, int refs_to_add)
{
	struct btrfs_key key;
	struct extent_buffer *leaf;
	u32 size;
	u32 num_refs;
	int ret;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_DATA_REF_KEY;
		key.offset = parent;
		size = sizeof(struct btrfs_shared_data_ref);
	} else {
		key.type = BTRFS_EXTENT_DATA_REF_KEY;
		key.offset = hash_extent_data_ref(root_objectid,
						  owner, offset);
		size = sizeof(struct btrfs_extent_data_ref);
	}

	ret = btrfs_insert_empty_item(trans, root, path, &key, size);
	if (ret && ret != -EEXIST)
		goto fail;

	leaf = path->nodes[0];
	if (parent) {
		struct btrfs_shared_data_ref *ref;
		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_shared_data_ref);
		if (ret == 0) {
			btrfs_set_shared_data_ref_count(leaf, ref, refs_to_add);
		} else {
			num_refs = btrfs_shared_data_ref_count(leaf, ref);
			num_refs += refs_to_add;
			btrfs_set_shared_data_ref_count(leaf, ref, num_refs);
		}
	} else {
		struct btrfs_extent_data_ref *ref;
		while (ret == -EEXIST) {
			ref = btrfs_item_ptr(leaf, path->slots[0],
					     struct btrfs_extent_data_ref);
			if (match_extent_data_ref(leaf, ref, root_objectid,
						  owner, offset))
				break;
			btrfs_release_path(path);
			key.offset++;
			ret = btrfs_insert_empty_item(trans, root, path, &key,
						      size);
			if (ret && ret != -EEXIST)
				goto fail;

			leaf = path->nodes[0];
		}
		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_data_ref);
		if (ret == 0) {
			btrfs_set_extent_data_ref_root(leaf, ref,
						       root_objectid);
			btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
			btrfs_set_extent_data_ref_offset(leaf, ref, offset);
			btrfs_set_extent_data_ref_count(leaf, ref, refs_to_add);
		} else {
			num_refs = btrfs_extent_data_ref_count(leaf, ref);
			num_refs += refs_to_add;
			btrfs_set_extent_data_ref_count(leaf, ref, num_refs);
		}
	}
	btrfs_mark_buffer_dirty(leaf);
	ret = 0;
fail:
	btrfs_release_path(path);
	return ret;
}
static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct btrfs_path *path,
					   int refs_to_drop, int *last_ref)
{
	struct btrfs_key key;
	struct btrfs_extent_data_ref *ref1 = NULL;
	struct btrfs_shared_data_ref *ref2 = NULL;
	struct extent_buffer *leaf;
	u32 num_refs = 0;
	int ret = 0;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

	if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
		ref1 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_data_ref);
		num_refs = btrfs_extent_data_ref_count(leaf, ref1);
	} else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
		ref2 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_shared_data_ref);
		num_refs = btrfs_shared_data_ref_count(leaf, ref2);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	} else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
		struct btrfs_extent_ref_v0 *ref0;
		ref0 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_ref_v0);
		num_refs = btrfs_ref_count_v0(leaf, ref0);
#endif
	} else {
		BUG();
	}

	BUG_ON(num_refs < refs_to_drop);
	num_refs -= refs_to_drop;

	if (num_refs == 0) {
		ret = btrfs_del_item(trans, root, path);
		*last_ref = 1;
	} else {
		if (key.type == BTRFS_EXTENT_DATA_REF_KEY)
			btrfs_set_extent_data_ref_count(leaf, ref1, num_refs);
		else if (key.type == BTRFS_SHARED_DATA_REF_KEY)
			btrfs_set_shared_data_ref_count(leaf, ref2, num_refs);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
		else {
			struct btrfs_extent_ref_v0 *ref0;
			ref0 = btrfs_item_ptr(leaf, path->slots[0],
					      struct btrfs_extent_ref_v0);
			btrfs_set_ref_count_v0(leaf, ref0, num_refs);
		}
#endif
		btrfs_mark_buffer_dirty(leaf);
	}
	return ret;
}
static noinline u32 extent_data_ref_count(struct btrfs_path *path,
					  struct btrfs_extent_inline_ref *iref)
{
	struct btrfs_key key;
	struct extent_buffer *leaf;
	struct btrfs_extent_data_ref *ref1;
	struct btrfs_shared_data_ref *ref2;
	u32 num_refs = 0;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	if (iref) {
		if (btrfs_extent_inline_ref_type(leaf, iref) ==
		    BTRFS_EXTENT_DATA_REF_KEY) {
			ref1 = (struct btrfs_extent_data_ref *)(&iref->offset);
			num_refs = btrfs_extent_data_ref_count(leaf, ref1);
		} else {
			ref2 = (struct btrfs_shared_data_ref *)(iref + 1);
			num_refs = btrfs_shared_data_ref_count(leaf, ref2);
		}
	} else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
		ref1 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_data_ref);
		num_refs = btrfs_extent_data_ref_count(leaf, ref1);
	} else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
		ref2 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_shared_data_ref);
		num_refs = btrfs_shared_data_ref_count(leaf, ref2);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	} else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
		struct btrfs_extent_ref_v0 *ref0;
		ref0 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_ref_v0);
		num_refs = btrfs_ref_count_v0(leaf, ref0);
#endif
	} else {
		WARN_ON(1);
	}
	return num_refs;
}
static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path,
					  u64 bytenr, u64 parent,
					  u64 root_objectid)
{
	struct btrfs_key key;
	int ret;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_BLOCK_REF_KEY;
		key.offset = parent;
	} else {
		key.type = BTRFS_TREE_BLOCK_REF_KEY;
		key.offset = root_objectid;
	}

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0)
		ret = -ENOENT;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (ret == -ENOENT && parent) {
		btrfs_release_path(path);
		key.type = BTRFS_EXTENT_REF_V0_KEY;
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret > 0)
			ret = -ENOENT;
	}
#endif
	return ret;
}

static noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path,
					  u64 bytenr, u64 parent,
					  u64 root_objectid)
{
	struct btrfs_key key;
	int ret;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_BLOCK_REF_KEY;
		key.offset = parent;
	} else {
		key.type = BTRFS_TREE_BLOCK_REF_KEY;
		key.offset = root_objectid;
	}

	ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
	btrfs_release_path(path);
	return ret;
}
static inline int extent_ref_type(u64 parent, u64 owner)
{
	int type;
	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		if (parent > 0)
			type = BTRFS_SHARED_BLOCK_REF_KEY;
		else
			type = BTRFS_TREE_BLOCK_REF_KEY;
	} else {
		if (parent > 0)
			type = BTRFS_SHARED_DATA_REF_KEY;
		else
			type = BTRFS_EXTENT_DATA_REF_KEY;
	}
	return type;
}

static int find_next_key(struct btrfs_path *path, int level,
			 struct btrfs_key *key)
{
	for (; level < BTRFS_MAX_LEVEL; level++) {
		if (!path->nodes[level])
			break;
		if (path->slots[level] + 1 >=
		    btrfs_header_nritems(path->nodes[level]))
			continue;
		if (level == 0)
			btrfs_item_key_to_cpu(path->nodes[level], key,
					      path->slots[level] + 1);
		else
			btrfs_node_key_to_cpu(path->nodes[level], key,
					      path->slots[level] + 1);
		return 0;
	}
	return 1;
}
/*
 * Look for an inline back ref. If the back ref is found, *ref_ret is set
 * to the address of the inline back ref and 0 is returned.
 *
 * If the back ref isn't found, *ref_ret is set to the address where it
 * should be inserted and -ENOENT is returned.
 *
 * If insert is true and there are too many inline back refs, the path
 * points to the extent item and -EAGAIN is returned.
 *
 * NOTE: inline back refs are ordered in the same way that back ref
 * items in the tree are ordered.
 */
static noinline_for_stack
int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref **ref_ret,
				 u64 bytenr, u64 num_bytes,
				 u64 parent, u64 root_objectid,
				 u64 owner, u64 offset, int insert)
{
	struct btrfs_key key;
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	struct btrfs_extent_inline_ref *iref;
	u64 flags;
	u64 item_size;
	unsigned long ptr;
	unsigned long end;
	int extra_size;
	int type;
	int want;
	int ret;
	int err = 0;
	bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
						 SKINNY_METADATA);

	key.objectid = bytenr;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	key.offset = num_bytes;

	want = extent_ref_type(parent, owner);
	if (insert) {
		extra_size = btrfs_extent_inline_ref_size(want);
		path->keep_locks = 1;
	} else
		extra_size = -1;

	/*
	 * For tree blocks, owner holds the level, and skinny metadata items
	 * use that level as the key offset instead of the block size.
	 */
	if (skinny_metadata && owner < BTRFS_FIRST_FREE_OBJECTID) {
		key.type = BTRFS_METADATA_ITEM_KEY;
		key.offset = owner;
	}

again:
	ret = btrfs_search_slot(trans, root, &key, path, extra_size, 1);
	if (ret < 0) {
		err = ret;
		goto out;
	}

	/*
	 * We may be a newly converted file system which still has the old fat
	 * extent entries for metadata, so try and see if we have one of those.
	 */
	if (ret > 0 && skinny_metadata) {
		skinny_metadata = false;
		if (path->slots[0]) {
			path->slots[0]--;
			btrfs_item_key_to_cpu(path->nodes[0], &key,
					      path->slots[0]);
			if (key.objectid == bytenr &&
			    key.type == BTRFS_EXTENT_ITEM_KEY &&
			    key.offset == num_bytes)
				ret = 0;
		}
		if (ret) {
			key.objectid = bytenr;
			key.type = BTRFS_EXTENT_ITEM_KEY;
			key.offset = num_bytes;
			btrfs_release_path(path);
			goto again;
		}
	}

	if (ret && !insert) {
		err = -ENOENT;
		goto out;
	} else if (WARN_ON(ret)) {
		err = -EIO;
		goto out;
	}

	leaf = path->nodes[0];
	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (item_size < sizeof(*ei)) {
		if (!insert) {
			err = -ENOENT;
			goto out;
		}
		ret = convert_extent_item_v0(trans, root, path, owner,
					     extra_size);
		if (ret < 0) {
			err = ret;
			goto out;
		}
		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
	}
#endif
	BUG_ON(item_size < sizeof(*ei));

	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	flags = btrfs_extent_flags(leaf, ei);

	ptr = (unsigned long)(ei + 1);
	end = (unsigned long)ei + item_size;

	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK && !skinny_metadata) {
		ptr += sizeof(struct btrfs_tree_block_info);
		BUG_ON(ptr > end);
	}

	err = -ENOENT;
	while (1) {
		if (ptr >= end) {
			WARN_ON(ptr > end);
			break;
		}
		iref = (struct btrfs_extent_inline_ref *)ptr;
		type = btrfs_extent_inline_ref_type(leaf, iref);
		if (want < type)
			break;
		if (want > type) {
			ptr += btrfs_extent_inline_ref_size(type);
			continue;
		}

		if (type == BTRFS_EXTENT_DATA_REF_KEY) {
			struct btrfs_extent_data_ref *dref;
			dref = (struct btrfs_extent_data_ref *)(&iref->offset);
			if (match_extent_data_ref(leaf, dref, root_objectid,
						  owner, offset)) {
				err = 0;
				break;
			}
			if (hash_extent_data_ref_item(leaf, dref) <
			    hash_extent_data_ref(root_objectid, owner, offset))
				break;
		} else {
			u64 ref_offset;
			ref_offset = btrfs_extent_inline_ref_offset(leaf, iref);
			if (parent > 0) {
				if (parent == ref_offset) {
					err = 0;
					break;
				}
				if (ref_offset < parent)
					break;
			} else {
				if (root_objectid == ref_offset) {
					err = 0;
					break;
				}
				if (ref_offset < root_objectid)
					break;
			}
		}
		ptr += btrfs_extent_inline_ref_size(type);
	}
	if (err == -ENOENT && insert) {
		if (item_size + extra_size >=
		    BTRFS_MAX_EXTENT_ITEM_SIZE(root)) {
			err = -EAGAIN;
			goto out;
		}
		/*
		 * To add a new inline back ref, we have to make sure that
		 * there is no corresponding back ref item. For simplicity,
		 * we just do not add a new inline back ref if there is any
		 * kind of item for this block.
		 */
		if (find_next_key(path, 0, &key) == 0 &&
		    key.objectid == bytenr &&
		    key.type < BTRFS_BLOCK_GROUP_ITEM_KEY) {
			err = -EAGAIN;
			goto out;
		}
	}
	*ref_ret = (struct btrfs_extent_inline_ref *)ptr;
out:
	if (insert) {
		path->keep_locks = 0;
		btrfs_unlock_up_safe(path, 1);
	}
	return err;
}
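
/*
 * Illustrative caller skeleton (condensed; insert_inline_extent_backref()
 * and __btrfs_inc_extent_ref() below consume these return values):
 *
 *	ret = lookup_inline_extent_backref(trans, root, path, &iref, ...);
 *	if (ret == 0)
 *		- update the existing inline ref through iref
 *	else if (ret == -ENOENT)
 *		- insert a new inline ref at the address in iref
 *	else if (ret == -EAGAIN)
 *		- no room inline; fall back to a keyed back ref item
 */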
/*
 * Helper to add a new inline back ref.
 */
static noinline_for_stack
void setup_inline_extent_backref(struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref *iref,
				 u64 parent, u64 root_objectid,
				 u64 owner, u64 offset, int refs_to_add,
				 struct btrfs_delayed_extent_op *extent_op)
{
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	unsigned long ptr;
	unsigned long end;
	unsigned long item_offset;
	u64 refs;
	int size;
	int type;

	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	item_offset = (unsigned long)iref - (unsigned long)ei;

	type = extent_ref_type(parent, owner);
	size = btrfs_extent_inline_ref_size(type);

	btrfs_extend_item(root, path, size);

	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	refs = btrfs_extent_refs(leaf, ei);
	refs += refs_to_add;
	btrfs_set_extent_refs(leaf, ei, refs);
	if (extent_op)
		__run_delayed_extent_op(extent_op, leaf, ei);

	ptr = (unsigned long)ei + item_offset;
	end = (unsigned long)ei + btrfs_item_size_nr(leaf, path->slots[0]);
	if (ptr < end - size)
		memmove_extent_buffer(leaf, ptr + size, ptr,
				      end - size - ptr);

	iref = (struct btrfs_extent_inline_ref *)ptr;
	btrfs_set_extent_inline_ref_type(leaf, iref, type);
	if (type == BTRFS_EXTENT_DATA_REF_KEY) {
		struct btrfs_extent_data_ref *dref;
		dref = (struct btrfs_extent_data_ref *)(&iref->offset);
		btrfs_set_extent_data_ref_root(leaf, dref, root_objectid);
		btrfs_set_extent_data_ref_objectid(leaf, dref, owner);
		btrfs_set_extent_data_ref_offset(leaf, dref, offset);
		btrfs_set_extent_data_ref_count(leaf, dref, refs_to_add);
	} else if (type == BTRFS_SHARED_DATA_REF_KEY) {
		struct btrfs_shared_data_ref *sref;
		sref = (struct btrfs_shared_data_ref *)(iref + 1);
		btrfs_set_shared_data_ref_count(leaf, sref, refs_to_add);
		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
	} else if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
	} else {
		btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
	}
	btrfs_mark_buffer_dirty(leaf);
}
static int lookup_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref **ref_ret,
				 u64 bytenr, u64 num_bytes, u64 parent,
				 u64 root_objectid, u64 owner, u64 offset)
{
	int ret;

	ret = lookup_inline_extent_backref(trans, root, path, ref_ret,
					   bytenr, num_bytes, parent,
					   root_objectid, owner, offset, 0);
	if (ret != -ENOENT)
		return ret;

	btrfs_release_path(path);
	*ref_ret = NULL;

	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		ret = lookup_tree_block_ref(trans, root, path, bytenr, parent,
					    root_objectid);
	} else {
		ret = lookup_extent_data_ref(trans, root, path, bytenr, parent,
					     root_objectid, owner, offset);
	}
	return ret;
}
/*
 * Helper to update/remove an inline back ref.
 */
static noinline_for_stack
void update_inline_extent_backref(struct btrfs_root *root,
				  struct btrfs_path *path,
				  struct btrfs_extent_inline_ref *iref,
				  int refs_to_mod,
				  struct btrfs_delayed_extent_op *extent_op,
				  int *last_ref)
{
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	struct btrfs_extent_data_ref *dref = NULL;
	struct btrfs_shared_data_ref *sref = NULL;
	unsigned long ptr;
	unsigned long end;
	u32 item_size;
	int size;
	int type;
	u64 refs;

	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	refs = btrfs_extent_refs(leaf, ei);
	WARN_ON(refs_to_mod < 0 && refs + refs_to_mod <= 0);
	refs += refs_to_mod;
	btrfs_set_extent_refs(leaf, ei, refs);
	if (extent_op)
		__run_delayed_extent_op(extent_op, leaf, ei);

	type = btrfs_extent_inline_ref_type(leaf, iref);

	if (type == BTRFS_EXTENT_DATA_REF_KEY) {
		dref = (struct btrfs_extent_data_ref *)(&iref->offset);
		refs = btrfs_extent_data_ref_count(leaf, dref);
	} else if (type == BTRFS_SHARED_DATA_REF_KEY) {
		sref = (struct btrfs_shared_data_ref *)(iref + 1);
		refs = btrfs_shared_data_ref_count(leaf, sref);
	} else {
		refs = 1;
		BUG_ON(refs_to_mod != -1);
	}

	BUG_ON(refs_to_mod < 0 && refs < -refs_to_mod);
	refs += refs_to_mod;

	if (refs > 0) {
		if (type == BTRFS_EXTENT_DATA_REF_KEY)
			btrfs_set_extent_data_ref_count(leaf, dref, refs);
		else
			btrfs_set_shared_data_ref_count(leaf, sref, refs);
	} else {
		*last_ref = 1;
		size = btrfs_extent_inline_ref_size(type);
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		ptr = (unsigned long)iref;
		end = (unsigned long)ei + item_size;
		if (ptr + size < end)
			memmove_extent_buffer(leaf, ptr, ptr + size,
					      end - ptr - size);
		item_size -= size;
		btrfs_truncate_item(root, path, item_size, 1);
	}
	btrfs_mark_buffer_dirty(leaf);
}
static noinline_for_stack
int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 u64 bytenr, u64 num_bytes, u64 parent,
				 u64 root_objectid, u64 owner,
				 u64 offset, int refs_to_add,
				 struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_extent_inline_ref *iref;
	int ret;

	ret = lookup_inline_extent_backref(trans, root, path, &iref,
					   bytenr, num_bytes, parent,
					   root_objectid, owner, offset, 1);
	if (ret == 0) {
		BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID);
		update_inline_extent_backref(root, path, iref,
					     refs_to_add, extent_op, NULL);
	} else if (ret == -ENOENT) {
		setup_inline_extent_backref(root, path, iref, parent,
					    root_objectid, owner, offset,
					    refs_to_add, extent_op);
		ret = 0;
	}
	return ret;
}
static int insert_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 u64 bytenr, u64 parent, u64 root_objectid,
				 u64 owner, u64 offset, int refs_to_add)
{
	int ret;
	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		BUG_ON(refs_to_add != 1);
		ret = insert_tree_block_ref(trans, root, path, bytenr,
					    parent, root_objectid);
	} else {
		ret = insert_extent_data_ref(trans, root, path, bytenr,
					     parent, root_objectid,
					     owner, offset, refs_to_add);
	}
	return ret;
}
static int remove_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref *iref,
				 int refs_to_drop, int is_data, int *last_ref)
{
	int ret = 0;

	BUG_ON(!is_data && refs_to_drop != 1);
	if (iref) {
		update_inline_extent_backref(root, path, iref,
					     -refs_to_drop, NULL, last_ref);
	} else if (is_data) {
		ret = remove_extent_data_ref(trans, root, path, refs_to_drop,
					     last_ref);
	} else {
		*last_ref = 1;
		ret = btrfs_del_item(trans, root, path);
	}
	return ret;
}

#define in_range(b, first, len)	((b) >= (first) && (b) < (first) + (len))
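
/*
 * Example: in_range(68, 64, 8) is true (68 lies in the half-open
 * interval [64, 72)), while in_range(72, 64, 8) is false.
 */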
static int btrfs_issue_discard(struct block_device *bdev, u64 start, u64 len,
			       u64 *discarded_bytes)
{
	int j, ret = 0;
	u64 bytes_left, end;
	u64 aligned_start = ALIGN(start, 1 << 9);

	if (WARN_ON(start != aligned_start)) {
		len -= aligned_start - start;
		len = round_down(len, 1 << 9);
		start = aligned_start;
	}

	*discarded_bytes = 0;

	if (!len)
		return 0;

	end = start + len;
	bytes_left = len;

	/* Skip any superblocks on this device. */
	for (j = 0; j < BTRFS_SUPER_MIRROR_MAX; j++) {
		u64 sb_start = btrfs_sb_offset(j);
		u64 sb_end = sb_start + BTRFS_SUPER_INFO_SIZE;
		u64 size = sb_start - start;

		if (!in_range(sb_start, start, bytes_left) &&
		    !in_range(sb_end, start, bytes_left) &&
		    !in_range(start, sb_start, BTRFS_SUPER_INFO_SIZE))
			continue;

		/*
		 * Superblock spans beginning of range. Adjust start and
		 * try again.
		 */
		if (sb_start <= start) {
			start += sb_end - start;
			if (start > end) {
				bytes_left = 0;
				break;
			}
			bytes_left = end - start;
			continue;
		}

		if (size) {
			ret = blkdev_issue_discard(bdev, start >> 9, size >> 9,
						   GFP_NOFS, 0);
			if (!ret)
				*discarded_bytes += size;
			else if (ret != -EOPNOTSUPP)
				return ret;
		}

		start = sb_end;
		if (start > end) {
			bytes_left = 0;
			break;
		}
		bytes_left = end - start;
	}

	if (bytes_left) {
		ret = blkdev_issue_discard(bdev, start >> 9, bytes_left >> 9,
					   GFP_NOFS, 0);
		if (!ret)
			*discarded_bytes += bytes_left;
	}
	return ret;
}
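
/*
 * Worked example (illustrative): discarding [0, 128M) of a device. Two
 * superblock mirrors fall inside that range, at 64K and 64M
 * (btrfs_sb_offset(0) and btrfs_sb_offset(1)), so the loop above issues
 * three discards: [0, 64K), [64K + BTRFS_SUPER_INFO_SIZE, 64M) and
 * [64M + BTRFS_SUPER_INFO_SIZE, 128M). *discarded_bytes reflects only
 * the ranges actually discarded.
 */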
int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
			 u64 num_bytes, u64 *actual_bytes)
{
	int ret;
	u64 discarded_bytes = 0;
	struct btrfs_bio *bbio = NULL;

	/*
	 * Avoid races with device replace and make sure our bbio has devices
	 * associated to its stripes that don't go away while we are
	 * discarding.
	 */
	btrfs_bio_counter_inc_blocked(root->fs_info);
	/* Tell the block device(s) that the sectors can be discarded */
	ret = btrfs_map_block(root->fs_info, REQ_OP_DISCARD,
			      bytenr, &num_bytes, &bbio, 0);
	/* Error condition is -ENOMEM */
	if (!ret) {
		struct btrfs_bio_stripe *stripe = bbio->stripes;
		int i;

		for (i = 0; i < bbio->num_stripes; i++, stripe++) {
			u64 bytes;

			if (!stripe->dev->can_discard)
				continue;

			ret = btrfs_issue_discard(stripe->dev->bdev,
						  stripe->physical,
						  stripe->length,
						  &bytes);
			if (!ret)
				discarded_bytes += bytes;
			else if (ret != -EOPNOTSUPP)
				break;	/* Logic errors or -ENOMEM, or -EIO
					 * but I don't know how that could
					 * happen JDM */

			/*
			 * Just in case we get back EOPNOTSUPP for some reason,
			 * just ignore the return value so we don't screw up
			 * people calling discard_extent.
			 */
			ret = 0;
		}
		btrfs_put_bbio(bbio);
	}
	btrfs_bio_counter_dec(root->fs_info);

	if (actual_bytes)
		*actual_bytes = discarded_bytes;

	if (ret == -EOPNOTSUPP)
		ret = 0;
	return ret;
}
/* Can return -ENOMEM */
int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root,
			 u64 bytenr, u64 num_bytes, u64 parent,
			 u64 root_objectid, u64 owner, u64 offset)
{
	int ret;
	struct btrfs_fs_info *fs_info = root->fs_info;

	BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID &&
	       root_objectid == BTRFS_TREE_LOG_OBJECTID);

	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
					num_bytes,
					parent, root_objectid, (int)owner,
					BTRFS_ADD_DELAYED_REF, NULL);
	} else {
		ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
					num_bytes, parent, root_objectid,
					owner, offset, 0,
					BTRFS_ADD_DELAYED_REF, NULL);
	}
	return ret;
}
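
/*
 * Illustrative call (the values are made up): take one more reference on
 * a data extent on behalf of inode 257 at file offset 0 in subvolume 5,
 * with no shared parent, so an implicit back ref gets queued:
 *
 *	ret = btrfs_inc_extent_ref(trans, root, bytenr, num_bytes,
 *				   0, 5, 257, 0);
 */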
static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct btrfs_delayed_ref_node *node,
				  u64 parent, u64 root_objectid,
				  u64 owner, u64 offset, int refs_to_add,
				  struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_extent_item *item;
	struct btrfs_key key;
	u64 bytenr = node->bytenr;
	u64 num_bytes = node->num_bytes;
	u64 refs;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->reada = READA_FORWARD;
	path->leave_spinning = 1;
	/* this will set up the path even if it fails to insert the back ref */
	ret = insert_inline_extent_backref(trans, fs_info->extent_root, path,
					   bytenr, num_bytes, parent,
					   root_objectid, owner, offset,
					   refs_to_add, extent_op);
	if ((ret < 0 && ret != -EAGAIN) || !ret)
		goto out;

	/*
	 * Ok we had -EAGAIN which means we didn't have space to insert an
	 * inline extent ref, so just update the reference count and add a
	 * normal backref.
	 */
	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	refs = btrfs_extent_refs(leaf, item);
	btrfs_set_extent_refs(leaf, item, refs + refs_to_add);
	if (extent_op)
		__run_delayed_extent_op(extent_op, leaf, item);

	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	path->reada = READA_FORWARD;
	path->leave_spinning = 1;
	/* now insert the actual backref */
	ret = insert_extent_backref(trans, root->fs_info->extent_root,
				    path, bytenr, parent, root_objectid,
				    owner, offset, refs_to_add);
	if (ret)
		btrfs_abort_transaction(trans, ret);
out:
	btrfs_free_path(path);
	return ret;
}
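
/*
 * Apply one delayed data ref to the extent tree.  The action decides the
 * work: BTRFS_ADD_DELAYED_REF with insert_reserved set means this is the
 * first ref for a freshly allocated extent, so the extent item itself is
 * inserted (alloc_reserved_file_extent); a plain add bumps an existing ref;
 * BTRFS_DROP_DELAYED_REF drops one via __btrfs_free_extent().
 */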
static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_delayed_ref_node *node,
				struct btrfs_delayed_extent_op *extent_op,
				int insert_reserved)
{
	int ret = 0;
	struct btrfs_delayed_data_ref *ref;
	struct btrfs_key ins;
	u64 parent = 0;
	u64 ref_root = 0;
	u64 flags = 0;

	ins.objectid = node->bytenr;
	ins.offset = node->num_bytes;
	ins.type = BTRFS_EXTENT_ITEM_KEY;

	ref = btrfs_delayed_node_to_data_ref(node);
	trace_run_delayed_data_ref(root->fs_info, node, ref, node->action);

	if (node->type == BTRFS_SHARED_DATA_REF_KEY)
		parent = ref->parent;
	ref_root = ref->root;

	if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
		if (extent_op)
			flags |= extent_op->flags_to_set;
		ret = alloc_reserved_file_extent(trans, root,
						 parent, ref_root, flags,
						 ref->objectid, ref->offset,
						 &ins, node->ref_mod);
	} else if (node->action == BTRFS_ADD_DELAYED_REF) {
		ret = __btrfs_inc_extent_ref(trans, root, node, parent,
					     ref_root, ref->objectid,
					     ref->offset, node->ref_mod,
					     extent_op);
	} else if (node->action == BTRFS_DROP_DELAYED_REF) {
		ret = __btrfs_free_extent(trans, root, node, parent,
					  ref_root, ref->objectid,
					  ref->offset, node->ref_mod,
					  extent_op);
	} else {
		BUG();
	}
	return ret;
}

static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
				    struct extent_buffer *leaf,
				    struct btrfs_extent_item *ei)
{
	u64 flags = btrfs_extent_flags(leaf, ei);

	if (extent_op->update_flags) {
		flags |= extent_op->flags_to_set;
		btrfs_set_extent_flags(leaf, ei, flags);
	}

	if (extent_op->update_key) {
		struct btrfs_tree_block_info *bi;

		BUG_ON(!(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK));
		bi = (struct btrfs_tree_block_info *)(ei + 1);
		btrfs_set_tree_block_key(leaf, bi, &extent_op->key);
	}
}
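
/*
 * run_delayed_extent_op() locates the extent item for a delayed flag/key
 * update.  With the SKINNY_METADATA incompat feature the item is keyed as
 * (bytenr, BTRFS_METADATA_ITEM_KEY, level); if that lookup misses (the
 * extent may predate the feature), it retries with the classic
 * (bytenr, BTRFS_EXTENT_ITEM_KEY, num_bytes) key before giving up.
 */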
static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_delayed_ref_node *node,
				 struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_key key;
	struct btrfs_path *path;
	struct btrfs_extent_item *ei;
	struct extent_buffer *leaf;
	u32 item_size;
	int ret;
	int err = 0;
	int metadata = !extent_op->is_data;

	if (trans->aborted)
		return 0;

	if (metadata && !btrfs_fs_incompat(root->fs_info, SKINNY_METADATA))
		metadata = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = node->bytenr;

	if (metadata) {
		key.type = BTRFS_METADATA_ITEM_KEY;
		key.offset = extent_op->level;
	} else {
		key.type = BTRFS_EXTENT_ITEM_KEY;
		key.offset = node->num_bytes;
	}

again:
	path->reada = READA_FORWARD;
	path->leave_spinning = 1;
	ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key,
				path, 0, 1);
	if (ret < 0) {
		err = ret;
		goto out;
	}
	if (ret > 0) {
		if (metadata) {
			if (path->slots[0] > 0) {
				path->slots[0]--;
				btrfs_item_key_to_cpu(path->nodes[0], &key,
						      path->slots[0]);
				if (key.objectid == node->bytenr &&
				    key.type == BTRFS_EXTENT_ITEM_KEY &&
				    key.offset == node->num_bytes)
					ret = 0;
			}
			if (ret > 0) {
				btrfs_release_path(path);
				metadata = 0;

				key.objectid = node->bytenr;
				key.offset = node->num_bytes;
				key.type = BTRFS_EXTENT_ITEM_KEY;
				goto again;
			}
		} else {
			err = -EIO;
			goto out;
		}
	}

	leaf = path->nodes[0];
	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (item_size < sizeof(*ei)) {
		ret = convert_extent_item_v0(trans, root->fs_info->extent_root,
					     path, (u64)-1, 0);
		if (ret < 0) {
			err = ret;
			goto out;
		}
		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
	}
#endif
	BUG_ON(item_size < sizeof(*ei));
	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	__run_delayed_extent_op(extent_op, leaf, ei);

	btrfs_mark_buffer_dirty(leaf);
out:
	btrfs_free_path(path);
	return err;
}

static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_delayed_ref_node *node,
				struct btrfs_delayed_extent_op *extent_op,
				int insert_reserved)
{
	int ret = 0;
	struct btrfs_delayed_tree_ref *ref;
	struct btrfs_key ins;
	u64 parent = 0;
	u64 ref_root = 0;
	bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
						 SKINNY_METADATA);

	ref = btrfs_delayed_node_to_tree_ref(node);
	trace_run_delayed_tree_ref(root->fs_info, node, ref, node->action);

	if (node->type == BTRFS_SHARED_BLOCK_REF_KEY)
		parent = ref->parent;
	ref_root = ref->root;

	ins.objectid = node->bytenr;
	if (skinny_metadata) {
		ins.offset = ref->level;
		ins.type = BTRFS_METADATA_ITEM_KEY;
	} else {
		ins.offset = node->num_bytes;
		ins.type = BTRFS_EXTENT_ITEM_KEY;
	}

	BUG_ON(node->ref_mod != 1);
	if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
		BUG_ON(!extent_op || !extent_op->update_flags);
		ret = alloc_reserved_tree_block(trans, root,
						parent, ref_root,
						extent_op->flags_to_set,
						&extent_op->key,
						ref->level, &ins);
	} else if (node->action == BTRFS_ADD_DELAYED_REF) {
		ret = __btrfs_inc_extent_ref(trans, root, node,
					     parent, ref_root,
					     ref->level, 0, 1,
					     extent_op);
	} else if (node->action == BTRFS_DROP_DELAYED_REF) {
		ret = __btrfs_free_extent(trans, root, node,
					  parent, ref_root,
					  ref->level, 0, 1, extent_op);
	} else {
		BUG();
	}
	return ret;
}

/* helper function to actually process a single delayed ref entry */
static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       struct btrfs_delayed_ref_node *node,
			       struct btrfs_delayed_extent_op *extent_op,
			       int insert_reserved)
{
	int ret = 0;

	if (trans->aborted) {
		if (insert_reserved)
			btrfs_pin_extent(root, node->bytenr,
					 node->num_bytes, 1);
		return 0;
	}

	if (btrfs_delayed_ref_is_head(node)) {
		struct btrfs_delayed_ref_head *head;
		/*
		 * we've hit the end of the chain and we were supposed
		 * to insert this extent into the tree.  But, it got
		 * deleted before we ever needed to insert it, so all
		 * we have to do is clean up the accounting
		 */
		BUG_ON(extent_op);
		head = btrfs_delayed_node_to_head(node);
		trace_run_delayed_ref_head(root->fs_info, node, head,
					   node->action);

		if (insert_reserved) {
			btrfs_pin_extent(root, node->bytenr,
					 node->num_bytes, 1);
			if (head->is_data) {
				ret = btrfs_del_csums(trans, root,
						      node->bytenr,
						      node->num_bytes);
			}
		}

		/* Also free its reserved qgroup space */
		btrfs_qgroup_free_delayed_ref(root->fs_info,
					      head->qgroup_ref_root,
					      head->qgroup_reserved);
		return ret;
	}

	if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
	    node->type == BTRFS_SHARED_BLOCK_REF_KEY)
		ret = run_delayed_tree_ref(trans, root, node, extent_op,
					   insert_reserved);
	else if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
		 node->type == BTRFS_SHARED_DATA_REF_KEY)
		ret = run_delayed_data_ref(trans, root, node, extent_op,
					   insert_reserved);
	else
		BUG();
	return ret;
}

static inline struct btrfs_delayed_ref_node *
select_delayed_ref(struct btrfs_delayed_ref_head *head)
{
	struct btrfs_delayed_ref_node *ref;

	if (list_empty(&head->ref_list))
		return NULL;

	/*
	 * Select a delayed ref of type BTRFS_ADD_DELAYED_REF first.
	 * This is to prevent a ref count from going down to zero, which deletes
	 * the extent item from the extent tree, when there still are references
	 * to add, which would fail because they would not find the extent item.
	 */
	list_for_each_entry(ref, &head->ref_list, list) {
		if (ref->action == BTRFS_ADD_DELAYED_REF)
			return ref;
	}

	return list_entry(head->ref_list.next, struct btrfs_delayed_ref_node,
			  list);
}
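
/*
 * The main delayed-ref processing loop below works head by head: pick a ref
 * head (btrfs_select_ref_head), lock it, merge matching add/drop pairs
 * (btrfs_merge_delayed_refs), then run each remaining ref through
 * run_one_delayed_ref().  The head node itself is run last so that any
 * accounting fixups happen after its refs are settled.
 */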

/*
 * Returns 0 on success or if called with an already aborted transaction.
 * Returns -ENOMEM or -EIO on failure and will abort the transaction.
 */
static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
					     struct btrfs_root *root,
					     unsigned long nr)
{
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_delayed_ref_node *ref;
	struct btrfs_delayed_ref_head *locked_ref = NULL;
	struct btrfs_delayed_extent_op *extent_op;
	struct btrfs_fs_info *fs_info = root->fs_info;
	ktime_t start = ktime_get();
	int ret;
	unsigned long count = 0;
	unsigned long actual_count = 0;
	int must_insert_reserved = 0;

	delayed_refs = &trans->transaction->delayed_refs;
	while (1) {
		if (!locked_ref) {
			if (count >= nr)
				break;

			spin_lock(&delayed_refs->lock);
			locked_ref = btrfs_select_ref_head(trans);
			if (!locked_ref) {
				spin_unlock(&delayed_refs->lock);
				break;
			}

			/* grab the lock that says we are going to process
			 * all the refs for this head */
			ret = btrfs_delayed_ref_lock(trans, locked_ref);
			spin_unlock(&delayed_refs->lock);
			/*
			 * we may have dropped the spin lock to get the head
			 * mutex lock, and that might have given someone else
			 * time to free the head.  If that's true, it has been
			 * removed from our list and we can move on.
			 */
			if (ret == -EAGAIN) {
				locked_ref = NULL;
				count++;
				continue;
			}
		}

		/*
		 * We need to try and merge add/drops of the same ref since we
		 * can run into issues with relocate dropping the implicit ref
		 * and then it being added back again before the drop can
		 * finish.  If we merged anything we need to re-loop so we can
		 * get a good ref.
		 * Or we can get node references of the same type that weren't
		 * merged when created due to bumps in the tree mod seq, and
		 * we need to merge them to prevent adding an inline extent
		 * backref before dropping it (triggering a BUG_ON at
		 * insert_inline_extent_backref()).
		 */
		spin_lock(&locked_ref->lock);
		btrfs_merge_delayed_refs(trans, fs_info, delayed_refs,
					 locked_ref);

		/*
		 * locked_ref is the head node, so we have to go one
		 * node back for any delayed ref updates
		 */
		ref = select_delayed_ref(locked_ref);

		if (ref && ref->seq &&
		    btrfs_check_delayed_seq(fs_info, delayed_refs, ref->seq)) {
			spin_unlock(&locked_ref->lock);
			btrfs_delayed_ref_unlock(locked_ref);
			spin_lock(&delayed_refs->lock);
			locked_ref->processing = 0;
			delayed_refs->num_heads_ready++;
			spin_unlock(&delayed_refs->lock);
			locked_ref = NULL;
			cond_resched();
			count++;
			continue;
		}

		/*
		 * record the must insert reserved flag before we
		 * drop the spin lock.
		 */
		must_insert_reserved = locked_ref->must_insert_reserved;
		locked_ref->must_insert_reserved = 0;

		extent_op = locked_ref->extent_op;
		locked_ref->extent_op = NULL;

		if (!ref) {
			/* All delayed refs have been processed, go ahead
			 * and send the head node to run_one_delayed_ref,
			 * so that any accounting fixes can happen
			 */
			ref = &locked_ref->node;

			if (extent_op && must_insert_reserved) {
				btrfs_free_delayed_extent_op(extent_op);
				extent_op = NULL;
			}

			if (extent_op) {
				spin_unlock(&locked_ref->lock);
				ret = run_delayed_extent_op(trans, root,
							    ref, extent_op);
				btrfs_free_delayed_extent_op(extent_op);

				if (ret) {
					/*
					 * Need to reset must_insert_reserved if
					 * there was an error so the abort stuff
					 * can cleanup the reserved space
					 * properly.
					 */
					if (must_insert_reserved)
						locked_ref->must_insert_reserved = 1;
					locked_ref->processing = 0;
					btrfs_debug(fs_info, "run_delayed_extent_op returned %d", ret);
					btrfs_delayed_ref_unlock(locked_ref);
					return ret;
				}
				continue;
			}

			/*
			 * Need to drop our head ref lock and re-acquire the
			 * delayed ref lock and then re-check to make sure
			 * nobody got added.
			 */
			spin_unlock(&locked_ref->lock);
			spin_lock(&delayed_refs->lock);
			spin_lock(&locked_ref->lock);
			if (!list_empty(&locked_ref->ref_list) ||
			    locked_ref->extent_op) {
				spin_unlock(&locked_ref->lock);
				spin_unlock(&delayed_refs->lock);
				continue;
			}
			ref->in_tree = 0;
			delayed_refs->num_heads--;
			rb_erase(&locked_ref->href_node,
				 &delayed_refs->href_root);
			spin_unlock(&delayed_refs->lock);
		} else {
			actual_count++;
			ref->in_tree = 0;
			list_del(&ref->list);
		}
		atomic_dec(&delayed_refs->num_entries);

		if (!btrfs_delayed_ref_is_head(ref)) {
			/*
			 * when we play the delayed ref, also correct the
			 * ref_mod on head
			 */
			switch (ref->action) {
			case BTRFS_ADD_DELAYED_REF:
			case BTRFS_ADD_DELAYED_EXTENT:
				locked_ref->node.ref_mod -= ref->ref_mod;
				break;
			case BTRFS_DROP_DELAYED_REF:
				locked_ref->node.ref_mod += ref->ref_mod;
				break;
			default:
				WARN_ON(1);
			}
		}
		spin_unlock(&locked_ref->lock);

		ret = run_one_delayed_ref(trans, root, ref, extent_op,
					  must_insert_reserved);

		btrfs_free_delayed_extent_op(extent_op);
		if (ret) {
			locked_ref->processing = 0;
			btrfs_delayed_ref_unlock(locked_ref);
			btrfs_put_delayed_ref(ref);
			btrfs_debug(fs_info, "run_one_delayed_ref returned %d", ret);
			return ret;
		}

		/*
		 * If this node is a head, that means all the refs in this head
		 * have been dealt with, and we will pick the next head to deal
		 * with, so we must unlock the head and drop it from the cluster
		 * list before we release it.
		 */
		if (btrfs_delayed_ref_is_head(ref)) {
			if (locked_ref->is_data &&
			    locked_ref->total_ref_mod < 0) {
				spin_lock(&delayed_refs->lock);
				delayed_refs->pending_csums -= ref->num_bytes;
				spin_unlock(&delayed_refs->lock);
			}
			btrfs_delayed_ref_unlock(locked_ref);
			locked_ref = NULL;
		}
		btrfs_put_delayed_ref(ref);
		count++;
		cond_resched();
	}

	/*
	 * We don't want to include ref heads since we can have empty ref heads
	 * and those will drastically skew our runtime down since we just do
	 * accounting, no actual extent tree updates.
	 */
	if (actual_count > 0) {
		u64 runtime = ktime_to_ns(ktime_sub(ktime_get(), start));
		u64 avg;

		/*
		 * We weigh the current average higher than our current runtime
		 * to avoid large swings in the average.
		 */
		spin_lock(&delayed_refs->lock);
		avg = fs_info->avg_delayed_ref_runtime * 3 + runtime;
		fs_info->avg_delayed_ref_runtime = avg >> 2;	/* div by 4 */
		spin_unlock(&delayed_refs->lock);
	}
	return 0;
}

#ifdef SCRAMBLE_DELAYED_REFS
/*
 * Normally delayed refs get processed in ascending bytenr order.  This
 * correlates in most cases to the order added.  To expose dependencies on
 * this order, we start to process the tree in the middle instead of the
 * beginning.
 */
static u64 find_middle(struct rb_root *root)
{
	struct rb_node *n = root->rb_node;
	struct btrfs_delayed_ref_node *entry;
	int alt = 1;
	u64 middle;
	u64 first = 0, last = 0;

	n = rb_first(root);
	if (n) {
		entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
		first = entry->bytenr;
	}
	n = rb_last(root);
	if (n) {
		entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
		last = entry->bytenr;
	}
	n = root->rb_node;

	while (n) {
		entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
		WARN_ON(!entry->in_tree);

		middle = entry->bytenr;

		if (alt)
			n = n->rb_left;
		else
			n = n->rb_right;

		alt = 1 - alt;
	}
	return middle;
}
#endif

static inline u64 heads_to_leaves(struct btrfs_root *root, u64 heads)
{
	u64 num_bytes;

	num_bytes = heads * (sizeof(struct btrfs_extent_item) +
			     sizeof(struct btrfs_extent_inline_ref));
	if (!btrfs_fs_incompat(root->fs_info, SKINNY_METADATA))
		num_bytes += heads * sizeof(struct btrfs_tree_block_info);

	/*
	 * We don't ever fill up leaves all the way, so the caller doubles
	 * this estimate (see btrfs_check_space_for_delayed_refs() below) to
	 * be closer to what we're really going to want to use.
	 */
	return div_u64(num_bytes, BTRFS_LEAF_DATA_SIZE(root));
}
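
/*
 * Back-of-the-envelope (illustrative, assuming 16K nodes): each head costs
 * sizeof(btrfs_extent_item) + sizeof(btrfs_extent_inline_ref), roughly
 * 24 + 9 = 33 bytes, so on a SKINNY_METADATA filesystem around 500 heads
 * fit in one leaf worth of new items; without the feature, add another
 * ~18 bytes of btrfs_tree_block_info per head.
 */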

/*
 * Takes the number of bytes to be checksummed and figures out how many
 * leaves it would require to store the csums for that many bytes.
 */
u64 btrfs_csum_bytes_to_leaves(struct btrfs_root *root, u64 csum_bytes)
{
	u64 csum_size;
	u64 num_csums_per_leaf;
	u64 num_csums;

	csum_size = BTRFS_MAX_ITEM_SIZE(root);
	num_csums_per_leaf = div64_u64(csum_size,
			(u64)btrfs_super_csum_size(root->fs_info->super_copy));
	num_csums = div64_u64(csum_bytes, root->sectorsize);
	num_csums += num_csums_per_leaf - 1;
	num_csums = div64_u64(num_csums, num_csums_per_leaf);
	return num_csums;
}
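
/*
 * Example (illustrative, typical defaults): with 4K sectors and 4-byte
 * crc32c checksums, one ~16K leaf holds roughly 4000 csums and thus covers
 * about 16M of data, so btrfs_csum_bytes_to_leaves(root, SZ_1G) comes out
 * to roughly 65 leaves.
 */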

int btrfs_check_space_for_delayed_refs(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root)
{
	struct btrfs_block_rsv *global_rsv;
	u64 num_heads = trans->transaction->delayed_refs.num_heads_ready;
	u64 csum_bytes = trans->transaction->delayed_refs.pending_csums;
	u64 num_dirty_bgs = trans->transaction->num_dirty_bgs;
	u64 num_bytes, num_dirty_bgs_bytes;
	int ret = 0;

	num_bytes = btrfs_calc_trans_metadata_size(root, 1);
	num_heads = heads_to_leaves(root, num_heads);
	if (num_heads > 1)
		num_bytes += (num_heads - 1) * root->nodesize;
	num_bytes <<= 1;
	num_bytes += btrfs_csum_bytes_to_leaves(root, csum_bytes) * root->nodesize;
	num_dirty_bgs_bytes = btrfs_calc_trans_metadata_size(root,
							     num_dirty_bgs);
	global_rsv = &root->fs_info->global_block_rsv;

	/*
	 * If we can't allocate any more chunks let's make sure we have _lots_
	 * of wiggle room since running delayed refs can create more delayed
	 * refs.
	 */
	if (global_rsv->space_info->full) {
		num_dirty_bgs_bytes <<= 1;
		num_bytes <<= 1;
	}

	spin_lock(&global_rsv->lock);
	if (global_rsv->reserved <= num_bytes + num_dirty_bgs_bytes)
		ret = 1;
	spin_unlock(&global_rsv->lock);
	return ret;
}

int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 num_entries =
		atomic_read(&trans->transaction->delayed_refs.num_entries);
	u64 avg_runtime;
	u64 val;

	smp_mb();
	avg_runtime = fs_info->avg_delayed_ref_runtime;
	val = num_entries * avg_runtime;
	if (val >= NSEC_PER_SEC)
		return 1;
	if (val >= NSEC_PER_SEC / 2)
		return 2;

	return btrfs_check_space_for_delayed_refs(trans, root);
}
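
/*
 * The return value above encodes throttling urgency: 1 means the delayed ref
 * backlog is estimated at a full second or more of work (num_entries times
 * the average per-ref runtime), 2 means at least half a second, and
 * otherwise we fall through to the global reservation check.
 */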

struct async_delayed_refs {
	struct btrfs_root *root;
	u64 transid;
	int count;
	int error;
	int sync;
	struct completion wait;
	struct btrfs_work work;
};

static void delayed_ref_async_start(struct btrfs_work *work)
{
	struct async_delayed_refs *async;
	struct btrfs_trans_handle *trans;
	int ret;

	async = container_of(work, struct async_delayed_refs, work);

	/* if the commit is already started, we don't need to wait here */
	if (btrfs_transaction_blocked(async->root->fs_info))
		goto done;

	trans = btrfs_join_transaction(async->root);
	if (IS_ERR(trans)) {
		async->error = PTR_ERR(trans);
		goto done;
	}

	/*
	 * trans->sync means that when we call end_transaction, we won't
	 * wait on delayed refs
	 */
	trans->sync = true;

	/* Don't bother flushing if we got into a different transaction */
	if (trans->transid > async->transid)
		goto end;

	ret = btrfs_run_delayed_refs(trans, async->root, async->count);
	if (ret)
		async->error = ret;
end:
	ret = btrfs_end_transaction(trans, async->root);
	if (ret && !async->error)
		async->error = ret;
done:
	if (async->sync)
		complete(&async->wait);
	else
		kfree(async);
}

int btrfs_async_run_delayed_refs(struct btrfs_root *root,
				 unsigned long count, u64 transid, int wait)
{
	struct async_delayed_refs *async;
	int ret;

	async = kmalloc(sizeof(*async), GFP_NOFS);
	if (!async)
		return -ENOMEM;

	async->root = root->fs_info->tree_root;
	async->count = count;
	async->error = 0;
	async->transid = transid;
	if (wait)
		async->sync = 1;
	else
		async->sync = 0;
	init_completion(&async->wait);

	btrfs_init_work(&async->work, btrfs_extent_refs_helper,
			delayed_ref_async_start, NULL, NULL);

	btrfs_queue_work(root->fs_info->extent_workers, &async->work);

	if (wait) {
		wait_for_completion(&async->wait);
		ret = async->error;
		kfree(async);
		return ret;
	}
	return 0;
}
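
/*
 * Usage note: with wait != 0, btrfs_async_run_delayed_refs() above blocks on
 * the completion and returns the worker's error; with wait == 0 it is
 * fire-and-forget, the worker frees the async struct itself and any error is
 * dropped.
 */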

/*
 * this starts processing the delayed reference count updates and
 * extent insertions we have queued up so far.  count can be
 * 0, which means to process everything in the tree at the start
 * of the run (but not newly added entries), or it can be some target
 * number you'd like to process.
 *
 * Returns 0 on success or if called with an aborted transaction
 * Returns <0 on error and aborts the transaction
 */
int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root, unsigned long count)
{
	struct rb_node *node;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_delayed_ref_head *head;
	int ret;
	int run_all = count == (unsigned long)-1;
	bool can_flush_pending_bgs = trans->can_flush_pending_bgs;

	/* We'll clean this up in btrfs_cleanup_transaction */
	if (trans->aborted)
		return 0;

	if (test_bit(BTRFS_FS_CREATING_FREE_SPACE_TREE, &root->fs_info->flags))
		return 0;

	if (root == root->fs_info->extent_root)
		root = root->fs_info->tree_root;

	delayed_refs = &trans->transaction->delayed_refs;
	if (count == 0)
		count = atomic_read(&delayed_refs->num_entries) * 2;

again:
#ifdef SCRAMBLE_DELAYED_REFS
	delayed_refs->run_delayed_start = find_middle(&delayed_refs->root);
#endif
	trans->can_flush_pending_bgs = false;
	ret = __btrfs_run_delayed_refs(trans, root, count);
	if (ret < 0) {
		btrfs_abort_transaction(trans, ret);
		return ret;
	}

	if (run_all) {
		if (!list_empty(&trans->new_bgs))
			btrfs_create_pending_block_groups(trans, root);

		spin_lock(&delayed_refs->lock);
		node = rb_first(&delayed_refs->href_root);
		if (!node) {
			spin_unlock(&delayed_refs->lock);
			goto out;
		}
		count = (unsigned long)-1;

		while (node) {
			head = rb_entry(node, struct btrfs_delayed_ref_head,
					href_node);
			if (btrfs_delayed_ref_is_head(&head->node)) {
				struct btrfs_delayed_ref_node *ref;

				ref = &head->node;
				atomic_inc(&ref->refs);

				spin_unlock(&delayed_refs->lock);
				/*
				 * Mutex was contended, block until it's
				 * released and try again
				 */
				mutex_lock(&head->mutex);
				mutex_unlock(&head->mutex);

				btrfs_put_delayed_ref(ref);
				cond_resched();
				goto again;
			} else {
				WARN_ON(1);
			}
			node = rb_next(node);
		}
		spin_unlock(&delayed_refs->lock);
		cond_resched();
		goto again;
	}
out:
	assert_qgroups_uptodate(trans);
	trans->can_flush_pending_bgs = can_flush_pending_bgs;
	return 0;
}

int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				u64 bytenr, u64 num_bytes, u64 flags,
				int level, int is_data)
{
	struct btrfs_delayed_extent_op *extent_op;
	int ret;

	extent_op = btrfs_alloc_delayed_extent_op();
	if (!extent_op)
		return -ENOMEM;

	extent_op->flags_to_set = flags;
	extent_op->update_flags = true;
	extent_op->update_key = false;
	extent_op->is_data = is_data ? true : false;
	extent_op->level = level;

	ret = btrfs_add_delayed_extent_op(root->fs_info, trans, bytenr,
					  num_bytes, extent_op);
	if (ret)
		btrfs_free_delayed_extent_op(extent_op);
	return ret;
}

static noinline int check_delayed_ref(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_path *path,
				      u64 objectid, u64 offset, u64 bytenr)
{
	struct btrfs_delayed_ref_head *head;
	struct btrfs_delayed_ref_node *ref;
	struct btrfs_delayed_data_ref *data_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	int ret = 0;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);
	head = btrfs_find_delayed_ref_head(trans, bytenr);
	if (!head) {
		spin_unlock(&delayed_refs->lock);
		return 0;
	}

	if (!mutex_trylock(&head->mutex)) {
		atomic_inc(&head->node.refs);
		spin_unlock(&delayed_refs->lock);

		btrfs_release_path(path);

		/*
		 * Mutex was contended, block until it's released and let
		 * caller try again
		 */
		mutex_lock(&head->mutex);
		mutex_unlock(&head->mutex);
		btrfs_put_delayed_ref(&head->node);
		return -EAGAIN;
	}
	spin_unlock(&delayed_refs->lock);

	spin_lock(&head->lock);
	list_for_each_entry(ref, &head->ref_list, list) {
		/* If it's a shared ref we know a cross reference exists */
		if (ref->type != BTRFS_EXTENT_DATA_REF_KEY) {
			ret = 1;
			break;
		}

		data_ref = btrfs_delayed_node_to_data_ref(ref);

		/*
		 * If our ref doesn't match the one we're currently looking at
		 * then we have a cross reference.
		 */
		if (data_ref->root != root->root_key.objectid ||
		    data_ref->objectid != objectid ||
		    data_ref->offset != offset) {
			ret = 1;
			break;
		}
	}
	spin_unlock(&head->lock);
	mutex_unlock(&head->mutex);
	return ret;
}

static noinline int check_committed_ref(struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct btrfs_path *path,
					u64 objectid, u64 offset, u64 bytenr)
{
	struct btrfs_root *extent_root = root->fs_info->extent_root;
	struct extent_buffer *leaf;
	struct btrfs_extent_data_ref *ref;
	struct btrfs_extent_inline_ref *iref;
	struct btrfs_extent_item *ei;
	struct btrfs_key key;
	u32 item_size;
	int ret;

	key.objectid = bytenr;
	key.offset = (u64)-1;
	key.type = BTRFS_EXTENT_ITEM_KEY;

	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	BUG_ON(ret == 0); /* Corruption */

	ret = -ENOENT;
	if (path->slots[0] == 0)
		goto out;

	path->slots[0]--;
	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

	if (key.objectid != bytenr || key.type != BTRFS_EXTENT_ITEM_KEY)
		goto out;

	ret = 1;
	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (item_size < sizeof(*ei)) {
		WARN_ON(item_size != sizeof(struct btrfs_extent_item_v0));
		goto out;
	}
#endif
	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);

	if (item_size != sizeof(*ei) +
	    btrfs_extent_inline_ref_size(BTRFS_EXTENT_DATA_REF_KEY))
		goto out;

	if (btrfs_extent_generation(leaf, ei) <=
	    btrfs_root_last_snapshot(&root->root_item))
		goto out;

	iref = (struct btrfs_extent_inline_ref *)(ei + 1);
	if (btrfs_extent_inline_ref_type(leaf, iref) !=
	    BTRFS_EXTENT_DATA_REF_KEY)
		goto out;

	ref = (struct btrfs_extent_data_ref *)(&iref->offset);
	if (btrfs_extent_refs(leaf, ei) !=
	    btrfs_extent_data_ref_count(leaf, ref) ||
	    btrfs_extent_data_ref_root(leaf, ref) !=
	    root->root_key.objectid ||
	    btrfs_extent_data_ref_objectid(leaf, ref) != objectid ||
	    btrfs_extent_data_ref_offset(leaf, ref) != offset)
		goto out;

	ret = 0;
out:
	return ret;
}
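
/*
 * btrfs_cross_ref_exist() combines the committed-tree check above with a
 * scan of the pending delayed refs, retrying while check_delayed_ref() hits
 * -EAGAIN on head mutex contention.  A return of 0 means no foreign
 * reference was found; > 0 means a cross reference exists (or could not be
 * ruled out); < 0 is an error.
 */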
int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root,
			  u64 objectid, u64 offset, u64 bytenr)
{
	struct btrfs_path *path;
	int ret;
	int ret2;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	do {
		ret = check_committed_ref(trans, root, path, objectid,
					  offset, bytenr);
		if (ret && ret != -ENOENT)
			goto out;

		ret2 = check_delayed_ref(trans, root, path, objectid,
					 offset, bytenr);
	} while (ret2 == -EAGAIN);

	if (ret2 && ret2 != -ENOENT) {
		ret = ret2;
		goto out;
	}

	if (ret != -ENOENT || ret2 != -ENOENT)
		ret = 0;
out:
	btrfs_free_path(path);
	if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
		WARN_ON(ret > 0);
	return ret;
}
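
/*
 * __btrfs_mod_ref() below walks every pointer in one tree block and adds
 * (inc=1) or drops (inc=0) a reference for each: file extent items on
 * leaves, child block pointers on nodes.  With full_backref set, the refs
 * are shared refs keyed on the block's own bytenr (parent = buf->start)
 * instead of the owning root.
 */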
static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   struct extent_buffer *buf,
			   int full_backref, int inc)
{
	u64 bytenr;
	u64 num_bytes;
	u64 parent;
	u64 ref_root;
	u32 nritems;
	struct btrfs_key key;
	struct btrfs_file_extent_item *fi;
	int i;
	int level;
	int ret = 0;
	int (*process_func)(struct btrfs_trans_handle *, struct btrfs_root *,
			    u64, u64, u64, u64, u64, u64);

	if (btrfs_is_testing(root->fs_info))
		return 0;

	ref_root = btrfs_header_owner(buf);
	nritems = btrfs_header_nritems(buf);
	level = btrfs_header_level(buf);

	if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state) && level == 0)
		return 0;

	if (inc)
		process_func = btrfs_inc_extent_ref;
	else
		process_func = btrfs_free_extent;

	if (full_backref)
		parent = buf->start;
	else
		parent = 0;

	for (i = 0; i < nritems; i++) {
		if (level == 0) {
			btrfs_item_key_to_cpu(buf, &key, i);
			if (key.type != BTRFS_EXTENT_DATA_KEY)
				continue;
			fi = btrfs_item_ptr(buf, i,
					    struct btrfs_file_extent_item);
			if (btrfs_file_extent_type(buf, fi) ==
			    BTRFS_FILE_EXTENT_INLINE)
				continue;
			bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
			if (bytenr == 0)
				continue;

			num_bytes = btrfs_file_extent_disk_num_bytes(buf, fi);
			key.offset -= btrfs_file_extent_offset(buf, fi);
			ret = process_func(trans, root, bytenr, num_bytes,
					   parent, ref_root, key.objectid,
					   key.offset);
			if (ret)
				goto fail;
		} else {
			bytenr = btrfs_node_blockptr(buf, i);
			num_bytes = root->nodesize;
			ret = process_func(trans, root, bytenr, num_bytes,
					   parent, ref_root, level - 1, 0);
			if (ret)
				goto fail;
		}
	}
	return 0;
fail:
	return ret;
}

int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		  struct extent_buffer *buf, int full_backref)
{
	return __btrfs_mod_ref(trans, root, buf, full_backref, 1);
}

int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		  struct extent_buffer *buf, int full_backref)
{
	return __btrfs_mod_ref(trans, root, buf, full_backref, 0);
}

static int write_one_cache_group(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_block_group_cache *cache)
{
	int ret;
	struct btrfs_root *extent_root = root->fs_info->extent_root;
	unsigned long bi;
	struct extent_buffer *leaf;

	ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
	if (ret) {
		if (ret > 0)
			ret = -ENOENT;
		goto fail;
	}

	leaf = path->nodes[0];
	bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
	write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
	btrfs_mark_buffer_dirty(leaf);
fail:
	btrfs_release_path(path);
	return ret;
}

static struct btrfs_block_group_cache *
next_block_group(struct btrfs_root *root,
		 struct btrfs_block_group_cache *cache)
{
	struct rb_node *node;

	spin_lock(&root->fs_info->block_group_cache_lock);

	/* If our block group was removed, we need a full search. */
	if (RB_EMPTY_NODE(&cache->cache_node)) {
		const u64 next_bytenr = cache->key.objectid + cache->key.offset;

		spin_unlock(&root->fs_info->block_group_cache_lock);
		btrfs_put_block_group(cache);
		cache = btrfs_lookup_first_block_group(root->fs_info,
						       next_bytenr);
		return cache;
	}
	node = rb_next(&cache->cache_node);
	btrfs_put_block_group(cache);
	if (node) {
		cache = rb_entry(node, struct btrfs_block_group_cache,
				 cache_node);
		btrfs_get_block_group(cache);
	} else
		cache = NULL;
	spin_unlock(&root->fs_info->block_group_cache_lock);
	return cache;
}

static int cache_save_setup(struct btrfs_block_group_cache *block_group,
			    struct btrfs_trans_handle *trans,
			    struct btrfs_path *path)
{
	struct btrfs_root *root = block_group->fs_info->tree_root;
	struct inode *inode = NULL;
	u64 alloc_hint = 0;
	int dcs = BTRFS_DC_ERROR;
	u64 num_pages = 0;
	int retries = 0;
	int ret = 0;

	/*
	 * If this block group is smaller than 100 megs don't bother caching
	 * the block group.
	 */
	if (block_group->key.offset < (100 * SZ_1M)) {
		spin_lock(&block_group->lock);
		block_group->disk_cache_state = BTRFS_DC_WRITTEN;
		spin_unlock(&block_group->lock);
		return 0;
	}

	if (trans->aborted)
		return 0;
again:
	inode = lookup_free_space_inode(root, block_group, path);
	if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
		ret = PTR_ERR(inode);
		btrfs_release_path(path);
		goto out;
	}

	if (IS_ERR(inode)) {
		BUG_ON(retries);
		retries++;

		if (block_group->ro)
			goto out_free;

		ret = create_free_space_inode(root, trans, block_group, path);
		if (ret)
			goto out_free;
		goto again;
	}

	/* We've already set up this transaction, go ahead and exit */
	if (block_group->cache_generation == trans->transid &&
	    i_size_read(inode)) {
		dcs = BTRFS_DC_SETUP;
		goto out_put;
	}

	/*
	 * We want to set the generation to 0, that way if anything goes wrong
	 * from here on out we know not to trust this cache when we load up
	 * next time.
	 */
	BTRFS_I(inode)->generation = 0;
	ret = btrfs_update_inode(trans, root, inode);
	if (ret) {
		/*
		 * So theoretically we could recover from this, simply set the
		 * super cache generation to 0 so we know to invalidate the
		 * cache, but then we'd have to keep track of the block groups
		 * that fail this way so we know we _have_ to reset this cache
		 * before the next commit or risk reading stale cache.  So to
		 * limit our exposure to horrible edge cases let's just abort
		 * the transaction, this only happens in really bad situations
		 * anyway.
		 */
		btrfs_abort_transaction(trans, ret);
		goto out_put;
	}
	WARN_ON(ret);

	if (i_size_read(inode) > 0) {
		ret = btrfs_check_trunc_cache_free_space(root,
					&root->fs_info->global_block_rsv);
		if (ret)
			goto out_put;

		ret = btrfs_truncate_free_space_cache(root, trans, NULL, inode);
		if (ret)
			goto out_put;
	}

	spin_lock(&block_group->lock);
	if (block_group->cached != BTRFS_CACHE_FINISHED ||
	    !btrfs_test_opt(root->fs_info, SPACE_CACHE)) {
		/*
		 * don't bother trying to write stuff out _if_
		 * a) we're not cached, or
		 * b) we're running with the nospace_cache mount option.
		 */
		dcs = BTRFS_DC_WRITTEN;
		spin_unlock(&block_group->lock);
		goto out_put;
	}
	spin_unlock(&block_group->lock);

	/*
	 * We hit an ENOSPC when setting up the cache in this transaction, just
	 * skip doing the setup, we've already cleared the cache so we're safe.
	 */
	if (test_bit(BTRFS_TRANS_CACHE_ENOSPC, &trans->transaction->flags)) {
		ret = -ENOSPC;
		goto out_put;
	}

	/*
	 * Try to preallocate enough space based on how big the block group is.
	 * Keep in mind this has to include any pinned space which could end up
	 * taking up quite a bit since it's not folded into the other space
	 * cache.
	 */
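
	/*
	 * Sizing note (illustrative): the math below allots 16 pages of
	 * cache file per 256M of block group.  With 4K pages, a 1G block
	 * group gives num_pages = 4 * 16 = 64 pages, i.e. a 256K
	 * preallocation.
	 */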
	num_pages = div_u64(block_group->key.offset, SZ_256M);
	if (!num_pages)
		num_pages = 1;

	num_pages *= 16;
	num_pages *= PAGE_SIZE;

	ret = btrfs_check_data_free_space(inode, 0, num_pages);
	if (ret)
		goto out_put;

	ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, num_pages,
					      num_pages, num_pages,
					      &alloc_hint);
	/*
	 * Our cache requires contiguous chunks so that we don't modify a bunch
	 * of metadata or split extents when writing the cache out, which means
	 * we can hit ENOSPC if we are heavily fragmented in addition to just
	 * normal out of space conditions.  So if we hit this just skip setting
	 * up any other block groups for this transaction, maybe we'll unpin
	 * enough space the next time around.
	 */
	if (!ret)
		dcs = BTRFS_DC_SETUP;
	else if (ret == -ENOSPC)
		set_bit(BTRFS_TRANS_CACHE_ENOSPC, &trans->transaction->flags);

out_put:
	iput(inode);
out_free:
	btrfs_release_path(path);
out:
	spin_lock(&block_group->lock);
	if (!ret && dcs == BTRFS_DC_SETUP)
		block_group->cache_generation = trans->transid;
	block_group->disk_cache_state = dcs;
	spin_unlock(&block_group->lock);
	return ret;
}

int btrfs_setup_space_cache(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root)
{
	struct btrfs_block_group_cache *cache, *tmp;
	struct btrfs_transaction *cur_trans = trans->transaction;
	struct btrfs_path *path;

	if (list_empty(&cur_trans->dirty_bgs) ||
	    !btrfs_test_opt(root->fs_info, SPACE_CACHE))
		return 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* Could add new block groups, use _safe just in case */
	list_for_each_entry_safe(cache, tmp, &cur_trans->dirty_bgs,
				 dirty_list) {
		if (cache->disk_cache_state == BTRFS_DC_CLEAR)
			cache_save_setup(cache, trans, path);
	}

	btrfs_free_path(path);
	return 0;
}

/*
 * transaction commit does final block group cache writeback during a
 * critical section where nothing is allowed to change the FS.  This is
 * required in order for the cache to actually match the block group,
 * but can introduce a lot of latency into the commit.
 *
 * So, btrfs_start_dirty_block_groups is here to kick off block group
 * cache IO.  There's a chance we'll have to redo some of it if the
 * block group changes again during the commit, but it greatly reduces
 * the commit latency by getting rid of the easy block groups while
 * we're still allowing others to join the commit.
 */
int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root)
{
	struct btrfs_block_group_cache *cache;
	struct btrfs_transaction *cur_trans = trans->transaction;
	int ret = 0;
	int should_put;
	struct btrfs_path *path = NULL;
	LIST_HEAD(dirty);
	struct list_head *io = &cur_trans->io_bgs;
	int num_started = 0;
	int loops = 0;

	spin_lock(&cur_trans->dirty_bgs_lock);
	if (list_empty(&cur_trans->dirty_bgs)) {
		spin_unlock(&cur_trans->dirty_bgs_lock);
		return 0;
	}
	list_splice_init(&cur_trans->dirty_bgs, &dirty);
	spin_unlock(&cur_trans->dirty_bgs_lock);

again:
	/*
	 * make sure all the block groups on our dirty list actually
	 * exist
	 */
	btrfs_create_pending_block_groups(trans, root);

	if (!path) {
		path = btrfs_alloc_path();
		if (!path)
			return -ENOMEM;
	}

	/*
	 * cache_write_mutex is here only to save us from balance or automatic
	 * removal of empty block groups deleting this block group while we are
	 * writing out the cache
	 */
	mutex_lock(&trans->transaction->cache_write_mutex);
	while (!list_empty(&dirty)) {
		cache = list_first_entry(&dirty,
					 struct btrfs_block_group_cache,
					 dirty_list);
		/*
		 * this can happen if something re-dirties a block
		 * group that is already under IO.  Just wait for it to
		 * finish and then do it all again
		 */
		if (!list_empty(&cache->io_list)) {
			list_del_init(&cache->io_list);
			btrfs_wait_cache_io(root, trans, cache,
					    &cache->io_ctl, path,
					    cache->key.objectid);
			btrfs_put_block_group(cache);
		}

		/*
		 * btrfs_wait_cache_io uses the cache->dirty_list to decide
		 * if it should update the cache_state.  Don't delete
		 * until after we wait.
		 *
		 * Since we're not running in the commit critical section
		 * we need the dirty_bgs_lock to protect from update_block_group
		 */
		spin_lock(&cur_trans->dirty_bgs_lock);
		list_del_init(&cache->dirty_list);
		spin_unlock(&cur_trans->dirty_bgs_lock);

		should_put = 1;

		cache_save_setup(cache, trans, path);

		if (cache->disk_cache_state == BTRFS_DC_SETUP) {
			cache->io_ctl.inode = NULL;
			ret = btrfs_write_out_cache(root, trans, cache, path);
			if (ret == 0 && cache->io_ctl.inode) {
				num_started++;
				should_put = 0;

				/*
				 * the cache_write_mutex is protecting
				 * the io_list
				 */
				list_add_tail(&cache->io_list, io);
			} else {
				/*
				 * if we failed to write the cache, the
				 * generation will be bad and life goes on
				 */
				ret = 0;
			}
		}
		if (!ret) {
			ret = write_one_cache_group(trans, root, path, cache);
			/*
			 * Our block group might still be attached to the list
			 * of new block groups in the transaction handle of some
			 * other task (struct btrfs_trans_handle->new_bgs). This
			 * means its block group item isn't yet in the extent
			 * tree.  If this happens ignore the error, as we will
			 * try again later in the critical section of the
			 * transaction commit.
			 */
			if (ret == -ENOENT) {
				ret = 0;
				spin_lock(&cur_trans->dirty_bgs_lock);
				if (list_empty(&cache->dirty_list)) {
					list_add_tail(&cache->dirty_list,
						      &cur_trans->dirty_bgs);
					btrfs_get_block_group(cache);
				}
				spin_unlock(&cur_trans->dirty_bgs_lock);
			} else if (ret) {
				btrfs_abort_transaction(trans, ret);
			}
		}

		/* if it's not on the io list, we need to put the block group */
		if (should_put)
			btrfs_put_block_group(cache);

		if (ret)
			break;

		/*
		 * Avoid blocking other tasks for too long.  It might even save
		 * us from writing caches for block groups that are going to be
		 * removed.
		 */
		mutex_unlock(&trans->transaction->cache_write_mutex);
		mutex_lock(&trans->transaction->cache_write_mutex);
	}
	mutex_unlock(&trans->transaction->cache_write_mutex);

	/*
	 * go through delayed refs for all the stuff we've just kicked off
	 * and then loop back (just once)
	 */
	ret = btrfs_run_delayed_refs(trans, root, 0);
	if (!ret && loops == 0) {
		loops++;
		spin_lock(&cur_trans->dirty_bgs_lock);
		list_splice_init(&cur_trans->dirty_bgs, &dirty);
		/*
		 * dirty_bgs_lock protects us from concurrent block group
		 * deletes too (not just cache_write_mutex).
		 */
		if (!list_empty(&dirty)) {
			spin_unlock(&cur_trans->dirty_bgs_lock);
			goto again;
		}
		spin_unlock(&cur_trans->dirty_bgs_lock);
	} else if (ret < 0) {
		btrfs_cleanup_dirty_bgs(cur_trans, root);
	}

	btrfs_free_path(path);
	return ret;
}

int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root)
{
	struct btrfs_block_group_cache *cache;
	struct btrfs_transaction *cur_trans = trans->transaction;
	int ret = 0;
	int should_put;
	struct btrfs_path *path;
	struct list_head *io = &cur_trans->io_bgs;
	int num_started = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/*
	 * Even though we are in the critical section of the transaction
	 * commit, we can still have concurrent tasks adding elements to this
	 * transaction's list of dirty block groups. These tasks correspond to
	 * endio free space workers started when writeback finishes for a
	 * space cache, which run inode.c:btrfs_finish_ordered_io(), and can
	 * allocate new block groups as a result of COWing nodes of the root
	 * tree when updating the free space inode. The writeback for the space
	 * caches is triggered by an earlier call to
	 * btrfs_start_dirty_block_groups() and iterations of the following
	 * loop.
	 * Also we want to do the cache_save_setup first and then run the
	 * delayed refs to make sure we have the best chance at doing this all
	 * in one shot.
	 */
	spin_lock(&cur_trans->dirty_bgs_lock);
	while (!list_empty(&cur_trans->dirty_bgs)) {
		cache = list_first_entry(&cur_trans->dirty_bgs,
					 struct btrfs_block_group_cache,
					 dirty_list);

		/*
		 * this can happen if cache_save_setup re-dirties a block
		 * group that is already under IO.  Just wait for it to
		 * finish and then do it all again
		 */
		if (!list_empty(&cache->io_list)) {
			spin_unlock(&cur_trans->dirty_bgs_lock);
			list_del_init(&cache->io_list);
			btrfs_wait_cache_io(root, trans, cache,
					    &cache->io_ctl, path,
					    cache->key.objectid);
			btrfs_put_block_group(cache);
			spin_lock(&cur_trans->dirty_bgs_lock);
		}

		/*
		 * don't remove from the dirty list until after we've waited
		 * on any pending IO
		 */
		list_del_init(&cache->dirty_list);
		spin_unlock(&cur_trans->dirty_bgs_lock);
		should_put = 1;

		cache_save_setup(cache, trans, path);

		if (!ret)
			ret = btrfs_run_delayed_refs(trans, root,
						     (unsigned long) -1);

		if (!ret && cache->disk_cache_state == BTRFS_DC_SETUP) {
			cache->io_ctl.inode = NULL;
			ret = btrfs_write_out_cache(root, trans, cache, path);
			if (ret == 0 && cache->io_ctl.inode) {
				num_started++;
				should_put = 0;
				list_add_tail(&cache->io_list, io);
			} else {
				/*
				 * if we failed to write the cache, the
				 * generation will be bad and life goes on
				 */
				ret = 0;
			}
		}
		if (!ret) {
			ret = write_one_cache_group(trans, root, path, cache);
			/*
			 * One of the free space endio workers might have
			 * created a new block group while updating a free space
			 * cache's inode (at inode.c:btrfs_finish_ordered_io())
			 * and hasn't released its transaction handle yet, in
			 * which case the new block group is still attached to
			 * its transaction handle and its creation has not
			 * finished yet (no block group item in the extent tree
			 * yet, etc).  If this is the case, wait for all free
			 * space endio workers to finish and retry.  This is a
			 * very rare case so no need for a more efficient and
			 * complex approach.
			 */
			if (ret == -ENOENT) {
				wait_event(cur_trans->writer_wait,
				   atomic_read(&cur_trans->num_writers) == 1);
				ret = write_one_cache_group(trans, root, path,
							    cache);
			}
			if (ret)
				btrfs_abort_transaction(trans, ret);
		}

		/* if it's not on the io list, we need to put the block group */
		if (should_put)
			btrfs_put_block_group(cache);
		spin_lock(&cur_trans->dirty_bgs_lock);
	}
	spin_unlock(&cur_trans->dirty_bgs_lock);

	while (!list_empty(io)) {
		cache = list_first_entry(io, struct btrfs_block_group_cache,
					 io_list);
		list_del_init(&cache->io_list);
		btrfs_wait_cache_io(root, trans, cache,
				    &cache->io_ctl, path, cache->key.objectid);
		btrfs_put_block_group(cache);
	}

	btrfs_free_path(path);
	return ret;
}

int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr)
{
	struct btrfs_block_group_cache *block_group;
	int readonly = 0;

	block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
	if (!block_group || block_group->ro)
		readonly = 1;
	if (block_group)
		btrfs_put_block_group(block_group);
	return readonly;
}

bool btrfs_inc_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr)
{
	struct btrfs_block_group_cache *bg;
	bool ret = true;

	bg = btrfs_lookup_block_group(fs_info, bytenr);
	if (!bg)
		return false;

	spin_lock(&bg->lock);
	if (bg->ro)
		ret = false;
	else
		atomic_inc(&bg->nocow_writers);
	spin_unlock(&bg->lock);

	/* no put on block group, done by btrfs_dec_nocow_writers */
	if (!ret)
		btrfs_put_block_group(bg);

	return ret;
}

void btrfs_dec_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr)
{
	struct btrfs_block_group_cache *bg;

	bg = btrfs_lookup_block_group(fs_info, bytenr);
	ASSERT(bg);
	if (atomic_dec_and_test(&bg->nocow_writers))
		wake_up_atomic_t(&bg->nocow_writers);
	/*
	 * Once for our lookup and once for the lookup done by a previous call
	 * to btrfs_inc_nocow_writers()
	 */
	btrfs_put_block_group(bg);
	btrfs_put_block_group(bg);
}

static int btrfs_wait_nocow_writers_atomic_t(atomic_t *a)
{
	schedule();
	return 0;
}

void btrfs_wait_nocow_writers(struct btrfs_block_group_cache *bg)
{
	wait_on_atomic_t(&bg->nocow_writers,
			 btrfs_wait_nocow_writers_atomic_t,
			 TASK_UNINTERRUPTIBLE);
}

static const char *alloc_name(u64 flags)
{
	switch (flags) {
	case BTRFS_BLOCK_GROUP_METADATA|BTRFS_BLOCK_GROUP_DATA:
		return "mixed";
	case BTRFS_BLOCK_GROUP_METADATA:
		return "metadata";
	case BTRFS_BLOCK_GROUP_DATA:
		return "data";
	case BTRFS_BLOCK_GROUP_SYSTEM:
		return "system";
	default:
		WARN_ON(1);
		return "invalid-combination";
	}
}
static int update_space_info(struct btrfs_fs_info *info, u64 flags,
			     u64 total_bytes, u64 bytes_used,
			     u64 bytes_readonly,
			     struct btrfs_space_info **space_info)
{
	struct btrfs_space_info *found;
	int i;
	int factor;
	int ret;

	if (flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
		     BTRFS_BLOCK_GROUP_RAID10))
		factor = 2;
	else
		factor = 1;

	found = __find_space_info(info, flags);
	if (found) {
		spin_lock(&found->lock);
		found->total_bytes += total_bytes;
		found->disk_total += total_bytes * factor;
		found->bytes_used += bytes_used;
		found->disk_used += bytes_used * factor;
		found->bytes_readonly += bytes_readonly;
		if (total_bytes > 0)
			found->full = 0;
		space_info_add_new_bytes(info, found, total_bytes -
					 bytes_used - bytes_readonly);
		spin_unlock(&found->lock);
		*space_info = found;
		return 0;
	}
	found = kzalloc(sizeof(*found), GFP_NOFS);
	if (!found)
		return -ENOMEM;

	ret = percpu_counter_init(&found->total_bytes_pinned, 0, GFP_KERNEL);
	if (ret) {
		kfree(found);
		return ret;
	}

	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
		INIT_LIST_HEAD(&found->block_groups[i]);
	init_rwsem(&found->groups_sem);
	spin_lock_init(&found->lock);
	found->flags = flags & BTRFS_BLOCK_GROUP_TYPE_MASK;
	found->total_bytes = total_bytes;
	found->disk_total = total_bytes * factor;
	found->bytes_used = bytes_used;
	found->disk_used = bytes_used * factor;
	found->bytes_pinned = 0;
	found->bytes_reserved = 0;
	found->bytes_readonly = bytes_readonly;
	found->bytes_may_use = 0;
	found->full = 0;
	found->max_extent_size = 0;
	found->force_alloc = CHUNK_ALLOC_NO_FORCE;
	found->chunk_alloc = 0;
	found->flush = 0;
	init_waitqueue_head(&found->wait);
	INIT_LIST_HEAD(&found->ro_bgs);
	INIT_LIST_HEAD(&found->tickets);
	INIT_LIST_HEAD(&found->priority_tickets);

	ret = kobject_init_and_add(&found->kobj, &space_info_ktype,
				   info->space_info_kobj, "%s",
				   alloc_name(found->flags));
	if (ret) {
		kfree(found);
		return ret;
	}

	*space_info = found;
	list_add_rcu(&found->list, &info->space_info);
	if (flags & BTRFS_BLOCK_GROUP_DATA)
		info->data_sinfo = found;

	return ret;
}
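
/* Record the extended profile bits of @flags as available for allocation. */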
static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
{
	u64 extra_flags = chunk_to_extended(flags) &
			  BTRFS_EXTENDED_PROFILE_MASK;

	write_seqlock(&fs_info->profiles_lock);
	if (flags & BTRFS_BLOCK_GROUP_DATA)
		fs_info->avail_data_alloc_bits |= extra_flags;
	if (flags & BTRFS_BLOCK_GROUP_METADATA)
		fs_info->avail_metadata_alloc_bits |= extra_flags;
	if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
		fs_info->avail_system_alloc_bits |= extra_flags;
	write_sequnlock(&fs_info->profiles_lock);
}

/*
 * Returns target flags in extended format or 0 if restripe for this
 * chunk_type is not in progress.
 *
 * Should be called with either volume_mutex or balance_lock held.
 */
static u64 get_restripe_target(struct btrfs_fs_info *fs_info, u64 flags)
{
	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
	u64 target = 0;

	if (!bctl)
		return 0;

	if (flags & BTRFS_BLOCK_GROUP_DATA &&
	    bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) {
		target = BTRFS_BLOCK_GROUP_DATA | bctl->data.target;
	} else if (flags & BTRFS_BLOCK_GROUP_SYSTEM &&
		   bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
		target = BTRFS_BLOCK_GROUP_SYSTEM | bctl->sys.target;
	} else if (flags & BTRFS_BLOCK_GROUP_METADATA &&
		   bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) {
		target = BTRFS_BLOCK_GROUP_METADATA | bctl->meta.target;
	}

	return target;
}

/*
 * @flags: available profiles in extended format (see ctree.h)
 *
 * Returns reduced profile in chunk format. If profile changing is in
 * progress (either running or paused) picks the target profile (if it's
 * already available), otherwise falls back to plain reducing.
 */
static u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
{
	u64 num_devices = root->fs_info->fs_devices->rw_devices;
	u64 target;
	u64 raid_type;
	u64 allowed = 0;

	/*
	 * See if restripe for this chunk_type is in progress, if so
	 * try to reduce to the target profile.
	 */
	spin_lock(&root->fs_info->balance_lock);
	target = get_restripe_target(root->fs_info, flags);
	if (target) {
		/* pick target profile only if it's already available */
		if ((flags & target) & BTRFS_EXTENDED_PROFILE_MASK) {
			spin_unlock(&root->fs_info->balance_lock);
			return extended_to_chunk(target);
		}
	}
	spin_unlock(&root->fs_info->balance_lock);

	/* First, mask out the RAID levels which aren't possible */
	for (raid_type = 0; raid_type < BTRFS_NR_RAID_TYPES; raid_type++) {
		if (num_devices >= btrfs_raid_array[raid_type].devs_min)
			allowed |= btrfs_raid_group[raid_type];
	}
	allowed &= flags;

	if (allowed & BTRFS_BLOCK_GROUP_RAID6)
		allowed = BTRFS_BLOCK_GROUP_RAID6;
	else if (allowed & BTRFS_BLOCK_GROUP_RAID5)
		allowed = BTRFS_BLOCK_GROUP_RAID5;
	else if (allowed & BTRFS_BLOCK_GROUP_RAID10)
		allowed = BTRFS_BLOCK_GROUP_RAID10;
	else if (allowed & BTRFS_BLOCK_GROUP_RAID1)
		allowed = BTRFS_BLOCK_GROUP_RAID1;
	else if (allowed & BTRFS_BLOCK_GROUP_RAID0)
		allowed = BTRFS_BLOCK_GROUP_RAID0;

	flags &= ~BTRFS_BLOCK_GROUP_PROFILE_MASK;

	return extended_to_chunk(flags | allowed);
}

static u64 get_alloc_profile(struct btrfs_root *root, u64 orig_flags)
{
	unsigned seq;
	u64 flags;

	do {
		flags = orig_flags;
		seq = read_seqbegin(&root->fs_info->profiles_lock);

		if (flags & BTRFS_BLOCK_GROUP_DATA)
			flags |= root->fs_info->avail_data_alloc_bits;
		else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
			flags |= root->fs_info->avail_system_alloc_bits;
		else if (flags & BTRFS_BLOCK_GROUP_METADATA)
			flags |= root->fs_info->avail_metadata_alloc_bits;
	} while (read_seqretry(&root->fs_info->profiles_lock, seq));

	return btrfs_reduce_alloc_profile(root, flags);
}

u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data)
{
	u64 flags;
	u64 ret;

	if (data)
		flags = BTRFS_BLOCK_GROUP_DATA;
	else if (root == root->fs_info->chunk_root)
		flags = BTRFS_BLOCK_GROUP_SYSTEM;
	else
		flags = BTRFS_BLOCK_GROUP_METADATA;

	ret = get_alloc_profile(root, flags);
	return ret;
}
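
/*
 * Make sure @bytes of data space is reserved for @inode, allocating a new
 * data chunk or committing the transaction to reclaim pinned space when
 * the data space_info is short.
 */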
int btrfs_alloc_data_chunk_ondemand(struct inode *inode, u64 bytes)
{
	struct btrfs_space_info *data_sinfo;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 used;
	int ret = 0;
	int need_commit = 2;
	int have_pinned_space;

	/* make sure bytes are sectorsize aligned */
	bytes = ALIGN(bytes, root->sectorsize);

	if (btrfs_is_free_space_inode(inode)) {
		need_commit = 0;
		ASSERT(current->journal_info);
	}

	data_sinfo = fs_info->data_sinfo;
	if (!data_sinfo)
		goto alloc;

again:
	/* make sure we have enough space to handle the data first */
	spin_lock(&data_sinfo->lock);
	used = data_sinfo->bytes_used + data_sinfo->bytes_reserved +
		data_sinfo->bytes_pinned + data_sinfo->bytes_readonly +
		data_sinfo->bytes_may_use;

	if (used + bytes > data_sinfo->total_bytes) {
		struct btrfs_trans_handle *trans;

		/*
		 * If we don't have enough free bytes in this space then we need
		 * to alloc a new chunk.
		 */
		if (!data_sinfo->full) {
			u64 alloc_target;

			data_sinfo->force_alloc = CHUNK_ALLOC_FORCE;
			spin_unlock(&data_sinfo->lock);
alloc:
			alloc_target = btrfs_get_alloc_profile(root, 1);
			/*
			 * It is ugly that we don't call a nolock join
			 * transaction for the free space inode case here.
			 * But it is safe because we only do the data space
			 * reservation for the free space cache in the
			 * transaction context; the common join transaction
			 * just increases the use count of the current
			 * transaction handle and doesn't try to acquire the
			 * trans_lock of the fs.
			 */
			trans = btrfs_join_transaction(root);
			if (IS_ERR(trans))
				return PTR_ERR(trans);

			ret = do_chunk_alloc(trans, root->fs_info->extent_root,
					     alloc_target,
					     CHUNK_ALLOC_NO_FORCE);
			btrfs_end_transaction(trans, root);
			if (ret < 0) {
				if (ret != -ENOSPC)
					return ret;
				else {
					have_pinned_space = 1;
					goto commit_trans;
				}
			}

			if (!data_sinfo)
				data_sinfo = fs_info->data_sinfo;

			goto again;
		}

		/*
		 * If we don't have enough pinned space to deal with this
		 * allocation, and no removed chunk in current transaction,
		 * don't bother committing the transaction.
		 */
		have_pinned_space = percpu_counter_compare(
			&data_sinfo->total_bytes_pinned,
			used + bytes - data_sinfo->total_bytes);
		spin_unlock(&data_sinfo->lock);

		/* commit the current transaction and try again */
commit_trans:
		if (need_commit &&
		    !atomic_read(&root->fs_info->open_ioctl_trans)) {
			need_commit--;

			if (need_commit > 0) {
				btrfs_start_delalloc_roots(fs_info, 0, -1);
				btrfs_wait_ordered_roots(fs_info, -1, 0, (u64)-1);
			}

			trans = btrfs_join_transaction(root);
			if (IS_ERR(trans))
				return PTR_ERR(trans);
			if (have_pinned_space >= 0 ||
			    test_bit(BTRFS_TRANS_HAVE_FREE_BGS,
				     &trans->transaction->flags) ||
			    need_commit > 0) {
				ret = btrfs_commit_transaction(trans, root);
				if (ret)
					return ret;
				/*
				 * The cleaner kthread might still be doing iput
				 * operations. Wait for it to finish so that
				 * more space is released.
				 */
				mutex_lock(&root->fs_info->cleaner_delayed_iput_mutex);
				mutex_unlock(&root->fs_info->cleaner_delayed_iput_mutex);
				goto again;
			} else {
				btrfs_end_transaction(trans, root);
			}
		}

		trace_btrfs_space_reservation(root->fs_info,
					      "space_info:enospc",
					      data_sinfo->flags, bytes, 1);
		return -ENOSPC;
	}
	data_sinfo->bytes_may_use += bytes;
	trace_btrfs_space_reservation(root->fs_info, "space_info",
				      data_sinfo->flags, bytes, 1);
	spin_unlock(&data_sinfo->lock);

	return ret;
}

/*
 * New check_data_free_space() with the ability to do precise data
 * reservations. Will replace the old btrfs_check_data_free_space(), but
 * for ease of patch splitting we add the new function first and replace
 * the old one later.
 */
int btrfs_check_data_free_space(struct inode *inode, u64 start, u64 len)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret;

	/* align the range */
	len = round_up(start + len, root->sectorsize) -
	      round_down(start, root->sectorsize);
	start = round_down(start, root->sectorsize);

	ret = btrfs_alloc_data_chunk_ondemand(inode, len);
	if (ret < 0)
		return ret;

	/* Use new btrfs_qgroup_reserve_data to reserve precise data space. */
	ret = btrfs_qgroup_reserve_data(inode, start, len);
	if (ret)
		btrfs_free_reserved_data_space_noquota(inode, start, len);
	return ret;
}

/*
 * Called if we need to clear a data reservation for this inode,
 * normally in an error case.
 *
 * This one will *NOT* use the accurate qgroup reserved space API, just for
 * the case where we can't sleep and are sure it won't affect qgroup
 * reserved space, like clear_bit_hook().
 */
void btrfs_free_reserved_data_space_noquota(struct inode *inode, u64 start,
					    u64 len)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_space_info *data_sinfo;

	/* Make sure the range is aligned to sectorsize */
	len = round_up(start + len, root->sectorsize) -
	      round_down(start, root->sectorsize);
	start = round_down(start, root->sectorsize);

	data_sinfo = root->fs_info->data_sinfo;
	spin_lock(&data_sinfo->lock);
	if (WARN_ON(data_sinfo->bytes_may_use < len))
		data_sinfo->bytes_may_use = 0;
	else
		data_sinfo->bytes_may_use -= len;
	trace_btrfs_space_reservation(root->fs_info, "space_info",
				      data_sinfo->flags, len, 0);
	spin_unlock(&data_sinfo->lock);
}

/*
 * Called if we need to clear a data reservation for this inode,
 * normally in an error case.
 *
 * This one will handle the per-inode data rsv map for the accurate
 * reserved space framework.
 */
void btrfs_free_reserved_data_space(struct inode *inode, u64 start, u64 len)
{
	btrfs_free_reserved_data_space_noquota(inode, start, len);
	btrfs_qgroup_free_data(inode, start, len);
}

static void force_metadata_allocation(struct btrfs_fs_info *info)
{
	struct list_head *head = &info->space_info;
	struct btrfs_space_info *found;

	rcu_read_lock();
	list_for_each_entry_rcu(found, head, list) {
		if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
			found->force_alloc = CHUNK_ALLOC_FORCE;
	}
	rcu_read_unlock();
}

static inline u64 calc_global_rsv_need_space(struct btrfs_block_rsv *global)
{
	return (global->size << 1);
}
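
/*
 * Decide whether a new chunk should be allocated for @sinfo, based on how
 * much of the space is used and how strong a @force was requested.
 */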
static int should_alloc_chunk(struct btrfs_root *root,
			      struct btrfs_space_info *sinfo, int force)
{
	struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
	u64 num_bytes = sinfo->total_bytes - sinfo->bytes_readonly;
	u64 num_allocated = sinfo->bytes_used + sinfo->bytes_reserved;
	u64 thresh;

	if (force == CHUNK_ALLOC_FORCE)
		return 1;

	/*
	 * We need to take into account the global rsv because for all intents
	 * and purposes it's used space. Don't worry about locking the
	 * global_rsv, it doesn't change except when the transaction commits.
	 */
	if (sinfo->flags & BTRFS_BLOCK_GROUP_METADATA)
		num_allocated += calc_global_rsv_need_space(global_rsv);

	/*
	 * In limited mode, we want to have some free space up to
	 * about 1% of the FS size.
	 */
	if (force == CHUNK_ALLOC_LIMITED) {
		thresh = btrfs_super_total_bytes(root->fs_info->super_copy);
		thresh = max_t(u64, SZ_64M, div_factor_fine(thresh, 1));

		if (num_bytes - num_allocated < thresh)
			return 1;
	}

	if (num_allocated + SZ_2M < div_factor(num_bytes, 8))
		return 0;
	return 1;
}
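
/*
 * Number of device items we may need to update when allocating or
 * removing a chunk of the given profile @type.
 */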
static u64 get_profile_num_devs(struct btrfs_root *root, u64 type)
{
	u64 num_dev;

	if (type & (BTRFS_BLOCK_GROUP_RAID10 |
		    BTRFS_BLOCK_GROUP_RAID0 |
		    BTRFS_BLOCK_GROUP_RAID5 |
		    BTRFS_BLOCK_GROUP_RAID6))
		num_dev = root->fs_info->fs_devices->rw_devices;
	else if (type & BTRFS_BLOCK_GROUP_RAID1)
		num_dev = 2;
	else
		num_dev = 1;	/* DUP or single */

	return num_dev;
}

/*
 * Reserve space in the system space info necessary for allocating or
 * removing a chunk.
 */
void check_system_chunk(struct btrfs_trans_handle *trans,
			struct btrfs_root *root,
			u64 type)
{
	struct btrfs_space_info *info;
	u64 left;
	u64 thresh;
	int ret = 0;
	u64 num_devs;

	/*
	 * Needed because we can end up allocating a system chunk and for an
	 * atomic and race free space reservation in the chunk block reserve.
	 */
	ASSERT(mutex_is_locked(&root->fs_info->chunk_mutex));

	info = __find_space_info(root->fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
	spin_lock(&info->lock);
	left = info->total_bytes - info->bytes_used - info->bytes_pinned -
		info->bytes_reserved - info->bytes_readonly -
		info->bytes_may_use;
	spin_unlock(&info->lock);

	num_devs = get_profile_num_devs(root, type);

	/* num_devs device items to update and 1 chunk item to add or remove */
	thresh = btrfs_calc_trunc_metadata_size(root, num_devs) +
		btrfs_calc_trans_metadata_size(root, 1);

	if (left < thresh && btrfs_test_opt(root->fs_info, ENOSPC_DEBUG)) {
		btrfs_info(root->fs_info, "left=%llu, need=%llu, flags=%llu",
			   left, thresh, type);
		dump_space_info(info, 0, 0);
	}

	if (left < thresh) {
		u64 flags;

		flags = btrfs_get_alloc_profile(root->fs_info->chunk_root, 0);
		/*
		 * Ignore failure to create system chunk. We might end up not
		 * needing it, as we might not need to COW all nodes/leafs from
		 * the paths we visit in the chunk tree (they were already COWed
		 * or created in the current transaction for example).
		 */
		ret = btrfs_alloc_chunk(trans, root, flags);
	}

	if (!ret) {
		ret = btrfs_block_rsv_add(root->fs_info->chunk_root,
					  &root->fs_info->chunk_block_rsv,
					  thresh, BTRFS_RESERVE_NO_FLUSH);
		if (!ret)
			trans->chunk_bytes_reserved += thresh;
	}
}

/*
 * If force is CHUNK_ALLOC_FORCE:
 *    - return 1 if it successfully allocates a chunk,
 *    - return errors including -ENOSPC otherwise.
 * If force is NOT CHUNK_ALLOC_FORCE:
 *    - return 0 if it doesn't need to allocate a new chunk,
 *    - return 1 if it successfully allocates a chunk,
 *    - return errors including -ENOSPC otherwise.
 */
static int do_chunk_alloc(struct btrfs_trans_handle *trans,
			  struct btrfs_root *extent_root, u64 flags, int force)
{
	struct btrfs_space_info *space_info;
	struct btrfs_fs_info *fs_info = extent_root->fs_info;
	int wait_for_alloc = 0;
	int ret = 0;

	/* Don't re-enter if we're already allocating a chunk */
	if (trans->allocating_chunk)
		return -ENOSPC;

	space_info = __find_space_info(extent_root->fs_info, flags);
	if (!space_info) {
		ret = update_space_info(extent_root->fs_info, flags,
					0, 0, 0, &space_info);
		BUG_ON(ret); /* -ENOMEM */
	}
	BUG_ON(!space_info); /* Logic error */

again:
	spin_lock(&space_info->lock);
	if (force < space_info->force_alloc)
		force = space_info->force_alloc;
	if (space_info->full) {
		if (should_alloc_chunk(extent_root, space_info, force))
			ret = -ENOSPC;
		else
			ret = 0;
		spin_unlock(&space_info->lock);
		return ret;
	}

	if (!should_alloc_chunk(extent_root, space_info, force)) {
		spin_unlock(&space_info->lock);
		return 0;
	} else if (space_info->chunk_alloc) {
		wait_for_alloc = 1;
	} else {
		space_info->chunk_alloc = 1;
	}

	spin_unlock(&space_info->lock);

	mutex_lock(&fs_info->chunk_mutex);

	/*
	 * The chunk_mutex is held throughout the entirety of a chunk
	 * allocation, so once we've acquired the chunk_mutex we know that the
	 * other guy is done and we need to recheck and see if we should
	 * allocate.
	 */
	if (wait_for_alloc) {
		mutex_unlock(&fs_info->chunk_mutex);
		wait_for_alloc = 0;
		goto again;
	}

	trans->allocating_chunk = true;

	/*
	 * If we have mixed data/metadata chunks we want to make sure we keep
	 * allocating mixed chunks instead of individual chunks.
	 */
	if (btrfs_mixed_space_info(space_info))
		flags |= (BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA);

	/*
	 * If we're doing a data chunk, go ahead and make sure that
	 * we keep a reasonable number of metadata chunks allocated in the
	 * FS as well.
	 */
	if (flags & BTRFS_BLOCK_GROUP_DATA && fs_info->metadata_ratio) {
		fs_info->data_chunk_allocations++;
		if (!(fs_info->data_chunk_allocations %
		      fs_info->metadata_ratio))
			force_metadata_allocation(fs_info);
	}

	/*
	 * Check if we have enough space in SYSTEM chunk because we may need
	 * to update devices.
	 */
	check_system_chunk(trans, extent_root, flags);

	ret = btrfs_alloc_chunk(trans, extent_root, flags);
	trans->allocating_chunk = false;

	spin_lock(&space_info->lock);
	if (ret < 0 && ret != -ENOSPC)
		goto out;
	if (ret)
		space_info->full = 1;
	else
		ret = 1;

	space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
out:
	space_info->chunk_alloc = 0;
	spin_unlock(&space_info->lock);
	mutex_unlock(&fs_info->chunk_mutex);
	/*
	 * When we allocate a new chunk we reserve space in the chunk block
	 * reserve to make sure we can COW nodes/leafs in the chunk tree or
	 * add new nodes/leafs to it if we end up needing to do it when
	 * inserting the chunk item and updating device items as part of the
	 * second phase of chunk allocation, performed by
	 * btrfs_finish_chunk_alloc(). So make sure we don't accumulate a
	 * large number of new block groups to create in our transaction
	 * handle's new_bgs list to avoid exhausting the chunk block reserve
	 * in extreme cases - like having a single transaction create many new
	 * block groups when starting to write out the free space caches of all
	 * the block groups that were made dirty during the lifetime of the
	 * transaction.
	 */
	if (trans->can_flush_pending_bgs &&
	    trans->chunk_bytes_reserved >= (u64)SZ_2M) {
		btrfs_create_pending_block_groups(trans, extent_root);
		btrfs_trans_release_chunk_metadata(trans);
	}
	return ret;
}
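
/*
 * Decide whether we may reserve @bytes over what is currently free in
 * @space_info, based on the unallocated device space and how much
 * flushing the caller allows.
 */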
static int can_overcommit(struct btrfs_root *root,
			  struct btrfs_space_info *space_info, u64 bytes,
			  enum btrfs_reserve_flush_enum flush)
{
	struct btrfs_block_rsv *global_rsv;
	u64 profile;
	u64 space_size;
	u64 avail;
	u64 used;

	/* Don't overcommit when in mixed mode. */
	if (space_info->flags & BTRFS_BLOCK_GROUP_DATA)
		return 0;

	BUG_ON(root->fs_info == NULL);
	global_rsv = &root->fs_info->global_block_rsv;
	profile = btrfs_get_alloc_profile(root, 0);
	used = space_info->bytes_used + space_info->bytes_reserved +
		space_info->bytes_pinned + space_info->bytes_readonly;

	/*
	 * We only want to allow over committing if we have lots of actual space
	 * free, but if we don't have enough space to handle the global reserve
	 * space then we could end up having a real enospc problem when trying
	 * to allocate a chunk or some other such important allocation.
	 */
	spin_lock(&global_rsv->lock);
	space_size = calc_global_rsv_need_space(global_rsv);
	spin_unlock(&global_rsv->lock);
	if (used + space_size >= space_info->total_bytes)
		return 0;

	used += space_info->bytes_may_use;

	spin_lock(&root->fs_info->free_chunk_lock);
	avail = root->fs_info->free_chunk_space;
	spin_unlock(&root->fs_info->free_chunk_lock);

	/*
	 * If we have dup, raid1 or raid10 then only half of the free
	 * space is actually useable. For raid56, the space info used
	 * doesn't include the parity drive, so we don't have to
	 * change the math.
	 */
	if (profile & (BTRFS_BLOCK_GROUP_DUP |
		       BTRFS_BLOCK_GROUP_RAID1 |
		       BTRFS_BLOCK_GROUP_RAID10))
		avail >>= 1;

	/*
	 * If we aren't flushing all things, let us overcommit up to
	 * 1/2 of the space. If we can flush, don't let us overcommit
	 * too much, let it overcommit up to 1/8 of the space.
	 */
	if (flush == BTRFS_RESERVE_FLUSH_ALL)
		avail >>= 3;
	else
		avail >>= 1;

	if (used + bytes < space_info->total_bytes + avail)
		return 1;
	return 0;
}

static void btrfs_writeback_inodes_sb_nr(struct btrfs_root *root,
					 unsigned long nr_pages, int nr_items)
{
	struct super_block *sb = root->fs_info->sb;

	if (down_read_trylock(&sb->s_umount)) {
		writeback_inodes_sb_nr(sb, nr_pages, WB_REASON_FS_FREE_SPACE);
		up_read(&sb->s_umount);
	} else {
		/*
		 * We needn't worry about the filesystem going from r/w to r/o
		 * even though we don't acquire the ->s_umount mutex, because
		 * the filesystem should guarantee the delalloc inodes list is
		 * empty after the filesystem becomes readonly (all dirty pages
		 * have been written to disk).
		 */
		btrfs_start_delalloc_roots(root->fs_info, 0, nr_items);
		if (!current->journal_info)
			btrfs_wait_ordered_roots(root->fs_info, nr_items,
						 0, (u64)-1);
	}
}

static inline int calc_reclaim_items_nr(struct btrfs_root *root, u64 to_reclaim)
{
	u64 bytes;
	int nr;

	bytes = btrfs_calc_trans_metadata_size(root, 1);
	nr = (int)div64_u64(to_reclaim, bytes);
	if (!nr)
		nr = 1;
	return nr;
}

#define EXTENT_SIZE_PER_ITEM	SZ_256K

/*
 * shrink metadata reservation for delalloc
 */
static void shrink_delalloc(struct btrfs_root *root, u64 to_reclaim, u64 orig,
			    bool wait_ordered)
{
	struct btrfs_block_rsv *block_rsv;
	struct btrfs_space_info *space_info;
	struct btrfs_trans_handle *trans;
	u64 delalloc_bytes;
	u64 max_reclaim;
	long time_left;
	unsigned long nr_pages;
	int loops;
	int items;
	enum btrfs_reserve_flush_enum flush;

	/* Calc the number of pages we need to flush for this space reservation */
	items = calc_reclaim_items_nr(root, to_reclaim);
	to_reclaim = (u64)items * EXTENT_SIZE_PER_ITEM;

	trans = (struct btrfs_trans_handle *)current->journal_info;
	block_rsv = &root->fs_info->delalloc_block_rsv;
	space_info = block_rsv->space_info;

	delalloc_bytes = percpu_counter_sum_positive(
				&root->fs_info->delalloc_bytes);
	if (delalloc_bytes == 0) {
		if (trans)
			return;
		if (wait_ordered)
			btrfs_wait_ordered_roots(root->fs_info, items,
						 0, (u64)-1);
		return;
	}

	loops = 0;
	while (delalloc_bytes && loops < 3) {
		max_reclaim = min(delalloc_bytes, to_reclaim);
		nr_pages = max_reclaim >> PAGE_SHIFT;
		btrfs_writeback_inodes_sb_nr(root, nr_pages, items);
		/*
		 * We need to wait for the async pages to actually start before
		 * we do anything.
		 */
		max_reclaim = atomic_read(&root->fs_info->async_delalloc_pages);
		if (!max_reclaim)
			goto skip_async;

		if (max_reclaim <= nr_pages)
			max_reclaim = 0;
		else
			max_reclaim -= nr_pages;

		wait_event(root->fs_info->async_submit_wait,
			   atomic_read(&root->fs_info->async_delalloc_pages) <=
			   (int)max_reclaim);
skip_async:
		if (!trans)
			flush = BTRFS_RESERVE_FLUSH_ALL;
		else
			flush = BTRFS_RESERVE_NO_FLUSH;
		spin_lock(&space_info->lock);
		if (can_overcommit(root, space_info, orig, flush)) {
			spin_unlock(&space_info->lock);
			break;
		}
		if (list_empty(&space_info->tickets) &&
		    list_empty(&space_info->priority_tickets)) {
			spin_unlock(&space_info->lock);
			break;
		}
		spin_unlock(&space_info->lock);

		loops++;
		if (wait_ordered && !trans) {
			btrfs_wait_ordered_roots(root->fs_info, items,
						 0, (u64)-1);
		} else {
			time_left = schedule_timeout_killable(1);
			if (time_left)
				break;
		}
		delalloc_bytes = percpu_counter_sum_positive(
				&root->fs_info->delalloc_bytes);
	}
}

/**
 * may_commit_transaction - possibly commit the transaction if it's ok to
 * @root - the root we're allocating for
 * @space_info - the space_info we're allocating for
 * @bytes - the number of bytes we want to reserve
 * @force - force the commit
 *
 * This will check to make sure that committing the transaction will actually
 * get us somewhere and then commit the transaction if it does. Otherwise it
 * will return -ENOSPC.
 */
static int may_commit_transaction(struct btrfs_root *root,
				  struct btrfs_space_info *space_info,
				  u64 bytes, int force)
{
	struct btrfs_block_rsv *delayed_rsv = &root->fs_info->delayed_block_rsv;
	struct btrfs_trans_handle *trans;

	trans = (struct btrfs_trans_handle *)current->journal_info;
	if (trans)
		return -EAGAIN;

	if (force)
		goto commit;

	/* See if there is enough pinned space to make this reservation */
	if (percpu_counter_compare(&space_info->total_bytes_pinned,
				   bytes) >= 0)
		goto commit;

	/*
	 * See if there is some space in the delayed insertion reservation for
	 * this reservation. Bail out (-ENOSPC) only when even the pinned
	 * space plus the delayed rsv cannot cover the requested bytes; the
	 * comparison here must be < 0, not >= 0, or the test is inverted.
	 */
	if (space_info != delayed_rsv->space_info)
		return -ENOSPC;

	spin_lock(&delayed_rsv->lock);
	if (percpu_counter_compare(&space_info->total_bytes_pinned,
				   bytes - delayed_rsv->size) < 0) {
		spin_unlock(&delayed_rsv->lock);
		return -ENOSPC;
	}
	spin_unlock(&delayed_rsv->lock);

commit:
	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans))
		return -ENOSPC;

	return btrfs_commit_transaction(trans, root);
}
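
/*
 * A metadata reservation waiting to be satisfied. Tickets are queued on
 * a space_info and filled in order as space is reclaimed; the owner
 * sleeps on @wait until @bytes drops to zero or @error is set.
 */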
struct reserve_ticket {
	u64 bytes;
	int error;
	struct list_head list;
	wait_queue_head_t wait;
};
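
/* Run a single flush @state to try to reclaim @num_bytes for @space_info. */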
static int flush_space(struct btrfs_root *root,
		       struct btrfs_space_info *space_info, u64 num_bytes,
		       u64 orig_bytes, int state)
{
	struct btrfs_trans_handle *trans;
	int nr;
	int ret = 0;

	switch (state) {
	case FLUSH_DELAYED_ITEMS_NR:
	case FLUSH_DELAYED_ITEMS:
		if (state == FLUSH_DELAYED_ITEMS_NR)
			nr = calc_reclaim_items_nr(root, num_bytes) * 2;
		else
			nr = -1;

		trans = btrfs_join_transaction(root);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			break;
		}
		ret = btrfs_run_delayed_items_nr(trans, root, nr);
		btrfs_end_transaction(trans, root);
		break;
	case FLUSH_DELALLOC:
	case FLUSH_DELALLOC_WAIT:
		shrink_delalloc(root, num_bytes * 2, orig_bytes,
				state == FLUSH_DELALLOC_WAIT);
		break;
	case ALLOC_CHUNK:
		trans = btrfs_join_transaction(root);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			break;
		}
		ret = do_chunk_alloc(trans, root->fs_info->extent_root,
				     btrfs_get_alloc_profile(root, 0),
				     CHUNK_ALLOC_NO_FORCE);
		btrfs_end_transaction(trans, root);
		if (ret > 0 || ret == -ENOSPC)
			ret = 0;
		break;
	case COMMIT_TRANS:
		ret = may_commit_transaction(root, space_info, orig_bytes, 0);
		break;
	default:
		ret = -ENOSPC;
		break;
	}

	trace_btrfs_flush_space(root->fs_info, space_info->flags, num_bytes,
				orig_bytes, state, ret);
	return ret;
}
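
/*
 * Work out how many bytes need to be reclaimed: everything the queued
 * tickets are waiting for, or, with no tickets, enough to bring the
 * space_info back under its expected fill level.
 */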
static inline u64
btrfs_calc_reclaim_metadata_size(struct btrfs_root *root,
				 struct btrfs_space_info *space_info)
{
	struct reserve_ticket *ticket;
	u64 used;
	u64 expected;
	u64 to_reclaim = 0;

	list_for_each_entry(ticket, &space_info->tickets, list)
		to_reclaim += ticket->bytes;
	list_for_each_entry(ticket, &space_info->priority_tickets, list)
		to_reclaim += ticket->bytes;
	if (to_reclaim)
		return to_reclaim;

	to_reclaim = min_t(u64, num_online_cpus() * SZ_1M, SZ_16M);
	if (can_overcommit(root, space_info, to_reclaim,
			   BTRFS_RESERVE_FLUSH_ALL))
		return 0;

	used = space_info->bytes_used + space_info->bytes_reserved +
	       space_info->bytes_pinned + space_info->bytes_readonly +
	       space_info->bytes_may_use;
	if (can_overcommit(root, space_info, SZ_1M, BTRFS_RESERVE_FLUSH_ALL))
		expected = div_factor_fine(space_info->total_bytes, 95);
	else
		expected = div_factor_fine(space_info->total_bytes, 90);

	if (used > expected)
		to_reclaim = used - expected;
	else
		to_reclaim = 0;
	to_reclaim = min(to_reclaim, space_info->bytes_may_use +
				     space_info->bytes_reserved);
	return to_reclaim;
}

static inline int need_do_async_reclaim(struct btrfs_space_info *space_info,
					struct btrfs_root *root, u64 used)
{
	u64 thresh = div_factor_fine(space_info->total_bytes, 98);

	/* If we're just plain full then async reclaim just slows us down. */
	if ((space_info->bytes_used + space_info->bytes_reserved) >= thresh)
		return 0;

	if (!btrfs_calc_reclaim_metadata_size(root, space_info))
		return 0;

	return (used >= thresh && !btrfs_fs_closing(root->fs_info) &&
		!test_bit(BTRFS_FS_STATE_REMOUNTING,
			  &root->fs_info->fs_state));
}

static void wake_all_tickets(struct list_head *head)
{
	struct reserve_ticket *ticket;

	while (!list_empty(head)) {
		ticket = list_first_entry(head, struct reserve_ticket, list);
		list_del_init(&ticket->list);
		ticket->error = -ENOSPC;
		wake_up(&ticket->wait);
	}
}

/*
 * This is for normal flushers, we can wait all goddamned day if we want to. We
 * will loop and continuously try to flush as long as we are making progress.
 * We count progress as clearing off tickets each time we have to loop.
 */
static void btrfs_async_reclaim_metadata_space(struct work_struct *work)
{
	struct btrfs_fs_info *fs_info;
	struct btrfs_space_info *space_info;
	u64 to_reclaim;
	int flush_state;
	int commit_cycles = 0;
	u64 last_tickets_id;

	fs_info = container_of(work, struct btrfs_fs_info, async_reclaim_work);
	space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);

	spin_lock(&space_info->lock);
	to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info->fs_root,
						      space_info);
	if (!to_reclaim) {
		space_info->flush = 0;
		spin_unlock(&space_info->lock);
		return;
	}
	last_tickets_id = space_info->tickets_id;
	spin_unlock(&space_info->lock);

	flush_state = FLUSH_DELAYED_ITEMS_NR;
	do {
		struct reserve_ticket *ticket;
		int ret;

		ret = flush_space(fs_info->fs_root, space_info, to_reclaim,
				  to_reclaim, flush_state);
		spin_lock(&space_info->lock);
		if (list_empty(&space_info->tickets)) {
			space_info->flush = 0;
			spin_unlock(&space_info->lock);
			return;
		}
		to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info->fs_root,
							      space_info);
		ticket = list_first_entry(&space_info->tickets,
					  struct reserve_ticket, list);
		if (last_tickets_id == space_info->tickets_id) {
			flush_state++;
		} else {
			last_tickets_id = space_info->tickets_id;
			flush_state = FLUSH_DELAYED_ITEMS_NR;
			if (commit_cycles)
				commit_cycles--;
		}

		if (flush_state > COMMIT_TRANS) {
			commit_cycles++;
			if (commit_cycles > 2) {
				wake_all_tickets(&space_info->tickets);
				space_info->flush = 0;
			} else {
				flush_state = FLUSH_DELAYED_ITEMS_NR;
			}
		}
		spin_unlock(&space_info->lock);
	} while (flush_state <= COMMIT_TRANS);
}

void btrfs_init_async_reclaim_work(struct work_struct *work)
{
	INIT_WORK(work, btrfs_async_reclaim_metadata_space);
}
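
/*
 * Flush synchronously on behalf of a priority ticket, skipping the
 * delalloc flush states since waiting on delalloc could deadlock a
 * priority flusher.
 */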
static void priority_reclaim_metadata_space(struct btrfs_fs_info *fs_info,
					    struct btrfs_space_info *space_info,
					    struct reserve_ticket *ticket)
{
	u64 to_reclaim;
	int flush_state = FLUSH_DELAYED_ITEMS_NR;

	spin_lock(&space_info->lock);
	to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info->fs_root,
						      space_info);
	if (!to_reclaim) {
		spin_unlock(&space_info->lock);
		return;
	}
	spin_unlock(&space_info->lock);

	do {
		flush_space(fs_info->fs_root, space_info, to_reclaim,
			    to_reclaim, flush_state);
		flush_state++;
		spin_lock(&space_info->lock);
		if (ticket->bytes == 0) {
			spin_unlock(&space_info->lock);
			return;
		}
		spin_unlock(&space_info->lock);

		/*
		 * Priority flushers can't wait on delalloc without
		 * deadlocking.
		 */
		if (flush_state == FLUSH_DELALLOC ||
		    flush_state == FLUSH_DELALLOC_WAIT)
			flush_state = ALLOC_CHUNK;
	} while (flush_state < COMMIT_TRANS);
}
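
/*
 * Sleep until our ticket is fully satisfied or fails. If we were granted
 * only part of the reservation, give the partial amount back to the
 * space_info before returning.
 */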
static int wait_reserve_ticket(struct btrfs_fs_info *fs_info,
			       struct btrfs_space_info *space_info,
			       struct reserve_ticket *ticket, u64 orig_bytes)
{
	DEFINE_WAIT(wait);
	int ret = 0;

	spin_lock(&space_info->lock);
	while (ticket->bytes > 0 && ticket->error == 0) {
		ret = prepare_to_wait_event(&ticket->wait, &wait, TASK_KILLABLE);
		if (ret) {
			ret = -EINTR;
			break;
		}
		spin_unlock(&space_info->lock);

		schedule();

		finish_wait(&ticket->wait, &wait);
		spin_lock(&space_info->lock);
	}
	if (!ret)
		ret = ticket->error;
	if (!list_empty(&ticket->list))
		list_del_init(&ticket->list);
	if (ticket->bytes && ticket->bytes < orig_bytes) {
		u64 num_bytes = orig_bytes - ticket->bytes;

		space_info->bytes_may_use -= num_bytes;
		trace_btrfs_space_reservation(fs_info, "space_info",
					      space_info->flags, num_bytes, 0);
	}
	spin_unlock(&space_info->lock);

	return ret;
}

/**
 * __reserve_metadata_bytes - try to reserve bytes from the block_rsv's space
 * @root - the root we're allocating for
 * @space_info - the space info we want to allocate from
 * @orig_bytes - the number of bytes we want
 * @flush - whether or not we can flush to make our reservation
 *
 * This will reserve orig_bytes number of bytes from the space info associated
 * with the block_rsv. If there is not enough space it will make an attempt to
 * flush out space to make room. It will do this by flushing delalloc if
 * possible or committing the transaction. If flush is 0 then no attempts to
 * regain reservations will be made and this will fail if there is not enough
 * space already.
 */
static int __reserve_metadata_bytes(struct btrfs_root *root,
				    struct btrfs_space_info *space_info,
				    u64 orig_bytes,
				    enum btrfs_reserve_flush_enum flush)
{
	struct reserve_ticket ticket;
	u64 used;
	int ret = 0;

	ASSERT(orig_bytes);
	ASSERT(!current->journal_info || flush != BTRFS_RESERVE_FLUSH_ALL);

	spin_lock(&space_info->lock);
	ret = -ENOSPC;
	used = space_info->bytes_used + space_info->bytes_reserved +
		space_info->bytes_pinned + space_info->bytes_readonly +
		space_info->bytes_may_use;

	/*
	 * If we have enough space then hooray, make our reservation and carry
	 * on. If not see if we can overcommit, and if we can, hooray carry on.
	 * If not things get more complicated.
	 */
	if (used + orig_bytes <= space_info->total_bytes) {
		space_info->bytes_may_use += orig_bytes;
		trace_btrfs_space_reservation(root->fs_info, "space_info",
					      space_info->flags, orig_bytes,
					      1);
		ret = 0;
	} else if (can_overcommit(root, space_info, orig_bytes, flush)) {
		space_info->bytes_may_use += orig_bytes;
		trace_btrfs_space_reservation(root->fs_info, "space_info",
					      space_info->flags, orig_bytes,
					      1);
		ret = 0;
	}

	/*
	 * If we couldn't make a reservation then setup our reservation ticket
	 * and kick the async worker if it's not already running.
	 *
	 * If we are a priority flusher then we just need to add our ticket to
	 * the list and we will do our own flushing further down.
	 */
	if (ret && flush != BTRFS_RESERVE_NO_FLUSH) {
		ticket.bytes = orig_bytes;
		ticket.error = 0;
		init_waitqueue_head(&ticket.wait);
		if (flush == BTRFS_RESERVE_FLUSH_ALL) {
			list_add_tail(&ticket.list, &space_info->tickets);
			if (!space_info->flush) {
				space_info->flush = 1;
				trace_btrfs_trigger_flush(root->fs_info,
							  space_info->flags,
							  orig_bytes, flush,
							  "enospc");
				queue_work(system_unbound_wq,
					   &root->fs_info->async_reclaim_work);
			}
		} else {
			list_add_tail(&ticket.list,
				      &space_info->priority_tickets);
		}
	} else if (!ret && space_info->flags & BTRFS_BLOCK_GROUP_METADATA) {
		used += orig_bytes;
		/*
		 * We will do the space reservation dance during log replay,
		 * which means we won't have fs_info->fs_root set, so don't do
		 * the async reclaim as we will panic.
		 */
		if (!test_bit(BTRFS_FS_LOG_RECOVERING, &root->fs_info->flags) &&
		    need_do_async_reclaim(space_info, root, used) &&
		    !work_busy(&root->fs_info->async_reclaim_work)) {
			trace_btrfs_trigger_flush(root->fs_info,
						  space_info->flags,
						  orig_bytes, flush,
						  "preempt");
			queue_work(system_unbound_wq,
				   &root->fs_info->async_reclaim_work);
		}
	}
	spin_unlock(&space_info->lock);
	if (!ret || flush == BTRFS_RESERVE_NO_FLUSH)
		return ret;

	if (flush == BTRFS_RESERVE_FLUSH_ALL)
		return wait_reserve_ticket(root->fs_info, space_info, &ticket,
					   orig_bytes);

	ret = 0;
	priority_reclaim_metadata_space(root->fs_info, space_info, &ticket);
	spin_lock(&space_info->lock);
	if (ticket.bytes) {
		if (ticket.bytes < orig_bytes) {
			u64 num_bytes = orig_bytes - ticket.bytes;

			space_info->bytes_may_use -= num_bytes;
			trace_btrfs_space_reservation(root->fs_info,
						      "space_info",
						      space_info->flags,
						      num_bytes, 0);
		}
		list_del_init(&ticket.list);
		ret = -ENOSPC;
	}
	spin_unlock(&space_info->lock);
	ASSERT(list_empty(&ticket.list));
	return ret;
}

/**
 * reserve_metadata_bytes - try to reserve bytes from the block_rsv's space
 * @root - the root we're allocating for
 * @block_rsv - the block_rsv we're allocating for
 * @orig_bytes - the number of bytes we want
 * @flush - whether or not we can flush to make our reservation
 *
 * This will reserve orig_bytes number of bytes from the space info associated
 * with the block_rsv. If there is not enough space it will make an attempt to
 * flush out space to make room. It will do this by flushing delalloc if
 * possible or committing the transaction. If flush is 0 then no attempts to
 * regain reservations will be made and this will fail if there is not enough
 * space already.
 */
static int reserve_metadata_bytes(struct btrfs_root *root,
				  struct btrfs_block_rsv *block_rsv,
				  u64 orig_bytes,
				  enum btrfs_reserve_flush_enum flush)
{
	int ret;

	ret = __reserve_metadata_bytes(root, block_rsv->space_info, orig_bytes,
				       flush);
	if (ret == -ENOSPC &&
	    unlikely(root->orphan_cleanup_state == ORPHAN_CLEANUP_STARTED)) {
		struct btrfs_block_rsv *global_rsv =
			&root->fs_info->global_block_rsv;

		if (block_rsv != global_rsv &&
		    !block_rsv_use_bytes(global_rsv, orig_bytes))
			ret = 0;
	}
	if (ret == -ENOSPC)
		trace_btrfs_space_reservation(root->fs_info,
					      "space_info:enospc",
					      block_rsv->space_info->flags,
					      orig_bytes, 1);
	return ret;
}

static struct btrfs_block_rsv *get_block_rsv(
					const struct btrfs_trans_handle *trans,
					const struct btrfs_root *root)
{
	struct btrfs_block_rsv *block_rsv = NULL;

	if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) ||
	    (root == root->fs_info->csum_root && trans->adding_csums) ||
	    (root == root->fs_info->uuid_root))
		block_rsv = trans->block_rsv;

	if (!block_rsv)
		block_rsv = root->block_rsv;

	if (!block_rsv)
		block_rsv = &root->fs_info->empty_block_rsv;

	return block_rsv;
}
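
/* Consume @num_bytes from @block_rsv; returns -ENOSPC if not enough is reserved. */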
static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
			       u64 num_bytes)
{
	int ret = -ENOSPC;

	spin_lock(&block_rsv->lock);
	if (block_rsv->reserved >= num_bytes) {
		block_rsv->reserved -= num_bytes;
		if (block_rsv->reserved < block_rsv->size)
			block_rsv->full = 0;
		ret = 0;
	}
	spin_unlock(&block_rsv->lock);
	return ret;
}

static void block_rsv_add_bytes(struct btrfs_block_rsv *block_rsv,
				u64 num_bytes, int update_size)
{
	spin_lock(&block_rsv->lock);
	block_rsv->reserved += num_bytes;
	if (update_size)
		block_rsv->size += num_bytes;
	else if (block_rsv->reserved >= block_rsv->size)
		block_rsv->full = 1;
	spin_unlock(&block_rsv->lock);
}

int btrfs_cond_migrate_bytes(struct btrfs_fs_info *fs_info,
			     struct btrfs_block_rsv *dest, u64 num_bytes,
			     int min_factor)
{
	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
	u64 min_bytes;

	if (global_rsv->space_info != dest->space_info)
		return -ENOSPC;

	spin_lock(&global_rsv->lock);
	min_bytes = div_factor(global_rsv->size, min_factor);
	if (global_rsv->reserved < min_bytes + num_bytes) {
		spin_unlock(&global_rsv->lock);
		return -ENOSPC;
	}
	global_rsv->reserved -= num_bytes;
	if (global_rsv->reserved < global_rsv->size)
		global_rsv->full = 0;
	spin_unlock(&global_rsv->lock);

	block_rsv_add_bytes(dest, num_bytes, 1);
	return 0;
}

/*
 * This is for space we already have accounted in space_info->bytes_may_use, so
 * basically when we're returning space from block_rsv's.
 */
static void space_info_add_old_bytes(struct btrfs_fs_info *fs_info,
				     struct btrfs_space_info *space_info,
				     u64 num_bytes)
{
	struct reserve_ticket *ticket;
	struct list_head *head;
	u64 used;
	enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_NO_FLUSH;
	bool check_overcommit = false;

	spin_lock(&space_info->lock);
	head = &space_info->priority_tickets;

	/*
	 * If we are over our limit then we need to check and see if we can
	 * overcommit, and if we can't then we just need to free up our space
	 * and not satisfy any requests.
	 */
	used = space_info->bytes_used + space_info->bytes_reserved +
	       space_info->bytes_pinned + space_info->bytes_readonly +
	       space_info->bytes_may_use;
	if (used - num_bytes >= space_info->total_bytes)
		check_overcommit = true;
again:
	while (!list_empty(head) && num_bytes) {
		ticket = list_first_entry(head, struct reserve_ticket,
					  list);
		/*
		 * We use 0 bytes because this space is already reserved, so
		 * adding the ticket space would be a double count.
		 */
		if (check_overcommit &&
		    !can_overcommit(fs_info->extent_root, space_info, 0,
				    flush))
			break;
		if (num_bytes >= ticket->bytes) {
			list_del_init(&ticket->list);
			num_bytes -= ticket->bytes;
			ticket->bytes = 0;
			space_info->tickets_id++;
			wake_up(&ticket->wait);
		} else {
			ticket->bytes -= num_bytes;
			num_bytes = 0;
		}
	}

	if (num_bytes && head == &space_info->priority_tickets) {
		head = &space_info->tickets;
		flush = BTRFS_RESERVE_FLUSH_ALL;
		goto again;
	}
	space_info->bytes_may_use -= num_bytes;
	trace_btrfs_space_reservation(fs_info, "space_info",
				      space_info->flags, num_bytes, 0);
	spin_unlock(&space_info->lock);
}

/*
 * This is for newly allocated space that isn't accounted in
 * space_info->bytes_may_use yet. So if we allocate a chunk or unpin an extent
 * we use this helper.
 */
static void space_info_add_new_bytes(struct btrfs_fs_info *fs_info,
				     struct btrfs_space_info *space_info,
				     u64 num_bytes)
{
	struct reserve_ticket *ticket;
	struct list_head *head = &space_info->priority_tickets;

again:
	while (!list_empty(head) && num_bytes) {
		ticket = list_first_entry(head, struct reserve_ticket,
					  list);
		if (num_bytes >= ticket->bytes) {
			trace_btrfs_space_reservation(fs_info, "space_info",
						      space_info->flags,
						      ticket->bytes, 1);
			list_del_init(&ticket->list);
			num_bytes -= ticket->bytes;
			space_info->bytes_may_use += ticket->bytes;
			ticket->bytes = 0;
			space_info->tickets_id++;
			wake_up(&ticket->wait);
		} else {
			trace_btrfs_space_reservation(fs_info, "space_info",
						      space_info->flags,
						      num_bytes, 1);
			space_info->bytes_may_use += num_bytes;
			ticket->bytes -= num_bytes;
			num_bytes = 0;
		}
	}

	if (num_bytes && head == &space_info->priority_tickets) {
		head = &space_info->tickets;
		goto again;
	}
}

static void block_rsv_release_bytes(struct btrfs_fs_info *fs_info,
				    struct btrfs_block_rsv *block_rsv,
				    struct btrfs_block_rsv *dest, u64 num_bytes)
{
	struct btrfs_space_info *space_info = block_rsv->space_info;

	spin_lock(&block_rsv->lock);
	if (num_bytes == (u64)-1)
		num_bytes = block_rsv->size;
	block_rsv->size -= num_bytes;
	if (block_rsv->reserved >= block_rsv->size) {
		num_bytes = block_rsv->reserved - block_rsv->size;
		block_rsv->reserved = block_rsv->size;
		block_rsv->full = 1;
	} else {
		num_bytes = 0;
	}
	spin_unlock(&block_rsv->lock);

	if (num_bytes > 0) {
		if (dest) {
			spin_lock(&dest->lock);
			if (!dest->full) {
				u64 bytes_to_add;

				bytes_to_add = dest->size - dest->reserved;
				bytes_to_add = min(num_bytes, bytes_to_add);
				dest->reserved += bytes_to_add;
				if (dest->reserved >= dest->size)
					dest->full = 1;
				num_bytes -= bytes_to_add;
			}
			spin_unlock(&dest->lock);
		}
		if (num_bytes)
			space_info_add_old_bytes(fs_info, space_info,
						 num_bytes);
	}
}

int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src,
			    struct btrfs_block_rsv *dst, u64 num_bytes,
			    int update_size)
{
	int ret;

	ret = block_rsv_use_bytes(src, num_bytes);
	if (ret)
		return ret;

	block_rsv_add_bytes(dst, num_bytes, update_size);
	return 0;
}

void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv, unsigned short type)
{
	memset(rsv, 0, sizeof(*rsv));
	spin_lock_init(&rsv->lock);
	rsv->type = type;
}

struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_root *root,
					      unsigned short type)
{
	struct btrfs_block_rsv *block_rsv;
	struct btrfs_fs_info *fs_info = root->fs_info;

	block_rsv = kmalloc(sizeof(*block_rsv), GFP_NOFS);
	if (!block_rsv)
		return NULL;

	btrfs_init_block_rsv(block_rsv, type);
	block_rsv->space_info = __find_space_info(fs_info,
						  BTRFS_BLOCK_GROUP_METADATA);
	return block_rsv;
}

void btrfs_free_block_rsv(struct btrfs_root *root,
			  struct btrfs_block_rsv *rsv)
{
	if (!rsv)
		return;
	btrfs_block_rsv_release(root, rsv, (u64)-1);
	kfree(rsv);
}

void __btrfs_free_block_rsv(struct btrfs_block_rsv *rsv)
{
	kfree(rsv);
}

int btrfs_block_rsv_add(struct btrfs_root *root,
			struct btrfs_block_rsv *block_rsv, u64 num_bytes,
			enum btrfs_reserve_flush_enum flush)
{
	int ret;

	if (num_bytes == 0)
		return 0;

	ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
	if (!ret) {
		block_rsv_add_bytes(block_rsv, num_bytes, 1);
		return 0;
	}

	return ret;
}

int btrfs_block_rsv_check(struct btrfs_root *root,
			  struct btrfs_block_rsv *block_rsv, int min_factor)
{
	u64 num_bytes = 0;
	int ret = -ENOSPC;

	if (!block_rsv)
		return 0;

	spin_lock(&block_rsv->lock);
	num_bytes = div_factor(block_rsv->size, min_factor);
	if (block_rsv->reserved >= num_bytes)
		ret = 0;
	spin_unlock(&block_rsv->lock);

	return ret;
}

int btrfs_block_rsv_refill(struct btrfs_root *root,
			   struct btrfs_block_rsv *block_rsv, u64 min_reserved,
			   enum btrfs_reserve_flush_enum flush)
{
	u64 num_bytes = 0;
	int ret = -ENOSPC;

	if (!block_rsv)
		return 0;

	spin_lock(&block_rsv->lock);
	num_bytes = min_reserved;
	if (block_rsv->reserved >= num_bytes)
		ret = 0;
	else
		num_bytes -= block_rsv->reserved;
	spin_unlock(&block_rsv->lock);

	if (!ret)
		return 0;

	ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
	if (!ret) {
		block_rsv_add_bytes(block_rsv, num_bytes, 0);
		return 0;
	}

	return ret;
}

void btrfs_block_rsv_release(struct btrfs_root *root,
			     struct btrfs_block_rsv *block_rsv,
			     u64 num_bytes)
{
	struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;

	if (global_rsv == block_rsv ||
	    block_rsv->space_info != global_rsv->space_info)
		global_rsv = NULL;
	block_rsv_release_bytes(root->fs_info, block_rsv, global_rsv,
				num_bytes);
}
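
/*
 * Recalculate the size of the global block reserve from the sizes of the
 * extent, checksum and root trees, and top it up from (or return excess
 * to) the metadata space_info.
 */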
static void update_global_block_rsv(struct btrfs_fs_info *fs_info)
{
	struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
	struct btrfs_space_info *sinfo = block_rsv->space_info;
	u64 num_bytes;

	/*
	 * The global block rsv is based on the size of the extent tree, the
	 * checksum tree and the root tree.  If the fs is empty we want to set
	 * it to a minimal amount for safety.
	 */
	num_bytes = btrfs_root_used(&fs_info->extent_root->root_item) +
		btrfs_root_used(&fs_info->csum_root->root_item) +
		btrfs_root_used(&fs_info->tree_root->root_item);
	num_bytes = max_t(u64, num_bytes, SZ_16M);

	spin_lock(&sinfo->lock);
	spin_lock(&block_rsv->lock);

	block_rsv->size = min_t(u64, num_bytes, SZ_512M);

	if (block_rsv->reserved < block_rsv->size) {
		num_bytes = sinfo->bytes_used + sinfo->bytes_pinned +
			sinfo->bytes_reserved + sinfo->bytes_readonly +
			sinfo->bytes_may_use;
		if (sinfo->total_bytes > num_bytes) {
			num_bytes = sinfo->total_bytes - num_bytes;
			num_bytes = min(num_bytes,
					block_rsv->size - block_rsv->reserved);
			block_rsv->reserved += num_bytes;
			sinfo->bytes_may_use += num_bytes;
			trace_btrfs_space_reservation(fs_info, "space_info",
						      sinfo->flags, num_bytes,
						      1);
		}
	} else if (block_rsv->reserved > block_rsv->size) {
		num_bytes = block_rsv->reserved - block_rsv->size;
		sinfo->bytes_may_use -= num_bytes;
		trace_btrfs_space_reservation(fs_info, "space_info",
					      sinfo->flags, num_bytes, 0);
		block_rsv->reserved = block_rsv->size;
	}

	if (block_rsv->reserved == block_rsv->size)
		block_rsv->full = 1;
	else
		block_rsv->full = 0;

	spin_unlock(&block_rsv->lock);
	spin_unlock(&sinfo->lock);
}
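
/*
 * Sizing sketch (hypothetical numbers): with 100M used across the extent,
 * csum and root trees, the max_t()/min_t() pair above behaves like
 * clamp(100M, 16M, 512M) = 100M.  An empty fs is rounded up to the 16M
 * floor, and a very large one is capped at 512M.
 */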
static void init_global_block_rsv(struct btrfs_fs_info *fs_info)
{
	struct btrfs_space_info *space_info;

	space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
	fs_info->chunk_block_rsv.space_info = space_info;

	space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
	fs_info->global_block_rsv.space_info = space_info;
	fs_info->delalloc_block_rsv.space_info = space_info;
	fs_info->trans_block_rsv.space_info = space_info;
	fs_info->empty_block_rsv.space_info = space_info;
	fs_info->delayed_block_rsv.space_info = space_info;

	fs_info->extent_root->block_rsv = &fs_info->global_block_rsv;
	fs_info->csum_root->block_rsv = &fs_info->global_block_rsv;
	fs_info->dev_root->block_rsv = &fs_info->global_block_rsv;
	fs_info->tree_root->block_rsv = &fs_info->global_block_rsv;
	if (fs_info->quota_root)
		fs_info->quota_root->block_rsv = &fs_info->global_block_rsv;
	fs_info->chunk_root->block_rsv = &fs_info->chunk_block_rsv;

	update_global_block_rsv(fs_info);
}

static void release_global_block_rsv(struct btrfs_fs_info *fs_info)
{
	block_rsv_release_bytes(fs_info, &fs_info->global_block_rsv, NULL,
				(u64)-1);
	WARN_ON(fs_info->delalloc_block_rsv.size > 0);
	WARN_ON(fs_info->delalloc_block_rsv.reserved > 0);
	WARN_ON(fs_info->trans_block_rsv.size > 0);
	WARN_ON(fs_info->trans_block_rsv.reserved > 0);
	WARN_ON(fs_info->chunk_block_rsv.size > 0);
	WARN_ON(fs_info->chunk_block_rsv.reserved > 0);
	WARN_ON(fs_info->delayed_block_rsv.size > 0);
	WARN_ON(fs_info->delayed_block_rsv.reserved > 0);
}

void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root)
{
	if (!trans->block_rsv)
		return;

	if (!trans->bytes_reserved)
		return;

	trace_btrfs_space_reservation(root->fs_info, "transaction",
				      trans->transid, trans->bytes_reserved, 0);
	btrfs_block_rsv_release(root, trans->block_rsv, trans->bytes_reserved);
	trans->bytes_reserved = 0;
}

/*
 * To be called after all the new block groups attached to the transaction
 * handle have been created (btrfs_create_pending_block_groups()).
 */
void btrfs_trans_release_chunk_metadata(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;

	if (!trans->chunk_bytes_reserved)
		return;

	WARN_ON_ONCE(!list_empty(&trans->new_bgs));

	block_rsv_release_bytes(fs_info, &fs_info->chunk_block_rsv, NULL,
				trans->chunk_bytes_reserved);
	trans->chunk_bytes_reserved = 0;
}

/* Can only return 0 or -ENOSPC */
int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans,
				  struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	/*
	 * We always use trans->block_rsv here as we will have reserved space
	 * for our orphan when starting the transaction; using get_block_rsv()
	 * here would sometimes make us choose the wrong block rsv, as we could
	 * be doing a reloc inode for a non-refcounted root.
	 */
	struct btrfs_block_rsv *src_rsv = trans->block_rsv;
	struct btrfs_block_rsv *dst_rsv = root->orphan_block_rsv;

	/*
	 * We need to hold space in order to delete our orphan item once we've
	 * added it, so this takes the reservation so we can release it later
	 * when we are truly done with the orphan item.
	 */
	u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);

	trace_btrfs_space_reservation(root->fs_info, "orphan",
				      btrfs_ino(inode), num_bytes, 1);
	return btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, 1);
}

void btrfs_orphan_release_metadata(struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);

	trace_btrfs_space_reservation(root->fs_info, "orphan",
				      btrfs_ino(inode), num_bytes, 0);
	btrfs_block_rsv_release(root, root->orphan_block_rsv, num_bytes);
}
/*
 * btrfs_subvolume_reserve_metadata() - reserve space for subvolume operation
 * root: the root of the parent directory
 * rsv: block reservation
 * items: the number of items that we need to reserve
 * qgroup_reserved: used to return the reserved size in qgroup
 *
 * This function is used to reserve the space for snapshot/subvolume
 * creation and deletion.  Those operations are different from common
 * file/directory operations: they change two fs/file trees and the root
 * tree, and the number of items that the qgroup reserves differs from the
 * free space reservation, so we can not use the space reservation mechanism
 * in start_transaction().
 */
int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
				     struct btrfs_block_rsv *rsv,
				     int items,
				     u64 *qgroup_reserved,
				     bool use_global_rsv)
{
	u64 num_bytes;
	int ret;
	struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;

	if (test_bit(BTRFS_FS_QUOTA_ENABLED, &root->fs_info->flags)) {
		/* One for parent inode, two for dir entries */
		num_bytes = 3 * root->nodesize;
		ret = btrfs_qgroup_reserve_meta(root, num_bytes);
		if (ret)
			return ret;
	} else {
		num_bytes = 0;
	}

	*qgroup_reserved = num_bytes;

	num_bytes = btrfs_calc_trans_metadata_size(root, items);
	rsv->space_info = __find_space_info(root->fs_info,
					    BTRFS_BLOCK_GROUP_METADATA);
	ret = btrfs_block_rsv_add(root, rsv, num_bytes,
				  BTRFS_RESERVE_FLUSH_ALL);

	if (ret == -ENOSPC && use_global_rsv)
		ret = btrfs_block_rsv_migrate(global_rsv, rsv, num_bytes, 1);

	if (ret && *qgroup_reserved)
		btrfs_qgroup_free_meta(root, *qgroup_reserved);

	return ret;
}

void btrfs_subvolume_release_metadata(struct btrfs_root *root,
				      struct btrfs_block_rsv *rsv,
				      u64 qgroup_reserved)
{
	btrfs_block_rsv_release(root, rsv, (u64)-1);
}
/**
 * drop_outstanding_extent - drop an outstanding extent
 * @inode: the inode we're dropping the extent for
 * @num_bytes: the number of bytes we're releasing.
 *
 * This is called when we are freeing up an outstanding extent, either called
 * after an error or after an extent is written.  This will return the number
 * of reserved extents that need to be freed.  This must be called with
 * BTRFS_I(inode)->lock held.
 */
static unsigned drop_outstanding_extent(struct inode *inode, u64 num_bytes)
{
	unsigned drop_inode_space = 0;
	unsigned dropped_extents = 0;
	unsigned num_extents = 0;

	num_extents = (unsigned)div64_u64(num_bytes +
					  BTRFS_MAX_EXTENT_SIZE - 1,
					  BTRFS_MAX_EXTENT_SIZE);
	ASSERT(num_extents);
	ASSERT(BTRFS_I(inode)->outstanding_extents >= num_extents);
	BTRFS_I(inode)->outstanding_extents -= num_extents;

	if (BTRFS_I(inode)->outstanding_extents == 0 &&
	    test_and_clear_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
			       &BTRFS_I(inode)->runtime_flags))
		drop_inode_space = 1;

	/*
	 * If we have at least as many outstanding extents as we have
	 * reserved then we need to leave the reserved extents count alone.
	 */
	if (BTRFS_I(inode)->outstanding_extents >=
	    BTRFS_I(inode)->reserved_extents)
		return drop_inode_space;

	dropped_extents = BTRFS_I(inode)->reserved_extents -
		BTRFS_I(inode)->outstanding_extents;
	BTRFS_I(inode)->reserved_extents -= dropped_extents;
	return dropped_extents + drop_inode_space;
}
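
/*
 * Worked example (hypothetical values, assuming the usual 128M
 * BTRFS_MAX_EXTENT_SIZE): releasing num_bytes = 160M drops
 * (160M + 128M - 1) / 128M = 2 outstanding extents, i.e. the div64_u64()
 * above is a round-up division of the byte count into max-sized extents.
 */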
/**
 * calc_csum_metadata_size - return the amount of metadata space that must be
 *	reserved/freed for the given bytes.
 * @inode: the inode we're manipulating
 * @num_bytes: the number of bytes in question
 * @reserve: 1 if we are reserving space, 0 if we are freeing space
 *
 * This adjusts the number of csum_bytes in the inode and then returns the
 * correct amount of metadata that must either be reserved or freed.  We
 * calculate how many checksums we can fit into one leaf and then divide the
 * number of bytes that will need to be checksummed by this value to figure
 * out how many checksums will be required.  If we are adding bytes then the
 * number may go up and we will return the number of additional bytes that
 * must be reserved.  If it is going down we will return the number of bytes
 * that must be freed.
 *
 * This must be called with BTRFS_I(inode)->lock held.
 */
static u64 calc_csum_metadata_size(struct inode *inode, u64 num_bytes,
				   int reserve)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	u64 old_csums, num_csums;

	if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM &&
	    BTRFS_I(inode)->csum_bytes == 0)
		return 0;

	old_csums = btrfs_csum_bytes_to_leaves(root, BTRFS_I(inode)->csum_bytes);
	if (reserve)
		BTRFS_I(inode)->csum_bytes += num_bytes;
	else
		BTRFS_I(inode)->csum_bytes -= num_bytes;
	num_csums = btrfs_csum_bytes_to_leaves(root, BTRFS_I(inode)->csum_bytes);

	/* No change, no need to reserve more */
	if (old_csums == num_csums)
		return 0;

	if (reserve)
		return btrfs_calc_trans_metadata_size(root,
						      num_csums - old_csums);

	return btrfs_calc_trans_metadata_size(root, old_csums - num_csums);
}
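
/*
 * Worked example (hypothetical values): if the inode's current csum_bytes
 * fits in 3 csum leaves and adding num_bytes pushes that to 5 leaves, a
 * reserve call returns btrfs_calc_trans_metadata_size(root, 5 - 3); a free
 * that shrinks the coverage from 5 leaves back to 3 returns the same amount
 * as the number of bytes to give back.
 */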
int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_block_rsv *block_rsv = &root->fs_info->delalloc_block_rsv;
	u64 to_reserve = 0;
	u64 csum_bytes;
	unsigned nr_extents = 0;
	enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_FLUSH_ALL;
	int ret = 0;
	bool delalloc_lock = true;
	u64 to_free = 0;
	unsigned dropped;
	bool release_extra = false;

	/*
	 * If we are a free space inode we need to not flush since we will be
	 * in the middle of a transaction commit.  We also don't need the
	 * delalloc mutex since we won't race with anybody.  We need this
	 * mostly to make lockdep shut its filthy mouth.
	 *
	 * If we have a transaction open (can happen if we call truncate_block
	 * from truncate), then we need FLUSH_LIMIT so we don't deadlock.
	 */
	if (btrfs_is_free_space_inode(inode)) {
		flush = BTRFS_RESERVE_NO_FLUSH;
		delalloc_lock = false;
	} else if (current->journal_info) {
		flush = BTRFS_RESERVE_FLUSH_LIMIT;
	}

	if (flush != BTRFS_RESERVE_NO_FLUSH &&
	    btrfs_transaction_in_commit(root->fs_info))
		schedule_timeout(1);

	if (delalloc_lock)
		mutex_lock(&BTRFS_I(inode)->delalloc_mutex);

	num_bytes = ALIGN(num_bytes, root->sectorsize);

	spin_lock(&BTRFS_I(inode)->lock);
	nr_extents = (unsigned)div64_u64(num_bytes +
					 BTRFS_MAX_EXTENT_SIZE - 1,
					 BTRFS_MAX_EXTENT_SIZE);
	BTRFS_I(inode)->outstanding_extents += nr_extents;

	nr_extents = 0;
	if (BTRFS_I(inode)->outstanding_extents >
	    BTRFS_I(inode)->reserved_extents)
		nr_extents += BTRFS_I(inode)->outstanding_extents -
			BTRFS_I(inode)->reserved_extents;

	/* We always want to reserve a slot for updating the inode. */
	to_reserve = btrfs_calc_trans_metadata_size(root, nr_extents + 1);
	to_reserve += calc_csum_metadata_size(inode, num_bytes, 1);
	csum_bytes = BTRFS_I(inode)->csum_bytes;
	spin_unlock(&BTRFS_I(inode)->lock);

	if (test_bit(BTRFS_FS_QUOTA_ENABLED, &root->fs_info->flags)) {
		ret = btrfs_qgroup_reserve_meta(root,
						nr_extents * root->nodesize);
		if (ret)
			goto out_fail;
	}

	ret = btrfs_block_rsv_add(root, block_rsv, to_reserve, flush);
	if (unlikely(ret)) {
		btrfs_qgroup_free_meta(root, nr_extents * root->nodesize);
		goto out_fail;
	}

	spin_lock(&BTRFS_I(inode)->lock);
	if (test_and_set_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
			     &BTRFS_I(inode)->runtime_flags)) {
		to_reserve -= btrfs_calc_trans_metadata_size(root, 1);
		release_extra = true;
	}
	BTRFS_I(inode)->reserved_extents += nr_extents;
	spin_unlock(&BTRFS_I(inode)->lock);

	if (delalloc_lock)
		mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);

	if (to_reserve)
		trace_btrfs_space_reservation(root->fs_info, "delalloc",
					      btrfs_ino(inode), to_reserve, 1);
	if (release_extra)
		btrfs_block_rsv_release(root, block_rsv,
					btrfs_calc_trans_metadata_size(root,
								       1));
	return 0;

out_fail:
	spin_lock(&BTRFS_I(inode)->lock);
	dropped = drop_outstanding_extent(inode, num_bytes);
	/*
	 * If the inode's csum_bytes is the same as the original
	 * csum_bytes then we know we haven't raced with any free()ers
	 * so we can just reduce our inode's csum bytes and carry on.
	 */
	if (BTRFS_I(inode)->csum_bytes == csum_bytes) {
		calc_csum_metadata_size(inode, num_bytes, 0);
	} else {
		u64 orig_csum_bytes = BTRFS_I(inode)->csum_bytes;
		u64 bytes;

		/*
		 * This is tricky, but first we need to figure out how much we
		 * freed from any free-ers that occurred during this
		 * reservation, so we reset ->csum_bytes to the csum_bytes
		 * before we dropped our lock, and then call the free for the
		 * number of bytes that were freed while we were trying our
		 * reservation.
		 */
		bytes = csum_bytes - BTRFS_I(inode)->csum_bytes;
		BTRFS_I(inode)->csum_bytes = csum_bytes;
		to_free = calc_csum_metadata_size(inode, bytes, 0);

		/*
		 * Now we need to see how much we would have freed had we not
		 * been making this reservation and our ->csum_bytes were not
		 * artificially inflated.
		 */
		BTRFS_I(inode)->csum_bytes = csum_bytes - num_bytes;
		bytes = csum_bytes - orig_csum_bytes;
		bytes = calc_csum_metadata_size(inode, bytes, 0);

		/*
		 * Now reset ->csum_bytes to what it should be.  If bytes is
		 * more than to_free then we would have freed more space had we
		 * not had an artificially high ->csum_bytes, so we need to
		 * free the remainder.  If bytes is the same or less then we
		 * don't need to do anything, the other free-ers did the
		 * correct thing.
		 */
		BTRFS_I(inode)->csum_bytes = orig_csum_bytes - num_bytes;
		if (bytes > to_free)
			to_free = bytes - to_free;
		else
			to_free = 0;
	}
	spin_unlock(&BTRFS_I(inode)->lock);
	if (dropped)
		to_free += btrfs_calc_trans_metadata_size(root, dropped);

	if (to_free) {
		btrfs_block_rsv_release(root, block_rsv, to_free);
		trace_btrfs_space_reservation(root->fs_info, "delalloc",
					      btrfs_ino(inode), to_free, 0);
	}
	if (delalloc_lock)
		mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
	return ret;
}
/**
 * btrfs_delalloc_release_metadata - release a metadata reservation for an inode
 * @inode: the inode to release the reservation for
 * @num_bytes: the number of bytes we're releasing
 *
 * This will release the metadata reservation for an inode.  This can be called
 * once we complete IO for a given set of bytes to release their metadata
 * reservations.
 */
void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	u64 to_free = 0;
	unsigned dropped;

	num_bytes = ALIGN(num_bytes, root->sectorsize);
	spin_lock(&BTRFS_I(inode)->lock);
	dropped = drop_outstanding_extent(inode, num_bytes);

	if (num_bytes)
		to_free = calc_csum_metadata_size(inode, num_bytes, 0);
	spin_unlock(&BTRFS_I(inode)->lock);
	if (dropped > 0)
		to_free += btrfs_calc_trans_metadata_size(root, dropped);

	if (btrfs_is_testing(root->fs_info))
		return;

	trace_btrfs_space_reservation(root->fs_info, "delalloc",
				      btrfs_ino(inode), to_free, 0);

	btrfs_block_rsv_release(root, &root->fs_info->delalloc_block_rsv,
				to_free);
}
/**
 * btrfs_delalloc_reserve_space - reserve data and metadata space for
 * delalloc
 * @inode: inode we're writing to
 * @start: start range we are writing to
 * @len: the length of the range we are writing to
 *
 * TODO: This function will finally replace old btrfs_delalloc_reserve_space()
 *
 * This will do the following things
 *
 * o reserve space in data space info for num bytes
 *   and reserve precious corresponding qgroup space
 *   (Done in check_data_free_space)
 *
 * o reserve space for metadata space, based on the number of outstanding
 *   extents and how much csums will be needed
 *   also reserve metadata space in a per root over-reserve method.
 * o add to the inodes->delalloc_bytes
 * o add it to the fs_info's delalloc inodes list.
 *   (Above 3 all done in delalloc_reserve_metadata)
 *
 * Return 0 for success
 * Return <0 for error(-ENOSPC or -EDQUOT)
 */
int btrfs_delalloc_reserve_space(struct inode *inode, u64 start, u64 len)
{
	int ret;

	ret = btrfs_check_data_free_space(inode, start, len);
	if (ret < 0)
		return ret;
	ret = btrfs_delalloc_reserve_metadata(inode, len);
	if (ret < 0)
		btrfs_free_reserved_data_space(inode, start, len);
	return ret;
}

/**
 * btrfs_delalloc_release_space - release data and metadata space for delalloc
 * @inode: inode we're releasing space for
 * @start: start position of the space already reserved
 * @len: the length of the space already reserved
 *
 * This must be matched with a call to btrfs_delalloc_reserve_space.  This is
 * called in the case that we don't need the metadata AND data reservations
 * anymore, e.g. after an error or when we insert an inline extent.
 *
 * This function will release the metadata space that was not used and will
 * decrement ->delalloc_bytes and remove it from the fs_info delalloc_inodes
 * list if there are no delalloc bytes left.
 * Also it will handle the qgroup reserved space.
 */
void btrfs_delalloc_release_space(struct inode *inode, u64 start, u64 len)
{
	btrfs_delalloc_release_metadata(inode, len);
	btrfs_free_reserved_data_space(inode, start, len);
}
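
/*
 * Illustrative sketch (hypothetical write path, not part of the original
 * file; do_the_write() is an invented helper): the reserve/release pair
 * brackets a buffered write, and on failure after reserving, the full
 * release undoes both the data and the metadata reservations.
 */
#if 0
static int example_buffered_write(struct inode *inode, u64 start, u64 len)
{
	int ret;

	ret = btrfs_delalloc_reserve_space(inode, start, len);
	if (ret)
		return ret;		/* -ENOSPC or -EDQUOT */
	ret = do_the_write(inode, start, len);	/* hypothetical helper */
	if (ret)
		btrfs_delalloc_release_space(inode, start, len);
	return ret;
}
#endif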
static int update_block_group(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root, u64 bytenr,
			      u64 num_bytes, int alloc)
{
	struct btrfs_block_group_cache *cache = NULL;
	struct btrfs_fs_info *info = root->fs_info;
	u64 total = num_bytes;
	u64 old_val;
	u64 byte_in_group;
	int factor;

	/* block accounting for super block */
	spin_lock(&info->delalloc_root_lock);
	old_val = btrfs_super_bytes_used(info->super_copy);
	if (alloc)
		old_val += num_bytes;
	else
		old_val -= num_bytes;
	btrfs_set_super_bytes_used(info->super_copy, old_val);
	spin_unlock(&info->delalloc_root_lock);

	while (total) {
		cache = btrfs_lookup_block_group(info, bytenr);
		if (!cache)
			return -ENOENT;
		if (cache->flags & (BTRFS_BLOCK_GROUP_DUP |
				    BTRFS_BLOCK_GROUP_RAID1 |
				    BTRFS_BLOCK_GROUP_RAID10))
			factor = 2;
		else
			factor = 1;
		/*
		 * If this block group has free space cache written out, we
		 * need to make sure to load it if we are removing space.  This
		 * is because we need the unpinning stage to actually add the
		 * space back to the block group, otherwise we will leak space.
		 */
		if (!alloc && cache->cached == BTRFS_CACHE_NO)
			cache_block_group(cache, 1);

		byte_in_group = bytenr - cache->key.objectid;
		WARN_ON(byte_in_group > cache->key.offset);

		spin_lock(&cache->space_info->lock);
		spin_lock(&cache->lock);

		if (btrfs_test_opt(root->fs_info, SPACE_CACHE) &&
		    cache->disk_cache_state < BTRFS_DC_CLEAR)
			cache->disk_cache_state = BTRFS_DC_CLEAR;

		old_val = btrfs_block_group_used(&cache->item);
		num_bytes = min(total, cache->key.offset - byte_in_group);
		if (alloc) {
			old_val += num_bytes;
			btrfs_set_block_group_used(&cache->item, old_val);
			cache->reserved -= num_bytes;
			cache->space_info->bytes_reserved -= num_bytes;
			cache->space_info->bytes_used += num_bytes;
			cache->space_info->disk_used += num_bytes * factor;
			spin_unlock(&cache->lock);
			spin_unlock(&cache->space_info->lock);
		} else {
			old_val -= num_bytes;
			btrfs_set_block_group_used(&cache->item, old_val);
			cache->pinned += num_bytes;
			cache->space_info->bytes_pinned += num_bytes;
			cache->space_info->bytes_used -= num_bytes;
			cache->space_info->disk_used -= num_bytes * factor;
			spin_unlock(&cache->lock);
			spin_unlock(&cache->space_info->lock);

			trace_btrfs_space_reservation(root->fs_info, "pinned",
						      cache->space_info->flags,
						      num_bytes, 1);
			set_extent_dirty(info->pinned_extents,
					 bytenr, bytenr + num_bytes - 1,
					 GFP_NOFS | __GFP_NOFAIL);
		}

		spin_lock(&trans->transaction->dirty_bgs_lock);
		if (list_empty(&cache->dirty_list)) {
			list_add_tail(&cache->dirty_list,
				      &trans->transaction->dirty_bgs);
			trans->transaction->num_dirty_bgs++;
			btrfs_get_block_group(cache);
		}
		spin_unlock(&trans->transaction->dirty_bgs_lock);

		/*
		 * No longer have used bytes in this block group, queue it for
		 * deletion.  We do this after adding the block group to the
		 * dirty list to avoid races between cleaner kthread and space
		 * cache writeout.
		 */
		if (!alloc && old_val == 0) {
			spin_lock(&info->unused_bgs_lock);
			if (list_empty(&cache->bg_list)) {
				btrfs_get_block_group(cache);
				list_add_tail(&cache->bg_list,
					      &info->unused_bgs);
			}
			spin_unlock(&info->unused_bgs_lock);
		}
		btrfs_put_block_group(cache);
		total -= num_bytes;
		bytenr += num_bytes;
	}
	return 0;
}
static u64 first_logical_byte(struct btrfs_root *root, u64 search_start)
{
	struct btrfs_block_group_cache *cache;
	u64 bytenr;

	spin_lock(&root->fs_info->block_group_cache_lock);
	bytenr = root->fs_info->first_logical_byte;
	spin_unlock(&root->fs_info->block_group_cache_lock);

	if (bytenr < (u64)-1)
		return bytenr;

	cache = btrfs_lookup_first_block_group(root->fs_info, search_start);
	if (!cache)
		return 0;

	bytenr = cache->key.objectid;
	btrfs_put_block_group(cache);

	return bytenr;
}

static int pin_down_extent(struct btrfs_root *root,
			   struct btrfs_block_group_cache *cache,
			   u64 bytenr, u64 num_bytes, int reserved)
{
	spin_lock(&cache->space_info->lock);
	spin_lock(&cache->lock);
	cache->pinned += num_bytes;
	cache->space_info->bytes_pinned += num_bytes;
	if (reserved) {
		cache->reserved -= num_bytes;
		cache->space_info->bytes_reserved -= num_bytes;
	}
	spin_unlock(&cache->lock);
	spin_unlock(&cache->space_info->lock);

	trace_btrfs_space_reservation(root->fs_info, "pinned",
				      cache->space_info->flags, num_bytes, 1);
	set_extent_dirty(root->fs_info->pinned_extents, bytenr,
			 bytenr + num_bytes - 1, GFP_NOFS | __GFP_NOFAIL);
	return 0;
}
/*
 * this function must be called within transaction
 */
int btrfs_pin_extent(struct btrfs_root *root,
		     u64 bytenr, u64 num_bytes, int reserved)
{
	struct btrfs_block_group_cache *cache;

	cache = btrfs_lookup_block_group(root->fs_info, bytenr);
	BUG_ON(!cache); /* Logic error */

	pin_down_extent(root, cache, bytenr, num_bytes, reserved);

	btrfs_put_block_group(cache);
	return 0;
}
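
/*
 * Usage sketch (hypothetical values): pinning defers reuse of a range until
 * the running transaction commits and btrfs_finish_extent_commit() unpins
 * it, e.g. for a single tree block:
 *
 *	btrfs_pin_extent(root, bytenr, root->nodesize, 1);
 *
 * with reserved=1 when the bytes were already counted in cache->reserved.
 */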
/*
 * this function must be called within transaction
 */
int btrfs_pin_extent_for_log_replay(struct btrfs_root *root,
				    u64 bytenr, u64 num_bytes)
{
	struct btrfs_block_group_cache *cache;
	int ret;

	cache = btrfs_lookup_block_group(root->fs_info, bytenr);
	if (!cache)
		return -EINVAL;

	/*
	 * pull in the free space cache (if any) so that our pin
	 * removes the free space from the cache.  We have load_only set
	 * to one because the slow code to read in the free extents does check
	 * the pinned extents.
	 */
	cache_block_group(cache, 1);

	pin_down_extent(root, cache, bytenr, num_bytes, 0);

	/* remove us from the free space cache (if we're there at all) */
	ret = btrfs_remove_free_space(cache, bytenr, num_bytes);
	btrfs_put_block_group(cache);
	return ret;
}

static int __exclude_logged_extent(struct btrfs_root *root, u64 start, u64 num_bytes)
{
	int ret;
	struct btrfs_block_group_cache *block_group;
	struct btrfs_caching_control *caching_ctl;

	block_group = btrfs_lookup_block_group(root->fs_info, start);
	if (!block_group)
		return -EINVAL;

	cache_block_group(block_group, 0);
	caching_ctl = get_caching_control(block_group);

	if (!caching_ctl) {
		/* Logic error */
		BUG_ON(!block_group_cache_done(block_group));
		ret = btrfs_remove_free_space(block_group, start, num_bytes);
	} else {
		mutex_lock(&caching_ctl->mutex);

		if (start >= caching_ctl->progress) {
			ret = add_excluded_extent(root, start, num_bytes);
		} else if (start + num_bytes <= caching_ctl->progress) {
			ret = btrfs_remove_free_space(block_group,
						      start, num_bytes);
		} else {
			num_bytes = caching_ctl->progress - start;
			ret = btrfs_remove_free_space(block_group,
						      start, num_bytes);
			if (ret)
				goto out_lock;

			num_bytes = (start + num_bytes) -
				caching_ctl->progress;
			start = caching_ctl->progress;
			ret = add_excluded_extent(root, start, num_bytes);
		}
out_lock:
		mutex_unlock(&caching_ctl->mutex);
		put_caching_control(caching_ctl);
	}
	btrfs_put_block_group(block_group);
	return ret;
}

int btrfs_exclude_logged_extents(struct btrfs_root *log,
				 struct extent_buffer *eb)
{
	struct btrfs_file_extent_item *item;
	struct btrfs_key key;
	int found_type;
	int i;

	if (!btrfs_fs_incompat(log->fs_info, MIXED_GROUPS))
		return 0;

	for (i = 0; i < btrfs_header_nritems(eb); i++) {
		btrfs_item_key_to_cpu(eb, &key, i);
		if (key.type != BTRFS_EXTENT_DATA_KEY)
			continue;
		item = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item);
		found_type = btrfs_file_extent_type(eb, item);
		if (found_type == BTRFS_FILE_EXTENT_INLINE)
			continue;
		if (btrfs_file_extent_disk_bytenr(eb, item) == 0)
			continue;
		key.objectid = btrfs_file_extent_disk_bytenr(eb, item);
		key.offset = btrfs_file_extent_disk_num_bytes(eb, item);
		__exclude_logged_extent(log, key.objectid, key.offset);
	}

	return 0;
}
static void
btrfs_inc_block_group_reservations(struct btrfs_block_group_cache *bg)
{
	atomic_inc(&bg->reservations);
}

void btrfs_dec_block_group_reservations(struct btrfs_fs_info *fs_info,
					const u64 start)
{
	struct btrfs_block_group_cache *bg;

	bg = btrfs_lookup_block_group(fs_info, start);
	ASSERT(bg);
	if (atomic_dec_and_test(&bg->reservations))
		wake_up_atomic_t(&bg->reservations);
	btrfs_put_block_group(bg);
}

static int btrfs_wait_bg_reservations_atomic_t(atomic_t *a)
{
	schedule();
	return 0;
}

void btrfs_wait_block_group_reservations(struct btrfs_block_group_cache *bg)
{
	struct btrfs_space_info *space_info = bg->space_info;

	ASSERT(bg->ro);

	if (!(bg->flags & BTRFS_BLOCK_GROUP_DATA))
		return;

	/*
	 * Our block group is read only but before we set it to read only,
	 * some task might have allocated an extent from it already, but it
	 * has not yet created a respective ordered extent (and added it to a
	 * root's list of ordered extents).
	 * Therefore wait for any task currently allocating extents, since the
	 * block group's reservations counter is incremented while a read lock
	 * on the group's semaphore is held and decremented after releasing
	 * the read access on that semaphore and creating the ordered extent.
	 */
	down_write(&space_info->groups_sem);
	up_write(&space_info->groups_sem);

	wait_on_atomic_t(&bg->reservations,
			 btrfs_wait_bg_reservations_atomic_t,
			 TASK_UNINTERRUPTIBLE);
}
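
/*
 * Illustrative pairing (hypothetical allocator-side caller, not part of the
 * original file): the counter manipulated above is bumped around extent
 * allocation and drained before a block group is retired:
 *
 *	btrfs_inc_block_group_reservations(bg);
 *	... allocate an extent, create the ordered extent ...
 *	btrfs_dec_block_group_reservations(fs_info, start);
 *
 * while btrfs_wait_block_group_reservations() blocks until the counter
 * reaches zero.
 */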
/**
 * btrfs_add_reserved_bytes - update the block_group and space info counters
 * @cache:	The cache we are manipulating
 * @ram_bytes:	The number of bytes of file content, which will be the same
 *		as @num_bytes except on the compression path.
 * @num_bytes:	The number of bytes in question
 * @delalloc:	The blocks are allocated for the delalloc write
 *
 * This is called by the allocator when it reserves space.  Metadata
 * reservations should be called with RESERVE_ALLOC so we do the proper
 * ENOSPC accounting.  For data we handle the reservation through clearing the
 * delalloc bits in the io_tree.  We have to do this since we could end up
 * allocating less disk space for the amount of data we have reserved in the
 * case of compression.
 *
 * If this is a reservation and the block group has become read only we cannot
 * make the reservation and return -EAGAIN, otherwise this function always
 * succeeds.
 */
static int btrfs_add_reserved_bytes(struct btrfs_block_group_cache *cache,
				    u64 ram_bytes, u64 num_bytes, int delalloc)
{
	struct btrfs_space_info *space_info = cache->space_info;
	int ret = 0;

	spin_lock(&space_info->lock);
	spin_lock(&cache->lock);
	if (cache->ro) {
		ret = -EAGAIN;
	} else {
		cache->reserved += num_bytes;
		space_info->bytes_reserved += num_bytes;

		trace_btrfs_space_reservation(cache->fs_info,
					      "space_info", space_info->flags,
					      ram_bytes, 0);
		space_info->bytes_may_use -= ram_bytes;
		if (delalloc)
			cache->delalloc_bytes += num_bytes;
	}
	spin_unlock(&cache->lock);
	spin_unlock(&space_info->lock);
	return ret;
}

/**
 * btrfs_free_reserved_bytes - update the block_group and space info counters
 * @cache:	The cache we are manipulating
 * @num_bytes:	The number of bytes in question
 * @delalloc:	The blocks are allocated for the delalloc write
 *
 * This is called by somebody who is freeing space that was never actually used
 * on disk.  For example if you reserve some space for a new leaf in transaction
 * A and before transaction A commits you free that leaf, you call this with
 * reserve set to 0 in order to clear the reservation.
 */
static int btrfs_free_reserved_bytes(struct btrfs_block_group_cache *cache,
				     u64 num_bytes, int delalloc)
{
	struct btrfs_space_info *space_info = cache->space_info;
	int ret = 0;

	spin_lock(&space_info->lock);
	spin_lock(&cache->lock);
	if (cache->ro)
		space_info->bytes_readonly += num_bytes;
	cache->reserved -= num_bytes;
	space_info->bytes_reserved -= num_bytes;

	if (delalloc)
		cache->delalloc_bytes -= num_bytes;
	spin_unlock(&cache->lock);
	spin_unlock(&space_info->lock);
	return ret;
}
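
/*
 * Illustrative pairing (hypothetical, not part of the original file): space
 * taken with btrfs_add_reserved_bytes() is either handed back through
 * btrfs_free_reserved_bytes() when the extent is never written, or
 * converted to used bytes by update_block_group() once it is:
 *
 *	ret = btrfs_add_reserved_bytes(cache, ram_bytes, num_bytes, 0);
 *	if (ret == -EAGAIN)
 *		... the group went read only, pick another one ...
 */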
void btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_caching_control *next;
	struct btrfs_caching_control *caching_ctl;
	struct btrfs_block_group_cache *cache;

	down_write(&fs_info->commit_root_sem);

	list_for_each_entry_safe(caching_ctl, next,
				 &fs_info->caching_block_groups, list) {
		cache = caching_ctl->block_group;
		if (block_group_cache_done(cache)) {
			cache->last_byte_to_unpin = (u64)-1;
			list_del_init(&caching_ctl->list);
			put_caching_control(caching_ctl);
		} else {
			cache->last_byte_to_unpin = caching_ctl->progress;
		}
	}

	if (fs_info->pinned_extents == &fs_info->freed_extents[0])
		fs_info->pinned_extents = &fs_info->freed_extents[1];
	else
		fs_info->pinned_extents = &fs_info->freed_extents[0];

	up_write(&fs_info->commit_root_sem);

	update_global_block_rsv(fs_info);
}
/*
 * Returns the free cluster for the given space info and sets empty_cluster to
 * what it should be based on the mount options.
 */
static struct btrfs_free_cluster *
fetch_cluster_info(struct btrfs_root *root, struct btrfs_space_info *space_info,
		   u64 *empty_cluster)
{
	struct btrfs_free_cluster *ret = NULL;
	bool ssd = btrfs_test_opt(root->fs_info, SSD);

	*empty_cluster = 0;
	if (btrfs_mixed_space_info(space_info))
		return ret;

	if (ssd)
		*empty_cluster = SZ_2M;
	if (space_info->flags & BTRFS_BLOCK_GROUP_METADATA) {
		ret = &root->fs_info->meta_alloc_cluster;
		if (!ssd)
			*empty_cluster = SZ_64K;
	} else if ((space_info->flags & BTRFS_BLOCK_GROUP_DATA) && ssd) {
		ret = &root->fs_info->data_alloc_cluster;
	}

	return ret;
}
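
/*
 * Resulting empty_cluster values from the logic above, for reference:
 * metadata + ssd -> 2M, metadata without ssd -> 64K, data + ssd -> 2M,
 * data without ssd or any mixed space info -> no cluster (0, NULL).
 */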
static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end,
			      const bool return_free_space)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_block_group_cache *cache = NULL;
	struct btrfs_space_info *space_info;
	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
	struct btrfs_free_cluster *cluster = NULL;
	u64 len;
	u64 total_unpinned = 0;
	u64 empty_cluster = 0;
	bool readonly;

	while (start <= end) {
		readonly = false;
		if (!cache ||
		    start >= cache->key.objectid + cache->key.offset) {
			if (cache)
				btrfs_put_block_group(cache);
			total_unpinned = 0;
			cache = btrfs_lookup_block_group(fs_info, start);
			BUG_ON(!cache); /* Logic error */

			cluster = fetch_cluster_info(root,
						     cache->space_info,
						     &empty_cluster);
			empty_cluster <<= 1;
		}

		len = cache->key.objectid + cache->key.offset - start;
		len = min(len, end + 1 - start);

		if (start < cache->last_byte_to_unpin) {
			len = min(len, cache->last_byte_to_unpin - start);
			if (return_free_space)
				btrfs_add_free_space(cache, start, len);
		}

		start += len;
		total_unpinned += len;
		space_info = cache->space_info;

		/*
		 * If this space cluster has been marked as fragmented and we've
		 * unpinned enough in this block group to potentially allow a
		 * cluster to be created inside of it go ahead and clear the
		 * fragmented check.
		 */
		if (cluster && cluster->fragmented &&
		    total_unpinned > empty_cluster) {
			spin_lock(&cluster->lock);
			cluster->fragmented = 0;
			spin_unlock(&cluster->lock);
		}

		spin_lock(&space_info->lock);
		spin_lock(&cache->lock);
		cache->pinned -= len;
		space_info->bytes_pinned -= len;

		trace_btrfs_space_reservation(fs_info, "pinned",
					      space_info->flags, len, 0);
		space_info->max_extent_size = 0;
		percpu_counter_add(&space_info->total_bytes_pinned, -len);
		if (cache->ro) {
			space_info->bytes_readonly += len;
			readonly = true;
		}
		spin_unlock(&cache->lock);
		if (!readonly && return_free_space &&
		    global_rsv->space_info == space_info) {
			u64 to_add = len;

			WARN_ON(!return_free_space);
			spin_lock(&global_rsv->lock);
			if (!global_rsv->full) {
				to_add = min(len, global_rsv->size -
					     global_rsv->reserved);
				global_rsv->reserved += to_add;
				space_info->bytes_may_use += to_add;
				if (global_rsv->reserved >= global_rsv->size)
					global_rsv->full = 1;
				trace_btrfs_space_reservation(fs_info,
							      "space_info",
							      space_info->flags,
							      to_add, 1);
				len -= to_add;
			}
			spin_unlock(&global_rsv->lock);
			/* Add to any tickets we may have */
			if (len)
				space_info_add_new_bytes(fs_info, space_info,
							 len);
		}
		spin_unlock(&space_info->lock);
	}

	if (cache)
		btrfs_put_block_group(cache);
	return 0;
}
int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_block_group_cache *block_group, *tmp;
	struct list_head *deleted_bgs;
	struct extent_io_tree *unpin;
	u64 start;
	u64 end;
	int ret;

	if (fs_info->pinned_extents == &fs_info->freed_extents[0])
		unpin = &fs_info->freed_extents[1];
	else
		unpin = &fs_info->freed_extents[0];

	while (!trans->aborted) {
		mutex_lock(&fs_info->unused_bg_unpin_mutex);
		ret = find_first_extent_bit(unpin, 0, &start, &end,
					    EXTENT_DIRTY, NULL);
		if (ret) {
			mutex_unlock(&fs_info->unused_bg_unpin_mutex);
			break;
		}

		if (btrfs_test_opt(root->fs_info, DISCARD))
			ret = btrfs_discard_extent(root, start,
						   end + 1 - start, NULL);

		clear_extent_dirty(unpin, start, end);
		unpin_extent_range(root, start, end, true);
		mutex_unlock(&fs_info->unused_bg_unpin_mutex);
		cond_resched();
	}

	/*
	 * Transaction is finished.  We don't need the lock anymore.  We
	 * do need to clean up the block groups in case of a transaction
	 * abort.
	 */
	deleted_bgs = &trans->transaction->deleted_bgs;
	list_for_each_entry_safe(block_group, tmp, deleted_bgs, bg_list) {
		u64 trimmed = 0;

		ret = -EROFS;
		if (!trans->aborted)
			ret = btrfs_discard_extent(root,
						   block_group->key.objectid,
						   block_group->key.offset,
						   &trimmed);

		list_del_init(&block_group->bg_list);
		btrfs_put_block_group_trimming(block_group);
		btrfs_put_block_group(block_group);

		if (ret) {
			const char *errstr = btrfs_decode_error(ret);
			btrfs_warn(fs_info,
				   "discard failed while removing blockgroup: errno=%d %s",
				   ret, errstr);
		}
	}

	return 0;
}
static void add_pinned_bytes(struct btrfs_fs_info *fs_info, u64 num_bytes,
			     u64 owner, u64 root_objectid)
{
	struct btrfs_space_info *space_info;
	u64 flags;

	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		if (root_objectid == BTRFS_CHUNK_TREE_OBJECTID)
			flags = BTRFS_BLOCK_GROUP_SYSTEM;
		else
			flags = BTRFS_BLOCK_GROUP_METADATA;
	} else {
		flags = BTRFS_BLOCK_GROUP_DATA;
	}

	space_info = __find_space_info(fs_info, flags);
	BUG_ON(!space_info); /* Logic bug */
	percpu_counter_add(&space_info->total_bytes_pinned, num_bytes);
}
static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       struct btrfs_delayed_ref_node *node, u64 parent,
			       u64 root_objectid, u64 owner_objectid,
			       u64 owner_offset, int refs_to_drop,
			       struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_key key;
	struct btrfs_path *path;
	struct btrfs_fs_info *info = root->fs_info;
	struct btrfs_root *extent_root = info->extent_root;
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	struct btrfs_extent_inline_ref *iref;
	int ret;
	int is_data;
	int extent_slot = 0;
	int found_extent = 0;
	int num_to_del = 1;
	u32 item_size;
	u64 refs;
	u64 bytenr = node->bytenr;
	u64 num_bytes = node->num_bytes;
	int last_ref = 0;
	bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
						 SKINNY_METADATA);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->reada = READA_FORWARD;
	path->leave_spinning = 1;

	is_data = owner_objectid >= BTRFS_FIRST_FREE_OBJECTID;
	BUG_ON(!is_data && refs_to_drop != 1);

	if (is_data)
		skinny_metadata = 0;

	ret = lookup_extent_backref(trans, extent_root, path, &iref,
				    bytenr, num_bytes, parent,
				    root_objectid, owner_objectid,
				    owner_offset);
	if (ret == 0) {
		extent_slot = path->slots[0];
		while (extent_slot >= 0) {
			btrfs_item_key_to_cpu(path->nodes[0], &key,
					      extent_slot);
			if (key.objectid != bytenr)
				break;
			if (key.type == BTRFS_EXTENT_ITEM_KEY &&
			    key.offset == num_bytes) {
				found_extent = 1;
				break;
			}
			if (key.type == BTRFS_METADATA_ITEM_KEY &&
			    key.offset == owner_objectid) {
				found_extent = 1;
				break;
			}
			if (path->slots[0] - extent_slot > 5)
				break;
			extent_slot--;
		}
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
		item_size = btrfs_item_size_nr(path->nodes[0], extent_slot);
		if (found_extent && item_size < sizeof(*ei))
			found_extent = 0;
#endif
		if (!found_extent) {
			BUG_ON(iref);
			ret = remove_extent_backref(trans, extent_root, path,
						    NULL, refs_to_drop,
						    is_data, &last_ref);
			if (ret) {
				btrfs_abort_transaction(trans, ret);
				goto out;
			}
			btrfs_release_path(path);
			path->leave_spinning = 1;

			key.objectid = bytenr;
			key.type = BTRFS_EXTENT_ITEM_KEY;
			key.offset = num_bytes;

			if (!is_data && skinny_metadata) {
				key.type = BTRFS_METADATA_ITEM_KEY;
				key.offset = owner_objectid;
			}

			ret = btrfs_search_slot(trans, extent_root,
						&key, path, -1, 1);
			if (ret > 0 && skinny_metadata && path->slots[0]) {
				/*
				 * Couldn't find our skinny metadata item,
				 * see if we have ye olde extent item.
				 */
				path->slots[0]--;
				btrfs_item_key_to_cpu(path->nodes[0], &key,
						      path->slots[0]);
				if (key.objectid == bytenr &&
				    key.type == BTRFS_EXTENT_ITEM_KEY &&
				    key.offset == num_bytes)
					ret = 0;
			}

			if (ret > 0 && skinny_metadata) {
				skinny_metadata = false;
				key.objectid = bytenr;
				key.type = BTRFS_EXTENT_ITEM_KEY;
				key.offset = num_bytes;
				btrfs_release_path(path);
				ret = btrfs_search_slot(trans, extent_root,
							&key, path, -1, 1);
			}

			if (ret) {
				btrfs_err(info,
					  "umm, got %d back from search, was looking for %llu",
					  ret, bytenr);
				if (ret > 0)
					btrfs_print_leaf(extent_root,
							 path->nodes[0]);
			}
			if (ret < 0) {
				btrfs_abort_transaction(trans, ret);
				goto out;
			}
			extent_slot = path->slots[0];
		}
	} else if (WARN_ON(ret == -ENOENT)) {
		btrfs_print_leaf(extent_root, path->nodes[0]);
		btrfs_err(info,
			  "unable to find ref byte nr %llu parent %llu root %llu owner %llu offset %llu",
			  bytenr, parent, root_objectid, owner_objectid,
			  owner_offset);
		btrfs_abort_transaction(trans, ret);
		goto out;
	} else {
		btrfs_abort_transaction(trans, ret);
		goto out;
	}

	leaf = path->nodes[0];
	item_size = btrfs_item_size_nr(leaf, extent_slot);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (item_size < sizeof(*ei)) {
		BUG_ON(found_extent || extent_slot != path->slots[0]);
		ret = convert_extent_item_v0(trans, extent_root, path,
					     owner_objectid, 0);
		if (ret < 0) {
			btrfs_abort_transaction(trans, ret);
			goto out;
		}

		btrfs_release_path(path);
		path->leave_spinning = 1;

		key.objectid = bytenr;
		key.type = BTRFS_EXTENT_ITEM_KEY;
		key.offset = num_bytes;

		ret = btrfs_search_slot(trans, extent_root, &key, path,
					-1, 1);
		if (ret) {
			btrfs_err(info,
				  "umm, got %d back from search, was looking for %llu",
				  ret, bytenr);
			btrfs_print_leaf(extent_root, path->nodes[0]);
		}
		if (ret < 0) {
			btrfs_abort_transaction(trans, ret);
			goto out;
		}

		extent_slot = path->slots[0];
		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, extent_slot);
	}
#endif
	BUG_ON(item_size < sizeof(*ei));
	ei = btrfs_item_ptr(leaf, extent_slot,
			    struct btrfs_extent_item);
	if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID &&
	    key.type == BTRFS_EXTENT_ITEM_KEY) {
		struct btrfs_tree_block_info *bi;
		BUG_ON(item_size < sizeof(*ei) + sizeof(*bi));
		bi = (struct btrfs_tree_block_info *)(ei + 1);
		WARN_ON(owner_objectid != btrfs_tree_block_level(leaf, bi));
	}

	refs = btrfs_extent_refs(leaf, ei);
	if (refs < refs_to_drop) {
		btrfs_err(info,
			  "trying to drop %d refs but we only have %llu for bytenr %llu",
			  refs_to_drop, refs, bytenr);
		ret = -EINVAL;
		btrfs_abort_transaction(trans, ret);
		goto out;
	}
	refs -= refs_to_drop;

	if (refs > 0) {
		if (extent_op)
			__run_delayed_extent_op(extent_op, leaf, ei);
		/*
		 * In the case of inline back ref, reference count will
		 * be updated by remove_extent_backref
		 */
		if (iref) {
			BUG_ON(!found_extent);
		} else {
			btrfs_set_extent_refs(leaf, ei, refs);
			btrfs_mark_buffer_dirty(leaf);
		}
		if (found_extent) {
			ret = remove_extent_backref(trans, extent_root, path,
						    iref, refs_to_drop,
						    is_data, &last_ref);
			if (ret) {
				btrfs_abort_transaction(trans, ret);
				goto out;
			}
		}
		add_pinned_bytes(root->fs_info, -num_bytes, owner_objectid,
				 root_objectid);
	} else {
		if (found_extent) {
			BUG_ON(is_data && refs_to_drop !=
			       extent_data_ref_count(path, iref));
			if (iref) {
				BUG_ON(path->slots[0] != extent_slot);
			} else {
				BUG_ON(path->slots[0] != extent_slot + 1);
				path->slots[0] = extent_slot;
				num_to_del = 2;
			}
		}

		last_ref = 1;
		ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
				      num_to_del);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto out;
		}
		btrfs_release_path(path);

		if (is_data) {
			ret = btrfs_del_csums(trans, root, bytenr, num_bytes);
			if (ret) {
				btrfs_abort_transaction(trans, ret);
				goto out;
			}
		}

		ret = add_to_free_space_tree(trans, root->fs_info, bytenr,
					     num_bytes);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto out;
		}

		ret = update_block_group(trans, root, bytenr, num_bytes, 0);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto out;
		}
	}
	btrfs_release_path(path);

out:
	btrfs_free_path(path);
	return ret;
}
/*
 * when we free a block, it is possible (and likely) that we free the last
 * delayed ref for that extent as well.  This searches the delayed ref tree for
 * a given extent, and if there are no other delayed refs to be processed, it
 * removes it from the tree.
 */
static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root, u64 bytenr)
{
	struct btrfs_delayed_ref_head *head;
	struct btrfs_delayed_ref_root *delayed_refs;
	int ret = 0;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);
	head = btrfs_find_delayed_ref_head(trans, bytenr);
	if (!head)
		goto out_delayed_unlock;

	spin_lock(&head->lock);
	if (!list_empty(&head->ref_list))
		goto out;

	if (head->extent_op) {
		if (!head->must_insert_reserved)
			goto out;
		btrfs_free_delayed_extent_op(head->extent_op);
		head->extent_op = NULL;
	}

	/*
	 * waiting for the lock here would deadlock.  If someone else has it
	 * locked they are already in the process of dropping it anyway
	 */
	if (!mutex_trylock(&head->mutex))
		goto out;

	/*
	 * at this point we have a head with no other entries.  Go
	 * ahead and process it.
	 */
	head->node.in_tree = 0;
	rb_erase(&head->href_node, &delayed_refs->href_root);

	atomic_dec(&delayed_refs->num_entries);

	/*
	 * we don't take a ref on the node because we're removing it from the
	 * tree, so we just steal the ref the tree was holding.
	 */
	delayed_refs->num_heads--;
	if (head->processing == 0)
		delayed_refs->num_heads_ready--;
	head->processing = 0;
	spin_unlock(&head->lock);
	spin_unlock(&delayed_refs->lock);

	BUG_ON(head->extent_op);
	if (head->must_insert_reserved)
		ret = 1;

	mutex_unlock(&head->mutex);
	btrfs_put_delayed_ref(&head->node);
	return ret;
out:
	spin_unlock(&head->lock);

out_delayed_unlock:
	spin_unlock(&delayed_refs->lock);
	return 0;
}
void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   struct extent_buffer *buf,
			   u64 parent, int last_ref)
{
	int pin = 1;
	int ret;

	if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
		ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
						 buf->start, buf->len,
						 parent, root->root_key.objectid,
						 btrfs_header_level(buf),
						 BTRFS_DROP_DELAYED_REF, NULL);
		BUG_ON(ret); /* -ENOMEM */
	}

	if (!last_ref)
		return;

	if (btrfs_header_generation(buf) == trans->transid) {
		struct btrfs_block_group_cache *cache;

		if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
			ret = check_ref_cleanup(trans, root, buf->start);
			if (!ret)
				goto out;
		}

		cache = btrfs_lookup_block_group(root->fs_info, buf->start);

		if (btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
			pin_down_extent(root, cache, buf->start, buf->len, 1);
			btrfs_put_block_group(cache);
			goto out;
		}

		WARN_ON(test_bit(EXTENT_BUFFER_DIRTY, &buf->bflags));

		btrfs_add_free_space(cache, buf->start, buf->len);
		btrfs_free_reserved_bytes(cache, buf->len, 0);
		btrfs_put_block_group(cache);
		trace_btrfs_reserved_extent_free(root, buf->start, buf->len);
		pin = 0;
	}
out:
	if (pin)
		add_pinned_bytes(root->fs_info, buf->len,
				 btrfs_header_level(buf),
				 root->root_key.objectid);

	/*
	 * Deleting the buffer, clear the corrupt flag since it doesn't matter
	 * anymore.
	 */
	clear_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags);
}

/* Can return -ENOMEM */
int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		      u64 bytenr, u64 num_bytes, u64 parent, u64 root_objectid,
		      u64 owner, u64 offset)
{
	int ret;
	struct btrfs_fs_info *fs_info = root->fs_info;

	if (btrfs_is_testing(fs_info))
		return 0;

	add_pinned_bytes(root->fs_info, num_bytes, owner, root_objectid);

	/*
	 * tree log blocks never actually go into the extent allocation
	 * tree, just update pinning info and exit early.
	 */
	if (root_objectid == BTRFS_TREE_LOG_OBJECTID) {
		WARN_ON(owner >= BTRFS_FIRST_FREE_OBJECTID);
		/* unlocks the pinned mutex */
		btrfs_pin_extent(root, bytenr, num_bytes, 1);
		ret = 0;
	} else if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
						 num_bytes,
						 parent, root_objectid, (int)owner,
						 BTRFS_DROP_DELAYED_REF, NULL);
	} else {
		ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
						 num_bytes,
						 parent, root_objectid, owner,
						 offset, 0,
						 BTRFS_DROP_DELAYED_REF, NULL);
	}
	return ret;
}
/*
 * when we wait for progress in the block group caching, it's because
 * our allocation attempt failed at least once.  So, we must sleep
 * and let some progress happen before we try again.
 *
 * This function will sleep at least once waiting for new free space to
 * show up, and then it will check the block group free space numbers
 * for our min num_bytes.  Another option is to have it go ahead
 * and look in the rbtree for a free extent of a given size, but this
 * is a good start.
 *
 * Callers of this must check if cache->cached == BTRFS_CACHE_ERROR before using
 * any of the information in this block group.
 */
static noinline void
wait_block_group_cache_progress(struct btrfs_block_group_cache *cache,
				u64 num_bytes)
{
	struct btrfs_caching_control *caching_ctl;

	caching_ctl = get_caching_control(cache);
	if (!caching_ctl)
		return;

	wait_event(caching_ctl->wait, block_group_cache_done(cache) ||
		   (cache->free_space_ctl->free_space >= num_bytes));

	put_caching_control(caching_ctl);
}

static noinline int
wait_block_group_cache_done(struct btrfs_block_group_cache *cache)
{
	struct btrfs_caching_control *caching_ctl;
	int ret = 0;

	caching_ctl = get_caching_control(cache);
	if (!caching_ctl)
		return (cache->cached == BTRFS_CACHE_ERROR) ? -EIO : 0;

	wait_event(caching_ctl->wait, block_group_cache_done(cache));
	if (cache->cached == BTRFS_CACHE_ERROR)
		ret = -EIO;
	put_caching_control(caching_ctl);
	return ret;
}
int __get_raid_index(u64 flags)
{
        if (flags & BTRFS_BLOCK_GROUP_RAID10)
                return BTRFS_RAID_RAID10;
        else if (flags & BTRFS_BLOCK_GROUP_RAID1)
                return BTRFS_RAID_RAID1;
        else if (flags & BTRFS_BLOCK_GROUP_DUP)
                return BTRFS_RAID_DUP;
        else if (flags & BTRFS_BLOCK_GROUP_RAID0)
                return BTRFS_RAID_RAID0;
        else if (flags & BTRFS_BLOCK_GROUP_RAID5)
                return BTRFS_RAID_RAID5;
        else if (flags & BTRFS_BLOCK_GROUP_RAID6)
                return BTRFS_RAID_RAID6;

        return BTRFS_RAID_SINGLE; /* BTRFS_BLOCK_GROUP_SINGLE */
}

int get_block_group_index(struct btrfs_block_group_cache *cache)
{
        return __get_raid_index(cache->flags);
}

static const char *btrfs_raid_type_names[BTRFS_NR_RAID_TYPES] = {
        [BTRFS_RAID_RAID10]     = "raid10",
        [BTRFS_RAID_RAID1]      = "raid1",
        [BTRFS_RAID_DUP]        = "dup",
        [BTRFS_RAID_RAID0]      = "raid0",
        [BTRFS_RAID_SINGLE]     = "single",
        [BTRFS_RAID_RAID5]      = "raid5",
        [BTRFS_RAID_RAID6]      = "raid6",
};

static const char *get_raid_name(enum btrfs_raid_types type)
{
        if (type >= BTRFS_NR_RAID_TYPES)
                return NULL;

        return btrfs_raid_type_names[type];
}
enum btrfs_loop_type {
        LOOP_CACHING_NOWAIT = 0,
        LOOP_CACHING_WAIT = 1,
        LOOP_ALLOC_CHUNK = 2,
        LOOP_NO_EMPTY_SIZE = 3,
};
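/*
 * Rough summary of the stages above (paraphrasing the loop comment further
 * down in find_free_extent): the allocator escalates through them in order.
 * LOOP_CACHING_NOWAIT only scans block groups that are at least partially
 * cached, kicking the caching kthreads as it goes; LOOP_CACHING_WAIT searches
 * everything and waits for caching progress; LOOP_ALLOC_CHUNK forces a new
 * chunk allocation; LOOP_NO_EMPTY_SIZE retries with empty_size and
 * empty_cluster forced to 0.
 */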
static inline void
btrfs_lock_block_group(struct btrfs_block_group_cache *cache,
                       int delalloc)
{
        if (delalloc)
                down_read(&cache->data_rwsem);
}

static inline void
btrfs_grab_block_group(struct btrfs_block_group_cache *cache,
                       int delalloc)
{
        btrfs_get_block_group(cache);
        if (delalloc)
                down_read(&cache->data_rwsem);
}

static struct btrfs_block_group_cache *
btrfs_lock_cluster(struct btrfs_block_group_cache *block_group,
                   struct btrfs_free_cluster *cluster,
                   int delalloc)
{
        struct btrfs_block_group_cache *used_bg = NULL;

        spin_lock(&cluster->refill_lock);
        while (1) {
                used_bg = cluster->block_group;
                if (!used_bg)
                        return NULL;

                if (used_bg == block_group)
                        return used_bg;

                btrfs_get_block_group(used_bg);

                if (!delalloc)
                        return used_bg;

                if (down_read_trylock(&used_bg->data_rwsem))
                        return used_bg;

                spin_unlock(&cluster->refill_lock);

                down_read(&used_bg->data_rwsem);

                spin_lock(&cluster->refill_lock);
                if (used_bg == cluster->block_group)
                        return used_bg;

                up_read(&used_bg->data_rwsem);
                btrfs_put_block_group(used_bg);
        }
}
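/*
 * Note on the retry loop above: data_rwsem nests outside refill_lock, so
 * when the trylock fails we must drop the spinlock before sleeping in
 * down_read().  By the time the rwsem is held the cluster may have been
 * re-pointed at a different block group, in which case the reference and
 * the rwsem are released and the dance starts over.  Either NULL is
 * returned, or the cluster's block group with refill_lock still held; if
 * it differs from @block_group it also comes with an extra reference and,
 * for delalloc allocations, its data_rwsem read-locked.
 */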
static inline void
btrfs_release_block_group(struct btrfs_block_group_cache *cache,
                          int delalloc)
{
        if (delalloc)
                up_read(&cache->data_rwsem);
        btrfs_put_block_group(cache);
}
/*
 * walks the btree of allocated extents and finds a hole of a given size.
 * The key ins is changed to record the hole:
 * ins->objectid == start position
 * ins->flags = BTRFS_EXTENT_ITEM_KEY
 * ins->offset == the size of the hole.
 * Any available blocks before search_start are skipped.
 *
 * If there is no suitable free space, we will record the max size of
 * the largest free space extent we found.
 */
static noinline int find_free_extent(struct btrfs_root *orig_root,
                                u64 ram_bytes, u64 num_bytes, u64 empty_size,
                                u64 hint_byte, struct btrfs_key *ins,
                                u64 flags, int delalloc)
{
        int ret = 0;
        struct btrfs_root *root = orig_root->fs_info->extent_root;
        struct btrfs_free_cluster *last_ptr = NULL;
        struct btrfs_block_group_cache *block_group = NULL;
        u64 search_start = 0;
        u64 max_extent_size = 0;
        u64 empty_cluster = 0;
        struct btrfs_space_info *space_info;
        int loop = 0;
        int index = __get_raid_index(flags);
        bool failed_cluster_refill = false;
        bool failed_alloc = false;
        bool use_cluster = true;
        bool have_caching_bg = false;
        bool orig_have_caching_bg = false;
        bool full_search = false;

        WARN_ON(num_bytes < root->sectorsize);
        ins->type = BTRFS_EXTENT_ITEM_KEY;
        ins->objectid = 0;
        ins->offset = 0;

        trace_find_free_extent(orig_root, num_bytes, empty_size, flags);

        space_info = __find_space_info(root->fs_info, flags);
        if (!space_info) {
                btrfs_err(root->fs_info, "No space info for %llu", flags);
                return -ENOSPC;
        }

        /*
         * If our free space is heavily fragmented we may not be able to make
         * big contiguous allocations, so instead of doing the expensive search
         * for free space, simply return ENOSPC with our max_extent_size so we
         * can go ahead and search for a more manageable chunk.
         *
         * If our max_extent_size is large enough for our allocation simply
         * disable clustering since we will likely not be able to find enough
         * space to create a cluster and induce latency trying.
         */
        if (unlikely(space_info->max_extent_size)) {
                spin_lock(&space_info->lock);
                if (space_info->max_extent_size &&
                    num_bytes > space_info->max_extent_size) {
                        ins->offset = space_info->max_extent_size;
                        spin_unlock(&space_info->lock);
                        return -ENOSPC;
                } else if (space_info->max_extent_size) {
                        use_cluster = false;
                }
                spin_unlock(&space_info->lock);
        }

        last_ptr = fetch_cluster_info(orig_root, space_info, &empty_cluster);
        if (last_ptr) {
                spin_lock(&last_ptr->lock);
                if (last_ptr->block_group)
                        hint_byte = last_ptr->window_start;
                if (last_ptr->fragmented) {
                        /*
                         * We still set window_start so we can keep track of the
                         * last place we found an allocation to try and save
                         * some time.
                         */
                        hint_byte = last_ptr->window_start;
                        use_cluster = false;
                }
                spin_unlock(&last_ptr->lock);
        }

        search_start = max(search_start, first_logical_byte(root, 0));
        search_start = max(search_start, hint_byte);
        if (search_start == hint_byte) {
                block_group = btrfs_lookup_block_group(root->fs_info,
                                                       search_start);
                /*
                 * we don't want to use the block group if it doesn't match our
                 * allocation bits, or if it's not cached.
                 *
                 * However if we are re-searching with an ideal block group
                 * picked out then we don't care that the block group is cached.
                 */
                if (block_group && block_group_bits(block_group, flags) &&
                    block_group->cached != BTRFS_CACHE_NO) {
                        down_read(&space_info->groups_sem);
                        if (list_empty(&block_group->list) ||
                            block_group->ro) {
                                /*
                                 * someone is removing this block group,
                                 * we can't jump into the have_block_group
                                 * target because our list pointers are not
                                 * valid
                                 */
                                btrfs_put_block_group(block_group);
                                up_read(&space_info->groups_sem);
                        } else {
                                index = get_block_group_index(block_group);
                                btrfs_lock_block_group(block_group, delalloc);
                                goto have_block_group;
                        }
                } else if (block_group) {
                        btrfs_put_block_group(block_group);
                }
        }
search:
        have_caching_bg = false;
        if (index == 0 || index == __get_raid_index(flags))
                full_search = true;
        down_read(&space_info->groups_sem);
        list_for_each_entry(block_group, &space_info->block_groups[index],
                            list) {
                u64 offset;
                int cached;

                btrfs_grab_block_group(block_group, delalloc);
                search_start = block_group->key.objectid;

                /*
                 * this can happen if we end up cycling through all the
                 * raid types, but we want to make sure we only allocate
                 * for the proper type.
                 */
                if (!block_group_bits(block_group, flags)) {
                        u64 extra = BTRFS_BLOCK_GROUP_DUP |
                                BTRFS_BLOCK_GROUP_RAID1 |
                                BTRFS_BLOCK_GROUP_RAID5 |
                                BTRFS_BLOCK_GROUP_RAID6 |
                                BTRFS_BLOCK_GROUP_RAID10;

                        /*
                         * if they asked for extra copies and this block group
                         * doesn't provide them, bail.  This does allow us to
                         * fill raid0 from raid1.
                         */
                        if ((flags & extra) && !(block_group->flags & extra))
                                goto loop;
                }

have_block_group:
                cached = block_group_cache_done(block_group);
                if (unlikely(!cached)) {
                        have_caching_bg = true;
                        ret = cache_block_group(block_group, 0);
                        BUG_ON(ret < 0);
                        ret = 0;
                }

                if (unlikely(block_group->cached == BTRFS_CACHE_ERROR))
                        goto loop;
                if (unlikely(block_group->ro))
                        goto loop;

                /*
                 * Ok we want to try and use the cluster allocator, so
                 * let's look there
                 */
                if (last_ptr && use_cluster) {
                        struct btrfs_block_group_cache *used_block_group;
                        unsigned long aligned_cluster;
                        /*
                         * the refill lock keeps out other
                         * people trying to start a new cluster
                         */
                        used_block_group = btrfs_lock_cluster(block_group,
                                                              last_ptr,
                                                              delalloc);
                        if (!used_block_group)
                                goto refill_cluster;

                        if (used_block_group != block_group &&
                            (used_block_group->ro ||
                             !block_group_bits(used_block_group, flags)))
                                goto release_cluster;

                        offset = btrfs_alloc_from_cluster(used_block_group,
                                                last_ptr,
                                                num_bytes,
                                                used_block_group->key.objectid,
                                                &max_extent_size);
                        if (offset) {
                                /* we have a block, we're done */
                                spin_unlock(&last_ptr->refill_lock);
                                trace_btrfs_reserve_extent_cluster(root,
                                                used_block_group,
                                                search_start, num_bytes);
                                if (used_block_group != block_group) {
                                        btrfs_release_block_group(block_group,
                                                                  delalloc);
                                        block_group = used_block_group;
                                }
                                goto checks;
                        }

                        WARN_ON(last_ptr->block_group != used_block_group);
release_cluster:
                        /* If we are on LOOP_NO_EMPTY_SIZE, we can't
                         * set up a new cluster, so let's just skip it
                         * and let the allocator find whatever block
                         * it can find. If we reach this point, we
                         * will have tried the cluster allocator
                         * plenty of times and not have found
                         * anything, so we are likely way too
                         * fragmented for the clustering stuff to find
                         * anything.
                         *
                         * However, if the cluster is taken from the
                         * current block group, release the cluster
                         * first, so that we stand a better chance of
                         * succeeding in the unclustered
                         * allocation.  */
                        if (loop >= LOOP_NO_EMPTY_SIZE &&
                            used_block_group != block_group) {
                                spin_unlock(&last_ptr->refill_lock);
                                btrfs_release_block_group(used_block_group,
                                                          delalloc);
                                goto unclustered_alloc;
                        }

                        /*
                         * this cluster didn't work out, free it and
                         * start over
                         */
                        btrfs_return_cluster_to_free_space(NULL, last_ptr);

                        if (used_block_group != block_group)
                                btrfs_release_block_group(used_block_group,
                                                          delalloc);
refill_cluster:
                        if (loop >= LOOP_NO_EMPTY_SIZE) {
                                spin_unlock(&last_ptr->refill_lock);
                                goto unclustered_alloc;
                        }

                        aligned_cluster = max_t(unsigned long,
                                                empty_cluster + empty_size,
                                                block_group->full_stripe_len);

                        /* allocate a cluster in this block group */
                        ret = btrfs_find_space_cluster(root, block_group,
                                                       last_ptr, search_start,
                                                       num_bytes,
                                                       aligned_cluster);
                        if (ret == 0) {
                                /*
                                 * now pull our allocation out of this
                                 * cluster
                                 */
                                offset = btrfs_alloc_from_cluster(block_group,
                                                        last_ptr,
                                                        num_bytes,
                                                        search_start,
                                                        &max_extent_size);
                                if (offset) {
                                        /* we found one, proceed */
                                        spin_unlock(&last_ptr->refill_lock);
                                        trace_btrfs_reserve_extent_cluster(root,
                                                block_group, search_start,
                                                num_bytes);
                                        goto checks;
                                }
                        } else if (!cached && loop > LOOP_CACHING_NOWAIT
                                   && !failed_cluster_refill) {
                                spin_unlock(&last_ptr->refill_lock);

                                failed_cluster_refill = true;
                                wait_block_group_cache_progress(block_group,
                                        num_bytes + empty_cluster + empty_size);
                                goto have_block_group;
                        }

                        /*
                         * at this point we either didn't find a cluster
                         * or we weren't able to allocate a block from our
                         * cluster.  Free the cluster we've been trying
                         * to use, and go to the next block group
                         */
                        btrfs_return_cluster_to_free_space(NULL, last_ptr);
                        spin_unlock(&last_ptr->refill_lock);
                        goto loop;
                }

unclustered_alloc:
                /*
                 * We are doing an unclustered alloc, set the fragmented flag so
                 * we don't bother trying to set up a cluster again until we get
                 * more space.
                 */
                if (unlikely(last_ptr)) {
                        spin_lock(&last_ptr->lock);
                        last_ptr->fragmented = 1;
                        spin_unlock(&last_ptr->lock);
                }
                spin_lock(&block_group->free_space_ctl->tree_lock);
                if (cached &&
                    block_group->free_space_ctl->free_space <
                    num_bytes + empty_cluster + empty_size) {
                        if (block_group->free_space_ctl->free_space >
                            max_extent_size)
                                max_extent_size =
                                        block_group->free_space_ctl->free_space;
                        spin_unlock(&block_group->free_space_ctl->tree_lock);
                        goto loop;
                }
                spin_unlock(&block_group->free_space_ctl->tree_lock);

                offset = btrfs_find_space_for_alloc(block_group, search_start,
                                                    num_bytes, empty_size,
                                                    &max_extent_size);
                /*
                 * If we didn't find a chunk, and we haven't failed on this
                 * block group before, and this block group is in the middle of
                 * caching and we are ok with waiting, then go ahead and wait
                 * for progress to be made, and set failed_alloc to true.
                 *
                 * If failed_alloc is true then we've already waited on this
                 * block group once and should move on to the next block group.
                 */
                if (!offset && !failed_alloc && !cached &&
                    loop > LOOP_CACHING_NOWAIT) {
                        wait_block_group_cache_progress(block_group,
                                                num_bytes + empty_size);
                        failed_alloc = true;
                        goto have_block_group;
                } else if (!offset) {
                        goto loop;
                }
checks:
                search_start = ALIGN(offset, root->stripesize);

                /* move on to the next group */
                if (search_start + num_bytes >
                    block_group->key.objectid + block_group->key.offset) {
                        btrfs_add_free_space(block_group, offset, num_bytes);
                        goto loop;
                }

                if (offset < search_start)
                        btrfs_add_free_space(block_group, offset,
                                             search_start - offset);
                BUG_ON(offset > search_start);

                ret = btrfs_add_reserved_bytes(block_group, ram_bytes,
                                               num_bytes, delalloc);
                if (ret == -EAGAIN) {
                        btrfs_add_free_space(block_group, offset, num_bytes);
                        goto loop;
                }
                btrfs_inc_block_group_reservations(block_group);

                /* we are all good, let's return */
                ins->objectid = search_start;
                ins->offset = num_bytes;

                trace_btrfs_reserve_extent(orig_root, block_group,
                                           search_start, num_bytes);
                btrfs_release_block_group(block_group, delalloc);
                break;
loop:
                failed_cluster_refill = false;
                failed_alloc = false;
                BUG_ON(index != get_block_group_index(block_group));
                btrfs_release_block_group(block_group, delalloc);
        }
        up_read(&space_info->groups_sem);

        if ((loop == LOOP_CACHING_NOWAIT) && have_caching_bg
                && !orig_have_caching_bg)
                orig_have_caching_bg = true;

        if (!ins->objectid && loop >= LOOP_CACHING_WAIT && have_caching_bg)
                goto search;

        if (!ins->objectid && ++index < BTRFS_NR_RAID_TYPES)
                goto search;

        /*
         * LOOP_CACHING_NOWAIT, search partially cached block groups, kicking
         *                      caching kthreads as we move along
         * LOOP_CACHING_WAIT, search everything, and wait if our bg is caching
         * LOOP_ALLOC_CHUNK, force a chunk allocation and try again
         * LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try
         *                      again
         */
        if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE) {
                index = 0;
                if (loop == LOOP_CACHING_NOWAIT) {
                        /*
                         * We want to skip the LOOP_CACHING_WAIT step if we
                         * don't have any uncached bgs and we've already done a
                         * full search through.
                         */
                        if (orig_have_caching_bg || !full_search)
                                loop = LOOP_CACHING_WAIT;
                        else
                                loop = LOOP_ALLOC_CHUNK;
                } else {
                        loop++;
                }

                if (loop == LOOP_ALLOC_CHUNK) {
                        struct btrfs_trans_handle *trans;
                        int exist = 0;

                        trans = current->journal_info;
                        if (trans)
                                exist = 1;
                        else
                                trans = btrfs_join_transaction(root);

                        if (IS_ERR(trans)) {
                                ret = PTR_ERR(trans);
                                goto out;
                        }

                        ret = do_chunk_alloc(trans, root, flags,
                                             CHUNK_ALLOC_FORCE);

                        /*
                         * If we can't allocate a new chunk we've already looped
                         * through at least once, move on to the NO_EMPTY_SIZE
                         * case.
                         */
                        if (ret == -ENOSPC)
                                loop = LOOP_NO_EMPTY_SIZE;

                        /*
                         * Do not bail out on ENOSPC since we
                         * can do more things.
                         */
                        if (ret < 0 && ret != -ENOSPC)
                                btrfs_abort_transaction(trans, ret);
                        else
                                ret = 0;
                        if (!exist)
                                btrfs_end_transaction(trans, root);
                        if (ret)
                                goto out;
                }

                if (loop == LOOP_NO_EMPTY_SIZE) {
                        /*
                         * Don't loop again if we already have no empty_size and
                         * no empty_cluster.
                         */
                        if (empty_size == 0 &&
                            empty_cluster == 0) {
                                ret = -ENOSPC;
                                goto out;
                        }
                        empty_size = 0;
                        empty_cluster = 0;
                }

                goto search;
        } else if (!ins->objectid) {
                ret = -ENOSPC;
        } else if (ins->objectid) {
                if (!use_cluster && last_ptr) {
                        spin_lock(&last_ptr->lock);
                        last_ptr->window_start = ins->objectid;
                        spin_unlock(&last_ptr->lock);
                }
                ret = 0;
        }
out:
        if (ret == -ENOSPC) {
                spin_lock(&space_info->lock);
                space_info->max_extent_size = max_extent_size;
                spin_unlock(&space_info->lock);
                ins->offset = max_extent_size;
        }
        return ret;
}
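/*
 * Illustrative sketch (not from the original source): a successful call
 * roughly like
 *
 *        struct btrfs_key ins;
 *        ret = find_free_extent(root, 16384, 16384, 0, 0, &ins,
 *                               BTRFS_BLOCK_GROUP_METADATA, 0);
 *
 * leaves ins.objectid == the start of the reserved extent and
 * ins.offset == its size, while on -ENOSPC ins.offset instead carries
 * max_extent_size as a hint for how far the caller should shrink the
 * request before retrying (which is what btrfs_reserve_extent() below does).
 */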
static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
                            int dump_block_groups)
{
        struct btrfs_block_group_cache *cache;
        int index = 0;

        spin_lock(&info->lock);
        printk(KERN_INFO "BTRFS: space_info %llu has %llu free, is %sfull\n",
               info->flags,
               info->total_bytes - info->bytes_used - info->bytes_pinned -
               info->bytes_reserved - info->bytes_readonly -
               info->bytes_may_use, (info->full) ? "" : "not ");
        printk(KERN_INFO "BTRFS: space_info total=%llu, used=%llu, pinned=%llu, "
               "reserved=%llu, may_use=%llu, readonly=%llu\n",
               info->total_bytes, info->bytes_used, info->bytes_pinned,
               info->bytes_reserved, info->bytes_may_use,
               info->bytes_readonly);
        spin_unlock(&info->lock);

        if (!dump_block_groups)
                return;

        down_read(&info->groups_sem);
again:
        list_for_each_entry(cache, &info->block_groups[index], list) {
                spin_lock(&cache->lock);
                printk(KERN_INFO "BTRFS: "
                       "block group %llu has %llu bytes, "
                       "%llu used %llu pinned %llu reserved %s\n",
                       cache->key.objectid, cache->key.offset,
                       btrfs_block_group_used(&cache->item), cache->pinned,
                       cache->reserved, cache->ro ? "[readonly]" : "");
                btrfs_dump_free_space(cache, bytes);
                spin_unlock(&cache->lock);
        }
        if (++index < BTRFS_NR_RAID_TYPES)
                goto again;
        up_read(&info->groups_sem);
}
int btrfs_reserve_extent(struct btrfs_root *root, u64 ram_bytes,
                         u64 num_bytes, u64 min_alloc_size,
                         u64 empty_size, u64 hint_byte,
                         struct btrfs_key *ins, int is_data, int delalloc)
{
        bool final_tried = num_bytes == min_alloc_size;
        u64 flags;
        int ret;

        flags = btrfs_get_alloc_profile(root, is_data);
again:
        WARN_ON(num_bytes < root->sectorsize);
        ret = find_free_extent(root, ram_bytes, num_bytes, empty_size,
                               hint_byte, ins, flags, delalloc);
        if (!ret && !is_data) {
                btrfs_dec_block_group_reservations(root->fs_info,
                                                   ins->objectid);
        } else if (ret == -ENOSPC) {
                if (!final_tried && ins->offset) {
                        num_bytes = min(num_bytes >> 1, ins->offset);
                        num_bytes = round_down(num_bytes, root->sectorsize);
                        num_bytes = max(num_bytes, min_alloc_size);
                        ram_bytes = num_bytes;
                        if (num_bytes == min_alloc_size)
                                final_tried = true;
                        goto again;
                } else if (btrfs_test_opt(root->fs_info, ENOSPC_DEBUG)) {
                        struct btrfs_space_info *sinfo;

                        sinfo = __find_space_info(root->fs_info, flags);
                        btrfs_err(root->fs_info,
                                  "allocation failed flags %llu, wanted %llu",
                                  flags, num_bytes);
                        if (sinfo)
                                dump_space_info(sinfo, num_bytes, 1);
                }
        }

        return ret;
}
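/*
 * Worked example of the -ENOSPC shrink loop above (assuming a 4KiB
 * sectorsize; the numbers are illustrative): asking for num_bytes = 1MiB
 * with min_alloc_size = 64KiB while the largest free extent is ~300KiB
 * makes find_free_extent() fail with ins->offset = 300KiB.  The retry then
 * uses min(1MiB >> 1, 300KiB) = 300KiB, rounded down to the sectorsize,
 * and searches again; only once num_bytes has shrunk all the way to
 * min_alloc_size does final_tried stop further retries.
 */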
static int __btrfs_free_reserved_extent(struct btrfs_root *root,
                                        u64 start, u64 len,
                                        int pin, int delalloc)
{
        struct btrfs_block_group_cache *cache;
        int ret = 0;

        cache = btrfs_lookup_block_group(root->fs_info, start);
        if (!cache) {
                btrfs_err(root->fs_info, "Unable to find block group for %llu",
                          start);
                return -ENOSPC;
        }

        if (pin)
                pin_down_extent(root, cache, start, len, 1);
        else {
                if (btrfs_test_opt(root->fs_info, DISCARD))
                        ret = btrfs_discard_extent(root, start, len, NULL);
                btrfs_add_free_space(cache, start, len);
                btrfs_free_reserved_bytes(cache, len, delalloc);
                trace_btrfs_reserved_extent_free(root, start, len);
        }

        btrfs_put_block_group(cache);
        return ret;
}

int btrfs_free_reserved_extent(struct btrfs_root *root,
                               u64 start, u64 len, int delalloc)
{
        return __btrfs_free_reserved_extent(root, start, len, 0, delalloc);
}

int btrfs_free_and_pin_reserved_extent(struct btrfs_root *root,
                                       u64 start, u64 len)
{
        return __btrfs_free_reserved_extent(root, start, len, 1, 0);
}
static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
                                      struct btrfs_root *root,
                                      u64 parent, u64 root_objectid,
                                      u64 flags, u64 owner, u64 offset,
                                      struct btrfs_key *ins, int ref_mod)
{
        int ret;
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct btrfs_extent_item *extent_item;
        struct btrfs_extent_inline_ref *iref;
        struct btrfs_path *path;
        struct extent_buffer *leaf;
        int type;
        u32 size;

        if (parent > 0)
                type = BTRFS_SHARED_DATA_REF_KEY;
        else
                type = BTRFS_EXTENT_DATA_REF_KEY;

        size = sizeof(*extent_item) + btrfs_extent_inline_ref_size(type);

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        path->leave_spinning = 1;
        ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
                                      ins, size);
        if (ret) {
                btrfs_free_path(path);
                return ret;
        }

        leaf = path->nodes[0];
        extent_item = btrfs_item_ptr(leaf, path->slots[0],
                                     struct btrfs_extent_item);
        btrfs_set_extent_refs(leaf, extent_item, ref_mod);
        btrfs_set_extent_generation(leaf, extent_item, trans->transid);
        btrfs_set_extent_flags(leaf, extent_item,
                               flags | BTRFS_EXTENT_FLAG_DATA);

        iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
        btrfs_set_extent_inline_ref_type(leaf, iref, type);
        if (parent > 0) {
                struct btrfs_shared_data_ref *ref;
                ref = (struct btrfs_shared_data_ref *)(iref + 1);
                btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
                btrfs_set_shared_data_ref_count(leaf, ref, ref_mod);
        } else {
                struct btrfs_extent_data_ref *ref;
                ref = (struct btrfs_extent_data_ref *)(&iref->offset);
                btrfs_set_extent_data_ref_root(leaf, ref, root_objectid);
                btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
                btrfs_set_extent_data_ref_offset(leaf, ref, offset);
                btrfs_set_extent_data_ref_count(leaf, ref, ref_mod);
        }

        btrfs_mark_buffer_dirty(path->nodes[0]);
        btrfs_free_path(path);

        ret = remove_from_free_space_tree(trans, fs_info, ins->objectid,
                                          ins->offset);
        if (ret)
                return ret;

        ret = update_block_group(trans, root, ins->objectid, ins->offset, 1);
        if (ret) { /* -ENOENT, logic error */
                btrfs_err(fs_info, "update block group failed for %llu %llu",
                          ins->objectid, ins->offset);
                BUG();
        }
        trace_btrfs_reserved_extent_alloc(root, ins->objectid, ins->offset);
        return ret;
}
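/*
 * Rough sketch of the item the function above lays down in the extent tree,
 * for the two possible inline ref types:
 *
 *        parent == 0 (ref keyed by owning root/inode/offset):
 *        [ btrfs_extent_item | inline ref: EXTENT_DATA_REF + btrfs_extent_data_ref ]
 *
 *        parent > 0 (ref shared via the parent tree block):
 *        [ btrfs_extent_item | inline ref: SHARED_DATA_REF + btrfs_shared_data_ref ]
 *
 * In both cases the ref count inside the inline ref starts at ref_mod.
 */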
static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
                                     struct btrfs_root *root,
                                     u64 parent, u64 root_objectid,
                                     u64 flags, struct btrfs_disk_key *key,
                                     int level, struct btrfs_key *ins)
{
        int ret;
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct btrfs_extent_item *extent_item;
        struct btrfs_tree_block_info *block_info;
        struct btrfs_extent_inline_ref *iref;
        struct btrfs_path *path;
        struct extent_buffer *leaf;
        u32 size = sizeof(*extent_item) + sizeof(*iref);
        u64 num_bytes = ins->offset;
        bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
                                                 SKINNY_METADATA);

        if (!skinny_metadata)
                size += sizeof(*block_info);

        path = btrfs_alloc_path();
        if (!path) {
                btrfs_free_and_pin_reserved_extent(root, ins->objectid,
                                                   root->nodesize);
                return -ENOMEM;
        }

        path->leave_spinning = 1;
        ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
                                      ins, size);
        if (ret) {
                btrfs_free_path(path);
                btrfs_free_and_pin_reserved_extent(root, ins->objectid,
                                                   root->nodesize);
                return ret;
        }

        leaf = path->nodes[0];
        extent_item = btrfs_item_ptr(leaf, path->slots[0],
                                     struct btrfs_extent_item);
        btrfs_set_extent_refs(leaf, extent_item, 1);
        btrfs_set_extent_generation(leaf, extent_item, trans->transid);
        btrfs_set_extent_flags(leaf, extent_item,
                               flags | BTRFS_EXTENT_FLAG_TREE_BLOCK);

        if (skinny_metadata) {
                iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
                num_bytes = root->nodesize;
        } else {
                block_info = (struct btrfs_tree_block_info *)(extent_item + 1);
                btrfs_set_tree_block_key(leaf, block_info, key);
                btrfs_set_tree_block_level(leaf, block_info, level);
                iref = (struct btrfs_extent_inline_ref *)(block_info + 1);
        }

        if (parent > 0) {
                BUG_ON(!(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
                btrfs_set_extent_inline_ref_type(leaf, iref,
                                                 BTRFS_SHARED_BLOCK_REF_KEY);
                btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
        } else {
                btrfs_set_extent_inline_ref_type(leaf, iref,
                                                 BTRFS_TREE_BLOCK_REF_KEY);
                btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
        }

        btrfs_mark_buffer_dirty(leaf);
        btrfs_free_path(path);

        ret = remove_from_free_space_tree(trans, fs_info, ins->objectid,
                                          num_bytes);
        if (ret)
                return ret;

        ret = update_block_group(trans, root, ins->objectid, root->nodesize,
                                 1);
        if (ret) { /* -ENOENT, logic error */
                btrfs_err(fs_info, "update block group failed for %llu %llu",
                          ins->objectid, ins->offset);
                BUG();
        }

        trace_btrfs_reserved_extent_alloc(root, ins->objectid, root->nodesize);
        return ret;
}
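/*
 * Layout sketch for the metadata extent item written above:
 *
 *        with SKINNY_METADATA:
 *        [ btrfs_extent_item | inline ref ]
 *        (no tree_block_info; the block's level travels in the item key)
 *
 *        without SKINNY_METADATA:
 *        [ btrfs_extent_item | btrfs_tree_block_info (key + level) | inline ref ]
 *
 * The inline ref is SHARED_BLOCK_REF (offset = parent bytenr) for blocks
 * carrying a full backref, else TREE_BLOCK_REF (offset = owning root
 * objectid).
 */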
int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
                                     struct btrfs_root *root,
                                     u64 root_objectid, u64 owner,
                                     u64 offset, u64 ram_bytes,
                                     struct btrfs_key *ins)
{
        int ret;

        BUG_ON(root_objectid == BTRFS_TREE_LOG_OBJECTID);

        ret = btrfs_add_delayed_data_ref(root->fs_info, trans, ins->objectid,
                                         ins->offset, 0,
                                         root_objectid, owner, offset,
                                         ram_bytes, BTRFS_ADD_DELAYED_EXTENT,
                                         NULL);
        return ret;
}
/*
 * this is used by the tree logging recovery code.  It records that
 * an extent has been allocated and makes sure to clear the free
 * space cache bits as well
 */
int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
                                   struct btrfs_root *root,
                                   u64 root_objectid, u64 owner, u64 offset,
                                   struct btrfs_key *ins)
{
        int ret;
        struct btrfs_block_group_cache *block_group;
        struct btrfs_space_info *space_info;

        /*
         * Mixed block groups will exclude before processing the log so we only
         * need to do the exclude dance if this fs isn't mixed.
         */
        if (!btrfs_fs_incompat(root->fs_info, MIXED_GROUPS)) {
                ret = __exclude_logged_extent(root, ins->objectid, ins->offset);
                if (ret)
                        return ret;
        }

        block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid);
        if (!block_group)
                return -EINVAL;

        space_info = block_group->space_info;
        spin_lock(&space_info->lock);
        spin_lock(&block_group->lock);
        space_info->bytes_reserved += ins->offset;
        block_group->reserved += ins->offset;
        spin_unlock(&block_group->lock);
        spin_unlock(&space_info->lock);

        ret = alloc_reserved_file_extent(trans, root, 0, root_objectid,
                                         0, owner, offset, ins, 1);
        btrfs_put_block_group(block_group);
        return ret;
}
static struct extent_buffer *
btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root,
                      u64 bytenr, int level)
{
        struct extent_buffer *buf;

        buf = btrfs_find_create_tree_block(root, bytenr);
        if (IS_ERR(buf))
                return buf;

        btrfs_set_header_generation(buf, trans->transid);
        btrfs_set_buffer_lockdep_class(root->root_key.objectid, buf, level);
        btrfs_tree_lock(buf);
        clean_tree_block(trans, root->fs_info, buf);
        clear_bit(EXTENT_BUFFER_STALE, &buf->bflags);

        btrfs_set_lock_blocking(buf);
        set_extent_buffer_uptodate(buf);

        if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
                buf->log_index = root->log_transid % 2;
                /*
                 * we allow two log transactions at a time, use different
                 * EXTENT bits to differentiate dirty pages.
                 */
                if (buf->log_index == 0)
                        set_extent_dirty(&root->dirty_log_pages, buf->start,
                                         buf->start + buf->len - 1, GFP_NOFS);
                else
                        set_extent_new(&root->dirty_log_pages, buf->start,
                                       buf->start + buf->len - 1);
        } else {
                buf->log_index = -1;
                set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
                                 buf->start + buf->len - 1, GFP_NOFS);
        }
        trans->dirty = true;
        /* this returns a buffer locked for blocking */
        return buf;
}
static struct btrfs_block_rsv *
use_block_rsv(struct btrfs_trans_handle *trans,
              struct btrfs_root *root, u32 blocksize)
{
        struct btrfs_block_rsv *block_rsv;
        struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
        int ret;
        bool global_updated = false;

        block_rsv = get_block_rsv(trans, root);

        if (unlikely(block_rsv->size == 0))
                goto try_reserve;
again:
        ret = block_rsv_use_bytes(block_rsv, blocksize);
        if (!ret)
                return block_rsv;

        if (block_rsv->failfast)
                return ERR_PTR(ret);

        if (block_rsv->type == BTRFS_BLOCK_RSV_GLOBAL && !global_updated) {
                global_updated = true;
                update_global_block_rsv(root->fs_info);
                goto again;
        }

        if (btrfs_test_opt(root->fs_info, ENOSPC_DEBUG)) {
                static DEFINE_RATELIMIT_STATE(_rs,
                                DEFAULT_RATELIMIT_INTERVAL * 10,
                                /*DEFAULT_RATELIMIT_BURST*/ 1);
                if (__ratelimit(&_rs))
                        WARN(1, KERN_DEBUG
                                "BTRFS: block rsv returned %d\n", ret);
        }
try_reserve:
        ret = reserve_metadata_bytes(root, block_rsv, blocksize,
                                     BTRFS_RESERVE_NO_FLUSH);
        if (!ret)
                return block_rsv;
        /*
         * If we couldn't reserve metadata bytes try and use some from
         * the global reserve if its space type is the same as the global
         * reservation.
         */
        if (block_rsv->type != BTRFS_BLOCK_RSV_GLOBAL &&
            block_rsv->space_info == global_rsv->space_info) {
                ret = block_rsv_use_bytes(global_rsv, blocksize);
                if (!ret)
                        return global_rsv;
        }
        return ERR_PTR(ret);
}
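/*
 * In short, use_block_rsv() tries its sources in this order: the handle's
 * own block rsv, a refresh of the global rsv (only when the rsv *is* the
 * global one), a fresh no-flush metadata reservation, and finally stealing
 * from the global reserve when the space_info matches.  Whichever rsv the
 * blocksize was charged to is the one returned, so unuse_block_rsv() below
 * can hand the bytes back to the right place.
 */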
static void unuse_block_rsv(struct btrfs_fs_info *fs_info,
                            struct btrfs_block_rsv *block_rsv, u32 blocksize)
{
        block_rsv_add_bytes(block_rsv, blocksize, 0);
        block_rsv_release_bytes(fs_info, block_rsv, NULL, 0);
}
/*
 * finds a free extent and does all the dirty work required for allocation.
 * Returns the tree buffer or an ERR_PTR on error.
 */
struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
                                        struct btrfs_root *root,
                                        u64 parent, u64 root_objectid,
                                        struct btrfs_disk_key *key, int level,
                                        u64 hint, u64 empty_size)
{
        struct btrfs_key ins;
        struct btrfs_block_rsv *block_rsv;
        struct extent_buffer *buf;
        struct btrfs_delayed_extent_op *extent_op;
        u64 flags = 0;
        int ret;
        u32 blocksize = root->nodesize;
        bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
                                                 SKINNY_METADATA);

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
        if (btrfs_is_testing(root->fs_info)) {
                buf = btrfs_init_new_buffer(trans, root, root->alloc_bytenr,
                                            level);
                if (!IS_ERR(buf))
                        root->alloc_bytenr += blocksize;
                return buf;
        }
#endif

        block_rsv = use_block_rsv(trans, root, blocksize);
        if (IS_ERR(block_rsv))
                return ERR_CAST(block_rsv);

        ret = btrfs_reserve_extent(root, blocksize, blocksize, blocksize,
                                   empty_size, hint, &ins, 0, 0);
        if (ret)
                goto out_unuse;

        buf = btrfs_init_new_buffer(trans, root, ins.objectid, level);
        if (IS_ERR(buf)) {
                ret = PTR_ERR(buf);
                goto out_free_reserved;
        }

        if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) {
                if (parent == 0)
                        parent = ins.objectid;
                flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
        } else
                BUG_ON(parent > 0);

        if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {
                extent_op = btrfs_alloc_delayed_extent_op();
                if (!extent_op) {
                        ret = -ENOMEM;
                        goto out_free_buf;
                }
                if (key)
                        memcpy(&extent_op->key, key, sizeof(extent_op->key));
                else
                        memset(&extent_op->key, 0, sizeof(extent_op->key));
                extent_op->flags_to_set = flags;
                extent_op->update_key = skinny_metadata ? false : true;
                extent_op->update_flags = true;
                extent_op->is_data = false;
                extent_op->level = level;

                ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
                                                 ins.objectid, ins.offset,
                                                 parent, root_objectid, level,
                                                 BTRFS_ADD_DELAYED_EXTENT,
                                                 extent_op);
                if (ret)
                        goto out_free_delayed;
        }
        return buf;

out_free_delayed:
        btrfs_free_delayed_extent_op(extent_op);
out_free_buf:
        free_extent_buffer(buf);
out_free_reserved:
        btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 0);
out_unuse:
        unuse_block_rsv(root->fs_info, block_rsv, blocksize);
        return ERR_PTR(ret);
}
struct walk_control {
        u64 refs[BTRFS_MAX_LEVEL];
        u64 flags[BTRFS_MAX_LEVEL];
        struct btrfs_key update_progress;
        int stage;
        int level;
        int shared_level;
        int update_ref;
        int keep_locks;
        int reada_slot;
        int reada_count;
        int for_reloc;
};

#define DROP_REFERENCE  1
#define UPDATE_BACKREF  2
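/*
 * The two stages drive the snapshot-drop walk below.  DROP_REFERENCE is the
 * normal mode, dropping one reference per block visited.  When
 * do_walk_down() meets a shared block whose subtree still needs its
 * backrefs rewritten (update_ref is set and the block postdates the
 * snapshot), it switches to UPDATE_BACKREF for that subtree;
 * walk_up_proc() flips back to DROP_REFERENCE once the shared level has
 * been fully processed.
 */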
static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
                                     struct btrfs_root *root,
                                     struct walk_control *wc,
                                     struct btrfs_path *path)
{
        u64 bytenr;
        u64 generation;
        u64 refs;
        u64 flags;
        u32 nritems;
        struct btrfs_key key;
        struct extent_buffer *eb;
        int ret;
        int slot;
        int nread = 0;

        if (path->slots[wc->level] < wc->reada_slot) {
                wc->reada_count = wc->reada_count * 2 / 3;
                wc->reada_count = max(wc->reada_count, 2);
        } else {
                wc->reada_count = wc->reada_count * 3 / 2;
                wc->reada_count = min_t(int, wc->reada_count,
                                        BTRFS_NODEPTRS_PER_BLOCK(root));
        }

        eb = path->nodes[wc->level];
        nritems = btrfs_header_nritems(eb);

        for (slot = path->slots[wc->level]; slot < nritems; slot++) {
                if (nread >= wc->reada_count)
                        break;

                cond_resched();
                bytenr = btrfs_node_blockptr(eb, slot);
                generation = btrfs_node_ptr_generation(eb, slot);

                if (slot == path->slots[wc->level])
                        goto reada;

                if (wc->stage == UPDATE_BACKREF &&
                    generation <= root->root_key.offset)
                        continue;

                /* We don't lock the tree block, it's OK to be racy here */
                ret = btrfs_lookup_extent_info(trans, root, bytenr,
                                               wc->level - 1, 1, &refs,
                                               &flags);
                /* We don't care about errors in readahead. */
                if (ret < 0)
                        continue;
                BUG_ON(refs == 0);

                if (wc->stage == DROP_REFERENCE) {
                        if (refs == 1)
                                goto reada;

                        if (wc->level == 1 &&
                            (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
                                continue;
                        if (!wc->update_ref ||
                            generation <= root->root_key.offset)
                                continue;
                        btrfs_node_key_to_cpu(eb, &key, slot);
                        ret = btrfs_comp_cpu_keys(&key,
                                                  &wc->update_progress);
                        if (ret < 0)
                                continue;
                } else {
                        if (wc->level == 1 &&
                            (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
                                continue;
                }
reada:
                readahead_tree_block(root, bytenr);
                nread++;
        }
        wc->reada_slot = slot;
}
static int account_leaf_items(struct btrfs_trans_handle *trans,
                              struct btrfs_root *root,
                              struct extent_buffer *eb)
{
        int nr = btrfs_header_nritems(eb);
        int i, extent_type, ret;
        struct btrfs_key key;
        struct btrfs_file_extent_item *fi;
        u64 bytenr, num_bytes;

        /* We can be called directly from walk_up_proc() */
        if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &root->fs_info->flags))
                return 0;

        for (i = 0; i < nr; i++) {
                btrfs_item_key_to_cpu(eb, &key, i);

                if (key.type != BTRFS_EXTENT_DATA_KEY)
                        continue;

                fi = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item);
                /* filter out non qgroup-accountable extents */
                extent_type = btrfs_file_extent_type(eb, fi);

                if (extent_type == BTRFS_FILE_EXTENT_INLINE)
                        continue;

                bytenr = btrfs_file_extent_disk_bytenr(eb, fi);
                if (!bytenr)
                        continue;

                num_bytes = btrfs_file_extent_disk_num_bytes(eb, fi);

                ret = btrfs_qgroup_insert_dirty_extent(trans, root->fs_info,
                                bytenr, num_bytes, GFP_NOFS);
                if (ret)
                        return ret;
        }
        return 0;
}
/*
 * Walk up the tree from the bottom, freeing leaves and any interior
 * nodes which have had all slots visited. If a node (leaf or
 * interior) is freed, the node above it will have its slot
 * incremented. The root node will never be freed.
 *
 * At the end of this function, we should have a path which has all
 * slots incremented to the next position for a search. If we need to
 * read a new node it will be NULL and the node above it will have the
 * correct slot selected for a later read.
 *
 * If we increment the root node's slot counter past the number of
 * elements, 1 is returned to signal completion of the search.
 */
static int adjust_slots_upwards(struct btrfs_root *root,
                                struct btrfs_path *path, int root_level)
{
        int level = 0;
        int nr, slot;
        struct extent_buffer *eb;

        if (root_level == 0)
                return 1;

        while (level <= root_level) {
                eb = path->nodes[level];
                nr = btrfs_header_nritems(eb);
                path->slots[level]++;
                slot = path->slots[level];
                if (slot >= nr || level == 0) {
                        /*
                         * Don't free the root - we will detect this
                         * condition after our loop and return a
                         * positive value for caller to stop walking the tree.
                         */
                        if (level != root_level) {
                                btrfs_tree_unlock_rw(eb, path->locks[level]);
                                path->locks[level] = 0;

                                free_extent_buffer(eb);
                                path->nodes[level] = NULL;
                                path->slots[level] = 0;
                        }
                } else {
                        /*
                         * We have a valid slot to walk back down
                         * from. Stop here so caller can process these
                         * new nodes.
                         */
                        break;
                }

                level++;
        }

        eb = path->nodes[root_level];
        if (path->slots[root_level] >= btrfs_header_nritems(eb))
                return 1;

        return 0;
}
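/*
 * Example of one pass: with a 2-level tree and the path sitting on the last
 * slot of a leaf, the loop unlocks and frees the leaf, clears
 * path->nodes[0], then bumps path->slots[1] in the root.  If the root still
 * has a slot to the right we stop there with a valid slot to walk back down
 * from; if the bump ran past btrfs_header_nritems() of the root, 1 is
 * returned and the caller ends its search.
 */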
/*
 * root_eb is the subtree root and is locked before this function is called.
 */
static int account_shared_subtree(struct btrfs_trans_handle *trans,
                                  struct btrfs_root *root,
                                  struct extent_buffer *root_eb,
                                  u64 root_gen,
                                  int root_level)
{
        int ret = 0;
        int level;
        struct extent_buffer *eb = root_eb;
        struct btrfs_path *path = NULL;

        BUG_ON(root_level < 0 || root_level > BTRFS_MAX_LEVEL);
        BUG_ON(root_eb == NULL);

        if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &root->fs_info->flags))
                return 0;

        if (!extent_buffer_uptodate(root_eb)) {
                ret = btrfs_read_buffer(root_eb, root_gen);
                if (ret)
                        goto out;
        }

        if (root_level == 0) {
                ret = account_leaf_items(trans, root, root_eb);
                goto out;
        }

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        /*
         * Walk down the tree.  Missing extent blocks are filled in as
         * we go. Metadata is accounted every time we read a new
         * extent block.
         *
         * When we reach a leaf, we account for file extent items in it,
         * walk back up the tree (adjusting slot pointers as we go)
         * and restart the search process.
         */
        extent_buffer_get(root_eb); /* For path */
        path->nodes[root_level] = root_eb;
        path->slots[root_level] = 0;
        path->locks[root_level] = 0; /* so release_path doesn't try to unlock */
walk_down:
        level = root_level;
        while (level >= 0) {
                if (path->nodes[level] == NULL) {
                        int parent_slot;
                        u64 child_gen;
                        u64 child_bytenr;

                        /* We need to get child blockptr/gen from
                         * parent before we can read it. */
                        eb = path->nodes[level + 1];
                        parent_slot = path->slots[level + 1];
                        child_bytenr = btrfs_node_blockptr(eb, parent_slot);
                        child_gen = btrfs_node_ptr_generation(eb, parent_slot);

                        eb = read_tree_block(root, child_bytenr, child_gen);
                        if (IS_ERR(eb)) {
                                ret = PTR_ERR(eb);
                                goto out;
                        } else if (!extent_buffer_uptodate(eb)) {
                                free_extent_buffer(eb);
                                ret = -EIO;
                                goto out;
                        }

                        path->nodes[level] = eb;
                        path->slots[level] = 0;

                        btrfs_tree_read_lock(eb);
                        btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
                        path->locks[level] = BTRFS_READ_LOCK_BLOCKING;

                        ret = btrfs_qgroup_insert_dirty_extent(trans,
                                        root->fs_info, child_bytenr,
                                        root->nodesize, GFP_NOFS);
                        if (ret)
                                goto out;
                }

                if (level == 0) {
                        ret = account_leaf_items(trans, root, path->nodes[level]);
                        if (ret)
                                goto out;

                        /* Nonzero return here means we completed our search */
                        ret = adjust_slots_upwards(root, path, root_level);
                        if (ret)
                                break;

                        /* Restart search with new slots */
                        goto walk_down;
                }

                level--;
        }

        ret = 0;
out:
        btrfs_free_path(path);

        return ret;
}
/*
 * helper to process tree block while walking down the tree.
 *
 * when wc->stage == UPDATE_BACKREF, this function updates
 * back refs for pointers in the block.
 *
 * NOTE: return value 1 means we should stop walking down.
 */
static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
                                   struct btrfs_root *root,
                                   struct btrfs_path *path,
                                   struct walk_control *wc, int lookup_info)
{
        int level = wc->level;
        struct extent_buffer *eb = path->nodes[level];
        u64 flag = BTRFS_BLOCK_FLAG_FULL_BACKREF;
        int ret;

        if (wc->stage == UPDATE_BACKREF &&
            btrfs_header_owner(eb) != root->root_key.objectid)
                return 1;

        /*
         * when the reference count of a tree block is 1, it won't increase
         * again.  Once the full backref flag is set, we never clear it.
         */
        if (lookup_info &&
            ((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) ||
             (wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag)))) {
                BUG_ON(!path->locks[level]);
                ret = btrfs_lookup_extent_info(trans, root,
                                               eb->start, level, 1,
                                               &wc->refs[level],
                                               &wc->flags[level]);
                BUG_ON(ret == -ENOMEM);
                if (ret)
                        return ret;
                BUG_ON(wc->refs[level] == 0);
        }

        if (wc->stage == DROP_REFERENCE) {
                if (wc->refs[level] > 1)
                        return 1;

                if (path->locks[level] && !wc->keep_locks) {
                        btrfs_tree_unlock_rw(eb, path->locks[level]);
                        path->locks[level] = 0;
                }
                return 0;
        }

        /* wc->stage == UPDATE_BACKREF */
        if (!(wc->flags[level] & flag)) {
                BUG_ON(!path->locks[level]);
                ret = btrfs_inc_ref(trans, root, eb, 1);
                BUG_ON(ret); /* -ENOMEM */
                ret = btrfs_dec_ref(trans, root, eb, 0);
                BUG_ON(ret); /* -ENOMEM */
                ret = btrfs_set_disk_extent_flags(trans, root, eb->start,
                                                  eb->len, flag,
                                                  btrfs_header_level(eb), 0);
                BUG_ON(ret); /* -ENOMEM */
                wc->flags[level] |= flag;
        }

        /*
         * the block is shared by multiple trees, so it's not good to
         * keep the tree lock
         */
        if (path->locks[level] && level > 0) {
                btrfs_tree_unlock_rw(eb, path->locks[level]);
                path->locks[level] = 0;
        }
        return 0;
}
/*
 * helper to process tree block pointer.
 *
 * when wc->stage == DROP_REFERENCE, this function checks
 * reference count of the block pointed to. if the block
 * is shared and we need to update back refs for the subtree
 * rooted at the block, this function changes wc->stage to
 * UPDATE_BACKREF. if the block is shared and there is no
 * need to update back refs, this function drops the reference
 * to the block.
 *
 * NOTE: return value 1 means we should stop walking down.
 */
static noinline int do_walk_down(struct btrfs_trans_handle *trans,
                                 struct btrfs_root *root,
                                 struct btrfs_path *path,
                                 struct walk_control *wc, int *lookup_info)
{
        u64 bytenr;
        u64 generation;
        u64 parent;
        u32 blocksize;
        struct btrfs_key key;
        struct extent_buffer *next;
        int level = wc->level;
        int reada = 0;
        int ret = 0;
        bool need_account = false;

        generation = btrfs_node_ptr_generation(path->nodes[level],
                                               path->slots[level]);
        /*
         * if the lower level block was created before the snapshot
         * was created, we know there is no need to update back refs
         * for the subtree
         */
        if (wc->stage == UPDATE_BACKREF &&
            generation <= root->root_key.offset) {
                *lookup_info = 1;
                return 1;
        }

        bytenr = btrfs_node_blockptr(path->nodes[level], path->slots[level]);
        blocksize = root->nodesize;

        next = btrfs_find_tree_block(root->fs_info, bytenr);
        if (!next) {
                next = btrfs_find_create_tree_block(root, bytenr);
                if (IS_ERR(next))
                        return PTR_ERR(next);

                btrfs_set_buffer_lockdep_class(root->root_key.objectid, next,
                                               level - 1);
                reada = 1;
        }
        btrfs_tree_lock(next);
        btrfs_set_lock_blocking(next);

        ret = btrfs_lookup_extent_info(trans, root, bytenr, level - 1, 1,
                                       &wc->refs[level - 1],
                                       &wc->flags[level - 1]);
        if (ret < 0) {
                btrfs_tree_unlock(next);
                return ret;
        }

        if (unlikely(wc->refs[level - 1] == 0)) {
                btrfs_err(root->fs_info, "Missing references.");
                BUG();
        }
        *lookup_info = 0;

        if (wc->stage == DROP_REFERENCE) {
                if (wc->refs[level - 1] > 1) {
                        need_account = true;
                        if (level == 1 &&
                            (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
                                goto skip;

                        if (!wc->update_ref ||
                            generation <= root->root_key.offset)
                                goto skip;

                        btrfs_node_key_to_cpu(path->nodes[level], &key,
                                              path->slots[level]);
                        ret = btrfs_comp_cpu_keys(&key, &wc->update_progress);
                        if (ret < 0)
                                goto skip;

                        wc->stage = UPDATE_BACKREF;
                        wc->shared_level = level - 1;
                }
        } else {
                if (level == 1 &&
                    (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
                        goto skip;
        }

        if (!btrfs_buffer_uptodate(next, generation, 0)) {
                btrfs_tree_unlock(next);
                free_extent_buffer(next);
                next = NULL;
                *lookup_info = 1;
        }

        if (!next) {
                if (reada && level == 1)
                        reada_walk_down(trans, root, wc, path);
                next = read_tree_block(root, bytenr, generation);
                if (IS_ERR(next)) {
                        return PTR_ERR(next);
                } else if (!extent_buffer_uptodate(next)) {
                        free_extent_buffer(next);
                        return -EIO;
                }
                btrfs_tree_lock(next);
                btrfs_set_lock_blocking(next);
        }

        level--;
        BUG_ON(level != btrfs_header_level(next));
        path->nodes[level] = next;
        path->slots[level] = 0;
        path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
        wc->level = level;
        if (wc->level == 1)
                wc->reada_slot = 0;
        return 0;
skip:
        wc->refs[level - 1] = 0;
        wc->flags[level - 1] = 0;
        if (wc->stage == DROP_REFERENCE) {
                if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
                        parent = path->nodes[level]->start;
                } else {
                        BUG_ON(root->root_key.objectid !=
                               btrfs_header_owner(path->nodes[level]));
                        parent = 0;
                }

                if (need_account) {
                        ret = account_shared_subtree(trans, root, next,
                                                     generation, level - 1);
                        if (ret) {
                                btrfs_err_rl(root->fs_info,
                                        "Error %d accounting shared subtree. Quota is out of sync, rescan required.",
                                        ret);
                        }
                }
                ret = btrfs_free_extent(trans, root, bytenr, blocksize, parent,
                                        root->root_key.objectid, level - 1, 0);
                BUG_ON(ret); /* -ENOMEM */
        }
        btrfs_tree_unlock(next);
        free_extent_buffer(next);
        *lookup_info = 1;
        return 1;
}
/*
 * helper to process tree block while walking up the tree.
 *
 * when wc->stage == DROP_REFERENCE, this function drops
 * reference count on the block.
 *
 * when wc->stage == UPDATE_BACKREF, this function changes
 * wc->stage back to DROP_REFERENCE if we changed wc->stage
 * to UPDATE_BACKREF previously while processing the block.
 *
 * NOTE: return value 1 means we should stop walking up.
 */
static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
                                 struct btrfs_root *root,
                                 struct btrfs_path *path,
                                 struct walk_control *wc)
{
        int ret;
        int level = wc->level;
        struct extent_buffer *eb = path->nodes[level];
        u64 parent = 0;

        if (wc->stage == UPDATE_BACKREF) {
                BUG_ON(wc->shared_level < level);
                if (level < wc->shared_level)
                        goto out;

                ret = find_next_key(path, level + 1, &wc->update_progress);
                if (ret > 0)
                        wc->update_ref = 0;

                wc->stage = DROP_REFERENCE;
                wc->shared_level = -1;
                path->slots[level] = 0;

                /*
                 * check reference count again if the block isn't locked.
                 * we should start walking down the tree again if reference
                 * count is one.
                 */
                if (!path->locks[level]) {
                        BUG_ON(level == 0);
                        btrfs_tree_lock(eb);
                        btrfs_set_lock_blocking(eb);
                        path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;

                        ret = btrfs_lookup_extent_info(trans, root,
                                                       eb->start, level, 1,
                                                       &wc->refs[level],
                                                       &wc->flags[level]);
                        if (ret < 0) {
                                btrfs_tree_unlock_rw(eb, path->locks[level]);
                                path->locks[level] = 0;
                                return ret;
                        }
                        BUG_ON(wc->refs[level] == 0);
                        if (wc->refs[level] == 1) {
                                btrfs_tree_unlock_rw(eb, path->locks[level]);
                                path->locks[level] = 0;
                                return 1;
                        }
                }
        }

        /* wc->stage == DROP_REFERENCE */
        BUG_ON(wc->refs[level] > 1 && !path->locks[level]);

        if (wc->refs[level] == 1) {
                if (level == 0) {
                        if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
                                ret = btrfs_dec_ref(trans, root, eb, 1);
                        else
                                ret = btrfs_dec_ref(trans, root, eb, 0);
                        BUG_ON(ret); /* -ENOMEM */
                        ret = account_leaf_items(trans, root, eb);
                        if (ret) {
                                btrfs_err_rl(root->fs_info,
                                        "error %d accounting leaf items. Quota is out of sync, rescan required.",
                                        ret);
                        }
                }
                /* make block locked assertion in clean_tree_block happy */
                if (!path->locks[level] &&
                    btrfs_header_generation(eb) == trans->transid) {
                        btrfs_tree_lock(eb);
                        btrfs_set_lock_blocking(eb);
                        path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
                }
                clean_tree_block(trans, root->fs_info, eb);
        }

        if (eb == root->node) {
                if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
                        parent = eb->start;
                else
                        BUG_ON(root->root_key.objectid !=
                               btrfs_header_owner(eb));
        } else {
                if (wc->flags[level + 1] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
                        parent = path->nodes[level + 1]->start;
                else
                        BUG_ON(root->root_key.objectid !=
                               btrfs_header_owner(path->nodes[level + 1]));
        }

        btrfs_free_tree_block(trans, root, eb, parent, wc->refs[level] == 1);
out:
        wc->refs[level] = 0;
        wc->flags[level] = 0;
        return 0;
}

static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct btrfs_path *path,
				   struct walk_control *wc)
{
	int level = wc->level;
	int lookup_info = 1;
	int ret;

	while (level >= 0) {
		ret = walk_down_proc(trans, root, path, wc, lookup_info);
		if (ret > 0)
			break;

		if (level == 0)
			break;

		if (path->slots[level] >=
		    btrfs_header_nritems(path->nodes[level]))
			break;

		ret = do_walk_down(trans, root, path, wc, &lookup_info);
		if (ret > 0) {
			path->slots[level]++;
			continue;
		} else if (ret < 0)
			return ret;
		level = wc->level;
	}
	return 0;
}

static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct walk_control *wc, int max_level)
{
	int level = wc->level;
	int ret;

	path->slots[level] = btrfs_header_nritems(path->nodes[level]);
	while (level < max_level && path->nodes[level]) {
		wc->level = level;
		if (path->slots[level] + 1 <
		    btrfs_header_nritems(path->nodes[level])) {
			path->slots[level]++;
			return 0;
		} else {
			ret = walk_up_proc(trans, root, path, wc);
			if (ret > 0)
				return 0;

			if (path->locks[level]) {
				btrfs_tree_unlock_rw(path->nodes[level],
						     path->locks[level]);
				path->locks[level] = 0;
			}
			free_extent_buffer(path->nodes[level]);
			path->nodes[level] = NULL;
			level++;
		}
	}
	return 1;
}
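
/*
 * Illustrative sketch (not from the original source): how the two walkers
 * above cooperate.  A caller such as btrfs_drop_snapshot() below alternates:
 *
 *	while (1) {
 *		ret = walk_down_tree(...);	// descend, dropping refs
 *		ret = walk_up_tree(...);	// free blocks, pop one level
 *		if (ret > 0)			// whole tree consumed
 *			break;
 *	}
 *
 * walk_up_tree() returns 0 when it advanced path->slots[] to a sibling
 * (so there is more tree to descend into) and 1 once every level up to
 * max_level has been processed and freed.
 */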

/*
 * drop a subvolume tree.
 *
 * this function traverses the tree freeing any blocks that are only
 * referenced by the tree.
 *
 * when a shared tree block is found, this function decreases its
 * reference count by one. if update_ref is true, this function
 * also makes sure backrefs for the shared block and all lower level
 * blocks are properly updated.
 *
 * If called with for_reloc == 0, may exit early with -EAGAIN
 */
int btrfs_drop_snapshot(struct btrfs_root *root,
			struct btrfs_block_rsv *block_rsv, int update_ref,
			int for_reloc)
{
	struct btrfs_path *path;
	struct btrfs_trans_handle *trans;
	struct btrfs_root *tree_root = root->fs_info->tree_root;
	struct btrfs_root_item *root_item = &root->root_item;
	struct walk_control *wc;
	struct btrfs_key key;
	int err = 0;
	int ret;
	int level;
	bool root_dropped = false;

	btrfs_debug(root->fs_info, "Drop subvolume %llu", root->objectid);

	path = btrfs_alloc_path();
	if (!path) {
		err = -ENOMEM;
		goto out;
	}

	wc = kzalloc(sizeof(*wc), GFP_NOFS);
	if (!wc) {
		btrfs_free_path(path);
		err = -ENOMEM;
		goto out;
	}

	trans = btrfs_start_transaction(tree_root, 0);
	if (IS_ERR(trans)) {
		err = PTR_ERR(trans);
		goto out_free;
	}

	if (block_rsv)
		trans->block_rsv = block_rsv;

	if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
		level = btrfs_header_level(root->node);
		path->nodes[level] = btrfs_lock_root_node(root);
		btrfs_set_lock_blocking(path->nodes[level]);
		path->slots[level] = 0;
		path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
		memset(&wc->update_progress, 0,
		       sizeof(wc->update_progress));
	} else {
		btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
		memcpy(&wc->update_progress, &key,
		       sizeof(wc->update_progress));

		level = root_item->drop_level;
		BUG_ON(level == 0);
		path->lowest_level = level;
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		path->lowest_level = 0;
		if (ret < 0) {
			err = ret;
			goto out_end_trans;
		}
		WARN_ON(ret > 0);

		/*
		 * unlock our path, this is safe because only this
		 * function is allowed to delete this snapshot
		 */
		btrfs_unlock_up_safe(path, 0);

		level = btrfs_header_level(root->node);
		while (1) {
			btrfs_tree_lock(path->nodes[level]);
			btrfs_set_lock_blocking(path->nodes[level]);
			path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;

			ret = btrfs_lookup_extent_info(trans, root,
						path->nodes[level]->start,
						level, 1, &wc->refs[level],
						&wc->flags[level]);
			if (ret < 0) {
				err = ret;
				goto out_end_trans;
			}
			BUG_ON(wc->refs[level] == 0);

			if (level == root_item->drop_level)
				break;

			btrfs_tree_unlock(path->nodes[level]);
			path->locks[level] = 0;
			WARN_ON(wc->refs[level] != 1);
			level--;
		}
	}

	wc->level = level;
	wc->shared_level = -1;
	wc->stage = DROP_REFERENCE;
	wc->update_ref = update_ref;
	wc->keep_locks = 0;
	wc->for_reloc = for_reloc;
	wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);

	while (1) {
		ret = walk_down_tree(trans, root, path, wc);
		if (ret < 0) {
			err = ret;
			break;
		}

		ret = walk_up_tree(trans, root, path, wc, BTRFS_MAX_LEVEL);
		if (ret < 0) {
			err = ret;
			break;
		}

		if (ret > 0) {
			BUG_ON(wc->stage != DROP_REFERENCE);
			break;
		}

		if (wc->stage == DROP_REFERENCE) {
			level = wc->level;
			btrfs_node_key(path->nodes[level],
				       &root_item->drop_progress,
				       path->slots[level]);
			root_item->drop_level = level;
		}

		BUG_ON(wc->level == 0);
		if (btrfs_should_end_transaction(trans, tree_root) ||
		    (!for_reloc && btrfs_need_cleaner_sleep(root))) {
			ret = btrfs_update_root(trans, tree_root,
						&root->root_key,
						root_item);
			if (ret) {
				btrfs_abort_transaction(trans, ret);
				err = ret;
				goto out_end_trans;
			}

			btrfs_end_transaction_throttle(trans, tree_root);
			if (!for_reloc && btrfs_need_cleaner_sleep(root)) {
				pr_debug("BTRFS: drop snapshot early exit\n");
				err = -EAGAIN;
				goto out_free;
			}

			trans = btrfs_start_transaction(tree_root, 0);
			if (IS_ERR(trans)) {
				err = PTR_ERR(trans);
				goto out_free;
			}
			if (block_rsv)
				trans->block_rsv = block_rsv;
		}
	}
	btrfs_release_path(path);
	if (err)
		goto out_end_trans;

	ret = btrfs_del_root(trans, tree_root, &root->root_key);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out_end_trans;
	}

	if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
		ret = btrfs_find_root(tree_root, &root->root_key, path,
				      NULL, NULL);
		if (ret < 0) {
			btrfs_abort_transaction(trans, ret);
			err = ret;
			goto out_end_trans;
		} else if (ret > 0) {
			/* if we fail to delete the orphan item this time
			 * around, it'll get picked up the next time.
			 *
			 * The most common failure here is just -ENOENT.
			 */
			btrfs_del_orphan_item(trans, tree_root,
					      root->root_key.objectid);
		}
	}

	if (test_bit(BTRFS_ROOT_IN_RADIX, &root->state)) {
		btrfs_add_dropped_root(trans, root);
	} else {
		free_extent_buffer(root->node);
		free_extent_buffer(root->commit_root);
		btrfs_put_fs_root(root);
	}
	root_dropped = true;
out_end_trans:
	btrfs_end_transaction_throttle(trans, tree_root);
out_free:
	kfree(wc);
	btrfs_free_path(path);
out:
	/*
	 * So if we need to stop dropping the snapshot for whatever reason we
	 * need to make sure to add it back to the dead root list so that we
	 * keep trying to do the work later.  This also cleans up roots if we
	 * don't have it in the radix (like when we recover after a power fail
	 * or unmount) so we don't leak memory.
	 */
	if (!for_reloc && root_dropped == false)
		btrfs_add_dead_root(root);
	if (err && err != -EAGAIN)
		btrfs_handle_fs_error(root->fs_info, err, NULL);
	return err;
}
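
/*
 * Sketch of the resume protocol above (grounded in the code, not a new
 * mechanism): each pass through the main loop saves the next key to
 * process in root_item->drop_progress and the level in
 * root_item->drop_level before the transaction is ended.  If the drop is
 * interrupted (-EAGAIN, unmount, power failure), the next invocation sees
 * a non-zero drop_progress, re-searches to that key with
 * btrfs_search_slot() and re-locks the path, so the walk continues where
 * it left off instead of restarting from the root.
 */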

/*
 * drop subtree rooted at tree block 'node'.
 *
 * NOTE: this function will unlock and release tree block 'node'
 * only used by relocation code
 */
int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root,
		       struct extent_buffer *node,
		       struct extent_buffer *parent)
{
	struct btrfs_path *path;
	struct walk_control *wc;
	int level;
	int parent_level;
	int ret = 0;
	int wret;

	BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	wc = kzalloc(sizeof(*wc), GFP_NOFS);
	if (!wc) {
		btrfs_free_path(path);
		return -ENOMEM;
	}

	btrfs_assert_tree_locked(parent);
	parent_level = btrfs_header_level(parent);
	extent_buffer_get(parent);
	path->nodes[parent_level] = parent;
	path->slots[parent_level] = btrfs_header_nritems(parent);

	btrfs_assert_tree_locked(node);
	level = btrfs_header_level(node);
	path->nodes[level] = node;
	path->slots[level] = 0;
	path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;

	wc->refs[parent_level] = 1;
	wc->flags[parent_level] = BTRFS_BLOCK_FLAG_FULL_BACKREF;
	wc->level = level;
	wc->shared_level = -1;
	wc->stage = DROP_REFERENCE;
	wc->update_ref = 0;
	wc->keep_locks = 1;
	wc->for_reloc = 1;
	wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);

	while (1) {
		wret = walk_down_tree(trans, root, path, wc);
		if (wret < 0) {
			ret = wret;
			break;
		}

		wret = walk_up_tree(trans, root, path, wc, parent_level);
		if (wret < 0)
			ret = wret;
		if (wret != 0)
			break;
	}

	kfree(wc);
	btrfs_free_path(path);
	return ret;
}

static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
{
	u64 num_devices;
	u64 stripped;

	/*
	 * if restripe for this chunk_type is on pick target profile and
	 * return, otherwise do the usual balance
	 */
	stripped = get_restripe_target(root->fs_info, flags);
	if (stripped)
		return extended_to_chunk(stripped);

	num_devices = root->fs_info->fs_devices->rw_devices;

	stripped = BTRFS_BLOCK_GROUP_RAID0 |
		   BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6 |
		   BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;

	if (num_devices == 1) {
		stripped |= BTRFS_BLOCK_GROUP_DUP;
		stripped = flags & ~stripped;

		/* turn raid0 into single device chunks */
		if (flags & BTRFS_BLOCK_GROUP_RAID0)
			return stripped;

		/* turn mirroring into duplication */
		if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
			     BTRFS_BLOCK_GROUP_RAID10))
			return stripped | BTRFS_BLOCK_GROUP_DUP;
	} else {
		/* they already had raid on here, just return */
		if (flags & stripped)
			return flags;

		stripped |= BTRFS_BLOCK_GROUP_DUP;
		stripped = flags & ~stripped;

		/* switch duplicated blocks with raid1 */
		if (flags & BTRFS_BLOCK_GROUP_DUP)
			return stripped | BTRFS_BLOCK_GROUP_RAID1;

		/* this is drive concat, leave it alone */
	}

	return flags;
}
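
/*
 * Worked examples for the profile reduction above (illustrative only):
 *
 *   num_devices == 1, flags has RAID1  -> RAID bits stripped, DUP added,
 *                                         i.e. mirroring becomes duplication.
 *   num_devices == 1, flags has RAID0  -> all RAID bits stripped, leaving
 *                                         single-device chunks.
 *   num_devices  > 1, flags has DUP    -> DUP swapped for RAID1.
 *   num_devices  > 1, flags has RAID10 -> already raid, returned unchanged.
 */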

static int inc_block_group_ro(struct btrfs_block_group_cache *cache, int force)
{
	struct btrfs_space_info *sinfo = cache->space_info;
	u64 num_bytes;
	u64 min_allocable_bytes;
	int ret = -ENOSPC;

	/*
	 * We need some metadata space and system metadata space for
	 * allocating chunks in some corner cases, so keep a minimum
	 * allocable reserve unless the caller forces the block group
	 * to be set read-only.
	 */
	if ((sinfo->flags &
	     (BTRFS_BLOCK_GROUP_SYSTEM | BTRFS_BLOCK_GROUP_METADATA)) &&
	    !force)
		min_allocable_bytes = SZ_1M;
	else
		min_allocable_bytes = 0;

	spin_lock(&sinfo->lock);
	spin_lock(&cache->lock);

	if (cache->ro) {
		cache->ro++;
		ret = 0;
		goto out;
	}

	num_bytes = cache->key.offset - cache->reserved - cache->pinned -
		    cache->bytes_super - btrfs_block_group_used(&cache->item);

	if (sinfo->bytes_used + sinfo->bytes_reserved + sinfo->bytes_pinned +
	    sinfo->bytes_may_use + sinfo->bytes_readonly + num_bytes +
	    min_allocable_bytes <= sinfo->total_bytes) {
		sinfo->bytes_readonly += num_bytes;
		cache->ro++;
		list_add_tail(&cache->ro_list, &sinfo->ro_bgs);
		ret = 0;
	}
out:
	spin_unlock(&cache->lock);
	spin_unlock(&sinfo->lock);
	return ret;
}
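
/*
 * Numeric sketch of the check above (made-up numbers): for a 1GiB
 * metadata block group with 100MiB used, no reservations and 16MiB of
 * super stripes, num_bytes = 1024M - 0 - 0 - 16M - 100M = 908M of unused
 * space that would become read-only.  The group is flipped only if the
 * space_info's committed bytes plus that amount, plus the 1MiB floor kept
 * for metadata/system groups, still fits in total_bytes; otherwise
 * -ENOSPC is returned and the caller may allocate a new chunk first (see
 * btrfs_inc_block_group_ro() below).
 */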

int btrfs_inc_block_group_ro(struct btrfs_root *root,
			     struct btrfs_block_group_cache *cache)
{
	struct btrfs_trans_handle *trans;
	u64 alloc_flags;
	int ret;

again:
	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	/*
	 * we're not allowed to set block groups readonly after the dirty
	 * block groups cache has started writing.  If it already started,
	 * back off and let this transaction commit
	 */
	mutex_lock(&root->fs_info->ro_block_group_mutex);
	if (test_bit(BTRFS_TRANS_DIRTY_BG_RUN, &trans->transaction->flags)) {
		u64 transid = trans->transid;

		mutex_unlock(&root->fs_info->ro_block_group_mutex);
		btrfs_end_transaction(trans, root);

		ret = btrfs_wait_for_commit(root, transid);
		if (ret)
			return ret;
		goto again;
	}

	/*
	 * if we are changing raid levels, try to allocate a corresponding
	 * block group with the new raid level.
	 */
	alloc_flags = update_block_group_flags(root, cache->flags);
	if (alloc_flags != cache->flags) {
		ret = do_chunk_alloc(trans, root, alloc_flags,
				     CHUNK_ALLOC_FORCE);
		/*
		 * ENOSPC is allowed here, we may have enough space
		 * already allocated at the new raid level to
		 * carry on
		 */
		if (ret == -ENOSPC)
			ret = 0;
		if (ret < 0)
			goto out;
	}

	ret = inc_block_group_ro(cache, 0);
	if (!ret)
		goto out;
	alloc_flags = get_alloc_profile(root, cache->space_info->flags);
	ret = do_chunk_alloc(trans, root, alloc_flags,
			     CHUNK_ALLOC_FORCE);
	if (ret < 0)
		goto out;
	ret = inc_block_group_ro(cache, 0);
out:
	if (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM) {
		alloc_flags = update_block_group_flags(root, cache->flags);
		lock_chunks(root->fs_info->chunk_root);
		check_system_chunk(trans, root, alloc_flags);
		unlock_chunks(root->fs_info->chunk_root);
	}
	mutex_unlock(&root->fs_info->ro_block_group_mutex);

	btrfs_end_transaction(trans, root);
	return ret;
}

int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root, u64 type)
{
	u64 alloc_flags = get_alloc_profile(root, type);

	return do_chunk_alloc(trans, root, alloc_flags,
			      CHUNK_ALLOC_FORCE);
}

/*
 * helper to account the unused space of all the readonly block groups in the
 * space_info. takes mirrors into account.
 */
u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo)
{
	struct btrfs_block_group_cache *block_group;
	u64 free_bytes = 0;
	int factor;

	/* It's df, we don't care if it's racy */
	if (list_empty(&sinfo->ro_bgs))
		return 0;

	spin_lock(&sinfo->lock);
	list_for_each_entry(block_group, &sinfo->ro_bgs, ro_list) {
		spin_lock(&block_group->lock);

		if (!block_group->ro) {
			spin_unlock(&block_group->lock);
			continue;
		}

		if (block_group->flags & (BTRFS_BLOCK_GROUP_RAID1 |
					  BTRFS_BLOCK_GROUP_RAID10 |
					  BTRFS_BLOCK_GROUP_DUP))
			factor = 2;
		else
			factor = 1;

		free_bytes += (block_group->key.offset -
			       btrfs_block_group_used(&block_group->item)) *
			       factor;

		spin_unlock(&block_group->lock);
	}
	spin_unlock(&sinfo->lock);

	return free_bytes;
}
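
/*
 * Example of the factor math above (illustrative): a read-only RAID1
 * block group with key.offset == 1GiB and 256MiB used contributes
 * (1GiB - 256MiB) * 2 = 1.5GiB of raw free bytes, since every byte is
 * stored twice; a single or RAID0 group of the same size would contribute
 * 768MiB.
 */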

void btrfs_dec_block_group_ro(struct btrfs_root *root,
			      struct btrfs_block_group_cache *cache)
{
	struct btrfs_space_info *sinfo = cache->space_info;
	u64 num_bytes;

	BUG_ON(!cache->ro);

	spin_lock(&sinfo->lock);
	spin_lock(&cache->lock);
	if (!--cache->ro) {
		num_bytes = cache->key.offset - cache->reserved -
			    cache->pinned - cache->bytes_super -
			    btrfs_block_group_used(&cache->item);
		sinfo->bytes_readonly -= num_bytes;
		list_del_init(&cache->ro_list);
	}
	spin_unlock(&cache->lock);
	spin_unlock(&sinfo->lock);
}

/*
 * checks to see if it's even possible to relocate this block group.
 *
 * @return - -1 if it's not a good idea to relocate this block group, 0 if it's
 * ok to go ahead and try.
 */
int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
{
	struct btrfs_block_group_cache *block_group;
	struct btrfs_space_info *space_info;
	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
	struct btrfs_device *device;
	struct btrfs_trans_handle *trans;
	u64 min_free;
	u64 dev_min = 1;
	u64 dev_nr = 0;
	u64 target;
	int debug;
	int index;
	int full = 0;
	int ret = 0;

	debug = btrfs_test_opt(root->fs_info, ENOSPC_DEBUG);

	block_group = btrfs_lookup_block_group(root->fs_info, bytenr);

	/* odd, couldn't find the block group, leave it alone */
	if (!block_group) {
		if (debug)
			btrfs_warn(root->fs_info,
				   "can't find block group for bytenr %llu",
				   bytenr);
		return -1;
	}

	min_free = btrfs_block_group_used(&block_group->item);

	/* no bytes used, we're good */
	if (!min_free)
		goto out;

	space_info = block_group->space_info;
	spin_lock(&space_info->lock);

	full = space_info->full;

	/*
	 * if this is the last block group we have in this space, we can't
	 * relocate it unless we're able to allocate a new chunk below.
	 *
	 * Otherwise, we need to make sure we have room in the space to handle
	 * all of the extents from this block group.  If we can, we're good
	 */
	if ((space_info->total_bytes != block_group->key.offset) &&
	    (space_info->bytes_used + space_info->bytes_reserved +
	     space_info->bytes_pinned + space_info->bytes_readonly +
	     min_free < space_info->total_bytes)) {
		spin_unlock(&space_info->lock);
		goto out;
	}
	spin_unlock(&space_info->lock);

	/*
	 * ok we don't have enough space, but maybe we have free space on our
	 * devices to allocate new chunks for relocation, so loop through our
	 * alloc devices and guess if we have enough space.  if this block
	 * group is going to be restriped, run checks against the target
	 * profile instead of the current one.
	 */
	ret = -1;

	/*
	 * index:
	 *      0: raid10
	 *      1: raid1
	 *      2: dup
	 *      3: raid0
	 *      4: single
	 */
	target = get_restripe_target(root->fs_info, block_group->flags);
	if (target) {
		index = __get_raid_index(extended_to_chunk(target));
	} else {
		/*
		 * this is just a balance, so if we were marked as full
		 * we know there is no space for a new chunk
		 */
		if (full) {
			if (debug)
				btrfs_warn(root->fs_info,
					   "no space to alloc new chunk for block group %llu",
					   block_group->key.objectid);
			goto out;
		}

		index = get_block_group_index(block_group);
	}

	if (index == BTRFS_RAID_RAID10) {
		dev_min = 4;
		/* Divide by 2 */
		min_free >>= 1;
	} else if (index == BTRFS_RAID_RAID1) {
		dev_min = 2;
	} else if (index == BTRFS_RAID_DUP) {
		/* Multiply by 2 */
		min_free <<= 1;
	} else if (index == BTRFS_RAID_RAID0) {
		dev_min = fs_devices->rw_devices;
		min_free = div64_u64(min_free, dev_min);
	}

	/* We need to do this so that we can look at pending chunks */
	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out;
	}

	mutex_lock(&root->fs_info->chunk_mutex);
	list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
		u64 dev_offset;

		/*
		 * check to make sure we can actually find a chunk with enough
		 * space to fit our block group in.
		 */
		if (device->total_bytes > device->bytes_used + min_free &&
		    !device->is_tgtdev_for_dev_replace) {
			ret = find_free_dev_extent(trans, device, min_free,
						   &dev_offset, NULL);
			if (!ret)
				dev_nr++;

			if (dev_nr >= dev_min)
				break;

			ret = -1;
		}
	}
	if (debug && ret == -1)
		btrfs_warn(root->fs_info,
			   "no space to allocate a new chunk for block group %llu",
			   block_group->key.objectid);
	mutex_unlock(&root->fs_info->chunk_mutex);
	btrfs_end_transaction(trans, root);
out:
	btrfs_put_block_group(block_group);
	return ret;
}
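
/*
 * Example of the per-profile sizing above (illustrative): relocating a
 * RAID10 block group with 1GiB used sets dev_min = 4 and halves min_free
 * to 512MiB, so four writable devices must each be able to hold a 512MiB
 * dev extent.  A DUP group instead doubles min_free and keeps dev_min at
 * 1, because both copies land on the same device.
 */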

static int find_first_block_group(struct btrfs_root *root,
				  struct btrfs_path *path, struct btrfs_key *key)
{
	int ret = 0;
	struct btrfs_key found_key;
	struct extent_buffer *leaf;
	int slot;

	ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
	if (ret < 0)
		goto out;

	while (1) {
		slot = path->slots[0];
		leaf = path->nodes[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto out;
			break;
		}
		btrfs_item_key_to_cpu(leaf, &found_key, slot);

		if (found_key.objectid >= key->objectid &&
		    found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
			struct extent_map_tree *em_tree;
			struct extent_map *em;

			em_tree = &root->fs_info->mapping_tree.map_tree;
			read_lock(&em_tree->lock);
			em = lookup_extent_mapping(em_tree, found_key.objectid,
						   found_key.offset);
			read_unlock(&em_tree->lock);
			if (!em) {
				btrfs_err(root->fs_info,
			"logical %llu len %llu found bg but no related chunk",
					  found_key.objectid, found_key.offset);
				ret = -ENOENT;
			} else {
				ret = 0;
			}
			free_extent_map(em);
			goto out;
		}
		path->slots[0]++;
	}
out:
	return ret;
}

void btrfs_put_block_group_cache(struct btrfs_fs_info *info)
{
	struct btrfs_block_group_cache *block_group;
	u64 last = 0;

	while (1) {
		struct inode *inode;

		block_group = btrfs_lookup_first_block_group(info, last);
		while (block_group) {
			spin_lock(&block_group->lock);
			if (block_group->iref)
				break;
			spin_unlock(&block_group->lock);
			block_group = next_block_group(info->tree_root,
						       block_group);
		}
		if (!block_group) {
			if (last == 0)
				break;
			last = 0;
			continue;
		}

		inode = block_group->inode;
		block_group->iref = 0;
		block_group->inode = NULL;
		spin_unlock(&block_group->lock);
		ASSERT(block_group->io_ctl.inode == NULL);
		iput(inode);
		last = block_group->key.objectid + block_group->key.offset;
		btrfs_put_block_group(block_group);
	}
}

int btrfs_free_block_groups(struct btrfs_fs_info *info)
{
	struct btrfs_block_group_cache *block_group;
	struct btrfs_space_info *space_info;
	struct btrfs_caching_control *caching_ctl;
	struct rb_node *n;

	down_write(&info->commit_root_sem);
	while (!list_empty(&info->caching_block_groups)) {
		caching_ctl = list_entry(info->caching_block_groups.next,
					 struct btrfs_caching_control, list);
		list_del(&caching_ctl->list);
		put_caching_control(caching_ctl);
	}
	up_write(&info->commit_root_sem);

	spin_lock(&info->unused_bgs_lock);
	while (!list_empty(&info->unused_bgs)) {
		block_group = list_first_entry(&info->unused_bgs,
					       struct btrfs_block_group_cache,
					       bg_list);
		list_del_init(&block_group->bg_list);
		btrfs_put_block_group(block_group);
	}
	spin_unlock(&info->unused_bgs_lock);

	spin_lock(&info->block_group_cache_lock);
	while ((n = rb_last(&info->block_group_cache_tree)) != NULL) {
		block_group = rb_entry(n, struct btrfs_block_group_cache,
				       cache_node);
		rb_erase(&block_group->cache_node,
			 &info->block_group_cache_tree);
		RB_CLEAR_NODE(&block_group->cache_node);
		spin_unlock(&info->block_group_cache_lock);

		down_write(&block_group->space_info->groups_sem);
		list_del(&block_group->list);
		up_write(&block_group->space_info->groups_sem);

		if (block_group->cached == BTRFS_CACHE_STARTED)
			wait_block_group_cache_done(block_group);

		/*
		 * We haven't cached this block group, which means we could
		 * possibly have excluded extents on this block group.
		 */
		if (block_group->cached == BTRFS_CACHE_NO ||
		    block_group->cached == BTRFS_CACHE_ERROR)
			free_excluded_extents(info->extent_root, block_group);

		btrfs_remove_free_space_cache(block_group);
		ASSERT(list_empty(&block_group->dirty_list));
		ASSERT(list_empty(&block_group->io_list));
		ASSERT(list_empty(&block_group->bg_list));
		ASSERT(atomic_read(&block_group->count) == 1);
		btrfs_put_block_group(block_group);

		spin_lock(&info->block_group_cache_lock);
	}
	spin_unlock(&info->block_group_cache_lock);

	/* now that all the block groups are freed, go through and
	 * free all the space_info structs.  This is only called during
	 * the final stages of unmount, and so we know nobody is
	 * using them.  We call synchronize_rcu() once before we start,
	 * just to be on the safe side.
	 */
	synchronize_rcu();

	release_global_block_rsv(info);

	while (!list_empty(&info->space_info)) {
		int i;

		space_info = list_entry(info->space_info.next,
					struct btrfs_space_info,
					list);

		/*
		 * Do not hide this behind enospc_debug, this is actually
		 * important and indicates a real bug if this happens.
		 */
		if (WARN_ON(space_info->bytes_pinned > 0 ||
			    space_info->bytes_reserved > 0 ||
			    space_info->bytes_may_use > 0))
			dump_space_info(space_info, 0, 0);
		list_del(&space_info->list);
		for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
			struct kobject *kobj;

			kobj = space_info->block_group_kobjs[i];
			space_info->block_group_kobjs[i] = NULL;
			if (kobj) {
				kobject_del(kobj);
				kobject_put(kobj);
			}
		}
		kobject_del(&space_info->kobj);
		kobject_put(&space_info->kobj);
	}
	return 0;
}

static void __link_block_group(struct btrfs_space_info *space_info,
			       struct btrfs_block_group_cache *cache)
{
	int index = get_block_group_index(cache);
	bool first = false;

	down_write(&space_info->groups_sem);
	if (list_empty(&space_info->block_groups[index]))
		first = true;
	list_add_tail(&cache->list, &space_info->block_groups[index]);
	up_write(&space_info->groups_sem);

	if (first) {
		struct raid_kobject *rkobj;
		int ret;

		rkobj = kzalloc(sizeof(*rkobj), GFP_NOFS);
		if (!rkobj)
			goto out_err;
		rkobj->raid_type = index;
		kobject_init(&rkobj->kobj, &btrfs_raid_ktype);
		ret = kobject_add(&rkobj->kobj, &space_info->kobj,
				  "%s", get_raid_name(index));
		if (ret) {
			kobject_put(&rkobj->kobj);
			goto out_err;
		}
		space_info->block_group_kobjs[index] = &rkobj->kobj;
	}

	return;
out_err:
	pr_warn("BTRFS: failed to add kobject for block cache. ignoring.\n");
}
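
/*
 * Note on the sysfs wiring above (a reading of the code, not a spec): the
 * first block group of a given raid index within a space_info creates a
 * child kobject under the space_info's kobject, named after the raid
 * profile.  Failure is deliberately non-fatal: the function only
 * pr_warn()s, since the filesystem works fine without the sysfs entry.
 */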

static struct btrfs_block_group_cache *
btrfs_create_block_group_cache(struct btrfs_root *root, u64 start, u64 size)
{
	struct btrfs_block_group_cache *cache;

	cache = kzalloc(sizeof(*cache), GFP_NOFS);
	if (!cache)
		return NULL;

	cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
					GFP_NOFS);
	if (!cache->free_space_ctl) {
		kfree(cache);
		return NULL;
	}

	cache->key.objectid = start;
	cache->key.offset = size;
	cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;

	cache->sectorsize = root->sectorsize;
	cache->fs_info = root->fs_info;
	cache->full_stripe_len = btrfs_full_stripe_len(root,
					       &root->fs_info->mapping_tree,
					       start);
	set_free_space_tree_thresholds(cache);

	atomic_set(&cache->count, 1);
	spin_lock_init(&cache->lock);
	init_rwsem(&cache->data_rwsem);
	INIT_LIST_HEAD(&cache->list);
	INIT_LIST_HEAD(&cache->cluster_list);
	INIT_LIST_HEAD(&cache->bg_list);
	INIT_LIST_HEAD(&cache->ro_list);
	INIT_LIST_HEAD(&cache->dirty_list);
	INIT_LIST_HEAD(&cache->io_list);
	btrfs_init_free_space_ctl(cache);
	atomic_set(&cache->trimming, 0);
	mutex_init(&cache->free_space_lock);

	return cache;
}
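
/*
 * Usage sketch (grounded in the two callers below):
 * btrfs_create_block_group_cache() only builds the in-memory structure;
 * callers fill in cache->item and cache->flags themselves.
 * btrfs_read_block_groups() copies the on-disk block group item into it,
 * while btrfs_make_block_group() constructs the item for a brand new
 * chunk and queues the group on trans->new_bgs.
 */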

int btrfs_read_block_groups(struct btrfs_root *root)
{
	struct btrfs_path *path;
	int ret;
	struct btrfs_block_group_cache *cache;
	struct btrfs_fs_info *info = root->fs_info;
	struct btrfs_space_info *space_info;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *leaf;
	int need_clear = 0;
	u64 cache_gen;
	u64 feature;
	int mixed;

	feature = btrfs_super_incompat_flags(info->super_copy);
	mixed = !!(feature & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS);

	root = info->extent_root;
	key.objectid = 0;
	key.offset = 0;
	key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->reada = READA_FORWARD;

	cache_gen = btrfs_super_cache_generation(root->fs_info->super_copy);
	if (btrfs_test_opt(root->fs_info, SPACE_CACHE) &&
	    btrfs_super_generation(root->fs_info->super_copy) != cache_gen)
		need_clear = 1;
	if (btrfs_test_opt(root->fs_info, CLEAR_CACHE))
		need_clear = 1;

	while (1) {
		ret = find_first_block_group(root, path, &key);
		if (ret > 0)
			break;
		if (ret != 0)
			goto error;

		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

		cache = btrfs_create_block_group_cache(root, found_key.objectid,
						       found_key.offset);
		if (!cache) {
			ret = -ENOMEM;
			goto error;
		}

		if (need_clear) {
			/*
			 * When we mount with old space cache, we need to
			 * set BTRFS_DC_CLEAR and set dirty flag.
			 *
			 * a) Setting 'BTRFS_DC_CLEAR' makes sure that we
			 *    truncate the old free space cache inode and
			 *    setup a new one.
			 * b) Setting 'dirty flag' makes sure that we flush
			 *    the new space cache info onto disk.
			 */
			if (btrfs_test_opt(root->fs_info, SPACE_CACHE))
				cache->disk_cache_state = BTRFS_DC_CLEAR;
		}

		read_extent_buffer(leaf, &cache->item,
				   btrfs_item_ptr_offset(leaf, path->slots[0]),
				   sizeof(cache->item));
		cache->flags = btrfs_block_group_flags(&cache->item);
		if (!mixed &&
		    ((cache->flags & BTRFS_BLOCK_GROUP_METADATA) &&
		     (cache->flags & BTRFS_BLOCK_GROUP_DATA))) {
			btrfs_err(info,
"bg %llu is a mixed block group but filesystem hasn't enabled mixed block groups",
				  cache->key.objectid);
			ret = -EINVAL;
			goto error;
		}

		key.objectid = found_key.objectid + found_key.offset;
		btrfs_release_path(path);

		/*
		 * We need to exclude the super stripes now so that the space
		 * info has super bytes accounted for, otherwise we'll think
		 * we have more space than we actually do.
		 */
		ret = exclude_super_stripes(root, cache);
		if (ret) {
			/*
			 * We may have excluded something, so call this just in
			 * case.
			 */
			free_excluded_extents(root, cache);
			btrfs_put_block_group(cache);
			goto error;
		}

		/*
		 * check for two cases, either we are full, and therefore
		 * don't need to bother with the caching work since we won't
		 * find any space, or we are empty, and we can just add all
		 * the space in and be done with it.  This saves us a lot of
		 * time, particularly in the full case.
		 */
		if (found_key.offset == btrfs_block_group_used(&cache->item)) {
			cache->last_byte_to_unpin = (u64)-1;
			cache->cached = BTRFS_CACHE_FINISHED;
			free_excluded_extents(root, cache);
		} else if (btrfs_block_group_used(&cache->item) == 0) {
			cache->last_byte_to_unpin = (u64)-1;
			cache->cached = BTRFS_CACHE_FINISHED;
			add_new_free_space(cache, root->fs_info,
					   found_key.objectid,
					   found_key.objectid +
					   found_key.offset);
			free_excluded_extents(root, cache);
		}

		ret = btrfs_add_block_group_cache(root->fs_info, cache);
		if (ret) {
			btrfs_remove_free_space_cache(cache);
			btrfs_put_block_group(cache);
			goto error;
		}

		trace_btrfs_add_block_group(root->fs_info, cache, 0);
		ret = update_space_info(info, cache->flags, found_key.offset,
					btrfs_block_group_used(&cache->item),
					cache->bytes_super, &space_info);
		if (ret) {
			btrfs_remove_free_space_cache(cache);
			spin_lock(&info->block_group_cache_lock);
			rb_erase(&cache->cache_node,
				 &info->block_group_cache_tree);
			RB_CLEAR_NODE(&cache->cache_node);
			spin_unlock(&info->block_group_cache_lock);
			btrfs_put_block_group(cache);
			goto error;
		}

		cache->space_info = space_info;

		__link_block_group(space_info, cache);

		set_avail_alloc_bits(root->fs_info, cache->flags);
		if (btrfs_chunk_readonly(root, cache->key.objectid)) {
			inc_block_group_ro(cache, 1);
		} else if (btrfs_block_group_used(&cache->item) == 0) {
			spin_lock(&info->unused_bgs_lock);
			/* Should always be true but just in case. */
			if (list_empty(&cache->bg_list)) {
				btrfs_get_block_group(cache);
				list_add_tail(&cache->bg_list,
					      &info->unused_bgs);
			}
			spin_unlock(&info->unused_bgs_lock);
		}
	}

	list_for_each_entry_rcu(space_info, &root->fs_info->space_info, list) {
		if (!(get_alloc_profile(root, space_info->flags) &
		      (BTRFS_BLOCK_GROUP_RAID10 |
		       BTRFS_BLOCK_GROUP_RAID1 |
		       BTRFS_BLOCK_GROUP_RAID5 |
		       BTRFS_BLOCK_GROUP_RAID6 |
		       BTRFS_BLOCK_GROUP_DUP)))
			continue;
		/*
		 * avoid allocating from un-mirrored block group if there are
		 * mirrored block groups.
		 */
		list_for_each_entry(cache,
				&space_info->block_groups[BTRFS_RAID_RAID0],
				list)
			inc_block_group_ro(cache, 1);
		list_for_each_entry(cache,
				&space_info->block_groups[BTRFS_RAID_SINGLE],
				list)
			inc_block_group_ro(cache, 1);
	}

	init_global_block_rsv(info);
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}

void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root)
{
	struct btrfs_block_group_cache *block_group, *tmp;
	struct btrfs_root *extent_root = root->fs_info->extent_root;
	struct btrfs_block_group_item item;
	struct btrfs_key key;
	int ret = 0;
	bool can_flush_pending_bgs = trans->can_flush_pending_bgs;

	trans->can_flush_pending_bgs = false;
	list_for_each_entry_safe(block_group, tmp, &trans->new_bgs, bg_list) {
		if (ret)
			goto next;

		spin_lock(&block_group->lock);
		memcpy(&item, &block_group->item, sizeof(item));
		memcpy(&key, &block_group->key, sizeof(key));
		spin_unlock(&block_group->lock);

		ret = btrfs_insert_item(trans, extent_root, &key, &item,
					sizeof(item));
		if (ret)
			btrfs_abort_transaction(trans, ret);
		ret = btrfs_finish_chunk_alloc(trans, extent_root,
					       key.objectid, key.offset);
		if (ret)
			btrfs_abort_transaction(trans, ret);
		add_block_group_free_space(trans, root->fs_info, block_group);
		/* already aborted the transaction if it failed. */
next:
		list_del_init(&block_group->bg_list);
	}
	trans->can_flush_pending_bgs = can_flush_pending_bgs;
}
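
/*
 * Sketch of the deferred-insert protocol above (a reading of the code):
 * btrfs_make_block_group() below only queues a new group on
 * trans->new_bgs; the block group item and the chunk mapping reach the
 * extent and chunk trees here, at transaction commit time, via
 * btrfs_insert_item() and btrfs_finish_chunk_alloc().
 * can_flush_pending_bgs is cleared for the duration of the loop so the
 * insertions themselves do not trigger another flush of pending block
 * groups.
 */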

int btrfs_make_block_group(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root, u64 bytes_used,
			   u64 type, u64 chunk_objectid, u64 chunk_offset,
			   u64 size)
{
	int ret;
	struct btrfs_root *extent_root;
	struct btrfs_block_group_cache *cache;

	extent_root = root->fs_info->extent_root;

	btrfs_set_log_full_commit(root->fs_info, trans);

	cache = btrfs_create_block_group_cache(root, chunk_offset, size);
	if (!cache)
		return -ENOMEM;

	btrfs_set_block_group_used(&cache->item, bytes_used);
	btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid);
	btrfs_set_block_group_flags(&cache->item, type);

	cache->flags = type;
	cache->last_byte_to_unpin = (u64)-1;
	cache->cached = BTRFS_CACHE_FINISHED;
	cache->needs_free_space = 1;
	ret = exclude_super_stripes(root, cache);
	if (ret) {
		/*
		 * We may have excluded something, so call this just in
		 * case.
		 */
		free_excluded_extents(root, cache);
		btrfs_put_block_group(cache);
		return ret;
	}

	add_new_free_space(cache, root->fs_info, chunk_offset,
			   chunk_offset + size);

	free_excluded_extents(root, cache);

#ifdef CONFIG_BTRFS_DEBUG
	if (btrfs_should_fragment_free_space(root, cache)) {
		u64 new_bytes_used = size - bytes_used;

		bytes_used += new_bytes_used >> 1;
		fragment_free_space(root, cache);
	}
#endif
	/*
	 * Call to ensure the corresponding space_info object is created and
	 * assigned to our block group, but don't update its counters just yet.
	 * We want our bg to be added to the rbtree with its ->space_info set.
	 */
	ret = update_space_info(root->fs_info, cache->flags, 0, 0, 0,
				&cache->space_info);
	if (ret) {
		btrfs_remove_free_space_cache(cache);
		btrfs_put_block_group(cache);
		return ret;
	}

	ret = btrfs_add_block_group_cache(root->fs_info, cache);
	if (ret) {
		btrfs_remove_free_space_cache(cache);
		btrfs_put_block_group(cache);
		return ret;
	}

	/*
	 * Now that our block group has its ->space_info set and is inserted in
	 * the rbtree, update the space info's counters.
	 */
	trace_btrfs_add_block_group(root->fs_info, cache, 1);
	ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
				cache->bytes_super, &cache->space_info);
	if (ret) {
		btrfs_remove_free_space_cache(cache);
		spin_lock(&root->fs_info->block_group_cache_lock);
		rb_erase(&cache->cache_node,
			 &root->fs_info->block_group_cache_tree);
		RB_CLEAR_NODE(&cache->cache_node);
		spin_unlock(&root->fs_info->block_group_cache_lock);
		btrfs_put_block_group(cache);
		return ret;
	}
	update_global_block_rsv(root->fs_info);

	__link_block_group(cache->space_info, cache);

	list_add_tail(&cache->bg_list, &trans->new_bgs);

	set_avail_alloc_bits(extent_root->fs_info, type);
	return 0;
}
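
/*
 * Worked sequence for the two update_space_info() calls above
 * (illustrative): the first call passes 0/0/0 so the new group merely
 * gets its ->space_info pointer before it becomes visible in the rbtree;
 * the second call, made only after btrfs_add_block_group_cache()
 * succeeds, adds the real size/bytes_used/bytes_super to the space_info
 * counters.  Splitting it this way means a group can never be found in
 * the rbtree with a NULL space_info.
 */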

static void clear_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
{
	u64 extra_flags = chunk_to_extended(flags) &
				BTRFS_EXTENDED_PROFILE_MASK;

	write_seqlock(&fs_info->profiles_lock);
	if (flags & BTRFS_BLOCK_GROUP_DATA)
		fs_info->avail_data_alloc_bits &= ~extra_flags;
	if (flags & BTRFS_BLOCK_GROUP_METADATA)
		fs_info->avail_metadata_alloc_bits &= ~extra_flags;
	if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
		fs_info->avail_system_alloc_bits &= ~extra_flags;
	write_sequnlock(&fs_info->profiles_lock);
}

int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root, u64 group_start,
			     struct extent_map *em)
{
	struct btrfs_path *path;
	struct btrfs_block_group_cache *block_group;
	struct btrfs_free_cluster *cluster;
	struct btrfs_root *tree_root = root->fs_info->tree_root;
	struct btrfs_key key;
	struct inode *inode;
	struct kobject *kobj = NULL;
	int ret;
	int index;
	int factor;
	struct btrfs_caching_control *caching_ctl = NULL;
	bool remove_em;

	root = root->fs_info->extent_root;

	block_group = btrfs_lookup_block_group(root->fs_info, group_start);
	BUG_ON(!block_group);
	BUG_ON(!block_group->ro);

	/*
	 * Free the reserved super bytes from this block group before
	 * removing it.
	 */
	free_excluded_extents(root, block_group);

	memcpy(&key, &block_group->key, sizeof(key));
	index = get_block_group_index(block_group);
	if (block_group->flags & (BTRFS_BLOCK_GROUP_DUP |
				  BTRFS_BLOCK_GROUP_RAID1 |
				  BTRFS_BLOCK_GROUP_RAID10))
		factor = 2;
	else
		factor = 1;

	/* make sure this block group isn't part of an allocation cluster */
	cluster = &root->fs_info->data_alloc_cluster;
	spin_lock(&cluster->refill_lock);
	btrfs_return_cluster_to_free_space(block_group, cluster);
	spin_unlock(&cluster->refill_lock);

	/*
	 * make sure this block group isn't part of a metadata
	 * allocation cluster
	 */
	cluster = &root->fs_info->meta_alloc_cluster;
	spin_lock(&cluster->refill_lock);
	btrfs_return_cluster_to_free_space(block_group, cluster);
	spin_unlock(&cluster->refill_lock);

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * get the inode first so any iput calls done for the io_list
	 * aren't the final iput (no unlinks allowed now)
	 */
	inode = lookup_free_space_inode(tree_root, block_group, path);

	mutex_lock(&trans->transaction->cache_write_mutex);

	/*
	 * make sure our free space cache IO is done before removing the
	 * free space inode
	 */
	spin_lock(&trans->transaction->dirty_bgs_lock);
	if (!list_empty(&block_group->io_list)) {
		list_del_init(&block_group->io_list);

		WARN_ON(!IS_ERR(inode) && inode != block_group->io_ctl.inode);

		spin_unlock(&trans->transaction->dirty_bgs_lock);
		btrfs_wait_cache_io(root, trans, block_group,
				    &block_group->io_ctl, path,
				    block_group->key.objectid);
		btrfs_put_block_group(block_group);
		spin_lock(&trans->transaction->dirty_bgs_lock);
	}

	if (!list_empty(&block_group->dirty_list)) {
		list_del_init(&block_group->dirty_list);
		btrfs_put_block_group(block_group);
	}
	spin_unlock(&trans->transaction->dirty_bgs_lock);
	mutex_unlock(&trans->transaction->cache_write_mutex);

	if (!IS_ERR(inode)) {
		ret = btrfs_orphan_add(trans, inode);
		if (ret) {
			btrfs_add_delayed_iput(inode);
			goto out;
		}
		clear_nlink(inode);
		/* One for the block groups ref */
		spin_lock(&block_group->lock);
		if (block_group->iref) {
			block_group->iref = 0;
			block_group->inode = NULL;
			spin_unlock(&block_group->lock);
			iput(inode);
		} else {
			spin_unlock(&block_group->lock);
		}
		/* One for our lookup ref */
		btrfs_add_delayed_iput(inode);
	}

	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.offset = block_group->key.objectid;
	key.type = 0;

	ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1);
	if (ret < 0)
		goto out;
	if (ret > 0)
		btrfs_release_path(path);
	if (ret == 0) {
		ret = btrfs_del_item(trans, tree_root, path);
		if (ret)
			goto out;
		btrfs_release_path(path);
	}

	spin_lock(&root->fs_info->block_group_cache_lock);
	rb_erase(&block_group->cache_node,
		 &root->fs_info->block_group_cache_tree);
	RB_CLEAR_NODE(&block_group->cache_node);

	if (root->fs_info->first_logical_byte == block_group->key.objectid)
		root->fs_info->first_logical_byte = (u64)-1;
	spin_unlock(&root->fs_info->block_group_cache_lock);

	down_write(&block_group->space_info->groups_sem);
	/*
	 * we must use list_del_init so people can check to see if they
	 * are still on the list after taking the semaphore
	 */
	list_del_init(&block_group->list);
	if (list_empty(&block_group->space_info->block_groups[index])) {
		kobj = block_group->space_info->block_group_kobjs[index];
		block_group->space_info->block_group_kobjs[index] = NULL;
		clear_avail_alloc_bits(root->fs_info, block_group->flags);
	}
	up_write(&block_group->space_info->groups_sem);
	if (kobj) {
		kobject_del(kobj);
		kobject_put(kobj);
	}

	if (block_group->has_caching_ctl)
		caching_ctl = get_caching_control(block_group);
	if (block_group->cached == BTRFS_CACHE_STARTED)
		wait_block_group_cache_done(block_group);
	if (block_group->has_caching_ctl) {
		down_write(&root->fs_info->commit_root_sem);
		if (!caching_ctl) {
			struct btrfs_caching_control *ctl;

			list_for_each_entry(ctl,
				    &root->fs_info->caching_block_groups, list)
				if (ctl->block_group == block_group) {
					caching_ctl = ctl;
					atomic_inc(&caching_ctl->count);
					break;
				}
		}
		if (caching_ctl)
			list_del_init(&caching_ctl->list);
		up_write(&root->fs_info->commit_root_sem);
		if (caching_ctl) {
			/* Once for the caching bgs list and once for us. */
			put_caching_control(caching_ctl);
			put_caching_control(caching_ctl);
		}
	}

	spin_lock(&trans->transaction->dirty_bgs_lock);
	WARN_ON(!list_empty(&block_group->dirty_list));
	WARN_ON(!list_empty(&block_group->io_list));
	spin_unlock(&trans->transaction->dirty_bgs_lock);

	btrfs_remove_free_space_cache(block_group);

	spin_lock(&block_group->space_info->lock);
	list_del_init(&block_group->ro_list);

	if (btrfs_test_opt(root->fs_info, ENOSPC_DEBUG)) {
		WARN_ON(block_group->space_info->total_bytes
			< block_group->key.offset);
		WARN_ON(block_group->space_info->bytes_readonly
			< block_group->key.offset);
		WARN_ON(block_group->space_info->disk_total
			< block_group->key.offset * factor);
	}
	block_group->space_info->total_bytes -= block_group->key.offset;
	block_group->space_info->bytes_readonly -= block_group->key.offset;
	block_group->space_info->disk_total -= block_group->key.offset * factor;

	spin_unlock(&block_group->space_info->lock);

	memcpy(&key, &block_group->key, sizeof(key));

	lock_chunks(root);
	if (!list_empty(&em->list)) {
		/* We're in the transaction->pending_chunks list. */
		free_extent_map(em);
	}
	spin_lock(&block_group->lock);
	block_group->removed = 1;
	/*
	 * At this point trimming can't start on this block group, because we
	 * removed the block group from the tree fs_info->block_group_cache_tree
	 * so no one can find it anymore and even if someone already got this
	 * block group before we removed it from the rbtree, they have already
	 * incremented block_group->trimming - if they didn't, they won't find
	 * any free space entries because we already removed them all when we
	 * called btrfs_remove_free_space_cache().
	 *
	 * And we must not remove the extent map from the fs_info->mapping_tree
	 * to prevent the same logical address range and physical device space
	 * ranges from being reused for a new block group. This is because our
	 * fs trim operation (btrfs_trim_fs() / btrfs_ioctl_fitrim()) is
	 * completely transactionless, so while it is trimming a range the
	 * currently running transaction might finish and a new one start,
	 * allowing for new block groups to be created that can reuse the same
	 * physical device locations unless we take this special care.
	 *
	 * There may also be an implicit trim operation if the file system
	 * is mounted with -odiscard. The same protections must remain
	 * in place until the extents have been discarded completely when
	 * the transaction commit has completed.
	 */
	remove_em = (atomic_read(&block_group->trimming) == 0);
	/*
	 * Make sure a trimmer task always sees the em in the pinned_chunks list
	 * if it sees block_group->removed == 1 (needs to lock block_group->lock
	 * before checking block_group->removed).
	 */
	if (!remove_em) {
		/*
		 * Our em might be in trans->transaction->pending_chunks which
		 * is protected by fs_info->chunk_mutex ([lock|unlock]_chunks),
		 * and so is the fs_info->pinned_chunks list.
		 *
		 * So at this point we must be holding the chunk_mutex to avoid
		 * any races with chunk allocation (more specifically at
		 * volumes.c:contains_pending_extent()), to ensure it always
		 * sees the em, either in the pending_chunks list or in the
		 * pinned_chunks list.
		 */
		list_move_tail(&em->list, &root->fs_info->pinned_chunks);
	}
	spin_unlock(&block_group->lock);

	if (remove_em) {
		struct extent_map_tree *em_tree;

		em_tree = &root->fs_info->mapping_tree.map_tree;
		write_lock(&em_tree->lock);
		/*
		 * The em might be in the pending_chunks list, so make sure the
		 * chunk mutex is locked, since remove_extent_mapping() will
		 * delete us from that list.
		 */
		remove_extent_mapping(em_tree, em);
		write_unlock(&em_tree->lock);
		/* once for the tree */
		free_extent_map(em);
	}

	unlock_chunks(root);

	ret = remove_block_group_free_space(trans, root->fs_info, block_group);
	if (ret)
		goto out;

	btrfs_put_block_group(block_group);
	btrfs_put_block_group(block_group);

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0)
		ret = -EIO;
	if (ret < 0)
		goto out;

	ret = btrfs_del_item(trans, root, path);
out:
	btrfs_free_path(path);
	return ret;
}

struct btrfs_trans_handle *
btrfs_start_trans_remove_block_group(struct btrfs_fs_info *fs_info,
				     const u64 chunk_offset)
{
	struct extent_map_tree *em_tree = &fs_info->mapping_tree.map_tree;
	struct extent_map *em;
	struct map_lookup *map;
	unsigned int num_items;

	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, chunk_offset, 1);
	read_unlock(&em_tree->lock);
	ASSERT(em && em->start == chunk_offset);

	/*
	 * We need to reserve 3 + N units from the metadata space info in order
	 * to remove a block group (done at btrfs_remove_chunk() and at
	 * btrfs_remove_block_group()), which are used for:
	 *
	 * 1 unit for adding the free space inode's orphan (located in the tree
	 * of tree roots).
	 * 1 unit for deleting the block group item (located in the extent
	 * tree).
	 * 1 unit for deleting the free space item (located in tree of tree
	 * roots).
	 * N units for deleting N device extent items corresponding to each
	 * stripe (located in the device tree).
	 *
	 * In order to remove a block group we also need to reserve units in the
	 * system space info in order to update the chunk tree (update one or
	 * more device items and remove one chunk item), but this is done at
	 * btrfs_remove_chunk() through a call to check_system_chunk().
	 */
	map = em->map_lookup;
	num_items = 3 + map->num_stripes;
	free_extent_map(em);

	return btrfs_start_transaction_fallback_global_rsv(fs_info->extent_root,
							   num_items, 1);
}
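
/*
 * Example of the reservation count above (illustrative): deleting a RAID1
 * block group whose chunk has two stripes needs num_items = 3 + 2 = 5
 * metadata units: one for the free space inode's orphan item, one for the
 * block group item, one for the free space item, and one per device
 * extent.
 */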

/*
 * Process the unused_bgs list and remove any that don't have any allocated
 * space inside of them.
 */
  9523. void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
  9524. {
  9525. struct btrfs_block_group_cache *block_group;
  9526. struct btrfs_space_info *space_info;
  9527. struct btrfs_root *root = fs_info->extent_root;
  9528. struct btrfs_trans_handle *trans;
  9529. int ret = 0;
  9530. if (!test_bit(BTRFS_FS_OPEN, &fs_info->flags))
  9531. return;
  9532. spin_lock(&fs_info->unused_bgs_lock);
  9533. while (!list_empty(&fs_info->unused_bgs)) {
  9534. u64 start, end;
  9535. int trimming;
  9536. block_group = list_first_entry(&fs_info->unused_bgs,
  9537. struct btrfs_block_group_cache,
  9538. bg_list);
  9539. list_del_init(&block_group->bg_list);
  9540. space_info = block_group->space_info;
		if (ret || btrfs_mixed_space_info(space_info)) {
			btrfs_put_block_group(block_group);
			continue;
		}
		spin_unlock(&fs_info->unused_bgs_lock);

		mutex_lock(&fs_info->delete_unused_bgs_mutex);

		/* Don't want to race with allocators so take the groups_sem */
		down_write(&space_info->groups_sem);
		spin_lock(&block_group->lock);
		if (block_group->reserved ||
		    btrfs_block_group_used(&block_group->item) ||
		    (block_group->ro && !block_group->removed) ||
		    list_is_singular(&block_group->list)) {
			/*
			 * We want to bail if we made new allocations or have
			 * outstanding allocations in this block group. We do
			 * the ro check in case balance is currently acting on
			 * this block group.
			 */
			spin_unlock(&block_group->lock);
			up_write(&space_info->groups_sem);
			goto next;
		}
		spin_unlock(&block_group->lock);

		/* We don't want to force the issue, only flip if it's ok. */
		ret = inc_block_group_ro(block_group, 0);
		up_write(&space_info->groups_sem);
		if (ret < 0) {
			ret = 0;
			goto next;
		}

		/*
		 * We want to do this before anything else so we can recover
		 * properly if we fail to join the transaction.
		 */
		trans = btrfs_start_trans_remove_block_group(fs_info,
						     block_group->key.objectid);
		if (IS_ERR(trans)) {
			btrfs_dec_block_group_ro(root, block_group);
			ret = PTR_ERR(trans);
			goto next;
		}

		/*
		 * We could have pending pinned extents for this block group,
		 * just delete them, we don't care about them anymore.
		 */
		start = block_group->key.objectid;
		end = start + block_group->key.offset - 1;
		/*
		 * Hold the unused_bg_unpin_mutex lock to avoid racing with
		 * btrfs_finish_extent_commit(). If we are at transaction N,
		 * another task might be running finish_extent_commit() for the
		 * previous transaction N - 1, and have seen a range belonging
		 * to the block group in freed_extents[] before we were able to
		 * clear the whole block group range from freed_extents[]. This
		 * means that task can look up the block group after we
		 * unpinned it from freed_extents[] and removed it, leading to
		 * a BUG_ON() at btrfs_unpin_extent_range().
		 */
		mutex_lock(&fs_info->unused_bg_unpin_mutex);
		ret = clear_extent_bits(&fs_info->freed_extents[0], start, end,
					EXTENT_DIRTY);
		if (ret) {
			mutex_unlock(&fs_info->unused_bg_unpin_mutex);
			btrfs_dec_block_group_ro(root, block_group);
			goto end_trans;
		}
		ret = clear_extent_bits(&fs_info->freed_extents[1], start, end,
					EXTENT_DIRTY);
		if (ret) {
			mutex_unlock(&fs_info->unused_bg_unpin_mutex);
			btrfs_dec_block_group_ro(root, block_group);
			goto end_trans;
		}
		mutex_unlock(&fs_info->unused_bg_unpin_mutex);

		/* Reset pinned so btrfs_put_block_group doesn't complain */
		spin_lock(&space_info->lock);
		spin_lock(&block_group->lock);

		space_info->bytes_pinned -= block_group->pinned;
		space_info->bytes_readonly += block_group->pinned;
		percpu_counter_add(&space_info->total_bytes_pinned,
				   -block_group->pinned);
		block_group->pinned = 0;

		spin_unlock(&block_group->lock);
		spin_unlock(&space_info->lock);

		/* DISCARD can flip during remount */
		trimming = btrfs_test_opt(root->fs_info, DISCARD);

		/* Implicit trim during transaction commit. */
		if (trimming)
			btrfs_get_block_group_trimming(block_group);

		/*
		 * btrfs_remove_chunk() will abort the transaction if things
		 * go horribly wrong.
		 */
		ret = btrfs_remove_chunk(trans, root,
					 block_group->key.objectid);

		if (ret) {
			if (trimming)
				btrfs_put_block_group_trimming(block_group);
			goto end_trans;
		}

		/*
		 * If we're not mounted with -odiscard, we can just forget
		 * about this block group. Otherwise we'll need to wait
		 * until transaction commit to do the actual discard.
		 */
		if (trimming) {
			spin_lock(&fs_info->unused_bgs_lock);
			/*
			 * A concurrent scrub might have added us to the list
			 * fs_info->unused_bgs, so use a list_move operation
			 * to add the block group to the deleted_bgs list.
			 */
			list_move(&block_group->bg_list,
				  &trans->transaction->deleted_bgs);
			spin_unlock(&fs_info->unused_bgs_lock);
			btrfs_get_block_group(block_group);
		}
end_trans:
		btrfs_end_transaction(trans, root);
next:
		mutex_unlock(&fs_info->delete_unused_bgs_mutex);
		btrfs_put_block_group(block_group);
		spin_lock(&fs_info->unused_bgs_lock);
	}
	spin_unlock(&fs_info->unused_bgs_lock);
}

int btrfs_init_space_info(struct btrfs_fs_info *fs_info)
{
	struct btrfs_space_info *space_info;
	struct btrfs_super_block *disk_super;
	u64 features;
	u64 flags;
	int mixed = 0;
	int ret;

	disk_super = fs_info->super_copy;
	if (!btrfs_super_root(disk_super))
		return -EINVAL;

	features = btrfs_super_incompat_flags(disk_super);
	if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
		mixed = 1;

	flags = BTRFS_BLOCK_GROUP_SYSTEM;
	ret = update_space_info(fs_info, flags, 0, 0, 0, &space_info);
	if (ret)
		goto out;
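
	/*
	 * With the MIXED_GROUPS incompat feature, data and metadata share a
	 * single space_info; otherwise each gets its own.
	 */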
	if (mixed) {
		flags = BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA;
		ret = update_space_info(fs_info, flags, 0, 0, 0, &space_info);
	} else {
		flags = BTRFS_BLOCK_GROUP_METADATA;
		ret = update_space_info(fs_info, flags, 0, 0, 0, &space_info);
		if (ret)
			goto out;

		flags = BTRFS_BLOCK_GROUP_DATA;
		ret = update_space_info(fs_info, flags, 0, 0, 0, &space_info);
	}
out:
	return ret;
}

int btrfs_error_unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
{
	return unpin_extent_range(root, start, end, false);
}

/*
 * It used to be that old block groups would be left around forever.
 * Iterating over them would be enough to trim unused space. Since we
 * now automatically remove them, we also need to iterate over unallocated
 * space.
 *
 * We don't want a transaction for this since the discard may take a
 * substantial amount of time. We don't require that a transaction be
 * running, but we do need to take a running transaction into account
 * to ensure that we're not discarding chunks that were released in
 * the current transaction.
 *
 * Holding the chunks lock will prevent other threads from allocating
 * or releasing chunks, but it won't prevent a running transaction
 * from committing and releasing the memory that the pending chunks
 * list head uses. For that, we need to take a reference to the
 * transaction.
 */
static int btrfs_trim_free_extents(struct btrfs_device *device,
				   u64 minlen, u64 *trimmed)
{
	u64 start = 0, len = 0;
	int ret;

	*trimmed = 0;

	/* Not writeable = nothing to do. */
	if (!device->writeable)
		return 0;

	/* No free space = nothing to do. */
	if (device->total_bytes <= device->bytes_used)
		return 0;

	ret = 0;

	while (1) {
		struct btrfs_fs_info *fs_info = device->dev_root->fs_info;
		struct btrfs_transaction *trans;
		u64 bytes;

		ret = mutex_lock_interruptible(&fs_info->chunk_mutex);
		if (ret)
			return ret;

		down_read(&fs_info->commit_root_sem);
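
		/*
		 * Pin the running transaction (if any) so its pending chunks
		 * list stays alive while we search for a free extent.
		 */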
		spin_lock(&fs_info->trans_lock);
		trans = fs_info->running_transaction;
		if (trans)
			atomic_inc(&trans->use_count);
		spin_unlock(&fs_info->trans_lock);

		ret = find_free_dev_extent_start(trans, device, minlen, start,
						 &start, &len);
		if (trans)
			btrfs_put_transaction(trans);

		if (ret) {
			up_read(&fs_info->commit_root_sem);
			mutex_unlock(&fs_info->chunk_mutex);
			if (ret == -ENOSPC)
				ret = 0;
			break;
		}
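
		/* bytes reports how much was actually discarded. */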
		ret = btrfs_issue_discard(device->bdev, start, len, &bytes);
		up_read(&fs_info->commit_root_sem);
		mutex_unlock(&fs_info->chunk_mutex);

		if (ret)
			break;

		start += len;
		*trimmed += bytes;

		if (fatal_signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}

		cond_resched();
	}

	return ret;
}

int btrfs_trim_fs(struct btrfs_root *root, struct fstrim_range *range)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_block_group_cache *cache = NULL;
	struct btrfs_device *device;
	struct list_head *devices;
	u64 group_trimmed;
	u64 start;
	u64 end;
	u64 trimmed = 0;
	u64 total_bytes = btrfs_super_total_bytes(fs_info->super_copy);
	int ret = 0;

	/*
	 * Try to trim all FS space; our first block group may start at a
	 * non-zero offset.
	 */
	if (range->len == total_bytes)
		cache = btrfs_lookup_first_block_group(fs_info, range->start);
	else
		cache = btrfs_lookup_block_group(fs_info, range->start);

	while (cache) {
		if (cache->key.objectid >= (range->start + range->len)) {
			btrfs_put_block_group(cache);
			break;
		}

		start = max(range->start, cache->key.objectid);
		end = min(range->start + range->len,
			  cache->key.objectid + cache->key.offset);
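
		/*
		 * Only bother if the overlap with the requested range is at
		 * least minlen bytes.
		 */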
		if (end - start >= range->minlen) {
			if (!block_group_cache_done(cache)) {
				ret = cache_block_group(cache, 0);
				if (ret) {
					btrfs_put_block_group(cache);
					break;
				}
				ret = wait_block_group_cache_done(cache);
				if (ret) {
					btrfs_put_block_group(cache);
					break;
				}
			}
			ret = btrfs_trim_block_group(cache,
						     &group_trimmed,
						     start,
						     end,
						     range->minlen);

			trimmed += group_trimmed;
			if (ret) {
				btrfs_put_block_group(cache);
				break;
			}
		}

		cache = next_block_group(fs_info->tree_root, cache);
	}
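
	/* Also trim the free (unallocated) space on each writeable device. */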
	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
	devices = &root->fs_info->fs_devices->alloc_list;
	list_for_each_entry(device, devices, dev_alloc_list) {
		ret = btrfs_trim_free_extents(device, range->minlen,
					      &group_trimmed);
		if (ret)
			break;

		trimmed += group_trimmed;
	}
	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);

	range->len = trimmed;
	return ret;
}

/*
 * btrfs_{start,end}_write_no_snapshoting() are similar to
 * mnt_{want,drop}_write(): they are used to prevent some tasks from writing
 * data into the page cache through nocow before the subvolume is snapshotted
 * but flushing that data to disk only after the snapshot creation, and to
 * prevent operations while snapshotting is ongoing that would make the
 * snapshot inconsistent (writes followed by expanding truncates for example).
 */
void btrfs_end_write_no_snapshoting(struct btrfs_root *root)
{
	percpu_counter_dec(&root->subv_writers->counter);
	/*
	 * Make sure counter is updated before we wake up waiters.
	 */
	smp_mb();
	if (waitqueue_active(&root->subv_writers->wait))
		wake_up(&root->subv_writers->wait);
}

int btrfs_start_write_no_snapshoting(struct btrfs_root *root)
{
	if (atomic_read(&root->will_be_snapshoted))
		return 0;

	percpu_counter_inc(&root->subv_writers->counter);
	/*
	 * Make sure counter is updated before we check for snapshot creation.
	 */
	smp_mb();
	if (atomic_read(&root->will_be_snapshoted)) {
		btrfs_end_write_no_snapshoting(root);
		return 0;
	}
	return 1;
}
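
/*
 * Used as the "action" callback for wait_on_atomic_t() below: sleep until
 * root->will_be_snapshoted drops back to zero.
 */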
static int wait_snapshoting_atomic_t(atomic_t *a)
{
	schedule();
	return 0;
}

void btrfs_wait_for_snapshot_creation(struct btrfs_root *root)
{
	while (true) {
		int ret;

		ret = btrfs_start_write_no_snapshoting(root);
		if (ret)
			break;
		wait_on_atomic_t(&root->will_be_snapshoted,
				 wait_snapshoting_atomic_t,
				 TASK_UNINTERRUPTIBLE);
	}
}
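
/*
 * Typical usage of the no_snapshoting helpers (a sketch, not code from this
 * file): a nocow writer brackets its page cache writes with
 *
 *	if (!btrfs_start_write_no_snapshoting(root))
 *		fall back to the COW path, or wait for the snapshot;
 *	...write the data...
 *	btrfs_end_write_no_snapshoting(root);
 *
 * Snapshot creation increments will_be_snapshoted and then waits for the
 * subv_writers counter to drain before proceeding.
 */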