// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 */

#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/rcupdate.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/percpu_counter.h>
#include <linux/lockdep.h>
#include <linux/crc32c.h>
#include "tree-log.h"
#include "disk-io.h"
#include "print-tree.h"
#include "volumes.h"
#include "raid56.h"
#include "locking.h"
#include "free-space-cache.h"
#include "free-space-tree.h"
#include "math.h"
#include "sysfs.h"
#include "qgroup.h"
#include "ref-verify.h"

#undef SCRAMBLE_DELAYED_REFS
/*
 * Control flags for do_chunk_alloc's force field.
 *
 * CHUNK_ALLOC_NO_FORCE means to only allocate a chunk
 * if we really need one.
 *
 * CHUNK_ALLOC_LIMITED means to only try and allocate one
 * if we have very few chunks already allocated.  This is
 * used as part of the clustering code to help make sure
 * we have a good pool of storage to cluster in, without
 * filling the FS with empty chunks.
 *
 * CHUNK_ALLOC_FORCE means it must try to allocate one.
 */
enum {
        CHUNK_ALLOC_NO_FORCE = 0,
        CHUNK_ALLOC_LIMITED = 1,
        CHUNK_ALLOC_FORCE = 2,
};
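
/*
 * Illustrative sketch (not from the original source): one shape a caller
 * might use to escalate from the opportunistic mode to a forced chunk
 * allocation.  The flags value and retry structure here are assumptions
 * for illustration, not a verbatim call site from this file:
 *
 *	ret = do_chunk_alloc(trans, BTRFS_BLOCK_GROUP_METADATA,
 *			     CHUNK_ALLOC_NO_FORCE);
 *	if (ret == -ENOSPC)
 *		ret = do_chunk_alloc(trans, BTRFS_BLOCK_GROUP_METADATA,
 *				     CHUNK_ALLOC_FORCE);
 */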
static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
                               struct btrfs_delayed_ref_node *node, u64 parent,
                               u64 root_objectid, u64 owner_objectid,
                               u64 owner_offset, int refs_to_drop,
                               struct btrfs_delayed_extent_op *extra_op);
static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
                                    struct extent_buffer *leaf,
                                    struct btrfs_extent_item *ei);
static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
                                      u64 parent, u64 root_objectid,
                                      u64 flags, u64 owner, u64 offset,
                                      struct btrfs_key *ins, int ref_mod);
static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
                                     struct btrfs_delayed_ref_node *node,
                                     struct btrfs_delayed_extent_op *extent_op);
static int do_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags,
                          int force);
static int find_next_key(struct btrfs_path *path, int level,
                         struct btrfs_key *key);
static void dump_space_info(struct btrfs_fs_info *fs_info,
                            struct btrfs_space_info *info, u64 bytes,
                            int dump_block_groups);
static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
                               u64 num_bytes);
static void space_info_add_new_bytes(struct btrfs_fs_info *fs_info,
                                     struct btrfs_space_info *space_info,
                                     u64 num_bytes);
static void space_info_add_old_bytes(struct btrfs_fs_info *fs_info,
                                     struct btrfs_space_info *space_info,
                                     u64 num_bytes);
static noinline int
block_group_cache_done(struct btrfs_block_group_cache *cache)
{
        smp_mb();
        return cache->cached == BTRFS_CACHE_FINISHED ||
                cache->cached == BTRFS_CACHE_ERROR;
}

static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
{
        return (cache->flags & bits) == bits;
}

void btrfs_get_block_group(struct btrfs_block_group_cache *cache)
{
        atomic_inc(&cache->count);
}

void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
{
        if (atomic_dec_and_test(&cache->count)) {
                WARN_ON(cache->pinned > 0);
                WARN_ON(cache->reserved > 0);

                /*
                 * If not empty, someone is still holding the mutex of a
                 * full_stripe_lock, which can only be released by its
                 * holder.  Freeing the cache now would cause a
                 * use-after-free when that holder tries to release the
                 * full stripe lock.
                 *
                 * There is no good way to resolve this here, so warn.
                 */
                WARN_ON(!RB_EMPTY_ROOT(&cache->full_stripe_locks_root.root));
                kfree(cache->free_space_ctl);
                kfree(cache);
        }
}
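
/*
 * Illustrative sketch (not from the original source): lookups in this file
 * take a reference on the block group they return (see the
 * btrfs_get_block_group() call in block_group_cache_tree_search() below),
 * so the caller is responsible for the matching put:
 *
 *	bg = block_group_cache_tree_search(fs_info, bytenr, 1);
 *	if (bg) {
 *		... use bg->key.objectid / bg->key.offset ...
 *		btrfs_put_block_group(bg);
 *	}
 */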
/*
 * this adds the block group to the fs_info rb tree for the block group
 * cache
 */
static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
                                struct btrfs_block_group_cache *block_group)
{
        struct rb_node **p;
        struct rb_node *parent = NULL;
        struct btrfs_block_group_cache *cache;

        spin_lock(&info->block_group_cache_lock);
        p = &info->block_group_cache_tree.rb_node;

        while (*p) {
                parent = *p;
                cache = rb_entry(parent, struct btrfs_block_group_cache,
                                 cache_node);
                if (block_group->key.objectid < cache->key.objectid) {
                        p = &(*p)->rb_left;
                } else if (block_group->key.objectid > cache->key.objectid) {
                        p = &(*p)->rb_right;
                } else {
                        spin_unlock(&info->block_group_cache_lock);
                        return -EEXIST;
                }
        }

        rb_link_node(&block_group->cache_node, parent, p);
        rb_insert_color(&block_group->cache_node,
                        &info->block_group_cache_tree);

        if (info->first_logical_byte > block_group->key.objectid)
                info->first_logical_byte = block_group->key.objectid;

        spin_unlock(&info->block_group_cache_lock);

        return 0;
}
/*
 * This will return the block group at or after bytenr if contains is 0, else
 * it will return the block group that contains the bytenr
 */
static struct btrfs_block_group_cache *
block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
                              int contains)
{
        struct btrfs_block_group_cache *cache, *ret = NULL;
        struct rb_node *n;
        u64 end, start;

        spin_lock(&info->block_group_cache_lock);
        n = info->block_group_cache_tree.rb_node;

        while (n) {
                cache = rb_entry(n, struct btrfs_block_group_cache,
                                 cache_node);
                end = cache->key.objectid + cache->key.offset - 1;
                start = cache->key.objectid;

                if (bytenr < start) {
                        if (!contains && (!ret || start < ret->key.objectid))
                                ret = cache;
                        n = n->rb_left;
                } else if (bytenr > start) {
                        if (contains && bytenr <= end) {
                                ret = cache;
                                break;
                        }
                        n = n->rb_right;
                } else {
                        ret = cache;
                        break;
                }
        }
        if (ret) {
                btrfs_get_block_group(ret);
                if (bytenr == 0 && info->first_logical_byte > ret->key.objectid)
                        info->first_logical_byte = ret->key.objectid;
        }
        spin_unlock(&info->block_group_cache_lock);

        return ret;
}
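
/*
 * Worked example (editor's illustration, not from the original source):
 * with block groups covering [0, 8MiB) and [16MiB, 24MiB), a search for
 * bytenr = 10MiB returns the [16MiB, 24MiB) group when contains == 0
 * (the first group at or after bytenr), but returns NULL when
 * contains == 1, since no group actually contains 10MiB.
 */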
static int add_excluded_extent(struct btrfs_fs_info *fs_info,
                               u64 start, u64 num_bytes)
{
        u64 end = start + num_bytes - 1;
        set_extent_bits(&fs_info->freed_extents[0],
                        start, end, EXTENT_UPTODATE);
        set_extent_bits(&fs_info->freed_extents[1],
                        start, end, EXTENT_UPTODATE);
        return 0;
}

static void free_excluded_extents(struct btrfs_block_group_cache *cache)
{
        struct btrfs_fs_info *fs_info = cache->fs_info;
        u64 start, end;

        start = cache->key.objectid;
        end = start + cache->key.offset - 1;

        clear_extent_bits(&fs_info->freed_extents[0],
                          start, end, EXTENT_UPTODATE);
        clear_extent_bits(&fs_info->freed_extents[1],
                          start, end, EXTENT_UPTODATE);
}
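
/*
 * Editor's note (an assumption based on how btrfs of this era manages
 * pinned extents, not stated in this excerpt): excluded ranges are set in
 * *both* freed_extents trees because fs_info->pinned_extents points at one
 * of the two and the trees trade roles at transaction commit.  Marking
 * both keeps the exclusion visible no matter which tree is active, e.g.:
 *
 *	add_excluded_extent(fs_info, start, num_bytes);
 *	// visible via fs_info->pinned_extents before and after the swap
 */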
static int exclude_super_stripes(struct btrfs_block_group_cache *cache)
{
        struct btrfs_fs_info *fs_info = cache->fs_info;
        u64 bytenr;
        u64 *logical;
        int stripe_len;
        int i, nr, ret;

        if (cache->key.objectid < BTRFS_SUPER_INFO_OFFSET) {
                stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->key.objectid;
                cache->bytes_super += stripe_len;
                ret = add_excluded_extent(fs_info, cache->key.objectid,
                                          stripe_len);
                if (ret)
                        return ret;
        }

        for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
                bytenr = btrfs_sb_offset(i);
                ret = btrfs_rmap_block(fs_info, cache->key.objectid,
                                       bytenr, &logical, &nr, &stripe_len);
                if (ret)
                        return ret;

                while (nr--) {
                        u64 start, len;

                        if (logical[nr] > cache->key.objectid +
                            cache->key.offset)
                                continue;

                        if (logical[nr] + stripe_len <= cache->key.objectid)
                                continue;

                        start = logical[nr];
                        if (start < cache->key.objectid) {
                                start = cache->key.objectid;
                                len = (logical[nr] + stripe_len) - start;
                        } else {
                                len = min_t(u64, stripe_len,
                                            cache->key.objectid +
                                            cache->key.offset - start);
                        }

                        cache->bytes_super += len;
                        ret = add_excluded_extent(fs_info, start, len);
                        if (ret) {
                                kfree(logical);
                                return ret;
                        }
                }

                kfree(logical);
        }
        return 0;
}
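
/*
 * Worked example (editor's illustration, not from the original source):
 * the primary superblock lives at BTRFS_SUPER_INFO_OFFSET (64KiB), and
 * btrfs_sb_offset() places the mirror copies at fixed logical offsets
 * (64MiB and 256GiB for mirrors 1 and 2, assuming the usual constants).
 * So a block group starting at logical 0 first excludes [0, 64KiB), then
 * excludes whatever physical stripes of each superblock mirror
 * btrfs_rmap_block() maps back into its range, clamped to the group's
 * [objectid, objectid + offset) boundaries.
 */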
static struct btrfs_caching_control *
get_caching_control(struct btrfs_block_group_cache *cache)
{
        struct btrfs_caching_control *ctl;

        spin_lock(&cache->lock);
        if (!cache->caching_ctl) {
                spin_unlock(&cache->lock);
                return NULL;
        }

        ctl = cache->caching_ctl;
        refcount_inc(&ctl->count);
        spin_unlock(&cache->lock);
        return ctl;
}

static void put_caching_control(struct btrfs_caching_control *ctl)
{
        if (refcount_dec_and_test(&ctl->count))
                kfree(ctl);
}
#ifdef CONFIG_BTRFS_DEBUG
static void fragment_free_space(struct btrfs_block_group_cache *block_group)
{
        struct btrfs_fs_info *fs_info = block_group->fs_info;
        u64 start = block_group->key.objectid;
        u64 len = block_group->key.offset;
        u64 chunk = block_group->flags & BTRFS_BLOCK_GROUP_METADATA ?
                fs_info->nodesize : fs_info->sectorsize;
        u64 step = chunk << 1;

        while (len > chunk) {
                btrfs_remove_free_space(block_group, start, chunk);
                start += step;
                if (len < step)
                        len = 0;
                else
                        len -= step;
        }
}
#endif
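
/*
 * Worked example (editor's illustration, not from the original source):
 * for a metadata block group with the common 16KiB nodesize, chunk is
 * 16KiB and step is 32KiB, so the loop above removes every other 16KiB
 * chunk from the free-space cache:
 *
 *	removed:  [0, 16K), [32K, 48K), [64K, 80K), ...
 *	kept:     [16K, 32K), [48K, 64K), ...
 *
 * leaving a deliberately fragmented layout for debug testing.
 */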
/*
 * This is only called by cache_block_group.  Since we could have freed
 * extents, we need to check pinned_extents for any extents that can't be
 * used yet, because their free space will not be released until the
 * transaction commits.
 */
u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
                       u64 start, u64 end)
{
        struct btrfs_fs_info *info = block_group->fs_info;
        u64 extent_start, extent_end, size, total_added = 0;
        int ret;

        while (start < end) {
                ret = find_first_extent_bit(info->pinned_extents, start,
                                            &extent_start, &extent_end,
                                            EXTENT_DIRTY | EXTENT_UPTODATE,
                                            NULL);
                if (ret)
                        break;

                if (extent_start <= start) {
                        start = extent_end + 1;
                } else if (extent_start > start && extent_start < end) {
                        size = extent_start - start;
                        total_added += size;
                        ret = btrfs_add_free_space(block_group, start,
                                                   size);
                        BUG_ON(ret); /* -ENOMEM or logic error */
                        start = extent_end + 1;
                } else {
                        break;
                }
        }

        if (start < end) {
                size = end - start;
                total_added += size;
                ret = btrfs_add_free_space(block_group, start, size);
                BUG_ON(ret); /* -ENOMEM or logic error */
        }

        return total_added;
}
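
/*
 * Worked example (editor's illustration, not from the original source):
 * with start = 0, end = 100 and one pinned extent covering the inclusive
 * range [40, 60], the loop adds [0, 40) as free space (40 bytes), jumps
 * start to extent_end + 1 = 61, finds no further pinned extents, and the
 * tail adds [61, 100) (39 bytes), returning total_added = 79.
 */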
static int load_extent_tree_free(struct btrfs_caching_control *caching_ctl)
{
	struct btrfs_block_group_cache *block_group = caching_ctl->block_group;
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	struct btrfs_root *extent_root = fs_info->extent_root;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	u64 total_found = 0;
	u64 last = 0;
	u32 nritems;
	int ret;
	bool wakeup = true;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);

#ifdef CONFIG_BTRFS_DEBUG
	/*
	 * If we're fragmenting we don't want to make anybody think we can
	 * allocate from this block group until we've had a chance to fragment
	 * the free space.
	 */
	if (btrfs_should_fragment_free_space(block_group))
		wakeup = false;
#endif
	/*
	 * We don't want to deadlock with somebody trying to allocate a new
	 * extent for the extent root while also trying to search the extent
	 * root to add free space. So we skip locking and search the commit
	 * root, since it's read-only.
	 */
	path->skip_locking = 1;
	path->search_commit_root = 1;
	path->reada = READA_FORWARD;

	key.objectid = last;
	key.offset = 0;
	key.type = BTRFS_EXTENT_ITEM_KEY;

next:
	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	leaf = path->nodes[0];
	nritems = btrfs_header_nritems(leaf);

	while (1) {
		if (btrfs_fs_closing(fs_info) > 1) {
			last = (u64)-1;
			break;
		}

		if (path->slots[0] < nritems) {
			btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		} else {
			ret = find_next_key(path, 0, &key);
			if (ret)
				break;

			if (need_resched() ||
			    rwsem_is_contended(&fs_info->commit_root_sem)) {
				if (wakeup)
					caching_ctl->progress = last;
				btrfs_release_path(path);
				up_read(&fs_info->commit_root_sem);
				mutex_unlock(&caching_ctl->mutex);
				cond_resched();
				mutex_lock(&caching_ctl->mutex);
				down_read(&fs_info->commit_root_sem);
				goto next;
			}

			ret = btrfs_next_leaf(extent_root, path);
			if (ret < 0)
				goto out;
			if (ret)
				break;
			leaf = path->nodes[0];
			nritems = btrfs_header_nritems(leaf);
			continue;
		}

		if (key.objectid < last) {
			key.objectid = last;
			key.offset = 0;
			key.type = BTRFS_EXTENT_ITEM_KEY;

			if (wakeup)
				caching_ctl->progress = last;
			btrfs_release_path(path);
			goto next;
		}

		if (key.objectid < block_group->key.objectid) {
			path->slots[0]++;
			continue;
		}

		if (key.objectid >= block_group->key.objectid +
		    block_group->key.offset)
			break;

		if (key.type == BTRFS_EXTENT_ITEM_KEY ||
		    key.type == BTRFS_METADATA_ITEM_KEY) {
			total_found += add_new_free_space(block_group, last,
							  key.objectid);
			if (key.type == BTRFS_METADATA_ITEM_KEY)
				last = key.objectid +
					fs_info->nodesize;
			else
				last = key.objectid + key.offset;

			if (total_found > CACHING_CTL_WAKE_UP) {
				total_found = 0;
				if (wakeup)
					wake_up(&caching_ctl->wait);
			}
		}
		path->slots[0]++;
	}
	ret = 0;

	total_found += add_new_free_space(block_group, last,
					  block_group->key.objectid +
					  block_group->key.offset);
	caching_ctl->progress = (u64)-1;

out:
	btrfs_free_path(path);
	return ret;
}
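
/*
 * Background worker that caches a block group's free space, using the free
 * space tree when the filesystem has one and falling back to scanning the
 * extent tree otherwise.
 */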
static noinline void caching_thread(struct btrfs_work *work)
{
	struct btrfs_block_group_cache *block_group;
	struct btrfs_fs_info *fs_info;
	struct btrfs_caching_control *caching_ctl;
	int ret;

	caching_ctl = container_of(work, struct btrfs_caching_control, work);
	block_group = caching_ctl->block_group;
	fs_info = block_group->fs_info;

	mutex_lock(&caching_ctl->mutex);
	down_read(&fs_info->commit_root_sem);

	if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE))
		ret = load_free_space_tree(caching_ctl);
	else
		ret = load_extent_tree_free(caching_ctl);

	spin_lock(&block_group->lock);
	block_group->caching_ctl = NULL;
	block_group->cached = ret ? BTRFS_CACHE_ERROR : BTRFS_CACHE_FINISHED;
	spin_unlock(&block_group->lock);

#ifdef CONFIG_BTRFS_DEBUG
	if (btrfs_should_fragment_free_space(block_group)) {
		u64 bytes_used;

		spin_lock(&block_group->space_info->lock);
		spin_lock(&block_group->lock);
		bytes_used = block_group->key.offset -
			btrfs_block_group_used(&block_group->item);
		block_group->space_info->bytes_used += bytes_used >> 1;
		spin_unlock(&block_group->lock);
		spin_unlock(&block_group->space_info->lock);
		fragment_free_space(block_group);
	}
#endif

	caching_ctl->progress = (u64)-1;

	up_read(&fs_info->commit_root_sem);
	free_excluded_extents(block_group);
	mutex_unlock(&caching_ctl->mutex);

	wake_up(&caching_ctl->wait);

	put_caching_control(caching_ctl);
	btrfs_put_block_group(block_group);
}
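
/*
 * Start caching a block group's free space. With the SPACE_CACHE mount
 * option we first try the fast path of loading the on-disk space cache;
 * if that fails (and load_cache_only is not set) the work is deferred to
 * caching_thread().
 */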
static int cache_block_group(struct btrfs_block_group_cache *cache,
			     int load_cache_only)
{
	DEFINE_WAIT(wait);
	struct btrfs_fs_info *fs_info = cache->fs_info;
	struct btrfs_caching_control *caching_ctl;
	int ret = 0;

	caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
	if (!caching_ctl)
		return -ENOMEM;

	INIT_LIST_HEAD(&caching_ctl->list);
	mutex_init(&caching_ctl->mutex);
	init_waitqueue_head(&caching_ctl->wait);
	caching_ctl->block_group = cache;
	caching_ctl->progress = cache->key.objectid;
	refcount_set(&caching_ctl->count, 1);
	btrfs_init_work(&caching_ctl->work, btrfs_cache_helper,
			caching_thread, NULL, NULL);

	spin_lock(&cache->lock);
	/*
	 * This should be a rare occasion, but this could happen I think in the
	 * case where one thread starts to load the space cache info, and then
	 * some other thread starts a transaction commit which tries to do an
	 * allocation while the other thread is still loading the space cache
	 * info. The previous loop should have kept us from choosing this block
	 * group, but if we've moved to the state where we will wait on caching
	 * block groups we need to first check if we're doing a fast load here,
	 * so we can wait for it to finish, otherwise we could end up allocating
	 * from a block group whose cache gets evicted for one reason or
	 * another.
	 */
	while (cache->cached == BTRFS_CACHE_FAST) {
		struct btrfs_caching_control *ctl;

		ctl = cache->caching_ctl;
		refcount_inc(&ctl->count);
		prepare_to_wait(&ctl->wait, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock(&cache->lock);

		schedule();

		finish_wait(&ctl->wait, &wait);
		put_caching_control(ctl);
		spin_lock(&cache->lock);
	}

	if (cache->cached != BTRFS_CACHE_NO) {
		spin_unlock(&cache->lock);
		kfree(caching_ctl);
		return 0;
	}
	WARN_ON(cache->caching_ctl);
	cache->caching_ctl = caching_ctl;
	cache->cached = BTRFS_CACHE_FAST;
	spin_unlock(&cache->lock);

	if (btrfs_test_opt(fs_info, SPACE_CACHE)) {
		mutex_lock(&caching_ctl->mutex);
		ret = load_free_space_cache(fs_info, cache);

		spin_lock(&cache->lock);
		if (ret == 1) {
			cache->caching_ctl = NULL;
			cache->cached = BTRFS_CACHE_FINISHED;
			cache->last_byte_to_unpin = (u64)-1;
			caching_ctl->progress = (u64)-1;
		} else {
			if (load_cache_only) {
				cache->caching_ctl = NULL;
				cache->cached = BTRFS_CACHE_NO;
			} else {
				cache->cached = BTRFS_CACHE_STARTED;
				cache->has_caching_ctl = 1;
			}
		}
		spin_unlock(&cache->lock);
#ifdef CONFIG_BTRFS_DEBUG
		if (ret == 1 &&
		    btrfs_should_fragment_free_space(cache)) {
			u64 bytes_used;

			spin_lock(&cache->space_info->lock);
			spin_lock(&cache->lock);
			bytes_used = cache->key.offset -
				btrfs_block_group_used(&cache->item);
			cache->space_info->bytes_used += bytes_used >> 1;
			spin_unlock(&cache->lock);
			spin_unlock(&cache->space_info->lock);
			fragment_free_space(cache);
		}
#endif
		mutex_unlock(&caching_ctl->mutex);

		wake_up(&caching_ctl->wait);
		if (ret == 1) {
			put_caching_control(caching_ctl);
			free_excluded_extents(cache);
			return 0;
		}
	} else {
		/*
		 * We're either using the free space tree or no caching at all.
		 * Set cached to the appropriate value and wake up any waiters.
		 */
		spin_lock(&cache->lock);
		if (load_cache_only) {
			cache->caching_ctl = NULL;
			cache->cached = BTRFS_CACHE_NO;
		} else {
			cache->cached = BTRFS_CACHE_STARTED;
			cache->has_caching_ctl = 1;
		}
		spin_unlock(&cache->lock);
		wake_up(&caching_ctl->wait);
	}

	if (load_cache_only) {
		put_caching_control(caching_ctl);
		return 0;
	}

	down_write(&fs_info->commit_root_sem);
	refcount_inc(&caching_ctl->count);
	list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
	up_write(&fs_info->commit_root_sem);

	btrfs_get_block_group(cache);

	btrfs_queue_work(fs_info->caching_workers, &caching_ctl->work);

	return ret;
}

/*
 * return the block group that starts at or after bytenr
 */
static struct btrfs_block_group_cache *
btrfs_lookup_first_block_group(struct btrfs_fs_info *info, u64 bytenr)
{
	return block_group_cache_tree_search(info, bytenr, 0);
}

/*
 * return the block group that contains the given bytenr
 */
struct btrfs_block_group_cache *btrfs_lookup_block_group(
						 struct btrfs_fs_info *info,
						 u64 bytenr)
{
	return block_group_cache_tree_search(info, bytenr, 1);
}
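
/*
 * Find the space_info matching the given block group type flags (data,
 * metadata or system); returns NULL if no such space_info exists yet.
 */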
static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
						  u64 flags)
{
	struct list_head *head = &info->space_info;
	struct btrfs_space_info *found;

	flags &= BTRFS_BLOCK_GROUP_TYPE_MASK;

	rcu_read_lock();
	list_for_each_entry_rcu(found, head, list) {
		if (found->flags & flags) {
			rcu_read_unlock();
			return found;
		}
	}
	rcu_read_unlock();
	return NULL;
}
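
/*
 * Adjust the total_bytes_pinned counter of the matching space_info;
 * num_bytes may be negative when bytes are being unpinned.
 */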
static void add_pinned_bytes(struct btrfs_fs_info *fs_info, s64 num_bytes,
			     bool metadata, u64 root_objectid)
{
	struct btrfs_space_info *space_info;
	u64 flags;

	if (metadata) {
		if (root_objectid == BTRFS_CHUNK_TREE_OBJECTID)
			flags = BTRFS_BLOCK_GROUP_SYSTEM;
		else
			flags = BTRFS_BLOCK_GROUP_METADATA;
	} else {
		flags = BTRFS_BLOCK_GROUP_DATA;
	}

	space_info = __find_space_info(fs_info, flags);
	ASSERT(space_info);
	percpu_counter_add_batch(&space_info->total_bytes_pinned, num_bytes,
				 BTRFS_TOTAL_BYTES_PINNED_BATCH);
}

/*
 * after adding space to the filesystem, we need to clear the full flags
 * on all the space infos.
 */
void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
{
	struct list_head *head = &info->space_info;
	struct btrfs_space_info *found;

	rcu_read_lock();
	list_for_each_entry_rcu(found, head, list)
		found->full = 0;
	rcu_read_unlock();
}

/* simple helper to search for an existing data extent at a given offset */
int btrfs_lookup_data_extent(struct btrfs_fs_info *fs_info, u64 start, u64 len)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_path *path;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = start;
	key.offset = len;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	ret = btrfs_search_slot(NULL, fs_info->extent_root, &key, path, 0, 0);
	btrfs_free_path(path);
	return ret;
}

/*
 * helper function to lookup reference count and flags of a tree block.
 *
 * the head node for delayed ref is used to store the sum of all the
 * reference count modifications queued up in the rbtree. the head
 * node may also store the extent flags to set. This way you can check
 * to see what the reference count and extent flags would be if all of
 * the delayed refs are not processed.
 */
int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
			     struct btrfs_fs_info *fs_info, u64 bytenr,
			     u64 offset, int metadata, u64 *refs, u64 *flags)
{
	struct btrfs_delayed_ref_head *head;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_path *path;
	struct btrfs_extent_item *ei;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	u32 item_size;
	u64 num_refs;
	u64 extent_flags;
	int ret;

	/*
	 * If we don't have skinny metadata, don't bother doing anything
	 * different
	 */
	if (metadata && !btrfs_fs_incompat(fs_info, SKINNY_METADATA)) {
		offset = fs_info->nodesize;
		metadata = 0;
	}

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	if (!trans) {
		path->skip_locking = 1;
		path->search_commit_root = 1;
	}

search_again:
	key.objectid = bytenr;
	key.offset = offset;
	if (metadata)
		key.type = BTRFS_METADATA_ITEM_KEY;
	else
		key.type = BTRFS_EXTENT_ITEM_KEY;

	ret = btrfs_search_slot(trans, fs_info->extent_root, &key, path, 0, 0);
	if (ret < 0)
		goto out_free;

	if (ret > 0 && metadata && key.type == BTRFS_METADATA_ITEM_KEY) {
		if (path->slots[0]) {
			path->slots[0]--;
			btrfs_item_key_to_cpu(path->nodes[0], &key,
					      path->slots[0]);
			if (key.objectid == bytenr &&
			    key.type == BTRFS_EXTENT_ITEM_KEY &&
			    key.offset == fs_info->nodesize)
				ret = 0;
		}
	}

	if (ret == 0) {
		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		if (item_size >= sizeof(*ei)) {
			ei = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_extent_item);
			num_refs = btrfs_extent_refs(leaf, ei);
			extent_flags = btrfs_extent_flags(leaf, ei);
		} else {
			ret = -EINVAL;
			btrfs_print_v0_err(fs_info);
			if (trans)
				btrfs_abort_transaction(trans, ret);
			else
				btrfs_handle_fs_error(fs_info, ret, NULL);

			goto out_free;
		}

		BUG_ON(num_refs == 0);
	} else {
		num_refs = 0;
		extent_flags = 0;
		ret = 0;
	}

	if (!trans)
		goto out;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);
	head = btrfs_find_delayed_ref_head(delayed_refs, bytenr);
	if (head) {
		if (!mutex_trylock(&head->mutex)) {
			refcount_inc(&head->refs);
			spin_unlock(&delayed_refs->lock);

			btrfs_release_path(path);

			/*
			 * Mutex was contended, block until it's released and try
			 * again
			 */
			mutex_lock(&head->mutex);
			mutex_unlock(&head->mutex);
			btrfs_put_delayed_ref_head(head);
			goto search_again;
		}
		spin_lock(&head->lock);
		if (head->extent_op && head->extent_op->update_flags)
			extent_flags |= head->extent_op->flags_to_set;
		else
			BUG_ON(num_refs == 0);

		num_refs += head->ref_mod;
		spin_unlock(&head->lock);
		mutex_unlock(&head->mutex);
	}
	spin_unlock(&delayed_refs->lock);
out:
	WARN_ON(num_refs == 0);
	if (refs)
		*refs = num_refs;
	if (flags)
		*flags = extent_flags;
out_free:
	btrfs_free_path(path);
	return ret;
}

/*
 * Back reference rules.  Back refs have three main goals:
 *
 * 1) differentiate between all holders of references to an extent so that
 *    when a reference is dropped we can make sure it was a valid reference
 *    before freeing the extent.
 *
 * 2) Provide enough information to quickly find the holders of an extent
 *    if we notice a given block is corrupted or bad.
 *
 * 3) Make it easy to migrate blocks for FS shrinking or storage pool
 *    maintenance.  This is actually the same as #2, but with a slightly
 *    different use case.
 *
 * There are two kinds of back refs. The implicit back refs is optimized
 * for pointers in non-shared tree blocks. For a given pointer in a block,
 * back refs of this kind provide information about the block's owner tree
 * and the pointer's key. This information allows us to find the block by
 * b-tree searching. The full back refs is for pointers in tree blocks not
 * referenced by their owner trees. The location of tree block is recorded
 * in the back refs. Actually the full back refs is generic, and can be
 * used in all cases the implicit back refs is used. The major shortcoming
 * of the full back refs is its overhead. Every time a tree block gets
 * COWed, we have to update back refs entry for all pointers in it.
 *
 * For a newly allocated tree block, we use implicit back refs for
 * pointers in it. This means most tree related operations only involve
 * implicit back refs. For a tree block created in old transaction, the
 * only way to drop a reference to it is COW it. So we can detect the
 * event that tree block loses its owner tree's reference and do the
 * back refs conversion.
 *
 * When a tree block is COWed through a tree, there are four cases:
 *
 * The reference count of the block is one and the tree is the block's
 * owner tree. Nothing to do in this case.
 *
 * The reference count of the block is one and the tree is not the
 * block's owner tree. In this case, full back refs is used for pointers
 * in the block. Remove these full back refs, add implicit back refs for
 * every pointers in the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * the block's owner tree. In this case, implicit back refs is used for
 * pointers in the block. Add full back refs for every pointers in the
 * block, increase lower level extents' reference counts. The original
 * implicit back refs are carried over to the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * not the block's owner tree. Add implicit back refs for every pointer in
 * the new block, increase lower level extents' reference count.
 *
 * Back Reference Key composing:
 *
 * The key objectid corresponds to the first byte in the extent,
 * The key type is used to differentiate between types of back refs.
 * There are different meanings of the key offset for different types
 * of back refs.
 *
 * File extents can be referenced by:
 *
 * - multiple snapshots, subvolumes, or different generations in one subvol
 * - different files inside a single subvolume
 * - different offsets inside a file (bookend extents in file.c)
 *
 * The extent ref structure for the implicit back refs has fields for:
 *
 * - Objectid of the subvolume root
 * - objectid of the file holding the reference
 * - original offset in the file
 * - how many bookend extents
 *
 * The key offset for the implicit back refs is hash of the first
 * three fields.
 *
 * The extent ref structure for the full back refs has a field for:
 *
 * - number of pointers in the tree leaf
 *
 * The key offset for the full back refs is the first byte of
 * the tree leaf
 *
 * When a file extent is allocated, the implicit back refs is used.
 * the fields are filled in:
 *
 *     (root_key.objectid, inode objectid, offset in file, 1)
 *
 * When a file extent is removed during file truncation, we find the
 * corresponding implicit back refs and check the following fields:
 *
 *     (btrfs_header_owner(leaf), inode objectid, offset in file)
 *
 * Btree extents can be referenced by:
 *
 * - Different subvolumes
 *
 * Both the implicit back refs and the full back refs for tree blocks
 * only consist of key. The key offset for the implicit back refs is
 * objectid of block's owner tree. The key offset for the full back refs
 * is the first byte of parent block.
 *
 * When implicit back refs is used, information about the lowest key and
 * level of the tree block are required. This information is stored in
 * the tree block info structure.
 */

/*
 * is_data == BTRFS_REF_TYPE_BLOCK, tree block type is required,
 * is_data == BTRFS_REF_TYPE_DATA, data type is required,
 * is_data == BTRFS_REF_TYPE_ANY, either type is OK.
 */
int btrfs_get_extent_inline_ref_type(const struct extent_buffer *eb,
				     struct btrfs_extent_inline_ref *iref,
				     enum btrfs_inline_ref_type is_data)
{
	int type = btrfs_extent_inline_ref_type(eb, iref);
	u64 offset = btrfs_extent_inline_ref_offset(eb, iref);

	if (type == BTRFS_TREE_BLOCK_REF_KEY ||
	    type == BTRFS_SHARED_BLOCK_REF_KEY ||
	    type == BTRFS_SHARED_DATA_REF_KEY ||
	    type == BTRFS_EXTENT_DATA_REF_KEY) {
		if (is_data == BTRFS_REF_TYPE_BLOCK) {
			if (type == BTRFS_TREE_BLOCK_REF_KEY)
				return type;
			if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
				ASSERT(eb->fs_info);
				/*
				 * Every shared one has parent tree
				 * block, which must be aligned to
				 * nodesize.
				 */
				if (offset &&
				    IS_ALIGNED(offset, eb->fs_info->nodesize))
					return type;
			}
		} else if (is_data == BTRFS_REF_TYPE_DATA) {
			if (type == BTRFS_EXTENT_DATA_REF_KEY)
				return type;
			if (type == BTRFS_SHARED_DATA_REF_KEY) {
				ASSERT(eb->fs_info);
				/*
				 * Every shared one has parent tree
				 * block, which must be aligned to
				 * nodesize.
				 */
				if (offset &&
				    IS_ALIGNED(offset, eb->fs_info->nodesize))
					return type;
			}
		} else {
			ASSERT(is_data == BTRFS_REF_TYPE_ANY);
			return type;
		}
	}

	btrfs_print_leaf((struct extent_buffer *)eb);
	btrfs_err(eb->fs_info, "eb %llu invalid extent inline ref type %d",
		  eb->start, type);
	WARN_ON(1);

	return BTRFS_REF_TYPE_INVALID;
}
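
/*
 * Hash (root, owner, offset) into the u64 key offset used by
 * EXTENT_DATA_REF items: crc32c of the root in the upper bits, combined
 * with crc32c of owner and offset as ((u64)high_crc << 31) ^ low_crc.
 */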
static u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset)
{
	u32 high_crc = ~(u32)0;
	u32 low_crc = ~(u32)0;
	__le64 lenum;

	lenum = cpu_to_le64(root_objectid);
	high_crc = crc32c(high_crc, &lenum, sizeof(lenum));
	lenum = cpu_to_le64(owner);
	low_crc = crc32c(low_crc, &lenum, sizeof(lenum));
	lenum = cpu_to_le64(offset);
	low_crc = crc32c(low_crc, &lenum, sizeof(lenum));

	return ((u64)high_crc << 31) ^ (u64)low_crc;
}

static u64 hash_extent_data_ref_item(struct extent_buffer *leaf,
				     struct btrfs_extent_data_ref *ref)
{
	return hash_extent_data_ref(btrfs_extent_data_ref_root(leaf, ref),
				    btrfs_extent_data_ref_objectid(leaf, ref),
				    btrfs_extent_data_ref_offset(leaf, ref));
}

static int match_extent_data_ref(struct extent_buffer *leaf,
				 struct btrfs_extent_data_ref *ref,
				 u64 root_objectid, u64 owner, u64 offset)
{
	if (btrfs_extent_data_ref_root(leaf, ref) != root_objectid ||
	    btrfs_extent_data_ref_objectid(leaf, ref) != owner ||
	    btrfs_extent_data_ref_offset(leaf, ref) != offset)
		return 0;
	return 1;
}
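
/*
 * Look up the data backref item for an extent. Shared refs are keyed by
 * the parent block directly; indirect refs are keyed by a hash of (root,
 * owner, offset), so on a hash collision we walk forward over neighbouring
 * items until a fully matching ref is found.
 */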
static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_path *path,
					   u64 bytenr, u64 parent,
					   u64 root_objectid,
					   u64 owner, u64 offset)
{
	struct btrfs_root *root = trans->fs_info->extent_root;
	struct btrfs_key key;
	struct btrfs_extent_data_ref *ref;
	struct extent_buffer *leaf;
	u32 nritems;
	int ret;
	int recow;
	int err = -ENOENT;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_DATA_REF_KEY;
		key.offset = parent;
	} else {
		key.type = BTRFS_EXTENT_DATA_REF_KEY;
		key.offset = hash_extent_data_ref(root_objectid,
						  owner, offset);
	}
again:
	recow = 0;
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0) {
		err = ret;
		goto fail;
	}

	if (parent) {
		if (!ret)
			return 0;
		goto fail;
	}

	leaf = path->nodes[0];
	nritems = btrfs_header_nritems(leaf);
	while (1) {
		if (path->slots[0] >= nritems) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				err = ret;
			if (ret)
				goto fail;

			leaf = path->nodes[0];
			nritems = btrfs_header_nritems(leaf);
			recow = 1;
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid != bytenr ||
		    key.type != BTRFS_EXTENT_DATA_REF_KEY)
			goto fail;

		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_data_ref);

		if (match_extent_data_ref(leaf, ref, root_objectid,
					  owner, offset)) {
			if (recow) {
				btrfs_release_path(path);
				goto again;
			}
			err = 0;
			break;
		}
		path->slots[0]++;
	}
fail:
	return err;
}
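
/*
 * Insert a new data backref item or bump the count of an existing one.
 * For indirect refs, hash collisions are handled by probing successive
 * key offsets until a free slot or a matching ref turns up.
 */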
static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_path *path,
					   u64 bytenr, u64 parent,
					   u64 root_objectid, u64 owner,
					   u64 offset, int refs_to_add)
{
	struct btrfs_root *root = trans->fs_info->extent_root;
	struct btrfs_key key;
	struct extent_buffer *leaf;
	u32 size;
	u32 num_refs;
	int ret;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_DATA_REF_KEY;
		key.offset = parent;
		size = sizeof(struct btrfs_shared_data_ref);
	} else {
		key.type = BTRFS_EXTENT_DATA_REF_KEY;
		key.offset = hash_extent_data_ref(root_objectid,
						  owner, offset);
		size = sizeof(struct btrfs_extent_data_ref);
	}

	ret = btrfs_insert_empty_item(trans, root, path, &key, size);
	if (ret && ret != -EEXIST)
		goto fail;

	leaf = path->nodes[0];
	if (parent) {
		struct btrfs_shared_data_ref *ref;
		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_shared_data_ref);
		if (ret == 0) {
			btrfs_set_shared_data_ref_count(leaf, ref, refs_to_add);
		} else {
			num_refs = btrfs_shared_data_ref_count(leaf, ref);
			num_refs += refs_to_add;
			btrfs_set_shared_data_ref_count(leaf, ref, num_refs);
		}
	} else {
		struct btrfs_extent_data_ref *ref;
		while (ret == -EEXIST) {
			ref = btrfs_item_ptr(leaf, path->slots[0],
					     struct btrfs_extent_data_ref);
			if (match_extent_data_ref(leaf, ref, root_objectid,
						  owner, offset))
				break;
			btrfs_release_path(path);
			key.offset++;
			ret = btrfs_insert_empty_item(trans, root, path, &key,
						      size);
			if (ret && ret != -EEXIST)
				goto fail;

			leaf = path->nodes[0];
		}
		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_data_ref);
		if (ret == 0) {
			btrfs_set_extent_data_ref_root(leaf, ref,
						       root_objectid);
			btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
			btrfs_set_extent_data_ref_offset(leaf, ref, offset);
			btrfs_set_extent_data_ref_count(leaf, ref, refs_to_add);
		} else {
			num_refs = btrfs_extent_data_ref_count(leaf, ref);
			num_refs += refs_to_add;
			btrfs_set_extent_data_ref_count(leaf, ref, num_refs);
		}
	}
	btrfs_mark_buffer_dirty(leaf);
	ret = 0;
fail:
	btrfs_release_path(path);
	return ret;
}
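
/*
 * Drop refs_to_drop references from the data backref item the path points
 * at, deleting the item (and setting *last_ref) when the count hits zero.
 */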
static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_path *path,
					   int refs_to_drop, int *last_ref)
{
	struct btrfs_key key;
	struct btrfs_extent_data_ref *ref1 = NULL;
	struct btrfs_shared_data_ref *ref2 = NULL;
	struct extent_buffer *leaf;
	u32 num_refs = 0;
	int ret = 0;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

	if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
		ref1 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_data_ref);
		num_refs = btrfs_extent_data_ref_count(leaf, ref1);
	} else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
		ref2 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_shared_data_ref);
		num_refs = btrfs_shared_data_ref_count(leaf, ref2);
	} else if (unlikely(key.type == BTRFS_EXTENT_REF_V0_KEY)) {
		btrfs_print_v0_err(trans->fs_info);
		btrfs_abort_transaction(trans, -EINVAL);
		return -EINVAL;
	} else {
		BUG();
	}

	BUG_ON(num_refs < refs_to_drop);
	num_refs -= refs_to_drop;

	if (num_refs == 0) {
		ret = btrfs_del_item(trans, trans->fs_info->extent_root, path);
		*last_ref = 1;
	} else {
		if (key.type == BTRFS_EXTENT_DATA_REF_KEY)
			btrfs_set_extent_data_ref_count(leaf, ref1, num_refs);
		else if (key.type == BTRFS_SHARED_DATA_REF_KEY)
			btrfs_set_shared_data_ref_count(leaf, ref2, num_refs);
		btrfs_mark_buffer_dirty(leaf);
	}
	return ret;
}
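
/*
 * Return the reference count stored in a data backref, whether it is an
 * inline ref inside the extent item or a standalone backref item.
 */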
static noinline u32 extent_data_ref_count(struct btrfs_path *path,
					  struct btrfs_extent_inline_ref *iref)
{
	struct btrfs_key key;
	struct extent_buffer *leaf;
	struct btrfs_extent_data_ref *ref1;
	struct btrfs_shared_data_ref *ref2;
	u32 num_refs = 0;
	int type;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

	BUG_ON(key.type == BTRFS_EXTENT_REF_V0_KEY);
	if (iref) {
		/*
		 * If type is invalid, we should have bailed out earlier than
		 * this call.
		 */
		type = btrfs_get_extent_inline_ref_type(leaf, iref, BTRFS_REF_TYPE_DATA);
		ASSERT(type != BTRFS_REF_TYPE_INVALID);
		if (type == BTRFS_EXTENT_DATA_REF_KEY) {
			ref1 = (struct btrfs_extent_data_ref *)(&iref->offset);
			num_refs = btrfs_extent_data_ref_count(leaf, ref1);
		} else {
			ref2 = (struct btrfs_shared_data_ref *)(iref + 1);
			num_refs = btrfs_shared_data_ref_count(leaf, ref2);
		}
	} else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
		ref1 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_data_ref);
		num_refs = btrfs_extent_data_ref_count(leaf, ref1);
	} else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
		ref2 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_shared_data_ref);
		num_refs = btrfs_shared_data_ref_count(leaf, ref2);
	} else {
		WARN_ON(1);
	}
	return num_refs;
}

static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans,
					  struct btrfs_path *path,
					  u64 bytenr, u64 parent,
					  u64 root_objectid)
{
	struct btrfs_root *root = trans->fs_info->extent_root;
	struct btrfs_key key;
	int ret;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_BLOCK_REF_KEY;
		key.offset = parent;
	} else {
		key.type = BTRFS_TREE_BLOCK_REF_KEY;
		key.offset = root_objectid;
	}
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0)
		ret = -ENOENT;
	return ret;
}

static noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans,
					  struct btrfs_path *path,
					  u64 bytenr, u64 parent,
					  u64 root_objectid)
{
	struct btrfs_key key;
	int ret;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_BLOCK_REF_KEY;
		key.offset = parent;
	} else {
		key.type = BTRFS_TREE_BLOCK_REF_KEY;
		key.offset = root_objectid;
	}
	ret = btrfs_insert_empty_item(trans, trans->fs_info->extent_root,
				      path, &key, 0);
	btrfs_release_path(path);
	return ret;
}
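
/*
 * Pick the backref key type for an extent: owners below
 * BTRFS_FIRST_FREE_OBJECTID are tree levels and get block ref keys,
 * everything else gets data ref keys; a non-zero parent selects the
 * shared variant.
 */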
static inline int extent_ref_type(u64 parent, u64 owner)
{
	int type;
	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		if (parent > 0)
			type = BTRFS_SHARED_BLOCK_REF_KEY;
		else
			type = BTRFS_TREE_BLOCK_REF_KEY;
	} else {
		if (parent > 0)
			type = BTRFS_SHARED_DATA_REF_KEY;
		else
			type = BTRFS_EXTENT_DATA_REF_KEY;
	}
	return type;
}
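
/*
 * Find the key following the current path position, walking up the tree
 * when the current node is exhausted; returns 1 if there is no next key.
 */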
static int find_next_key(struct btrfs_path *path, int level,
			 struct btrfs_key *key)
{
	for (; level < BTRFS_MAX_LEVEL; level++) {
		if (!path->nodes[level])
			break;
		if (path->slots[level] + 1 >=
		    btrfs_header_nritems(path->nodes[level]))
			continue;
		if (level == 0)
			btrfs_item_key_to_cpu(path->nodes[level], key,
					      path->slots[level] + 1);
		else
			btrfs_node_key_to_cpu(path->nodes[level], key,
					      path->slots[level] + 1);
		return 0;
	}
	return 1;
}

/*
 * look for inline back ref. if back ref is found, *ref_ret is set
 * to the address of inline back ref, and 0 is returned.
 *
 * if back ref isn't found, *ref_ret is set to the address where it
 * should be inserted, and -ENOENT is returned.
 *
 * if insert is true and there are too many inline back refs, the path
 * points to the extent item, and -EAGAIN is returned.
 *
 * NOTE: inline back refs are ordered in the same way that back ref
 *	 items in the tree are ordered.
 */
static noinline_for_stack
int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref **ref_ret,
				 u64 bytenr, u64 num_bytes,
				 u64 parent, u64 root_objectid,
				 u64 owner, u64 offset, int insert)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *root = fs_info->extent_root;
	struct btrfs_key key;
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	struct btrfs_extent_inline_ref *iref;
	u64 flags;
	u64 item_size;
	unsigned long ptr;
	unsigned long end;
	int extra_size;
	int type;
	int want;
	int ret;
	int err = 0;
	bool skinny_metadata = btrfs_fs_incompat(fs_info, SKINNY_METADATA);
	int needed;

	key.objectid = bytenr;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	key.offset = num_bytes;

	want = extent_ref_type(parent, owner);
	if (insert) {
		extra_size = btrfs_extent_inline_ref_size(want);
		path->keep_locks = 1;
	} else
		extra_size = -1;

	/*
	 * Owner is our level, so we can just add one to get the level for the
	 * block we are interested in.
	 */
	if (skinny_metadata && owner < BTRFS_FIRST_FREE_OBJECTID) {
		key.type = BTRFS_METADATA_ITEM_KEY;
		key.offset = owner;
	}

again:
	ret = btrfs_search_slot(trans, root, &key, path, extra_size, 1);
	if (ret < 0) {
		err = ret;
		goto out;
	}

	/*
	 * We may be a newly converted file system which still has the old fat
	 * extent entries for metadata, so try and see if we have one of those.
	 */
	if (ret > 0 && skinny_metadata) {
		skinny_metadata = false;
		if (path->slots[0]) {
			path->slots[0]--;
			btrfs_item_key_to_cpu(path->nodes[0], &key,
					      path->slots[0]);
			if (key.objectid == bytenr &&
			    key.type == BTRFS_EXTENT_ITEM_KEY &&
			    key.offset == num_bytes)
				ret = 0;
		}
		if (ret) {
			key.objectid = bytenr;
			key.type = BTRFS_EXTENT_ITEM_KEY;
			key.offset = num_bytes;
			btrfs_release_path(path);
			goto again;
		}
	}

	if (ret && !insert) {
		err = -ENOENT;
		goto out;
	} else if (WARN_ON(ret)) {
		err = -EIO;
		goto out;
	}

	leaf = path->nodes[0];
	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
	if (unlikely(item_size < sizeof(*ei))) {
		err = -EINVAL;
		btrfs_print_v0_err(fs_info);
		btrfs_abort_transaction(trans, err);
		goto out;
	}

	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	flags = btrfs_extent_flags(leaf, ei);

	ptr = (unsigned long)(ei + 1);
	end = (unsigned long)ei + item_size;

	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK && !skinny_metadata) {
		ptr += sizeof(struct btrfs_tree_block_info);
		BUG_ON(ptr > end);
	}

	if (owner >= BTRFS_FIRST_FREE_OBJECTID)
		needed = BTRFS_REF_TYPE_DATA;
	else
		needed = BTRFS_REF_TYPE_BLOCK;

	err = -ENOENT;
	while (1) {
		if (ptr >= end) {
			WARN_ON(ptr > end);
			break;
		}
		iref = (struct btrfs_extent_inline_ref *)ptr;
		type = btrfs_get_extent_inline_ref_type(leaf, iref, needed);
		if (type == BTRFS_REF_TYPE_INVALID) {
			err = -EUCLEAN;
			goto out;
		}

		if (want < type)
			break;
		if (want > type) {
			ptr += btrfs_extent_inline_ref_size(type);
			continue;
		}

		if (type == BTRFS_EXTENT_DATA_REF_KEY) {
			struct btrfs_extent_data_ref *dref;
			dref = (struct btrfs_extent_data_ref *)(&iref->offset);
			if (match_extent_data_ref(leaf, dref, root_objectid,
						  owner, offset)) {
				err = 0;
				break;
			}
			if (hash_extent_data_ref_item(leaf, dref) <
			    hash_extent_data_ref(root_objectid, owner, offset))
				break;
		} else {
			u64 ref_offset;
			ref_offset = btrfs_extent_inline_ref_offset(leaf, iref);
			if (parent > 0) {
				if (parent == ref_offset) {
					err = 0;
					break;
				}
				if (ref_offset < parent)
					break;
			} else {
				if (root_objectid == ref_offset) {
					err = 0;
					break;
				}
				if (ref_offset < root_objectid)
					break;
			}
		}
		ptr += btrfs_extent_inline_ref_size(type);
	}
	if (err == -ENOENT && insert) {
		if (item_size + extra_size >=
		    BTRFS_MAX_EXTENT_ITEM_SIZE(root)) {
			err = -EAGAIN;
			goto out;
		}
		/*
		 * To add new inline back ref, we have to make sure
		 * there is no corresponding back ref item.
		 * For simplicity, we just do not add new inline back
		 * ref if there is any kind of item for this block
		 */
		if (find_next_key(path, 0, &key) == 0 &&
		    key.objectid == bytenr &&
		    key.type < BTRFS_BLOCK_GROUP_ITEM_KEY) {
			err = -EAGAIN;
			goto out;
		}
	}
	*ref_ret = (struct btrfs_extent_inline_ref *)ptr;
out:
	if (insert) {
		path->keep_locks = 0;
		btrfs_unlock_up_safe(path, 1);
	}
	return err;
}

/*
 * helper to add new inline back ref
 */
static noinline_for_stack
void setup_inline_extent_backref(struct btrfs_fs_info *fs_info,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref *iref,
				 u64 parent, u64 root_objectid,
				 u64 owner, u64 offset, int refs_to_add,
				 struct btrfs_delayed_extent_op *extent_op)
{
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	unsigned long ptr;
	unsigned long end;
	unsigned long item_offset;
	u64 refs;
	int size;
	int type;

	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	item_offset = (unsigned long)iref - (unsigned long)ei;

	type = extent_ref_type(parent, owner);
	size = btrfs_extent_inline_ref_size(type);

	btrfs_extend_item(fs_info, path, size);

	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	refs = btrfs_extent_refs(leaf, ei);
	refs += refs_to_add;
	btrfs_set_extent_refs(leaf, ei, refs);
	if (extent_op)
		__run_delayed_extent_op(extent_op, leaf, ei);

	ptr = (unsigned long)ei + item_offset;
	end = (unsigned long)ei + btrfs_item_size_nr(leaf, path->slots[0]);
	if (ptr < end - size)
		memmove_extent_buffer(leaf, ptr + size, ptr,
				      end - size - ptr);

	iref = (struct btrfs_extent_inline_ref *)ptr;
	btrfs_set_extent_inline_ref_type(leaf, iref, type);
	if (type == BTRFS_EXTENT_DATA_REF_KEY) {
		struct btrfs_extent_data_ref *dref;
		dref = (struct btrfs_extent_data_ref *)(&iref->offset);
		btrfs_set_extent_data_ref_root(leaf, dref, root_objectid);
		btrfs_set_extent_data_ref_objectid(leaf, dref, owner);
		btrfs_set_extent_data_ref_offset(leaf, dref, offset);
		btrfs_set_extent_data_ref_count(leaf, dref, refs_to_add);
	} else if (type == BTRFS_SHARED_DATA_REF_KEY) {
		struct btrfs_shared_data_ref *sref;
		sref = (struct btrfs_shared_data_ref *)(iref + 1);
		btrfs_set_shared_data_ref_count(leaf, sref, refs_to_add);
		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
	} else if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
	} else {
		btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
	}
	btrfs_mark_buffer_dirty(leaf);
}

static int lookup_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref **ref_ret,
				 u64 bytenr, u64 num_bytes, u64 parent,
				 u64 root_objectid, u64 owner, u64 offset)
{
	int ret;

	ret = lookup_inline_extent_backref(trans, path, ref_ret, bytenr,
					   num_bytes, parent, root_objectid,
					   owner, offset, 0);
	if (ret != -ENOENT)
		return ret;

	btrfs_release_path(path);
	*ref_ret = NULL;

	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		ret = lookup_tree_block_ref(trans, path, bytenr, parent,
					    root_objectid);
	} else {
		ret = lookup_extent_data_ref(trans, path, bytenr, parent,
					     root_objectid, owner, offset);
	}
	return ret;
}

/*
 * helper to update/remove inline back ref
 */
static noinline_for_stack
void update_inline_extent_backref(struct btrfs_path *path,
				  struct btrfs_extent_inline_ref *iref,
				  int refs_to_mod,
				  struct btrfs_delayed_extent_op *extent_op,
				  int *last_ref)
{
	struct extent_buffer *leaf = path->nodes[0];
	struct btrfs_fs_info *fs_info = leaf->fs_info;
	struct btrfs_extent_item *ei;
	struct btrfs_extent_data_ref *dref = NULL;
	struct btrfs_shared_data_ref *sref = NULL;
	unsigned long ptr;
	unsigned long end;
	u32 item_size;
	int size;
	int type;
	u64 refs;

	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	refs = btrfs_extent_refs(leaf, ei);
	WARN_ON(refs_to_mod < 0 && refs + refs_to_mod <= 0);
	refs += refs_to_mod;
	btrfs_set_extent_refs(leaf, ei, refs);
	if (extent_op)
		__run_delayed_extent_op(extent_op, leaf, ei);

	/*
	 * If type is invalid, we should have bailed out after
	 * lookup_inline_extent_backref().
	 */
	type = btrfs_get_extent_inline_ref_type(leaf, iref, BTRFS_REF_TYPE_ANY);
	ASSERT(type != BTRFS_REF_TYPE_INVALID);

	if (type == BTRFS_EXTENT_DATA_REF_KEY) {
		dref = (struct btrfs_extent_data_ref *)(&iref->offset);
		refs = btrfs_extent_data_ref_count(leaf, dref);
	} else if (type == BTRFS_SHARED_DATA_REF_KEY) {
		sref = (struct btrfs_shared_data_ref *)(iref + 1);
		refs = btrfs_shared_data_ref_count(leaf, sref);
	} else {
		refs = 1;
		BUG_ON(refs_to_mod != -1);
	}

	BUG_ON(refs_to_mod < 0 && refs < -refs_to_mod);
	refs += refs_to_mod;

	if (refs > 0) {
		if (type == BTRFS_EXTENT_DATA_REF_KEY)
			btrfs_set_extent_data_ref_count(leaf, dref, refs);
		else
			btrfs_set_shared_data_ref_count(leaf, sref, refs);
	} else {
		*last_ref = 1;
		size = btrfs_extent_inline_ref_size(type);
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		ptr = (unsigned long)iref;
		end = (unsigned long)ei + item_size;
		if (ptr + size < end)
			memmove_extent_buffer(leaf, ptr, ptr + size,
					      end - ptr - size);
		item_size -= size;
		btrfs_truncate_item(fs_info, path, item_size, 1);
	}
	btrfs_mark_buffer_dirty(leaf);
}

static noinline_for_stack
int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_path *path,
				 u64 bytenr, u64 num_bytes, u64 parent,
				 u64 root_objectid, u64 owner,
				 u64 offset, int refs_to_add,
				 struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_extent_inline_ref *iref;
	int ret;

	ret = lookup_inline_extent_backref(trans, path, &iref, bytenr,
					   num_bytes, parent, root_objectid,
					   owner, offset, 1);
	if (ret == 0) {
		BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID);
		update_inline_extent_backref(path, iref, refs_to_add,
					     extent_op, NULL);
	} else if (ret == -ENOENT) {
		setup_inline_extent_backref(trans->fs_info, path, iref, parent,
					    root_objectid, owner, offset,
					    refs_to_add, extent_op);
		ret = 0;
	}
	return ret;
}

static int insert_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_path *path,
				 u64 bytenr, u64 parent, u64 root_objectid,
				 u64 owner, u64 offset, int refs_to_add)
{
	int ret;

	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		BUG_ON(refs_to_add != 1);
		ret = insert_tree_block_ref(trans, path, bytenr, parent,
					    root_objectid);
	} else {
		ret = insert_extent_data_ref(trans, path, bytenr, parent,
					     root_objectid, owner, offset,
					     refs_to_add);
	}
	return ret;
}

static int remove_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref *iref,
				 int refs_to_drop, int is_data, int *last_ref)
{
	int ret = 0;

	BUG_ON(!is_data && refs_to_drop != 1);
	if (iref) {
		update_inline_extent_backref(path, iref, -refs_to_drop, NULL,
					     last_ref);
	} else if (is_data) {
		ret = remove_extent_data_ref(trans, path, refs_to_drop,
					     last_ref);
	} else {
		*last_ref = 1;
		ret = btrfs_del_item(trans, trans->fs_info->extent_root, path);
	}
	return ret;
}

#define in_range(b, first, len)	((b) >= (first) && (b) < (first) + (len))
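
/*
 * Discard the range [start, start + len), carving out any part that
 * overlaps one of the device's superblock mirrors so those are never
 * discarded.
 */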
static int btrfs_issue_discard(struct block_device *bdev, u64 start, u64 len,
			       u64 *discarded_bytes)
{
	int j, ret = 0;
	u64 bytes_left, end;
	u64 aligned_start = ALIGN(start, 1 << 9);

	if (WARN_ON(start != aligned_start)) {
		len -= aligned_start - start;
		len = round_down(len, 1 << 9);
		start = aligned_start;
	}

	*discarded_bytes = 0;

	if (!len)
		return 0;

	end = start + len;
	bytes_left = len;

	/* Skip any superblocks on this device. */
	for (j = 0; j < BTRFS_SUPER_MIRROR_MAX; j++) {
		u64 sb_start = btrfs_sb_offset(j);
		u64 sb_end = sb_start + BTRFS_SUPER_INFO_SIZE;
		u64 size = sb_start - start;

		if (!in_range(sb_start, start, bytes_left) &&
		    !in_range(sb_end, start, bytes_left) &&
		    !in_range(start, sb_start, BTRFS_SUPER_INFO_SIZE))
			continue;

		/*
		 * Superblock spans beginning of range.  Adjust start and
		 * try again.
		 */
		if (sb_start <= start) {
			start += sb_end - start;
			if (start > end) {
				bytes_left = 0;
				break;
			}
			bytes_left = end - start;
			continue;
		}

		if (size) {
			ret = blkdev_issue_discard(bdev, start >> 9, size >> 9,
						   GFP_NOFS, 0);
			if (!ret)
				*discarded_bytes += size;
			else if (ret != -EOPNOTSUPP)
				return ret;
		}

		start = sb_end;
		if (start > end) {
			bytes_left = 0;
			break;
		}
		bytes_left = end - start;
	}

	if (bytes_left) {
		ret = blkdev_issue_discard(bdev, start >> 9, bytes_left >> 9,
					   GFP_NOFS, 0);
		if (!ret)
			*discarded_bytes += bytes_left;
	}
	return ret;
}

int btrfs_discard_extent(struct btrfs_fs_info *fs_info, u64 bytenr,
			 u64 num_bytes, u64 *actual_bytes)
{
	int ret;
	u64 discarded_bytes = 0;
	struct btrfs_bio *bbio = NULL;

	/*
	 * Avoid races with device replace and make sure our bbio has devices
	 * associated to its stripes that don't go away while we are discarding.
	 */
	btrfs_bio_counter_inc_blocked(fs_info);
	/* Tell the block device(s) that the sectors can be discarded */
	ret = btrfs_map_block(fs_info, BTRFS_MAP_DISCARD, bytenr, &num_bytes,
			      &bbio, 0);
	/* Error condition is -ENOMEM */
	if (!ret) {
		struct btrfs_bio_stripe *stripe = bbio->stripes;
		int i;

		for (i = 0; i < bbio->num_stripes; i++, stripe++) {
			u64 bytes;
			struct request_queue *req_q;

			if (!stripe->dev->bdev) {
				ASSERT(btrfs_test_opt(fs_info, DEGRADED));
				continue;
			}
			req_q = bdev_get_queue(stripe->dev->bdev);
			if (!blk_queue_discard(req_q))
				continue;

			ret = btrfs_issue_discard(stripe->dev->bdev,
						  stripe->physical,
						  stripe->length,
						  &bytes);
			if (!ret)
				discarded_bytes += bytes;
			else if (ret != -EOPNOTSUPP)
				break; /* Logic errors or -ENOMEM, or -EIO but I don't know how that could happen JDM */

			/*
			 * Just in case we get back EOPNOTSUPP for some reason,
			 * just ignore the return value so we don't screw up
			 * people calling discard_extent.
			 */
			ret = 0;
		}
		btrfs_put_bbio(bbio);
	}
	btrfs_bio_counter_dec(fs_info);

	if (actual_bytes)
		*actual_bytes = discarded_bytes;

	if (ret == -EOPNOTSUPP)
		ret = 0;
	return ret;
}

/* Can return -ENOMEM */
int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root,
			 u64 bytenr, u64 num_bytes, u64 parent,
			 u64 root_objectid, u64 owner, u64 offset)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int old_ref_mod, new_ref_mod;
	int ret;

	BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID &&
	       root_objectid == BTRFS_TREE_LOG_OBJECTID);

	btrfs_ref_tree_mod(root, bytenr, num_bytes, parent, root_objectid,
			   owner, offset, BTRFS_ADD_DELAYED_REF);

	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		ret = btrfs_add_delayed_tree_ref(trans, bytenr,
						 num_bytes, parent,
						 root_objectid, (int)owner,
						 BTRFS_ADD_DELAYED_REF, NULL,
						 &old_ref_mod, &new_ref_mod);
	} else {
		ret = btrfs_add_delayed_data_ref(trans, bytenr,
						 num_bytes, parent,
						 root_objectid, owner, offset,
						 0, BTRFS_ADD_DELAYED_REF,
						 &old_ref_mod, &new_ref_mod);
	}

	if (ret == 0 && old_ref_mod < 0 && new_ref_mod >= 0) {
		bool metadata = owner < BTRFS_FIRST_FREE_OBJECTID;

		add_pinned_bytes(fs_info, -num_bytes, metadata, root_objectid);
	}

	return ret;
}

/*
 * __btrfs_inc_extent_ref - insert backreference for a given extent
 *
 * @trans:	    Handle of transaction
 *
 * @node:	    The delayed ref node used to get the bytenr/length for
 *		    extent whose references are incremented.
 *
 * @parent:	    If this is a shared extent (BTRFS_SHARED_DATA_REF_KEY/
 *		    BTRFS_SHARED_BLOCK_REF_KEY) then it holds the logical
 *		    bytenr of the parent block. Since new extents are always
 *		    created with indirect references, this will only be the case
 *		    when relocating a shared extent. In that case, root_objectid
 *		    will be BTRFS_TREE_RELOC_OBJECTID. Otherwise, parent must
 *		    be 0
 *
 * @root_objectid:  The id of the root where this modification has originated,
 *		    this can be either one of the well-known metadata trees or
 *		    the subvolume id which references this extent.
 *
 * @owner:	    For data extents it is the inode number of the owning file.
 *		    For metadata extents this parameter holds the level in the
 *		    tree of the extent.
 *
 * @offset:	    For metadata extents the offset is ignored and is currently
 *		    always passed as 0. For data extents it is the fileoffset
 *		    this extent belongs to.
 *
 * @refs_to_add:    Number of references to add
 *
 * @extent_op:      Pointer to a structure, holding information necessary when
 *                  updating a tree block's flags
 *
 */
static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
				  struct btrfs_delayed_ref_node *node,
				  u64 parent, u64 root_objectid,
				  u64 owner, u64 offset, int refs_to_add,
				  struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_extent_item *item;
	struct btrfs_key key;
	u64 bytenr = node->bytenr;
	u64 num_bytes = node->num_bytes;
	u64 refs;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->reada = READA_FORWARD;
	path->leave_spinning = 1;
	/* this will setup the path even if it fails to insert the back ref */
	ret = insert_inline_extent_backref(trans, path, bytenr, num_bytes,
					   parent, root_objectid, owner,
					   offset, refs_to_add, extent_op);
	if ((ret < 0 && ret != -EAGAIN) || !ret)
		goto out;

	/*
	 * Ok we had -EAGAIN which means we didn't have space to insert an
	 * inline extent ref, so just update the reference count and add a
	 * normal backref.
	 */
	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	refs = btrfs_extent_refs(leaf, item);
	btrfs_set_extent_refs(leaf, item, refs + refs_to_add);
	if (extent_op)
		__run_delayed_extent_op(extent_op, leaf, item);

	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	path->reada = READA_FORWARD;
	path->leave_spinning = 1;
	/* now insert the actual backref */
	ret = insert_extent_backref(trans, path, bytenr, parent, root_objectid,
				    owner, offset, refs_to_add);
	if (ret)
		btrfs_abort_transaction(trans, ret);
out:
	btrfs_free_path(path);
	return ret;
}
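
/*
 * Process one delayed data ref: either allocate the reserved extent for a
 * brand new allocation, or add/drop references on an existing extent.
 */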
static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
				struct btrfs_delayed_ref_node *node,
				struct btrfs_delayed_extent_op *extent_op,
				int insert_reserved)
{
	int ret = 0;
	struct btrfs_delayed_data_ref *ref;
	struct btrfs_key ins;
	u64 parent = 0;
	u64 ref_root = 0;
	u64 flags = 0;

	ins.objectid = node->bytenr;
	ins.offset = node->num_bytes;
	ins.type = BTRFS_EXTENT_ITEM_KEY;

	ref = btrfs_delayed_node_to_data_ref(node);
	trace_run_delayed_data_ref(trans->fs_info, node, ref, node->action);

	if (node->type == BTRFS_SHARED_DATA_REF_KEY)
		parent = ref->parent;
	ref_root = ref->root;

	if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
		if (extent_op)
			flags |= extent_op->flags_to_set;
		ret = alloc_reserved_file_extent(trans, parent, ref_root,
						 flags, ref->objectid,
						 ref->offset, &ins,
						 node->ref_mod);
	} else if (node->action == BTRFS_ADD_DELAYED_REF) {
		ret = __btrfs_inc_extent_ref(trans, node, parent, ref_root,
					     ref->objectid, ref->offset,
					     node->ref_mod, extent_op);
	} else if (node->action == BTRFS_DROP_DELAYED_REF) {
		ret = __btrfs_free_extent(trans, node, parent,
					  ref_root, ref->objectid,
					  ref->offset, node->ref_mod,
					  extent_op);
	} else {
		BUG();
	}
	return ret;
}

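/*
 * Apply a delayed extent op to an extent item that is already mapped into a
 * leaf: OR in any new flags and, for tree blocks only, rewrite the first key
 * stored in the btrfs_tree_block_info that follows the extent item.
 */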
static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
				    struct extent_buffer *leaf,
				    struct btrfs_extent_item *ei)
{
	u64 flags = btrfs_extent_flags(leaf, ei);

	if (extent_op->update_flags) {
		flags |= extent_op->flags_to_set;
		btrfs_set_extent_flags(leaf, ei, flags);
	}

	if (extent_op->update_key) {
		struct btrfs_tree_block_info *bi;

		BUG_ON(!(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK));
		bi = (struct btrfs_tree_block_info *)(ei + 1);
		btrfs_set_tree_block_key(leaf, bi, &extent_op->key);
	}
}

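/*
 * Look up the extent item for a delayed ref head and apply its pending
 * extent op. With the SKINNY_METADATA incompat feature the metadata item is
 * keyed by (bytenr, METADATA_ITEM, level); if that lookup misses we retry
 * with the old-style (bytenr, EXTENT_ITEM, num_bytes) key before giving up
 * with -EIO.
 */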
static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
				 struct btrfs_delayed_ref_head *head,
				 struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_key key;
	struct btrfs_path *path;
	struct btrfs_extent_item *ei;
	struct extent_buffer *leaf;
	u32 item_size;
	int ret;
	int err = 0;
	int metadata = !extent_op->is_data;

	if (trans->aborted)
		return 0;

	if (metadata && !btrfs_fs_incompat(fs_info, SKINNY_METADATA))
		metadata = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = head->bytenr;

	if (metadata) {
		key.type = BTRFS_METADATA_ITEM_KEY;
		key.offset = extent_op->level;
	} else {
		key.type = BTRFS_EXTENT_ITEM_KEY;
		key.offset = head->num_bytes;
	}

again:
	path->reada = READA_FORWARD;
	path->leave_spinning = 1;
	ret = btrfs_search_slot(trans, fs_info->extent_root, &key, path, 0, 1);
	if (ret < 0) {
		err = ret;
		goto out;
	}
	if (ret > 0) {
		if (metadata) {
			if (path->slots[0] > 0) {
				path->slots[0]--;
				btrfs_item_key_to_cpu(path->nodes[0], &key,
						      path->slots[0]);
				if (key.objectid == head->bytenr &&
				    key.type == BTRFS_EXTENT_ITEM_KEY &&
				    key.offset == head->num_bytes)
					ret = 0;
			}
			if (ret > 0) {
				btrfs_release_path(path);
				metadata = 0;

				key.objectid = head->bytenr;
				key.offset = head->num_bytes;
				key.type = BTRFS_EXTENT_ITEM_KEY;
				goto again;
			}
		} else {
			err = -EIO;
			goto out;
		}
	}

	leaf = path->nodes[0];
	item_size = btrfs_item_size_nr(leaf, path->slots[0]);

	if (unlikely(item_size < sizeof(*ei))) {
		err = -EINVAL;
		btrfs_print_v0_err(fs_info);
		btrfs_abort_transaction(trans, err);
		goto out;
	}

	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	__run_delayed_extent_op(extent_op, leaf, ei);

	btrfs_mark_buffer_dirty(leaf);
out:
	btrfs_free_path(path);
	return err;
}

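/*
 * Run one delayed ref for a tree block. Mirrors run_delayed_data_ref() but
 * for metadata: tree blocks always carry exactly one reference per ref node,
 * so a ref_mod other than 1 is reported and treated as corruption (-EIO).
 */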
static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
				struct btrfs_delayed_ref_node *node,
				struct btrfs_delayed_extent_op *extent_op,
				int insert_reserved)
{
	int ret = 0;
	struct btrfs_delayed_tree_ref *ref;
	u64 parent = 0;
	u64 ref_root = 0;

	ref = btrfs_delayed_node_to_tree_ref(node);
	trace_run_delayed_tree_ref(trans->fs_info, node, ref, node->action);

	if (node->type == BTRFS_SHARED_BLOCK_REF_KEY)
		parent = ref->parent;
	ref_root = ref->root;

	if (node->ref_mod != 1) {
		btrfs_err(trans->fs_info,
	"btree block(%llu) has %d references rather than 1: action %d ref_root %llu parent %llu",
			  node->bytenr, node->ref_mod, node->action, ref_root,
			  parent);
		return -EIO;
	}
	if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
		BUG_ON(!extent_op || !extent_op->update_flags);
		ret = alloc_reserved_tree_block(trans, node, extent_op);
	} else if (node->action == BTRFS_ADD_DELAYED_REF) {
		ret = __btrfs_inc_extent_ref(trans, node, parent, ref_root,
					     ref->level, 0, 1, extent_op);
	} else if (node->action == BTRFS_DROP_DELAYED_REF) {
		ret = __btrfs_free_extent(trans, node, parent, ref_root,
					  ref->level, 0, 1, extent_op);
	} else {
		BUG();
	}
	return ret;
}

/* helper function to actually process a single delayed ref entry */
static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
			       struct btrfs_delayed_ref_node *node,
			       struct btrfs_delayed_extent_op *extent_op,
			       int insert_reserved)
{
	int ret = 0;

	if (trans->aborted) {
		if (insert_reserved)
			btrfs_pin_extent(trans->fs_info, node->bytenr,
					 node->num_bytes, 1);
		return 0;
	}

	if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
	    node->type == BTRFS_SHARED_BLOCK_REF_KEY)
		ret = run_delayed_tree_ref(trans, node, extent_op,
					   insert_reserved);
	else if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
		 node->type == BTRFS_SHARED_DATA_REF_KEY)
		ret = run_delayed_data_ref(trans, node, extent_op,
					   insert_reserved);
	else
		BUG();

	if (ret && insert_reserved)
		btrfs_pin_extent(trans->fs_info, node->bytenr,
				 node->num_bytes, 1);
	return ret;
}

static inline struct btrfs_delayed_ref_node *
select_delayed_ref(struct btrfs_delayed_ref_head *head)
{
	struct btrfs_delayed_ref_node *ref;

	if (RB_EMPTY_ROOT(&head->ref_tree.rb_root))
		return NULL;

	/*
	 * Select a delayed ref of type BTRFS_ADD_DELAYED_REF first.
	 * This is to prevent a ref count from going down to zero, which deletes
	 * the extent item from the extent tree, when there still are references
	 * to add, which would fail because they would not find the extent item.
	 */
	if (!list_empty(&head->ref_add_list))
		return list_first_entry(&head->ref_add_list,
					struct btrfs_delayed_ref_node,
					add_list);

	ref = rb_entry(rb_first_cached(&head->ref_tree),
		       struct btrfs_delayed_ref_node, ref_node);
	ASSERT(list_empty(&ref->add_list));
	return ref;
}

static void unselect_delayed_ref_head(struct btrfs_delayed_ref_root *delayed_refs,
				      struct btrfs_delayed_ref_head *head)
{
	spin_lock(&delayed_refs->lock);
	head->processing = 0;
	delayed_refs->num_heads_ready++;
	spin_unlock(&delayed_refs->lock);
	btrfs_delayed_ref_unlock(head);
}

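/*
 * Run and free the pending extent op of a ref head, if any. Returns 0 with
 * head->lock still held when there was nothing to run (or the op is owned
 * by must_insert_reserved and just gets freed); otherwise drops head->lock,
 * runs the op and returns 1 on success or a negative error.
 */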
static int cleanup_extent_op(struct btrfs_trans_handle *trans,
			     struct btrfs_delayed_ref_head *head)
{
	struct btrfs_delayed_extent_op *extent_op = head->extent_op;
	int ret;

	if (!extent_op)
		return 0;

	head->extent_op = NULL;
	if (head->must_insert_reserved) {
		btrfs_free_delayed_extent_op(extent_op);
		return 0;
	}

	spin_unlock(&head->lock);
	ret = run_delayed_extent_op(trans, head, extent_op);
	btrfs_free_delayed_extent_op(extent_op);
	return ret ? ret : 1;
}

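/*
 * Final accounting once all ref nodes of a head have been run: remove the
 * head from the rbtree, drop its contribution to total_bytes_pinned and
 * pending csum bytes when the head was a net drop, pin the extent if its
 * reserved space must be kept, and release qgroup reservations. Returns 1
 * (retry) if new refs or a new extent op sneaked in while the locks were
 * dropped, 0 when the head is fully processed, or a negative error.
 */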
static int cleanup_ref_head(struct btrfs_trans_handle *trans,
			    struct btrfs_delayed_ref_head *head)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_delayed_ref_root *delayed_refs;
	int ret;

	delayed_refs = &trans->transaction->delayed_refs;

	ret = cleanup_extent_op(trans, head);
	if (ret < 0) {
		unselect_delayed_ref_head(delayed_refs, head);
		btrfs_debug(fs_info, "run_delayed_extent_op returned %d", ret);
		return ret;
	} else if (ret) {
		return ret;
	}

	/*
	 * Need to drop our head ref lock and re-acquire the delayed ref lock
	 * and then re-check to make sure nobody got added.
	 */
	spin_unlock(&head->lock);
	spin_lock(&delayed_refs->lock);
	spin_lock(&head->lock);
	if (!RB_EMPTY_ROOT(&head->ref_tree.rb_root) || head->extent_op) {
		spin_unlock(&head->lock);
		spin_unlock(&delayed_refs->lock);
		return 1;
	}
	delayed_refs->num_heads--;
	rb_erase_cached(&head->href_node, &delayed_refs->href_root);
	RB_CLEAR_NODE(&head->href_node);
	spin_unlock(&head->lock);
	spin_unlock(&delayed_refs->lock);
	atomic_dec(&delayed_refs->num_entries);

	trace_run_delayed_ref_head(fs_info, head, 0);

	if (head->total_ref_mod < 0) {
		struct btrfs_space_info *space_info;
		u64 flags;

		if (head->is_data)
			flags = BTRFS_BLOCK_GROUP_DATA;
		else if (head->is_system)
			flags = BTRFS_BLOCK_GROUP_SYSTEM;
		else
			flags = BTRFS_BLOCK_GROUP_METADATA;
		space_info = __find_space_info(fs_info, flags);
		ASSERT(space_info);
		percpu_counter_add_batch(&space_info->total_bytes_pinned,
					 -head->num_bytes,
					 BTRFS_TOTAL_BYTES_PINNED_BATCH);

		if (head->is_data) {
			spin_lock(&delayed_refs->lock);
			delayed_refs->pending_csums -= head->num_bytes;
			spin_unlock(&delayed_refs->lock);
		}
	}

	if (head->must_insert_reserved) {
		btrfs_pin_extent(fs_info, head->bytenr,
				 head->num_bytes, 1);
		if (head->is_data) {
			ret = btrfs_del_csums(trans, fs_info, head->bytenr,
					      head->num_bytes);
		}
	}

	/* Also free its reserved qgroup space */
	btrfs_qgroup_free_delayed_ref(fs_info, head->qgroup_ref_root,
				      head->qgroup_reserved);
	btrfs_delayed_ref_unlock(head);
	btrfs_put_delayed_ref_head(head);
	return 0;
}

static struct btrfs_delayed_ref_head *btrfs_obtain_ref_head(
					struct btrfs_trans_handle *trans)
{
	struct btrfs_delayed_ref_root *delayed_refs =
		&trans->transaction->delayed_refs;
	struct btrfs_delayed_ref_head *head = NULL;
	int ret;

	spin_lock(&delayed_refs->lock);
	head = btrfs_select_ref_head(delayed_refs);
	if (!head) {
		spin_unlock(&delayed_refs->lock);
		return head;
	}

	/*
	 * Grab the lock that says we are going to process all the refs for
	 * this head
	 */
	ret = btrfs_delayed_ref_lock(delayed_refs, head);
	spin_unlock(&delayed_refs->lock);

	/*
	 * We may have dropped the spin lock to get the head mutex lock, and
	 * that might have given someone else time to free the head. If that's
	 * true, it has been removed from our list and we can move on.
	 */
	if (ret == -EAGAIN)
		head = ERR_PTR(-EAGAIN);

	return head;
}

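/*
 * Drain the ref nodes of one locked head, running each via
 * run_one_delayed_ref() and folding its ref_mod back into the head so the
 * head's bookkeeping stays accurate. Returns 0 when the head is drained,
 * -EAGAIN if a ref is blocked on the tree mod log sequence (the head is
 * unselected first), or the error from running a ref. *run_refs counts how
 * many refs were actually processed, which the caller uses for its runtime
 * statistics.
 */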
static int btrfs_run_delayed_refs_for_head(struct btrfs_trans_handle *trans,
					   struct btrfs_delayed_ref_head *locked_ref,
					   unsigned long *run_refs)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_delayed_extent_op *extent_op;
	struct btrfs_delayed_ref_node *ref;
	int must_insert_reserved = 0;
	int ret;

	delayed_refs = &trans->transaction->delayed_refs;

	lockdep_assert_held(&locked_ref->mutex);
	lockdep_assert_held(&locked_ref->lock);

	while ((ref = select_delayed_ref(locked_ref))) {
		if (ref->seq &&
		    btrfs_check_delayed_seq(fs_info, ref->seq)) {
			spin_unlock(&locked_ref->lock);
			unselect_delayed_ref_head(delayed_refs, locked_ref);
			return -EAGAIN;
		}

		(*run_refs)++;
		ref->in_tree = 0;
		rb_erase_cached(&ref->ref_node, &locked_ref->ref_tree);
		RB_CLEAR_NODE(&ref->ref_node);
		if (!list_empty(&ref->add_list))
			list_del(&ref->add_list);
		/*
		 * When we play the delayed ref, also correct the ref_mod on
		 * head
		 */
		switch (ref->action) {
		case BTRFS_ADD_DELAYED_REF:
		case BTRFS_ADD_DELAYED_EXTENT:
			locked_ref->ref_mod -= ref->ref_mod;
			break;
		case BTRFS_DROP_DELAYED_REF:
			locked_ref->ref_mod += ref->ref_mod;
			break;
		default:
			WARN_ON(1);
		}
		atomic_dec(&delayed_refs->num_entries);

		/*
		 * Record the must_insert_reserved flag before we drop the
		 * spin lock.
		 */
		must_insert_reserved = locked_ref->must_insert_reserved;
		locked_ref->must_insert_reserved = 0;

		extent_op = locked_ref->extent_op;
		locked_ref->extent_op = NULL;
		spin_unlock(&locked_ref->lock);

		ret = run_one_delayed_ref(trans, ref, extent_op,
					  must_insert_reserved);

		btrfs_free_delayed_extent_op(extent_op);
		if (ret) {
			unselect_delayed_ref_head(delayed_refs, locked_ref);
			btrfs_put_delayed_ref(ref);
			btrfs_debug(fs_info, "run_one_delayed_ref returned %d",
				    ret);
			return ret;
		}

		btrfs_put_delayed_ref(ref);
		cond_resched();

		spin_lock(&locked_ref->lock);
		btrfs_merge_delayed_refs(trans, delayed_refs, locked_ref);
	}

	return 0;
}

/*
 * Returns 0 on success or if called with an already aborted transaction.
 * Returns -ENOMEM or -EIO on failure and will abort the transaction.
 */
static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
					     unsigned long nr)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_delayed_ref_head *locked_ref = NULL;
	ktime_t start = ktime_get();
	int ret;
	unsigned long count = 0;
	unsigned long actual_count = 0;

	delayed_refs = &trans->transaction->delayed_refs;
	do {
		if (!locked_ref) {
			locked_ref = btrfs_obtain_ref_head(trans);
			if (IS_ERR_OR_NULL(locked_ref)) {
				if (PTR_ERR(locked_ref) == -EAGAIN) {
					continue;
				} else {
					break;
				}
			}
			count++;
		}
		/*
		 * We need to try and merge add/drops of the same ref since we
		 * can run into issues with relocate dropping the implicit ref
		 * and then it being added back again before the drop can
		 * finish. If we merged anything we need to re-loop so we can
		 * get a good ref.
		 * Or we can get node references of the same type that weren't
		 * merged when created due to bumps in the tree mod seq, and
		 * we need to merge them to prevent adding an inline extent
		 * backref before dropping it (triggering a BUG_ON at
		 * insert_inline_extent_backref()).
		 */
		spin_lock(&locked_ref->lock);
		btrfs_merge_delayed_refs(trans, delayed_refs, locked_ref);

		ret = btrfs_run_delayed_refs_for_head(trans, locked_ref,
						      &actual_count);
		if (ret < 0 && ret != -EAGAIN) {
			/*
			 * Error, btrfs_run_delayed_refs_for_head already
			 * unlocked everything so just bail out
			 */
			return ret;
		} else if (!ret) {
			/*
			 * Success, perform the usual cleanup of a processed
			 * head
			 */
			ret = cleanup_ref_head(trans, locked_ref);
			if (ret > 0) {
				/* We dropped our lock, we need to loop. */
				ret = 0;
				continue;
			} else if (ret) {
				return ret;
			}
		}

		/*
		 * Either success case or btrfs_run_delayed_refs_for_head
		 * returned -EAGAIN, meaning we need to select another head
		 */

		locked_ref = NULL;
		cond_resched();
	} while ((nr != -1 && count < nr) || locked_ref);

	/*
	 * We don't want to include ref heads since we can have empty ref heads
	 * and those will drastically skew our runtime down since we just do
	 * accounting, no actual extent tree updates.
	 */
	if (actual_count > 0) {
		u64 runtime = ktime_to_ns(ktime_sub(ktime_get(), start));
		u64 avg;

		/*
		 * We weigh the current average higher than our current runtime
		 * to avoid large swings in the average.
		 */
		spin_lock(&delayed_refs->lock);
		avg = fs_info->avg_delayed_ref_runtime * 3 + runtime;
		fs_info->avg_delayed_ref_runtime = avg >> 2;	/* div by 4 */
		spin_unlock(&delayed_refs->lock);
	}
	return 0;
}

#ifdef SCRAMBLE_DELAYED_REFS
/*
 * Normally delayed refs get processed in ascending bytenr order. This
 * correlates in most cases to the order added. To expose dependencies on this
 * order, we start to process the tree in the middle instead of the beginning
 */
static u64 find_middle(struct rb_root *root)
{
	struct rb_node *n = root->rb_node;
	struct btrfs_delayed_ref_node *entry;
	int alt = 1;
	u64 middle;
	u64 first = 0, last = 0;

	n = rb_first(root);
	if (n) {
		entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
		first = entry->bytenr;
	}
	n = rb_last(root);
	if (n) {
		entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
		last = entry->bytenr;
	}
	n = root->rb_node;

	while (n) {
		entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
		WARN_ON(!entry->in_tree);

		middle = entry->bytenr;

		if (alt)
			n = n->rb_left;
		else
			n = n->rb_right;

		alt = 1 - alt;
	}
	return middle;
}
#endif

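/*
 * Estimate how many extent tree leaves the given number of delayed ref heads
 * could dirty. Each head is costed as an extent item plus one inline ref
 * (plus a tree block info when the SKINNY_METADATA feature is missing).
 */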
static inline u64 heads_to_leaves(struct btrfs_fs_info *fs_info, u64 heads)
{
	u64 num_bytes;

	num_bytes = heads * (sizeof(struct btrfs_extent_item) +
			     sizeof(struct btrfs_extent_inline_ref));
	if (!btrfs_fs_incompat(fs_info, SKINNY_METADATA))
		num_bytes += heads * sizeof(struct btrfs_tree_block_info);

	/*
	 * We don't ever fill up leaves all the way, so the caller doubles this
	 * estimate to get closer to what we're really going to want to use.
	 */
	return div_u64(num_bytes, BTRFS_LEAF_DATA_SIZE(fs_info));
}

/*
 * Takes the number of bytes to be checksummed and figures out how many
 * leaves it would require to store the csums for that many bytes.
 */
u64 btrfs_csum_bytes_to_leaves(struct btrfs_fs_info *fs_info, u64 csum_bytes)
{
	u64 csum_size;
	u64 num_csums_per_leaf;
	u64 num_csums;

	csum_size = BTRFS_MAX_ITEM_SIZE(fs_info);
	num_csums_per_leaf = div64_u64(csum_size,
			(u64)btrfs_super_csum_size(fs_info->super_copy));
	num_csums = div64_u64(csum_bytes, fs_info->sectorsize);
	num_csums += num_csums_per_leaf - 1;
	num_csums = div64_u64(num_csums, num_csums_per_leaf);
	return num_csums;
}

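/*
 * Worked example for btrfs_csum_bytes_to_leaves() above, with purely
 * illustrative numbers (assuming a 4KiB sectorsize, 4 byte crc32c csums and
 * roughly 16KiB of usable leaf item space): one leaf then holds about
 * 16384 / 4 = 4096 csums and covers about 16MiB of data, so checksumming
 * 1GiB of dirty data needs about ceil(262144 / 4096) = 64 csum leaves.
 */

/*
 * Returns 1 if the global block reserve looks too small to safely run the
 * currently queued delayed refs and dirty block group updates, 0 otherwise.
 */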
int btrfs_check_space_for_delayed_refs(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_block_rsv *global_rsv;
	u64 num_heads = trans->transaction->delayed_refs.num_heads_ready;
	u64 csum_bytes = trans->transaction->delayed_refs.pending_csums;
	unsigned int num_dirty_bgs = trans->transaction->num_dirty_bgs;
	u64 num_bytes, num_dirty_bgs_bytes;
	int ret = 0;

	num_bytes = btrfs_calc_trans_metadata_size(fs_info, 1);
	num_heads = heads_to_leaves(fs_info, num_heads);
	if (num_heads > 1)
		num_bytes += (num_heads - 1) * fs_info->nodesize;
	num_bytes <<= 1;
	num_bytes += btrfs_csum_bytes_to_leaves(fs_info, csum_bytes) *
							fs_info->nodesize;
	num_dirty_bgs_bytes = btrfs_calc_trans_metadata_size(fs_info,
							     num_dirty_bgs);
	global_rsv = &fs_info->global_block_rsv;

	/*
	 * If we can't allocate any more chunks let's make sure we have _lots_
	 * of wiggle room since running delayed refs can create more delayed
	 * refs.
	 */
	if (global_rsv->space_info->full) {
		num_dirty_bgs_bytes <<= 1;
		num_bytes <<= 1;
	}

	spin_lock(&global_rsv->lock);
	if (global_rsv->reserved <= num_bytes + num_dirty_bgs_bytes)
		ret = 1;
	spin_unlock(&global_rsv->lock);
	return ret;
}

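/*
 * Decide whether a transaction holder should throttle and help run delayed
 * refs: returns 1 when the estimated time to run all queued entries
 * (num_entries * average per-ref runtime) reaches one second, 2 when it
 * reaches half a second, and otherwise defers to the global reserve check
 * above.
 */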
int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans)
{
	u64 num_entries =
		atomic_read(&trans->transaction->delayed_refs.num_entries);
	u64 avg_runtime;
	u64 val;

	smp_mb();
	avg_runtime = trans->fs_info->avg_delayed_ref_runtime;
	val = num_entries * avg_runtime;
	if (val >= NSEC_PER_SEC)
		return 1;
	if (val >= NSEC_PER_SEC / 2)
		return 2;

	return btrfs_check_space_for_delayed_refs(trans);
}

struct async_delayed_refs {
	struct btrfs_root *root;
	u64 transid;
	int count;
	int error;
	int sync;
	struct completion wait;
	struct btrfs_work work;
};

static inline struct async_delayed_refs *
to_async_delayed_refs(struct btrfs_work *work)
{
	return container_of(work, struct async_delayed_refs, work);
}

static void delayed_ref_async_start(struct btrfs_work *work)
{
	struct async_delayed_refs *async = to_async_delayed_refs(work);
	struct btrfs_trans_handle *trans;
	struct btrfs_fs_info *fs_info = async->root->fs_info;
	int ret;

	/* if the commit is already started, we don't need to wait here */
	if (btrfs_transaction_blocked(fs_info))
		goto done;

	trans = btrfs_join_transaction(async->root);
	if (IS_ERR(trans)) {
		async->error = PTR_ERR(trans);
		goto done;
	}

	/*
	 * trans->sync means that when we call end_transaction, we won't
	 * wait on delayed refs
	 */
	trans->sync = true;

	/* Don't bother flushing if we got into a different transaction */
	if (trans->transid > async->transid)
		goto end;

	ret = btrfs_run_delayed_refs(trans, async->count);
	if (ret)
		async->error = ret;
end:
	ret = btrfs_end_transaction(trans);
	if (ret && !async->error)
		async->error = ret;
done:
	if (async->sync)
		complete(&async->wait);
	else
		kfree(async);
}

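/*
 * Queue delayed ref processing on the extent_workers workqueue. With
 * wait == 0 the async context frees itself when done and any error is
 * dropped; with wait != 0 we block on the completion and hand back the
 * worker's error code.
 */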
int btrfs_async_run_delayed_refs(struct btrfs_fs_info *fs_info,
				 unsigned long count, u64 transid, int wait)
{
	struct async_delayed_refs *async;
	int ret;

	async = kmalloc(sizeof(*async), GFP_NOFS);
	if (!async)
		return -ENOMEM;

	async->root = fs_info->tree_root;
	async->count = count;
	async->error = 0;
	async->transid = transid;
	if (wait)
		async->sync = 1;
	else
		async->sync = 0;
	init_completion(&async->wait);

	btrfs_init_work(&async->work, btrfs_extent_refs_helper,
			delayed_ref_async_start, NULL, NULL);

	btrfs_queue_work(fs_info->extent_workers, &async->work);

	if (wait) {
		wait_for_completion(&async->wait);
		ret = async->error;
		kfree(async);
		return ret;
	}
	return 0;
}

/*
 * This starts processing the delayed reference count updates and extent
 * insertions we have queued up so far. count can be 0, which means to
 * process everything in the tree at the start of the run (but not newly
 * added entries), or it can be some target number you'd like to process.
 *
 * Returns 0 on success or if called with an aborted transaction
 * Returns <0 on error and aborts the transaction
 */
int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
			   unsigned long count)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct rb_node *node;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_delayed_ref_head *head;
	int ret;
	int run_all = count == (unsigned long)-1;

	/* We'll clean this up in btrfs_cleanup_transaction */
	if (trans->aborted)
		return 0;

	if (test_bit(BTRFS_FS_CREATING_FREE_SPACE_TREE, &fs_info->flags))
		return 0;

	delayed_refs = &trans->transaction->delayed_refs;
	if (count == 0)
		count = atomic_read(&delayed_refs->num_entries) * 2;

again:
#ifdef SCRAMBLE_DELAYED_REFS
	delayed_refs->run_delayed_start = find_middle(&delayed_refs->root);
#endif
	ret = __btrfs_run_delayed_refs(trans, count);
	if (ret < 0) {
		btrfs_abort_transaction(trans, ret);
		return ret;
	}

	if (run_all) {
		if (!list_empty(&trans->new_bgs))
			btrfs_create_pending_block_groups(trans);

		spin_lock(&delayed_refs->lock);
		node = rb_first_cached(&delayed_refs->href_root);
		if (!node) {
			spin_unlock(&delayed_refs->lock);
			goto out;
		}
		head = rb_entry(node, struct btrfs_delayed_ref_head,
				href_node);
		refcount_inc(&head->refs);
		spin_unlock(&delayed_refs->lock);

		/* Mutex was contended, block until it's released and retry. */
		mutex_lock(&head->mutex);
		mutex_unlock(&head->mutex);

		btrfs_put_delayed_ref_head(head);
		cond_resched();
		goto again;
	}
out:
	return 0;
}

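/*
 * Typical usage, as seen elsewhere in this file: pass 0 to process what was
 * queued when the run starts, or (unsigned long)-1 to keep looping until no
 * ref heads remain:
 *
 *	ret = btrfs_run_delayed_refs(trans, 0);
 *	ret = btrfs_run_delayed_refs(trans, (unsigned long)-1);
 */
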
int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
				struct btrfs_fs_info *fs_info,
				u64 bytenr, u64 num_bytes, u64 flags,
				int level, int is_data)
{
	struct btrfs_delayed_extent_op *extent_op;
	int ret;

	extent_op = btrfs_alloc_delayed_extent_op();
	if (!extent_op)
		return -ENOMEM;

	extent_op->flags_to_set = flags;
	extent_op->update_flags = true;
	extent_op->update_key = false;
	extent_op->is_data = is_data ? true : false;
	extent_op->level = level;

	ret = btrfs_add_delayed_extent_op(fs_info, trans, bytenr,
					  num_bytes, extent_op);
	if (ret)
		btrfs_free_delayed_extent_op(extent_op);
	return ret;
}

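/*
 * Check the still-pending delayed refs for bytenr to see whether anyone
 * besides (root, objectid, offset) holds a reference. Returns 1 if a cross
 * reference exists, 0 if not, and -EAGAIN when the head mutex was contended
 * and the caller should retry after the path has been released.
 */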
static noinline int check_delayed_ref(struct btrfs_root *root,
				      struct btrfs_path *path,
				      u64 objectid, u64 offset, u64 bytenr)
{
	struct btrfs_delayed_ref_head *head;
	struct btrfs_delayed_ref_node *ref;
	struct btrfs_delayed_data_ref *data_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_transaction *cur_trans;
	struct rb_node *node;
	int ret = 0;

	spin_lock(&root->fs_info->trans_lock);
	cur_trans = root->fs_info->running_transaction;
	if (cur_trans)
		refcount_inc(&cur_trans->use_count);
	spin_unlock(&root->fs_info->trans_lock);
	if (!cur_trans)
		return 0;

	delayed_refs = &cur_trans->delayed_refs;
	spin_lock(&delayed_refs->lock);
	head = btrfs_find_delayed_ref_head(delayed_refs, bytenr);
	if (!head) {
		spin_unlock(&delayed_refs->lock);
		btrfs_put_transaction(cur_trans);
		return 0;
	}

	if (!mutex_trylock(&head->mutex)) {
		refcount_inc(&head->refs);
		spin_unlock(&delayed_refs->lock);

		btrfs_release_path(path);

		/*
		 * Mutex was contended, block until it's released and let
		 * caller try again
		 */
		mutex_lock(&head->mutex);
		mutex_unlock(&head->mutex);
		btrfs_put_delayed_ref_head(head);
		btrfs_put_transaction(cur_trans);
		return -EAGAIN;
	}
	spin_unlock(&delayed_refs->lock);

	spin_lock(&head->lock);
	/*
	 * XXX: We should replace this with a proper search function in the
	 * future.
	 */
	for (node = rb_first_cached(&head->ref_tree); node;
	     node = rb_next(node)) {
		ref = rb_entry(node, struct btrfs_delayed_ref_node, ref_node);
		/* If it's a shared ref we know a cross reference exists */
		if (ref->type != BTRFS_EXTENT_DATA_REF_KEY) {
			ret = 1;
			break;
		}

		data_ref = btrfs_delayed_node_to_data_ref(ref);

		/*
		 * If our ref doesn't match the one we're currently looking at
		 * then we have a cross reference.
		 */
		if (data_ref->root != root->root_key.objectid ||
		    data_ref->objectid != objectid ||
		    data_ref->offset != offset) {
			ret = 1;
			break;
		}
	}
	spin_unlock(&head->lock);
	mutex_unlock(&head->mutex);
	btrfs_put_transaction(cur_trans);
	return ret;
}

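/*
 * Check the committed extent tree for cross references on bytenr. Returns 0
 * only when the extent item has a single inline data ref that matches
 * (root, objectid, offset) exactly and the extent is newer than the root's
 * last snapshot; 1 or -ENOENT means a cross reference must be assumed.
 */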
static noinline int check_committed_ref(struct btrfs_root *root,
					struct btrfs_path *path,
					u64 objectid, u64 offset, u64 bytenr)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_root *extent_root = fs_info->extent_root;
	struct extent_buffer *leaf;
	struct btrfs_extent_data_ref *ref;
	struct btrfs_extent_inline_ref *iref;
	struct btrfs_extent_item *ei;
	struct btrfs_key key;
	u32 item_size;
	int type;
	int ret;

	key.objectid = bytenr;
	key.offset = (u64)-1;
	key.type = BTRFS_EXTENT_ITEM_KEY;

	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	BUG_ON(ret == 0); /* Corruption */

	ret = -ENOENT;
	if (path->slots[0] == 0)
		goto out;

	path->slots[0]--;
	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

	if (key.objectid != bytenr || key.type != BTRFS_EXTENT_ITEM_KEY)
		goto out;

	ret = 1;
	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);

	if (item_size != sizeof(*ei) +
	    btrfs_extent_inline_ref_size(BTRFS_EXTENT_DATA_REF_KEY))
		goto out;

	if (btrfs_extent_generation(leaf, ei) <=
	    btrfs_root_last_snapshot(&root->root_item))
		goto out;

	iref = (struct btrfs_extent_inline_ref *)(ei + 1);

	type = btrfs_get_extent_inline_ref_type(leaf, iref, BTRFS_REF_TYPE_DATA);
	if (type != BTRFS_EXTENT_DATA_REF_KEY)
		goto out;

	ref = (struct btrfs_extent_data_ref *)(&iref->offset);
	if (btrfs_extent_refs(leaf, ei) !=
	    btrfs_extent_data_ref_count(leaf, ref) ||
	    btrfs_extent_data_ref_root(leaf, ref) !=
	    root->root_key.objectid ||
	    btrfs_extent_data_ref_objectid(leaf, ref) != objectid ||
	    btrfs_extent_data_ref_offset(leaf, ref) != offset)
		goto out;

	ret = 0;
out:
	return ret;
}

int btrfs_cross_ref_exist(struct btrfs_root *root, u64 objectid, u64 offset,
			  u64 bytenr)
{
	struct btrfs_path *path;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	do {
		ret = check_committed_ref(root, path, objectid,
					  offset, bytenr);
		if (ret && ret != -ENOENT)
			goto out;

		ret = check_delayed_ref(root, path, objectid, offset, bytenr);
	} while (ret == -EAGAIN);

out:
	btrfs_free_path(path);
	if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
		WARN_ON(ret > 0);
	return ret;
}

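/*
 * Walk all pointers in one tree block and add (inc != 0) or drop one
 * reference for everything the block points to: disk extents referenced by
 * file extent items in a leaf (inline extents and holes are skipped), or
 * child blocks in a node. The thin wrappers btrfs_inc_ref() and
 * btrfs_dec_ref() below pick the direction.
 */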
static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   struct extent_buffer *buf,
			   int full_backref, int inc)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 bytenr;
	u64 num_bytes;
	u64 parent;
	u64 ref_root;
	u32 nritems;
	struct btrfs_key key;
	struct btrfs_file_extent_item *fi;
	int i;
	int level;
	int ret = 0;
	int (*process_func)(struct btrfs_trans_handle *,
			    struct btrfs_root *,
			    u64, u64, u64, u64, u64, u64);

	if (btrfs_is_testing(fs_info))
		return 0;

	ref_root = btrfs_header_owner(buf);
	nritems = btrfs_header_nritems(buf);
	level = btrfs_header_level(buf);

	if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state) && level == 0)
		return 0;

	if (inc)
		process_func = btrfs_inc_extent_ref;
	else
		process_func = btrfs_free_extent;

	if (full_backref)
		parent = buf->start;
	else
		parent = 0;

	for (i = 0; i < nritems; i++) {
		if (level == 0) {
			btrfs_item_key_to_cpu(buf, &key, i);
			if (key.type != BTRFS_EXTENT_DATA_KEY)
				continue;
			fi = btrfs_item_ptr(buf, i,
					    struct btrfs_file_extent_item);
			if (btrfs_file_extent_type(buf, fi) ==
			    BTRFS_FILE_EXTENT_INLINE)
				continue;
			bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
			if (bytenr == 0)
				continue;

			num_bytes = btrfs_file_extent_disk_num_bytes(buf, fi);
			key.offset -= btrfs_file_extent_offset(buf, fi);
			ret = process_func(trans, root, bytenr, num_bytes,
					   parent, ref_root, key.objectid,
					   key.offset);
			if (ret)
				goto fail;
		} else {
			bytenr = btrfs_node_blockptr(buf, i);
			num_bytes = fs_info->nodesize;
			ret = process_func(trans, root, bytenr, num_bytes,
					   parent, ref_root, level - 1, 0);
			if (ret)
				goto fail;
		}
	}
	return 0;
fail:
	return ret;
}

int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		  struct extent_buffer *buf, int full_backref)
{
	return __btrfs_mod_ref(trans, root, buf, full_backref, 1);
}

int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		  struct extent_buffer *buf, int full_backref)
{
	return __btrfs_mod_ref(trans, root, buf, full_backref, 0);
}

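/*
 * Copy the in-memory block group item into its slot in the extent tree. A
 * missing item is reported as -ENOENT rather than being created; the callers
 * below decide whether that is fatal.
 */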
static int write_one_cache_group(struct btrfs_trans_handle *trans,
				 struct btrfs_fs_info *fs_info,
				 struct btrfs_path *path,
				 struct btrfs_block_group_cache *cache)
{
	int ret;
	struct btrfs_root *extent_root = fs_info->extent_root;
	unsigned long bi;
	struct extent_buffer *leaf;

	ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
	if (ret) {
		if (ret > 0)
			ret = -ENOENT;
		goto fail;
	}

	leaf = path->nodes[0];
	bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
	write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
	btrfs_mark_buffer_dirty(leaf);
fail:
	btrfs_release_path(path);
	return ret;
}

static struct btrfs_block_group_cache *
next_block_group(struct btrfs_fs_info *fs_info,
		 struct btrfs_block_group_cache *cache)
{
	struct rb_node *node;

	spin_lock(&fs_info->block_group_cache_lock);

	/* If our block group was removed, we need a full search. */
	if (RB_EMPTY_NODE(&cache->cache_node)) {
		const u64 next_bytenr = cache->key.objectid + cache->key.offset;

		spin_unlock(&fs_info->block_group_cache_lock);
		btrfs_put_block_group(cache);
		cache = btrfs_lookup_first_block_group(fs_info, next_bytenr);
		return cache;
	}
	node = rb_next(&cache->cache_node);
	btrfs_put_block_group(cache);
	if (node) {
		cache = rb_entry(node, struct btrfs_block_group_cache,
				 cache_node);
		btrfs_get_block_group(cache);
	} else
		cache = NULL;
	spin_unlock(&fs_info->block_group_cache_lock);
	return cache;
}

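/*
 * Prepare the free space cache inode of a block group for this transaction:
 * create or truncate the inode, preallocate room for the cache file and
 * record the resulting disk_cache_state (BTRFS_DC_WRITTEN, _SETUP or
 * _ERROR) on the block group.
 */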
static int cache_save_setup(struct btrfs_block_group_cache *block_group,
			    struct btrfs_trans_handle *trans,
			    struct btrfs_path *path)
{
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	struct btrfs_root *root = fs_info->tree_root;
	struct inode *inode = NULL;
	struct extent_changeset *data_reserved = NULL;
	u64 alloc_hint = 0;
	int dcs = BTRFS_DC_ERROR;
	u64 num_pages = 0;
	int retries = 0;
	int ret = 0;

	/*
	 * If this block group is smaller than 100 megs don't bother caching
	 * the block group.
	 */
	if (block_group->key.offset < (100 * SZ_1M)) {
		spin_lock(&block_group->lock);
		block_group->disk_cache_state = BTRFS_DC_WRITTEN;
		spin_unlock(&block_group->lock);
		return 0;
	}

	if (trans->aborted)
		return 0;
again:
	inode = lookup_free_space_inode(fs_info, block_group, path);
	if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
		ret = PTR_ERR(inode);
		btrfs_release_path(path);
		goto out;
	}

	if (IS_ERR(inode)) {
		BUG_ON(retries);
		retries++;

		if (block_group->ro)
			goto out_free;

		ret = create_free_space_inode(fs_info, trans, block_group,
					      path);
		if (ret)
			goto out_free;
		goto again;
	}

	/*
	 * We want to set the generation to 0, that way if anything goes wrong
	 * from here on out we know not to trust this cache when we load up
	 * next time.
	 */
	BTRFS_I(inode)->generation = 0;
	ret = btrfs_update_inode(trans, root, inode);
	if (ret) {
		/*
		 * So theoretically we could recover from this, simply set the
		 * super cache generation to 0 so we know to invalidate the
		 * cache, but then we'd have to keep track of the block groups
		 * that fail this way so we know we _have_ to reset this cache
		 * before the next commit or risk reading stale cache. So to
		 * limit our exposure to horrible edge cases let's just abort
		 * the transaction; this only happens in really bad situations
		 * anyway.
		 */
		btrfs_abort_transaction(trans, ret);
		goto out_put;
	}
	WARN_ON(ret);

	/* We've already setup this transaction, go ahead and exit */
	if (block_group->cache_generation == trans->transid &&
	    i_size_read(inode)) {
		dcs = BTRFS_DC_SETUP;
		goto out_put;
	}

	if (i_size_read(inode) > 0) {
		ret = btrfs_check_trunc_cache_free_space(fs_info,
					&fs_info->global_block_rsv);
		if (ret)
			goto out_put;

		ret = btrfs_truncate_free_space_cache(trans, NULL, inode);
		if (ret)
			goto out_put;
	}

	spin_lock(&block_group->lock);
	if (block_group->cached != BTRFS_CACHE_FINISHED ||
	    !btrfs_test_opt(fs_info, SPACE_CACHE)) {
		/*
		 * don't bother trying to write stuff out _if_
		 * a) we're not cached,
		 * b) we're with nospace_cache mount option,
		 * c) we're with v2 space_cache (FREE_SPACE_TREE).
		 */
		dcs = BTRFS_DC_WRITTEN;
		spin_unlock(&block_group->lock);
		goto out_put;
	}
	spin_unlock(&block_group->lock);

	/*
	 * We hit an ENOSPC when setting up the cache in this transaction, just
	 * skip doing the setup, we've already cleared the cache so we're safe.
	 */
	if (test_bit(BTRFS_TRANS_CACHE_ENOSPC, &trans->transaction->flags)) {
		ret = -ENOSPC;
		goto out_put;
	}

	/*
	 * Try to preallocate enough space based on how big the block group is.
	 * Keep in mind this has to include any pinned space which could end up
	 * taking up quite a bit since it's not folded into the other space
	 * cache.
	 */
	num_pages = div_u64(block_group->key.offset, SZ_256M);
	if (!num_pages)
		num_pages = 1;

	num_pages *= 16;
	num_pages *= PAGE_SIZE;

	ret = btrfs_check_data_free_space(inode, &data_reserved, 0, num_pages);
	if (ret)
		goto out_put;

	ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, num_pages,
					      num_pages, num_pages,
					      &alloc_hint);
	/*
	 * Our cache requires contiguous chunks so that we don't modify a bunch
	 * of metadata or split extents when writing the cache out, which means
	 * we can enospc if we are heavily fragmented in addition to just
	 * normal out of space conditions. So if we hit this just skip setting
	 * up any other block groups for this transaction, maybe we'll unpin
	 * enough space the next time around.
	 */
	if (!ret)
		dcs = BTRFS_DC_SETUP;
	else if (ret == -ENOSPC)
		set_bit(BTRFS_TRANS_CACHE_ENOSPC, &trans->transaction->flags);

out_put:
	iput(inode);
out_free:
	btrfs_release_path(path);
out:
	spin_lock(&block_group->lock);
	if (!ret && dcs == BTRFS_DC_SETUP)
		block_group->cache_generation = trans->transid;
	block_group->disk_cache_state = dcs;
	spin_unlock(&block_group->lock);

	extent_changeset_free(data_reserved);
	return ret;
}

int btrfs_setup_space_cache(struct btrfs_trans_handle *trans,
			    struct btrfs_fs_info *fs_info)
{
	struct btrfs_block_group_cache *cache, *tmp;
	struct btrfs_transaction *cur_trans = trans->transaction;
	struct btrfs_path *path;

	if (list_empty(&cur_trans->dirty_bgs) ||
	    !btrfs_test_opt(fs_info, SPACE_CACHE))
		return 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* Could add new block groups, use _safe just in case */
	list_for_each_entry_safe(cache, tmp, &cur_trans->dirty_bgs,
				 dirty_list) {
		if (cache->disk_cache_state == BTRFS_DC_CLEAR)
			cache_save_setup(cache, trans, path);
	}

	btrfs_free_path(path);
	return 0;
}

/*
 * Transaction commit does final block group cache writeback during a
 * critical section where nothing is allowed to change the FS. This is
 * required in order for the cache to actually match the block group,
 * but can introduce a lot of latency into the commit.
 *
 * So, btrfs_start_dirty_block_groups is here to kick off block group
 * cache IO. There's a chance we'll have to redo some of it if the
 * block group changes again during the commit, but it greatly reduces
 * the commit latency by getting rid of the easy block groups while
 * we're still allowing others to join the commit.
 */
int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_block_group_cache *cache;
	struct btrfs_transaction *cur_trans = trans->transaction;
	int ret = 0;
	int should_put;
	struct btrfs_path *path = NULL;
	LIST_HEAD(dirty);
	struct list_head *io = &cur_trans->io_bgs;
	int num_started = 0;
	int loops = 0;

	spin_lock(&cur_trans->dirty_bgs_lock);
	if (list_empty(&cur_trans->dirty_bgs)) {
		spin_unlock(&cur_trans->dirty_bgs_lock);
		return 0;
	}
	list_splice_init(&cur_trans->dirty_bgs, &dirty);
	spin_unlock(&cur_trans->dirty_bgs_lock);

again:
	/*
	 * make sure all the block groups on our dirty list actually
	 * exist
	 */
	btrfs_create_pending_block_groups(trans);

	if (!path) {
		path = btrfs_alloc_path();
		if (!path)
			return -ENOMEM;
	}

	/*
	 * cache_write_mutex is here only to save us from balance or automatic
	 * removal of empty block groups deleting this block group while we are
	 * writing out the cache
	 */
	mutex_lock(&trans->transaction->cache_write_mutex);
	while (!list_empty(&dirty)) {
		cache = list_first_entry(&dirty,
					 struct btrfs_block_group_cache,
					 dirty_list);
		/*
		 * this can happen if something re-dirties a block
		 * group that is already under IO. Just wait for it to
		 * finish and then do it all again
		 */
		if (!list_empty(&cache->io_list)) {
			list_del_init(&cache->io_list);
			btrfs_wait_cache_io(trans, cache, path);
			btrfs_put_block_group(cache);
		}

		/*
		 * btrfs_wait_cache_io uses the cache->dirty_list to decide
		 * if it should update the cache_state. Don't delete
		 * until after we wait.
		 *
		 * Since we're not running in the commit critical section
		 * we need the dirty_bgs_lock to protect from update_block_group
		 */
		spin_lock(&cur_trans->dirty_bgs_lock);
		list_del_init(&cache->dirty_list);
		spin_unlock(&cur_trans->dirty_bgs_lock);

		should_put = 1;

		cache_save_setup(cache, trans, path);

		if (cache->disk_cache_state == BTRFS_DC_SETUP) {
			cache->io_ctl.inode = NULL;
			ret = btrfs_write_out_cache(fs_info, trans,
						    cache, path);
			if (ret == 0 && cache->io_ctl.inode) {
				num_started++;
				should_put = 0;

				/*
				 * The cache_write_mutex is protecting the
				 * io_list, also refer to the definition of
				 * btrfs_transaction::io_bgs for more details
				 */
				list_add_tail(&cache->io_list, io);
			} else {
				/*
				 * if we failed to write the cache, the
				 * generation will be bad and life goes on
				 */
				ret = 0;
			}
		}
		if (!ret) {
			ret = write_one_cache_group(trans, fs_info,
						    path, cache);
			/*
			 * Our block group might still be attached to the list
			 * of new block groups in the transaction handle of some
			 * other task (struct btrfs_trans_handle->new_bgs). This
			 * means its block group item isn't yet in the extent
			 * tree. If this happens ignore the error, as we will
			 * try again later in the critical section of the
			 * transaction commit.
			 */
			if (ret == -ENOENT) {
				ret = 0;
				spin_lock(&cur_trans->dirty_bgs_lock);
				if (list_empty(&cache->dirty_list)) {
					list_add_tail(&cache->dirty_list,
						      &cur_trans->dirty_bgs);
					btrfs_get_block_group(cache);
				}
				spin_unlock(&cur_trans->dirty_bgs_lock);
			} else if (ret) {
				btrfs_abort_transaction(trans, ret);
			}
		}

		/* if it's not on the io list, we need to put the block group */
		if (should_put)
			btrfs_put_block_group(cache);

		if (ret)
			break;

		/*
		 * Avoid blocking other tasks for too long. It might even save
		 * us from writing caches for block groups that are going to be
		 * removed.
		 */
		mutex_unlock(&trans->transaction->cache_write_mutex);
		mutex_lock(&trans->transaction->cache_write_mutex);
	}
	mutex_unlock(&trans->transaction->cache_write_mutex);

	/*
	 * go through delayed refs for all the stuff we've just kicked off
	 * and then loop back (just once)
	 */
	ret = btrfs_run_delayed_refs(trans, 0);
	if (!ret && loops == 0) {
		loops++;
		spin_lock(&cur_trans->dirty_bgs_lock);
		list_splice_init(&cur_trans->dirty_bgs, &dirty);
		/*
		 * dirty_bgs_lock protects us from concurrent block group
		 * deletes too (not just cache_write_mutex).
		 */
		if (!list_empty(&dirty)) {
			spin_unlock(&cur_trans->dirty_bgs_lock);
			goto again;
		}
		spin_unlock(&cur_trans->dirty_bgs_lock);
	} else if (ret < 0) {
		btrfs_cleanup_dirty_bgs(cur_trans, fs_info);
	}

	btrfs_free_path(path);
	return ret;
}

int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
				   struct btrfs_fs_info *fs_info)
{
	struct btrfs_block_group_cache *cache;
	struct btrfs_transaction *cur_trans = trans->transaction;
	int ret = 0;
	int should_put;
	struct btrfs_path *path;
	struct list_head *io = &cur_trans->io_bgs;
	int num_started = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/*
	 * Even though we are in the critical section of the transaction commit,
	 * we can still have concurrent tasks adding elements to this
	 * transaction's list of dirty block groups. These tasks correspond to
	 * endio free space workers started when writeback finishes for a
	 * space cache, which run inode.c:btrfs_finish_ordered_io(), and can
	 * allocate new block groups as a result of COWing nodes of the root
	 * tree when updating the free space inode. The writeback for the space
	 * caches is triggered by an earlier call to
	 * btrfs_start_dirty_block_groups() and iterations of the following
	 * loop.
	 * Also we want to do the cache_save_setup first and then run the
	 * delayed refs to make sure we have the best chance at doing this all
	 * in one shot.
	 */
	spin_lock(&cur_trans->dirty_bgs_lock);
	while (!list_empty(&cur_trans->dirty_bgs)) {
		cache = list_first_entry(&cur_trans->dirty_bgs,
					 struct btrfs_block_group_cache,
					 dirty_list);

		/*
		 * this can happen if cache_save_setup re-dirties a block
		 * group that is already under IO. Just wait for it to
		 * finish and then do it all again
		 */
		if (!list_empty(&cache->io_list)) {
			spin_unlock(&cur_trans->dirty_bgs_lock);
			list_del_init(&cache->io_list);
			btrfs_wait_cache_io(trans, cache, path);
			btrfs_put_block_group(cache);
			spin_lock(&cur_trans->dirty_bgs_lock);
		}

		/*
		 * don't remove from the dirty list until after we've waited
		 * on any pending IO
		 */
		list_del_init(&cache->dirty_list);
		spin_unlock(&cur_trans->dirty_bgs_lock);
		should_put = 1;

		cache_save_setup(cache, trans, path);

		if (!ret)
			ret = btrfs_run_delayed_refs(trans,
						     (unsigned long) -1);

		if (!ret && cache->disk_cache_state == BTRFS_DC_SETUP) {
			cache->io_ctl.inode = NULL;
			ret = btrfs_write_out_cache(fs_info, trans,
						    cache, path);
			if (ret == 0 && cache->io_ctl.inode) {
				num_started++;
				should_put = 0;
				list_add_tail(&cache->io_list, io);
			} else {
				/*
				 * if we failed to write the cache, the
				 * generation will be bad and life goes on
				 */
				ret = 0;
			}
		}
		if (!ret) {
			ret = write_one_cache_group(trans, fs_info,
						    path, cache);
			/*
			 * One of the free space endio workers might have
			 * created a new block group while updating a free space
			 * cache's inode (at inode.c:btrfs_finish_ordered_io())
			 * and hasn't released its transaction handle yet, in
			 * which case the new block group is still attached to
			 * its transaction handle and its creation has not
			 * finished yet (no block group item in the extent tree
			 * yet, etc). If this is the case, wait for all free
			 * space endio workers to finish and retry. This is a
			 * very rare case so no need for a more efficient and
			 * complex approach.
			 */
			if (ret == -ENOENT) {
				wait_event(cur_trans->writer_wait,
				   atomic_read(&cur_trans->num_writers) == 1);
				ret = write_one_cache_group(trans, fs_info,
							    path, cache);
			}
			if (ret)
				btrfs_abort_transaction(trans, ret);
		}

		/* if it's not on the io list, we need to put the block group */
		if (should_put)
			btrfs_put_block_group(cache);
		spin_lock(&cur_trans->dirty_bgs_lock);
	}
	spin_unlock(&cur_trans->dirty_bgs_lock);

	/*
	 * Refer to the definition of io_bgs member for details why it's safe
	 * to use it without any locking
	 */
	while (!list_empty(io)) {
		cache = list_first_entry(io, struct btrfs_block_group_cache,
					 io_list);
		list_del_init(&cache->io_list);
		btrfs_wait_cache_io(trans, cache, path);
		btrfs_put_block_group(cache);
	}

	btrfs_free_path(path);
	return ret;
}

int btrfs_extent_readonly(struct btrfs_fs_info *fs_info, u64 bytenr)
{
	struct btrfs_block_group_cache *block_group;
	int readonly = 0;

	block_group = btrfs_lookup_block_group(fs_info, bytenr);
	if (!block_group || block_group->ro)
		readonly = 1;
	if (block_group)
		btrfs_put_block_group(block_group);
	return readonly;
}

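/*
 * Take a nocow writer reference on the block group containing bytenr so it
 * cannot be made read-only while a nocow write is in flight. Returns false
 * if the block group is missing or already read-only; on success the block
 * group lookup reference is kept and released later by
 * btrfs_dec_nocow_writers().
 */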
bool btrfs_inc_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr)
{
	struct btrfs_block_group_cache *bg;
	bool ret = true;

	bg = btrfs_lookup_block_group(fs_info, bytenr);
	if (!bg)
		return false;

	spin_lock(&bg->lock);
	if (bg->ro)
		ret = false;
	else
		atomic_inc(&bg->nocow_writers);
	spin_unlock(&bg->lock);

	/* no put on block group, done by btrfs_dec_nocow_writers */
	if (!ret)
		btrfs_put_block_group(bg);

	return ret;
}

void btrfs_dec_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr)
{
	struct btrfs_block_group_cache *bg;

	bg = btrfs_lookup_block_group(fs_info, bytenr);
	ASSERT(bg);
	if (atomic_dec_and_test(&bg->nocow_writers))
		wake_up_var(&bg->nocow_writers);
	/*
	 * Once for our lookup and once for the lookup done by a previous call
	 * to btrfs_inc_nocow_writers()
	 */
	btrfs_put_block_group(bg);
	btrfs_put_block_group(bg);
}

void btrfs_wait_nocow_writers(struct btrfs_block_group_cache *bg)
{
	wait_var_event(&bg->nocow_writers, !atomic_read(&bg->nocow_writers));
}

static const char *alloc_name(u64 flags)
{
	switch (flags) {
	case BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA:
		return "mixed";
	case BTRFS_BLOCK_GROUP_METADATA:
		return "metadata";
	case BTRFS_BLOCK_GROUP_DATA:
		return "data";
	case BTRFS_BLOCK_GROUP_SYSTEM:
		return "system";
	default:
		WARN_ON(1);
		return "invalid-combination";
	}
}

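/*
 * Allocate and register the btrfs_space_info for one block group type
 * (data, metadata, system or mixed), including its per-cpu pinned byte
 * counter and its sysfs kobject named after alloc_name().
 */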
static int create_space_info(struct btrfs_fs_info *info, u64 flags)
{
	struct btrfs_space_info *space_info;
	int i;
	int ret;

	space_info = kzalloc(sizeof(*space_info), GFP_NOFS);
	if (!space_info)
		return -ENOMEM;

	ret = percpu_counter_init(&space_info->total_bytes_pinned, 0,
				  GFP_KERNEL);
	if (ret) {
		kfree(space_info);
		return ret;
	}

	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
		INIT_LIST_HEAD(&space_info->block_groups[i]);
	init_rwsem(&space_info->groups_sem);
	spin_lock_init(&space_info->lock);
	space_info->flags = flags & BTRFS_BLOCK_GROUP_TYPE_MASK;
	space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
	init_waitqueue_head(&space_info->wait);
	INIT_LIST_HEAD(&space_info->ro_bgs);
	INIT_LIST_HEAD(&space_info->tickets);
	INIT_LIST_HEAD(&space_info->priority_tickets);

	ret = kobject_init_and_add(&space_info->kobj, &space_info_ktype,
				   info->space_info_kobj, "%s",
				   alloc_name(space_info->flags));
	if (ret) {
		percpu_counter_destroy(&space_info->total_bytes_pinned);
		kfree(space_info);
		return ret;
	}

	list_add_rcu(&space_info->list, &info->space_info);
	if (flags & BTRFS_BLOCK_GROUP_DATA)
		info->data_sinfo = space_info;

	return ret;
}

  3499. static void update_space_info(struct btrfs_fs_info *info, u64 flags,
  3500. u64 total_bytes, u64 bytes_used,
  3501. u64 bytes_readonly,
  3502. struct btrfs_space_info **space_info)
  3503. {
  3504. struct btrfs_space_info *found;
  3505. int factor;
  3506. factor = btrfs_bg_type_to_factor(flags);
  3507. found = __find_space_info(info, flags);
  3508. ASSERT(found);
  3509. spin_lock(&found->lock);
  3510. found->total_bytes += total_bytes;
  3511. found->disk_total += total_bytes * factor;
  3512. found->bytes_used += bytes_used;
  3513. found->disk_used += bytes_used * factor;
  3514. found->bytes_readonly += bytes_readonly;
  3515. if (total_bytes > 0)
  3516. found->full = 0;
  3517. space_info_add_new_bytes(info, found, total_bytes -
  3518. bytes_used - bytes_readonly);
  3519. spin_unlock(&found->lock);
  3520. *space_info = found;
  3521. }
  3522. static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
  3523. {
  3524. u64 extra_flags = chunk_to_extended(flags) &
  3525. BTRFS_EXTENDED_PROFILE_MASK;
  3526. write_seqlock(&fs_info->profiles_lock);
  3527. if (flags & BTRFS_BLOCK_GROUP_DATA)
  3528. fs_info->avail_data_alloc_bits |= extra_flags;
  3529. if (flags & BTRFS_BLOCK_GROUP_METADATA)
  3530. fs_info->avail_metadata_alloc_bits |= extra_flags;
  3531. if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
  3532. fs_info->avail_system_alloc_bits |= extra_flags;
  3533. write_sequnlock(&fs_info->profiles_lock);
  3534. }
  3535. /*
  3536. * returns target flags in extended format or 0 if restripe for this
  3537. * chunk_type is not in progress
  3538. *
  3539. * should be called with balance_lock held
  3540. */
  3541. static u64 get_restripe_target(struct btrfs_fs_info *fs_info, u64 flags)
  3542. {
  3543. struct btrfs_balance_control *bctl = fs_info->balance_ctl;
  3544. u64 target = 0;
  3545. if (!bctl)
  3546. return 0;
  3547. if (flags & BTRFS_BLOCK_GROUP_DATA &&
  3548. bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) {
  3549. target = BTRFS_BLOCK_GROUP_DATA | bctl->data.target;
  3550. } else if (flags & BTRFS_BLOCK_GROUP_SYSTEM &&
  3551. bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
  3552. target = BTRFS_BLOCK_GROUP_SYSTEM | bctl->sys.target;
  3553. } else if (flags & BTRFS_BLOCK_GROUP_METADATA &&
  3554. bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) {
  3555. target = BTRFS_BLOCK_GROUP_METADATA | bctl->meta.target;
  3556. }
  3557. return target;
  3558. }
  3559. /*
  3560. * @flags: available profiles in extended format (see ctree.h)
  3561. *
  3562. * Returns reduced profile in chunk format. If profile changing is in
  3563. * progress (either running or paused) picks the target profile (if it's
  3564. * already available), otherwise falls back to plain reducing.
  3565. */
  3566. static u64 btrfs_reduce_alloc_profile(struct btrfs_fs_info *fs_info, u64 flags)
  3567. {
  3568. u64 num_devices = fs_info->fs_devices->rw_devices;
  3569. u64 target;
  3570. u64 raid_type;
  3571. u64 allowed = 0;
  3572. /*
  3573. * see if restripe for this chunk_type is in progress, if so
  3574. * try to reduce to the target profile
  3575. */
  3576. spin_lock(&fs_info->balance_lock);
  3577. target = get_restripe_target(fs_info, flags);
  3578. if (target) {
  3579. /* pick target profile only if it's already available */
  3580. if ((flags & target) & BTRFS_EXTENDED_PROFILE_MASK) {
  3581. spin_unlock(&fs_info->balance_lock);
  3582. return extended_to_chunk(target);
  3583. }
  3584. }
  3585. spin_unlock(&fs_info->balance_lock);
  3586. /* First, mask out the RAID levels which aren't possible */
  3587. for (raid_type = 0; raid_type < BTRFS_NR_RAID_TYPES; raid_type++) {
  3588. if (num_devices >= btrfs_raid_array[raid_type].devs_min)
  3589. allowed |= btrfs_raid_array[raid_type].bg_flag;
  3590. }
  3591. allowed &= flags;
  3592. if (allowed & BTRFS_BLOCK_GROUP_RAID6)
  3593. allowed = BTRFS_BLOCK_GROUP_RAID6;
  3594. else if (allowed & BTRFS_BLOCK_GROUP_RAID5)
  3595. allowed = BTRFS_BLOCK_GROUP_RAID5;
  3596. else if (allowed & BTRFS_BLOCK_GROUP_RAID10)
  3597. allowed = BTRFS_BLOCK_GROUP_RAID10;
  3598. else if (allowed & BTRFS_BLOCK_GROUP_RAID1)
  3599. allowed = BTRFS_BLOCK_GROUP_RAID1;
  3600. else if (allowed & BTRFS_BLOCK_GROUP_RAID0)
  3601. allowed = BTRFS_BLOCK_GROUP_RAID0;
  3602. flags &= ~BTRFS_BLOCK_GROUP_PROFILE_MASK;
  3603. return extended_to_chunk(flags | allowed);
  3604. }
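
/*
 * For example, on a filesystem with two rw devices where the extended flags
 * allow both RAID1 and RAID0, both profiles survive the devs_min loop above
 * and the if/else ladder then reduces the result to RAID1: the ladder picks
 * whichever remaining profile comes first in the fixed order RAID6, RAID5,
 * RAID10, RAID1, RAID0.
 */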
static u64 get_alloc_profile(struct btrfs_fs_info *fs_info, u64 orig_flags)
{
	unsigned seq;
	u64 flags;

	do {
		flags = orig_flags;
		seq = read_seqbegin(&fs_info->profiles_lock);

		if (flags & BTRFS_BLOCK_GROUP_DATA)
			flags |= fs_info->avail_data_alloc_bits;
		else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
			flags |= fs_info->avail_system_alloc_bits;
		else if (flags & BTRFS_BLOCK_GROUP_METADATA)
			flags |= fs_info->avail_metadata_alloc_bits;
	} while (read_seqretry(&fs_info->profiles_lock, seq));

	return btrfs_reduce_alloc_profile(fs_info, flags);
}

static u64 get_alloc_profile_by_root(struct btrfs_root *root, int data)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 flags;
	u64 ret;

	if (data)
		flags = BTRFS_BLOCK_GROUP_DATA;
	else if (root == fs_info->chunk_root)
		flags = BTRFS_BLOCK_GROUP_SYSTEM;
	else
		flags = BTRFS_BLOCK_GROUP_METADATA;

	ret = get_alloc_profile(fs_info, flags);
	return ret;
}

u64 btrfs_data_alloc_profile(struct btrfs_fs_info *fs_info)
{
	return get_alloc_profile(fs_info, BTRFS_BLOCK_GROUP_DATA);
}

u64 btrfs_metadata_alloc_profile(struct btrfs_fs_info *fs_info)
{
	return get_alloc_profile(fs_info, BTRFS_BLOCK_GROUP_METADATA);
}

u64 btrfs_system_alloc_profile(struct btrfs_fs_info *fs_info)
{
	return get_alloc_profile(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
}

static u64 btrfs_space_info_used(struct btrfs_space_info *s_info,
				 bool may_use_included)
{
	ASSERT(s_info);
	return s_info->bytes_used + s_info->bytes_reserved +
		s_info->bytes_pinned + s_info->bytes_readonly +
		(may_use_included ? s_info->bytes_may_use : 0);
}
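
/*
 * E.g. for a space_info with 4GiB used, 1GiB reserved, 512MiB pinned,
 * 256MiB readonly and 2GiB may_use, this reports 5.75GiB without may_use
 * included and 7.75GiB with it.
 */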
int btrfs_alloc_data_chunk_ondemand(struct btrfs_inode *inode, u64 bytes)
{
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_space_info *data_sinfo = fs_info->data_sinfo;
	u64 used;
	int ret = 0;
	int need_commit = 2;
	int have_pinned_space;

	/* make sure bytes are sectorsize aligned */
	bytes = ALIGN(bytes, fs_info->sectorsize);

	if (btrfs_is_free_space_inode(inode)) {
		need_commit = 0;
		ASSERT(current->journal_info);
	}

again:
	/* make sure we have enough space to handle the data first */
	spin_lock(&data_sinfo->lock);
	used = btrfs_space_info_used(data_sinfo, true);

	if (used + bytes > data_sinfo->total_bytes) {
		struct btrfs_trans_handle *trans;

		/*
		 * if we don't have enough free bytes in this space then we need
		 * to alloc a new chunk.
		 */
		if (!data_sinfo->full) {
			u64 alloc_target;

			data_sinfo->force_alloc = CHUNK_ALLOC_FORCE;
			spin_unlock(&data_sinfo->lock);

			alloc_target = btrfs_data_alloc_profile(fs_info);
			/*
			 * It is ugly that we don't call a nolock join
			 * transaction for the free space inode case here.
			 * But it is safe because we only do the data space
			 * reservation for the free space cache in the
			 * transaction context: the common join transaction
			 * just increases the use counter of the current
			 * transaction handle and doesn't try to acquire the
			 * trans_lock of the fs.
			 */
			trans = btrfs_join_transaction(root);
			if (IS_ERR(trans))
				return PTR_ERR(trans);

			ret = do_chunk_alloc(trans, alloc_target,
					     CHUNK_ALLOC_NO_FORCE);
			btrfs_end_transaction(trans);
			if (ret < 0) {
				if (ret != -ENOSPC)
					return ret;
				else {
					have_pinned_space = 1;
					goto commit_trans;
				}
			}

			goto again;
		}

		/*
		 * If we don't have enough pinned space to deal with this
		 * allocation, and no removed chunk in current transaction,
		 * don't bother committing the transaction.
		 */
		have_pinned_space = __percpu_counter_compare(
			&data_sinfo->total_bytes_pinned,
			used + bytes - data_sinfo->total_bytes,
			BTRFS_TOTAL_BYTES_PINNED_BATCH);
		spin_unlock(&data_sinfo->lock);

		/* commit the current transaction and try again */
commit_trans:
		if (need_commit) {
			need_commit--;

			if (need_commit > 0) {
				btrfs_start_delalloc_roots(fs_info, -1);
				btrfs_wait_ordered_roots(fs_info, U64_MAX, 0,
							 (u64)-1);
			}

			trans = btrfs_join_transaction(root);
			if (IS_ERR(trans))
				return PTR_ERR(trans);
			if (have_pinned_space >= 0 ||
			    test_bit(BTRFS_TRANS_HAVE_FREE_BGS,
				     &trans->transaction->flags) ||
			    need_commit > 0) {
				ret = btrfs_commit_transaction(trans);
				if (ret)
					return ret;
				/*
				 * The cleaner kthread might still be doing iput
				 * operations. Wait for it to finish so that
				 * more space is released.
				 */
				mutex_lock(&fs_info->cleaner_delayed_iput_mutex);
				mutex_unlock(&fs_info->cleaner_delayed_iput_mutex);
				goto again;
			} else {
				btrfs_end_transaction(trans);
			}
		}

		trace_btrfs_space_reservation(fs_info,
					      "space_info:enospc",
					      data_sinfo->flags, bytes, 1);
		return -ENOSPC;
	}
	data_sinfo->bytes_may_use += bytes;
	trace_btrfs_space_reservation(fs_info, "space_info",
				      data_sinfo->flags, bytes, 1);
	spin_unlock(&data_sinfo->lock);

	return 0;
}
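
/*
 * To summarize the flow above: take data_sinfo->lock and try to fit the
 * reservation; if the space_info isn't full yet, force-allocate a new data
 * chunk and retry; otherwise flush delalloc and/or commit the transaction
 * (at most twice, tracked by need_commit) and retry, before finally giving
 * up with -ENOSPC.
 */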
int btrfs_check_data_free_space(struct inode *inode,
			struct extent_changeset **reserved, u64 start, u64 len)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	int ret;

	/* align the range */
	len = round_up(start + len, fs_info->sectorsize) -
	      round_down(start, fs_info->sectorsize);
	start = round_down(start, fs_info->sectorsize);

	ret = btrfs_alloc_data_chunk_ondemand(BTRFS_I(inode), len);
	if (ret < 0)
		return ret;

	/* Use new btrfs_qgroup_reserve_data to reserve precious data space. */
	ret = btrfs_qgroup_reserve_data(inode, reserved, start, len);
	if (ret < 0)
		btrfs_free_reserved_data_space_noquota(inode, start, len);
	else
		ret = 0;
	return ret;
}

/*
 * Called if we need to clear a data reservation for this inode,
 * typically in an error case.
 *
 * This one will *NOT* use the accurate qgroup reserved space API; it is only
 * for cases where we can't sleep and are sure it won't affect the qgroup
 * reserved space, like clear_bit_hook().
 */
void btrfs_free_reserved_data_space_noquota(struct inode *inode, u64 start,
					    u64 len)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_space_info *data_sinfo;

	/* Make sure the range is aligned to sectorsize */
	len = round_up(start + len, fs_info->sectorsize) -
	      round_down(start, fs_info->sectorsize);
	start = round_down(start, fs_info->sectorsize);

	data_sinfo = fs_info->data_sinfo;
	spin_lock(&data_sinfo->lock);
	if (WARN_ON(data_sinfo->bytes_may_use < len))
		data_sinfo->bytes_may_use = 0;
	else
		data_sinfo->bytes_may_use -= len;
	trace_btrfs_space_reservation(fs_info, "space_info",
				      data_sinfo->flags, len, 0);
	spin_unlock(&data_sinfo->lock);
}

/*
 * Called if we need to clear a data reservation for this inode,
 * typically in an error case.
 *
 * This one will handle the per-inode data rsv map for accurate reserved
 * space framework.
 */
void btrfs_free_reserved_data_space(struct inode *inode,
			struct extent_changeset *reserved, u64 start, u64 len)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;

	/* Make sure the range is aligned to sectorsize */
	len = round_up(start + len, root->fs_info->sectorsize) -
	      round_down(start, root->fs_info->sectorsize);
	start = round_down(start, root->fs_info->sectorsize);

	btrfs_free_reserved_data_space_noquota(inode, start, len);
	btrfs_qgroup_free_data(inode, reserved, start, len);
}

static void force_metadata_allocation(struct btrfs_fs_info *info)
{
	struct list_head *head = &info->space_info;
	struct btrfs_space_info *found;

	rcu_read_lock();
	list_for_each_entry_rcu(found, head, list) {
		if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
			found->force_alloc = CHUNK_ALLOC_FORCE;
	}
	rcu_read_unlock();
}

static inline u64 calc_global_rsv_need_space(struct btrfs_block_rsv *global)
{
	return (global->size << 1);
}

static int should_alloc_chunk(struct btrfs_fs_info *fs_info,
			      struct btrfs_space_info *sinfo, int force)
{
	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
	u64 bytes_used = btrfs_space_info_used(sinfo, false);
	u64 thresh;

	if (force == CHUNK_ALLOC_FORCE)
		return 1;

	/*
	 * We need to take into account the global rsv because for all intents
	 * and purposes it's used space. Don't worry about locking the
	 * global_rsv, it doesn't change except when the transaction commits.
	 */
	if (sinfo->flags & BTRFS_BLOCK_GROUP_METADATA)
		bytes_used += calc_global_rsv_need_space(global_rsv);

	/*
	 * in limited mode, we want to have some free space up to
	 * about 1% of the FS size.
	 */
	if (force == CHUNK_ALLOC_LIMITED) {
		thresh = btrfs_super_total_bytes(fs_info->super_copy);
		thresh = max_t(u64, SZ_64M, div_factor_fine(thresh, 1));

		if (sinfo->total_bytes - bytes_used < thresh)
			return 1;
	}

	if (bytes_used + SZ_2M < div_factor(sinfo->total_bytes, 8))
		return 0;
	return 1;
}
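
/*
 * Concretely, for the final check above: with a 10GiB space_info,
 * div_factor(total_bytes, 8) is 8GiB, so a new chunk is only requested once
 * used + reserved + pinned + readonly (plus 2MiB of slack) crosses roughly
 * the 80% mark.
 */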
static u64 get_profile_num_devs(struct btrfs_fs_info *fs_info, u64 type)
{
	u64 num_dev;

	if (type & (BTRFS_BLOCK_GROUP_RAID10 |
		    BTRFS_BLOCK_GROUP_RAID0 |
		    BTRFS_BLOCK_GROUP_RAID5 |
		    BTRFS_BLOCK_GROUP_RAID6))
		num_dev = fs_info->fs_devices->rw_devices;
	else if (type & BTRFS_BLOCK_GROUP_RAID1)
		num_dev = 2;
	else
		num_dev = 1;	/* DUP or single */

	return num_dev;
}

/*
 * Reserve space in the system space_info, as needed for allocating or
 * removing a chunk of the given @type.
 */
void check_system_chunk(struct btrfs_trans_handle *trans, u64 type)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_space_info *info;
	u64 left;
	u64 thresh;
	int ret = 0;
	u64 num_devs;

	/*
	 * Needed because we can end up allocating a system chunk and need an
	 * atomic and race free space reservation in the chunk block reserve.
	 */
	lockdep_assert_held(&fs_info->chunk_mutex);

	info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
	spin_lock(&info->lock);
	left = info->total_bytes - btrfs_space_info_used(info, true);
	spin_unlock(&info->lock);

	num_devs = get_profile_num_devs(fs_info, type);

	/* num_devs device items to update and 1 chunk item to add or remove */
	thresh = btrfs_calc_trunc_metadata_size(fs_info, num_devs) +
		 btrfs_calc_trans_metadata_size(fs_info, 1);

	if (left < thresh && btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
		btrfs_info(fs_info, "left=%llu, need=%llu, flags=%llu",
			   left, thresh, type);
		dump_space_info(fs_info, info, 0, 0);
	}

	if (left < thresh) {
		u64 flags = btrfs_system_alloc_profile(fs_info);

		/*
		 * Ignore failure to create system chunk. We might end up not
		 * needing it, as we might not need to COW all nodes/leafs from
		 * the paths we visit in the chunk tree (they were already COWed
		 * or created in the current transaction for example).
		 */
		ret = btrfs_alloc_chunk(trans, flags);
	}

	if (!ret) {
		ret = btrfs_block_rsv_add(fs_info->chunk_root,
					  &fs_info->chunk_block_rsv,
					  thresh, BTRFS_RESERVE_NO_FLUSH);
		if (!ret)
			trans->chunk_bytes_reserved += thresh;
	}
}

/*
 * If force is CHUNK_ALLOC_FORCE:
 *    - return 1 if it successfully allocates a chunk,
 *    - return errors including -ENOSPC otherwise.
 * If force is NOT CHUNK_ALLOC_FORCE:
 *    - return 0 if it doesn't need to allocate a new chunk,
 *    - return 1 if it successfully allocates a chunk,
 *    - return errors including -ENOSPC otherwise.
 */
static int do_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags,
			  int force)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_space_info *space_info;
	bool wait_for_alloc = false;
	bool should_alloc = false;
	int ret = 0;

	/* Don't re-enter if we're already allocating a chunk */
	if (trans->allocating_chunk)
		return -ENOSPC;

	space_info = __find_space_info(fs_info, flags);
	ASSERT(space_info);

	do {
		spin_lock(&space_info->lock);
		if (force < space_info->force_alloc)
			force = space_info->force_alloc;
		should_alloc = should_alloc_chunk(fs_info, space_info, force);
		if (space_info->full) {
			/* No more free physical space */
			if (should_alloc)
				ret = -ENOSPC;
			else
				ret = 0;
			spin_unlock(&space_info->lock);
			return ret;
		} else if (!should_alloc) {
			spin_unlock(&space_info->lock);
			return 0;
		} else if (space_info->chunk_alloc) {
			/*
			 * Someone is already allocating, so we need to block
			 * until this someone is finished and then loop to
			 * recheck if we should continue with our allocation
			 * attempt.
			 */
			wait_for_alloc = true;
			spin_unlock(&space_info->lock);
			mutex_lock(&fs_info->chunk_mutex);
			mutex_unlock(&fs_info->chunk_mutex);
		} else {
			/* Proceed with allocation */
			space_info->chunk_alloc = 1;
			wait_for_alloc = false;
			spin_unlock(&space_info->lock);
		}

		cond_resched();
	} while (wait_for_alloc);

	mutex_lock(&fs_info->chunk_mutex);
	trans->allocating_chunk = true;

	/*
	 * If we have mixed data/metadata chunks we want to make sure we keep
	 * allocating mixed chunks instead of individual chunks.
	 */
	if (btrfs_mixed_space_info(space_info))
		flags |= (BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA);

	/*
	 * if we're doing a data chunk, go ahead and make sure that
	 * we keep a reasonable number of metadata chunks allocated in the
	 * FS as well.
	 */
	if (flags & BTRFS_BLOCK_GROUP_DATA && fs_info->metadata_ratio) {
		fs_info->data_chunk_allocations++;
		if (!(fs_info->data_chunk_allocations %
		      fs_info->metadata_ratio))
			force_metadata_allocation(fs_info);
	}

	/*
	 * Check if we have enough space in SYSTEM chunk because we may need
	 * to update devices.
	 */
	check_system_chunk(trans, flags);

	ret = btrfs_alloc_chunk(trans, flags);
	trans->allocating_chunk = false;

	spin_lock(&space_info->lock);
	if (ret < 0) {
		if (ret == -ENOSPC)
			space_info->full = 1;
		else
			goto out;
	} else {
		ret = 1;
		space_info->max_extent_size = 0;
	}

	space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
out:
	space_info->chunk_alloc = 0;
	spin_unlock(&space_info->lock);
	mutex_unlock(&fs_info->chunk_mutex);
	/*
	 * When we allocate a new chunk we reserve space in the chunk block
	 * reserve to make sure we can COW nodes/leafs in the chunk tree or
	 * add new nodes/leafs to it if we end up needing to do it when
	 * inserting the chunk item and updating device items as part of the
	 * second phase of chunk allocation, performed by
	 * btrfs_finish_chunk_alloc(). So make sure we don't accumulate a
	 * large number of new block groups to create in our transaction
	 * handle's new_bgs list to avoid exhausting the chunk block reserve
	 * in extreme cases - like having a single transaction create many new
	 * block groups when starting to write out the free space caches of all
	 * the block groups that were made dirty during the lifetime of the
	 * transaction.
	 */
	if (trans->chunk_bytes_reserved >= (u64)SZ_2M)
		btrfs_create_pending_block_groups(trans);

	return ret;
}
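
/*
 * A typical caller pattern for do_chunk_alloc(), as used by the data
 * reservation path earlier in this file:
 *
 *	trans = btrfs_join_transaction(root);
 *	if (IS_ERR(trans))
 *		return PTR_ERR(trans);
 *	ret = do_chunk_alloc(trans, btrfs_data_alloc_profile(fs_info),
 *			     CHUNK_ALLOC_NO_FORCE);
 *	btrfs_end_transaction(trans);
 *
 * A return of 1 means a chunk was allocated and the reservation is worth
 * retrying; 0 means no allocation was needed.
 */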
static int can_overcommit(struct btrfs_fs_info *fs_info,
			  struct btrfs_space_info *space_info, u64 bytes,
			  enum btrfs_reserve_flush_enum flush,
			  bool system_chunk)
{
	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
	u64 profile;
	u64 space_size;
	u64 avail;
	u64 used;
	int factor;

	/* Don't overcommit when in mixed mode. */
	if (space_info->flags & BTRFS_BLOCK_GROUP_DATA)
		return 0;

	if (system_chunk)
		profile = btrfs_system_alloc_profile(fs_info);
	else
		profile = btrfs_metadata_alloc_profile(fs_info);

	used = btrfs_space_info_used(space_info, false);

	/*
	 * We only want to allow over committing if we have lots of actual space
	 * free, but if we don't have enough space to handle the global reserve
	 * space then we could end up having a real enospc problem when trying
	 * to allocate a chunk or some other such important allocation.
	 */
	spin_lock(&global_rsv->lock);
	space_size = calc_global_rsv_need_space(global_rsv);
	spin_unlock(&global_rsv->lock);
	if (used + space_size >= space_info->total_bytes)
		return 0;

	used += space_info->bytes_may_use;

	avail = atomic64_read(&fs_info->free_chunk_space);

	/*
	 * If we have dup, raid1 or raid10 then only half of the free
	 * space is actually usable. For raid56, the space info used
	 * doesn't include the parity drive, so we don't have to
	 * change the math.
	 */
	factor = btrfs_bg_type_to_factor(profile);
	avail = div_u64(avail, factor);

	/*
	 * If we aren't flushing all things, let us overcommit up to
	 * half of the space. If we can flush, don't let us overcommit
	 * too much, let it overcommit up to 1/8 of the space.
	 */
	if (flush == BTRFS_RESERVE_FLUSH_ALL)
		avail >>= 3;
	else
		avail >>= 1;

	if (used + bytes < space_info->total_bytes + avail)
		return 1;
	return 0;
}
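
/*
 * Worked example: for a metadata space_info with a RAID1 profile (factor 2)
 * and 8GiB of unallocated device space, avail starts at 4GiB; a
 * BTRFS_RESERVE_FLUSH_ALL reservation may then overcommit by up to 512MiB
 * (avail >> 3), while a non-flushing reservation may overcommit by up to
 * 2GiB (avail >> 1).
 */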
static void btrfs_writeback_inodes_sb_nr(struct btrfs_fs_info *fs_info,
					 unsigned long nr_pages, int nr_items)
{
	struct super_block *sb = fs_info->sb;

	if (down_read_trylock(&sb->s_umount)) {
		writeback_inodes_sb_nr(sb, nr_pages, WB_REASON_FS_FREE_SPACE);
		up_read(&sb->s_umount);
	} else {
		/*
		 * We needn't worry about the filesystem going from r/w to r/o
		 * even though we don't acquire the ->s_umount mutex, because
		 * the filesystem guarantees that the delalloc inodes list is
		 * empty once it is read-only (all dirty pages have been
		 * written to disk).
		 */
		btrfs_start_delalloc_roots(fs_info, nr_items);
		if (!current->journal_info)
			btrfs_wait_ordered_roots(fs_info, nr_items, 0, (u64)-1);
	}
}

static inline u64 calc_reclaim_items_nr(struct btrfs_fs_info *fs_info,
					u64 to_reclaim)
{
	u64 bytes;
	u64 nr;

	bytes = btrfs_calc_trans_metadata_size(fs_info, 1);
	nr = div64_u64(to_reclaim, bytes);
	if (!nr)
		nr = 1;
	return nr;
}

#define EXTENT_SIZE_PER_ITEM	SZ_256K

/*
 * shrink metadata reservation for delalloc
 */
static void shrink_delalloc(struct btrfs_fs_info *fs_info, u64 to_reclaim,
			    u64 orig, bool wait_ordered)
{
	struct btrfs_space_info *space_info;
	struct btrfs_trans_handle *trans;
	u64 delalloc_bytes;
	u64 max_reclaim;
	u64 items;
	long time_left;
	unsigned long nr_pages;
	int loops;

	/* Calculate the number of pages we need to flush for this reservation */
	items = calc_reclaim_items_nr(fs_info, to_reclaim);
	to_reclaim = items * EXTENT_SIZE_PER_ITEM;

	trans = (struct btrfs_trans_handle *)current->journal_info;
	space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);

	delalloc_bytes = percpu_counter_sum_positive(
						&fs_info->delalloc_bytes);
	if (delalloc_bytes == 0) {
		if (trans)
			return;
		if (wait_ordered)
			btrfs_wait_ordered_roots(fs_info, items, 0, (u64)-1);
		return;
	}

	loops = 0;
	while (delalloc_bytes && loops < 3) {
		max_reclaim = min(delalloc_bytes, to_reclaim);
		nr_pages = max_reclaim >> PAGE_SHIFT;
		btrfs_writeback_inodes_sb_nr(fs_info, nr_pages, items);
		/*
		 * We need to wait for the async pages to actually start before
		 * we do anything.
		 */
		max_reclaim = atomic_read(&fs_info->async_delalloc_pages);
		if (!max_reclaim)
			goto skip_async;

		if (max_reclaim <= nr_pages)
			max_reclaim = 0;
		else
			max_reclaim -= nr_pages;

		wait_event(fs_info->async_submit_wait,
			   atomic_read(&fs_info->async_delalloc_pages) <=
			   (int)max_reclaim);
skip_async:
		spin_lock(&space_info->lock);
		if (list_empty(&space_info->tickets) &&
		    list_empty(&space_info->priority_tickets)) {
			spin_unlock(&space_info->lock);
			break;
		}
		spin_unlock(&space_info->lock);

		loops++;
		if (wait_ordered && !trans) {
			btrfs_wait_ordered_roots(fs_info, items, 0, (u64)-1);
		} else {
			time_left = schedule_timeout_killable(1);
			if (time_left)
				break;
		}
		delalloc_bytes = percpu_counter_sum_positive(
						&fs_info->delalloc_bytes);
	}
}

struct reserve_ticket {
	u64 bytes;
	int error;
	struct list_head list;
	wait_queue_head_t wait;
};
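
/*
 * A reserve_ticket represents one pending reservation: @bytes is how much is
 * still missing, @error is filled in if the flusher gives up, and the
 * reserving task sleeps on @wait.  Tickets live on either
 * space_info->tickets (normal flushers) or space_info->priority_tickets and
 * are satisfied in FIFO order as space is returned via
 * space_info_add_old_bytes()/space_info_add_new_bytes() below.
 */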
/**
 * may_commit_transaction - possibly commit the transaction if it's ok to
 * @fs_info - the fs_info for this filesystem
 * @space_info - the space_info we're trying to reserve from
 *
 * This will check to make sure that committing the transaction will actually
 * get us somewhere and then commit the transaction if it does. Otherwise it
 * will return -ENOSPC.
 */
static int may_commit_transaction(struct btrfs_fs_info *fs_info,
				  struct btrfs_space_info *space_info)
{
	struct reserve_ticket *ticket = NULL;
	struct btrfs_block_rsv *delayed_rsv = &fs_info->delayed_block_rsv;
	struct btrfs_trans_handle *trans;
	u64 bytes;

	trans = (struct btrfs_trans_handle *)current->journal_info;
	if (trans)
		return -EAGAIN;

	spin_lock(&space_info->lock);
	if (!list_empty(&space_info->priority_tickets))
		ticket = list_first_entry(&space_info->priority_tickets,
					  struct reserve_ticket, list);
	else if (!list_empty(&space_info->tickets))
		ticket = list_first_entry(&space_info->tickets,
					  struct reserve_ticket, list);
	bytes = (ticket) ? ticket->bytes : 0;
	spin_unlock(&space_info->lock);

	if (!bytes)
		return 0;

	/* See if there is enough pinned space to make this reservation */
	if (__percpu_counter_compare(&space_info->total_bytes_pinned,
				     bytes,
				     BTRFS_TOTAL_BYTES_PINNED_BATCH) >= 0)
		goto commit;

	/*
	 * See if there is some space in the delayed insertion reservation for
	 * this reservation.
	 */
	if (space_info != delayed_rsv->space_info)
		return -ENOSPC;

	spin_lock(&delayed_rsv->lock);
	if (delayed_rsv->size > bytes)
		bytes = 0;
	else
		bytes -= delayed_rsv->size;
	spin_unlock(&delayed_rsv->lock);

	if (__percpu_counter_compare(&space_info->total_bytes_pinned,
				     bytes,
				     BTRFS_TOTAL_BYTES_PINNED_BATCH) < 0) {
		return -ENOSPC;
	}

commit:
	trans = btrfs_join_transaction(fs_info->extent_root);
	if (IS_ERR(trans))
		return -ENOSPC;

	return btrfs_commit_transaction(trans);
}

/*
 * Try to flush some data based on policy set by @state. This is only advisory
 * and may fail for various reasons. The caller is supposed to examine the
 * state of @space_info to detect the outcome.
 */
static void flush_space(struct btrfs_fs_info *fs_info,
			struct btrfs_space_info *space_info, u64 num_bytes,
			int state)
{
	struct btrfs_root *root = fs_info->extent_root;
	struct btrfs_trans_handle *trans;
	int nr;
	int ret = 0;

	switch (state) {
	case FLUSH_DELAYED_ITEMS_NR:
	case FLUSH_DELAYED_ITEMS:
		if (state == FLUSH_DELAYED_ITEMS_NR)
			nr = calc_reclaim_items_nr(fs_info, num_bytes) * 2;
		else
			nr = -1;

		trans = btrfs_join_transaction(root);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			break;
		}
		ret = btrfs_run_delayed_items_nr(trans, nr);
		btrfs_end_transaction(trans);
		break;
	case FLUSH_DELALLOC:
	case FLUSH_DELALLOC_WAIT:
		shrink_delalloc(fs_info, num_bytes * 2, num_bytes,
				state == FLUSH_DELALLOC_WAIT);
		break;
	case ALLOC_CHUNK:
		trans = btrfs_join_transaction(root);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			break;
		}
		ret = do_chunk_alloc(trans,
				     btrfs_metadata_alloc_profile(fs_info),
				     CHUNK_ALLOC_NO_FORCE);
		btrfs_end_transaction(trans);
		if (ret > 0 || ret == -ENOSPC)
			ret = 0;
		break;
	case COMMIT_TRANS:
		ret = may_commit_transaction(fs_info, space_info);
		break;
	default:
		ret = -ENOSPC;
		break;
	}

	trace_btrfs_flush_space(fs_info, space_info->flags, num_bytes, state,
				ret);
	return;
}
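
/*
 * The reclaim paths below walk these states from cheapest to most expensive:
 * FLUSH_DELAYED_ITEMS_NR, FLUSH_DELAYED_ITEMS, FLUSH_DELALLOC,
 * FLUSH_DELALLOC_WAIT, ALLOC_CHUNK and finally COMMIT_TRANS.
 */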
static inline u64
btrfs_calc_reclaim_metadata_size(struct btrfs_fs_info *fs_info,
				 struct btrfs_space_info *space_info,
				 bool system_chunk)
{
	struct reserve_ticket *ticket;
	u64 used;
	u64 expected;
	u64 to_reclaim = 0;

	list_for_each_entry(ticket, &space_info->tickets, list)
		to_reclaim += ticket->bytes;
	list_for_each_entry(ticket, &space_info->priority_tickets, list)
		to_reclaim += ticket->bytes;
	if (to_reclaim)
		return to_reclaim;

	to_reclaim = min_t(u64, num_online_cpus() * SZ_1M, SZ_16M);
	if (can_overcommit(fs_info, space_info, to_reclaim,
			   BTRFS_RESERVE_FLUSH_ALL, system_chunk))
		return 0;

	used = btrfs_space_info_used(space_info, true);

	if (can_overcommit(fs_info, space_info, SZ_1M,
			   BTRFS_RESERVE_FLUSH_ALL, system_chunk))
		expected = div_factor_fine(space_info->total_bytes, 95);
	else
		expected = div_factor_fine(space_info->total_bytes, 90);

	if (used > expected)
		to_reclaim = used - expected;
	else
		to_reclaim = 0;
	to_reclaim = min(to_reclaim, space_info->bytes_may_use +
				     space_info->bytes_reserved);
	return to_reclaim;
}

static inline int need_do_async_reclaim(struct btrfs_fs_info *fs_info,
					struct btrfs_space_info *space_info,
					u64 used, bool system_chunk)
{
	u64 thresh = div_factor_fine(space_info->total_bytes, 98);

	/* If we're just plain full then async reclaim just slows us down. */
	if ((space_info->bytes_used + space_info->bytes_reserved) >= thresh)
		return 0;

	if (!btrfs_calc_reclaim_metadata_size(fs_info, space_info,
					      system_chunk))
		return 0;

	return (used >= thresh && !btrfs_fs_closing(fs_info) &&
		!test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state));
}

static void wake_all_tickets(struct list_head *head)
{
	struct reserve_ticket *ticket;

	while (!list_empty(head)) {
		ticket = list_first_entry(head, struct reserve_ticket, list);
		list_del_init(&ticket->list);
		ticket->error = -ENOSPC;
		wake_up(&ticket->wait);
	}
}

/*
 * This is for normal flushers, we can wait all goddamned day if we want to. We
 * will loop and continuously try to flush as long as we are making progress.
 * We count progress as clearing off tickets each time we have to loop.
 */
static void btrfs_async_reclaim_metadata_space(struct work_struct *work)
{
	struct btrfs_fs_info *fs_info;
	struct btrfs_space_info *space_info;
	u64 to_reclaim;
	int flush_state;
	int commit_cycles = 0;
	u64 last_tickets_id;

	fs_info = container_of(work, struct btrfs_fs_info, async_reclaim_work);
	space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);

	spin_lock(&space_info->lock);
	to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info, space_info,
						      false);
	if (!to_reclaim) {
		space_info->flush = 0;
		spin_unlock(&space_info->lock);
		return;
	}
	last_tickets_id = space_info->tickets_id;
	spin_unlock(&space_info->lock);

	flush_state = FLUSH_DELAYED_ITEMS_NR;
	do {
		flush_space(fs_info, space_info, to_reclaim, flush_state);
		spin_lock(&space_info->lock);
		if (list_empty(&space_info->tickets)) {
			space_info->flush = 0;
			spin_unlock(&space_info->lock);
			return;
		}
		to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info,
							      space_info,
							      false);
		if (last_tickets_id == space_info->tickets_id) {
			flush_state++;
		} else {
			last_tickets_id = space_info->tickets_id;
			flush_state = FLUSH_DELAYED_ITEMS_NR;
			if (commit_cycles)
				commit_cycles--;
		}

		if (flush_state > COMMIT_TRANS) {
			commit_cycles++;
			if (commit_cycles > 2) {
				wake_all_tickets(&space_info->tickets);
				space_info->flush = 0;
			} else {
				flush_state = FLUSH_DELAYED_ITEMS_NR;
			}
		}
		spin_unlock(&space_info->lock);
	} while (flush_state <= COMMIT_TRANS);
}

void btrfs_init_async_reclaim_work(struct work_struct *work)
{
	INIT_WORK(work, btrfs_async_reclaim_metadata_space);
}

static void priority_reclaim_metadata_space(struct btrfs_fs_info *fs_info,
					    struct btrfs_space_info *space_info,
					    struct reserve_ticket *ticket)
{
	u64 to_reclaim;
	int flush_state = FLUSH_DELAYED_ITEMS_NR;

	spin_lock(&space_info->lock);
	to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info, space_info,
						      false);
	if (!to_reclaim) {
		spin_unlock(&space_info->lock);
		return;
	}
	spin_unlock(&space_info->lock);

	do {
		flush_space(fs_info, space_info, to_reclaim, flush_state);
		flush_state++;
		spin_lock(&space_info->lock);
		if (ticket->bytes == 0) {
			spin_unlock(&space_info->lock);
			return;
		}
		spin_unlock(&space_info->lock);

		/*
		 * Priority flushers can't wait on delalloc without
		 * deadlocking.
		 */
		if (flush_state == FLUSH_DELALLOC ||
		    flush_state == FLUSH_DELALLOC_WAIT)
			flush_state = ALLOC_CHUNK;
	} while (flush_state < COMMIT_TRANS);
}

static int wait_reserve_ticket(struct btrfs_fs_info *fs_info,
			       struct btrfs_space_info *space_info,
			       struct reserve_ticket *ticket, u64 orig_bytes)
{
	DEFINE_WAIT(wait);
	int ret = 0;

	spin_lock(&space_info->lock);
	while (ticket->bytes > 0 && ticket->error == 0) {
		ret = prepare_to_wait_event(&ticket->wait, &wait, TASK_KILLABLE);
		if (ret) {
			ret = -EINTR;
			break;
		}
		spin_unlock(&space_info->lock);

		schedule();

		finish_wait(&ticket->wait, &wait);
		spin_lock(&space_info->lock);
	}
	if (!ret)
		ret = ticket->error;
	if (!list_empty(&ticket->list))
		list_del_init(&ticket->list);
	if (ticket->bytes && ticket->bytes < orig_bytes) {
		u64 num_bytes = orig_bytes - ticket->bytes;

		space_info->bytes_may_use -= num_bytes;
		trace_btrfs_space_reservation(fs_info, "space_info",
					      space_info->flags, num_bytes, 0);
	}
	spin_unlock(&space_info->lock);

	return ret;
}

/**
 * __reserve_metadata_bytes - try to reserve bytes from a space_info
 * @fs_info - the fs_info for this filesystem
 * @space_info - the space info we want to allocate from
 * @orig_bytes - the number of bytes we want
 * @flush - whether or not we can flush to make our reservation
 *
 * This will reserve orig_bytes number of bytes from the given space info.
 * If there is not enough space it will make an attempt to flush out space to
 * make room. It will do this by flushing delalloc if possible or committing
 * the transaction. If flush is BTRFS_RESERVE_NO_FLUSH then no attempts to
 * regain reservations will be made and this will fail if there is not enough
 * space already.
 */
static int __reserve_metadata_bytes(struct btrfs_fs_info *fs_info,
				    struct btrfs_space_info *space_info,
				    u64 orig_bytes,
				    enum btrfs_reserve_flush_enum flush,
				    bool system_chunk)
{
	struct reserve_ticket ticket;
	u64 used;
	int ret = 0;

	ASSERT(orig_bytes);
	ASSERT(!current->journal_info || flush != BTRFS_RESERVE_FLUSH_ALL);

	spin_lock(&space_info->lock);
	ret = -ENOSPC;
	used = btrfs_space_info_used(space_info, true);

	/*
	 * If we have enough space then hooray, make our reservation and carry
	 * on. If not see if we can overcommit, and if we can, hooray carry on.
	 * If not things get more complicated.
	 */
	if (used + orig_bytes <= space_info->total_bytes) {
		space_info->bytes_may_use += orig_bytes;
		trace_btrfs_space_reservation(fs_info, "space_info",
					      space_info->flags, orig_bytes, 1);
		ret = 0;
	} else if (can_overcommit(fs_info, space_info, orig_bytes, flush,
				  system_chunk)) {
		space_info->bytes_may_use += orig_bytes;
		trace_btrfs_space_reservation(fs_info, "space_info",
					      space_info->flags, orig_bytes, 1);
		ret = 0;
	}

	/*
	 * If we couldn't make a reservation then setup our reservation ticket
	 * and kick the async worker if it's not already running.
	 *
	 * If we are a priority flusher then we just need to add our ticket to
	 * the list and we will do our own flushing further down.
	 */
	if (ret && flush != BTRFS_RESERVE_NO_FLUSH) {
		ticket.bytes = orig_bytes;
		ticket.error = 0;
		init_waitqueue_head(&ticket.wait);
		if (flush == BTRFS_RESERVE_FLUSH_ALL) {
			list_add_tail(&ticket.list, &space_info->tickets);
			if (!space_info->flush) {
				space_info->flush = 1;
				trace_btrfs_trigger_flush(fs_info,
							  space_info->flags,
							  orig_bytes, flush,
							  "enospc");
				queue_work(system_unbound_wq,
					   &fs_info->async_reclaim_work);
			}
		} else {
			list_add_tail(&ticket.list,
				      &space_info->priority_tickets);
		}
	} else if (!ret && space_info->flags & BTRFS_BLOCK_GROUP_METADATA) {
		used += orig_bytes;
		/*
		 * We will do the space reservation dance during log replay,
		 * which means we won't have fs_info->fs_root set, so don't do
		 * the async reclaim as we will panic.
		 */
		if (!test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags) &&
		    need_do_async_reclaim(fs_info, space_info,
					  used, system_chunk) &&
		    !work_busy(&fs_info->async_reclaim_work)) {
			trace_btrfs_trigger_flush(fs_info, space_info->flags,
						  orig_bytes, flush, "preempt");
			queue_work(system_unbound_wq,
				   &fs_info->async_reclaim_work);
		}
	}
	spin_unlock(&space_info->lock);
	if (!ret || flush == BTRFS_RESERVE_NO_FLUSH)
		return ret;

	if (flush == BTRFS_RESERVE_FLUSH_ALL)
		return wait_reserve_ticket(fs_info, space_info, &ticket,
					   orig_bytes);

	ret = 0;
	priority_reclaim_metadata_space(fs_info, space_info, &ticket);
	spin_lock(&space_info->lock);
	if (ticket.bytes) {
		if (ticket.bytes < orig_bytes) {
			u64 num_bytes = orig_bytes - ticket.bytes;

			space_info->bytes_may_use -= num_bytes;
			trace_btrfs_space_reservation(fs_info, "space_info",
						      space_info->flags,
						      num_bytes, 0);
		}
		list_del_init(&ticket.list);
		ret = -ENOSPC;
	}
	spin_unlock(&space_info->lock);
	ASSERT(list_empty(&ticket.list));
	return ret;
}
/**
 * reserve_metadata_bytes - try to reserve bytes from the block_rsv's space
 * @root - the root we're allocating for
 * @block_rsv - the block_rsv we're allocating for
 * @orig_bytes - the number of bytes we want
 * @flush - whether or not we can flush to make our reservation
 *
 * This will reserve orig_bytes number of bytes from the space info associated
 * with the block_rsv. If there is not enough space it will make an attempt to
 * flush out space to make room. It will do this by flushing delalloc if
 * possible or committing the transaction. If flush is 0 then no attempts to
 * regain reservations will be made and this will fail if there is not enough
 * space already.
 */
static int reserve_metadata_bytes(struct btrfs_root *root,
				  struct btrfs_block_rsv *block_rsv,
				  u64 orig_bytes,
				  enum btrfs_reserve_flush_enum flush)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
	int ret;
	bool system_chunk = (root == fs_info->chunk_root);

	ret = __reserve_metadata_bytes(fs_info, block_rsv->space_info,
				       orig_bytes, flush, system_chunk);
	if (ret == -ENOSPC &&
	    unlikely(root->orphan_cleanup_state == ORPHAN_CLEANUP_STARTED)) {
		if (block_rsv != global_rsv &&
		    !block_rsv_use_bytes(global_rsv, orig_bytes))
			ret = 0;
	}
	if (ret == -ENOSPC) {
		trace_btrfs_space_reservation(fs_info, "space_info:enospc",
					      block_rsv->space_info->flags,
					      orig_bytes, 1);

		if (btrfs_test_opt(fs_info, ENOSPC_DEBUG))
			dump_space_info(fs_info, block_rsv->space_info,
					orig_bytes, 0);
	}
	return ret;
}

static struct btrfs_block_rsv *get_block_rsv(
					const struct btrfs_trans_handle *trans,
					const struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_block_rsv *block_rsv = NULL;

	if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) ||
	    (root == fs_info->csum_root && trans->adding_csums) ||
	    (root == fs_info->uuid_root))
		block_rsv = trans->block_rsv;

	if (!block_rsv)
		block_rsv = root->block_rsv;

	if (!block_rsv)
		block_rsv = &fs_info->empty_block_rsv;

	return block_rsv;
}

static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
			       u64 num_bytes)
{
	int ret = -ENOSPC;

	spin_lock(&block_rsv->lock);
	if (block_rsv->reserved >= num_bytes) {
		block_rsv->reserved -= num_bytes;
		if (block_rsv->reserved < block_rsv->size)
			block_rsv->full = 0;
		ret = 0;
	}
	spin_unlock(&block_rsv->lock);
	return ret;
}

static void block_rsv_add_bytes(struct btrfs_block_rsv *block_rsv,
				u64 num_bytes, bool update_size)
{
	spin_lock(&block_rsv->lock);
	block_rsv->reserved += num_bytes;
	if (update_size)
		block_rsv->size += num_bytes;
	else if (block_rsv->reserved >= block_rsv->size)
		block_rsv->full = 1;
	spin_unlock(&block_rsv->lock);
}

int btrfs_cond_migrate_bytes(struct btrfs_fs_info *fs_info,
			     struct btrfs_block_rsv *dest, u64 num_bytes,
			     int min_factor)
{
	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
	u64 min_bytes;

	if (global_rsv->space_info != dest->space_info)
		return -ENOSPC;

	spin_lock(&global_rsv->lock);
	min_bytes = div_factor(global_rsv->size, min_factor);
	if (global_rsv->reserved < min_bytes + num_bytes) {
		spin_unlock(&global_rsv->lock);
		return -ENOSPC;
	}
	global_rsv->reserved -= num_bytes;
	if (global_rsv->reserved < global_rsv->size)
		global_rsv->full = 0;
	spin_unlock(&global_rsv->lock);

	block_rsv_add_bytes(dest, num_bytes, true);
	return 0;
}

/*
 * This is for space we already have accounted in space_info->bytes_may_use, so
 * basically when we're returning space from block_rsv's.
 */
static void space_info_add_old_bytes(struct btrfs_fs_info *fs_info,
				     struct btrfs_space_info *space_info,
				     u64 num_bytes)
{
	struct reserve_ticket *ticket;
	struct list_head *head;
	u64 used;
	enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_NO_FLUSH;
	bool check_overcommit = false;

	spin_lock(&space_info->lock);
	head = &space_info->priority_tickets;

	/*
	 * If we are over our limit then we need to check and see if we can
	 * overcommit, and if we can't then we just need to free up our space
	 * and not satisfy any requests.
	 */
	used = btrfs_space_info_used(space_info, true);
	if (used - num_bytes >= space_info->total_bytes)
		check_overcommit = true;
again:
	while (!list_empty(head) && num_bytes) {
		ticket = list_first_entry(head, struct reserve_ticket,
					  list);
		/*
		 * We use 0 bytes because this space is already reserved, so
		 * adding the ticket space would be a double count.
		 */
		if (check_overcommit &&
		    !can_overcommit(fs_info, space_info, 0, flush, false))
			break;
		if (num_bytes >= ticket->bytes) {
			list_del_init(&ticket->list);
			num_bytes -= ticket->bytes;
			ticket->bytes = 0;
			space_info->tickets_id++;
			wake_up(&ticket->wait);
		} else {
			ticket->bytes -= num_bytes;
			num_bytes = 0;
		}
	}

	if (num_bytes && head == &space_info->priority_tickets) {
		head = &space_info->tickets;
		flush = BTRFS_RESERVE_FLUSH_ALL;
		goto again;
	}
	space_info->bytes_may_use -= num_bytes;
	trace_btrfs_space_reservation(fs_info, "space_info",
				      space_info->flags, num_bytes, 0);
	spin_unlock(&space_info->lock);
}

/*
 * This is for newly allocated space that isn't accounted in
 * space_info->bytes_may_use yet. So if we allocate a chunk or unpin an extent
 * we use this helper.
 */
static void space_info_add_new_bytes(struct btrfs_fs_info *fs_info,
				     struct btrfs_space_info *space_info,
				     u64 num_bytes)
{
	struct reserve_ticket *ticket;
	struct list_head *head = &space_info->priority_tickets;

again:
	while (!list_empty(head) && num_bytes) {
		ticket = list_first_entry(head, struct reserve_ticket,
					  list);
		if (num_bytes >= ticket->bytes) {
			trace_btrfs_space_reservation(fs_info, "space_info",
						      space_info->flags,
						      ticket->bytes, 1);
			list_del_init(&ticket->list);
			num_bytes -= ticket->bytes;
			space_info->bytes_may_use += ticket->bytes;
			ticket->bytes = 0;
			space_info->tickets_id++;
			wake_up(&ticket->wait);
		} else {
			trace_btrfs_space_reservation(fs_info, "space_info",
						      space_info->flags,
						      num_bytes, 1);
			space_info->bytes_may_use += num_bytes;
			ticket->bytes -= num_bytes;
			num_bytes = 0;
		}
	}

	if (num_bytes && head == &space_info->priority_tickets) {
		head = &space_info->tickets;
		goto again;
	}
}

static u64 block_rsv_release_bytes(struct btrfs_fs_info *fs_info,
				   struct btrfs_block_rsv *block_rsv,
				   struct btrfs_block_rsv *dest, u64 num_bytes,
				   u64 *qgroup_to_release_ret)
{
	struct btrfs_space_info *space_info = block_rsv->space_info;
	u64 qgroup_to_release = 0;
	u64 ret;

	spin_lock(&block_rsv->lock);
	if (num_bytes == (u64)-1) {
		num_bytes = block_rsv->size;
		qgroup_to_release = block_rsv->qgroup_rsv_size;
	}
	block_rsv->size -= num_bytes;
	if (block_rsv->reserved >= block_rsv->size) {
		num_bytes = block_rsv->reserved - block_rsv->size;
		block_rsv->reserved = block_rsv->size;
		block_rsv->full = 1;
	} else {
		num_bytes = 0;
	}
	if (block_rsv->qgroup_rsv_reserved >= block_rsv->qgroup_rsv_size) {
		qgroup_to_release = block_rsv->qgroup_rsv_reserved -
				    block_rsv->qgroup_rsv_size;
		block_rsv->qgroup_rsv_reserved = block_rsv->qgroup_rsv_size;
	} else {
		qgroup_to_release = 0;
	}
	spin_unlock(&block_rsv->lock);

	ret = num_bytes;
	if (num_bytes > 0) {
		if (dest) {
			spin_lock(&dest->lock);
			if (!dest->full) {
				u64 bytes_to_add;

				bytes_to_add = dest->size - dest->reserved;
				bytes_to_add = min(num_bytes, bytes_to_add);
				dest->reserved += bytes_to_add;
				if (dest->reserved >= dest->size)
					dest->full = 1;
				num_bytes -= bytes_to_add;
			}
			spin_unlock(&dest->lock);
		}
		if (num_bytes)
			space_info_add_old_bytes(fs_info, space_info,
						 num_bytes);
	}
	if (qgroup_to_release_ret)
		*qgroup_to_release_ret = qgroup_to_release;
	return ret;
}

int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src,
			    struct btrfs_block_rsv *dst, u64 num_bytes,
			    bool update_size)
{
	int ret;

	ret = block_rsv_use_bytes(src, num_bytes);
	if (ret)
		return ret;

	block_rsv_add_bytes(dst, num_bytes, update_size);
	return 0;
}
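
/*
 * btrfs_block_rsv_migrate() moves already-reserved space between two rsvs
 * without touching the space_info counters.  For example, moving 1MiB out of
 * a source rsv holding 4MiB leaves the source with 3MiB reserved and, when
 * @update_size is true, grows both the destination's size and its reserved
 * amount by 1MiB.
 */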
  4878. void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv, unsigned short type)
  4879. {
  4880. memset(rsv, 0, sizeof(*rsv));
  4881. spin_lock_init(&rsv->lock);
  4882. rsv->type = type;
  4883. }
  4884. void btrfs_init_metadata_block_rsv(struct btrfs_fs_info *fs_info,
  4885. struct btrfs_block_rsv *rsv,
  4886. unsigned short type)
  4887. {
  4888. btrfs_init_block_rsv(rsv, type);
  4889. rsv->space_info = __find_space_info(fs_info,
  4890. BTRFS_BLOCK_GROUP_METADATA);
  4891. }
  4892. struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_fs_info *fs_info,
  4893. unsigned short type)
  4894. {
  4895. struct btrfs_block_rsv *block_rsv;
  4896. block_rsv = kmalloc(sizeof(*block_rsv), GFP_NOFS);
  4897. if (!block_rsv)
  4898. return NULL;
  4899. btrfs_init_metadata_block_rsv(fs_info, block_rsv, type);
  4900. return block_rsv;
  4901. }
  4902. void btrfs_free_block_rsv(struct btrfs_fs_info *fs_info,
  4903. struct btrfs_block_rsv *rsv)
  4904. {
  4905. if (!rsv)
  4906. return;
  4907. btrfs_block_rsv_release(fs_info, rsv, (u64)-1);
  4908. kfree(rsv);
  4909. }
  4910. int btrfs_block_rsv_add(struct btrfs_root *root,
  4911. struct btrfs_block_rsv *block_rsv, u64 num_bytes,
  4912. enum btrfs_reserve_flush_enum flush)
  4913. {
  4914. int ret;
  4915. if (num_bytes == 0)
  4916. return 0;
  4917. ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
  4918. if (!ret)
  4919. block_rsv_add_bytes(block_rsv, num_bytes, true);
  4920. return ret;
  4921. }
  4922. int btrfs_block_rsv_check(struct btrfs_block_rsv *block_rsv, int min_factor)
  4923. {
  4924. u64 num_bytes = 0;
  4925. int ret = -ENOSPC;
  4926. if (!block_rsv)
  4927. return 0;
  4928. spin_lock(&block_rsv->lock);
  4929. num_bytes = div_factor(block_rsv->size, min_factor);
  4930. if (block_rsv->reserved >= num_bytes)
  4931. ret = 0;
  4932. spin_unlock(&block_rsv->lock);
  4933. return ret;
  4934. }
  4935. int btrfs_block_rsv_refill(struct btrfs_root *root,
  4936. struct btrfs_block_rsv *block_rsv, u64 min_reserved,
  4937. enum btrfs_reserve_flush_enum flush)
  4938. {
  4939. u64 num_bytes = 0;
  4940. int ret = -ENOSPC;
  4941. if (!block_rsv)
  4942. return 0;
  4943. spin_lock(&block_rsv->lock);
  4944. num_bytes = min_reserved;
  4945. if (block_rsv->reserved >= num_bytes)
  4946. ret = 0;
  4947. else
  4948. num_bytes -= block_rsv->reserved;
  4949. spin_unlock(&block_rsv->lock);
  4950. if (!ret)
  4951. return 0;
  4952. ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
  4953. if (!ret) {
  4954. block_rsv_add_bytes(block_rsv, num_bytes, false);
  4955. return 0;
  4956. }
  4957. return ret;
  4958. }
/**
 * btrfs_inode_rsv_refill - refill the inode block rsv.
 * @inode - the inode we are refilling.
 * @flush - the flushing restriction.
 *
 * Essentially the same as btrfs_block_rsv_refill, except it uses the
 * block_rsv->size as the minimum size. We'll either refill the missing amount
 * or return if we already have enough space. This also handles the
 * reservation tracepoint for the reserved amount.
 */
static int btrfs_inode_rsv_refill(struct btrfs_inode *inode,
				  enum btrfs_reserve_flush_enum flush)
{
	struct btrfs_root *root = inode->root;
	struct btrfs_block_rsv *block_rsv = &inode->block_rsv;
	u64 num_bytes = 0;
	u64 qgroup_num_bytes = 0;
	int ret = -ENOSPC;

	spin_lock(&block_rsv->lock);
	if (block_rsv->reserved < block_rsv->size)
		num_bytes = block_rsv->size - block_rsv->reserved;
	if (block_rsv->qgroup_rsv_reserved < block_rsv->qgroup_rsv_size)
		qgroup_num_bytes = block_rsv->qgroup_rsv_size -
				   block_rsv->qgroup_rsv_reserved;
	spin_unlock(&block_rsv->lock);

	if (num_bytes == 0)
		return 0;

	ret = btrfs_qgroup_reserve_meta_prealloc(root, qgroup_num_bytes, true);
	if (ret)
		return ret;
	ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
	if (!ret) {
		block_rsv_add_bytes(block_rsv, num_bytes, false);
		trace_btrfs_space_reservation(root->fs_info, "delalloc",
					      btrfs_ino(inode), num_bytes, 1);

		/* Don't forget to increase qgroup_rsv_reserved */
		spin_lock(&block_rsv->lock);
		block_rsv->qgroup_rsv_reserved += qgroup_num_bytes;
		spin_unlock(&block_rsv->lock);
	} else
		btrfs_qgroup_free_meta_prealloc(root, qgroup_num_bytes);
	return ret;
}
/**
 * btrfs_inode_rsv_release - release any excessive reservation.
 * @inode - the inode we need to release from.
 * @qgroup_free - free or convert qgroup meta.
 *   Unlike a normal release, the qgroup meta reservation needs to know
 *   whether we are freeing the qgroup reservation or just converting it
 *   into per-trans. Normally @qgroup_free is true for error handling,
 *   and false for the normal release path.
 *
 * This is the same as btrfs_block_rsv_release, except that it handles the
 * tracepoint for the reservation.
 */
static void btrfs_inode_rsv_release(struct btrfs_inode *inode, bool qgroup_free)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
	struct btrfs_block_rsv *block_rsv = &inode->block_rsv;
	u64 released = 0;
	u64 qgroup_to_release = 0;

	/*
	 * Since we statically set the block_rsv->size we just want to say we
	 * are releasing 0 bytes, and then we'll just get the reservation over
	 * the size free'd.
	 */
	released = block_rsv_release_bytes(fs_info, block_rsv, global_rsv, 0,
					   &qgroup_to_release);
	if (released > 0)
		trace_btrfs_space_reservation(fs_info, "delalloc",
					      btrfs_ino(inode), released, 0);
	if (qgroup_free)
		btrfs_qgroup_free_meta_prealloc(inode->root, qgroup_to_release);
	else
		btrfs_qgroup_convert_reserved_meta(inode->root,
						   qgroup_to_release);
}
void btrfs_block_rsv_release(struct btrfs_fs_info *fs_info,
			     struct btrfs_block_rsv *block_rsv,
			     u64 num_bytes)
{
	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;

	if (global_rsv == block_rsv ||
	    block_rsv->space_info != global_rsv->space_info)
		global_rsv = NULL;
	block_rsv_release_bytes(fs_info, block_rsv, global_rsv, num_bytes, NULL);
}

static void update_global_block_rsv(struct btrfs_fs_info *fs_info)
{
	struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
	struct btrfs_space_info *sinfo = block_rsv->space_info;
	u64 num_bytes;

	/*
	 * The global block rsv is based on the size of the extent tree, the
	 * checksum tree and the root tree. If the fs is empty we want to set
	 * it to a minimal amount for safety.
	 */
	num_bytes = btrfs_root_used(&fs_info->extent_root->root_item) +
		    btrfs_root_used(&fs_info->csum_root->root_item) +
		    btrfs_root_used(&fs_info->tree_root->root_item);
	num_bytes = max_t(u64, num_bytes, SZ_16M);

	spin_lock(&sinfo->lock);
	spin_lock(&block_rsv->lock);

	block_rsv->size = min_t(u64, num_bytes, SZ_512M);

	if (block_rsv->reserved < block_rsv->size) {
		num_bytes = btrfs_space_info_used(sinfo, true);
		if (sinfo->total_bytes > num_bytes) {
			num_bytes = sinfo->total_bytes - num_bytes;
			num_bytes = min(num_bytes,
					block_rsv->size - block_rsv->reserved);
			block_rsv->reserved += num_bytes;
			sinfo->bytes_may_use += num_bytes;
			trace_btrfs_space_reservation(fs_info, "space_info",
						      sinfo->flags, num_bytes,
						      1);
		}
	} else if (block_rsv->reserved > block_rsv->size) {
		num_bytes = block_rsv->reserved - block_rsv->size;
		sinfo->bytes_may_use -= num_bytes;
		trace_btrfs_space_reservation(fs_info, "space_info",
					      sinfo->flags, num_bytes, 0);
		block_rsv->reserved = block_rsv->size;
	}

	if (block_rsv->reserved == block_rsv->size)
		block_rsv->full = 1;
	else
		block_rsv->full = 0;

	spin_unlock(&block_rsv->lock);
	spin_unlock(&sinfo->lock);
}
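
/*
 * Worked example of the sizing logic above: if the three roots sum to
 * 3G of used metadata, num_bytes = max(3G, SZ_16M) = 3G and the global
 * rsv size is clamped to min(3G, SZ_512M) = 512M. On a nearly empty
 * filesystem the same math instead floors the size at 16M.
 */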
static void init_global_block_rsv(struct btrfs_fs_info *fs_info)
{
	struct btrfs_space_info *space_info;

	space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
	fs_info->chunk_block_rsv.space_info = space_info;

	space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
	fs_info->global_block_rsv.space_info = space_info;
	fs_info->trans_block_rsv.space_info = space_info;
	fs_info->empty_block_rsv.space_info = space_info;
	fs_info->delayed_block_rsv.space_info = space_info;

	fs_info->extent_root->block_rsv = &fs_info->global_block_rsv;
	fs_info->csum_root->block_rsv = &fs_info->global_block_rsv;
	fs_info->dev_root->block_rsv = &fs_info->global_block_rsv;
	fs_info->tree_root->block_rsv = &fs_info->global_block_rsv;
	if (fs_info->quota_root)
		fs_info->quota_root->block_rsv = &fs_info->global_block_rsv;
	fs_info->chunk_root->block_rsv = &fs_info->chunk_block_rsv;

	update_global_block_rsv(fs_info);
}

static void release_global_block_rsv(struct btrfs_fs_info *fs_info)
{
	block_rsv_release_bytes(fs_info, &fs_info->global_block_rsv, NULL,
				(u64)-1, NULL);
	WARN_ON(fs_info->trans_block_rsv.size > 0);
	WARN_ON(fs_info->trans_block_rsv.reserved > 0);
	WARN_ON(fs_info->chunk_block_rsv.size > 0);
	WARN_ON(fs_info->chunk_block_rsv.reserved > 0);
	WARN_ON(fs_info->delayed_block_rsv.size > 0);
	WARN_ON(fs_info->delayed_block_rsv.reserved > 0);
}

/*
 * To be called after all the new block groups attached to the transaction
 * handle have been created (btrfs_create_pending_block_groups()).
 */
void btrfs_trans_release_chunk_metadata(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;

	if (!trans->chunk_bytes_reserved)
		return;

	WARN_ON_ONCE(!list_empty(&trans->new_bgs));

	block_rsv_release_bytes(fs_info, &fs_info->chunk_block_rsv, NULL,
				trans->chunk_bytes_reserved, NULL);
	trans->chunk_bytes_reserved = 0;
}
/*
 * btrfs_subvolume_reserve_metadata() - reserve space for subvolume operation
 * root: the root of the parent directory
 * rsv: block reservation
 * items: the number of items that we need to reserve space for
 * use_global_rsv: allow fallback to the global block reservation
 *
 * This function is used to reserve the space for snapshot/subvolume
 * creation and deletion. Those operations differ from the common
 * file/directory operations: they change two fs/file trees
 * and the root tree, and the number of items that the qgroup reserves
 * differs from the free space reservation. So we can not use
 * the space reservation mechanism in start_transaction().
 */
int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
				     struct btrfs_block_rsv *rsv, int items,
				     bool use_global_rsv)
{
	u64 qgroup_num_bytes = 0;
	u64 num_bytes;
	int ret;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;

	if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)) {
		/* One for parent inode, two for dir entries */
		qgroup_num_bytes = 3 * fs_info->nodesize;
		ret = btrfs_qgroup_reserve_meta_prealloc(root,
				qgroup_num_bytes, true);
		if (ret)
			return ret;
	}

	num_bytes = btrfs_calc_trans_metadata_size(fs_info, items);
	rsv->space_info = __find_space_info(fs_info,
					    BTRFS_BLOCK_GROUP_METADATA);
	ret = btrfs_block_rsv_add(root, rsv, num_bytes,
				  BTRFS_RESERVE_FLUSH_ALL);

	if (ret == -ENOSPC && use_global_rsv)
		ret = btrfs_block_rsv_migrate(global_rsv, rsv, num_bytes, true);

	if (ret && qgroup_num_bytes)
		btrfs_qgroup_free_meta_prealloc(root, qgroup_num_bytes);

	return ret;
}

void btrfs_subvolume_release_metadata(struct btrfs_fs_info *fs_info,
				      struct btrfs_block_rsv *rsv)
{
	btrfs_block_rsv_release(fs_info, rsv, (u64)-1);
}
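
/*
 * Usage sketch for the pair above (hypothetical snapshot-creation
 * caller; "pending" and "nr_items" are illustrative names only):
 *
 *	ret = btrfs_subvolume_reserve_metadata(root, &pending->block_rsv,
 *					       nr_items, true);
 *	if (ret)
 *		return ret;
 *	...create the snapshot items...
 *	btrfs_subvolume_release_metadata(fs_info, &pending->block_rsv);
 */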
static void btrfs_calculate_inode_block_rsv_size(struct btrfs_fs_info *fs_info,
						 struct btrfs_inode *inode)
{
	struct btrfs_block_rsv *block_rsv = &inode->block_rsv;
	u64 reserve_size = 0;
	u64 qgroup_rsv_size = 0;
	u64 csum_leaves;
	unsigned outstanding_extents;

	lockdep_assert_held(&inode->lock);
	outstanding_extents = inode->outstanding_extents;
	if (outstanding_extents)
		reserve_size = btrfs_calc_trans_metadata_size(fs_info,
						outstanding_extents + 1);
	csum_leaves = btrfs_csum_bytes_to_leaves(fs_info,
						 inode->csum_bytes);
	reserve_size += btrfs_calc_trans_metadata_size(fs_info,
						       csum_leaves);
	/*
	 * For qgroup rsv, the calculation is very simple:
	 * account one nodesize for each outstanding extent
	 *
	 * This is overestimating in most cases.
	 */
	qgroup_rsv_size = outstanding_extents * fs_info->nodesize;

	spin_lock(&block_rsv->lock);
	block_rsv->size = reserve_size;
	block_rsv->qgroup_rsv_size = qgroup_rsv_size;
	spin_unlock(&block_rsv->lock);
}
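
/*
 * Worked example for the calculation above (hypothetical numbers, and
 * assuming btrfs_calc_trans_metadata_size() charges per item): with 2
 * outstanding extents we size for 2 + 1 = 3 items worth of metadata,
 * plus however many csum leaves inode->csum_bytes requires, while the
 * qgroup rsv is simply 2 * nodesize (e.g. 2 * 16K = 32K).
 */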
int btrfs_delalloc_reserve_metadata(struct btrfs_inode *inode, u64 num_bytes)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	unsigned nr_extents;
	enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_FLUSH_ALL;
	int ret = 0;
	bool delalloc_lock = true;

	/*
	 * If we are a free space inode we must not flush, since we will be in
	 * the middle of a transaction commit. We also don't need the delalloc
	 * mutex since we won't race with anybody. We need this mostly to make
	 * lockdep shut its filthy mouth.
	 *
	 * If we have a transaction open (can happen if we call truncate_block
	 * from truncate), then we need FLUSH_LIMIT so we don't deadlock.
	 */
	if (btrfs_is_free_space_inode(inode)) {
		flush = BTRFS_RESERVE_NO_FLUSH;
		delalloc_lock = false;
	} else {
		if (current->journal_info)
			flush = BTRFS_RESERVE_FLUSH_LIMIT;

		if (btrfs_transaction_in_commit(fs_info))
			schedule_timeout(1);
	}

	if (delalloc_lock)
		mutex_lock(&inode->delalloc_mutex);

	num_bytes = ALIGN(num_bytes, fs_info->sectorsize);

	/* Add our new extents and calculate the new rsv size. */
	spin_lock(&inode->lock);
	nr_extents = count_max_extents(num_bytes);
	btrfs_mod_outstanding_extents(inode, nr_extents);
	inode->csum_bytes += num_bytes;
	btrfs_calculate_inode_block_rsv_size(fs_info, inode);
	spin_unlock(&inode->lock);

	ret = btrfs_inode_rsv_refill(inode, flush);
	if (unlikely(ret))
		goto out_fail;

	if (delalloc_lock)
		mutex_unlock(&inode->delalloc_mutex);
	return 0;

out_fail:
	spin_lock(&inode->lock);
	nr_extents = count_max_extents(num_bytes);
	btrfs_mod_outstanding_extents(inode, -nr_extents);
	inode->csum_bytes -= num_bytes;
	btrfs_calculate_inode_block_rsv_size(fs_info, inode);
	spin_unlock(&inode->lock);

	btrfs_inode_rsv_release(inode, true);
	if (delalloc_lock)
		mutex_unlock(&inode->delalloc_mutex);
	return ret;
}
/**
 * btrfs_delalloc_release_metadata - release a metadata reservation for an inode
 * @inode: the inode to release the reservation for.
 * @num_bytes: the number of bytes we are releasing.
 * @qgroup_free: free qgroup reservation or convert it to per-trans reservation
 *
 * This will release the metadata reservation for an inode. This can be called
 * once we complete IO for a given set of bytes to release their metadata
 * reservations, or on error for the same reason.
 */
void btrfs_delalloc_release_metadata(struct btrfs_inode *inode, u64 num_bytes,
				     bool qgroup_free)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;

	num_bytes = ALIGN(num_bytes, fs_info->sectorsize);
	spin_lock(&inode->lock);
	inode->csum_bytes -= num_bytes;
	btrfs_calculate_inode_block_rsv_size(fs_info, inode);
	spin_unlock(&inode->lock);

	if (btrfs_is_testing(fs_info))
		return;

	btrfs_inode_rsv_release(inode, qgroup_free);
}

/**
 * btrfs_delalloc_release_extents - release our outstanding_extents
 * @inode: the inode to balance the reservation for.
 * @num_bytes: the number of bytes we originally reserved.
 * @qgroup_free: do we need to free the qgroup meta reservation or convert it.
 *
 * When we reserve space we increase outstanding_extents for the extents we may
 * add. Once we've set the range as delalloc or created our ordered extents we
 * have outstanding_extents to track the real usage, so we use this to free our
 * temporarily tracked outstanding_extents. This _must_ be used in conjunction
 * with btrfs_delalloc_reserve_metadata.
 */
void btrfs_delalloc_release_extents(struct btrfs_inode *inode, u64 num_bytes,
				    bool qgroup_free)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	unsigned num_extents;

	spin_lock(&inode->lock);
	num_extents = count_max_extents(num_bytes);
	btrfs_mod_outstanding_extents(inode, -num_extents);
	btrfs_calculate_inode_block_rsv_size(fs_info, inode);
	spin_unlock(&inode->lock);

	if (btrfs_is_testing(fs_info))
		return;

	btrfs_inode_rsv_release(inode, qgroup_free);
}
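
/*
 * The reserve/release pairing the comment above insists on, as a
 * sketch (hypothetical write-path caller):
 *
 *	ret = btrfs_delalloc_reserve_metadata(inode, len);
 *	if (ret)
 *		return ret;
 *	...set the range delalloc / create the ordered extents...
 *	btrfs_delalloc_release_extents(inode, len, false);
 */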
/**
 * btrfs_delalloc_reserve_space - reserve data and metadata space for
 * delalloc
 * @inode: inode we're writing to
 * @start: start range we are writing to
 * @len: the length of the range we are writing to
 * @reserved: mandatory parameter, record actually reserved qgroup ranges of
 *	      current reservation.
 *
 * This will do the following things
 *
 * o reserve space in data space info for num bytes
 *   and reserve precious corresponding qgroup space
 *   (Done in check_data_free_space)
 *
 * o reserve space for metadata space, based on the number of outstanding
 *   extents and how many csums will be needed;
 *   also reserve metadata space in a per root over-reserve method.
 * o add to the inodes->delalloc_bytes
 * o add it to the fs_info's delalloc inodes list.
 *   (The above 3 are all done in delalloc_reserve_metadata)
 *
 * Return 0 for success
 * Return <0 for error (-ENOSPC or -EDQUOT)
 */
int btrfs_delalloc_reserve_space(struct inode *inode,
			struct extent_changeset **reserved, u64 start, u64 len)
{
	int ret;

	ret = btrfs_check_data_free_space(inode, reserved, start, len);
	if (ret < 0)
		return ret;
	ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode), len);
	if (ret < 0)
		btrfs_free_reserved_data_space(inode, *reserved, start, len);
	return ret;
}
/**
 * btrfs_delalloc_release_space - release data and metadata space for delalloc
 * @inode: inode we're releasing space for
 * @reserved: the qgroup ranges recorded at reservation time
 * @start: start position of the space already reserved
 * @len: length of the space already reserved
 * @qgroup_free: free the qgroup reservation instead of converting it
 *
 * This function will release the metadata space that was not used and will
 * decrement ->delalloc_bytes and remove it from the fs_info delalloc_inodes
 * list if there are no delalloc bytes left.
 * Also it will handle the qgroup reserved space.
 */
void btrfs_delalloc_release_space(struct inode *inode,
				  struct extent_changeset *reserved,
				  u64 start, u64 len, bool qgroup_free)
{
	btrfs_delalloc_release_metadata(BTRFS_I(inode), len, qgroup_free);
	btrfs_free_reserved_data_space(inode, reserved, start, len);
}
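
/*
 * Data + metadata flow end to end (usage sketch only; do_the_write()
 * is hypothetical, and extent_changeset_free() is assumed to be the
 * changeset destructor):
 *
 *	struct extent_changeset *reserved = NULL;
 *
 *	ret = btrfs_delalloc_reserve_space(inode, &reserved, pos, count);
 *	if (ret)
 *		return ret;
 *	ret = do_the_write(...);
 *	if (ret)
 *		btrfs_delalloc_release_space(inode, reserved, pos, count,
 *					     true);
 *	extent_changeset_free(reserved);
 */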
static int update_block_group(struct btrfs_trans_handle *trans,
			      struct btrfs_fs_info *info, u64 bytenr,
			      u64 num_bytes, int alloc)
{
	struct btrfs_block_group_cache *cache = NULL;
	u64 total = num_bytes;
	u64 old_val;
	u64 byte_in_group;
	int factor;

	/* block accounting for super block */
	spin_lock(&info->delalloc_root_lock);
	old_val = btrfs_super_bytes_used(info->super_copy);
	if (alloc)
		old_val += num_bytes;
	else
		old_val -= num_bytes;
	btrfs_set_super_bytes_used(info->super_copy, old_val);
	spin_unlock(&info->delalloc_root_lock);

	while (total) {
		cache = btrfs_lookup_block_group(info, bytenr);
		if (!cache)
			return -ENOENT;
		factor = btrfs_bg_type_to_factor(cache->flags);

		/*
		 * If this block group has free space cache written out, we
		 * need to make sure to load it if we are removing space. This
		 * is because we need the unpinning stage to actually add the
		 * space back to the block group, otherwise we will leak space.
		 */
		if (!alloc && cache->cached == BTRFS_CACHE_NO)
			cache_block_group(cache, 1);

		byte_in_group = bytenr - cache->key.objectid;
		WARN_ON(byte_in_group > cache->key.offset);

		spin_lock(&cache->space_info->lock);
		spin_lock(&cache->lock);

		if (btrfs_test_opt(info, SPACE_CACHE) &&
		    cache->disk_cache_state < BTRFS_DC_CLEAR)
			cache->disk_cache_state = BTRFS_DC_CLEAR;

		old_val = btrfs_block_group_used(&cache->item);
		num_bytes = min(total, cache->key.offset - byte_in_group);
		if (alloc) {
			old_val += num_bytes;
			btrfs_set_block_group_used(&cache->item, old_val);
			cache->reserved -= num_bytes;
			cache->space_info->bytes_reserved -= num_bytes;
			cache->space_info->bytes_used += num_bytes;
			cache->space_info->disk_used += num_bytes * factor;
			spin_unlock(&cache->lock);
			spin_unlock(&cache->space_info->lock);
		} else {
			old_val -= num_bytes;
			btrfs_set_block_group_used(&cache->item, old_val);
			cache->pinned += num_bytes;
			cache->space_info->bytes_pinned += num_bytes;
			cache->space_info->bytes_used -= num_bytes;
			cache->space_info->disk_used -= num_bytes * factor;
			spin_unlock(&cache->lock);
			spin_unlock(&cache->space_info->lock);

			trace_btrfs_space_reservation(info, "pinned",
						      cache->space_info->flags,
						      num_bytes, 1);
			percpu_counter_add_batch(&cache->space_info->total_bytes_pinned,
					   num_bytes,
					   BTRFS_TOTAL_BYTES_PINNED_BATCH);
			set_extent_dirty(info->pinned_extents,
					 bytenr, bytenr + num_bytes - 1,
					 GFP_NOFS | __GFP_NOFAIL);
		}

		spin_lock(&trans->transaction->dirty_bgs_lock);
		if (list_empty(&cache->dirty_list)) {
			list_add_tail(&cache->dirty_list,
				      &trans->transaction->dirty_bgs);
			trans->transaction->num_dirty_bgs++;
			btrfs_get_block_group(cache);
		}
		spin_unlock(&trans->transaction->dirty_bgs_lock);

		/*
		 * No longer have used bytes in this block group, queue it for
		 * deletion. We do this after adding the block group to the
		 * dirty list to avoid races between cleaner kthread and space
		 * cache writeout.
		 */
		if (!alloc && old_val == 0)
			btrfs_mark_bg_unused(cache);

		btrfs_put_block_group(cache);
		total -= num_bytes;
		bytenr += num_bytes;
	}
	return 0;
}
static u64 first_logical_byte(struct btrfs_fs_info *fs_info, u64 search_start)
{
	struct btrfs_block_group_cache *cache;
	u64 bytenr;

	spin_lock(&fs_info->block_group_cache_lock);
	bytenr = fs_info->first_logical_byte;
	spin_unlock(&fs_info->block_group_cache_lock);

	if (bytenr < (u64)-1)
		return bytenr;

	cache = btrfs_lookup_first_block_group(fs_info, search_start);
	if (!cache)
		return 0;

	bytenr = cache->key.objectid;
	btrfs_put_block_group(cache);

	return bytenr;
}

static int pin_down_extent(struct btrfs_fs_info *fs_info,
			   struct btrfs_block_group_cache *cache,
			   u64 bytenr, u64 num_bytes, int reserved)
{
	spin_lock(&cache->space_info->lock);
	spin_lock(&cache->lock);
	cache->pinned += num_bytes;
	cache->space_info->bytes_pinned += num_bytes;
	if (reserved) {
		cache->reserved -= num_bytes;
		cache->space_info->bytes_reserved -= num_bytes;
	}
	spin_unlock(&cache->lock);
	spin_unlock(&cache->space_info->lock);

	trace_btrfs_space_reservation(fs_info, "pinned",
				      cache->space_info->flags, num_bytes, 1);
	percpu_counter_add_batch(&cache->space_info->total_bytes_pinned,
		    num_bytes, BTRFS_TOTAL_BYTES_PINNED_BATCH);
	set_extent_dirty(fs_info->pinned_extents, bytenr,
			 bytenr + num_bytes - 1, GFP_NOFS | __GFP_NOFAIL);
	return 0;
}

/*
 * this function must be called within transaction
 */
int btrfs_pin_extent(struct btrfs_fs_info *fs_info,
		     u64 bytenr, u64 num_bytes, int reserved)
{
	struct btrfs_block_group_cache *cache;

	cache = btrfs_lookup_block_group(fs_info, bytenr);
	BUG_ON(!cache); /* Logic error */

	pin_down_extent(fs_info, cache, bytenr, num_bytes, reserved);

	btrfs_put_block_group(cache);
	return 0;
}

/*
 * this function must be called within transaction
 */
int btrfs_pin_extent_for_log_replay(struct btrfs_fs_info *fs_info,
				    u64 bytenr, u64 num_bytes)
{
	struct btrfs_block_group_cache *cache;
	int ret;

	cache = btrfs_lookup_block_group(fs_info, bytenr);
	if (!cache)
		return -EINVAL;

	/*
	 * pull in the free space cache (if any) so that our pin
	 * removes the free space from the cache. We have load_only set
	 * to one because the slow code to read in the free extents does check
	 * the pinned extents.
	 */
	cache_block_group(cache, 1);

	pin_down_extent(fs_info, cache, bytenr, num_bytes, 0);

	/* remove us from the free space cache (if we're there at all) */
	ret = btrfs_remove_free_space(cache, bytenr, num_bytes);
	btrfs_put_block_group(cache);
	return ret;
}
static int __exclude_logged_extent(struct btrfs_fs_info *fs_info,
				   u64 start, u64 num_bytes)
{
	int ret;
	struct btrfs_block_group_cache *block_group;
	struct btrfs_caching_control *caching_ctl;

	block_group = btrfs_lookup_block_group(fs_info, start);
	if (!block_group)
		return -EINVAL;

	cache_block_group(block_group, 0);
	caching_ctl = get_caching_control(block_group);

	if (!caching_ctl) {
		/* Logic error */
		BUG_ON(!block_group_cache_done(block_group));
		ret = btrfs_remove_free_space(block_group, start, num_bytes);
	} else {
		mutex_lock(&caching_ctl->mutex);

		if (start >= caching_ctl->progress) {
			ret = add_excluded_extent(fs_info, start, num_bytes);
		} else if (start + num_bytes <= caching_ctl->progress) {
			ret = btrfs_remove_free_space(block_group,
						      start, num_bytes);
		} else {
			num_bytes = caching_ctl->progress - start;
			ret = btrfs_remove_free_space(block_group,
						      start, num_bytes);
			if (ret)
				goto out_lock;

			num_bytes = (start + num_bytes) -
				caching_ctl->progress;
			start = caching_ctl->progress;
			ret = add_excluded_extent(fs_info, start, num_bytes);
		}
out_lock:
		mutex_unlock(&caching_ctl->mutex);
		put_caching_control(caching_ctl);
	}
	btrfs_put_block_group(block_group);
	return ret;
}

int btrfs_exclude_logged_extents(struct btrfs_fs_info *fs_info,
				 struct extent_buffer *eb)
{
	struct btrfs_file_extent_item *item;
	struct btrfs_key key;
	int found_type;
	int i;
	int ret = 0;

	if (!btrfs_fs_incompat(fs_info, MIXED_GROUPS))
		return 0;

	for (i = 0; i < btrfs_header_nritems(eb); i++) {
		btrfs_item_key_to_cpu(eb, &key, i);
		if (key.type != BTRFS_EXTENT_DATA_KEY)
			continue;
		item = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item);
		found_type = btrfs_file_extent_type(eb, item);
		if (found_type == BTRFS_FILE_EXTENT_INLINE)
			continue;
		if (btrfs_file_extent_disk_bytenr(eb, item) == 0)
			continue;
		key.objectid = btrfs_file_extent_disk_bytenr(eb, item);
		key.offset = btrfs_file_extent_disk_num_bytes(eb, item);
		ret = __exclude_logged_extent(fs_info, key.objectid, key.offset);
		if (ret)
			break;
	}

	return ret;
}
static void
btrfs_inc_block_group_reservations(struct btrfs_block_group_cache *bg)
{
	atomic_inc(&bg->reservations);
}

void btrfs_dec_block_group_reservations(struct btrfs_fs_info *fs_info,
					const u64 start)
{
	struct btrfs_block_group_cache *bg;

	bg = btrfs_lookup_block_group(fs_info, start);
	ASSERT(bg);
	if (atomic_dec_and_test(&bg->reservations))
		wake_up_var(&bg->reservations);
	btrfs_put_block_group(bg);
}

void btrfs_wait_block_group_reservations(struct btrfs_block_group_cache *bg)
{
	struct btrfs_space_info *space_info = bg->space_info;

	ASSERT(bg->ro);

	if (!(bg->flags & BTRFS_BLOCK_GROUP_DATA))
		return;

	/*
	 * Our block group is read only but before we set it to read only,
	 * some task might have allocated an extent from it already, but it
	 * has not yet created a respective ordered extent (and added it to a
	 * root's list of ordered extents).
	 * Therefore wait for any task currently allocating extents, since the
	 * block group's reservations counter is incremented while a read lock
	 * on the group's semaphore is held and decremented after releasing
	 * the read access on that semaphore and creating the ordered extent.
	 */
	down_write(&space_info->groups_sem);
	up_write(&space_info->groups_sem);

	wait_var_event(&bg->reservations, !atomic_read(&bg->reservations));
}

/**
 * btrfs_add_reserved_bytes - update the block_group and space info counters
 * @cache:	The cache we are manipulating
 * @ram_bytes:	The number of bytes of file content; this will be the same as
 *		@num_bytes except on the compression path.
 * @num_bytes:	The number of bytes in question
 * @delalloc:	The blocks are allocated for the delalloc write
 *
 * This is called by the allocator when it reserves space. If this is a
 * reservation and the block group has become read only we cannot make the
 * reservation and return -EAGAIN, otherwise this function always succeeds.
 */
static int btrfs_add_reserved_bytes(struct btrfs_block_group_cache *cache,
				    u64 ram_bytes, u64 num_bytes, int delalloc)
{
	struct btrfs_space_info *space_info = cache->space_info;
	int ret = 0;

	spin_lock(&space_info->lock);
	spin_lock(&cache->lock);
	if (cache->ro) {
		ret = -EAGAIN;
	} else {
		cache->reserved += num_bytes;
		space_info->bytes_reserved += num_bytes;
		space_info->bytes_may_use -= ram_bytes;
		if (delalloc)
			cache->delalloc_bytes += num_bytes;
	}
	spin_unlock(&cache->lock);
	spin_unlock(&space_info->lock);
	return ret;
}
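
/*
 * Counter flow implied by the helper above (informal summary): bytes
 * move from space_info->bytes_may_use (the optimistic reservation)
 * into bytes_reserved here, and later either into bytes_used via
 * update_block_group() when the allocation lands on disk, or back out
 * through btrfs_free_reserved_bytes() below if it is abandoned.
 */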
/**
 * btrfs_free_reserved_bytes - update the block_group and space info counters
 * @cache:	The cache we are manipulating
 * @num_bytes:	The number of bytes in question
 * @delalloc:	The blocks are allocated for the delalloc write
 *
 * This is called by somebody who is freeing space that was never actually used
 * on disk. For example if you reserve some space for a new leaf in transaction
 * A and before transaction A commits you free that leaf, you call this with
 * reserve set to 0 in order to clear the reservation.
 */
static void btrfs_free_reserved_bytes(struct btrfs_block_group_cache *cache,
				      u64 num_bytes, int delalloc)
{
	struct btrfs_space_info *space_info = cache->space_info;

	spin_lock(&space_info->lock);
	spin_lock(&cache->lock);
	if (cache->ro)
		space_info->bytes_readonly += num_bytes;
	cache->reserved -= num_bytes;
	space_info->bytes_reserved -= num_bytes;
	space_info->max_extent_size = 0;

	if (delalloc)
		cache->delalloc_bytes -= num_bytes;
	spin_unlock(&cache->lock);
	spin_unlock(&space_info->lock);
}

void btrfs_prepare_extent_commit(struct btrfs_fs_info *fs_info)
{
	struct btrfs_caching_control *next;
	struct btrfs_caching_control *caching_ctl;
	struct btrfs_block_group_cache *cache;

	down_write(&fs_info->commit_root_sem);

	list_for_each_entry_safe(caching_ctl, next,
				 &fs_info->caching_block_groups, list) {
		cache = caching_ctl->block_group;
		if (block_group_cache_done(cache)) {
			cache->last_byte_to_unpin = (u64)-1;
			list_del_init(&caching_ctl->list);
			put_caching_control(caching_ctl);
		} else {
			cache->last_byte_to_unpin = caching_ctl->progress;
		}
	}

	if (fs_info->pinned_extents == &fs_info->freed_extents[0])
		fs_info->pinned_extents = &fs_info->freed_extents[1];
	else
		fs_info->pinned_extents = &fs_info->freed_extents[0];

	up_write(&fs_info->commit_root_sem);

	update_global_block_rsv(fs_info);
}
/*
 * Returns the free cluster for the given space info and sets empty_cluster to
 * what it should be based on the mount options.
 */
static struct btrfs_free_cluster *
fetch_cluster_info(struct btrfs_fs_info *fs_info,
		   struct btrfs_space_info *space_info, u64 *empty_cluster)
{
	struct btrfs_free_cluster *ret = NULL;

	*empty_cluster = 0;
	if (btrfs_mixed_space_info(space_info))
		return ret;

	if (space_info->flags & BTRFS_BLOCK_GROUP_METADATA) {
		ret = &fs_info->meta_alloc_cluster;
		if (btrfs_test_opt(fs_info, SSD))
			*empty_cluster = SZ_2M;
		else
			*empty_cluster = SZ_64K;
	} else if ((space_info->flags & BTRFS_BLOCK_GROUP_DATA) &&
		   btrfs_test_opt(fs_info, SSD_SPREAD)) {
		*empty_cluster = SZ_2M;
		ret = &fs_info->data_alloc_cluster;
	}

	return ret;
}

static int unpin_extent_range(struct btrfs_fs_info *fs_info,
			      u64 start, u64 end,
			      const bool return_free_space)
{
	struct btrfs_block_group_cache *cache = NULL;
	struct btrfs_space_info *space_info;
	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
	struct btrfs_free_cluster *cluster = NULL;
	u64 len;
	u64 total_unpinned = 0;
	u64 empty_cluster = 0;
	bool readonly;

	while (start <= end) {
		readonly = false;
		if (!cache ||
		    start >= cache->key.objectid + cache->key.offset) {
			if (cache)
				btrfs_put_block_group(cache);
			total_unpinned = 0;
			cache = btrfs_lookup_block_group(fs_info, start);
			BUG_ON(!cache); /* Logic error */

			cluster = fetch_cluster_info(fs_info,
						     cache->space_info,
						     &empty_cluster);
			empty_cluster <<= 1;
		}

		len = cache->key.objectid + cache->key.offset - start;
		len = min(len, end + 1 - start);

		if (start < cache->last_byte_to_unpin) {
			len = min(len, cache->last_byte_to_unpin - start);
			if (return_free_space)
				btrfs_add_free_space(cache, start, len);
		}

		start += len;
		total_unpinned += len;
		space_info = cache->space_info;

		/*
		 * If this space cluster has been marked as fragmented and we've
		 * unpinned enough in this block group to potentially allow a
		 * cluster to be created inside of it go ahead and clear the
		 * fragmented check.
		 */
		if (cluster && cluster->fragmented &&
		    total_unpinned > empty_cluster) {
			spin_lock(&cluster->lock);
			cluster->fragmented = 0;
			spin_unlock(&cluster->lock);
		}

		spin_lock(&space_info->lock);
		spin_lock(&cache->lock);
		cache->pinned -= len;
		space_info->bytes_pinned -= len;

		trace_btrfs_space_reservation(fs_info, "pinned",
					      space_info->flags, len, 0);
		space_info->max_extent_size = 0;
		percpu_counter_add_batch(&space_info->total_bytes_pinned,
			    -len, BTRFS_TOTAL_BYTES_PINNED_BATCH);
		if (cache->ro) {
			space_info->bytes_readonly += len;
			readonly = true;
		}
		spin_unlock(&cache->lock);
		if (!readonly && return_free_space &&
		    global_rsv->space_info == space_info) {
			u64 to_add = len;

			spin_lock(&global_rsv->lock);
			if (!global_rsv->full) {
				to_add = min(len, global_rsv->size -
					     global_rsv->reserved);
				global_rsv->reserved += to_add;
				space_info->bytes_may_use += to_add;
				if (global_rsv->reserved >= global_rsv->size)
					global_rsv->full = 1;
				trace_btrfs_space_reservation(fs_info,
							      "space_info",
							      space_info->flags,
							      to_add, 1);
				len -= to_add;
			}
			spin_unlock(&global_rsv->lock);
			/* Add to any tickets we may have */
			if (len)
				space_info_add_new_bytes(fs_info, space_info,
							 len);
		}
		spin_unlock(&space_info->lock);
	}

	if (cache)
		btrfs_put_block_group(cache);
	return 0;
}
int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_block_group_cache *block_group, *tmp;
	struct list_head *deleted_bgs;
	struct extent_io_tree *unpin;
	u64 start;
	u64 end;
	int ret;

	if (fs_info->pinned_extents == &fs_info->freed_extents[0])
		unpin = &fs_info->freed_extents[1];
	else
		unpin = &fs_info->freed_extents[0];

	while (!trans->aborted) {
		mutex_lock(&fs_info->unused_bg_unpin_mutex);
		ret = find_first_extent_bit(unpin, 0, &start, &end,
					    EXTENT_DIRTY, NULL);
		if (ret) {
			mutex_unlock(&fs_info->unused_bg_unpin_mutex);
			break;
		}

		if (btrfs_test_opt(fs_info, DISCARD))
			ret = btrfs_discard_extent(fs_info, start,
						   end + 1 - start, NULL);

		clear_extent_dirty(unpin, start, end);
		unpin_extent_range(fs_info, start, end, true);
		mutex_unlock(&fs_info->unused_bg_unpin_mutex);
		cond_resched();
	}

	/*
	 * Transaction is finished. We don't need the lock anymore. We
	 * do need to clean up the block groups in case of a transaction
	 * abort.
	 */
	deleted_bgs = &trans->transaction->deleted_bgs;
	list_for_each_entry_safe(block_group, tmp, deleted_bgs, bg_list) {
		u64 trimmed = 0;

		ret = -EROFS;
		if (!trans->aborted)
			ret = btrfs_discard_extent(fs_info,
						   block_group->key.objectid,
						   block_group->key.offset,
						   &trimmed);

		list_del_init(&block_group->bg_list);
		btrfs_put_block_group_trimming(block_group);
		btrfs_put_block_group(block_group);

		if (ret) {
			const char *errstr = btrfs_decode_error(ret);
			btrfs_warn(fs_info,
			   "discard failed while removing blockgroup: errno=%d %s",
				   ret, errstr);
		}
	}

	return 0;
}
static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
			       struct btrfs_delayed_ref_node *node, u64 parent,
			       u64 root_objectid, u64 owner_objectid,
			       u64 owner_offset, int refs_to_drop,
			       struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_fs_info *info = trans->fs_info;
	struct btrfs_key key;
	struct btrfs_path *path;
	struct btrfs_root *extent_root = info->extent_root;
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	struct btrfs_extent_inline_ref *iref;
	int ret;
	int is_data;
	int extent_slot = 0;
	int found_extent = 0;
	int num_to_del = 1;
	u32 item_size;
	u64 refs;
	u64 bytenr = node->bytenr;
	u64 num_bytes = node->num_bytes;
	int last_ref = 0;
	bool skinny_metadata = btrfs_fs_incompat(info, SKINNY_METADATA);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->reada = READA_FORWARD;
	path->leave_spinning = 1;

	is_data = owner_objectid >= BTRFS_FIRST_FREE_OBJECTID;
	BUG_ON(!is_data && refs_to_drop != 1);

	if (is_data)
		skinny_metadata = false;

	ret = lookup_extent_backref(trans, path, &iref, bytenr, num_bytes,
				    parent, root_objectid, owner_objectid,
				    owner_offset);
	if (ret == 0) {
		extent_slot = path->slots[0];
		while (extent_slot >= 0) {
			btrfs_item_key_to_cpu(path->nodes[0], &key,
					      extent_slot);
			if (key.objectid != bytenr)
				break;
			if (key.type == BTRFS_EXTENT_ITEM_KEY &&
			    key.offset == num_bytes) {
				found_extent = 1;
				break;
			}
			if (key.type == BTRFS_METADATA_ITEM_KEY &&
			    key.offset == owner_objectid) {
				found_extent = 1;
				break;
			}
			if (path->slots[0] - extent_slot > 5)
				break;
			extent_slot--;
		}

		if (!found_extent) {
			BUG_ON(iref);
			ret = remove_extent_backref(trans, path, NULL,
						    refs_to_drop,
						    is_data, &last_ref);
			if (ret) {
				btrfs_abort_transaction(trans, ret);
				goto out;
			}
			btrfs_release_path(path);
			path->leave_spinning = 1;

			key.objectid = bytenr;
			key.type = BTRFS_EXTENT_ITEM_KEY;
			key.offset = num_bytes;

			if (!is_data && skinny_metadata) {
				key.type = BTRFS_METADATA_ITEM_KEY;
				key.offset = owner_objectid;
			}

			ret = btrfs_search_slot(trans, extent_root,
						&key, path, -1, 1);
			if (ret > 0 && skinny_metadata && path->slots[0]) {
				/*
				 * Couldn't find our skinny metadata item,
				 * see if we have ye olde extent item.
				 */
				path->slots[0]--;
				btrfs_item_key_to_cpu(path->nodes[0], &key,
						      path->slots[0]);
				if (key.objectid == bytenr &&
				    key.type == BTRFS_EXTENT_ITEM_KEY &&
				    key.offset == num_bytes)
					ret = 0;
			}

			if (ret > 0 && skinny_metadata) {
				skinny_metadata = false;
				key.objectid = bytenr;
				key.type = BTRFS_EXTENT_ITEM_KEY;
				key.offset = num_bytes;
				btrfs_release_path(path);
				ret = btrfs_search_slot(trans, extent_root,
							&key, path, -1, 1);
			}

			if (ret) {
				btrfs_err(info,
					  "umm, got %d back from search, was looking for %llu",
					  ret, bytenr);
				if (ret > 0)
					btrfs_print_leaf(path->nodes[0]);
			}
			if (ret < 0) {
				btrfs_abort_transaction(trans, ret);
				goto out;
			}
			extent_slot = path->slots[0];
		}
	} else if (WARN_ON(ret == -ENOENT)) {
		btrfs_print_leaf(path->nodes[0]);
		btrfs_err(info,
			"unable to find ref byte nr %llu parent %llu root %llu owner %llu offset %llu",
			bytenr, parent, root_objectid, owner_objectid,
			owner_offset);
		btrfs_abort_transaction(trans, ret);
		goto out;
	} else {
		btrfs_abort_transaction(trans, ret);
		goto out;
	}

	leaf = path->nodes[0];
	item_size = btrfs_item_size_nr(leaf, extent_slot);
	if (unlikely(item_size < sizeof(*ei))) {
		ret = -EINVAL;
		btrfs_print_v0_err(info);
		btrfs_abort_transaction(trans, ret);
		goto out;
	}

	ei = btrfs_item_ptr(leaf, extent_slot,
			    struct btrfs_extent_item);
	if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID &&
	    key.type == BTRFS_EXTENT_ITEM_KEY) {
		struct btrfs_tree_block_info *bi;

		BUG_ON(item_size < sizeof(*ei) + sizeof(*bi));
		bi = (struct btrfs_tree_block_info *)(ei + 1);
		WARN_ON(owner_objectid != btrfs_tree_block_level(leaf, bi));
	}

	refs = btrfs_extent_refs(leaf, ei);
	if (refs < refs_to_drop) {
		btrfs_err(info,
			  "trying to drop %d refs but we only have %Lu for bytenr %Lu",
			  refs_to_drop, refs, bytenr);
		ret = -EINVAL;
		btrfs_abort_transaction(trans, ret);
		goto out;
	}
	refs -= refs_to_drop;

	if (refs > 0) {
		if (extent_op)
			__run_delayed_extent_op(extent_op, leaf, ei);
		/*
		 * In the case of inline back ref, reference count will
		 * be updated by remove_extent_backref
		 */
		if (iref) {
			BUG_ON(!found_extent);
		} else {
			btrfs_set_extent_refs(leaf, ei, refs);
			btrfs_mark_buffer_dirty(leaf);
		}
		if (found_extent) {
			ret = remove_extent_backref(trans, path, iref,
						    refs_to_drop, is_data,
						    &last_ref);
			if (ret) {
				btrfs_abort_transaction(trans, ret);
				goto out;
			}
		}
	} else {
		if (found_extent) {
			BUG_ON(is_data && refs_to_drop !=
			       extent_data_ref_count(path, iref));
			if (iref) {
				BUG_ON(path->slots[0] != extent_slot);
			} else {
				BUG_ON(path->slots[0] != extent_slot + 1);
				path->slots[0] = extent_slot;
				num_to_del = 2;
			}
		}

		last_ref = 1;
		ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
				      num_to_del);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto out;
		}
		btrfs_release_path(path);

		if (is_data) {
			ret = btrfs_del_csums(trans, info, bytenr, num_bytes);
			if (ret) {
				btrfs_abort_transaction(trans, ret);
				goto out;
			}
		}

		ret = add_to_free_space_tree(trans, bytenr, num_bytes);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto out;
		}

		ret = update_block_group(trans, info, bytenr, num_bytes, 0);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto out;
		}
	}
	btrfs_release_path(path);

out:
	btrfs_free_path(path);
	return ret;
}
/*
 * when we free a block, it is possible (and likely) that we free the last
 * delayed ref for that extent as well. This searches the delayed ref tree for
 * a given extent, and if there are no other delayed refs to be processed, it
 * removes it from the tree.
 */
static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
				      u64 bytenr)
{
	struct btrfs_delayed_ref_head *head;
	struct btrfs_delayed_ref_root *delayed_refs;
	int ret = 0;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);
	head = btrfs_find_delayed_ref_head(delayed_refs, bytenr);
	if (!head)
		goto out_delayed_unlock;

	spin_lock(&head->lock);
	if (!RB_EMPTY_ROOT(&head->ref_tree.rb_root))
		goto out;

	if (head->extent_op) {
		if (!head->must_insert_reserved)
			goto out;
		btrfs_free_delayed_extent_op(head->extent_op);
		head->extent_op = NULL;
	}

	/*
	 * waiting for the lock here would deadlock. If someone else has it
	 * locked they are already in the process of dropping it anyway
	 */
	if (!mutex_trylock(&head->mutex))
		goto out;

	/*
	 * at this point we have a head with no other entries. Go
	 * ahead and process it.
	 */
	rb_erase_cached(&head->href_node, &delayed_refs->href_root);
	RB_CLEAR_NODE(&head->href_node);
	atomic_dec(&delayed_refs->num_entries);

	/*
	 * we don't take a ref on the node because we're removing it from the
	 * tree, so we just steal the ref the tree was holding.
	 */
	delayed_refs->num_heads--;
	if (head->processing == 0)
		delayed_refs->num_heads_ready--;
	head->processing = 0;
	spin_unlock(&head->lock);
	spin_unlock(&delayed_refs->lock);

	BUG_ON(head->extent_op);
	if (head->must_insert_reserved)
		ret = 1;

	mutex_unlock(&head->mutex);
	btrfs_put_delayed_ref_head(head);
	return ret;
out:
	spin_unlock(&head->lock);

out_delayed_unlock:
	spin_unlock(&delayed_refs->lock);
	return 0;
}
void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   struct extent_buffer *buf,
			   u64 parent, int last_ref)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int pin = 1;
	int ret;

	if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
		int old_ref_mod, new_ref_mod;

		btrfs_ref_tree_mod(root, buf->start, buf->len, parent,
				   root->root_key.objectid,
				   btrfs_header_level(buf), 0,
				   BTRFS_DROP_DELAYED_REF);
		ret = btrfs_add_delayed_tree_ref(trans, buf->start,
						 buf->len, parent,
						 root->root_key.objectid,
						 btrfs_header_level(buf),
						 BTRFS_DROP_DELAYED_REF, NULL,
						 &old_ref_mod, &new_ref_mod);
		BUG_ON(ret); /* -ENOMEM */
		pin = old_ref_mod >= 0 && new_ref_mod < 0;
	}

	if (last_ref && btrfs_header_generation(buf) == trans->transid) {
		struct btrfs_block_group_cache *cache;

		if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
			ret = check_ref_cleanup(trans, buf->start);
			if (!ret)
				goto out;
		}

		pin = 0;
		cache = btrfs_lookup_block_group(fs_info, buf->start);

		if (btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
			pin_down_extent(fs_info, cache, buf->start,
					buf->len, 1);
			btrfs_put_block_group(cache);
			goto out;
		}

		WARN_ON(test_bit(EXTENT_BUFFER_DIRTY, &buf->bflags));

		btrfs_add_free_space(cache, buf->start, buf->len);
		btrfs_free_reserved_bytes(cache, buf->len, 0);
		btrfs_put_block_group(cache);
		trace_btrfs_reserved_extent_free(fs_info, buf->start, buf->len);
	}
out:
	if (pin)
		add_pinned_bytes(fs_info, buf->len, true,
				 root->root_key.objectid);

	if (last_ref) {
		/*
		 * Deleting the buffer, clear the corrupt flag since it doesn't
		 * matter anymore.
		 */
		clear_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags);
	}
}

/* Can return -ENOMEM */
int btrfs_free_extent(struct btrfs_trans_handle *trans,
		      struct btrfs_root *root,
		      u64 bytenr, u64 num_bytes, u64 parent, u64 root_objectid,
		      u64 owner, u64 offset)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int old_ref_mod, new_ref_mod;
	int ret;

	if (btrfs_is_testing(fs_info))
		return 0;

	if (root_objectid != BTRFS_TREE_LOG_OBJECTID)
		btrfs_ref_tree_mod(root, bytenr, num_bytes, parent,
				   root_objectid, owner, offset,
				   BTRFS_DROP_DELAYED_REF);

	/*
	 * tree log blocks never actually go into the extent allocation
	 * tree, just update pinning info and exit early.
	 */
	if (root_objectid == BTRFS_TREE_LOG_OBJECTID) {
		WARN_ON(owner >= BTRFS_FIRST_FREE_OBJECTID);
		/* unlocks the pinned mutex */
		btrfs_pin_extent(fs_info, bytenr, num_bytes, 1);
		old_ref_mod = new_ref_mod = 0;
		ret = 0;
	} else if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		ret = btrfs_add_delayed_tree_ref(trans, bytenr,
						 num_bytes, parent,
						 root_objectid, (int)owner,
						 BTRFS_DROP_DELAYED_REF, NULL,
						 &old_ref_mod, &new_ref_mod);
	} else {
		ret = btrfs_add_delayed_data_ref(trans, bytenr,
						 num_bytes, parent,
						 root_objectid, owner, offset,
						 0, BTRFS_DROP_DELAYED_REF,
						 &old_ref_mod, &new_ref_mod);
	}

	if (ret == 0 && old_ref_mod >= 0 && new_ref_mod < 0) {
		bool metadata = owner < BTRFS_FIRST_FREE_OBJECTID;

		add_pinned_bytes(fs_info, num_bytes, metadata, root_objectid);
	}

	return ret;
}
/*
 * when we wait for progress in the block group caching, it's because
 * our allocation attempt failed at least once. So, we must sleep
 * and let some progress happen before we try again.
 *
 * This function will sleep at least once waiting for new free space to
 * show up, and then it will check the block group free space numbers
 * for our min num_bytes. Another option is to have it go ahead
 * and look in the rbtree for a free extent of a given size, but this
 * is a good start.
 *
 * Callers of this must check if cache->cached == BTRFS_CACHE_ERROR before using
 * any of the information in this block group.
 */
static noinline void
wait_block_group_cache_progress(struct btrfs_block_group_cache *cache,
				u64 num_bytes)
{
	struct btrfs_caching_control *caching_ctl;

	caching_ctl = get_caching_control(cache);
	if (!caching_ctl)
		return;

	wait_event(caching_ctl->wait, block_group_cache_done(cache) ||
		   (cache->free_space_ctl->free_space >= num_bytes));

	put_caching_control(caching_ctl);
}

static noinline int
wait_block_group_cache_done(struct btrfs_block_group_cache *cache)
{
	struct btrfs_caching_control *caching_ctl;
	int ret = 0;

	caching_ctl = get_caching_control(cache);
	if (!caching_ctl)
		return (cache->cached == BTRFS_CACHE_ERROR) ? -EIO : 0;

	wait_event(caching_ctl->wait, block_group_cache_done(cache));
	if (cache->cached == BTRFS_CACHE_ERROR)
		ret = -EIO;
	put_caching_control(caching_ctl);
	return ret;
}
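
/*
 * Fallback stages for find_free_extent() below, tried in order (these
 * annotations summarize how the loop counter appears to be used and
 * are not part of the original source): LOOP_CACHING_NOWAIT only scans
 * block groups that are already cached, LOOP_CACHING_WAIT waits for
 * caching to make progress, LOOP_ALLOC_CHUNK allocates a new chunk
 * before retrying, and LOOP_NO_EMPTY_SIZE retries with the empty size
 * and empty cluster requirements dropped as a last resort.
 */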
enum btrfs_loop_type {
	LOOP_CACHING_NOWAIT = 0,
	LOOP_CACHING_WAIT = 1,
	LOOP_ALLOC_CHUNK = 2,
	LOOP_NO_EMPTY_SIZE = 3,
};

static inline void
btrfs_lock_block_group(struct btrfs_block_group_cache *cache,
		       int delalloc)
{
	if (delalloc)
		down_read(&cache->data_rwsem);
}

static inline void
btrfs_grab_block_group(struct btrfs_block_group_cache *cache,
		       int delalloc)
{
	btrfs_get_block_group(cache);
	if (delalloc)
		down_read(&cache->data_rwsem);
}

static struct btrfs_block_group_cache *
btrfs_lock_cluster(struct btrfs_block_group_cache *block_group,
		   struct btrfs_free_cluster *cluster,
		   int delalloc)
{
	struct btrfs_block_group_cache *used_bg = NULL;

	spin_lock(&cluster->refill_lock);
	while (1) {
		used_bg = cluster->block_group;
		if (!used_bg)
			return NULL;

		if (used_bg == block_group)
			return used_bg;

		btrfs_get_block_group(used_bg);

		if (!delalloc)
			return used_bg;

		if (down_read_trylock(&used_bg->data_rwsem))
			return used_bg;

		spin_unlock(&cluster->refill_lock);

		/* We should only have one-level nested. */
		down_read_nested(&used_bg->data_rwsem, SINGLE_DEPTH_NESTING);

		spin_lock(&cluster->refill_lock);
		if (used_bg == cluster->block_group)
			return used_bg;

		up_read(&used_bg->data_rwsem);
		btrfs_put_block_group(used_bg);
	}
}

static inline void
btrfs_release_block_group(struct btrfs_block_group_cache *cache,
			  int delalloc)
{
	if (delalloc)
		up_read(&cache->data_rwsem);
	btrfs_put_block_group(cache);
}
/*
 * walks the btree of allocated extents and finds a hole of a given size.
 * The key ins is changed to record the hole:
 * ins->objectid == start position
 * ins->flags = BTRFS_EXTENT_ITEM_KEY
 * ins->offset == the size of the hole.
 * Any available blocks before search_start are skipped.
 *
 * If there is no suitable free space, we will record the max size of
 * the free space extent currently available.
 */
static noinline int find_free_extent(struct btrfs_fs_info *fs_info,
				u64 ram_bytes, u64 num_bytes, u64 empty_size,
				u64 hint_byte, struct btrfs_key *ins,
				u64 flags, int delalloc)
{
	int ret = 0;
	struct btrfs_root *root = fs_info->extent_root;
	struct btrfs_free_cluster *last_ptr = NULL;
	struct btrfs_block_group_cache *block_group = NULL;
	u64 search_start = 0;
	u64 max_extent_size = 0;
	u64 max_free_space = 0;
	u64 empty_cluster = 0;
	struct btrfs_space_info *space_info;
	int loop = 0;
	int index = btrfs_bg_flags_to_raid_index(flags);
	bool failed_cluster_refill = false;
	bool failed_alloc = false;
	bool use_cluster = true;
	bool have_caching_bg = false;
	bool orig_have_caching_bg = false;
	bool full_search = false;

	WARN_ON(num_bytes < fs_info->sectorsize);
	ins->type = BTRFS_EXTENT_ITEM_KEY;
	ins->objectid = 0;
	ins->offset = 0;

	trace_find_free_extent(fs_info, num_bytes, empty_size, flags);

	space_info = __find_space_info(fs_info, flags);
	if (!space_info) {
		btrfs_err(fs_info, "No space info for %llu", flags);
		return -ENOSPC;
	}

	/*
	 * If our free space is heavily fragmented we may not be able to make
	 * big contiguous allocations, so instead of doing the expensive search
	 * for free space, simply return ENOSPC with our max_extent_size so we
	 * can go ahead and search for a more manageable chunk.
	 *
	 * If our max_extent_size is large enough for our allocation simply
	 * disable clustering since we will likely not be able to find enough
	 * space to create a cluster and induce latency trying.
	 */
	if (unlikely(space_info->max_extent_size)) {
		spin_lock(&space_info->lock);
		if (space_info->max_extent_size &&
		    num_bytes > space_info->max_extent_size) {
			ins->offset = space_info->max_extent_size;
			spin_unlock(&space_info->lock);
			return -ENOSPC;
		} else if (space_info->max_extent_size) {
			use_cluster = false;
		}
		spin_unlock(&space_info->lock);
	}

	last_ptr = fetch_cluster_info(fs_info, space_info, &empty_cluster);
	if (last_ptr) {
		spin_lock(&last_ptr->lock);
		if (last_ptr->block_group)
			hint_byte = last_ptr->window_start;
		if (last_ptr->fragmented) {
			/*
			 * We still set window_start so we can keep track of the
			 * last place we found an allocation to try and save
			 * some time.
			 */
			hint_byte = last_ptr->window_start;
			use_cluster = false;
		}
		spin_unlock(&last_ptr->lock);
	}

	search_start = max(search_start, first_logical_byte(fs_info, 0));
	search_start = max(search_start, hint_byte);
	if (search_start == hint_byte) {
		block_group = btrfs_lookup_block_group(fs_info, search_start);
		/*
		 * we don't want to use the block group if it doesn't match our
		 * allocation bits, or if it's not cached.
		 *
		 * However if we are re-searching with an ideal block group
		 * picked out then we don't care that the block group is cached.
		 */
		if (block_group && block_group_bits(block_group, flags) &&
		    block_group->cached != BTRFS_CACHE_NO) {
			down_read(&space_info->groups_sem);
			if (list_empty(&block_group->list) ||
			    block_group->ro) {
				/*
				 * someone is removing this block group,
				 * we can't jump into the have_block_group
				 * target because our list pointers are not
				 * valid
				 */
				btrfs_put_block_group(block_group);
				up_read(&space_info->groups_sem);
			} else {
				index = btrfs_bg_flags_to_raid_index(
						block_group->flags);
				btrfs_lock_block_group(block_group, delalloc);
				goto have_block_group;
			}
		} else if (block_group) {
			btrfs_put_block_group(block_group);
		}
	}
  6477. search:
  6478. have_caching_bg = false;
  6479. if (index == 0 || index == btrfs_bg_flags_to_raid_index(flags))
  6480. full_search = true;
  6481. down_read(&space_info->groups_sem);
  6482. list_for_each_entry(block_group, &space_info->block_groups[index],
  6483. list) {
  6484. u64 offset;
  6485. int cached;
  6486. /* If the block group is read-only, we can skip it entirely. */
  6487. if (unlikely(block_group->ro))
  6488. continue;
  6489. btrfs_grab_block_group(block_group, delalloc);
  6490. search_start = block_group->key.objectid;
  6491. /*
  6492. * this can happen if we end up cycling through all the
  6493. * raid types, but we want to make sure we only allocate
  6494. * for the proper type.
  6495. */
  6496. if (!block_group_bits(block_group, flags)) {
  6497. u64 extra = BTRFS_BLOCK_GROUP_DUP |
  6498. BTRFS_BLOCK_GROUP_RAID1 |
  6499. BTRFS_BLOCK_GROUP_RAID5 |
  6500. BTRFS_BLOCK_GROUP_RAID6 |
  6501. BTRFS_BLOCK_GROUP_RAID10;
  6502. /*
  6503. * if they asked for extra copies and this block group
  6504. * doesn't provide them, bail. This does allow us to
  6505. * fill raid0 from raid1.
  6506. */
  6507. if ((flags & extra) && !(block_group->flags & extra))
  6508. goto loop;
  6509. }
  6510. have_block_group:
  6511. cached = block_group_cache_done(block_group);
  6512. if (unlikely(!cached)) {
  6513. have_caching_bg = true;
  6514. ret = cache_block_group(block_group, 0);
  6515. BUG_ON(ret < 0);
  6516. ret = 0;
  6517. }
  6518. if (unlikely(block_group->cached == BTRFS_CACHE_ERROR))
  6519. goto loop;
  6520. /*
  6521. * Ok we want to try and use the cluster allocator, so
  6522. * lets look there
  6523. */
  6524. if (last_ptr && use_cluster) {
  6525. struct btrfs_block_group_cache *used_block_group;
  6526. unsigned long aligned_cluster;
  6527. /*
  6528. * the refill lock keeps out other
  6529. * people trying to start a new cluster
  6530. */
  6531. used_block_group = btrfs_lock_cluster(block_group,
  6532. last_ptr,
  6533. delalloc);
  6534. if (!used_block_group)
  6535. goto refill_cluster;
  6536. if (used_block_group != block_group &&
  6537. (used_block_group->ro ||
  6538. !block_group_bits(used_block_group, flags)))
  6539. goto release_cluster;
  6540. offset = btrfs_alloc_from_cluster(used_block_group,
  6541. last_ptr,
  6542. num_bytes,
  6543. used_block_group->key.objectid,
  6544. &max_extent_size);
  6545. if (offset) {
  6546. /* we have a block, we're done */
  6547. spin_unlock(&last_ptr->refill_lock);
  6548. trace_btrfs_reserve_extent_cluster(
  6549. used_block_group,
  6550. search_start, num_bytes);
  6551. if (used_block_group != block_group) {
  6552. btrfs_release_block_group(block_group,
  6553. delalloc);
  6554. block_group = used_block_group;
  6555. }
  6556. goto checks;
  6557. }
  6558. WARN_ON(last_ptr->block_group != used_block_group);
  6559. release_cluster:
  6560. /* If we are on LOOP_NO_EMPTY_SIZE, we can't
  6561. * set up a new clusters, so lets just skip it
  6562. * and let the allocator find whatever block
  6563. * it can find. If we reach this point, we
  6564. * will have tried the cluster allocator
  6565. * plenty of times and not have found
  6566. * anything, so we are likely way too
  6567. * fragmented for the clustering stuff to find
  6568. * anything.
  6569. *
  6570. * However, if the cluster is taken from the
  6571. * current block group, release the cluster
  6572. * first, so that we stand a better chance of
  6573. * succeeding in the unclustered
  6574. * allocation. */
  6575. if (loop >= LOOP_NO_EMPTY_SIZE &&
  6576. used_block_group != block_group) {
  6577. spin_unlock(&last_ptr->refill_lock);
  6578. btrfs_release_block_group(used_block_group,
  6579. delalloc);
  6580. goto unclustered_alloc;
  6581. }
  6582. /*
  6583. * this cluster didn't work out, free it and
  6584. * start over
  6585. */
  6586. btrfs_return_cluster_to_free_space(NULL, last_ptr);
  6587. if (used_block_group != block_group)
  6588. btrfs_release_block_group(used_block_group,
  6589. delalloc);
  6590. refill_cluster:
  6591. if (loop >= LOOP_NO_EMPTY_SIZE) {
  6592. spin_unlock(&last_ptr->refill_lock);
  6593. goto unclustered_alloc;
  6594. }
  6595. aligned_cluster = max_t(unsigned long,
  6596. empty_cluster + empty_size,
  6597. block_group->full_stripe_len);
  6598. /* allocate a cluster in this block group */
  6599. ret = btrfs_find_space_cluster(fs_info, block_group,
  6600. last_ptr, search_start,
  6601. num_bytes,
  6602. aligned_cluster);
  6603. if (ret == 0) {
  6604. /*
  6605. * now pull our allocation out of this
  6606. * cluster
  6607. */
  6608. offset = btrfs_alloc_from_cluster(block_group,
  6609. last_ptr,
  6610. num_bytes,
  6611. search_start,
  6612. &max_extent_size);
  6613. if (offset) {
  6614. /* we found one, proceed */
  6615. spin_unlock(&last_ptr->refill_lock);
  6616. trace_btrfs_reserve_extent_cluster(
  6617. block_group, search_start,
  6618. num_bytes);
  6619. goto checks;
  6620. }
  6621. } else if (!cached && loop > LOOP_CACHING_NOWAIT
  6622. && !failed_cluster_refill) {
  6623. spin_unlock(&last_ptr->refill_lock);
  6624. failed_cluster_refill = true;
  6625. wait_block_group_cache_progress(block_group,
  6626. num_bytes + empty_cluster + empty_size);
  6627. goto have_block_group;
  6628. }
  6629. /*
  6630. * at this point we either didn't find a cluster
  6631. * or we weren't able to allocate a block from our
  6632. * cluster. Free the cluster we've been trying
  6633. * to use, and go to the next block group
  6634. */
  6635. btrfs_return_cluster_to_free_space(NULL, last_ptr);
  6636. spin_unlock(&last_ptr->refill_lock);
  6637. goto loop;
  6638. }
  6639. unclustered_alloc:
  6640. /*
  6641. * We are doing an unclustered alloc, set the fragmented flag so
  6642. * we don't bother trying to setup a cluster again until we get
  6643. * more space.
  6644. */
  6645. if (unlikely(last_ptr)) {
  6646. spin_lock(&last_ptr->lock);
  6647. last_ptr->fragmented = 1;
  6648. spin_unlock(&last_ptr->lock);
  6649. }
  6650. if (cached) {
  6651. struct btrfs_free_space_ctl *ctl =
  6652. block_group->free_space_ctl;
  6653. spin_lock(&ctl->tree_lock);
  6654. if (ctl->free_space <
  6655. num_bytes + empty_cluster + empty_size) {
  6656. max_free_space = max(max_free_space,
  6657. ctl->free_space);
  6658. spin_unlock(&ctl->tree_lock);
  6659. goto loop;
  6660. }
  6661. spin_unlock(&ctl->tree_lock);
  6662. }
  6663. offset = btrfs_find_space_for_alloc(block_group, search_start,
  6664. num_bytes, empty_size,
  6665. &max_extent_size);
  6666. /*
  6667. * If we didn't find a chunk, and we haven't failed on this
  6668. * block group before, and this block group is in the middle of
  6669. * caching and we are ok with waiting, then go ahead and wait
  6670. * for progress to be made, and set failed_alloc to true.
  6671. *
  6672. * If failed_alloc is true then we've already waited on this
  6673. * block group once and should move on to the next block group.
  6674. */
  6675. if (!offset && !failed_alloc && !cached &&
  6676. loop > LOOP_CACHING_NOWAIT) {
  6677. wait_block_group_cache_progress(block_group,
  6678. num_bytes + empty_size);
  6679. failed_alloc = true;
  6680. goto have_block_group;
  6681. } else if (!offset) {
  6682. goto loop;
  6683. }
  6684. checks:
  6685. search_start = round_up(offset, fs_info->stripesize);
  6686. /* move on to the next group */
  6687. if (search_start + num_bytes >
  6688. block_group->key.objectid + block_group->key.offset) {
  6689. btrfs_add_free_space(block_group, offset, num_bytes);
  6690. goto loop;
  6691. }
  6692. if (offset < search_start)
  6693. btrfs_add_free_space(block_group, offset,
  6694. search_start - offset);
  6695. ret = btrfs_add_reserved_bytes(block_group, ram_bytes,
  6696. num_bytes, delalloc);
  6697. if (ret == -EAGAIN) {
  6698. btrfs_add_free_space(block_group, offset, num_bytes);
  6699. goto loop;
  6700. }
  6701. btrfs_inc_block_group_reservations(block_group);
  6702. /* we are all good, lets return */
  6703. ins->objectid = search_start;
  6704. ins->offset = num_bytes;
  6705. trace_btrfs_reserve_extent(block_group, search_start, num_bytes);
  6706. btrfs_release_block_group(block_group, delalloc);
  6707. break;
  6708. loop:
  6709. failed_cluster_refill = false;
  6710. failed_alloc = false;
  6711. BUG_ON(btrfs_bg_flags_to_raid_index(block_group->flags) !=
  6712. index);
  6713. btrfs_release_block_group(block_group, delalloc);
  6714. cond_resched();
  6715. }
  6716. up_read(&space_info->groups_sem);
  6717. if ((loop == LOOP_CACHING_NOWAIT) && have_caching_bg
  6718. && !orig_have_caching_bg)
  6719. orig_have_caching_bg = true;
  6720. if (!ins->objectid && loop >= LOOP_CACHING_WAIT && have_caching_bg)
  6721. goto search;
  6722. if (!ins->objectid && ++index < BTRFS_NR_RAID_TYPES)
  6723. goto search;
  6724. /*
  6725. * LOOP_CACHING_NOWAIT, search partially cached block groups, kicking
  6726. * caching kthreads as we move along
  6727. * LOOP_CACHING_WAIT, search everything, and wait if our bg is caching
  6728. * LOOP_ALLOC_CHUNK, force a chunk allocation and try again
  6729. * LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try
  6730. * again
  6731. */
  6732. if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE) {
  6733. index = 0;
  6734. if (loop == LOOP_CACHING_NOWAIT) {
  6735. /*
  6736. * We want to skip the LOOP_CACHING_WAIT step if we
  6737. * don't have any uncached bgs and we've already done a
  6738. * full search through.
  6739. */
  6740. if (orig_have_caching_bg || !full_search)
  6741. loop = LOOP_CACHING_WAIT;
  6742. else
  6743. loop = LOOP_ALLOC_CHUNK;
  6744. } else {
  6745. loop++;
  6746. }
  6747. if (loop == LOOP_ALLOC_CHUNK) {
  6748. struct btrfs_trans_handle *trans;
  6749. int exist = 0;
  6750. trans = current->journal_info;
  6751. if (trans)
  6752. exist = 1;
  6753. else
  6754. trans = btrfs_join_transaction(root);
  6755. if (IS_ERR(trans)) {
  6756. ret = PTR_ERR(trans);
  6757. goto out;
  6758. }
  6759. ret = do_chunk_alloc(trans, flags, CHUNK_ALLOC_FORCE);
  6760. /*
  6761. * If we can't allocate a new chunk we've already looped
  6762. * through at least once, move on to the NO_EMPTY_SIZE
  6763. * case.
  6764. */
  6765. if (ret == -ENOSPC)
  6766. loop = LOOP_NO_EMPTY_SIZE;
  6767. /*
  6768. * Do not bail out on ENOSPC since we
  6769. * can do more things.
  6770. */
  6771. if (ret < 0 && ret != -ENOSPC)
  6772. btrfs_abort_transaction(trans, ret);
  6773. else
  6774. ret = 0;
  6775. if (!exist)
  6776. btrfs_end_transaction(trans);
  6777. if (ret)
  6778. goto out;
  6779. }
  6780. if (loop == LOOP_NO_EMPTY_SIZE) {
  6781. /*
  6782. * Don't loop again if we already have no empty_size and
  6783. * no empty_cluster.
  6784. */
  6785. if (empty_size == 0 &&
  6786. empty_cluster == 0) {
  6787. ret = -ENOSPC;
  6788. goto out;
  6789. }
  6790. empty_size = 0;
  6791. empty_cluster = 0;
  6792. }
  6793. goto search;
  6794. } else if (!ins->objectid) {
  6795. ret = -ENOSPC;
  6796. } else if (ins->objectid) {
  6797. if (!use_cluster && last_ptr) {
  6798. spin_lock(&last_ptr->lock);
  6799. last_ptr->window_start = ins->objectid;
  6800. spin_unlock(&last_ptr->lock);
  6801. }
  6802. ret = 0;
  6803. }
  6804. out:
  6805. if (ret == -ENOSPC) {
  6806. if (!max_extent_size)
  6807. max_extent_size = max_free_space;
  6808. spin_lock(&space_info->lock);
  6809. space_info->max_extent_size = max_extent_size;
  6810. spin_unlock(&space_info->lock);
  6811. ins->offset = max_extent_size;
  6812. }
  6813. return ret;
  6814. }
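
/*
 * Editor's note: a compact sketch of the escalation implemented above.
 * Each failed pass over the block groups widens the search:
 *
 *	LOOP_CACHING_NOWAIT  - only fully cached block groups are searched
 *	LOOP_CACHING_WAIT    - wait on caching kthreads where needed
 *	LOOP_ALLOC_CHUNK     - force-allocate a new chunk and retry
 *	LOOP_NO_EMPTY_SIZE   - retry with empty_size and empty_cluster at 0
 *
 * Only when LOOP_NO_EMPTY_SIZE has run with both already zero does the
 * function give up, returning -ENOSPC with the largest free extent it saw
 * recorded in ins->offset and space_info->max_extent_size.
 */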
static void dump_space_info(struct btrfs_fs_info *fs_info,
			    struct btrfs_space_info *info, u64 bytes,
			    int dump_block_groups)
{
	struct btrfs_block_group_cache *cache;
	int index = 0;

	spin_lock(&info->lock);
	btrfs_info(fs_info, "space_info %llu has %llu free, is %sfull",
		   info->flags,
		   info->total_bytes - btrfs_space_info_used(info, true),
		   info->full ? "" : "not ");
	btrfs_info(fs_info,
		"space_info total=%llu, used=%llu, pinned=%llu, reserved=%llu, may_use=%llu, readonly=%llu",
		info->total_bytes, info->bytes_used, info->bytes_pinned,
		info->bytes_reserved, info->bytes_may_use,
		info->bytes_readonly);
	spin_unlock(&info->lock);

	if (!dump_block_groups)
		return;

	down_read(&info->groups_sem);
again:
	list_for_each_entry(cache, &info->block_groups[index], list) {
		spin_lock(&cache->lock);
		btrfs_info(fs_info,
			"block group %llu has %llu bytes, %llu used %llu pinned %llu reserved %s",
			cache->key.objectid, cache->key.offset,
			btrfs_block_group_used(&cache->item), cache->pinned,
			cache->reserved, cache->ro ? "[readonly]" : "");
		btrfs_dump_free_space(cache, bytes);
		spin_unlock(&cache->lock);
	}
	if (++index < BTRFS_NR_RAID_TYPES)
		goto again;
	up_read(&info->groups_sem);
}
/*
 * btrfs_reserve_extent - entry point to the extent allocator. Tries to find a
 * hole that is at least as big as @num_bytes.
 *
 * @root           - The root that will contain this extent
 *
 * @ram_bytes      - The amount of space in ram that @num_bytes takes. This
 *                   is used for accounting purposes. This value differs
 *                   from @num_bytes only in the case of compressed extents.
 *
 * @num_bytes      - Number of bytes to allocate on-disk.
 *
 * @min_alloc_size - Indicates the minimum amount of space that the
 *                   allocator should try to satisfy. In some cases
 *                   @num_bytes may be larger than what is required and if
 *                   the filesystem is fragmented then allocation fails.
 *                   However, the presence of @min_alloc_size gives a
 *                   chance to try and satisfy the smaller allocation.
 *
 * @empty_size     - A hint that you plan on doing more COW. This is the
 *                   size in bytes the allocator should try to find free
 *                   next to the block it returns. This is just a hint and
 *                   may be ignored by the allocator.
 *
 * @hint_byte      - Hint to the allocator to start searching above the byte
 *                   address passed. It might be ignored.
 *
 * @ins            - This key is modified to record the found hole. It will
 *                   have the following values:
 *                   ins->objectid == start position
 *                   ins->flags = BTRFS_EXTENT_ITEM_KEY
 *                   ins->offset == the size of the hole.
 *
 * @is_data        - Boolean flag indicating whether an extent is
 *                   allocated for data (true) or metadata (false)
 *
 * @delalloc       - Boolean flag indicating whether this allocation is for
 *                   delalloc or not. If 'true' data_rwsem of block groups
 *                   is going to be acquired.
 *
 * Returns 0 when an allocation succeeded or < 0 when an error occurred. In
 * case -ENOSPC is returned then @ins->offset will contain the size of the
 * largest available hole the allocator managed to find.
 */
int btrfs_reserve_extent(struct btrfs_root *root, u64 ram_bytes,
			 u64 num_bytes, u64 min_alloc_size,
			 u64 empty_size, u64 hint_byte,
			 struct btrfs_key *ins, int is_data, int delalloc)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	bool final_tried = num_bytes == min_alloc_size;
	u64 flags;
	int ret;

	flags = get_alloc_profile_by_root(root, is_data);
again:
	WARN_ON(num_bytes < fs_info->sectorsize);
	ret = find_free_extent(fs_info, ram_bytes, num_bytes, empty_size,
			       hint_byte, ins, flags, delalloc);
	if (!ret && !is_data) {
		btrfs_dec_block_group_reservations(fs_info, ins->objectid);
	} else if (ret == -ENOSPC) {
		if (!final_tried && ins->offset) {
			num_bytes = min(num_bytes >> 1, ins->offset);
			num_bytes = round_down(num_bytes,
					       fs_info->sectorsize);
			num_bytes = max(num_bytes, min_alloc_size);
			ram_bytes = num_bytes;
			if (num_bytes == min_alloc_size)
				final_tried = true;
			goto again;
		} else if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
			struct btrfs_space_info *sinfo;

			sinfo = __find_space_info(fs_info, flags);
			btrfs_err(fs_info,
				  "allocation failed flags %llu, wanted %llu",
				  flags, num_bytes);
			if (sinfo)
				dump_space_info(fs_info, sinfo, num_bytes, 1);
		}
	}

	return ret;
}
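
/*
 * Editor's note: a minimal, hypothetical usage sketch (sizes illustrative,
 * not from the source). On -ENOSPC the retry above halves num_bytes toward
 * min_alloc_size, clamped to the largest hole reported in ins->offset: with
 * num_bytes = 1MiB, min_alloc_size = 64KiB and a 200KiB best hole, the
 * retries ask for 200KiB (min(512KiB, 200KiB), rounded down to sectorsize),
 * then 100KiB, then 64KiB, at which point final_tried is set.
 *
 *	struct btrfs_key ins = {};
 *	int ret;
 *
 *	ret = btrfs_reserve_extent(root, SZ_1M, SZ_1M, SZ_64K, 0, 0,
 *				   &ins, 1, 0);
 *	if (!ret)
 *		pr_debug("extent at %llu, len %llu\n",
 *			 ins.objectid, ins.offset);
 */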
static int __btrfs_free_reserved_extent(struct btrfs_fs_info *fs_info,
					u64 start, u64 len,
					int pin, int delalloc)
{
	struct btrfs_block_group_cache *cache;
	int ret = 0;

	cache = btrfs_lookup_block_group(fs_info, start);
	if (!cache) {
		btrfs_err(fs_info, "Unable to find block group for %llu",
			  start);
		return -ENOSPC;
	}

	if (pin)
		pin_down_extent(fs_info, cache, start, len, 1);
	else {
		if (btrfs_test_opt(fs_info, DISCARD))
			ret = btrfs_discard_extent(fs_info, start, len, NULL);
		btrfs_add_free_space(cache, start, len);
		btrfs_free_reserved_bytes(cache, len, delalloc);
		trace_btrfs_reserved_extent_free(fs_info, start, len);
	}

	btrfs_put_block_group(cache);
	return ret;
}

int btrfs_free_reserved_extent(struct btrfs_fs_info *fs_info,
			       u64 start, u64 len, int delalloc)
{
	return __btrfs_free_reserved_extent(fs_info, start, len, 0, delalloc);
}

int btrfs_free_and_pin_reserved_extent(struct btrfs_fs_info *fs_info,
				       u64 start, u64 len)
{
	return __btrfs_free_reserved_extent(fs_info, start, len, 1, 0);
}

static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
				      u64 parent, u64 root_objectid,
				      u64 flags, u64 owner, u64 offset,
				      struct btrfs_key *ins, int ref_mod)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	int ret;
	struct btrfs_extent_item *extent_item;
	struct btrfs_extent_inline_ref *iref;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	int type;
	u32 size;

	if (parent > 0)
		type = BTRFS_SHARED_DATA_REF_KEY;
	else
		type = BTRFS_EXTENT_DATA_REF_KEY;

	size = sizeof(*extent_item) + btrfs_extent_inline_ref_size(type);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->leave_spinning = 1;
	ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
				      ins, size);
	if (ret) {
		btrfs_free_path(path);
		return ret;
	}

	leaf = path->nodes[0];
	extent_item = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_item);
	btrfs_set_extent_refs(leaf, extent_item, ref_mod);
	btrfs_set_extent_generation(leaf, extent_item, trans->transid);
	btrfs_set_extent_flags(leaf, extent_item,
			       flags | BTRFS_EXTENT_FLAG_DATA);

	iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
	btrfs_set_extent_inline_ref_type(leaf, iref, type);
	if (parent > 0) {
		struct btrfs_shared_data_ref *ref;
		ref = (struct btrfs_shared_data_ref *)(iref + 1);
		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
		btrfs_set_shared_data_ref_count(leaf, ref, ref_mod);
	} else {
		struct btrfs_extent_data_ref *ref;
		ref = (struct btrfs_extent_data_ref *)(&iref->offset);
		btrfs_set_extent_data_ref_root(leaf, ref, root_objectid);
		btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
		btrfs_set_extent_data_ref_offset(leaf, ref, offset);
		btrfs_set_extent_data_ref_count(leaf, ref, ref_mod);
	}

	btrfs_mark_buffer_dirty(path->nodes[0]);
	btrfs_free_path(path);

	ret = remove_from_free_space_tree(trans, ins->objectid, ins->offset);
	if (ret)
		return ret;

	ret = update_block_group(trans, fs_info, ins->objectid, ins->offset, 1);
	if (ret) { /* -ENOENT, logic error */
		btrfs_err(fs_info, "update block group failed for %llu %llu",
			  ins->objectid, ins->offset);
		BUG();
	}
	trace_btrfs_reserved_extent_alloc(fs_info, ins->objectid, ins->offset);
	return ret;
}

static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
				     struct btrfs_delayed_ref_node *node,
				     struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	int ret;
	struct btrfs_extent_item *extent_item;
	struct btrfs_key extent_key;
	struct btrfs_tree_block_info *block_info;
	struct btrfs_extent_inline_ref *iref;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_delayed_tree_ref *ref;
	u32 size = sizeof(*extent_item) + sizeof(*iref);
	u64 num_bytes;
	u64 flags = extent_op->flags_to_set;
	bool skinny_metadata = btrfs_fs_incompat(fs_info, SKINNY_METADATA);

	ref = btrfs_delayed_node_to_tree_ref(node);

	extent_key.objectid = node->bytenr;
	if (skinny_metadata) {
		extent_key.offset = ref->level;
		extent_key.type = BTRFS_METADATA_ITEM_KEY;
		num_bytes = fs_info->nodesize;
	} else {
		extent_key.offset = node->num_bytes;
		extent_key.type = BTRFS_EXTENT_ITEM_KEY;
		size += sizeof(*block_info);
		num_bytes = node->num_bytes;
	}

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->leave_spinning = 1;
	ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
				      &extent_key, size);
	if (ret) {
		btrfs_free_path(path);
		return ret;
	}

	leaf = path->nodes[0];
	extent_item = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_item);
	btrfs_set_extent_refs(leaf, extent_item, 1);
	btrfs_set_extent_generation(leaf, extent_item, trans->transid);
	btrfs_set_extent_flags(leaf, extent_item,
			       flags | BTRFS_EXTENT_FLAG_TREE_BLOCK);

	if (skinny_metadata) {
		iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
	} else {
		block_info = (struct btrfs_tree_block_info *)(extent_item + 1);
		btrfs_set_tree_block_key(leaf, block_info, &extent_op->key);
		btrfs_set_tree_block_level(leaf, block_info, ref->level);
		iref = (struct btrfs_extent_inline_ref *)(block_info + 1);
	}

	if (node->type == BTRFS_SHARED_BLOCK_REF_KEY) {
		BUG_ON(!(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
		btrfs_set_extent_inline_ref_type(leaf, iref,
						 BTRFS_SHARED_BLOCK_REF_KEY);
		btrfs_set_extent_inline_ref_offset(leaf, iref, ref->parent);
	} else {
		btrfs_set_extent_inline_ref_type(leaf, iref,
						 BTRFS_TREE_BLOCK_REF_KEY);
		btrfs_set_extent_inline_ref_offset(leaf, iref, ref->root);
	}

	btrfs_mark_buffer_dirty(leaf);
	btrfs_free_path(path);

	ret = remove_from_free_space_tree(trans, extent_key.objectid,
					  num_bytes);
	if (ret)
		return ret;

	ret = update_block_group(trans, fs_info, extent_key.objectid,
				 fs_info->nodesize, 1);
	if (ret) { /* -ENOENT, logic error */
		btrfs_err(fs_info, "update block group failed for %llu %llu",
			  extent_key.objectid, extent_key.offset);
		BUG();
	}

	trace_btrfs_reserved_extent_alloc(fs_info, extent_key.objectid,
					  fs_info->nodesize);
	return ret;
}
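
/*
 * Editor's note: the two on-disk layouts written by
 * alloc_reserved_tree_block() above, side by side. With SKINNY_METADATA
 * the level lives in the key offset and no btrfs_tree_block_info is
 * embedded, which is why `size` only grows by sizeof(*block_info) in the
 * non-skinny branch:
 *
 *	(bytenr, METADATA_ITEM_KEY, level):
 *		extent_item | inline_ref
 *	(bytenr, EXTENT_ITEM_KEY, num_bytes):
 *		extent_item | tree_block_info | inline_ref
 */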
int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root, u64 owner,
				     u64 offset, u64 ram_bytes,
				     struct btrfs_key *ins)
{
	int ret;

	BUG_ON(root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID);

	btrfs_ref_tree_mod(root, ins->objectid, ins->offset, 0,
			   root->root_key.objectid, owner, offset,
			   BTRFS_ADD_DELAYED_EXTENT);

	ret = btrfs_add_delayed_data_ref(trans, ins->objectid,
					 ins->offset, 0,
					 root->root_key.objectid, owner,
					 offset, ram_bytes,
					 BTRFS_ADD_DELAYED_EXTENT, NULL, NULL);
	return ret;
}

/*
 * this is used by the tree logging recovery code. It records that
 * an extent has been allocated and makes sure to clear the free
 * space cache bits as well
 */
int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
				   u64 root_objectid, u64 owner, u64 offset,
				   struct btrfs_key *ins)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	int ret;
	struct btrfs_block_group_cache *block_group;
	struct btrfs_space_info *space_info;

	/*
	 * Mixed block groups will exclude before processing the log so we only
	 * need to do the exclude dance if this fs isn't mixed.
	 */
	if (!btrfs_fs_incompat(fs_info, MIXED_GROUPS)) {
		ret = __exclude_logged_extent(fs_info, ins->objectid,
					      ins->offset);
		if (ret)
			return ret;
	}

	block_group = btrfs_lookup_block_group(fs_info, ins->objectid);
	if (!block_group)
		return -EINVAL;

	space_info = block_group->space_info;
	spin_lock(&space_info->lock);
	spin_lock(&block_group->lock);
	space_info->bytes_reserved += ins->offset;
	block_group->reserved += ins->offset;
	spin_unlock(&block_group->lock);
	spin_unlock(&space_info->lock);

	ret = alloc_reserved_file_extent(trans, 0, root_objectid, 0, owner,
					 offset, ins, 1);
	btrfs_put_block_group(block_group);
	return ret;
}
static struct extent_buffer *
btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		      u64 bytenr, int level, u64 owner)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_buffer *buf;

	buf = btrfs_find_create_tree_block(fs_info, bytenr);
	if (IS_ERR(buf))
		return buf;

	/*
	 * Extra safety check in case the extent tree is corrupted and extent
	 * allocator chooses to use a tree block which is already used and
	 * locked.
	 */
	if (buf->lock_owner == current->pid) {
		btrfs_err_rl(fs_info,
"tree block %llu owner %llu already locked by pid=%d, extent tree corruption detected",
			buf->start, btrfs_header_owner(buf), current->pid);
		free_extent_buffer(buf);
		return ERR_PTR(-EUCLEAN);
	}

	btrfs_set_buffer_lockdep_class(root->root_key.objectid, buf, level);
	btrfs_tree_lock(buf);
	clean_tree_block(fs_info, buf);
	clear_bit(EXTENT_BUFFER_STALE, &buf->bflags);

	btrfs_set_lock_blocking(buf);
	set_extent_buffer_uptodate(buf);

	memzero_extent_buffer(buf, 0, sizeof(struct btrfs_header));
	btrfs_set_header_level(buf, level);
	btrfs_set_header_bytenr(buf, buf->start);
	btrfs_set_header_generation(buf, trans->transid);
	btrfs_set_header_backref_rev(buf, BTRFS_MIXED_BACKREF_REV);
	btrfs_set_header_owner(buf, owner);
	write_extent_buffer_fsid(buf, fs_info->fsid);
	write_extent_buffer_chunk_tree_uuid(buf, fs_info->chunk_tree_uuid);
	if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
		buf->log_index = root->log_transid % 2;
		/*
		 * we allow two log transactions at a time, use different
		 * EXTENT bits to differentiate dirty pages.
		 */
		if (buf->log_index == 0)
			set_extent_dirty(&root->dirty_log_pages, buf->start,
					 buf->start + buf->len - 1, GFP_NOFS);
		else
			set_extent_new(&root->dirty_log_pages, buf->start,
				       buf->start + buf->len - 1);
	} else {
		buf->log_index = -1;
		set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
				 buf->start + buf->len - 1, GFP_NOFS);
	}
	trans->dirty = true;
	/* this returns a buffer locked for blocking */
	return buf;
}
static struct btrfs_block_rsv *
use_block_rsv(struct btrfs_trans_handle *trans,
	      struct btrfs_root *root, u32 blocksize)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_block_rsv *block_rsv;
	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
	int ret;
	bool global_updated = false;

	block_rsv = get_block_rsv(trans, root);

	if (unlikely(block_rsv->size == 0))
		goto try_reserve;
again:
	ret = block_rsv_use_bytes(block_rsv, blocksize);
	if (!ret)
		return block_rsv;

	if (block_rsv->failfast)
		return ERR_PTR(ret);

	if (block_rsv->type == BTRFS_BLOCK_RSV_GLOBAL && !global_updated) {
		global_updated = true;
		update_global_block_rsv(fs_info);
		goto again;
	}

	if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
		static DEFINE_RATELIMIT_STATE(_rs,
				DEFAULT_RATELIMIT_INTERVAL * 10,
				/*DEFAULT_RATELIMIT_BURST*/ 1);
		if (__ratelimit(&_rs))
			WARN(1, KERN_DEBUG
				"BTRFS: block rsv returned %d\n", ret);
	}
try_reserve:
	ret = reserve_metadata_bytes(root, block_rsv, blocksize,
				     BTRFS_RESERVE_NO_FLUSH);
	if (!ret)
		return block_rsv;
	/*
	 * If we couldn't reserve metadata bytes try and use some from
	 * the global reserve if its space type is the same as the global
	 * reservation.
	 */
	if (block_rsv->type != BTRFS_BLOCK_RSV_GLOBAL &&
	    block_rsv->space_info == global_rsv->space_info) {
		ret = block_rsv_use_bytes(global_rsv, blocksize);
		if (!ret)
			return global_rsv;
	}
	return ERR_PTR(ret);
}

static void unuse_block_rsv(struct btrfs_fs_info *fs_info,
			    struct btrfs_block_rsv *block_rsv, u32 blocksize)
{
	block_rsv_add_bytes(block_rsv, blocksize, false);
	block_rsv_release_bytes(fs_info, block_rsv, NULL, 0, NULL);
}
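
/*
 * Editor's note: the fallback order use_block_rsv() implements, summarized
 * from the code above:
 *
 *	1) take blocksize bytes from the root's own block_rsv;
 *	2) if the rsv is the global one and step 1 failed, refresh it once
 *	   via update_global_block_rsv() and retry;
 *	3) reserve fresh metadata bytes with BTRFS_RESERVE_NO_FLUSH;
 *	4) as a last resort, borrow from the global reserve when the
 *	   space_info matches.
 *
 * A failfast rsv returns ERR_PTR() straight after step 1.
 */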
/*
 * finds a free extent and does all the dirty work required for allocation.
 * returns the tree buffer or an ERR_PTR on error.
 */
struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
					     struct btrfs_root *root,
					     u64 parent, u64 root_objectid,
					     const struct btrfs_disk_key *key,
					     int level, u64 hint,
					     u64 empty_size)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_key ins;
	struct btrfs_block_rsv *block_rsv;
	struct extent_buffer *buf;
	struct btrfs_delayed_extent_op *extent_op;
	u64 flags = 0;
	int ret;
	u32 blocksize = fs_info->nodesize;
	bool skinny_metadata = btrfs_fs_incompat(fs_info, SKINNY_METADATA);

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
	if (btrfs_is_testing(fs_info)) {
		buf = btrfs_init_new_buffer(trans, root, root->alloc_bytenr,
					    level, root_objectid);
		if (!IS_ERR(buf))
			root->alloc_bytenr += blocksize;
		return buf;
	}
#endif

	block_rsv = use_block_rsv(trans, root, blocksize);
	if (IS_ERR(block_rsv))
		return ERR_CAST(block_rsv);

	ret = btrfs_reserve_extent(root, blocksize, blocksize, blocksize,
				   empty_size, hint, &ins, 0, 0);
	if (ret)
		goto out_unuse;

	buf = btrfs_init_new_buffer(trans, root, ins.objectid, level,
				    root_objectid);
	if (IS_ERR(buf)) {
		ret = PTR_ERR(buf);
		goto out_free_reserved;
	}

	if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) {
		if (parent == 0)
			parent = ins.objectid;
		flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
	} else
		BUG_ON(parent > 0);

	if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {
		extent_op = btrfs_alloc_delayed_extent_op();
		if (!extent_op) {
			ret = -ENOMEM;
			goto out_free_buf;
		}
		if (key)
			memcpy(&extent_op->key, key, sizeof(extent_op->key));
		else
			memset(&extent_op->key, 0, sizeof(extent_op->key));
		extent_op->flags_to_set = flags;
		extent_op->update_key = skinny_metadata ? false : true;
		extent_op->update_flags = true;
		extent_op->is_data = false;
		extent_op->level = level;

		btrfs_ref_tree_mod(root, ins.objectid, ins.offset, parent,
				   root_objectid, level, 0,
				   BTRFS_ADD_DELAYED_EXTENT);
		ret = btrfs_add_delayed_tree_ref(trans, ins.objectid,
						 ins.offset, parent,
						 root_objectid, level,
						 BTRFS_ADD_DELAYED_EXTENT,
						 extent_op, NULL, NULL);
		if (ret)
			goto out_free_delayed;
	}
	return buf;

out_free_delayed:
	btrfs_free_delayed_extent_op(extent_op);
out_free_buf:
	free_extent_buffer(buf);
out_free_reserved:
	btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 0);
out_unuse:
	unuse_block_rsv(fs_info, block_rsv, blocksize);
	return ERR_PTR(ret);
}
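
/*
 * Editor's note: a minimal, hypothetical call allocating a new leaf for a
 * subvolume root (the NULL key and level 0 are illustrative only, not
 * taken from this file):
 *
 *	struct extent_buffer *eb;
 *
 *	eb = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
 *				    NULL, 0, 0, 0);
 *	if (IS_ERR(eb))
 *		return PTR_ERR(eb);
 *
 * The buffer comes back locked for blocking, per btrfs_init_new_buffer()
 * above.
 */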
struct walk_control {
	u64 refs[BTRFS_MAX_LEVEL];
	u64 flags[BTRFS_MAX_LEVEL];
	struct btrfs_key update_progress;
	int stage;
	int level;
	int shared_level;
	int update_ref;
	int keep_locks;
	int reada_slot;
	int reada_count;
};

#define DROP_REFERENCE	1
#define UPDATE_BACKREF	2
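
/*
 * Editor's note: how the two stages hand off, sketched from the walkers
 * below. A drop starts in DROP_REFERENCE; when do_walk_down() meets a
 * shared block (refs > 1) whose backrefs still need updating, it switches
 * that subtree to the second stage:
 *
 *	wc->stage = UPDATE_BACKREF;
 *	wc->shared_level = level - 1;
 *
 * and walk_up_proc() switches back once the shared level has been fully
 * walked, restoring DROP_REFERENCE (and shared_level = -1) for the rest
 * of the tree.
 */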
static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     struct walk_control *wc,
				     struct btrfs_path *path)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 bytenr;
	u64 generation;
	u64 refs;
	u64 flags;
	u32 nritems;
	struct btrfs_key key;
	struct extent_buffer *eb;
	int ret;
	int slot;
	int nread = 0;

	if (path->slots[wc->level] < wc->reada_slot) {
		wc->reada_count = wc->reada_count * 2 / 3;
		wc->reada_count = max(wc->reada_count, 2);
	} else {
		wc->reada_count = wc->reada_count * 3 / 2;
		wc->reada_count = min_t(int, wc->reada_count,
					BTRFS_NODEPTRS_PER_BLOCK(fs_info));
	}

	eb = path->nodes[wc->level];
	nritems = btrfs_header_nritems(eb);

	for (slot = path->slots[wc->level]; slot < nritems; slot++) {
		if (nread >= wc->reada_count)
			break;

		cond_resched();
		bytenr = btrfs_node_blockptr(eb, slot);
		generation = btrfs_node_ptr_generation(eb, slot);

		if (slot == path->slots[wc->level])
			goto reada;

		if (wc->stage == UPDATE_BACKREF &&
		    generation <= root->root_key.offset)
			continue;

		/* We don't lock the tree block, it's OK to be racy here */
		ret = btrfs_lookup_extent_info(trans, fs_info, bytenr,
					       wc->level - 1, 1, &refs,
					       &flags);
		/* We don't care about errors in readahead. */
		if (ret < 0)
			continue;
		BUG_ON(refs == 0);

		if (wc->stage == DROP_REFERENCE) {
			if (refs == 1)
				goto reada;

			if (wc->level == 1 &&
			    (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
				continue;
			if (!wc->update_ref ||
			    generation <= root->root_key.offset)
				continue;
			btrfs_node_key_to_cpu(eb, &key, slot);
			ret = btrfs_comp_cpu_keys(&key,
						  &wc->update_progress);
			if (ret < 0)
				continue;
		} else {
			if (wc->level == 1 &&
			    (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
				continue;
		}
reada:
		readahead_tree_block(fs_info, bytenr);
		nread++;
	}
	wc->reada_slot = slot;
}
/*
 * helper to process tree block while walking down the tree.
 *
 * when wc->stage == UPDATE_BACKREF, this function updates
 * back refs for pointers in the block.
 *
 * NOTE: return value 1 means we should stop walking down.
 */
static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct btrfs_path *path,
				   struct walk_control *wc, int lookup_info)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int level = wc->level;
	struct extent_buffer *eb = path->nodes[level];
	u64 flag = BTRFS_BLOCK_FLAG_FULL_BACKREF;
	int ret;

	if (wc->stage == UPDATE_BACKREF &&
	    btrfs_header_owner(eb) != root->root_key.objectid)
		return 1;

	/*
	 * when the reference count of a tree block is 1, it won't increase
	 * again. once the full backref flag is set, we never clear it.
	 */
	if (lookup_info &&
	    ((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) ||
	     (wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag)))) {
		BUG_ON(!path->locks[level]);
		ret = btrfs_lookup_extent_info(trans, fs_info,
					       eb->start, level, 1,
					       &wc->refs[level],
					       &wc->flags[level]);
		BUG_ON(ret == -ENOMEM);
		if (ret)
			return ret;
		BUG_ON(wc->refs[level] == 0);
	}

	if (wc->stage == DROP_REFERENCE) {
		if (wc->refs[level] > 1)
			return 1;

		if (path->locks[level] && !wc->keep_locks) {
			btrfs_tree_unlock_rw(eb, path->locks[level]);
			path->locks[level] = 0;
		}
		return 0;
	}

	/* wc->stage == UPDATE_BACKREF */
	if (!(wc->flags[level] & flag)) {
		BUG_ON(!path->locks[level]);
		ret = btrfs_inc_ref(trans, root, eb, 1);
		BUG_ON(ret); /* -ENOMEM */
		ret = btrfs_dec_ref(trans, root, eb, 0);
		BUG_ON(ret); /* -ENOMEM */
		ret = btrfs_set_disk_extent_flags(trans, fs_info, eb->start,
						  eb->len, flag,
						  btrfs_header_level(eb), 0);
		BUG_ON(ret); /* -ENOMEM */
		wc->flags[level] |= flag;
	}

	/*
	 * the block is shared by multiple trees, so it's not good to
	 * keep the tree lock
	 */
	if (path->locks[level] && level > 0) {
		btrfs_tree_unlock_rw(eb, path->locks[level]);
		path->locks[level] = 0;
	}
	return 0;
}
/*
 * helper to process tree block pointer.
 *
 * when wc->stage == DROP_REFERENCE, this function checks the
 * reference count of the block pointed to. if the block
 * is shared and we need to update back refs for the subtree
 * rooted at the block, this function changes wc->stage to
 * UPDATE_BACKREF. if the block is shared and there is no
 * need to update back refs, this function drops the reference
 * to the block.
 *
 * NOTE: return value 1 means we should stop walking down.
 */
static noinline int do_walk_down(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct walk_control *wc, int *lookup_info)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 bytenr;
	u64 generation;
	u64 parent;
	u32 blocksize;
	struct btrfs_key key;
	struct btrfs_key first_key;
	struct extent_buffer *next;
	int level = wc->level;
	int reada = 0;
	int ret = 0;
	bool need_account = false;

	generation = btrfs_node_ptr_generation(path->nodes[level],
					       path->slots[level]);
	/*
	 * if the lower level block was created before the snapshot
	 * was created, we know there is no need to update back refs
	 * for the subtree
	 */
	if (wc->stage == UPDATE_BACKREF &&
	    generation <= root->root_key.offset) {
		*lookup_info = 1;
		return 1;
	}

	bytenr = btrfs_node_blockptr(path->nodes[level], path->slots[level]);
	btrfs_node_key_to_cpu(path->nodes[level], &first_key,
			      path->slots[level]);
	blocksize = fs_info->nodesize;

	next = find_extent_buffer(fs_info, bytenr);
	if (!next) {
		next = btrfs_find_create_tree_block(fs_info, bytenr);
		if (IS_ERR(next))
			return PTR_ERR(next);

		btrfs_set_buffer_lockdep_class(root->root_key.objectid, next,
					       level - 1);
		reada = 1;
	}
	btrfs_tree_lock(next);
	btrfs_set_lock_blocking(next);

	ret = btrfs_lookup_extent_info(trans, fs_info, bytenr, level - 1, 1,
				       &wc->refs[level - 1],
				       &wc->flags[level - 1]);
	if (ret < 0)
		goto out_unlock;

	if (unlikely(wc->refs[level - 1] == 0)) {
		btrfs_err(fs_info, "Missing references.");
		ret = -EIO;
		goto out_unlock;
	}
	*lookup_info = 0;

	if (wc->stage == DROP_REFERENCE) {
		if (wc->refs[level - 1] > 1) {
			need_account = true;
			if (level == 1 &&
			    (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
				goto skip;

			if (!wc->update_ref ||
			    generation <= root->root_key.offset)
				goto skip;

			btrfs_node_key_to_cpu(path->nodes[level], &key,
					      path->slots[level]);
			ret = btrfs_comp_cpu_keys(&key, &wc->update_progress);
			if (ret < 0)
				goto skip;

			wc->stage = UPDATE_BACKREF;
			wc->shared_level = level - 1;
		}
	} else {
		if (level == 1 &&
		    (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
			goto skip;
	}

	if (!btrfs_buffer_uptodate(next, generation, 0)) {
		btrfs_tree_unlock(next);
		free_extent_buffer(next);
		next = NULL;
		*lookup_info = 1;
	}

	if (!next) {
		if (reada && level == 1)
			reada_walk_down(trans, root, wc, path);
		next = read_tree_block(fs_info, bytenr, generation, level - 1,
				       &first_key);
		if (IS_ERR(next)) {
			return PTR_ERR(next);
		} else if (!extent_buffer_uptodate(next)) {
			free_extent_buffer(next);
			return -EIO;
		}
		btrfs_tree_lock(next);
		btrfs_set_lock_blocking(next);
	}

	level--;
	ASSERT(level == btrfs_header_level(next));
	if (level != btrfs_header_level(next)) {
		btrfs_err(root->fs_info, "mismatched level");
		ret = -EIO;
		goto out_unlock;
	}
	path->nodes[level] = next;
	path->slots[level] = 0;
	path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
	wc->level = level;
	if (wc->level == 1)
		wc->reada_slot = 0;
	return 0;
skip:
	wc->refs[level - 1] = 0;
	wc->flags[level - 1] = 0;
	if (wc->stage == DROP_REFERENCE) {
		if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
			parent = path->nodes[level]->start;
		} else {
			ASSERT(root->root_key.objectid ==
			       btrfs_header_owner(path->nodes[level]));
			if (root->root_key.objectid !=
			    btrfs_header_owner(path->nodes[level])) {
				btrfs_err(root->fs_info,
					  "mismatched block owner");
				ret = -EIO;
				goto out_unlock;
			}
			parent = 0;
		}

		/*
		 * Reloc tree doesn't contribute to qgroup numbers, and we have
		 * already accounted them at merge time (replace_path),
		 * thus we could skip expensive subtree trace here.
		 */
		if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID &&
		    need_account) {
			ret = btrfs_qgroup_trace_subtree(trans, next,
							 generation, level - 1);
			if (ret) {
				btrfs_err_rl(fs_info,
					     "Error %d accounting shared subtree. Quota is out of sync, rescan required.",
					     ret);
			}
		}
		ret = btrfs_free_extent(trans, root, bytenr, blocksize,
					parent, root->root_key.objectid,
					level - 1, 0);
		if (ret)
			goto out_unlock;
	}

	*lookup_info = 1;
	ret = 1;

out_unlock:
	btrfs_tree_unlock(next);
	free_extent_buffer(next);

	return ret;
}
/*
 * helper to process tree block while walking up the tree.
 *
 * when wc->stage == DROP_REFERENCE, this function drops
 * reference count on the block.
 *
 * when wc->stage == UPDATE_BACKREF, this function changes
 * wc->stage back to DROP_REFERENCE if we changed wc->stage
 * to UPDATE_BACKREF previously while processing the block.
 *
 * NOTE: return value 1 means we should stop walking up.
 */
static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct walk_control *wc)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret;
	int level = wc->level;
	struct extent_buffer *eb = path->nodes[level];
	u64 parent = 0;

	if (wc->stage == UPDATE_BACKREF) {
		BUG_ON(wc->shared_level < level);
		if (level < wc->shared_level)
			goto out;

		ret = find_next_key(path, level + 1, &wc->update_progress);
		if (ret > 0)
			wc->update_ref = 0;

		wc->stage = DROP_REFERENCE;
		wc->shared_level = -1;
		path->slots[level] = 0;

		/*
		 * check reference count again if the block isn't locked.
		 * we should start walking down the tree again if reference
		 * count is one.
		 */
		if (!path->locks[level]) {
			BUG_ON(level == 0);
			btrfs_tree_lock(eb);
			btrfs_set_lock_blocking(eb);
			path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;

			ret = btrfs_lookup_extent_info(trans, fs_info,
						       eb->start, level, 1,
						       &wc->refs[level],
						       &wc->flags[level]);
			if (ret < 0) {
				btrfs_tree_unlock_rw(eb, path->locks[level]);
				path->locks[level] = 0;
				return ret;
			}
			BUG_ON(wc->refs[level] == 0);
			if (wc->refs[level] == 1) {
				btrfs_tree_unlock_rw(eb, path->locks[level]);
				path->locks[level] = 0;
				return 1;
			}
		}
	}

	/* wc->stage == DROP_REFERENCE */
	BUG_ON(wc->refs[level] > 1 && !path->locks[level]);

	if (wc->refs[level] == 1) {
		if (level == 0) {
			if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
				ret = btrfs_dec_ref(trans, root, eb, 1);
			else
				ret = btrfs_dec_ref(trans, root, eb, 0);
			BUG_ON(ret); /* -ENOMEM */
			ret = btrfs_qgroup_trace_leaf_items(trans, eb);
			if (ret) {
				btrfs_err_rl(fs_info,
					     "error %d accounting leaf items. Quota is out of sync, rescan required.",
					     ret);
			}
		}
		/* make block locked assertion in clean_tree_block happy */
		if (!path->locks[level] &&
		    btrfs_header_generation(eb) == trans->transid) {
			btrfs_tree_lock(eb);
			btrfs_set_lock_blocking(eb);
			path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
		}
		clean_tree_block(fs_info, eb);
	}

	if (eb == root->node) {
		if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
			parent = eb->start;
		else if (root->root_key.objectid != btrfs_header_owner(eb))
			goto owner_mismatch;
	} else {
		if (wc->flags[level + 1] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
			parent = path->nodes[level + 1]->start;
		else if (root->root_key.objectid !=
			 btrfs_header_owner(path->nodes[level + 1]))
			goto owner_mismatch;
	}

	btrfs_free_tree_block(trans, root, eb, parent, wc->refs[level] == 1);
out:
	wc->refs[level] = 0;
	wc->flags[level] = 0;
	return 0;

owner_mismatch:
	btrfs_err_rl(fs_info, "unexpected tree owner, have %llu expect %llu",
		     btrfs_header_owner(eb), root->root_key.objectid);
	return -EUCLEAN;
}
static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct btrfs_path *path,
				   struct walk_control *wc)
{
	int level = wc->level;
	int lookup_info = 1;
	int ret;

	while (level >= 0) {
		ret = walk_down_proc(trans, root, path, wc, lookup_info);
		if (ret > 0)
			break;

		if (level == 0)
			break;

		if (path->slots[level] >=
		    btrfs_header_nritems(path->nodes[level]))
			break;

		ret = do_walk_down(trans, root, path, wc, &lookup_info);
		if (ret > 0) {
			path->slots[level]++;
			continue;
		} else if (ret < 0)
			return ret;
		level = wc->level;
	}
	return 0;
}

static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct walk_control *wc, int max_level)
{
	int level = wc->level;
	int ret;

	path->slots[level] = btrfs_header_nritems(path->nodes[level]);
	while (level < max_level && path->nodes[level]) {
		wc->level = level;
		if (path->slots[level] + 1 <
		    btrfs_header_nritems(path->nodes[level])) {
			path->slots[level]++;
			return 0;
		} else {
			ret = walk_up_proc(trans, root, path, wc);
			if (ret > 0)
				return 0;
			if (ret < 0)
				return ret;

			if (path->locks[level]) {
				btrfs_tree_unlock_rw(path->nodes[level],
						     path->locks[level]);
				path->locks[level] = 0;
			}
			free_extent_buffer(path->nodes[level]);
			path->nodes[level] = NULL;
			level++;
		}
	}
	return 1;
}
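
/*
 * Editor's note: walk_down_tree() and walk_up_tree() are designed to be
 * driven as a pair; btrfs_drop_snapshot() below is the canonical loop,
 * sketched here:
 *
 *	while (1) {
 *		ret = walk_down_tree(trans, root, path, wc);
 *		if (ret < 0)
 *			break;
 *		ret = walk_up_tree(trans, root, path, wc, BTRFS_MAX_LEVEL);
 *		if (ret)
 *			break;	(negative is an error, 1 means the walk is done)
 *	}
 */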
/*
 * drop a subvolume tree.
 *
 * this function traverses the tree freeing any blocks that are only
 * referenced by the tree.
 *
 * when a shared tree block is found, this function decreases its
 * reference count by one. if update_ref is true, this function
 * also makes sure backrefs for the shared block and all lower level
 * blocks are properly updated.
 *
 * If called with for_reloc == 0, may exit early with -EAGAIN
 */
int btrfs_drop_snapshot(struct btrfs_root *root,
			struct btrfs_block_rsv *block_rsv, int update_ref,
			int for_reloc)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_path *path;
	struct btrfs_trans_handle *trans;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_root_item *root_item = &root->root_item;
	struct walk_control *wc;
	struct btrfs_key key;
	int err = 0;
	int ret;
	int level;
	bool root_dropped = false;

	btrfs_debug(fs_info, "Drop subvolume %llu", root->root_key.objectid);

	path = btrfs_alloc_path();
	if (!path) {
		err = -ENOMEM;
		goto out;
	}

	wc = kzalloc(sizeof(*wc), GFP_NOFS);
	if (!wc) {
		btrfs_free_path(path);
		err = -ENOMEM;
		goto out;
	}

	trans = btrfs_start_transaction(tree_root, 0);
	if (IS_ERR(trans)) {
		err = PTR_ERR(trans);
		goto out_free;
	}

	if (block_rsv)
		trans->block_rsv = block_rsv;

	if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
		level = btrfs_header_level(root->node);
		path->nodes[level] = btrfs_lock_root_node(root);
		btrfs_set_lock_blocking(path->nodes[level]);
		path->slots[level] = 0;
		path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
		memset(&wc->update_progress, 0,
		       sizeof(wc->update_progress));
	} else {
		btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
		memcpy(&wc->update_progress, &key,
		       sizeof(wc->update_progress));

		level = root_item->drop_level;
		BUG_ON(level == 0);
		path->lowest_level = level;
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		path->lowest_level = 0;
		if (ret < 0) {
			err = ret;
			goto out_end_trans;
		}
		WARN_ON(ret > 0);

		/*
		 * unlock our path, this is safe because only this
		 * function is allowed to delete this snapshot
		 */
		btrfs_unlock_up_safe(path, 0);

		level = btrfs_header_level(root->node);
		while (1) {
			btrfs_tree_lock(path->nodes[level]);
			btrfs_set_lock_blocking(path->nodes[level]);
			path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;

			ret = btrfs_lookup_extent_info(trans, fs_info,
						path->nodes[level]->start,
						level, 1, &wc->refs[level],
						&wc->flags[level]);
			if (ret < 0) {
				err = ret;
				goto out_end_trans;
			}
			BUG_ON(wc->refs[level] == 0);

			if (level == root_item->drop_level)
				break;

			btrfs_tree_unlock(path->nodes[level]);
			path->locks[level] = 0;
			WARN_ON(wc->refs[level] != 1);
			level--;
		}
	}

	wc->level = level;
	wc->shared_level = -1;
	wc->stage = DROP_REFERENCE;
	wc->update_ref = update_ref;
	wc->keep_locks = 0;
	wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(fs_info);

	while (1) {
		ret = walk_down_tree(trans, root, path, wc);
		if (ret < 0) {
			err = ret;
			break;
		}

		ret = walk_up_tree(trans, root, path, wc, BTRFS_MAX_LEVEL);
		if (ret < 0) {
			err = ret;
			break;
		}

		if (ret > 0) {
			BUG_ON(wc->stage != DROP_REFERENCE);
			break;
		}

		if (wc->stage == DROP_REFERENCE) {
			level = wc->level;
			btrfs_node_key(path->nodes[level],
				       &root_item->drop_progress,
				       path->slots[level]);
			root_item->drop_level = level;
		}

		BUG_ON(wc->level == 0);
		if (btrfs_should_end_transaction(trans) ||
		    (!for_reloc && btrfs_need_cleaner_sleep(fs_info))) {
			ret = btrfs_update_root(trans, tree_root,
						&root->root_key,
						root_item);
			if (ret) {
				btrfs_abort_transaction(trans, ret);
				err = ret;
				goto out_end_trans;
			}

			btrfs_end_transaction_throttle(trans);
			if (!for_reloc && btrfs_need_cleaner_sleep(fs_info)) {
				btrfs_debug(fs_info,
					    "drop snapshot early exit");
				err = -EAGAIN;
				goto out_free;
			}

			trans = btrfs_start_transaction(tree_root, 0);
			if (IS_ERR(trans)) {
				err = PTR_ERR(trans);
				goto out_free;
			}
			if (block_rsv)
				trans->block_rsv = block_rsv;
		}
	}
	btrfs_release_path(path);
	if (err)
		goto out_end_trans;

	ret = btrfs_del_root(trans, &root->root_key);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		err = ret;
		goto out_end_trans;
	}

	if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
		ret = btrfs_find_root(tree_root, &root->root_key, path,
				      NULL, NULL);
		if (ret < 0) {
			btrfs_abort_transaction(trans, ret);
			err = ret;
			goto out_end_trans;
		} else if (ret > 0) {
			/* if we fail to delete the orphan item this time
			 * around, it'll get picked up the next time.
			 *
			 * The most common failure here is just -ENOENT.
			 */
			btrfs_del_orphan_item(trans, tree_root,
					      root->root_key.objectid);
		}
	}

	if (test_bit(BTRFS_ROOT_IN_RADIX, &root->state)) {
		btrfs_add_dropped_root(trans, root);
	} else {
		free_extent_buffer(root->node);
		free_extent_buffer(root->commit_root);
		btrfs_put_fs_root(root);
	}
	root_dropped = true;
out_end_trans:
	btrfs_end_transaction_throttle(trans);
out_free:
	kfree(wc);
	btrfs_free_path(path);
out:
	/*
	 * So if we need to stop dropping the snapshot for whatever reason we
	 * need to make sure to add it back to the dead root list so that we
	 * keep trying to do the work later. This also cleans up roots if we
	 * don't have it in the radix (like when we recover after a power fail
	 * or unmount) so we don't leak memory.
	 */
	if (!for_reloc && !root_dropped)
		btrfs_add_dead_root(root);
	if (err && err != -EAGAIN)
		btrfs_handle_fs_error(fs_info, err, NULL);
	return err;
}
  8056. /*
  8057. * drop subtree rooted at tree block 'node'.
  8058. *
8059. * NOTE: this function will unlock and release tree block 'node'.
8060. * Only used by relocation code.
  8061. */
  8062. int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
  8063. struct btrfs_root *root,
  8064. struct extent_buffer *node,
  8065. struct extent_buffer *parent)
  8066. {
  8067. struct btrfs_fs_info *fs_info = root->fs_info;
  8068. struct btrfs_path *path;
  8069. struct walk_control *wc;
  8070. int level;
  8071. int parent_level;
  8072. int ret = 0;
  8073. int wret;
  8074. BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
  8075. path = btrfs_alloc_path();
  8076. if (!path)
  8077. return -ENOMEM;
  8078. wc = kzalloc(sizeof(*wc), GFP_NOFS);
  8079. if (!wc) {
  8080. btrfs_free_path(path);
  8081. return -ENOMEM;
  8082. }
  8083. btrfs_assert_tree_locked(parent);
  8084. parent_level = btrfs_header_level(parent);
  8085. extent_buffer_get(parent);
  8086. path->nodes[parent_level] = parent;
  8087. path->slots[parent_level] = btrfs_header_nritems(parent);
  8088. btrfs_assert_tree_locked(node);
  8089. level = btrfs_header_level(node);
  8090. path->nodes[level] = node;
  8091. path->slots[level] = 0;
  8092. path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
  8093. wc->refs[parent_level] = 1;
  8094. wc->flags[parent_level] = BTRFS_BLOCK_FLAG_FULL_BACKREF;
  8095. wc->level = level;
  8096. wc->shared_level = -1;
  8097. wc->stage = DROP_REFERENCE;
  8098. wc->update_ref = 0;
  8099. wc->keep_locks = 1;
  8100. wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(fs_info);
  8101. while (1) {
  8102. wret = walk_down_tree(trans, root, path, wc);
  8103. if (wret < 0) {
  8104. ret = wret;
  8105. break;
  8106. }
  8107. wret = walk_up_tree(trans, root, path, wc, parent_level);
  8108. if (wret < 0)
  8109. ret = wret;
  8110. if (wret != 0)
  8111. break;
  8112. }
  8113. kfree(wc);
  8114. btrfs_free_path(path);
  8115. return ret;
  8116. }
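/*
 * Work out what profile a chunk of the given type should end up with for
 * the current device count. Roughly (see the body for the exact rules):
 * with a single rw device RAID0 degrades to single and RAID1/RAID10 to
 * DUP; with multiple devices DUP is promoted to RAID1. A restripe target,
 * if one is configured, overrides all of this.
 */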
  8117. static u64 update_block_group_flags(struct btrfs_fs_info *fs_info, u64 flags)
  8118. {
  8119. u64 num_devices;
  8120. u64 stripped;
  8121. /*
8122. * if restripe for this chunk_type is on, pick the target profile and
8123. * return; otherwise do the usual balance
  8124. */
  8125. stripped = get_restripe_target(fs_info, flags);
  8126. if (stripped)
  8127. return extended_to_chunk(stripped);
  8128. num_devices = fs_info->fs_devices->rw_devices;
  8129. stripped = BTRFS_BLOCK_GROUP_RAID0 |
  8130. BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6 |
  8131. BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;
  8132. if (num_devices == 1) {
  8133. stripped |= BTRFS_BLOCK_GROUP_DUP;
  8134. stripped = flags & ~stripped;
  8135. /* turn raid0 into single device chunks */
  8136. if (flags & BTRFS_BLOCK_GROUP_RAID0)
  8137. return stripped;
  8138. /* turn mirroring into duplication */
  8139. if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
  8140. BTRFS_BLOCK_GROUP_RAID10))
  8141. return stripped | BTRFS_BLOCK_GROUP_DUP;
  8142. } else {
  8143. /* they already had raid on here, just return */
  8144. if (flags & stripped)
  8145. return flags;
  8146. stripped |= BTRFS_BLOCK_GROUP_DUP;
  8147. stripped = flags & ~stripped;
  8148. /* switch duplicated blocks with raid1 */
  8149. if (flags & BTRFS_BLOCK_GROUP_DUP)
  8150. return stripped | BTRFS_BLOCK_GROUP_RAID1;
  8151. /* this is drive concat, leave it alone */
  8152. }
  8153. return flags;
  8154. }
  8155. static int inc_block_group_ro(struct btrfs_block_group_cache *cache, int force)
  8156. {
  8157. struct btrfs_space_info *sinfo = cache->space_info;
  8158. u64 num_bytes;
  8159. u64 min_allocable_bytes;
  8160. int ret = -ENOSPC;
  8161. /*
8162. * We need some metadata space and system metadata space for
8163. * allocating chunks in some corner cases, unless the caller forces
8164. * the block group read-only.
  8165. */
  8166. if ((sinfo->flags &
  8167. (BTRFS_BLOCK_GROUP_SYSTEM | BTRFS_BLOCK_GROUP_METADATA)) &&
  8168. !force)
  8169. min_allocable_bytes = SZ_1M;
  8170. else
  8171. min_allocable_bytes = 0;
  8172. spin_lock(&sinfo->lock);
  8173. spin_lock(&cache->lock);
  8174. if (cache->ro) {
  8175. cache->ro++;
  8176. ret = 0;
  8177. goto out;
  8178. }
  8179. num_bytes = cache->key.offset - cache->reserved - cache->pinned -
  8180. cache->bytes_super - btrfs_block_group_used(&cache->item);
  8181. if (btrfs_space_info_used(sinfo, true) + num_bytes +
  8182. min_allocable_bytes <= sinfo->total_bytes) {
  8183. sinfo->bytes_readonly += num_bytes;
  8184. cache->ro++;
  8185. list_add_tail(&cache->ro_list, &sinfo->ro_bgs);
  8186. ret = 0;
  8187. }
  8188. out:
  8189. spin_unlock(&cache->lock);
  8190. spin_unlock(&sinfo->lock);
  8191. return ret;
  8192. }
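/*
 * inc_block_group_ro() above only bumps the counter; this exported variant
 * also force-allocates chunks so that marking the group read-only can't
 * strand the last writable group of its type. A sketch of the usual
 * pairing, from a hypothetical scrub/relocation style caller:
 *
 *	ret = btrfs_inc_block_group_ro(cache);
 *	if (!ret) {
 *		... work on the group while nothing new lands in it ...
 *		btrfs_dec_block_group_ro(cache);
 *	}
 */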
  8193. int btrfs_inc_block_group_ro(struct btrfs_block_group_cache *cache)
  8194. {
  8195. struct btrfs_fs_info *fs_info = cache->fs_info;
  8196. struct btrfs_trans_handle *trans;
  8197. u64 alloc_flags;
  8198. int ret;
  8199. again:
  8200. trans = btrfs_join_transaction(fs_info->extent_root);
  8201. if (IS_ERR(trans))
  8202. return PTR_ERR(trans);
  8203. /*
  8204. * we're not allowed to set block groups readonly after the dirty
  8205. * block groups cache has started writing. If it already started,
  8206. * back off and let this transaction commit
  8207. */
  8208. mutex_lock(&fs_info->ro_block_group_mutex);
  8209. if (test_bit(BTRFS_TRANS_DIRTY_BG_RUN, &trans->transaction->flags)) {
  8210. u64 transid = trans->transid;
  8211. mutex_unlock(&fs_info->ro_block_group_mutex);
  8212. btrfs_end_transaction(trans);
  8213. ret = btrfs_wait_for_commit(fs_info, transid);
  8214. if (ret)
  8215. return ret;
  8216. goto again;
  8217. }
  8218. /*
  8219. * if we are changing raid levels, try to allocate a corresponding
  8220. * block group with the new raid level.
  8221. */
  8222. alloc_flags = update_block_group_flags(fs_info, cache->flags);
  8223. if (alloc_flags != cache->flags) {
  8224. ret = do_chunk_alloc(trans, alloc_flags,
  8225. CHUNK_ALLOC_FORCE);
  8226. /*
  8227. * ENOSPC is allowed here, we may have enough space
  8228. * already allocated at the new raid level to
  8229. * carry on
  8230. */
  8231. if (ret == -ENOSPC)
  8232. ret = 0;
  8233. if (ret < 0)
  8234. goto out;
  8235. }
  8236. ret = inc_block_group_ro(cache, 0);
  8237. if (!ret)
  8238. goto out;
  8239. alloc_flags = get_alloc_profile(fs_info, cache->space_info->flags);
  8240. ret = do_chunk_alloc(trans, alloc_flags, CHUNK_ALLOC_FORCE);
  8241. if (ret < 0)
  8242. goto out;
  8243. ret = inc_block_group_ro(cache, 0);
  8244. out:
  8245. if (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM) {
  8246. alloc_flags = update_block_group_flags(fs_info, cache->flags);
  8247. mutex_lock(&fs_info->chunk_mutex);
  8248. check_system_chunk(trans, alloc_flags);
  8249. mutex_unlock(&fs_info->chunk_mutex);
  8250. }
  8251. mutex_unlock(&fs_info->ro_block_group_mutex);
  8252. btrfs_end_transaction(trans);
  8253. return ret;
  8254. }
  8255. int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans, u64 type)
  8256. {
  8257. u64 alloc_flags = get_alloc_profile(trans->fs_info, type);
  8258. return do_chunk_alloc(trans, alloc_flags, CHUNK_ALLOC_FORCE);
  8259. }
  8260. /*
8261. * helper to account the unused space of all the readonly block groups in the
8262. * space_info. Takes mirrors into account.
  8263. */
  8264. u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo)
  8265. {
  8266. struct btrfs_block_group_cache *block_group;
  8267. u64 free_bytes = 0;
  8268. int factor;
  8269. /* It's df, we don't care if it's racy */
  8270. if (list_empty(&sinfo->ro_bgs))
  8271. return 0;
  8272. spin_lock(&sinfo->lock);
  8273. list_for_each_entry(block_group, &sinfo->ro_bgs, ro_list) {
  8274. spin_lock(&block_group->lock);
  8275. if (!block_group->ro) {
  8276. spin_unlock(&block_group->lock);
  8277. continue;
  8278. }
  8279. factor = btrfs_bg_type_to_factor(block_group->flags);
  8280. free_bytes += (block_group->key.offset -
  8281. btrfs_block_group_used(&block_group->item)) *
  8282. factor;
  8283. spin_unlock(&block_group->lock);
  8284. }
  8285. spin_unlock(&sinfo->lock);
  8286. return free_bytes;
  8287. }
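/*
 * Undo one btrfs_inc_block_group_ro(). The ro count nests, so only the
 * final decrement returns the group's unused bytes to the writable pool.
 */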
  8288. void btrfs_dec_block_group_ro(struct btrfs_block_group_cache *cache)
  8289. {
  8290. struct btrfs_space_info *sinfo = cache->space_info;
  8291. u64 num_bytes;
  8292. BUG_ON(!cache->ro);
  8293. spin_lock(&sinfo->lock);
  8294. spin_lock(&cache->lock);
  8295. if (!--cache->ro) {
  8296. num_bytes = cache->key.offset - cache->reserved -
  8297. cache->pinned - cache->bytes_super -
  8298. btrfs_block_group_used(&cache->item);
  8299. sinfo->bytes_readonly -= num_bytes;
  8300. list_del_init(&cache->ro_list);
  8301. }
  8302. spin_unlock(&cache->lock);
  8303. spin_unlock(&sinfo->lock);
  8304. }
  8305. /*
8306. * checks to see if it's even possible to relocate this block group.
8307. *
8308. * @return - -1 if it's not a good idea to relocate this block group, 0 if it's
8309. * ok to go ahead and try.
  8310. */
  8311. int btrfs_can_relocate(struct btrfs_fs_info *fs_info, u64 bytenr)
  8312. {
  8313. struct btrfs_root *root = fs_info->extent_root;
  8314. struct btrfs_block_group_cache *block_group;
  8315. struct btrfs_space_info *space_info;
  8316. struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
  8317. struct btrfs_device *device;
  8318. struct btrfs_trans_handle *trans;
  8319. u64 min_free;
  8320. u64 dev_min = 1;
  8321. u64 dev_nr = 0;
  8322. u64 target;
  8323. int debug;
  8324. int index;
  8325. int full = 0;
  8326. int ret = 0;
  8327. debug = btrfs_test_opt(fs_info, ENOSPC_DEBUG);
  8328. block_group = btrfs_lookup_block_group(fs_info, bytenr);
  8329. /* odd, couldn't find the block group, leave it alone */
  8330. if (!block_group) {
  8331. if (debug)
  8332. btrfs_warn(fs_info,
  8333. "can't find block group for bytenr %llu",
  8334. bytenr);
  8335. return -1;
  8336. }
  8337. min_free = btrfs_block_group_used(&block_group->item);
  8338. /* no bytes used, we're good */
  8339. if (!min_free)
  8340. goto out;
  8341. space_info = block_group->space_info;
  8342. spin_lock(&space_info->lock);
  8343. full = space_info->full;
  8344. /*
  8345. * if this is the last block group we have in this space, we can't
  8346. * relocate it unless we're able to allocate a new chunk below.
  8347. *
  8348. * Otherwise, we need to make sure we have room in the space to handle
  8349. * all of the extents from this block group. If we can, we're good
  8350. */
  8351. if ((space_info->total_bytes != block_group->key.offset) &&
  8352. (btrfs_space_info_used(space_info, false) + min_free <
  8353. space_info->total_bytes)) {
  8354. spin_unlock(&space_info->lock);
  8355. goto out;
  8356. }
  8357. spin_unlock(&space_info->lock);
  8358. /*
8359. * OK, we don't have enough space, but maybe we have free space on our
8360. * devices to allocate new chunks for relocation, so loop through our
8361. * alloc devices and guess if we have enough space. If this block
  8362. * group is going to be restriped, run checks against the target
  8363. * profile instead of the current one.
  8364. */
  8365. ret = -1;
  8366. /*
  8367. * index:
  8368. * 0: raid10
  8369. * 1: raid1
  8370. * 2: dup
  8371. * 3: raid0
  8372. * 4: single
  8373. */
  8374. target = get_restripe_target(fs_info, block_group->flags);
  8375. if (target) {
  8376. index = btrfs_bg_flags_to_raid_index(extended_to_chunk(target));
  8377. } else {
  8378. /*
  8379. * this is just a balance, so if we were marked as full
  8380. * we know there is no space for a new chunk
  8381. */
  8382. if (full) {
  8383. if (debug)
  8384. btrfs_warn(fs_info,
  8385. "no space to alloc new chunk for block group %llu",
  8386. block_group->key.objectid);
  8387. goto out;
  8388. }
  8389. index = btrfs_bg_flags_to_raid_index(block_group->flags);
  8390. }
  8391. if (index == BTRFS_RAID_RAID10) {
  8392. dev_min = 4;
  8393. /* Divide by 2 */
  8394. min_free >>= 1;
  8395. } else if (index == BTRFS_RAID_RAID1) {
  8396. dev_min = 2;
  8397. } else if (index == BTRFS_RAID_DUP) {
  8398. /* Multiply by 2 */
  8399. min_free <<= 1;
  8400. } else if (index == BTRFS_RAID_RAID0) {
  8401. dev_min = fs_devices->rw_devices;
  8402. min_free = div64_u64(min_free, dev_min);
  8403. }
  8404. /* We need to do this so that we can look at pending chunks */
  8405. trans = btrfs_join_transaction(root);
  8406. if (IS_ERR(trans)) {
  8407. ret = PTR_ERR(trans);
  8408. goto out;
  8409. }
  8410. mutex_lock(&fs_info->chunk_mutex);
  8411. list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
  8412. u64 dev_offset;
  8413. /*
  8414. * check to make sure we can actually find a chunk with enough
  8415. * space to fit our block group in.
  8416. */
  8417. if (device->total_bytes > device->bytes_used + min_free &&
  8418. !test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
  8419. ret = find_free_dev_extent(trans, device, min_free,
  8420. &dev_offset, NULL);
  8421. if (!ret)
  8422. dev_nr++;
  8423. if (dev_nr >= dev_min)
  8424. break;
  8425. ret = -1;
  8426. }
  8427. }
  8428. if (debug && ret == -1)
  8429. btrfs_warn(fs_info,
  8430. "no space to allocate a new chunk for block group %llu",
  8431. block_group->key.objectid);
  8432. mutex_unlock(&fs_info->chunk_mutex);
  8433. btrfs_end_transaction(trans);
  8434. out:
  8435. btrfs_put_block_group(block_group);
  8436. return ret;
  8437. }
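/*
 * Position @path at the first BLOCK_GROUP_ITEM with objectid >= @key and
 * cross-check it against the chunk mapping tree: a matching chunk must
 * exist with the same logical start, length and type. Returns 0 on a
 * validated hit, > 0 when there are no more block group items, -ENOENT or
 * -EUCLEAN on a chunk/block-group mismatch, or another negative errno.
 */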
  8438. static int find_first_block_group(struct btrfs_fs_info *fs_info,
  8439. struct btrfs_path *path,
  8440. struct btrfs_key *key)
  8441. {
  8442. struct btrfs_root *root = fs_info->extent_root;
  8443. int ret = 0;
  8444. struct btrfs_key found_key;
  8445. struct extent_buffer *leaf;
  8446. struct btrfs_block_group_item bg;
  8447. u64 flags;
  8448. int slot;
  8449. ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
  8450. if (ret < 0)
  8451. goto out;
  8452. while (1) {
  8453. slot = path->slots[0];
  8454. leaf = path->nodes[0];
  8455. if (slot >= btrfs_header_nritems(leaf)) {
  8456. ret = btrfs_next_leaf(root, path);
  8457. if (ret == 0)
  8458. continue;
  8459. if (ret < 0)
  8460. goto out;
  8461. break;
  8462. }
  8463. btrfs_item_key_to_cpu(leaf, &found_key, slot);
  8464. if (found_key.objectid >= key->objectid &&
  8465. found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
  8466. struct extent_map_tree *em_tree;
  8467. struct extent_map *em;
  8468. em_tree = &root->fs_info->mapping_tree.map_tree;
  8469. read_lock(&em_tree->lock);
  8470. em = lookup_extent_mapping(em_tree, found_key.objectid,
  8471. found_key.offset);
  8472. read_unlock(&em_tree->lock);
  8473. if (!em) {
  8474. btrfs_err(fs_info,
  8475. "logical %llu len %llu found bg but no related chunk",
  8476. found_key.objectid, found_key.offset);
  8477. ret = -ENOENT;
  8478. } else if (em->start != found_key.objectid ||
  8479. em->len != found_key.offset) {
  8480. btrfs_err(fs_info,
  8481. "block group %llu len %llu mismatch with chunk %llu len %llu",
  8482. found_key.objectid, found_key.offset,
  8483. em->start, em->len);
  8484. ret = -EUCLEAN;
  8485. } else {
  8486. read_extent_buffer(leaf, &bg,
  8487. btrfs_item_ptr_offset(leaf, slot),
  8488. sizeof(bg));
  8489. flags = btrfs_block_group_flags(&bg) &
  8490. BTRFS_BLOCK_GROUP_TYPE_MASK;
  8491. if (flags != (em->map_lookup->type &
  8492. BTRFS_BLOCK_GROUP_TYPE_MASK)) {
  8493. btrfs_err(fs_info,
  8494. "block group %llu len %llu type flags 0x%llx mismatch with chunk type flags 0x%llx",
  8495. found_key.objectid,
  8496. found_key.offset, flags,
  8497. (BTRFS_BLOCK_GROUP_TYPE_MASK &
  8498. em->map_lookup->type));
  8499. ret = -EUCLEAN;
  8500. } else {
  8501. ret = 0;
  8502. }
  8503. }
  8504. free_extent_map(em);
  8505. goto out;
  8506. }
  8507. path->slots[0]++;
  8508. }
  8509. out:
  8510. return ret;
  8511. }
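/*
 * Release the cached free space inodes held by the block groups. Used on
 * teardown so the final iput() of each cache inode happens here; the scan
 * restarts from offset 0 until a full pass finds no group still holding
 * an iref.
 */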
  8512. void btrfs_put_block_group_cache(struct btrfs_fs_info *info)
  8513. {
  8514. struct btrfs_block_group_cache *block_group;
  8515. u64 last = 0;
  8516. while (1) {
  8517. struct inode *inode;
  8518. block_group = btrfs_lookup_first_block_group(info, last);
  8519. while (block_group) {
  8520. wait_block_group_cache_done(block_group);
  8521. spin_lock(&block_group->lock);
  8522. if (block_group->iref)
  8523. break;
  8524. spin_unlock(&block_group->lock);
  8525. block_group = next_block_group(info, block_group);
  8526. }
  8527. if (!block_group) {
  8528. if (last == 0)
  8529. break;
  8530. last = 0;
  8531. continue;
  8532. }
  8533. inode = block_group->inode;
  8534. block_group->iref = 0;
  8535. block_group->inode = NULL;
  8536. spin_unlock(&block_group->lock);
  8537. ASSERT(block_group->io_ctl.inode == NULL);
  8538. iput(inode);
  8539. last = block_group->key.objectid + block_group->key.offset;
  8540. btrfs_put_block_group(block_group);
  8541. }
  8542. }
  8543. /*
  8544. * Must be called only after stopping all workers, since we could have block
  8545. * group caching kthreads running, and therefore they could race with us if we
  8546. * freed the block groups before stopping them.
  8547. */
  8548. int btrfs_free_block_groups(struct btrfs_fs_info *info)
  8549. {
  8550. struct btrfs_block_group_cache *block_group;
  8551. struct btrfs_space_info *space_info;
  8552. struct btrfs_caching_control *caching_ctl;
  8553. struct rb_node *n;
  8554. down_write(&info->commit_root_sem);
  8555. while (!list_empty(&info->caching_block_groups)) {
  8556. caching_ctl = list_entry(info->caching_block_groups.next,
  8557. struct btrfs_caching_control, list);
  8558. list_del(&caching_ctl->list);
  8559. put_caching_control(caching_ctl);
  8560. }
  8561. up_write(&info->commit_root_sem);
  8562. spin_lock(&info->unused_bgs_lock);
  8563. while (!list_empty(&info->unused_bgs)) {
  8564. block_group = list_first_entry(&info->unused_bgs,
  8565. struct btrfs_block_group_cache,
  8566. bg_list);
  8567. list_del_init(&block_group->bg_list);
  8568. btrfs_put_block_group(block_group);
  8569. }
  8570. spin_unlock(&info->unused_bgs_lock);
  8571. spin_lock(&info->block_group_cache_lock);
  8572. while ((n = rb_last(&info->block_group_cache_tree)) != NULL) {
  8573. block_group = rb_entry(n, struct btrfs_block_group_cache,
  8574. cache_node);
  8575. rb_erase(&block_group->cache_node,
  8576. &info->block_group_cache_tree);
  8577. RB_CLEAR_NODE(&block_group->cache_node);
  8578. spin_unlock(&info->block_group_cache_lock);
  8579. down_write(&block_group->space_info->groups_sem);
  8580. list_del(&block_group->list);
  8581. up_write(&block_group->space_info->groups_sem);
  8582. /*
  8583. * We haven't cached this block group, which means we could
  8584. * possibly have excluded extents on this block group.
  8585. */
  8586. if (block_group->cached == BTRFS_CACHE_NO ||
  8587. block_group->cached == BTRFS_CACHE_ERROR)
  8588. free_excluded_extents(block_group);
  8589. btrfs_remove_free_space_cache(block_group);
  8590. ASSERT(block_group->cached != BTRFS_CACHE_STARTED);
  8591. ASSERT(list_empty(&block_group->dirty_list));
  8592. ASSERT(list_empty(&block_group->io_list));
  8593. ASSERT(list_empty(&block_group->bg_list));
  8594. ASSERT(atomic_read(&block_group->count) == 1);
  8595. btrfs_put_block_group(block_group);
  8596. spin_lock(&info->block_group_cache_lock);
  8597. }
  8598. spin_unlock(&info->block_group_cache_lock);
  8599. /* now that all the block groups are freed, go through and
  8600. * free all the space_info structs. This is only called during
  8601. * the final stages of unmount, and so we know nobody is
  8602. * using them. We call synchronize_rcu() once before we start,
  8603. * just to be on the safe side.
  8604. */
  8605. synchronize_rcu();
  8606. release_global_block_rsv(info);
  8607. while (!list_empty(&info->space_info)) {
  8608. int i;
  8609. space_info = list_entry(info->space_info.next,
  8610. struct btrfs_space_info,
  8611. list);
  8612. /*
  8613. * Do not hide this behind enospc_debug, this is actually
  8614. * important and indicates a real bug if this happens.
  8615. */
  8616. if (WARN_ON(space_info->bytes_pinned > 0 ||
  8617. space_info->bytes_reserved > 0 ||
  8618. space_info->bytes_may_use > 0))
  8619. dump_space_info(info, space_info, 0, 0);
  8620. list_del(&space_info->list);
  8621. for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
  8622. struct kobject *kobj;
  8623. kobj = space_info->block_group_kobjs[i];
  8624. space_info->block_group_kobjs[i] = NULL;
  8625. if (kobj) {
  8626. kobject_del(kobj);
  8627. kobject_put(kobj);
  8628. }
  8629. }
  8630. kobject_del(&space_info->kobj);
  8631. kobject_put(&space_info->kobj);
  8632. }
  8633. return 0;
  8634. }
  8635. /* link_block_group will queue up kobjects to add when we're reclaim-safe */
  8636. void btrfs_add_raid_kobjects(struct btrfs_fs_info *fs_info)
  8637. {
  8638. struct btrfs_space_info *space_info;
  8639. struct raid_kobject *rkobj;
  8640. LIST_HEAD(list);
  8641. int index;
  8642. int ret = 0;
  8643. spin_lock(&fs_info->pending_raid_kobjs_lock);
  8644. list_splice_init(&fs_info->pending_raid_kobjs, &list);
  8645. spin_unlock(&fs_info->pending_raid_kobjs_lock);
  8646. list_for_each_entry(rkobj, &list, list) {
  8647. space_info = __find_space_info(fs_info, rkobj->flags);
  8648. index = btrfs_bg_flags_to_raid_index(rkobj->flags);
  8649. ret = kobject_add(&rkobj->kobj, &space_info->kobj,
  8650. "%s", get_raid_name(index));
  8651. if (ret) {
  8652. kobject_put(&rkobj->kobj);
  8653. break;
  8654. }
  8655. }
  8656. if (ret)
  8657. btrfs_warn(fs_info,
  8658. "failed to add kobject for block cache, ignoring");
  8659. }
  8660. static void link_block_group(struct btrfs_block_group_cache *cache)
  8661. {
  8662. struct btrfs_space_info *space_info = cache->space_info;
  8663. struct btrfs_fs_info *fs_info = cache->fs_info;
  8664. int index = btrfs_bg_flags_to_raid_index(cache->flags);
  8665. bool first = false;
  8666. down_write(&space_info->groups_sem);
  8667. if (list_empty(&space_info->block_groups[index]))
  8668. first = true;
  8669. list_add_tail(&cache->list, &space_info->block_groups[index]);
  8670. up_write(&space_info->groups_sem);
  8671. if (first) {
  8672. struct raid_kobject *rkobj = kzalloc(sizeof(*rkobj), GFP_NOFS);
  8673. if (!rkobj) {
  8674. btrfs_warn(cache->fs_info,
  8675. "couldn't alloc memory for raid level kobject");
  8676. return;
  8677. }
  8678. rkobj->flags = cache->flags;
  8679. kobject_init(&rkobj->kobj, &btrfs_raid_ktype);
  8680. spin_lock(&fs_info->pending_raid_kobjs_lock);
  8681. list_add_tail(&rkobj->list, &fs_info->pending_raid_kobjs);
  8682. spin_unlock(&fs_info->pending_raid_kobjs_lock);
  8683. space_info->block_group_kobjs[index] = &rkobj->kobj;
  8684. }
  8685. }
  8686. static struct btrfs_block_group_cache *
  8687. btrfs_create_block_group_cache(struct btrfs_fs_info *fs_info,
  8688. u64 start, u64 size)
  8689. {
  8690. struct btrfs_block_group_cache *cache;
  8691. cache = kzalloc(sizeof(*cache), GFP_NOFS);
  8692. if (!cache)
  8693. return NULL;
  8694. cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
  8695. GFP_NOFS);
  8696. if (!cache->free_space_ctl) {
  8697. kfree(cache);
  8698. return NULL;
  8699. }
  8700. cache->key.objectid = start;
  8701. cache->key.offset = size;
  8702. cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
  8703. cache->fs_info = fs_info;
  8704. cache->full_stripe_len = btrfs_full_stripe_len(fs_info, start);
  8705. set_free_space_tree_thresholds(cache);
  8706. atomic_set(&cache->count, 1);
  8707. spin_lock_init(&cache->lock);
  8708. init_rwsem(&cache->data_rwsem);
  8709. INIT_LIST_HEAD(&cache->list);
  8710. INIT_LIST_HEAD(&cache->cluster_list);
  8711. INIT_LIST_HEAD(&cache->bg_list);
  8712. INIT_LIST_HEAD(&cache->ro_list);
  8713. INIT_LIST_HEAD(&cache->dirty_list);
  8714. INIT_LIST_HEAD(&cache->io_list);
  8715. btrfs_init_free_space_ctl(cache);
  8716. atomic_set(&cache->trimming, 0);
  8717. mutex_init(&cache->free_space_lock);
  8718. btrfs_init_full_stripe_locks_tree(&cache->full_stripe_locks_root);
  8719. return cache;
  8720. }
  8721. /*
  8722. * Iterate all chunks and verify that each of them has the corresponding block
  8723. * group
  8724. */
  8725. static int check_chunk_block_group_mappings(struct btrfs_fs_info *fs_info)
  8726. {
  8727. struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
  8728. struct extent_map *em;
  8729. struct btrfs_block_group_cache *bg;
  8730. u64 start = 0;
  8731. int ret = 0;
  8732. while (1) {
  8733. read_lock(&map_tree->map_tree.lock);
  8734. /*
  8735. * lookup_extent_mapping will return the first extent map
  8736. * intersecting the range, so setting @len to 1 is enough to
  8737. * get the first chunk.
  8738. */
  8739. em = lookup_extent_mapping(&map_tree->map_tree, start, 1);
  8740. read_unlock(&map_tree->map_tree.lock);
  8741. if (!em)
  8742. break;
  8743. bg = btrfs_lookup_block_group(fs_info, em->start);
  8744. if (!bg) {
  8745. btrfs_err(fs_info,
  8746. "chunk start=%llu len=%llu doesn't have corresponding block group",
  8747. em->start, em->len);
  8748. ret = -EUCLEAN;
  8749. free_extent_map(em);
  8750. break;
  8751. }
  8752. if (bg->key.objectid != em->start ||
  8753. bg->key.offset != em->len ||
  8754. (bg->flags & BTRFS_BLOCK_GROUP_TYPE_MASK) !=
  8755. (em->map_lookup->type & BTRFS_BLOCK_GROUP_TYPE_MASK)) {
  8756. btrfs_err(fs_info,
  8757. "chunk start=%llu len=%llu flags=0x%llx doesn't match block group start=%llu len=%llu flags=0x%llx",
  8758. em->start, em->len,
  8759. em->map_lookup->type & BTRFS_BLOCK_GROUP_TYPE_MASK,
  8760. bg->key.objectid, bg->key.offset,
  8761. bg->flags & BTRFS_BLOCK_GROUP_TYPE_MASK);
  8762. ret = -EUCLEAN;
  8763. free_extent_map(em);
  8764. btrfs_put_block_group(bg);
  8765. break;
  8766. }
  8767. start = em->start + em->len;
  8768. free_extent_map(em);
  8769. btrfs_put_block_group(bg);
  8770. }
  8771. return ret;
  8772. }
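/*
 * Mount-time construction of the in-memory block group cache: walk every
 * BLOCK_GROUP_ITEM (via find_first_block_group() above), create and link
 * a cache structure for each, wire up the owning space_info, and finally
 * verify with check_chunk_block_group_mappings() that chunks and block
 * groups pair up one to one.
 */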
  8773. int btrfs_read_block_groups(struct btrfs_fs_info *info)
  8774. {
  8775. struct btrfs_path *path;
  8776. int ret;
  8777. struct btrfs_block_group_cache *cache;
  8778. struct btrfs_space_info *space_info;
  8779. struct btrfs_key key;
  8780. struct btrfs_key found_key;
  8781. struct extent_buffer *leaf;
  8782. int need_clear = 0;
  8783. u64 cache_gen;
  8784. u64 feature;
  8785. int mixed;
  8786. feature = btrfs_super_incompat_flags(info->super_copy);
  8787. mixed = !!(feature & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS);
  8788. key.objectid = 0;
  8789. key.offset = 0;
  8790. key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
  8791. path = btrfs_alloc_path();
  8792. if (!path)
  8793. return -ENOMEM;
  8794. path->reada = READA_FORWARD;
  8795. cache_gen = btrfs_super_cache_generation(info->super_copy);
  8796. if (btrfs_test_opt(info, SPACE_CACHE) &&
  8797. btrfs_super_generation(info->super_copy) != cache_gen)
  8798. need_clear = 1;
  8799. if (btrfs_test_opt(info, CLEAR_CACHE))
  8800. need_clear = 1;
  8801. while (1) {
  8802. ret = find_first_block_group(info, path, &key);
  8803. if (ret > 0)
  8804. break;
  8805. if (ret != 0)
  8806. goto error;
  8807. leaf = path->nodes[0];
  8808. btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
  8809. cache = btrfs_create_block_group_cache(info, found_key.objectid,
  8810. found_key.offset);
  8811. if (!cache) {
  8812. ret = -ENOMEM;
  8813. goto error;
  8814. }
  8815. if (need_clear) {
  8816. /*
  8817. * When we mount with old space cache, we need to
  8818. * set BTRFS_DC_CLEAR and set dirty flag.
  8819. *
  8820. * a) Setting 'BTRFS_DC_CLEAR' makes sure that we
  8821. * truncate the old free space cache inode and
  8822. * setup a new one.
  8823. * b) Setting 'dirty flag' makes sure that we flush
  8824. * the new space cache info onto disk.
  8825. */
  8826. if (btrfs_test_opt(info, SPACE_CACHE))
  8827. cache->disk_cache_state = BTRFS_DC_CLEAR;
  8828. }
  8829. read_extent_buffer(leaf, &cache->item,
  8830. btrfs_item_ptr_offset(leaf, path->slots[0]),
  8831. sizeof(cache->item));
  8832. cache->flags = btrfs_block_group_flags(&cache->item);
  8833. if (!mixed &&
  8834. ((cache->flags & BTRFS_BLOCK_GROUP_METADATA) &&
  8835. (cache->flags & BTRFS_BLOCK_GROUP_DATA))) {
  8836. btrfs_err(info,
  8837. "bg %llu is a mixed block group but filesystem hasn't enabled mixed block groups",
8838. cache->key.objectid);
btrfs_put_block_group(cache);
8839. ret = -EINVAL;
8840. goto error;
  8841. }
  8842. key.objectid = found_key.objectid + found_key.offset;
  8843. btrfs_release_path(path);
  8844. /*
  8845. * We need to exclude the super stripes now so that the space
  8846. * info has super bytes accounted for, otherwise we'll think
  8847. * we have more space than we actually do.
  8848. */
  8849. ret = exclude_super_stripes(cache);
  8850. if (ret) {
  8851. /*
  8852. * We may have excluded something, so call this just in
  8853. * case.
  8854. */
  8855. free_excluded_extents(cache);
  8856. btrfs_put_block_group(cache);
  8857. goto error;
  8858. }
  8859. /*
  8860. * check for two cases, either we are full, and therefore
  8861. * don't need to bother with the caching work since we won't
  8862. * find any space, or we are empty, and we can just add all
8863. * the space in and be done with it. This saves us a lot of
  8864. * time, particularly in the full case.
  8865. */
  8866. if (found_key.offset == btrfs_block_group_used(&cache->item)) {
  8867. cache->last_byte_to_unpin = (u64)-1;
  8868. cache->cached = BTRFS_CACHE_FINISHED;
  8869. free_excluded_extents(cache);
  8870. } else if (btrfs_block_group_used(&cache->item) == 0) {
  8871. cache->last_byte_to_unpin = (u64)-1;
  8872. cache->cached = BTRFS_CACHE_FINISHED;
  8873. add_new_free_space(cache, found_key.objectid,
  8874. found_key.objectid +
  8875. found_key.offset);
  8876. free_excluded_extents(cache);
  8877. }
  8878. ret = btrfs_add_block_group_cache(info, cache);
  8879. if (ret) {
  8880. btrfs_remove_free_space_cache(cache);
  8881. btrfs_put_block_group(cache);
  8882. goto error;
  8883. }
  8884. trace_btrfs_add_block_group(info, cache, 0);
  8885. update_space_info(info, cache->flags, found_key.offset,
  8886. btrfs_block_group_used(&cache->item),
  8887. cache->bytes_super, &space_info);
  8888. cache->space_info = space_info;
  8889. link_block_group(cache);
  8890. set_avail_alloc_bits(info, cache->flags);
  8891. if (btrfs_chunk_readonly(info, cache->key.objectid)) {
  8892. inc_block_group_ro(cache, 1);
  8893. } else if (btrfs_block_group_used(&cache->item) == 0) {
  8894. ASSERT(list_empty(&cache->bg_list));
  8895. btrfs_mark_bg_unused(cache);
  8896. }
  8897. }
  8898. list_for_each_entry_rcu(space_info, &info->space_info, list) {
  8899. if (!(get_alloc_profile(info, space_info->flags) &
  8900. (BTRFS_BLOCK_GROUP_RAID10 |
  8901. BTRFS_BLOCK_GROUP_RAID1 |
  8902. BTRFS_BLOCK_GROUP_RAID5 |
  8903. BTRFS_BLOCK_GROUP_RAID6 |
  8904. BTRFS_BLOCK_GROUP_DUP)))
  8905. continue;
  8906. /*
  8907. * avoid allocating from un-mirrored block group if there are
  8908. * mirrored block groups.
  8909. */
  8910. list_for_each_entry(cache,
  8911. &space_info->block_groups[BTRFS_RAID_RAID0],
  8912. list)
  8913. inc_block_group_ro(cache, 1);
  8914. list_for_each_entry(cache,
  8915. &space_info->block_groups[BTRFS_RAID_SINGLE],
  8916. list)
  8917. inc_block_group_ro(cache, 1);
  8918. }
  8919. btrfs_add_raid_kobjects(info);
  8920. init_global_block_rsv(info);
  8921. ret = check_chunk_block_group_mappings(info);
  8922. error:
  8923. btrfs_free_path(path);
  8924. return ret;
  8925. }
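/*
 * Flush the block groups queued on trans->new_bgs by btrfs_make_block_group()
 * below: insert the block group item, finish the chunk allocation and add
 * the free space tree entries. Called while ending/committing a
 * transaction; failures abort the transaction instead of being returned.
 */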
  8926. void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans)
  8927. {
  8928. struct btrfs_fs_info *fs_info = trans->fs_info;
  8929. struct btrfs_block_group_cache *block_group;
  8930. struct btrfs_root *extent_root = fs_info->extent_root;
  8931. struct btrfs_block_group_item item;
  8932. struct btrfs_key key;
  8933. int ret = 0;
  8934. if (!trans->can_flush_pending_bgs)
  8935. return;
  8936. while (!list_empty(&trans->new_bgs)) {
  8937. block_group = list_first_entry(&trans->new_bgs,
  8938. struct btrfs_block_group_cache,
  8939. bg_list);
  8940. if (ret)
  8941. goto next;
  8942. spin_lock(&block_group->lock);
  8943. memcpy(&item, &block_group->item, sizeof(item));
  8944. memcpy(&key, &block_group->key, sizeof(key));
  8945. spin_unlock(&block_group->lock);
  8946. ret = btrfs_insert_item(trans, extent_root, &key, &item,
  8947. sizeof(item));
  8948. if (ret)
  8949. btrfs_abort_transaction(trans, ret);
  8950. ret = btrfs_finish_chunk_alloc(trans, key.objectid, key.offset);
  8951. if (ret)
  8952. btrfs_abort_transaction(trans, ret);
  8953. add_block_group_free_space(trans, block_group);
  8954. /* already aborted the transaction if it failed. */
  8955. next:
  8956. list_del_init(&block_group->bg_list);
  8957. }
  8958. btrfs_trans_release_chunk_metadata(trans);
  8959. }
  8960. int btrfs_make_block_group(struct btrfs_trans_handle *trans, u64 bytes_used,
  8961. u64 type, u64 chunk_offset, u64 size)
  8962. {
  8963. struct btrfs_fs_info *fs_info = trans->fs_info;
  8964. struct btrfs_block_group_cache *cache;
  8965. int ret;
  8966. btrfs_set_log_full_commit(fs_info, trans);
  8967. cache = btrfs_create_block_group_cache(fs_info, chunk_offset, size);
  8968. if (!cache)
  8969. return -ENOMEM;
  8970. btrfs_set_block_group_used(&cache->item, bytes_used);
  8971. btrfs_set_block_group_chunk_objectid(&cache->item,
  8972. BTRFS_FIRST_CHUNK_TREE_OBJECTID);
  8973. btrfs_set_block_group_flags(&cache->item, type);
  8974. cache->flags = type;
  8975. cache->last_byte_to_unpin = (u64)-1;
  8976. cache->cached = BTRFS_CACHE_FINISHED;
  8977. cache->needs_free_space = 1;
  8978. ret = exclude_super_stripes(cache);
  8979. if (ret) {
  8980. /*
  8981. * We may have excluded something, so call this just in
  8982. * case.
  8983. */
  8984. free_excluded_extents(cache);
  8985. btrfs_put_block_group(cache);
  8986. return ret;
  8987. }
  8988. add_new_free_space(cache, chunk_offset, chunk_offset + size);
  8989. free_excluded_extents(cache);
  8990. #ifdef CONFIG_BTRFS_DEBUG
  8991. if (btrfs_should_fragment_free_space(cache)) {
  8992. u64 new_bytes_used = size - bytes_used;
  8993. bytes_used += new_bytes_used >> 1;
  8994. fragment_free_space(cache);
  8995. }
  8996. #endif
  8997. /*
  8998. * Ensure the corresponding space_info object is created and
  8999. * assigned to our block group. We want our bg to be added to the rbtree
  9000. * with its ->space_info set.
  9001. */
  9002. cache->space_info = __find_space_info(fs_info, cache->flags);
  9003. ASSERT(cache->space_info);
  9004. ret = btrfs_add_block_group_cache(fs_info, cache);
  9005. if (ret) {
  9006. btrfs_remove_free_space_cache(cache);
  9007. btrfs_put_block_group(cache);
  9008. return ret;
  9009. }
  9010. /*
  9011. * Now that our block group has its ->space_info set and is inserted in
  9012. * the rbtree, update the space info's counters.
  9013. */
  9014. trace_btrfs_add_block_group(fs_info, cache, 1);
  9015. update_space_info(fs_info, cache->flags, size, bytes_used,
  9016. cache->bytes_super, &cache->space_info);
  9017. update_global_block_rsv(fs_info);
  9018. link_block_group(cache);
  9019. list_add_tail(&cache->bg_list, &trans->new_bgs);
  9020. set_avail_alloc_bits(fs_info, type);
  9021. return 0;
  9022. }
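/*
 * A rough sketch of how a new block group comes to life (simplified, the
 * real call chain runs through the chunk allocator in volumes.c):
 *
 *	do_chunk_alloc()                    allocate the chunk
 *	  btrfs_make_block_group()          queue the cache on trans->new_bgs
 *	btrfs_create_pending_block_groups() persist the item at commit time
 */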
  9023. static void clear_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
  9024. {
  9025. u64 extra_flags = chunk_to_extended(flags) &
  9026. BTRFS_EXTENDED_PROFILE_MASK;
  9027. write_seqlock(&fs_info->profiles_lock);
  9028. if (flags & BTRFS_BLOCK_GROUP_DATA)
  9029. fs_info->avail_data_alloc_bits &= ~extra_flags;
  9030. if (flags & BTRFS_BLOCK_GROUP_METADATA)
  9031. fs_info->avail_metadata_alloc_bits &= ~extra_flags;
  9032. if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
  9033. fs_info->avail_system_alloc_bits &= ~extra_flags;
  9034. write_sequnlock(&fs_info->profiles_lock);
  9035. }
  9036. int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
  9037. u64 group_start, struct extent_map *em)
  9038. {
  9039. struct btrfs_fs_info *fs_info = trans->fs_info;
  9040. struct btrfs_root *root = fs_info->extent_root;
  9041. struct btrfs_path *path;
  9042. struct btrfs_block_group_cache *block_group;
  9043. struct btrfs_free_cluster *cluster;
  9044. struct btrfs_root *tree_root = fs_info->tree_root;
  9045. struct btrfs_key key;
  9046. struct inode *inode;
  9047. struct kobject *kobj = NULL;
  9048. int ret;
  9049. int index;
  9050. int factor;
  9051. struct btrfs_caching_control *caching_ctl = NULL;
  9052. bool remove_em;
  9053. block_group = btrfs_lookup_block_group(fs_info, group_start);
  9054. BUG_ON(!block_group);
  9055. BUG_ON(!block_group->ro);
  9056. trace_btrfs_remove_block_group(block_group);
  9057. /*
  9058. * Free the reserved super bytes from this block group before
9059. * removing it.
  9060. */
  9061. free_excluded_extents(block_group);
  9062. btrfs_free_ref_tree_range(fs_info, block_group->key.objectid,
  9063. block_group->key.offset);
  9064. memcpy(&key, &block_group->key, sizeof(key));
  9065. index = btrfs_bg_flags_to_raid_index(block_group->flags);
  9066. factor = btrfs_bg_type_to_factor(block_group->flags);
  9067. /* make sure this block group isn't part of an allocation cluster */
  9068. cluster = &fs_info->data_alloc_cluster;
  9069. spin_lock(&cluster->refill_lock);
  9070. btrfs_return_cluster_to_free_space(block_group, cluster);
  9071. spin_unlock(&cluster->refill_lock);
  9072. /*
  9073. * make sure this block group isn't part of a metadata
  9074. * allocation cluster
  9075. */
  9076. cluster = &fs_info->meta_alloc_cluster;
  9077. spin_lock(&cluster->refill_lock);
  9078. btrfs_return_cluster_to_free_space(block_group, cluster);
  9079. spin_unlock(&cluster->refill_lock);
  9080. path = btrfs_alloc_path();
  9081. if (!path) {
  9082. ret = -ENOMEM;
  9083. goto out;
  9084. }
  9085. /*
  9086. * get the inode first so any iput calls done for the io_list
  9087. * aren't the final iput (no unlinks allowed now)
  9088. */
  9089. inode = lookup_free_space_inode(fs_info, block_group, path);
  9090. mutex_lock(&trans->transaction->cache_write_mutex);
  9091. /*
9092. * make sure our free space cache IO is done before removing the
  9093. * free space inode
  9094. */
  9095. spin_lock(&trans->transaction->dirty_bgs_lock);
  9096. if (!list_empty(&block_group->io_list)) {
  9097. list_del_init(&block_group->io_list);
  9098. WARN_ON(!IS_ERR(inode) && inode != block_group->io_ctl.inode);
  9099. spin_unlock(&trans->transaction->dirty_bgs_lock);
  9100. btrfs_wait_cache_io(trans, block_group, path);
  9101. btrfs_put_block_group(block_group);
  9102. spin_lock(&trans->transaction->dirty_bgs_lock);
  9103. }
  9104. if (!list_empty(&block_group->dirty_list)) {
  9105. list_del_init(&block_group->dirty_list);
  9106. btrfs_put_block_group(block_group);
  9107. }
  9108. spin_unlock(&trans->transaction->dirty_bgs_lock);
  9109. mutex_unlock(&trans->transaction->cache_write_mutex);
  9110. if (!IS_ERR(inode)) {
  9111. ret = btrfs_orphan_add(trans, BTRFS_I(inode));
  9112. if (ret) {
  9113. btrfs_add_delayed_iput(inode);
  9114. goto out;
  9115. }
  9116. clear_nlink(inode);
9117. /* One for the block group's ref */
  9118. spin_lock(&block_group->lock);
  9119. if (block_group->iref) {
  9120. block_group->iref = 0;
  9121. block_group->inode = NULL;
  9122. spin_unlock(&block_group->lock);
  9123. iput(inode);
  9124. } else {
  9125. spin_unlock(&block_group->lock);
  9126. }
  9127. /* One for our lookup ref */
  9128. btrfs_add_delayed_iput(inode);
  9129. }
  9130. key.objectid = BTRFS_FREE_SPACE_OBJECTID;
  9131. key.offset = block_group->key.objectid;
  9132. key.type = 0;
  9133. ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1);
  9134. if (ret < 0)
  9135. goto out;
  9136. if (ret > 0)
  9137. btrfs_release_path(path);
  9138. if (ret == 0) {
  9139. ret = btrfs_del_item(trans, tree_root, path);
  9140. if (ret)
  9141. goto out;
  9142. btrfs_release_path(path);
  9143. }
  9144. spin_lock(&fs_info->block_group_cache_lock);
  9145. rb_erase(&block_group->cache_node,
  9146. &fs_info->block_group_cache_tree);
  9147. RB_CLEAR_NODE(&block_group->cache_node);
  9148. if (fs_info->first_logical_byte == block_group->key.objectid)
  9149. fs_info->first_logical_byte = (u64)-1;
  9150. spin_unlock(&fs_info->block_group_cache_lock);
  9151. down_write(&block_group->space_info->groups_sem);
  9152. /*
  9153. * we must use list_del_init so people can check to see if they
  9154. * are still on the list after taking the semaphore
  9155. */
  9156. list_del_init(&block_group->list);
  9157. if (list_empty(&block_group->space_info->block_groups[index])) {
  9158. kobj = block_group->space_info->block_group_kobjs[index];
  9159. block_group->space_info->block_group_kobjs[index] = NULL;
  9160. clear_avail_alloc_bits(fs_info, block_group->flags);
  9161. }
  9162. up_write(&block_group->space_info->groups_sem);
  9163. if (kobj) {
  9164. kobject_del(kobj);
  9165. kobject_put(kobj);
  9166. }
  9167. if (block_group->has_caching_ctl)
  9168. caching_ctl = get_caching_control(block_group);
  9169. if (block_group->cached == BTRFS_CACHE_STARTED)
  9170. wait_block_group_cache_done(block_group);
  9171. if (block_group->has_caching_ctl) {
  9172. down_write(&fs_info->commit_root_sem);
  9173. if (!caching_ctl) {
  9174. struct btrfs_caching_control *ctl;
  9175. list_for_each_entry(ctl,
  9176. &fs_info->caching_block_groups, list)
  9177. if (ctl->block_group == block_group) {
  9178. caching_ctl = ctl;
  9179. refcount_inc(&caching_ctl->count);
  9180. break;
  9181. }
  9182. }
  9183. if (caching_ctl)
  9184. list_del_init(&caching_ctl->list);
  9185. up_write(&fs_info->commit_root_sem);
  9186. if (caching_ctl) {
  9187. /* Once for the caching bgs list and once for us. */
  9188. put_caching_control(caching_ctl);
  9189. put_caching_control(caching_ctl);
  9190. }
  9191. }
  9192. spin_lock(&trans->transaction->dirty_bgs_lock);
9193. WARN_ON(!list_empty(&block_group->dirty_list));
9196. WARN_ON(!list_empty(&block_group->io_list));
  9199. spin_unlock(&trans->transaction->dirty_bgs_lock);
  9200. btrfs_remove_free_space_cache(block_group);
  9201. spin_lock(&block_group->space_info->lock);
  9202. list_del_init(&block_group->ro_list);
  9203. if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
  9204. WARN_ON(block_group->space_info->total_bytes
  9205. < block_group->key.offset);
  9206. WARN_ON(block_group->space_info->bytes_readonly
  9207. < block_group->key.offset);
  9208. WARN_ON(block_group->space_info->disk_total
  9209. < block_group->key.offset * factor);
  9210. }
  9211. block_group->space_info->total_bytes -= block_group->key.offset;
  9212. block_group->space_info->bytes_readonly -= block_group->key.offset;
  9213. block_group->space_info->disk_total -= block_group->key.offset * factor;
  9214. spin_unlock(&block_group->space_info->lock);
  9215. memcpy(&key, &block_group->key, sizeof(key));
  9216. mutex_lock(&fs_info->chunk_mutex);
  9217. if (!list_empty(&em->list)) {
  9218. /* We're in the transaction->pending_chunks list. */
  9219. free_extent_map(em);
  9220. }
  9221. spin_lock(&block_group->lock);
  9222. block_group->removed = 1;
  9223. /*
  9224. * At this point trimming can't start on this block group, because we
  9225. * removed the block group from the tree fs_info->block_group_cache_tree
9226. * so no one can find it anymore, and even if someone already got this
  9227. * block group before we removed it from the rbtree, they have already
  9228. * incremented block_group->trimming - if they didn't, they won't find
  9229. * any free space entries because we already removed them all when we
  9230. * called btrfs_remove_free_space_cache().
  9231. *
  9232. * And we must not remove the extent map from the fs_info->mapping_tree
  9233. * to prevent the same logical address range and physical device space
  9234. * ranges from being reused for a new block group. This is because our
  9235. * fs trim operation (btrfs_trim_fs() / btrfs_ioctl_fitrim()) is
  9236. * completely transactionless, so while it is trimming a range the
  9237. * currently running transaction might finish and a new one start,
  9238. * allowing for new block groups to be created that can reuse the same
  9239. * physical device locations unless we take this special care.
  9240. *
  9241. * There may also be an implicit trim operation if the file system
  9242. * is mounted with -odiscard. The same protections must remain
  9243. * in place until the extents have been discarded completely when
  9244. * the transaction commit has completed.
  9245. */
  9246. remove_em = (atomic_read(&block_group->trimming) == 0);
  9247. /*
  9248. * Make sure a trimmer task always sees the em in the pinned_chunks list
  9249. * if it sees block_group->removed == 1 (needs to lock block_group->lock
  9250. * before checking block_group->removed).
  9251. */
  9252. if (!remove_em) {
  9253. /*
  9254. * Our em might be in trans->transaction->pending_chunks which
  9255. * is protected by fs_info->chunk_mutex ([lock|unlock]_chunks),
  9256. * and so is the fs_info->pinned_chunks list.
  9257. *
  9258. * So at this point we must be holding the chunk_mutex to avoid
  9259. * any races with chunk allocation (more specifically at
  9260. * volumes.c:contains_pending_extent()), to ensure it always
  9261. * sees the em, either in the pending_chunks list or in the
  9262. * pinned_chunks list.
  9263. */
  9264. list_move_tail(&em->list, &fs_info->pinned_chunks);
  9265. }
  9266. spin_unlock(&block_group->lock);
  9267. if (remove_em) {
  9268. struct extent_map_tree *em_tree;
  9269. em_tree = &fs_info->mapping_tree.map_tree;
  9270. write_lock(&em_tree->lock);
  9271. /*
  9272. * The em might be in the pending_chunks list, so make sure the
  9273. * chunk mutex is locked, since remove_extent_mapping() will
  9274. * delete us from that list.
  9275. */
  9276. remove_extent_mapping(em_tree, em);
  9277. write_unlock(&em_tree->lock);
  9278. /* once for the tree */
  9279. free_extent_map(em);
  9280. }
  9281. mutex_unlock(&fs_info->chunk_mutex);
  9282. ret = remove_block_group_free_space(trans, block_group);
  9283. if (ret)
  9284. goto out;
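/* Once for the lookup ref taken above, once for the rbtree's ref. */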
  9285. btrfs_put_block_group(block_group);
  9286. btrfs_put_block_group(block_group);
  9287. ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
  9288. if (ret > 0)
  9289. ret = -EIO;
  9290. if (ret < 0)
  9291. goto out;
  9292. ret = btrfs_del_item(trans, root, path);
  9293. out:
  9294. btrfs_free_path(path);
  9295. return ret;
  9296. }
  9297. struct btrfs_trans_handle *
  9298. btrfs_start_trans_remove_block_group(struct btrfs_fs_info *fs_info,
  9299. const u64 chunk_offset)
  9300. {
  9301. struct extent_map_tree *em_tree = &fs_info->mapping_tree.map_tree;
  9302. struct extent_map *em;
  9303. struct map_lookup *map;
  9304. unsigned int num_items;
  9305. read_lock(&em_tree->lock);
  9306. em = lookup_extent_mapping(em_tree, chunk_offset, 1);
  9307. read_unlock(&em_tree->lock);
  9308. ASSERT(em && em->start == chunk_offset);
  9309. /*
  9310. * We need to reserve 3 + N units from the metadata space info in order
  9311. * to remove a block group (done at btrfs_remove_chunk() and at
  9312. * btrfs_remove_block_group()), which are used for:
  9313. *
  9314. * 1 unit for adding the free space inode's orphan (located in the tree
  9315. * of tree roots).
  9316. * 1 unit for deleting the block group item (located in the extent
  9317. * tree).
  9318. * 1 unit for deleting the free space item (located in tree of tree
  9319. * roots).
  9320. * N units for deleting N device extent items corresponding to each
  9321. * stripe (located in the device tree).
  9322. *
  9323. * In order to remove a block group we also need to reserve units in the
  9324. * system space info in order to update the chunk tree (update one or
  9325. * more device items and remove one chunk item), but this is done at
  9326. * btrfs_remove_chunk() through a call to check_system_chunk().
  9327. */
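/*
 * Example (hypothetical two-stripe RAID1 chunk): num_items = 3 + 2, i.e.
 * the orphan, block group and free space items plus one device extent
 * item per stripe.
 */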
  9328. map = em->map_lookup;
  9329. num_items = 3 + map->num_stripes;
  9330. free_extent_map(em);
  9331. return btrfs_start_transaction_fallback_global_rsv(fs_info->extent_root,
  9332. num_items, 1);
  9333. }
  9334. /*
  9335. * Process the unused_bgs list and remove any that don't have any allocated
  9336. * space inside of them.
  9337. */
  9338. void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
  9339. {
  9340. struct btrfs_block_group_cache *block_group;
  9341. struct btrfs_space_info *space_info;
  9342. struct btrfs_trans_handle *trans;
  9343. int ret = 0;
  9344. if (!test_bit(BTRFS_FS_OPEN, &fs_info->flags))
  9345. return;
  9346. spin_lock(&fs_info->unused_bgs_lock);
  9347. while (!list_empty(&fs_info->unused_bgs)) {
  9348. u64 start, end;
  9349. int trimming;
  9350. block_group = list_first_entry(&fs_info->unused_bgs,
  9351. struct btrfs_block_group_cache,
  9352. bg_list);
  9353. list_del_init(&block_group->bg_list);
  9354. space_info = block_group->space_info;
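/*
 * ret carries over from the previous iteration: once something has
 * failed we only drain the list, dropping each group untouched.
 */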
  9355. if (ret || btrfs_mixed_space_info(space_info)) {
  9356. btrfs_put_block_group(block_group);
  9357. continue;
  9358. }
  9359. spin_unlock(&fs_info->unused_bgs_lock);
  9360. mutex_lock(&fs_info->delete_unused_bgs_mutex);
  9361. /* Don't want to race with allocators so take the groups_sem */
  9362. down_write(&space_info->groups_sem);
  9363. spin_lock(&block_group->lock);
  9364. if (block_group->reserved || block_group->pinned ||
  9365. btrfs_block_group_used(&block_group->item) ||
  9366. block_group->ro ||
  9367. list_is_singular(&block_group->list)) {
  9368. /*
  9369. * We want to bail if we made new allocations or have
  9370. * outstanding allocations in this block group. We do
  9371. * the ro check in case balance is currently acting on
  9372. * this block group.
  9373. */
  9374. trace_btrfs_skip_unused_block_group(block_group);
  9375. spin_unlock(&block_group->lock);
  9376. up_write(&space_info->groups_sem);
  9377. goto next;
  9378. }
  9379. spin_unlock(&block_group->lock);
  9380. /* We don't want to force the issue, only flip if it's ok. */
  9381. ret = inc_block_group_ro(block_group, 0);
  9382. up_write(&space_info->groups_sem);
  9383. if (ret < 0) {
  9384. ret = 0;
  9385. goto next;
  9386. }
  9387. /*
  9388. * Want to do this before we do anything else so we can recover
  9389. * properly if we fail to join the transaction.
  9390. */
  9391. trans = btrfs_start_trans_remove_block_group(fs_info,
  9392. block_group->key.objectid);
  9393. if (IS_ERR(trans)) {
  9394. btrfs_dec_block_group_ro(block_group);
  9395. ret = PTR_ERR(trans);
  9396. goto next;
  9397. }
  9398. /*
  9399. * We could have pending pinned extents for this block group,
  9400. * just delete them, we don't care about them anymore.
  9401. */
  9402. start = block_group->key.objectid;
  9403. end = start + block_group->key.offset - 1;
  9404. /*
  9405. * Hold the unused_bg_unpin_mutex lock to avoid racing with
  9406. * btrfs_finish_extent_commit(). If we are at transaction N,
  9407. * another task might be running finish_extent_commit() for the
  9408. * previous transaction N - 1, and have seen a range belonging
  9409. * to the block group in freed_extents[] before we were able to
  9410. * clear the whole block group range from freed_extents[]. This
  9411. * means that task can lookup for the block group after we
  9412. * unpinned it from freed_extents[] and removed it, leading to
  9413. * a BUG_ON() at btrfs_unpin_extent_range().
  9414. */
  9415. mutex_lock(&fs_info->unused_bg_unpin_mutex);
  9416. ret = clear_extent_bits(&fs_info->freed_extents[0], start, end,
  9417. EXTENT_DIRTY);
  9418. if (ret) {
  9419. mutex_unlock(&fs_info->unused_bg_unpin_mutex);
  9420. btrfs_dec_block_group_ro(block_group);
  9421. goto end_trans;
  9422. }
  9423. ret = clear_extent_bits(&fs_info->freed_extents[1], start, end,
  9424. EXTENT_DIRTY);
  9425. if (ret) {
  9426. mutex_unlock(&fs_info->unused_bg_unpin_mutex);
  9427. btrfs_dec_block_group_ro(block_group);
  9428. goto end_trans;
  9429. }
  9430. mutex_unlock(&fs_info->unused_bg_unpin_mutex);
  9431. /* Reset pinned so btrfs_put_block_group doesn't complain */
  9432. spin_lock(&space_info->lock);
  9433. spin_lock(&block_group->lock);
  9434. space_info->bytes_pinned -= block_group->pinned;
  9435. space_info->bytes_readonly += block_group->pinned;
  9436. percpu_counter_add_batch(&space_info->total_bytes_pinned,
  9437. -block_group->pinned,
  9438. BTRFS_TOTAL_BYTES_PINNED_BATCH);
  9439. block_group->pinned = 0;
  9440. spin_unlock(&block_group->lock);
  9441. spin_unlock(&space_info->lock);
  9442. /* DISCARD can flip during remount */
  9443. trimming = btrfs_test_opt(fs_info, DISCARD);
  9444. /* Implicit trim during transaction commit. */
  9445. if (trimming)
  9446. btrfs_get_block_group_trimming(block_group);
  9447. /*
9448. * btrfs_remove_chunk() will abort the transaction if things go
  9449. * horribly wrong.
  9450. */
  9451. ret = btrfs_remove_chunk(trans, block_group->key.objectid);
  9452. if (ret) {
  9453. if (trimming)
  9454. btrfs_put_block_group_trimming(block_group);
  9455. goto end_trans;
  9456. }
  9457. /*
  9458. * If we're not mounted with -odiscard, we can just forget
  9459. * about this block group. Otherwise we'll need to wait
  9460. * until transaction commit to do the actual discard.
  9461. */
  9462. if (trimming) {
  9463. spin_lock(&fs_info->unused_bgs_lock);
  9464. /*
  9465. * A concurrent scrub might have added us to the list
  9466. * fs_info->unused_bgs, so use a list_move operation
  9467. * to add the block group to the deleted_bgs list.
  9468. */
  9469. list_move(&block_group->bg_list,
  9470. &trans->transaction->deleted_bgs);
  9471. spin_unlock(&fs_info->unused_bgs_lock);
  9472. btrfs_get_block_group(block_group);
  9473. }
  9474. end_trans:
  9475. btrfs_end_transaction(trans);
  9476. next:
  9477. mutex_unlock(&fs_info->delete_unused_bgs_mutex);
  9478. btrfs_put_block_group(block_group);
  9479. spin_lock(&fs_info->unused_bgs_lock);
  9480. }
  9481. spin_unlock(&fs_info->unused_bgs_lock);
  9482. }
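/*
 * Create the base space_info objects at mount: SYSTEM always, then either
 * one combined METADATA|DATA space_info (mixed block groups) or separate
 * METADATA and DATA ones.
 */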
int btrfs_init_space_info(struct btrfs_fs_info *fs_info)
{
	struct btrfs_super_block *disk_super;
	u64 features;
	u64 flags;
	int mixed = 0;
	int ret;

	disk_super = fs_info->super_copy;
	if (!btrfs_super_root(disk_super))
		return -EINVAL;

	features = btrfs_super_incompat_flags(disk_super);
	if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
		mixed = 1;

	flags = BTRFS_BLOCK_GROUP_SYSTEM;
	ret = create_space_info(fs_info, flags);
	if (ret)
		goto out;

	if (mixed) {
		flags = BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA;
		ret = create_space_info(fs_info, flags);
	} else {
		flags = BTRFS_BLOCK_GROUP_METADATA;
		ret = create_space_info(fs_info, flags);
		if (ret)
			goto out;

		flags = BTRFS_BLOCK_GROUP_DATA;
		ret = create_space_info(fs_info, flags);
	}
out:
	return ret;
}
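
/*
 * Unpin an extent range on behalf of error handling paths (as the name
 * suggests, e.g. cleanup after an aborted transaction).  The trailing
 * 'false' presumably selects the unpin_extent_range() variant that does
 * not add the range back to the free space cache.
 */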
int btrfs_error_unpin_extent_range(struct btrfs_fs_info *fs_info,
				   u64 start, u64 end)
{
	return unpin_extent_range(fs_info, start, end, false);
}

/*
 * It used to be that old block groups would be left around forever.
 * Iterating over them would be enough to trim unused space. Since we
 * now automatically remove them, we also need to iterate over unallocated
 * space.
 *
 * We don't want a transaction for this since the discard may take a
 * substantial amount of time. We don't require that a transaction be
 * running, but we do need to take a running transaction into account
 * to ensure that we're not discarding chunks that were released or
 * allocated in the current transaction.
 *
 * Holding the chunks lock will prevent other threads from allocating
 * or releasing chunks, but it won't prevent a running transaction
 * from committing and releasing the memory that the pending chunks
 * list head uses. For that, we need to take a reference to the
 * transaction and hold the commit root sem. We only need to hold
 * it while performing the free space search since we have already
 * held back allocations.
 */
static int btrfs_trim_free_extents(struct btrfs_device *device,
				   u64 minlen, u64 *trimmed)
{
	u64 start = 0, len = 0;
	int ret;

	*trimmed = 0;

	/* Discard not supported = nothing to do. */
	if (!blk_queue_discard(bdev_get_queue(device->bdev)))
		return 0;

	/* Not writeable = nothing to do. */
	if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
		return 0;

	/* No free space = nothing to do. */
	if (device->total_bytes <= device->bytes_used)
		return 0;

	ret = 0;

	while (1) {
		struct btrfs_fs_info *fs_info = device->fs_info;
		struct btrfs_transaction *trans;
		u64 bytes;

		ret = mutex_lock_interruptible(&fs_info->chunk_mutex);
		if (ret)
			break;

		ret = down_read_killable(&fs_info->commit_root_sem);
		if (ret) {
			mutex_unlock(&fs_info->chunk_mutex);
			break;
		}

		spin_lock(&fs_info->trans_lock);
		trans = fs_info->running_transaction;
		if (trans)
			refcount_inc(&trans->use_count);
		spin_unlock(&fs_info->trans_lock);

		if (!trans)
			up_read(&fs_info->commit_root_sem);

		ret = find_free_dev_extent_start(trans, device, minlen, start,
						 &start, &len);
		if (trans) {
			up_read(&fs_info->commit_root_sem);
			btrfs_put_transaction(trans);
		}

		if (ret) {
			mutex_unlock(&fs_info->chunk_mutex);
			if (ret == -ENOSPC)
				ret = 0;
			break;
		}

		ret = btrfs_issue_discard(device->bdev, start, len, &bytes);
		mutex_unlock(&fs_info->chunk_mutex);

		if (ret)
			break;

		start += len;
		*trimmed += bytes;

		if (fatal_signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}

		cond_resched();
	}

	return ret;
}
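
/*
 * Note: this helper is static and its only caller here is btrfs_trim_fs()
 * below, which invokes it once per device with the minlen taken from the
 * user's fstrim_range.
 */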

/*
 * Trim the whole filesystem by:
 * 1) trimming the free space in each block group
 * 2) trimming the unallocated space on each device
 *
 * This will also continue trimming even if a block group or device encounters
 * an error. The return value will be the last error, or 0 if nothing bad
 * happens.
 */
int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range)
{
	struct btrfs_block_group_cache *cache = NULL;
	struct btrfs_device *device;
	struct list_head *devices;
	u64 group_trimmed;
	u64 start;
	u64 end;
	u64 trimmed = 0;
	u64 bg_failed = 0;
	u64 dev_failed = 0;
	int bg_ret = 0;
	int dev_ret = 0;
	int ret = 0;

	cache = btrfs_lookup_first_block_group(fs_info, range->start);
	for (; cache; cache = next_block_group(fs_info, cache)) {
		if (cache->key.objectid >= (range->start + range->len)) {
			btrfs_put_block_group(cache);
			break;
		}

		start = max(range->start, cache->key.objectid);
		end = min(range->start + range->len,
			  cache->key.objectid + cache->key.offset);

		if (end - start >= range->minlen) {
			if (!block_group_cache_done(cache)) {
				ret = cache_block_group(cache, 0);
				if (ret) {
					bg_failed++;
					bg_ret = ret;
					continue;
				}
				ret = wait_block_group_cache_done(cache);
				if (ret) {
					bg_failed++;
					bg_ret = ret;
					continue;
				}
			}
			ret = btrfs_trim_block_group(cache,
						     &group_trimmed,
						     start,
						     end,
						     range->minlen);

			trimmed += group_trimmed;
			if (ret) {
				bg_failed++;
				bg_ret = ret;
				continue;
			}
		}
	}

	if (bg_failed)
		btrfs_warn(fs_info,
			"failed to trim %llu block group(s), last error %d",
			bg_failed, bg_ret);

	mutex_lock(&fs_info->fs_devices->device_list_mutex);
	devices = &fs_info->fs_devices->devices;
	list_for_each_entry(device, devices, dev_list) {
		ret = btrfs_trim_free_extents(device, range->minlen,
					      &group_trimmed);
		if (ret) {
			dev_failed++;
			dev_ret = ret;
			break;
		}

		trimmed += group_trimmed;
	}
	mutex_unlock(&fs_info->fs_devices->device_list_mutex);

	if (dev_failed)
		btrfs_warn(fs_info,
			"failed to trim %llu device(s), last error %d",
			dev_failed, dev_ret);

	range->len = trimmed;
	if (bg_ret)
		return bg_ret;
	return dev_ret;
}
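
#if 0
/*
 * Usage sketch (illustrative only, not part of the original file): a
 * hypothetical in-kernel caller trimming the whole filesystem.  The real
 * entry point is the FITRIM ioctl handler, which fills the fstrim_range
 * from user space before calling btrfs_trim_fs().
 */
static int example_trim_whole_fs(struct btrfs_fs_info *fs_info)
{
	struct fstrim_range range = {
		.start = 0,
		.len = U64_MAX,		/* cover every block group and device */
		.minlen = SZ_1M,	/* skip free extents smaller than 1 MiB */
	};
	int ret;

	ret = btrfs_trim_fs(fs_info, &range);
	/* On return, range.len holds the total number of bytes trimmed. */
	return ret;
}
#endif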

/*
 * btrfs_{start,end}_write_no_snapshotting() are similar to
 * mnt_{want,drop}_write(): they are used to prevent some tasks from writing
 * data into the page cache via nocow before the subvolume is snapshotted
 * and only flushing it to disk after the snapshot is created, and to prevent
 * operations that would make the snapshot inconsistent while snapshotting is
 * ongoing (writes followed by expanding truncates, for example).
 */
void btrfs_end_write_no_snapshotting(struct btrfs_root *root)
{
	percpu_counter_dec(&root->subv_writers->counter);
	cond_wake_up(&root->subv_writers->wait);
}

int btrfs_start_write_no_snapshotting(struct btrfs_root *root)
{
	if (atomic_read(&root->will_be_snapshotted))
		return 0;

	percpu_counter_inc(&root->subv_writers->counter);
	/*
	 * Make sure the counter is updated before we check for snapshot
	 * creation.
	 */
	smp_mb();
	if (atomic_read(&root->will_be_snapshotted)) {
		btrfs_end_write_no_snapshotting(root);
		return 0;
	}
	return 1;
}
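
#if 0
/*
 * Usage sketch (illustrative only, not part of the original file): the
 * pairing a nocow writer is expected to follow.  do_nocow_write() and
 * fall_back_to_cow() are hypothetical placeholders.
 */
static int example_nocow_writer(struct btrfs_root *root)
{
	int ret;

	if (!btrfs_start_write_no_snapshotting(root)) {
		/* A snapshot is pending or in progress: must go through COW. */
		return fall_back_to_cow();
	}
	ret = do_nocow_write();
	btrfs_end_write_no_snapshotting(root);
	return ret;
}
#endif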

void btrfs_wait_for_snapshot_creation(struct btrfs_root *root)
{
	while (true) {
		int ret;

		ret = btrfs_start_write_no_snapshotting(root);
		if (ret)
			break;
		wait_var_event(&root->will_be_snapshotted,
			       !atomic_read(&root->will_be_snapshotted));
	}
}
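
/*
 * Note: the loop above only exits once btrfs_start_write_no_snapshotting()
 * has returned 1, i.e. with the subv_writers counter held, so callers must
 * pair this with btrfs_end_write_no_snapshotting() once their write is done.
 */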

void btrfs_mark_bg_unused(struct btrfs_block_group_cache *bg)
{
	struct btrfs_fs_info *fs_info = bg->fs_info;

	spin_lock(&fs_info->unused_bgs_lock);
	if (list_empty(&bg->bg_list)) {
		btrfs_get_block_group(bg);
		trace_btrfs_add_unused_block_group(bg);
		list_add_tail(&bg->bg_list, &fs_info->unused_bgs);
	}
	spin_unlock(&fs_info->unused_bgs_lock);
}
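
/*
 * Note: block groups queued on fs_info->unused_bgs here are consumed by
 * btrfs_delete_unused_bgs() earlier in this file, which takes them off the
 * list under unused_bgs_lock, drops the reference acquired above and, if the
 * group is still empty, removes its chunk.
 */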