extent-tree.c

/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/rcupdate.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/percpu_counter.h>
#include "hash.h"
#include "tree-log.h"
#include "disk-io.h"
#include "print-tree.h"
#include "volumes.h"
#include "raid56.h"
#include "locking.h"
#include "free-space-cache.h"
#include "math.h"
#include "sysfs.h"
#include "qgroup.h"

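/*
 * Debugging knob: when SCRAMBLE_DELAYED_REFS is defined, the order in which
 * delayed refs are processed is deliberately scrambled for stress testing.
 * It is left undefined in normal builds.
 */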
#undef SCRAMBLE_DELAYED_REFS

/*
 * control flags for do_chunk_alloc's force field
 * CHUNK_ALLOC_NO_FORCE means to only allocate a chunk
 * if we really need one.
 *
 * CHUNK_ALLOC_LIMITED means to only try and allocate one
 * if we have very few chunks already allocated.  This is
 * used as part of the clustering code to help make sure
 * we have a good pool of storage to cluster in, without
 * filling the FS with empty chunks
 *
 * CHUNK_ALLOC_FORCE means it must try to allocate one
 *
 */
enum {
        CHUNK_ALLOC_NO_FORCE = 0,
        CHUNK_ALLOC_LIMITED = 1,
        CHUNK_ALLOC_FORCE = 2,
};

/*
 * Control how reservations are dealt with.
 *
 * RESERVE_FREE - freeing a reservation.
 * RESERVE_ALLOC - allocating space and we need to update bytes_may_use for
 *   ENOSPC accounting
 * RESERVE_ALLOC_NO_ACCOUNT - allocating space and we should not update
 *   bytes_may_use as the ENOSPC accounting is done elsewhere
 */
enum {
        RESERVE_FREE = 0,
        RESERVE_ALLOC = 1,
        RESERVE_ALLOC_NO_ACCOUNT = 2,
};

static int update_block_group(struct btrfs_root *root,
                              u64 bytenr, u64 num_bytes, int alloc);
static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
                               struct btrfs_root *root,
                               u64 bytenr, u64 num_bytes, u64 parent,
                               u64 root_objectid, u64 owner_objectid,
                               u64 owner_offset, int refs_to_drop,
                               struct btrfs_delayed_extent_op *extra_op,
                               int no_quota);
static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
                                    struct extent_buffer *leaf,
                                    struct btrfs_extent_item *ei);
static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
                                      struct btrfs_root *root,
                                      u64 parent, u64 root_objectid,
                                      u64 flags, u64 owner, u64 offset,
                                      struct btrfs_key *ins, int ref_mod);
static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
                                     struct btrfs_root *root,
                                     u64 parent, u64 root_objectid,
                                     u64 flags, struct btrfs_disk_key *key,
                                     int level, struct btrfs_key *ins,
                                     int no_quota);
static int do_chunk_alloc(struct btrfs_trans_handle *trans,
                          struct btrfs_root *extent_root, u64 flags,
                          int force);
static int find_next_key(struct btrfs_path *path, int level,
                         struct btrfs_key *key);
static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
                            int dump_block_groups);
static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
                                       u64 num_bytes, int reserve,
                                       int delalloc);
static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
                               u64 num_bytes);
int btrfs_pin_extent(struct btrfs_root *root,
                     u64 bytenr, u64 num_bytes, int reserved);

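/*
 * Has caching of this block group's free space finished, either
 * successfully or with an error?  The barrier orders this read of
 * ->cached against the caching thread's updates.
 */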
static noinline int
block_group_cache_done(struct btrfs_block_group_cache *cache)
{
        smp_mb();
        return cache->cached == BTRFS_CACHE_FINISHED ||
                cache->cached == BTRFS_CACHE_ERROR;
}

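/* true if the block group's flags contain all of the given bits */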
static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
{
        return (cache->flags & bits) == bits;
}

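/*
 * Block group cache structs are reference counted; the final put frees
 * the struct along with its free space ctl.
 */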
static void btrfs_get_block_group(struct btrfs_block_group_cache *cache)
{
        atomic_inc(&cache->count);
}

void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
{
        if (atomic_dec_and_test(&cache->count)) {
                WARN_ON(cache->pinned > 0);
                WARN_ON(cache->reserved > 0);
                kfree(cache->free_space_ctl);
                kfree(cache);
        }
}

/*
 * this adds the block group to the fs_info rb tree for the block group
 * cache
 */
static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
                                struct btrfs_block_group_cache *block_group)
{
        struct rb_node **p;
        struct rb_node *parent = NULL;
        struct btrfs_block_group_cache *cache;

        spin_lock(&info->block_group_cache_lock);
        p = &info->block_group_cache_tree.rb_node;

        while (*p) {
                parent = *p;
                cache = rb_entry(parent, struct btrfs_block_group_cache,
                                 cache_node);
                if (block_group->key.objectid < cache->key.objectid) {
                        p = &(*p)->rb_left;
                } else if (block_group->key.objectid > cache->key.objectid) {
                        p = &(*p)->rb_right;
                } else {
                        spin_unlock(&info->block_group_cache_lock);
                        return -EEXIST;
                }
        }

        rb_link_node(&block_group->cache_node, parent, p);
        rb_insert_color(&block_group->cache_node,
                        &info->block_group_cache_tree);

        if (info->first_logical_byte > block_group->key.objectid)
                info->first_logical_byte = block_group->key.objectid;

        spin_unlock(&info->block_group_cache_lock);

        return 0;
}

/*
 * This will return the block group at or after bytenr if contains is 0, else
 * it will return the block group that contains the bytenr
 */
static struct btrfs_block_group_cache *
block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
                              int contains)
{
        struct btrfs_block_group_cache *cache, *ret = NULL;
        struct rb_node *n;
        u64 end, start;

        spin_lock(&info->block_group_cache_lock);
        n = info->block_group_cache_tree.rb_node;

        while (n) {
                cache = rb_entry(n, struct btrfs_block_group_cache,
                                 cache_node);
                end = cache->key.objectid + cache->key.offset - 1;
                start = cache->key.objectid;

                if (bytenr < start) {
                        if (!contains && (!ret || start < ret->key.objectid))
                                ret = cache;
                        n = n->rb_left;
                } else if (bytenr > start) {
                        if (contains && bytenr <= end) {
                                ret = cache;
                                break;
                        }
                        n = n->rb_right;
                } else {
                        ret = cache;
                        break;
                }
        }
        if (ret) {
                btrfs_get_block_group(ret);
                if (bytenr == 0 && info->first_logical_byte > ret->key.objectid)
                        info->first_logical_byte = ret->key.objectid;
        }
        spin_unlock(&info->block_group_cache_lock);

        return ret;
}

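/*
 * Excluded extents (e.g. the superblock mirrors handled below) are ranges
 * the allocator must never hand out.  They are recorded as EXTENT_UPTODATE
 * bits in both freed_extents trees until caching of the block group is done.
 */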
static int add_excluded_extent(struct btrfs_root *root,
                               u64 start, u64 num_bytes)
{
        u64 end = start + num_bytes - 1;
        set_extent_bits(&root->fs_info->freed_extents[0],
                        start, end, EXTENT_UPTODATE, GFP_NOFS);
        set_extent_bits(&root->fs_info->freed_extents[1],
                        start, end, EXTENT_UPTODATE, GFP_NOFS);
        return 0;
}

static void free_excluded_extents(struct btrfs_root *root,
                                  struct btrfs_block_group_cache *cache)
{
        u64 start, end;

        start = cache->key.objectid;
        end = start + cache->key.offset - 1;

        clear_extent_bits(&root->fs_info->freed_extents[0],
                          start, end, EXTENT_UPTODATE, GFP_NOFS);
        clear_extent_bits(&root->fs_info->freed_extents[1],
                          start, end, EXTENT_UPTODATE, GFP_NOFS);
}

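/*
 * Mark every superblock mirror that falls inside this block group as
 * excluded, clipping each stripe to the block group's boundaries and
 * accounting the excluded bytes in cache->bytes_super.
 */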
static int exclude_super_stripes(struct btrfs_root *root,
                                 struct btrfs_block_group_cache *cache)
{
        u64 bytenr;
        u64 *logical;
        int stripe_len;
        int i, nr, ret;

        if (cache->key.objectid < BTRFS_SUPER_INFO_OFFSET) {
                stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->key.objectid;
                cache->bytes_super += stripe_len;
                ret = add_excluded_extent(root, cache->key.objectid,
                                          stripe_len);
                if (ret)
                        return ret;
        }

        for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
                bytenr = btrfs_sb_offset(i);
                ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
                                       cache->key.objectid, bytenr,
                                       0, &logical, &nr, &stripe_len);
                if (ret)
                        return ret;

                while (nr--) {
                        u64 start, len;

                        if (logical[nr] > cache->key.objectid +
                            cache->key.offset)
                                continue;

                        if (logical[nr] + stripe_len <= cache->key.objectid)
                                continue;

                        start = logical[nr];
                        if (start < cache->key.objectid) {
                                start = cache->key.objectid;
                                len = (logical[nr] + stripe_len) - start;
                        } else {
                                len = min_t(u64, stripe_len,
                                            cache->key.objectid +
                                            cache->key.offset - start);
                        }

                        cache->bytes_super += len;
                        ret = add_excluded_extent(root, start, len);
                        if (ret) {
                                kfree(logical);
                                return ret;
                        }
                }

                kfree(logical);
        }
        return 0;
}

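/*
 * Take a reference on the block group's caching control, or return NULL
 * if the slow (extent tree) caching pass isn't currently running.
 */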
static struct btrfs_caching_control *
get_caching_control(struct btrfs_block_group_cache *cache)
{
        struct btrfs_caching_control *ctl;

        spin_lock(&cache->lock);
        if (cache->cached != BTRFS_CACHE_STARTED) {
                spin_unlock(&cache->lock);
                return NULL;
        }

        /* We're loading it the fast way, so we don't have a caching_ctl. */
        if (!cache->caching_ctl) {
                spin_unlock(&cache->lock);
                return NULL;
        }

        ctl = cache->caching_ctl;
        atomic_inc(&ctl->count);
        spin_unlock(&cache->lock);
        return ctl;
}

static void put_caching_control(struct btrfs_caching_control *ctl)
{
        if (atomic_dec_and_test(&ctl->count))
                kfree(ctl);
}

/*
 * this is only called by cache_block_group; since we could have freed extents,
 * we need to check the pinned_extents for any extents that can't be used yet,
 * since their free space will be released as soon as the transaction commits.
 */
static u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
                              struct btrfs_fs_info *info, u64 start, u64 end)
{
        u64 extent_start, extent_end, size, total_added = 0;
        int ret;

        while (start < end) {
                ret = find_first_extent_bit(info->pinned_extents, start,
                                            &extent_start, &extent_end,
                                            EXTENT_DIRTY | EXTENT_UPTODATE,
                                            NULL);
                if (ret)
                        break;

                if (extent_start <= start) {
                        start = extent_end + 1;
                } else if (extent_start > start && extent_start < end) {
                        size = extent_start - start;
                        total_added += size;
                        ret = btrfs_add_free_space(block_group, start,
                                                   size);
                        BUG_ON(ret); /* -ENOMEM or logic error */
                        start = extent_end + 1;
                } else {
                        break;
                }
        }

        if (start < end) {
                size = end - start;
                total_added += size;
                ret = btrfs_add_free_space(block_group, start, size);
                BUG_ON(ret); /* -ENOMEM or logic error */
        }

        return total_added;
}

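/*
 * Worker that walks the extent tree through the commit root (so no tree
 * locks are needed) and feeds each gap between allocated extents to the
 * block group's free space cache via add_new_free_space().
 */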
static noinline void caching_thread(struct btrfs_work *work)
{
        struct btrfs_block_group_cache *block_group;
        struct btrfs_fs_info *fs_info;
        struct btrfs_caching_control *caching_ctl;
        struct btrfs_root *extent_root;
        struct btrfs_path *path;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        u64 total_found = 0;
        u64 last = 0;
        u32 nritems;
        int ret = -ENOMEM;

        caching_ctl = container_of(work, struct btrfs_caching_control, work);
        block_group = caching_ctl->block_group;
        fs_info = block_group->fs_info;
        extent_root = fs_info->extent_root;

        path = btrfs_alloc_path();
        if (!path)
                goto out;

        last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);

        /*
         * We don't want to deadlock with somebody trying to allocate a new
         * extent for the extent root while also trying to search the extent
         * root to add free space. So we skip locking and search the commit
         * root, since it's read-only
         */
        path->skip_locking = 1;
        path->search_commit_root = 1;
        path->reada = 1;

        key.objectid = last;
        key.offset = 0;
        key.type = BTRFS_EXTENT_ITEM_KEY;
again:
        mutex_lock(&caching_ctl->mutex);
        /* need to make sure the commit_root doesn't disappear */
        down_read(&fs_info->commit_root_sem);

next:
        ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
        if (ret < 0)
                goto err;

        leaf = path->nodes[0];
        nritems = btrfs_header_nritems(leaf);

        while (1) {
                if (btrfs_fs_closing(fs_info) > 1) {
                        last = (u64)-1;
                        break;
                }

                if (path->slots[0] < nritems) {
                        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
                } else {
                        ret = find_next_key(path, 0, &key);
                        if (ret)
                                break;

                        if (need_resched() ||
                            rwsem_is_contended(&fs_info->commit_root_sem)) {
                                caching_ctl->progress = last;
                                btrfs_release_path(path);
                                up_read(&fs_info->commit_root_sem);
                                mutex_unlock(&caching_ctl->mutex);
                                cond_resched();
                                goto again;
                        }

                        ret = btrfs_next_leaf(extent_root, path);
                        if (ret < 0)
                                goto err;
                        if (ret)
                                break;
                        leaf = path->nodes[0];
                        nritems = btrfs_header_nritems(leaf);
                        continue;
                }

                if (key.objectid < last) {
                        key.objectid = last;
                        key.offset = 0;
                        key.type = BTRFS_EXTENT_ITEM_KEY;

                        caching_ctl->progress = last;
                        btrfs_release_path(path);
                        goto next;
                }

                if (key.objectid < block_group->key.objectid) {
                        path->slots[0]++;
                        continue;
                }

                if (key.objectid >= block_group->key.objectid +
                    block_group->key.offset)
                        break;

                if (key.type == BTRFS_EXTENT_ITEM_KEY ||
                    key.type == BTRFS_METADATA_ITEM_KEY) {
                        total_found += add_new_free_space(block_group,
                                                          fs_info, last,
                                                          key.objectid);
                        if (key.type == BTRFS_METADATA_ITEM_KEY)
                                last = key.objectid +
                                        fs_info->tree_root->leafsize;
                        else
                                last = key.objectid + key.offset;
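                        /*
                         * Wake anyone waiting on caching progress once
                         * another 2MB of free space has been found.
                         */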
                        if (total_found > (1024 * 1024 * 2)) {
                                total_found = 0;
                                wake_up(&caching_ctl->wait);
                        }
                }
                path->slots[0]++;
        }
        ret = 0;

        total_found += add_new_free_space(block_group, fs_info, last,
                                          block_group->key.objectid +
                                          block_group->key.offset);
        caching_ctl->progress = (u64)-1;

        spin_lock(&block_group->lock);
        block_group->caching_ctl = NULL;
        block_group->cached = BTRFS_CACHE_FINISHED;
        spin_unlock(&block_group->lock);

err:
        btrfs_free_path(path);
        up_read(&fs_info->commit_root_sem);

        free_excluded_extents(extent_root, block_group);

        mutex_unlock(&caching_ctl->mutex);
out:
        if (ret) {
                spin_lock(&block_group->lock);
                block_group->caching_ctl = NULL;
                block_group->cached = BTRFS_CACHE_ERROR;
                spin_unlock(&block_group->lock);
        }
        wake_up(&caching_ctl->wait);

        put_caching_control(caching_ctl);
        btrfs_put_block_group(block_group);
}

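/*
 * Begin caching a block group's free space.  The on-disk free space cache
 * is tried first (the fast path); when that misses we either bail out
 * (load_cache_only) or queue caching_thread() to scan the extent tree.
 */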
static int cache_block_group(struct btrfs_block_group_cache *cache,
                             int load_cache_only)
{
        DEFINE_WAIT(wait);
        struct btrfs_fs_info *fs_info = cache->fs_info;
        struct btrfs_caching_control *caching_ctl;
        int ret = 0;

        caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
        if (!caching_ctl)
                return -ENOMEM;

        INIT_LIST_HEAD(&caching_ctl->list);
        mutex_init(&caching_ctl->mutex);
        init_waitqueue_head(&caching_ctl->wait);
        caching_ctl->block_group = cache;
        caching_ctl->progress = cache->key.objectid;
        atomic_set(&caching_ctl->count, 1);
        btrfs_init_work(&caching_ctl->work, btrfs_cache_helper,
                        caching_thread, NULL, NULL);

        spin_lock(&cache->lock);
        /*
         * This should be a rare occasion, but this could happen I think in the
         * case where one thread starts to load the space cache info, and then
         * some other thread starts a transaction commit which tries to do an
         * allocation while the other thread is still loading the space cache
         * info.  The previous loop should have kept us from choosing this block
         * group, but if we've moved to the state where we will wait on caching
         * block groups we need to first check if we're doing a fast load here,
         * so we can wait for it to finish, otherwise we could end up allocating
         * from a block group whose cache gets evicted for one reason or
         * another.
         */
        while (cache->cached == BTRFS_CACHE_FAST) {
                struct btrfs_caching_control *ctl;

                ctl = cache->caching_ctl;
                atomic_inc(&ctl->count);
                prepare_to_wait(&ctl->wait, &wait, TASK_UNINTERRUPTIBLE);
                spin_unlock(&cache->lock);

                schedule();

                finish_wait(&ctl->wait, &wait);
                put_caching_control(ctl);
                spin_lock(&cache->lock);
        }

        if (cache->cached != BTRFS_CACHE_NO) {
                spin_unlock(&cache->lock);
                kfree(caching_ctl);
                return 0;
        }
        WARN_ON(cache->caching_ctl);
        cache->caching_ctl = caching_ctl;
        cache->cached = BTRFS_CACHE_FAST;
        spin_unlock(&cache->lock);

        if (fs_info->mount_opt & BTRFS_MOUNT_SPACE_CACHE) {
                ret = load_free_space_cache(fs_info, cache);

                spin_lock(&cache->lock);
                if (ret == 1) {
                        cache->caching_ctl = NULL;
                        cache->cached = BTRFS_CACHE_FINISHED;
                        cache->last_byte_to_unpin = (u64)-1;
                } else {
                        if (load_cache_only) {
                                cache->caching_ctl = NULL;
                                cache->cached = BTRFS_CACHE_NO;
                        } else {
                                cache->cached = BTRFS_CACHE_STARTED;
                        }
                }
                spin_unlock(&cache->lock);
                wake_up(&caching_ctl->wait);
                if (ret == 1) {
                        put_caching_control(caching_ctl);
                        free_excluded_extents(fs_info->extent_root, cache);
                        return 0;
                }
        } else {
                /*
                 * We are not going to do the fast caching, set cached to the
                 * appropriate value and wakeup any waiters.
                 */
                spin_lock(&cache->lock);
                if (load_cache_only) {
                        cache->caching_ctl = NULL;
                        cache->cached = BTRFS_CACHE_NO;
                } else {
                        cache->cached = BTRFS_CACHE_STARTED;
                }
                spin_unlock(&cache->lock);
                wake_up(&caching_ctl->wait);
        }

        if (load_cache_only) {
                put_caching_control(caching_ctl);
                return 0;
        }

        down_write(&fs_info->commit_root_sem);
        atomic_inc(&caching_ctl->count);
        list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
        up_write(&fs_info->commit_root_sem);

        btrfs_get_block_group(cache);

        btrfs_queue_work(fs_info->caching_workers, &caching_ctl->work);

        return ret;
}


/*
 * return the block group that starts at or after bytenr
 */
static struct btrfs_block_group_cache *
btrfs_lookup_first_block_group(struct btrfs_fs_info *info, u64 bytenr)
{
	struct btrfs_block_group_cache *cache;

	cache = block_group_cache_tree_search(info, bytenr, 0);

	return cache;
}

/*
 * return the block group that contains the given bytenr
 */
struct btrfs_block_group_cache *btrfs_lookup_block_group(
						 struct btrfs_fs_info *info,
						 u64 bytenr)
{
	struct btrfs_block_group_cache *cache;

	cache = block_group_cache_tree_search(info, bytenr, 1);

	return cache;
}

static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
						  u64 flags)
{
	struct list_head *head = &info->space_info;
	struct btrfs_space_info *found;

	flags &= BTRFS_BLOCK_GROUP_TYPE_MASK;

	rcu_read_lock();
	list_for_each_entry_rcu(found, head, list) {
		if (found->flags & flags) {
			rcu_read_unlock();
			return found;
		}
	}
	rcu_read_unlock();
	return NULL;
}

/*
 * after adding space to the filesystem, we need to clear the full flags
 * on all the space infos.
 */
void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
{
	struct list_head *head = &info->space_info;
	struct btrfs_space_info *found;

	rcu_read_lock();
	list_for_each_entry_rcu(found, head, list)
		found->full = 0;
	rcu_read_unlock();
}

/* simple helper to search for an existing extent at a given offset */
int btrfs_lookup_extent(struct btrfs_root *root, u64 start, u64 len)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_path *path;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = start;
	key.offset = len;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, path,
				0, 0);
	if (ret > 0) {
		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
		if (key.objectid == start &&
		    key.type == BTRFS_METADATA_ITEM_KEY)
			ret = 0;
	}
	btrfs_free_path(path);
	return ret;
}

/*
 * helper function to look up the reference count and flags of a tree block.
 *
 * The head node for a delayed ref is used to store the sum of all the
 * reference count modifications queued up in the rbtree.  The head node may
 * also store the extent flags to set.  This way you can check what the
 * reference count and extent flags will be once all of the queued delayed
 * refs are processed, without actually running them.
 */
int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root, u64 bytenr,
			     u64 offset, int metadata, u64 *refs, u64 *flags)
{
	struct btrfs_delayed_ref_head *head;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_path *path;
	struct btrfs_extent_item *ei;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	u32 item_size;
	u64 num_refs;
	u64 extent_flags;
	int ret;

	/*
	 * If we don't have skinny metadata, don't bother doing anything
	 * different
	 */
	if (metadata && !btrfs_fs_incompat(root->fs_info, SKINNY_METADATA)) {
		offset = root->leafsize;
		metadata = 0;
	}

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	if (!trans) {
		path->skip_locking = 1;
		path->search_commit_root = 1;
	}

search_again:
	key.objectid = bytenr;
	key.offset = offset;
	if (metadata)
		key.type = BTRFS_METADATA_ITEM_KEY;
	else
		key.type = BTRFS_EXTENT_ITEM_KEY;

again:
	ret = btrfs_search_slot(trans, root->fs_info->extent_root,
				&key, path, 0, 0);
	if (ret < 0)
		goto out_free;

	if (ret > 0 && metadata && key.type == BTRFS_METADATA_ITEM_KEY) {
		if (path->slots[0]) {
			path->slots[0]--;
			btrfs_item_key_to_cpu(path->nodes[0], &key,
					      path->slots[0]);
			if (key.objectid == bytenr &&
			    key.type == BTRFS_EXTENT_ITEM_KEY &&
			    key.offset == root->leafsize)
				ret = 0;
		}
		if (ret) {
			key.objectid = bytenr;
			key.type = BTRFS_EXTENT_ITEM_KEY;
			key.offset = root->leafsize;
			btrfs_release_path(path);
			goto again;
		}
	}

	if (ret == 0) {
		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		if (item_size >= sizeof(*ei)) {
			ei = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_extent_item);
			num_refs = btrfs_extent_refs(leaf, ei);
			extent_flags = btrfs_extent_flags(leaf, ei);
		} else {
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
			struct btrfs_extent_item_v0 *ei0;
			BUG_ON(item_size != sizeof(*ei0));
			ei0 = btrfs_item_ptr(leaf, path->slots[0],
					     struct btrfs_extent_item_v0);
			num_refs = btrfs_extent_refs_v0(leaf, ei0);
			/* FIXME: this isn't correct for data */
			extent_flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
#else
			BUG();
#endif
		}
		BUG_ON(num_refs == 0);
	} else {
		num_refs = 0;
		extent_flags = 0;
		ret = 0;
	}

	if (!trans)
		goto out;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);
	head = btrfs_find_delayed_ref_head(trans, bytenr);
	if (head) {
		if (!mutex_trylock(&head->mutex)) {
			atomic_inc(&head->node.refs);
			spin_unlock(&delayed_refs->lock);

			btrfs_release_path(path);

			/*
			 * Mutex was contended, block until it's released and
			 * try again
			 */
			mutex_lock(&head->mutex);
			mutex_unlock(&head->mutex);
			btrfs_put_delayed_ref(&head->node);
			goto search_again;
		}
		spin_lock(&head->lock);
		if (head->extent_op && head->extent_op->update_flags)
			extent_flags |= head->extent_op->flags_to_set;
		else
			BUG_ON(num_refs == 0);

		num_refs += head->node.ref_mod;
		spin_unlock(&head->lock);
		mutex_unlock(&head->mutex);
	}
	spin_unlock(&delayed_refs->lock);
out:
	WARN_ON(num_refs == 0);
	if (refs)
		*refs = num_refs;
	if (flags)
		*flags = extent_flags;
out_free:
	btrfs_free_path(path);
	return ret;
}

/*
 * Back reference rules.  Back refs have three main goals:
 *
 * 1) differentiate between all holders of references to an extent so that
 *    when a reference is dropped we can make sure it was a valid reference
 *    before freeing the extent.
 *
 * 2) Provide enough information to quickly find the holders of an extent
 *    if we notice a given block is corrupted or bad.
 *
 * 3) Make it easy to migrate blocks for FS shrinking or storage pool
 *    maintenance.  This is actually the same as #2, but with a slightly
 *    different use case.
 *
 * There are two kinds of back refs.  Implicit back refs are optimized
 * for pointers in non-shared tree blocks.  For a given pointer in a block,
 * back refs of this kind provide information about the block's owner tree
 * and the pointer's key.  This information allows us to find the block by
 * b-tree searching.  Full back refs are for pointers in tree blocks not
 * referenced by their owner trees.  The location of the tree block is
 * recorded in the back refs.  Actually the full back ref is generic, and
 * can be used in all cases where the implicit back ref is used.  The major
 * shortcoming of the full back ref is its overhead.  Every time a tree
 * block gets COWed, we have to update the back ref entries for all
 * pointers in it.
 *
 * For a newly allocated tree block, we use implicit back refs for
 * pointers in it.  This means most tree related operations only involve
 * implicit back refs.  For a tree block created in an old transaction, the
 * only way to drop a reference to it is to COW it.  So we can detect the
 * event that a tree block loses its owner tree's reference and do the
 * back refs conversion.
 *
 * When a tree block is COWed through a tree, there are four cases:
 *
 * The reference count of the block is one and the tree is the block's
 * owner tree.  Nothing to do in this case.
 *
 * The reference count of the block is one and the tree is not the
 * block's owner tree.  In this case, full back refs are used for pointers
 * in the block.  Remove these full back refs, add implicit back refs for
 * every pointer in the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * the block's owner tree.  In this case, implicit back refs are used for
 * pointers in the block.  Add full back refs for every pointer in the
 * block, increase lower level extents' reference counts.  The original
 * implicit back refs are carried over to the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * not the block's owner tree.  Add implicit back refs for every pointer in
 * the new block, increase lower level extents' reference count.
 *
 * Back Reference Key composing:
 *
 * The key objectid corresponds to the first byte in the extent,
 * the key type is used to differentiate between types of back refs.
 * There are different meanings of the key offset for different types
 * of back refs.
 *
 * File extents can be referenced by:
 *
 * - multiple snapshots, subvolumes, or different generations in one subvol
 * - different files inside a single subvolume
 * - different offsets inside a file (bookend extents in file.c)
 *
 * The extent ref structure for the implicit back refs has fields for:
 *
 * - Objectid of the subvolume root
 * - objectid of the file holding the reference
 * - original offset in the file
 * - how many bookend extents
 *
 * The key offset for the implicit back refs is the hash of the first
 * three fields.
 *
 * The extent ref structure for the full back refs has a field for:
 *
 * - number of pointers in the tree leaf
 *
 * The key offset for the full back refs is the first byte of
 * the tree leaf.
 *
 * When a file extent is allocated, the implicit back refs are used.
 * The fields are filled in:
 *
 *     (root_key.objectid, inode objectid, offset in file, 1)
 *
 * When a file extent is removed by file truncation, we find the
 * corresponding implicit back refs and check the following fields:
 *
 *     (btrfs_header_owner(leaf), inode objectid, offset in file)
 *
 * Btree extents can be referenced by:
 *
 * - Different subvolumes
 *
 * Both the implicit back refs and the full back refs for tree blocks
 * only consist of a key.  The key offset for the implicit back refs is
 * the objectid of the block's owner tree.  The key offset for the full
 * back refs is the first byte of the parent block.
 *
 * When implicit back refs are used, information about the lowest key and
 * level of the tree block is required.  This information is stored in
 * the tree block info structure.
 */
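
/*
 * A concrete illustration with made-up numbers: a data extent at bytenr
 * 12582912, written by inode 257 at file offset 0 in subvolume 5, gets an
 * implicit back ref keyed
 *
 *     (12582912, BTRFS_EXTENT_DATA_REF_KEY, hash(5, 257, 0))
 *
 * while the same extent shared via a snapshotted leaf at bytenr 30408704
 * carries a full back ref keyed
 *
 *     (12582912, BTRFS_SHARED_DATA_REF_KEY, 30408704)
 *
 * (the bytenr values here are hypothetical; hash() is
 * hash_extent_data_ref() below).
 */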
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
static int convert_extent_item_v0(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct btrfs_path *path,
				  u64 owner, u32 extra_size)
{
	struct btrfs_extent_item *item;
	struct btrfs_extent_item_v0 *ei0;
	struct btrfs_extent_ref_v0 *ref0;
	struct btrfs_tree_block_info *bi;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	u32 new_size = sizeof(*item);
	u64 refs;
	int ret;

	leaf = path->nodes[0];
	BUG_ON(btrfs_item_size_nr(leaf, path->slots[0]) != sizeof(*ei0));

	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	ei0 = btrfs_item_ptr(leaf, path->slots[0],
			     struct btrfs_extent_item_v0);
	refs = btrfs_extent_refs_v0(leaf, ei0);

	if (owner == (u64)-1) {
		while (1) {
			if (path->slots[0] >= btrfs_header_nritems(leaf)) {
				ret = btrfs_next_leaf(root, path);
				if (ret < 0)
					return ret;
				BUG_ON(ret > 0); /* Corruption */
				leaf = path->nodes[0];
			}
			btrfs_item_key_to_cpu(leaf, &found_key,
					      path->slots[0]);
			BUG_ON(key.objectid != found_key.objectid);
			if (found_key.type != BTRFS_EXTENT_REF_V0_KEY) {
				path->slots[0]++;
				continue;
			}
			ref0 = btrfs_item_ptr(leaf, path->slots[0],
					      struct btrfs_extent_ref_v0);
			owner = btrfs_ref_objectid_v0(leaf, ref0);
			break;
		}
	}
	btrfs_release_path(path);

	if (owner < BTRFS_FIRST_FREE_OBJECTID)
		new_size += sizeof(*bi);

	new_size -= sizeof(*ei0);
	ret = btrfs_search_slot(trans, root, &key, path,
				new_size + extra_size, 1);
	if (ret < 0)
		return ret;
	BUG_ON(ret); /* Corruption */

	btrfs_extend_item(root, path, new_size);

	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	btrfs_set_extent_refs(leaf, item, refs);
	/* FIXME: get real generation */
	btrfs_set_extent_generation(leaf, item, 0);
	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		btrfs_set_extent_flags(leaf, item,
				       BTRFS_EXTENT_FLAG_TREE_BLOCK |
				       BTRFS_BLOCK_FLAG_FULL_BACKREF);
		bi = (struct btrfs_tree_block_info *)(item + 1);
		/* FIXME: get first key of the block */
		memset_extent_buffer(leaf, 0, (unsigned long)bi, sizeof(*bi));
		btrfs_set_tree_block_level(leaf, bi, (int)owner);
	} else {
		btrfs_set_extent_flags(leaf, item, BTRFS_EXTENT_FLAG_DATA);
	}
	btrfs_mark_buffer_dirty(leaf);
	return 0;
}
#endif
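
/*
 * The implicit back ref key offset is built from two independent crc32c
 * accumulators: the root objectid is folded into the high word, while the
 * owner (inode objectid) and file offset are chained into the low word,
 * and the two are combined as (high << 31) ^ low so that a single u64
 * covers all three fields.
 */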
static u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset)
{
	u32 high_crc = ~(u32)0;
	u32 low_crc = ~(u32)0;
	__le64 lenum;

	lenum = cpu_to_le64(root_objectid);
	high_crc = btrfs_crc32c(high_crc, &lenum, sizeof(lenum));
	lenum = cpu_to_le64(owner);
	low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));
	lenum = cpu_to_le64(offset);
	low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));

	return ((u64)high_crc << 31) ^ (u64)low_crc;
}

static u64 hash_extent_data_ref_item(struct extent_buffer *leaf,
				     struct btrfs_extent_data_ref *ref)
{
	return hash_extent_data_ref(btrfs_extent_data_ref_root(leaf, ref),
				    btrfs_extent_data_ref_objectid(leaf, ref),
				    btrfs_extent_data_ref_offset(leaf, ref));
}

static int match_extent_data_ref(struct extent_buffer *leaf,
				 struct btrfs_extent_data_ref *ref,
				 u64 root_objectid, u64 owner, u64 offset)
{
	if (btrfs_extent_data_ref_root(leaf, ref) != root_objectid ||
	    btrfs_extent_data_ref_objectid(leaf, ref) != owner ||
	    btrfs_extent_data_ref_offset(leaf, ref) != offset)
		return 0;
	return 1;
}

static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct btrfs_path *path,
					   u64 bytenr, u64 parent,
					   u64 root_objectid,
					   u64 owner, u64 offset)
{
	struct btrfs_key key;
	struct btrfs_extent_data_ref *ref;
	struct extent_buffer *leaf;
	u32 nritems;
	int ret;
	int recow;
	int err = -ENOENT;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_DATA_REF_KEY;
		key.offset = parent;
	} else {
		key.type = BTRFS_EXTENT_DATA_REF_KEY;
		key.offset = hash_extent_data_ref(root_objectid,
						  owner, offset);
	}
again:
	recow = 0;
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0) {
		err = ret;
		goto fail;
	}

	if (parent) {
		if (!ret)
			return 0;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
		key.type = BTRFS_EXTENT_REF_V0_KEY;
		btrfs_release_path(path);
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret < 0) {
			err = ret;
			goto fail;
		}
		if (!ret)
			return 0;
#endif
		goto fail;
	}

	leaf = path->nodes[0];
	nritems = btrfs_header_nritems(leaf);
	while (1) {
		if (path->slots[0] >= nritems) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				err = ret;
			if (ret)
				goto fail;

			leaf = path->nodes[0];
			nritems = btrfs_header_nritems(leaf);
			recow = 1;
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid != bytenr ||
		    key.type != BTRFS_EXTENT_DATA_REF_KEY)
			goto fail;

		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_data_ref);

		if (match_extent_data_ref(leaf, ref, root_objectid,
					  owner, offset)) {
			if (recow) {
				btrfs_release_path(path);
				goto again;
			}
			err = 0;
			break;
		}
		path->slots[0]++;
	}
fail:
	return err;
}

static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct btrfs_path *path,
					   u64 bytenr, u64 parent,
					   u64 root_objectid, u64 owner,
					   u64 offset, int refs_to_add)
{
	struct btrfs_key key;
	struct extent_buffer *leaf;
	u32 size;
	u32 num_refs;
	int ret;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_DATA_REF_KEY;
		key.offset = parent;
		size = sizeof(struct btrfs_shared_data_ref);
	} else {
		key.type = BTRFS_EXTENT_DATA_REF_KEY;
		key.offset = hash_extent_data_ref(root_objectid,
						  owner, offset);
		size = sizeof(struct btrfs_extent_data_ref);
	}

	ret = btrfs_insert_empty_item(trans, root, path, &key, size);
	if (ret && ret != -EEXIST)
		goto fail;

	leaf = path->nodes[0];
	if (parent) {
		struct btrfs_shared_data_ref *ref;
		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_shared_data_ref);
		if (ret == 0) {
			btrfs_set_shared_data_ref_count(leaf, ref, refs_to_add);
		} else {
			num_refs = btrfs_shared_data_ref_count(leaf, ref);
			num_refs += refs_to_add;
			btrfs_set_shared_data_ref_count(leaf, ref, num_refs);
		}
	} else {
		struct btrfs_extent_data_ref *ref;
		while (ret == -EEXIST) {
			ref = btrfs_item_ptr(leaf, path->slots[0],
					     struct btrfs_extent_data_ref);
			if (match_extent_data_ref(leaf, ref, root_objectid,
						  owner, offset))
				break;
			btrfs_release_path(path);
			key.offset++;
			ret = btrfs_insert_empty_item(trans, root, path, &key,
						      size);
			if (ret && ret != -EEXIST)
				goto fail;

			leaf = path->nodes[0];
		}
		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_data_ref);
		if (ret == 0) {
			btrfs_set_extent_data_ref_root(leaf, ref,
						       root_objectid);
			btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
			btrfs_set_extent_data_ref_offset(leaf, ref, offset);
			btrfs_set_extent_data_ref_count(leaf, ref, refs_to_add);
		} else {
			num_refs = btrfs_extent_data_ref_count(leaf, ref);
			num_refs += refs_to_add;
			btrfs_set_extent_data_ref_count(leaf, ref, num_refs);
		}
	}
	btrfs_mark_buffer_dirty(leaf);
	ret = 0;
fail:
	btrfs_release_path(path);
	return ret;
}

static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct btrfs_path *path,
					   int refs_to_drop, int *last_ref)
{
	struct btrfs_key key;
	struct btrfs_extent_data_ref *ref1 = NULL;
	struct btrfs_shared_data_ref *ref2 = NULL;
	struct extent_buffer *leaf;
	u32 num_refs = 0;
	int ret = 0;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

	if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
		ref1 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_data_ref);
		num_refs = btrfs_extent_data_ref_count(leaf, ref1);
	} else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
		ref2 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_shared_data_ref);
		num_refs = btrfs_shared_data_ref_count(leaf, ref2);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	} else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
		struct btrfs_extent_ref_v0 *ref0;
		ref0 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_ref_v0);
		num_refs = btrfs_ref_count_v0(leaf, ref0);
#endif
	} else {
		BUG();
	}

	BUG_ON(num_refs < refs_to_drop);
	num_refs -= refs_to_drop;

	if (num_refs == 0) {
		ret = btrfs_del_item(trans, root, path);
		*last_ref = 1;
	} else {
		if (key.type == BTRFS_EXTENT_DATA_REF_KEY)
			btrfs_set_extent_data_ref_count(leaf, ref1, num_refs);
		else if (key.type == BTRFS_SHARED_DATA_REF_KEY)
			btrfs_set_shared_data_ref_count(leaf, ref2, num_refs);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
		else {
			struct btrfs_extent_ref_v0 *ref0;
			ref0 = btrfs_item_ptr(leaf, path->slots[0],
					      struct btrfs_extent_ref_v0);
			btrfs_set_ref_count_v0(leaf, ref0, num_refs);
		}
#endif
		btrfs_mark_buffer_dirty(leaf);
	}
	return ret;
}

static noinline u32 extent_data_ref_count(struct btrfs_root *root,
					  struct btrfs_path *path,
					  struct btrfs_extent_inline_ref *iref)
{
	struct btrfs_key key;
	struct extent_buffer *leaf;
	struct btrfs_extent_data_ref *ref1;
	struct btrfs_shared_data_ref *ref2;
	u32 num_refs = 0;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	if (iref) {
		if (btrfs_extent_inline_ref_type(leaf, iref) ==
		    BTRFS_EXTENT_DATA_REF_KEY) {
			ref1 = (struct btrfs_extent_data_ref *)(&iref->offset);
			num_refs = btrfs_extent_data_ref_count(leaf, ref1);
		} else {
			ref2 = (struct btrfs_shared_data_ref *)(iref + 1);
			num_refs = btrfs_shared_data_ref_count(leaf, ref2);
		}
	} else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
		ref1 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_data_ref);
		num_refs = btrfs_extent_data_ref_count(leaf, ref1);
	} else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
		ref2 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_shared_data_ref);
		num_refs = btrfs_shared_data_ref_count(leaf, ref2);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	} else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
		struct btrfs_extent_ref_v0 *ref0;
		ref0 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_ref_v0);
		num_refs = btrfs_ref_count_v0(leaf, ref0);
#endif
	} else {
		WARN_ON(1);
	}
	return num_refs;
}

static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path,
					  u64 bytenr, u64 parent,
					  u64 root_objectid)
{
	struct btrfs_key key;
	int ret;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_BLOCK_REF_KEY;
		key.offset = parent;
	} else {
		key.type = BTRFS_TREE_BLOCK_REF_KEY;
		key.offset = root_objectid;
	}

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0)
		ret = -ENOENT;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (ret == -ENOENT && parent) {
		btrfs_release_path(path);
		key.type = BTRFS_EXTENT_REF_V0_KEY;
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret > 0)
			ret = -ENOENT;
	}
#endif
	return ret;
}

static noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path,
					  u64 bytenr, u64 parent,
					  u64 root_objectid)
{
	struct btrfs_key key;
	int ret;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_BLOCK_REF_KEY;
		key.offset = parent;
	} else {
		key.type = BTRFS_TREE_BLOCK_REF_KEY;
		key.offset = root_objectid;
	}

	ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
	btrfs_release_path(path);
	return ret;
}
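
/*
 * Pick the back ref key type for a new reference: tree blocks (owner below
 * BTRFS_FIRST_FREE_OBJECTID) get TREE_BLOCK/SHARED_BLOCK ref keys, data
 * extents get EXTENT_DATA/SHARED_DATA ref keys; a non-zero parent selects
 * the shared (full back ref) variant.
 */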
static inline int extent_ref_type(u64 parent, u64 owner)
{
	int type;
	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		if (parent > 0)
			type = BTRFS_SHARED_BLOCK_REF_KEY;
		else
			type = BTRFS_TREE_BLOCK_REF_KEY;
	} else {
		if (parent > 0)
			type = BTRFS_SHARED_DATA_REF_KEY;
		else
			type = BTRFS_EXTENT_DATA_REF_KEY;
	}
	return type;
}
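
/*
 * Walk up the path and return the key immediately after the current slot,
 * without moving the path.  Returns 1 if the current position is already
 * at the last key in the tree.
 */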
static int find_next_key(struct btrfs_path *path, int level,
			 struct btrfs_key *key)
{
	for (; level < BTRFS_MAX_LEVEL; level++) {
		if (!path->nodes[level])
			break;
		if (path->slots[level] + 1 >=
		    btrfs_header_nritems(path->nodes[level]))
			continue;
		if (level == 0)
			btrfs_item_key_to_cpu(path->nodes[level], key,
					      path->slots[level] + 1);
		else
			btrfs_node_key_to_cpu(path->nodes[level], key,
					      path->slots[level] + 1);
		return 0;
	}
	return 1;
}

/*
 * look for an inline back ref.  if a back ref is found, *ref_ret is set
 * to the address of the inline back ref, and 0 is returned.
 *
 * if no back ref is found, *ref_ret is set to the address where it
 * should be inserted, and -ENOENT is returned.
 *
 * if insert is true and there are too many inline back refs, the path
 * points to the extent item, and -EAGAIN is returned.
 *
 * NOTE: inline back refs are ordered in the same way that back ref
 *	 items in the tree are ordered.
 */
static noinline_for_stack
int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref **ref_ret,
				 u64 bytenr, u64 num_bytes,
				 u64 parent, u64 root_objectid,
				 u64 owner, u64 offset, int insert)
{
	struct btrfs_key key;
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	struct btrfs_extent_inline_ref *iref;
	u64 flags;
	u64 item_size;
	unsigned long ptr;
	unsigned long end;
	int extra_size;
	int type;
	int want;
	int ret;
	int err = 0;
	bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
						 SKINNY_METADATA);

	key.objectid = bytenr;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	key.offset = num_bytes;

	want = extent_ref_type(parent, owner);
	if (insert) {
		extra_size = btrfs_extent_inline_ref_size(want);
		path->keep_locks = 1;
	} else
		extra_size = -1;

	/*
	 * Owner is our parent level, so we can just add one to get the level
	 * for the block we are interested in.
	 */
	if (skinny_metadata && owner < BTRFS_FIRST_FREE_OBJECTID) {
		key.type = BTRFS_METADATA_ITEM_KEY;
		key.offset = owner;
	}

again:
	ret = btrfs_search_slot(trans, root, &key, path, extra_size, 1);
	if (ret < 0) {
		err = ret;
		goto out;
	}

	/*
	 * We may be a newly converted file system which still has the old fat
	 * extent entries for metadata, so try and see if we have one of those.
	 */
	if (ret > 0 && skinny_metadata) {
		skinny_metadata = false;
		if (path->slots[0]) {
			path->slots[0]--;
			btrfs_item_key_to_cpu(path->nodes[0], &key,
					      path->slots[0]);
			if (key.objectid == bytenr &&
			    key.type == BTRFS_EXTENT_ITEM_KEY &&
			    key.offset == num_bytes)
				ret = 0;
		}
		if (ret) {
			key.objectid = bytenr;
			key.type = BTRFS_EXTENT_ITEM_KEY;
			key.offset = num_bytes;
			btrfs_release_path(path);
			goto again;
		}
	}

	if (ret && !insert) {
		err = -ENOENT;
		goto out;
	} else if (WARN_ON(ret)) {
		err = -EIO;
		goto out;
	}

	leaf = path->nodes[0];
	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (item_size < sizeof(*ei)) {
		if (!insert) {
			err = -ENOENT;
			goto out;
		}
		ret = convert_extent_item_v0(trans, root, path, owner,
					     extra_size);
		if (ret < 0) {
			err = ret;
			goto out;
		}
		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
	}
#endif
	BUG_ON(item_size < sizeof(*ei));

	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	flags = btrfs_extent_flags(leaf, ei);

	ptr = (unsigned long)(ei + 1);
	end = (unsigned long)ei + item_size;

	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK && !skinny_metadata) {
		ptr += sizeof(struct btrfs_tree_block_info);
		BUG_ON(ptr > end);
	}

	err = -ENOENT;
	while (1) {
		if (ptr >= end) {
			WARN_ON(ptr > end);
			break;
		}
		iref = (struct btrfs_extent_inline_ref *)ptr;
		type = btrfs_extent_inline_ref_type(leaf, iref);
		if (want < type)
			break;
		if (want > type) {
			ptr += btrfs_extent_inline_ref_size(type);
			continue;
		}

		if (type == BTRFS_EXTENT_DATA_REF_KEY) {
			struct btrfs_extent_data_ref *dref;
			dref = (struct btrfs_extent_data_ref *)(&iref->offset);
			if (match_extent_data_ref(leaf, dref, root_objectid,
						  owner, offset)) {
				err = 0;
				break;
			}
			if (hash_extent_data_ref_item(leaf, dref) <
			    hash_extent_data_ref(root_objectid, owner, offset))
				break;
		} else {
			u64 ref_offset;
			ref_offset = btrfs_extent_inline_ref_offset(leaf, iref);
			if (parent > 0) {
				if (parent == ref_offset) {
					err = 0;
					break;
				}
				if (ref_offset < parent)
					break;
			} else {
				if (root_objectid == ref_offset) {
					err = 0;
					break;
				}
				if (ref_offset < root_objectid)
					break;
			}
		}
		ptr += btrfs_extent_inline_ref_size(type);
	}
	if (err == -ENOENT && insert) {
		if (item_size + extra_size >=
		    BTRFS_MAX_EXTENT_ITEM_SIZE(root)) {
			err = -EAGAIN;
			goto out;
		}
		/*
		 * To add a new inline back ref, we have to make sure
		 * there is no corresponding back ref item.
		 * For simplicity, we just do not add a new inline back
		 * ref if there is any kind of item for this block
		 */
		if (find_next_key(path, 0, &key) == 0 &&
		    key.objectid == bytenr &&
		    key.type < BTRFS_BLOCK_GROUP_ITEM_KEY) {
			err = -EAGAIN;
			goto out;
		}
	}
	*ref_ret = (struct btrfs_extent_inline_ref *)ptr;
out:
	if (insert) {
		path->keep_locks = 0;
		btrfs_unlock_up_safe(path, 1);
	}
	return err;
}

/*
 * helper to add new inline back ref
 */
static noinline_for_stack
void setup_inline_extent_backref(struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref *iref,
				 u64 parent, u64 root_objectid,
				 u64 owner, u64 offset, int refs_to_add,
				 struct btrfs_delayed_extent_op *extent_op)
{
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	unsigned long ptr;
	unsigned long end;
	unsigned long item_offset;
	u64 refs;
	int size;
	int type;

	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	item_offset = (unsigned long)iref - (unsigned long)ei;

	type = extent_ref_type(parent, owner);
	size = btrfs_extent_inline_ref_size(type);

	btrfs_extend_item(root, path, size);

	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	refs = btrfs_extent_refs(leaf, ei);
	refs += refs_to_add;
	btrfs_set_extent_refs(leaf, ei, refs);
	if (extent_op)
		__run_delayed_extent_op(extent_op, leaf, ei);

	ptr = (unsigned long)ei + item_offset;
	end = (unsigned long)ei + btrfs_item_size_nr(leaf, path->slots[0]);
	if (ptr < end - size)
		memmove_extent_buffer(leaf, ptr + size, ptr,
				      end - size - ptr);

	iref = (struct btrfs_extent_inline_ref *)ptr;
	btrfs_set_extent_inline_ref_type(leaf, iref, type);
	if (type == BTRFS_EXTENT_DATA_REF_KEY) {
		struct btrfs_extent_data_ref *dref;
		dref = (struct btrfs_extent_data_ref *)(&iref->offset);
		btrfs_set_extent_data_ref_root(leaf, dref, root_objectid);
		btrfs_set_extent_data_ref_objectid(leaf, dref, owner);
		btrfs_set_extent_data_ref_offset(leaf, dref, offset);
		btrfs_set_extent_data_ref_count(leaf, dref, refs_to_add);
	} else if (type == BTRFS_SHARED_DATA_REF_KEY) {
		struct btrfs_shared_data_ref *sref;
		sref = (struct btrfs_shared_data_ref *)(iref + 1);
		btrfs_set_shared_data_ref_count(leaf, sref, refs_to_add);
		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
	} else if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
	} else {
		btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
	}
	btrfs_mark_buffer_dirty(leaf);
}
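
/*
 * Find an existing back ref for the extent: first look for an inline ref
 * in the extent item, then fall back to the keyed back ref item.
 */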
static int lookup_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref **ref_ret,
				 u64 bytenr, u64 num_bytes, u64 parent,
				 u64 root_objectid, u64 owner, u64 offset)
{
	int ret;

	ret = lookup_inline_extent_backref(trans, root, path, ref_ret,
					   bytenr, num_bytes, parent,
					   root_objectid, owner, offset, 0);
	if (ret != -ENOENT)
		return ret;

	btrfs_release_path(path);
	*ref_ret = NULL;

	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		ret = lookup_tree_block_ref(trans, root, path, bytenr, parent,
					    root_objectid);
	} else {
		ret = lookup_extent_data_ref(trans, root, path, bytenr, parent,
					     root_objectid, owner, offset);
	}
	return ret;
}

/*
 * helper to update/remove inline back ref
 */
static noinline_for_stack
void update_inline_extent_backref(struct btrfs_root *root,
				  struct btrfs_path *path,
				  struct btrfs_extent_inline_ref *iref,
				  int refs_to_mod,
				  struct btrfs_delayed_extent_op *extent_op,
				  int *last_ref)
{
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	struct btrfs_extent_data_ref *dref = NULL;
	struct btrfs_shared_data_ref *sref = NULL;
	unsigned long ptr;
	unsigned long end;
	u32 item_size;
	int size;
	int type;
	u64 refs;

	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	refs = btrfs_extent_refs(leaf, ei);
	WARN_ON(refs_to_mod < 0 && refs + refs_to_mod <= 0);
	refs += refs_to_mod;
	btrfs_set_extent_refs(leaf, ei, refs);
	if (extent_op)
		__run_delayed_extent_op(extent_op, leaf, ei);

	type = btrfs_extent_inline_ref_type(leaf, iref);

	if (type == BTRFS_EXTENT_DATA_REF_KEY) {
		dref = (struct btrfs_extent_data_ref *)(&iref->offset);
		refs = btrfs_extent_data_ref_count(leaf, dref);
	} else if (type == BTRFS_SHARED_DATA_REF_KEY) {
		sref = (struct btrfs_shared_data_ref *)(iref + 1);
		refs = btrfs_shared_data_ref_count(leaf, sref);
	} else {
		refs = 1;
		BUG_ON(refs_to_mod != -1);
	}

	BUG_ON(refs_to_mod < 0 && refs < -refs_to_mod);
	refs += refs_to_mod;

	if (refs > 0) {
		if (type == BTRFS_EXTENT_DATA_REF_KEY)
			btrfs_set_extent_data_ref_count(leaf, dref, refs);
		else
			btrfs_set_shared_data_ref_count(leaf, sref, refs);
	} else {
		*last_ref = 1;
		size = btrfs_extent_inline_ref_size(type);
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		ptr = (unsigned long)iref;
		end = (unsigned long)ei + item_size;
		if (ptr + size < end)
			memmove_extent_buffer(leaf, ptr, ptr + size,
					      end - ptr - size);
		item_size -= size;
		btrfs_truncate_item(root, path, item_size, 1);
	}
	btrfs_mark_buffer_dirty(leaf);
}

static noinline_for_stack
int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 u64 bytenr, u64 num_bytes, u64 parent,
				 u64 root_objectid, u64 owner,
				 u64 offset, int refs_to_add,
				 struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_extent_inline_ref *iref;
	int ret;

	ret = lookup_inline_extent_backref(trans, root, path, &iref,
					   bytenr, num_bytes, parent,
					   root_objectid, owner, offset, 1);
	if (ret == 0) {
		BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID);
		update_inline_extent_backref(root, path, iref,
					     refs_to_add, extent_op, NULL);
	} else if (ret == -ENOENT) {
		setup_inline_extent_backref(root, path, iref, parent,
					    root_objectid, owner, offset,
					    refs_to_add, extent_op);
		ret = 0;
	}
	return ret;
}
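
/*
 * Insert a keyed back ref item: a tree block ref (always a single
 * reference) or a data ref that can carry a count.
 */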
static int insert_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 u64 bytenr, u64 parent, u64 root_objectid,
				 u64 owner, u64 offset, int refs_to_add)
{
	int ret;
	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		BUG_ON(refs_to_add != 1);
		ret = insert_tree_block_ref(trans, root, path, bytenr,
					    parent, root_objectid);
	} else {
		ret = insert_extent_data_ref(trans, root, path, bytenr,
					     parent, root_objectid,
					     owner, offset, refs_to_add);
	}
	return ret;
}
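
/*
 * Drop refs_to_drop references from a back ref: inline refs are updated
 * (or removed) in place, keyed data refs have their count decremented, and
 * keyed tree block refs are simply deleted.  *last_ref is set when the
 * last reference goes away.
 */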
static int remove_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref *iref,
				 int refs_to_drop, int is_data, int *last_ref)
{
	int ret = 0;

	BUG_ON(!is_data && refs_to_drop != 1);
	if (iref) {
		update_inline_extent_backref(root, path, iref,
					     -refs_to_drop, NULL, last_ref);
	} else if (is_data) {
		ret = remove_extent_data_ref(trans, root, path, refs_to_drop,
					     last_ref);
	} else {
		*last_ref = 1;
		ret = btrfs_del_item(trans, root, path);
	}
	return ret;
}
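
/* blkdev_issue_discard() works in 512-byte sectors, hence the shift by 9 */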
static int btrfs_issue_discard(struct block_device *bdev,
			       u64 start, u64 len)
{
	return blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_NOFS, 0);
}

static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
				u64 num_bytes, u64 *actual_bytes)
{
	int ret;
	u64 discarded_bytes = 0;
	struct btrfs_bio *bbio = NULL;

	/* Tell the block device(s) that the sectors can be discarded */
	ret = btrfs_map_block(root->fs_info, REQ_DISCARD,
			      bytenr, &num_bytes, &bbio, 0);
	/* Error condition is -ENOMEM */
	if (!ret) {
		struct btrfs_bio_stripe *stripe = bbio->stripes;
		int i;

		for (i = 0; i < bbio->num_stripes; i++, stripe++) {
			if (!stripe->dev->can_discard)
				continue;

			ret = btrfs_issue_discard(stripe->dev->bdev,
						  stripe->physical,
						  stripe->length);
			if (!ret)
				discarded_bytes += stripe->length;
			else if (ret != -EOPNOTSUPP)
				break; /* Logic errors or -ENOMEM, or -EIO
					  but I don't know how that could
					  happen JDM */

			/*
			 * Just in case we get back EOPNOTSUPP for some reason,
			 * just ignore the return value so we don't screw up
			 * people calling discard_extent.
			 */
			ret = 0;
		}
		kfree(bbio);
	}

	if (actual_bytes)
		*actual_bytes = discarded_bytes;

	if (ret == -EOPNOTSUPP)
		ret = 0;
	return ret;
}

/* Can return -ENOMEM */
int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root,
			 u64 bytenr, u64 num_bytes, u64 parent,
			 u64 root_objectid, u64 owner, u64 offset,
			 int no_quota)
{
	int ret;
	struct btrfs_fs_info *fs_info = root->fs_info;

	BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID &&
	       root_objectid == BTRFS_TREE_LOG_OBJECTID);

	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
					num_bytes,
					parent, root_objectid, (int)owner,
					BTRFS_ADD_DELAYED_REF, NULL, no_quota);
	} else {
		ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
					num_bytes,
					parent, root_objectid, owner, offset,
					BTRFS_ADD_DELAYED_REF, NULL, no_quota);
	}
	return ret;
}

static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  u64 bytenr, u64 num_bytes,
				  u64 parent, u64 root_objectid,
				  u64 owner, u64 offset, int refs_to_add,
				  int no_quota,
				  struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_extent_item *item;
	struct btrfs_key key;
	u64 refs;
	int ret;
	enum btrfs_qgroup_operation_type type = BTRFS_QGROUP_OPER_ADD_EXCL;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	if (!is_fstree(root_objectid) || !root->fs_info->quota_enabled)
		no_quota = 1;

	path->reada = 1;
	path->leave_spinning = 1;
	/* this will setup the path even if it fails to insert the back ref */
	ret = insert_inline_extent_backref(trans, fs_info->extent_root, path,
					   bytenr, num_bytes, parent,
					   root_objectid, owner, offset,
					   refs_to_add, extent_op);
	if ((ret < 0 && ret != -EAGAIN) || (!ret && no_quota))
		goto out;
	/*
	 * Ok we were able to insert an inline extent ref and it appears to
	 * be a new reference, deal with the qgroup accounting.
	 */
	if (!ret && !no_quota) {
		ASSERT(root->fs_info->quota_enabled);
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		item = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_item);
		if (btrfs_extent_refs(leaf, item) > (u64)refs_to_add)
			type = BTRFS_QGROUP_OPER_ADD_SHARED;
		btrfs_release_path(path);

		ret = btrfs_qgroup_record_ref(trans, fs_info, root_objectid,
					      bytenr, num_bytes, type, 0);
		goto out;
	}

	/*
	 * Ok we had -EAGAIN which means we didn't have space to insert an
	 * inline extent ref, so just update the reference count and add a
	 * normal backref.
	 */
	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	refs = btrfs_extent_refs(leaf, item);
	if (refs)
		type = BTRFS_QGROUP_OPER_ADD_SHARED;
	btrfs_set_extent_refs(leaf, item, refs + refs_to_add);
	if (extent_op)
		__run_delayed_extent_op(extent_op, leaf, item);

	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	if (!no_quota) {
		ret = btrfs_qgroup_record_ref(trans, fs_info, root_objectid,
					      bytenr, num_bytes, type, 0);
		if (ret)
			goto out;
	}

	path->reada = 1;
	path->leave_spinning = 1;
	/* now insert the actual backref */
	ret = insert_extent_backref(trans, root->fs_info->extent_root,
				    path, bytenr, parent, root_objectid,
				    owner, offset, refs_to_add);
	if (ret)
		btrfs_abort_transaction(trans, root, ret);
out:
	btrfs_free_path(path);
	return ret;
}

static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_delayed_ref_node *node,
				struct btrfs_delayed_extent_op *extent_op,
				int insert_reserved)
{
	int ret = 0;
	struct btrfs_delayed_data_ref *ref;
	struct btrfs_key ins;
	u64 parent = 0;
	u64 ref_root = 0;
	u64 flags = 0;

	ins.objectid = node->bytenr;
	ins.offset = node->num_bytes;
	ins.type = BTRFS_EXTENT_ITEM_KEY;

	ref = btrfs_delayed_node_to_data_ref(node);
	trace_run_delayed_data_ref(node, ref, node->action);

	if (node->type == BTRFS_SHARED_DATA_REF_KEY)
		parent = ref->parent;
	ref_root = ref->root;

	if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
		if (extent_op)
			flags |= extent_op->flags_to_set;
		ret = alloc_reserved_file_extent(trans, root,
						 parent, ref_root, flags,
						 ref->objectid, ref->offset,
						 &ins, node->ref_mod);
	} else if (node->action == BTRFS_ADD_DELAYED_REF) {
		ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
					     node->num_bytes, parent,
					     ref_root, ref->objectid,
					     ref->offset, node->ref_mod,
					     node->no_quota, extent_op);
	} else if (node->action == BTRFS_DROP_DELAYED_REF) {
		ret = __btrfs_free_extent(trans, root, node->bytenr,
					  node->num_bytes, parent,
					  ref_root, ref->objectid,
					  ref->offset, node->ref_mod,
					  extent_op, node->no_quota);
	} else {
		BUG();
	}
	return ret;
}
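
/*
 * Apply the pending changes recorded in a delayed extent op to the extent
 * item itself: OR in any new flags and, for tree blocks, update the first
 * key stored in the tree block info.
 */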
static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
				    struct extent_buffer *leaf,
				    struct btrfs_extent_item *ei)
{
	u64 flags = btrfs_extent_flags(leaf, ei);
	if (extent_op->update_flags) {
		flags |= extent_op->flags_to_set;
		btrfs_set_extent_flags(leaf, ei, flags);
	}

	if (extent_op->update_key) {
		struct btrfs_tree_block_info *bi;
		BUG_ON(!(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK));
		bi = (struct btrfs_tree_block_info *)(ei + 1);
		btrfs_set_tree_block_key(leaf, bi, &extent_op->key);
	}
}

static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_delayed_ref_node *node,
				 struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_key key;
	struct btrfs_path *path;
	struct btrfs_extent_item *ei;
	struct extent_buffer *leaf;
	u32 item_size;
	int ret;
	int err = 0;
	int metadata = !extent_op->is_data;

	if (trans->aborted)
		return 0;

	if (metadata && !btrfs_fs_incompat(root->fs_info, SKINNY_METADATA))
		metadata = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = node->bytenr;

	if (metadata) {
		key.type = BTRFS_METADATA_ITEM_KEY;
		key.offset = extent_op->level;
	} else {
		key.type = BTRFS_EXTENT_ITEM_KEY;
		key.offset = node->num_bytes;
	}

again:
	path->reada = 1;
	path->leave_spinning = 1;
	ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key,
				path, 0, 1);
	if (ret < 0) {
		err = ret;
		goto out;
	}
	if (ret > 0) {
		if (metadata) {
			if (path->slots[0] > 0) {
				path->slots[0]--;
				btrfs_item_key_to_cpu(path->nodes[0], &key,
						      path->slots[0]);
				if (key.objectid == node->bytenr &&
				    key.type == BTRFS_EXTENT_ITEM_KEY &&
				    key.offset == node->num_bytes)
					ret = 0;
			}
			if (ret > 0) {
				btrfs_release_path(path);
				metadata = 0;

				key.objectid = node->bytenr;
				key.offset = node->num_bytes;
				key.type = BTRFS_EXTENT_ITEM_KEY;
				goto again;
			}
		} else {
			err = -EIO;
			goto out;
		}
	}

	leaf = path->nodes[0];
	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (item_size < sizeof(*ei)) {
		ret = convert_extent_item_v0(trans, root->fs_info->extent_root,
					     path, (u64)-1, 0);
		if (ret < 0) {
			err = ret;
			goto out;
		}
		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
	}
#endif
	BUG_ON(item_size < sizeof(*ei));
	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	__run_delayed_extent_op(extent_op, leaf, ei);

	btrfs_mark_buffer_dirty(leaf);
out:
	btrfs_free_path(path);
	return err;
}

static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_delayed_ref_node *node,
				struct btrfs_delayed_extent_op *extent_op,
				int insert_reserved)
{
	int ret = 0;
	struct btrfs_delayed_tree_ref *ref;
	struct btrfs_key ins;
	u64 parent = 0;
	u64 ref_root = 0;
	bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
						 SKINNY_METADATA);

	ref = btrfs_delayed_node_to_tree_ref(node);
	trace_run_delayed_tree_ref(node, ref, node->action);

	if (node->type == BTRFS_SHARED_BLOCK_REF_KEY)
		parent = ref->parent;
	ref_root = ref->root;

	ins.objectid = node->bytenr;
	if (skinny_metadata) {
		ins.offset = ref->level;
		ins.type = BTRFS_METADATA_ITEM_KEY;
	} else {
		ins.offset = node->num_bytes;
		ins.type = BTRFS_EXTENT_ITEM_KEY;
	}

	BUG_ON(node->ref_mod != 1);
	if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
		BUG_ON(!extent_op || !extent_op->update_flags);
		ret = alloc_reserved_tree_block(trans, root,
						parent, ref_root,
						extent_op->flags_to_set,
						&extent_op->key,
						ref->level, &ins,
						node->no_quota);
	} else if (node->action == BTRFS_ADD_DELAYED_REF) {
		ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
					     node->num_bytes, parent, ref_root,
					     ref->level, 0, 1, node->no_quota,
					     extent_op);
	} else if (node->action == BTRFS_DROP_DELAYED_REF) {
		ret = __btrfs_free_extent(trans, root, node->bytenr,
					  node->num_bytes, parent, ref_root,
					  ref->level, 0, 1, extent_op,
					  node->no_quota);
	} else {
		BUG();
	}
	return ret;
}

/* helper function to actually process a single delayed ref entry */
static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       struct btrfs_delayed_ref_node *node,
			       struct btrfs_delayed_extent_op *extent_op,
			       int insert_reserved)
{
	int ret = 0;

	if (trans->aborted) {
		if (insert_reserved)
			btrfs_pin_extent(root, node->bytenr,
					 node->num_bytes, 1);
		return 0;
	}

	if (btrfs_delayed_ref_is_head(node)) {
		struct btrfs_delayed_ref_head *head;
		/*
		 * we've hit the end of the chain and we were supposed
		 * to insert this extent into the tree.  But, it got
		 * deleted before we ever needed to insert it, so all
		 * we have to do is clean up the accounting
		 */
		BUG_ON(extent_op);
		head = btrfs_delayed_node_to_head(node);
		trace_run_delayed_ref_head(node, head, node->action);

		if (insert_reserved) {
			btrfs_pin_extent(root, node->bytenr,
					 node->num_bytes, 1);
			if (head->is_data) {
				ret = btrfs_del_csums(trans, root,
						      node->bytenr,
						      node->num_bytes);
			}
		}
		return ret;
	}

	if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
	    node->type == BTRFS_SHARED_BLOCK_REF_KEY)
		ret = run_delayed_tree_ref(trans, root, node, extent_op,
					   insert_reserved);
	else if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
		 node->type == BTRFS_SHARED_DATA_REF_KEY)
		ret = run_delayed_data_ref(trans, root, node, extent_op,
					   insert_reserved);
	else
		BUG();
	return ret;
}

static noinline struct btrfs_delayed_ref_node *
select_delayed_ref(struct btrfs_delayed_ref_head *head)
{
	struct rb_node *node;
	struct btrfs_delayed_ref_node *ref, *last = NULL;

	/*
	 * select delayed ref of type BTRFS_ADD_DELAYED_REF first.
	 * this prevents the ref count from going down to zero when
	 * there are still pending delayed refs.
	 */
	node = rb_first(&head->ref_root);
	while (node) {
		ref = rb_entry(node, struct btrfs_delayed_ref_node,
				rb_node);
		if (ref->action == BTRFS_ADD_DELAYED_REF)
			return ref;
		else if (last == NULL)
			last = ref;
		node = rb_next(node);
	}
	return last;
}
/*
 * Returns 0 on success or if called with an already aborted transaction.
 * Returns -ENOMEM or -EIO on failure and will abort the transaction.
 */
static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
					     struct btrfs_root *root,
					     unsigned long nr)
{
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_delayed_ref_node *ref;
	struct btrfs_delayed_ref_head *locked_ref = NULL;
	struct btrfs_delayed_extent_op *extent_op;
	struct btrfs_fs_info *fs_info = root->fs_info;
	ktime_t start = ktime_get();
	int ret;
	unsigned long count = 0;
	unsigned long actual_count = 0;
	int must_insert_reserved = 0;

	delayed_refs = &trans->transaction->delayed_refs;
	while (1) {
		if (!locked_ref) {
			if (count >= nr)
				break;

			spin_lock(&delayed_refs->lock);
			locked_ref = btrfs_select_ref_head(trans);
			if (!locked_ref) {
				spin_unlock(&delayed_refs->lock);
				break;
			}

			/* grab the lock that says we are going to process
			 * all the refs for this head */
			ret = btrfs_delayed_ref_lock(trans, locked_ref);
			spin_unlock(&delayed_refs->lock);
			/*
			 * we may have dropped the spin lock to get the head
			 * mutex lock, and that might have given someone else
			 * time to free the head. If that's true, it has been
			 * removed from our list and we can move on.
			 */
			if (ret == -EAGAIN) {
				locked_ref = NULL;
				count++;
				continue;
			}
		}

		/*
		 * We need to try and merge add/drops of the same ref since we
		 * can run into issues with relocate dropping the implicit ref
		 * and then it being added back again before the drop can
		 * finish. If we merged anything we need to re-loop so we can
		 * get a good ref.
		 */
		spin_lock(&locked_ref->lock);
		btrfs_merge_delayed_refs(trans, fs_info, delayed_refs,
					 locked_ref);

		/*
		 * locked_ref is the head node, so we have to go one
		 * node back for any delayed ref updates
		 */
		ref = select_delayed_ref(locked_ref);

		if (ref && ref->seq &&
		    btrfs_check_delayed_seq(fs_info, delayed_refs, ref->seq)) {
			spin_unlock(&locked_ref->lock);
			btrfs_delayed_ref_unlock(locked_ref);
			spin_lock(&delayed_refs->lock);
			locked_ref->processing = 0;
			delayed_refs->num_heads_ready++;
			spin_unlock(&delayed_refs->lock);
			locked_ref = NULL;
			cond_resched();
			count++;
			continue;
		}

		/*
		 * record the must_insert_reserved flag before we
		 * drop the spin lock.
		 */
		must_insert_reserved = locked_ref->must_insert_reserved;
		locked_ref->must_insert_reserved = 0;

		extent_op = locked_ref->extent_op;
		locked_ref->extent_op = NULL;

		if (!ref) {
			/* All delayed refs have been processed, go ahead
			 * and send the head node to run_one_delayed_ref,
			 * so that any accounting fixes can happen
			 */
			ref = &locked_ref->node;

			if (extent_op && must_insert_reserved) {
				btrfs_free_delayed_extent_op(extent_op);
				extent_op = NULL;
			}

			if (extent_op) {
				spin_unlock(&locked_ref->lock);
				ret = run_delayed_extent_op(trans, root,
							    ref, extent_op);
				btrfs_free_delayed_extent_op(extent_op);
				if (ret) {
					/*
					 * Need to reset must_insert_reserved if
					 * there was an error so the abort stuff
					 * can cleanup the reserved space
					 * properly.
					 */
					if (must_insert_reserved)
						locked_ref->must_insert_reserved = 1;
					locked_ref->processing = 0;
					btrfs_debug(fs_info, "run_delayed_extent_op returned %d", ret);
					btrfs_delayed_ref_unlock(locked_ref);
					return ret;
				}
				continue;
			}

			/*
			 * Need to drop our head ref lock and re-acquire the
			 * delayed ref lock and then re-check to make sure
			 * nobody got added.
			 */
			spin_unlock(&locked_ref->lock);
			spin_lock(&delayed_refs->lock);
			spin_lock(&locked_ref->lock);
			if (rb_first(&locked_ref->ref_root) ||
			    locked_ref->extent_op) {
				spin_unlock(&locked_ref->lock);
				spin_unlock(&delayed_refs->lock);
				continue;
			}
			ref->in_tree = 0;
			delayed_refs->num_heads--;
			rb_erase(&locked_ref->href_node,
				 &delayed_refs->href_root);
			spin_unlock(&delayed_refs->lock);
		} else {
			actual_count++;
			ref->in_tree = 0;
			rb_erase(&ref->rb_node, &locked_ref->ref_root);
		}
		atomic_dec(&delayed_refs->num_entries);

		if (!btrfs_delayed_ref_is_head(ref)) {
			/*
			 * when we play the delayed ref, also correct the
			 * ref_mod on the head
			 */
			switch (ref->action) {
			case BTRFS_ADD_DELAYED_REF:
			case BTRFS_ADD_DELAYED_EXTENT:
				locked_ref->node.ref_mod -= ref->ref_mod;
				break;
			case BTRFS_DROP_DELAYED_REF:
				locked_ref->node.ref_mod += ref->ref_mod;
				break;
			default:
				WARN_ON(1);
			}
		}
		spin_unlock(&locked_ref->lock);

		ret = run_one_delayed_ref(trans, root, ref, extent_op,
					  must_insert_reserved);

		btrfs_free_delayed_extent_op(extent_op);
		if (ret) {
			locked_ref->processing = 0;
			btrfs_delayed_ref_unlock(locked_ref);
			btrfs_put_delayed_ref(ref);
			btrfs_debug(fs_info, "run_one_delayed_ref returned %d", ret);
			return ret;
		}

		/*
		 * If this node is a head, that means all the refs in this head
		 * have been dealt with, and we will pick the next head to deal
		 * with, so we must unlock the head and drop it from the cluster
		 * list before we release it.
		 */
		if (btrfs_delayed_ref_is_head(ref)) {
			btrfs_delayed_ref_unlock(locked_ref);
			locked_ref = NULL;
		}
		btrfs_put_delayed_ref(ref);
		count++;
		cond_resched();
	}

	/*
	 * We don't want to include ref heads since we can have empty ref heads
	 * and those will drastically skew our runtime down since we just do
	 * accounting, no actual extent tree updates.
	 */
	if (actual_count > 0) {
		u64 runtime = ktime_to_ns(ktime_sub(ktime_get(), start));
		u64 avg;

		/*
		 * We weigh the current average higher than our current runtime
		 * to avoid large swings in the average.
		 */
		spin_lock(&delayed_refs->lock);
		avg = fs_info->avg_delayed_ref_runtime * 3 + runtime;
		avg = div64_u64(avg, 4);
		fs_info->avg_delayed_ref_runtime = avg;
		spin_unlock(&delayed_refs->lock);
	}
	return 0;
}

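/*
 * Worked example of the weighted average above (illustration only, not part
 * of the original source): if avg_delayed_ref_runtime currently holds 800ns
 * and this batch took 1200ns, the new value is (800 * 3 + 1200) / 4 = 900ns.
 * The 3:1 weighting means a single slow batch only moves the estimate a
 * quarter of the way toward the new sample.
 */
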
#ifdef SCRAMBLE_DELAYED_REFS
/*
 * Normally delayed refs get processed in ascending bytenr order. This
 * correlates in most cases to the order added. To expose dependencies on this
 * order, we start to process the tree in the middle instead of the beginning
 */
static u64 find_middle(struct rb_root *root)
{
	struct rb_node *n = root->rb_node;
	struct btrfs_delayed_ref_node *entry;
	int alt = 1;
	u64 middle;
	u64 first = 0, last = 0;

	n = rb_first(root);
	if (n) {
		entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
		first = entry->bytenr;
	}
	n = rb_last(root);
	if (n) {
		entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
		last = entry->bytenr;
	}
	n = root->rb_node;

	while (n) {
		entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
		WARN_ON(!entry->in_tree);

		middle = entry->bytenr;

		if (alt)
			n = n->rb_left;
		else
			n = n->rb_right;

		alt = 1 - alt;
	}
	return middle;
}
#endif

static inline u64 heads_to_leaves(struct btrfs_root *root, u64 heads)
{
	u64 num_bytes;

	num_bytes = heads * (sizeof(struct btrfs_extent_item) +
			     sizeof(struct btrfs_extent_inline_ref));
	if (!btrfs_fs_incompat(root->fs_info, SKINNY_METADATA))
		num_bytes += heads * sizeof(struct btrfs_tree_block_info);

	/*
	 * We don't ever fill up leaves all the way so multiply by 2 just to be
	 * closer to what we're really going to want to use.
	 */
	return div64_u64(num_bytes, BTRFS_LEAF_DATA_SIZE(root));
}

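/*
 * Rough numbers for the estimate above (illustration only; the struct sizes
 * quoted here are assumptions, not taken from this file): with skinny
 * metadata each head costs sizeof(btrfs_extent_item) +
 * sizeof(btrfs_extent_inline_ref), roughly 33 bytes, so a 16K leaf with
 * ~16K of usable data area covers on the order of 500 heads; without skinny
 * metadata the extra btrfs_tree_block_info pushes each head to roughly
 * 51 bytes.
 */
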
int btrfs_check_space_for_delayed_refs(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root)
{
	struct btrfs_block_rsv *global_rsv;
	u64 num_heads = trans->transaction->delayed_refs.num_heads_ready;
	u64 num_bytes;
	int ret = 0;

	num_bytes = btrfs_calc_trans_metadata_size(root, 1);
	num_heads = heads_to_leaves(root, num_heads);
	if (num_heads > 1)
		num_bytes += (num_heads - 1) * root->leafsize;
	num_bytes <<= 1;
	global_rsv = &root->fs_info->global_block_rsv;

	/*
	 * If we can't allocate any more chunks let's make sure we have _lots_
	 * of wiggle room since running delayed refs can create more delayed
	 * refs.
	 */
	if (global_rsv->space_info->full)
		num_bytes <<= 1;

	spin_lock(&global_rsv->lock);
	if (global_rsv->reserved <= num_bytes)
		ret = 1;
	spin_unlock(&global_rsv->lock);
	return ret;
}

int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 num_entries =
		atomic_read(&trans->transaction->delayed_refs.num_entries);
	u64 avg_runtime;
	u64 val;

	smp_mb();
	avg_runtime = fs_info->avg_delayed_ref_runtime;
	val = num_entries * avg_runtime;
	if (num_entries * avg_runtime >= NSEC_PER_SEC)
		return 1;
	if (val >= NSEC_PER_SEC / 2)
		return 2;

	return btrfs_check_space_for_delayed_refs(trans, root);
}

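/*
 * Illustration of the thresholds above (not part of the original source):
 * num_entries * avg_runtime is a heuristic cost estimate in nanoseconds.
 * With avg_runtime = 150000ns, 10000 pending entries give 1.5e9 >=
 * NSEC_PER_SEC and the caller gets the hard throttle hint (1); 4000 entries
 * give 6e8, which only crosses NSEC_PER_SEC / 2 and returns the softer
 * hint (2); below that we fall through to the global reserve check.
 */
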
struct async_delayed_refs {
	struct btrfs_root *root;
	int count;
	int error;
	int sync;
	struct completion wait;
	struct btrfs_work work;
};

static void delayed_ref_async_start(struct btrfs_work *work)
{
	struct async_delayed_refs *async;
	struct btrfs_trans_handle *trans;
	int ret;

	async = container_of(work, struct async_delayed_refs, work);

	trans = btrfs_join_transaction(async->root);
	if (IS_ERR(trans)) {
		async->error = PTR_ERR(trans);
		goto done;
	}

	/*
	 * trans->sync means that when we call end_transaction, we won't
	 * wait on delayed refs
	 */
	trans->sync = true;
	ret = btrfs_run_delayed_refs(trans, async->root, async->count);
	if (ret)
		async->error = ret;

	ret = btrfs_end_transaction(trans, async->root);
	if (ret && !async->error)
		async->error = ret;
done:
	if (async->sync)
		complete(&async->wait);
	else
		kfree(async);
}

int btrfs_async_run_delayed_refs(struct btrfs_root *root,
				 unsigned long count, int wait)
{
	struct async_delayed_refs *async;
	int ret;

	async = kmalloc(sizeof(*async), GFP_NOFS);
	if (!async)
		return -ENOMEM;

	async->root = root->fs_info->tree_root;
	async->count = count;
	async->error = 0;
	if (wait)
		async->sync = 1;
	else
		async->sync = 0;
	init_completion(&async->wait);

	btrfs_init_work(&async->work, btrfs_extent_refs_helper,
			delayed_ref_async_start, NULL, NULL);

	btrfs_queue_work(root->fs_info->extent_workers, &async->work);

	if (wait) {
		wait_for_completion(&async->wait);
		ret = async->error;
		kfree(async);
		return ret;
	}
	return 0;
}

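/*
 * Usage sketch (hypothetical caller, not from this file): a path that just
 * wants some delayed refs processed in the background could do
 *
 *	ret = btrfs_async_run_delayed_refs(root, nr, 0);
 *
 * and keep going; the worker frees the async struct itself. A caller that
 * must observe the result passes wait == 1 and gets the worker's error code
 * back once the completion fires.
 */
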
/*
 * this starts processing the delayed reference count updates and
 * extent insertions we have queued up so far. count can be
 * 0, which means to process everything in the tree at the start
 * of the run (but not newly added entries), or it can be some target
 * number you'd like to process.
 *
 * Returns 0 on success or if called with an aborted transaction
 * Returns <0 on error and aborts the transaction
 */
int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root, unsigned long count)
{
	struct rb_node *node;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_delayed_ref_head *head;
	int ret;
	int run_all = count == (unsigned long)-1;
	int run_most = 0;

	/* We'll clean this up in btrfs_cleanup_transaction */
	if (trans->aborted)
		return 0;

	if (root == root->fs_info->extent_root)
		root = root->fs_info->tree_root;

	delayed_refs = &trans->transaction->delayed_refs;
	if (count == 0) {
		count = atomic_read(&delayed_refs->num_entries) * 2;
		run_most = 1;
	}

again:
#ifdef SCRAMBLE_DELAYED_REFS
	delayed_refs->run_delayed_start = find_middle(&delayed_refs->root);
#endif
	ret = __btrfs_run_delayed_refs(trans, root, count);
	if (ret < 0) {
		btrfs_abort_transaction(trans, root, ret);
		return ret;
	}

	if (run_all) {
		if (!list_empty(&trans->new_bgs))
			btrfs_create_pending_block_groups(trans, root);

		spin_lock(&delayed_refs->lock);
		node = rb_first(&delayed_refs->href_root);
		if (!node) {
			spin_unlock(&delayed_refs->lock);
			goto out;
		}
		count = (unsigned long)-1;

		while (node) {
			head = rb_entry(node, struct btrfs_delayed_ref_head,
					href_node);
			if (btrfs_delayed_ref_is_head(&head->node)) {
				struct btrfs_delayed_ref_node *ref;

				ref = &head->node;
				atomic_inc(&ref->refs);

				spin_unlock(&delayed_refs->lock);
				/*
				 * Mutex was contended, block until it's
				 * released and try again
				 */
				mutex_lock(&head->mutex);
				mutex_unlock(&head->mutex);

				btrfs_put_delayed_ref(ref);
				cond_resched();
				goto again;
			} else {
				WARN_ON(1);
			}
			node = rb_next(node);
		}
		spin_unlock(&delayed_refs->lock);
		cond_resched();
		goto again;
	}
out:
	ret = btrfs_delayed_qgroup_accounting(trans, root->fs_info);
	if (ret)
		return ret;
	assert_qgroups_uptodate(trans);
	return 0;
}

int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				u64 bytenr, u64 num_bytes, u64 flags,
				int level, int is_data)
{
	struct btrfs_delayed_extent_op *extent_op;
	int ret;

	extent_op = btrfs_alloc_delayed_extent_op();
	if (!extent_op)
		return -ENOMEM;

	extent_op->flags_to_set = flags;
	extent_op->update_flags = 1;
	extent_op->update_key = 0;
	extent_op->is_data = is_data ? 1 : 0;
	extent_op->level = level;

	ret = btrfs_add_delayed_extent_op(root->fs_info, trans, bytenr,
					  num_bytes, extent_op);
	if (ret)
		btrfs_free_delayed_extent_op(extent_op);
	return ret;
}

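/*
 * Usage sketch (hedged; this mirrors how COW paths elsewhere in btrfs flip
 * a tree block to full backrefs, but the exact call site is not part of
 * this excerpt):
 *
 *	ret = btrfs_set_disk_extent_flags(trans, root, buf->start, buf->len,
 *					  BTRFS_BLOCK_FLAG_FULL_BACKREF,
 *					  btrfs_header_level(buf), 0);
 *
 * Note that the flag update is queued as a delayed extent_op rather than
 * applied to the extent tree immediately.
 */
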
static noinline int check_delayed_ref(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_path *path,
				      u64 objectid, u64 offset, u64 bytenr)
{
	struct btrfs_delayed_ref_head *head;
	struct btrfs_delayed_ref_node *ref;
	struct btrfs_delayed_data_ref *data_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct rb_node *node;
	int ret = 0;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);
	head = btrfs_find_delayed_ref_head(trans, bytenr);
	if (!head) {
		spin_unlock(&delayed_refs->lock);
		return 0;
	}

	if (!mutex_trylock(&head->mutex)) {
		atomic_inc(&head->node.refs);
		spin_unlock(&delayed_refs->lock);

		btrfs_release_path(path);

		/*
		 * Mutex was contended, block until it's released and let
		 * caller try again
		 */
		mutex_lock(&head->mutex);
		mutex_unlock(&head->mutex);
		btrfs_put_delayed_ref(&head->node);
		return -EAGAIN;
	}
	spin_unlock(&delayed_refs->lock);

	spin_lock(&head->lock);
	node = rb_first(&head->ref_root);
	while (node) {
		ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
		node = rb_next(node);

		/* If it's a shared ref we know a cross reference exists */
		if (ref->type != BTRFS_EXTENT_DATA_REF_KEY) {
			ret = 1;
			break;
		}

		data_ref = btrfs_delayed_node_to_data_ref(ref);

		/*
		 * If our ref doesn't match the one we're currently looking at
		 * then we have a cross reference.
		 */
		if (data_ref->root != root->root_key.objectid ||
		    data_ref->objectid != objectid ||
		    data_ref->offset != offset) {
			ret = 1;
			break;
		}
	}
	spin_unlock(&head->lock);
	mutex_unlock(&head->mutex);
	return ret;
}

static noinline int check_committed_ref(struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct btrfs_path *path,
					u64 objectid, u64 offset, u64 bytenr)
{
	struct btrfs_root *extent_root = root->fs_info->extent_root;
	struct extent_buffer *leaf;
	struct btrfs_extent_data_ref *ref;
	struct btrfs_extent_inline_ref *iref;
	struct btrfs_extent_item *ei;
	struct btrfs_key key;
	u32 item_size;
	int ret;

	key.objectid = bytenr;
	key.offset = (u64)-1;
	key.type = BTRFS_EXTENT_ITEM_KEY;

	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	BUG_ON(ret == 0); /* Corruption */

	ret = -ENOENT;
	if (path->slots[0] == 0)
		goto out;

	path->slots[0]--;
	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

	if (key.objectid != bytenr || key.type != BTRFS_EXTENT_ITEM_KEY)
		goto out;

	ret = 1;
	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (item_size < sizeof(*ei)) {
		WARN_ON(item_size != sizeof(struct btrfs_extent_item_v0));
		goto out;
	}
#endif
	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);

	if (item_size != sizeof(*ei) +
	    btrfs_extent_inline_ref_size(BTRFS_EXTENT_DATA_REF_KEY))
		goto out;

	if (btrfs_extent_generation(leaf, ei) <=
	    btrfs_root_last_snapshot(&root->root_item))
		goto out;

	iref = (struct btrfs_extent_inline_ref *)(ei + 1);
	if (btrfs_extent_inline_ref_type(leaf, iref) !=
	    BTRFS_EXTENT_DATA_REF_KEY)
		goto out;

	ref = (struct btrfs_extent_data_ref *)(&iref->offset);
	if (btrfs_extent_refs(leaf, ei) !=
	    btrfs_extent_data_ref_count(leaf, ref) ||
	    btrfs_extent_data_ref_root(leaf, ref) !=
	    root->root_key.objectid ||
	    btrfs_extent_data_ref_objectid(leaf, ref) != objectid ||
	    btrfs_extent_data_ref_offset(leaf, ref) != offset)
		goto out;

	ret = 0;
out:
	return ret;
}

int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root,
			  u64 objectid, u64 offset, u64 bytenr)
{
	struct btrfs_path *path;
	int ret;
	int ret2;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOENT;

	do {
		ret = check_committed_ref(trans, root, path, objectid,
					  offset, bytenr);
		if (ret && ret != -ENOENT)
			goto out;

		ret2 = check_delayed_ref(trans, root, path, objectid,
					 offset, bytenr);
	} while (ret2 == -EAGAIN);

	if (ret2 && ret2 != -ENOENT) {
		ret = ret2;
		goto out;
	}

	if (ret != -ENOENT || ret2 != -ENOENT)
		ret = 0;
out:
	btrfs_free_path(path);
	if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
		WARN_ON(ret > 0);
	return ret;
}

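/*
 * Return convention as the two helpers combine above (a summary of this
 * code, not an authoritative contract): > 0 means a cross reference exists
 * or could not be ruled out, 0 means the extent is known to be referenced
 * only by (root, objectid, offset), and < 0 is an error from the search.
 */
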
static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   struct extent_buffer *buf,
			   int full_backref, int inc)
{
	u64 bytenr;
	u64 num_bytes;
	u64 parent;
	u64 ref_root;
	u32 nritems;
	struct btrfs_key key;
	struct btrfs_file_extent_item *fi;
	int i;
	int level;
	int ret = 0;
	int (*process_func)(struct btrfs_trans_handle *, struct btrfs_root *,
			    u64, u64, u64, u64, u64, u64, int);

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
	if (unlikely(test_bit(BTRFS_ROOT_DUMMY_ROOT, &root->state)))
		return 0;
#endif
	ref_root = btrfs_header_owner(buf);
	nritems = btrfs_header_nritems(buf);
	level = btrfs_header_level(buf);

	if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state) && level == 0)
		return 0;

	if (inc)
		process_func = btrfs_inc_extent_ref;
	else
		process_func = btrfs_free_extent;

	if (full_backref)
		parent = buf->start;
	else
		parent = 0;

	for (i = 0; i < nritems; i++) {
		if (level == 0) {
			btrfs_item_key_to_cpu(buf, &key, i);
			if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
				continue;
			fi = btrfs_item_ptr(buf, i,
					    struct btrfs_file_extent_item);
			if (btrfs_file_extent_type(buf, fi) ==
			    BTRFS_FILE_EXTENT_INLINE)
				continue;
			bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
			if (bytenr == 0)
				continue;

			num_bytes = btrfs_file_extent_disk_num_bytes(buf, fi);
			key.offset -= btrfs_file_extent_offset(buf, fi);
			ret = process_func(trans, root, bytenr, num_bytes,
					   parent, ref_root, key.objectid,
					   key.offset, 1);
			if (ret)
				goto fail;
		} else {
			bytenr = btrfs_node_blockptr(buf, i);
			num_bytes = btrfs_level_size(root, level - 1);
			ret = process_func(trans, root, bytenr, num_bytes,
					   parent, ref_root, level - 1, 0,
					   1);
			if (ret)
				goto fail;
		}
	}
	return 0;
fail:
	return ret;
}

int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		  struct extent_buffer *buf, int full_backref)
{
	return __btrfs_mod_ref(trans, root, buf, full_backref, 1);
}

int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		  struct extent_buffer *buf, int full_backref)
{
	return __btrfs_mod_ref(trans, root, buf, full_backref, 0);
}

static int write_one_cache_group(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_block_group_cache *cache)
{
	int ret;
	struct btrfs_root *extent_root = root->fs_info->extent_root;
	unsigned long bi;
	struct extent_buffer *leaf;

	ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
	if (ret < 0)
		goto fail;
	BUG_ON(ret); /* Corruption */

	leaf = path->nodes[0];
	bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
	write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);
fail:
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		return ret;
	}
	return 0;
}

static struct btrfs_block_group_cache *
next_block_group(struct btrfs_root *root,
		 struct btrfs_block_group_cache *cache)
{
	struct rb_node *node;

	spin_lock(&root->fs_info->block_group_cache_lock);
	node = rb_next(&cache->cache_node);
	btrfs_put_block_group(cache);
	if (node) {
		cache = rb_entry(node, struct btrfs_block_group_cache,
				 cache_node);
		btrfs_get_block_group(cache);
	} else
		cache = NULL;
	spin_unlock(&root->fs_info->block_group_cache_lock);
	return cache;
}

static int cache_save_setup(struct btrfs_block_group_cache *block_group,
			    struct btrfs_trans_handle *trans,
			    struct btrfs_path *path)
{
	struct btrfs_root *root = block_group->fs_info->tree_root;
	struct inode *inode = NULL;
	u64 alloc_hint = 0;
	int dcs = BTRFS_DC_ERROR;
	int num_pages = 0;
	int retries = 0;
	int ret = 0;

	/*
	 * If this block group is smaller than 100 megs, don't bother caching
	 * the block group.
	 */
	if (block_group->key.offset < (100 * 1024 * 1024)) {
		spin_lock(&block_group->lock);
		block_group->disk_cache_state = BTRFS_DC_WRITTEN;
		spin_unlock(&block_group->lock);
		return 0;
	}

again:
	inode = lookup_free_space_inode(root, block_group, path);
	if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
		ret = PTR_ERR(inode);
		btrfs_release_path(path);
		goto out;
	}

	if (IS_ERR(inode)) {
		BUG_ON(retries);
		retries++;

		if (block_group->ro)
			goto out_free;

		ret = create_free_space_inode(root, trans, block_group, path);
		if (ret)
			goto out_free;
		goto again;
	}

	/* We've already set up this transaction, go ahead and exit */
	if (block_group->cache_generation == trans->transid &&
	    i_size_read(inode)) {
		dcs = BTRFS_DC_SETUP;
		goto out_put;
	}

	/*
	 * We want to set the generation to 0, that way if anything goes wrong
	 * from here on out we know not to trust this cache when we load up next
	 * time.
	 */
	BTRFS_I(inode)->generation = 0;
	ret = btrfs_update_inode(trans, root, inode);
	WARN_ON(ret);

	if (i_size_read(inode) > 0) {
		ret = btrfs_check_trunc_cache_free_space(root,
					&root->fs_info->global_block_rsv);
		if (ret)
			goto out_put;

		ret = btrfs_truncate_free_space_cache(root, trans, inode);
		if (ret)
			goto out_put;
	}

	spin_lock(&block_group->lock);
	if (block_group->cached != BTRFS_CACHE_FINISHED ||
	    !btrfs_test_opt(root, SPACE_CACHE) ||
	    block_group->delalloc_bytes) {
		/*
		 * don't bother trying to write stuff out _if_
		 * a) we're not cached, or
		 * b) we're mounted with the nospace_cache option.
		 */
		dcs = BTRFS_DC_WRITTEN;
		spin_unlock(&block_group->lock);
		goto out_put;
	}
	spin_unlock(&block_group->lock);

	/*
	 * Try to preallocate enough space based on how big the block group is.
	 * Keep in mind this has to include any pinned space which could end up
	 * taking up quite a bit since it's not folded into the other space
	 * cache.
	 */
	num_pages = (int)div64_u64(block_group->key.offset, 256 * 1024 * 1024);
	if (!num_pages)
		num_pages = 1;

	num_pages *= 16;
	num_pages *= PAGE_CACHE_SIZE;

	ret = btrfs_check_data_free_space(inode, num_pages);
	if (ret)
		goto out_put;

	ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, num_pages,
					      num_pages, num_pages,
					      &alloc_hint);
	if (!ret)
		dcs = BTRFS_DC_SETUP;
	btrfs_free_reserved_data_space(inode, num_pages);

out_put:
	iput(inode);
out_free:
	btrfs_release_path(path);
out:
	spin_lock(&block_group->lock);
	if (!ret && dcs == BTRFS_DC_SETUP)
		block_group->cache_generation = trans->transid;
	block_group->disk_cache_state = dcs;
	spin_unlock(&block_group->lock);

	return ret;
}

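/*
 * Worked example of the preallocation sizing above (illustration only,
 * assuming 4K pages): a 1GB block group gives key.offset / 256MB = 4, then
 * 4 * 16 = 64 pages, times PAGE_CACHE_SIZE for a 256KB free space cache
 * file; the smallest eligible group still gets 16 pages. Note that by the
 * time num_pages is passed on it actually holds a byte count, despite the
 * name.
 */
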
int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root)
{
	struct btrfs_block_group_cache *cache;
	int err = 0;
	struct btrfs_path *path;
	u64 last = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

again:
	while (1) {
		cache = btrfs_lookup_first_block_group(root->fs_info, last);
		while (cache) {
			if (cache->disk_cache_state == BTRFS_DC_CLEAR)
				break;
			cache = next_block_group(root, cache);
		}
		if (!cache) {
			if (last == 0)
				break;
			last = 0;
			continue;
		}
		err = cache_save_setup(cache, trans, path);
		last = cache->key.objectid + cache->key.offset;
		btrfs_put_block_group(cache);
	}

	while (1) {
		if (last == 0) {
			err = btrfs_run_delayed_refs(trans, root,
						     (unsigned long)-1);
			if (err) /* File system offline */
				goto out;
		}

		cache = btrfs_lookup_first_block_group(root->fs_info, last);
		while (cache) {
			if (cache->disk_cache_state == BTRFS_DC_CLEAR) {
				btrfs_put_block_group(cache);
				goto again;
			}

			if (cache->dirty)
				break;
			cache = next_block_group(root, cache);
		}
		if (!cache) {
			if (last == 0)
				break;
			last = 0;
			continue;
		}

		if (cache->disk_cache_state == BTRFS_DC_SETUP)
			cache->disk_cache_state = BTRFS_DC_NEED_WRITE;
		cache->dirty = 0;
		last = cache->key.objectid + cache->key.offset;

		err = write_one_cache_group(trans, root, path, cache);
		btrfs_put_block_group(cache);
		if (err) /* File system offline */
			goto out;
	}

	while (1) {
		/*
		 * I don't think this is needed since we're just marking our
		 * preallocated extent as written, but just in case it can't
		 * hurt.
		 */
		if (last == 0) {
			err = btrfs_run_delayed_refs(trans, root,
						     (unsigned long)-1);
			if (err) /* File system offline */
				goto out;
		}

		cache = btrfs_lookup_first_block_group(root->fs_info, last);
		while (cache) {
			/*
			 * Really this shouldn't happen, but it could if we
			 * couldn't write the entire preallocated extent and
			 * splitting the extent resulted in a new block.
			 */
			if (cache->dirty) {
				btrfs_put_block_group(cache);
				goto again;
			}
			if (cache->disk_cache_state == BTRFS_DC_NEED_WRITE)
				break;
			cache = next_block_group(root, cache);
		}
		if (!cache) {
			if (last == 0)
				break;
			last = 0;
			continue;
		}

		err = btrfs_write_out_cache(root, trans, cache, path);

		/*
		 * If we didn't have an error then the cache state is still
		 * NEED_WRITE, so we can set it to WRITTEN.
		 */
		if (!err && cache->disk_cache_state == BTRFS_DC_NEED_WRITE)
			cache->disk_cache_state = BTRFS_DC_WRITTEN;
		last = cache->key.objectid + cache->key.offset;
		btrfs_put_block_group(cache);
	}
out:
	btrfs_free_path(path);
	return err;
}

int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr)
{
	struct btrfs_block_group_cache *block_group;
	int readonly = 0;

	block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
	if (!block_group || block_group->ro)
		readonly = 1;
	if (block_group)
		btrfs_put_block_group(block_group);
	return readonly;
}

static const char *alloc_name(u64 flags)
{
	switch (flags) {
	case BTRFS_BLOCK_GROUP_METADATA|BTRFS_BLOCK_GROUP_DATA:
		return "mixed";
	case BTRFS_BLOCK_GROUP_METADATA:
		return "metadata";
	case BTRFS_BLOCK_GROUP_DATA:
		return "data";
	case BTRFS_BLOCK_GROUP_SYSTEM:
		return "system";
	default:
		WARN_ON(1);
		return "invalid-combination";
	};
}

static int update_space_info(struct btrfs_fs_info *info, u64 flags,
			     u64 total_bytes, u64 bytes_used,
			     struct btrfs_space_info **space_info)
{
	struct btrfs_space_info *found;
	int i;
	int factor;
	int ret;

	if (flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
		     BTRFS_BLOCK_GROUP_RAID10))
		factor = 2;
	else
		factor = 1;

	found = __find_space_info(info, flags);
	if (found) {
		spin_lock(&found->lock);
		found->total_bytes += total_bytes;
		found->disk_total += total_bytes * factor;
		found->bytes_used += bytes_used;
		found->disk_used += bytes_used * factor;
		found->full = 0;
		spin_unlock(&found->lock);
		*space_info = found;
		return 0;
	}
	found = kzalloc(sizeof(*found), GFP_NOFS);
	if (!found)
		return -ENOMEM;

	ret = percpu_counter_init(&found->total_bytes_pinned, 0, GFP_KERNEL);
	if (ret) {
		kfree(found);
		return ret;
	}

	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
		INIT_LIST_HEAD(&found->block_groups[i]);
	init_rwsem(&found->groups_sem);
	spin_lock_init(&found->lock);
	found->flags = flags & BTRFS_BLOCK_GROUP_TYPE_MASK;
	found->total_bytes = total_bytes;
	found->disk_total = total_bytes * factor;
	found->bytes_used = bytes_used;
	found->disk_used = bytes_used * factor;
	found->bytes_pinned = 0;
	found->bytes_reserved = 0;
	found->bytes_readonly = 0;
	found->bytes_may_use = 0;
	found->full = 0;
	found->force_alloc = CHUNK_ALLOC_NO_FORCE;
	found->chunk_alloc = 0;
	found->flush = 0;
	init_waitqueue_head(&found->wait);

	ret = kobject_init_and_add(&found->kobj, &space_info_ktype,
				   info->space_info_kobj, "%s",
				   alloc_name(found->flags));
	if (ret) {
		kfree(found);
		return ret;
	}

	*space_info = found;
	list_add_rcu(&found->list, &info->space_info);
	if (flags & BTRFS_BLOCK_GROUP_DATA)
		info->data_sinfo = found;

	return ret;
}

static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
{
	u64 extra_flags = chunk_to_extended(flags) &
				BTRFS_EXTENDED_PROFILE_MASK;

	write_seqlock(&fs_info->profiles_lock);
	if (flags & BTRFS_BLOCK_GROUP_DATA)
		fs_info->avail_data_alloc_bits |= extra_flags;
	if (flags & BTRFS_BLOCK_GROUP_METADATA)
		fs_info->avail_metadata_alloc_bits |= extra_flags;
	if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
		fs_info->avail_system_alloc_bits |= extra_flags;
	write_sequnlock(&fs_info->profiles_lock);
}

/*
 * returns target flags in extended format or 0 if restripe for this
 * chunk_type is not in progress
 *
 * should be called with either volume_mutex or balance_lock held
 */
static u64 get_restripe_target(struct btrfs_fs_info *fs_info, u64 flags)
{
	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
	u64 target = 0;

	if (!bctl)
		return 0;

	if (flags & BTRFS_BLOCK_GROUP_DATA &&
	    bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) {
		target = BTRFS_BLOCK_GROUP_DATA | bctl->data.target;
	} else if (flags & BTRFS_BLOCK_GROUP_SYSTEM &&
		   bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
		target = BTRFS_BLOCK_GROUP_SYSTEM | bctl->sys.target;
	} else if (flags & BTRFS_BLOCK_GROUP_METADATA &&
		   bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) {
		target = BTRFS_BLOCK_GROUP_METADATA | bctl->meta.target;
	}

	return target;
}

/*
 * @flags: available profiles in extended format (see ctree.h)
 *
 * Returns reduced profile in chunk format. If profile changing is in
 * progress (either running or paused) picks the target profile (if it's
 * already available), otherwise falls back to plain reducing.
 */
static u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
{
	u64 num_devices = root->fs_info->fs_devices->rw_devices;
	u64 target;
	u64 tmp;

	/*
	 * see if restripe for this chunk_type is in progress, if so
	 * try to reduce to the target profile
	 */
	spin_lock(&root->fs_info->balance_lock);
	target = get_restripe_target(root->fs_info, flags);
	if (target) {
		/* pick target profile only if it's already available */
		if ((flags & target) & BTRFS_EXTENDED_PROFILE_MASK) {
			spin_unlock(&root->fs_info->balance_lock);
			return extended_to_chunk(target);
		}
	}
	spin_unlock(&root->fs_info->balance_lock);

	/* First, mask out the RAID levels which aren't possible */
	if (num_devices == 1)
		flags &= ~(BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID0 |
			   BTRFS_BLOCK_GROUP_RAID5);
	if (num_devices < 3)
		flags &= ~BTRFS_BLOCK_GROUP_RAID6;
	if (num_devices < 4)
		flags &= ~BTRFS_BLOCK_GROUP_RAID10;

	tmp = flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID0 |
		       BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID5 |
		       BTRFS_BLOCK_GROUP_RAID6 | BTRFS_BLOCK_GROUP_RAID10);
	flags &= ~tmp;

	if (tmp & BTRFS_BLOCK_GROUP_RAID6)
		tmp = BTRFS_BLOCK_GROUP_RAID6;
	else if (tmp & BTRFS_BLOCK_GROUP_RAID5)
		tmp = BTRFS_BLOCK_GROUP_RAID5;
	else if (tmp & BTRFS_BLOCK_GROUP_RAID10)
		tmp = BTRFS_BLOCK_GROUP_RAID10;
	else if (tmp & BTRFS_BLOCK_GROUP_RAID1)
		tmp = BTRFS_BLOCK_GROUP_RAID1;
	else if (tmp & BTRFS_BLOCK_GROUP_RAID0)
		tmp = BTRFS_BLOCK_GROUP_RAID0;

	return extended_to_chunk(flags | tmp);
}

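/*
 * Example of the preference order above (illustration only): if the
 * available profile bits after masking are RAID1 | RAID0, the chain of
 * else-ifs keeps RAID1 and drops RAID0, since the redundancy-preserving
 * profiles are tried first (RAID6, then RAID5, RAID10, RAID1, and RAID0
 * last).
 */
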
static u64 get_alloc_profile(struct btrfs_root *root, u64 orig_flags)
{
	unsigned seq;
	u64 flags;

	do {
		flags = orig_flags;
		seq = read_seqbegin(&root->fs_info->profiles_lock);

		if (flags & BTRFS_BLOCK_GROUP_DATA)
			flags |= root->fs_info->avail_data_alloc_bits;
		else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
			flags |= root->fs_info->avail_system_alloc_bits;
		else if (flags & BTRFS_BLOCK_GROUP_METADATA)
			flags |= root->fs_info->avail_metadata_alloc_bits;
	} while (read_seqretry(&root->fs_info->profiles_lock, seq));

	return btrfs_reduce_alloc_profile(root, flags);
}

u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data)
{
	u64 flags;
	u64 ret;

	if (data)
		flags = BTRFS_BLOCK_GROUP_DATA;
	else if (root == root->fs_info->chunk_root)
		flags = BTRFS_BLOCK_GROUP_SYSTEM;
	else
		flags = BTRFS_BLOCK_GROUP_METADATA;

	ret = get_alloc_profile(root, flags);
	return ret;
}

/*
 * This will check the space that the inode allocates from to make sure we have
 * enough space for bytes.
 */
int btrfs_check_data_free_space(struct inode *inode, u64 bytes)
{
	struct btrfs_space_info *data_sinfo;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 used;
	int ret = 0, committed = 0, alloc_chunk = 1;

	/* make sure bytes are sectorsize aligned */
	bytes = ALIGN(bytes, root->sectorsize);

	if (btrfs_is_free_space_inode(inode)) {
		committed = 1;
		ASSERT(current->journal_info);
	}

	data_sinfo = fs_info->data_sinfo;
	if (!data_sinfo)
		goto alloc;

again:
	/* make sure we have enough space to handle the data first */
	spin_lock(&data_sinfo->lock);
	used = data_sinfo->bytes_used + data_sinfo->bytes_reserved +
		data_sinfo->bytes_pinned + data_sinfo->bytes_readonly +
		data_sinfo->bytes_may_use;

	if (used + bytes > data_sinfo->total_bytes) {
		struct btrfs_trans_handle *trans;

		/*
		 * if we don't have enough free bytes in this space then we need
		 * to alloc a new chunk.
		 */
		if (!data_sinfo->full && alloc_chunk) {
			u64 alloc_target;

			data_sinfo->force_alloc = CHUNK_ALLOC_FORCE;
			spin_unlock(&data_sinfo->lock);
alloc:
			alloc_target = btrfs_get_alloc_profile(root, 1);
			/*
			 * It is ugly that we don't call a nolock join
			 * transaction for the free space inode case here.
			 * But it is safe because we only do the data space
			 * reservation for the free space cache in the
			 * transaction context, and the common join transaction
			 * just increases the use count of the current
			 * transaction handle without trying to acquire the
			 * trans_lock of the fs.
			 */
			trans = btrfs_join_transaction(root);
			if (IS_ERR(trans))
				return PTR_ERR(trans);

			ret = do_chunk_alloc(trans, root->fs_info->extent_root,
					     alloc_target,
					     CHUNK_ALLOC_NO_FORCE);
			btrfs_end_transaction(trans, root);
			if (ret < 0) {
				if (ret != -ENOSPC)
					return ret;
				else
					goto commit_trans;
			}

			if (!data_sinfo)
				data_sinfo = fs_info->data_sinfo;

			goto again;
		}

		/*
		 * If we don't have enough pinned space to deal with this
		 * allocation don't bother committing the transaction.
		 */
		if (percpu_counter_compare(&data_sinfo->total_bytes_pinned,
					   bytes) < 0)
			committed = 1;
		spin_unlock(&data_sinfo->lock);

		/* commit the current transaction and try again */
commit_trans:
		if (!committed &&
		    !atomic_read(&root->fs_info->open_ioctl_trans)) {
			committed = 1;

			trans = btrfs_join_transaction(root);
			if (IS_ERR(trans))
				return PTR_ERR(trans);
			ret = btrfs_commit_transaction(trans, root);
			if (ret)
				return ret;
			goto again;
		}

		trace_btrfs_space_reservation(root->fs_info,
					      "space_info:enospc",
					      data_sinfo->flags, bytes, 1);
		return -ENOSPC;
	}
	data_sinfo->bytes_may_use += bytes;
	trace_btrfs_space_reservation(root->fs_info, "space_info",
				      data_sinfo->flags, bytes, 1);
	spin_unlock(&data_sinfo->lock);

	return 0;
}

/*
 * Called if we need to clear a data reservation for this inode.
 */
void btrfs_free_reserved_data_space(struct inode *inode, u64 bytes)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_space_info *data_sinfo;

	/* make sure bytes are sectorsize aligned */
	bytes = ALIGN(bytes, root->sectorsize);

	data_sinfo = root->fs_info->data_sinfo;
	spin_lock(&data_sinfo->lock);
	WARN_ON(data_sinfo->bytes_may_use < bytes);
	data_sinfo->bytes_may_use -= bytes;
	trace_btrfs_space_reservation(root->fs_info, "space_info",
				      data_sinfo->flags, bytes, 0);
	spin_unlock(&data_sinfo->lock);
}

static void force_metadata_allocation(struct btrfs_fs_info *info)
{
	struct list_head *head = &info->space_info;
	struct btrfs_space_info *found;

	rcu_read_lock();
	list_for_each_entry_rcu(found, head, list) {
		if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
			found->force_alloc = CHUNK_ALLOC_FORCE;
	}
	rcu_read_unlock();
}

static inline u64 calc_global_rsv_need_space(struct btrfs_block_rsv *global)
{
	return (global->size << 1);
}

static int should_alloc_chunk(struct btrfs_root *root,
			      struct btrfs_space_info *sinfo, int force)
{
	struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
	u64 num_bytes = sinfo->total_bytes - sinfo->bytes_readonly;
	u64 num_allocated = sinfo->bytes_used + sinfo->bytes_reserved;
	u64 thresh;

	if (force == CHUNK_ALLOC_FORCE)
		return 1;

	/*
	 * We need to take into account the global rsv because for all intents
	 * and purposes it's used space. Don't worry about locking the
	 * global_rsv, it doesn't change except when the transaction commits.
	 */
	if (sinfo->flags & BTRFS_BLOCK_GROUP_METADATA)
		num_allocated += calc_global_rsv_need_space(global_rsv);

	/*
	 * in limited mode, we want to have some free space up to
	 * about 1% of the FS size.
	 */
	if (force == CHUNK_ALLOC_LIMITED) {
		thresh = btrfs_super_total_bytes(root->fs_info->super_copy);
		thresh = max_t(u64, 64 * 1024 * 1024,
			       div_factor_fine(thresh, 1));

		if (num_bytes - num_allocated < thresh)
			return 1;
	}

	if (num_allocated + 2 * 1024 * 1024 < div_factor(num_bytes, 8))
		return 0;
	return 1;
}

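/*
 * Worked example of the CHUNK_ALLOC_LIMITED threshold above (illustration
 * only): on a 1TB filesystem, div_factor_fine(thresh, 1) is 1% or roughly
 * 10GB, well over the 64MB floor, so a limited-mode allocation is forced
 * whenever less than ~10GB of this space_info remains unallocated.
 */
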
static u64 get_system_chunk_thresh(struct btrfs_root *root, u64 type)
{
	u64 num_dev;

	if (type & (BTRFS_BLOCK_GROUP_RAID10 |
		    BTRFS_BLOCK_GROUP_RAID0 |
		    BTRFS_BLOCK_GROUP_RAID5 |
		    BTRFS_BLOCK_GROUP_RAID6))
		num_dev = root->fs_info->fs_devices->rw_devices;
	else if (type & BTRFS_BLOCK_GROUP_RAID1)
		num_dev = 2;
	else
		num_dev = 1;	/* DUP or single */

	/* metadata for updating devices and chunk tree */
	return btrfs_calc_trans_metadata_size(root, num_dev + 1);
}

static void check_system_chunk(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root, u64 type)
{
	struct btrfs_space_info *info;
	u64 left;
	u64 thresh;

	info = __find_space_info(root->fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
	spin_lock(&info->lock);
	left = info->total_bytes - info->bytes_used - info->bytes_pinned -
		info->bytes_reserved - info->bytes_readonly;
	spin_unlock(&info->lock);

	thresh = get_system_chunk_thresh(root, type);
	if (left < thresh && btrfs_test_opt(root, ENOSPC_DEBUG)) {
		btrfs_info(root->fs_info, "left=%llu, need=%llu, flags=%llu",
			   left, thresh, type);
		dump_space_info(info, 0, 0);
	}

	if (left < thresh) {
		u64 flags;

		flags = btrfs_get_alloc_profile(root->fs_info->chunk_root, 0);
		btrfs_alloc_chunk(trans, root, flags);
	}
}

static int do_chunk_alloc(struct btrfs_trans_handle *trans,
			  struct btrfs_root *extent_root, u64 flags, int force)
{
	struct btrfs_space_info *space_info;
	struct btrfs_fs_info *fs_info = extent_root->fs_info;
	int wait_for_alloc = 0;
	int ret = 0;

	/* Don't re-enter if we're already allocating a chunk */
	if (trans->allocating_chunk)
		return -ENOSPC;

	space_info = __find_space_info(extent_root->fs_info, flags);
	if (!space_info) {
		ret = update_space_info(extent_root->fs_info, flags,
					0, 0, &space_info);
		BUG_ON(ret); /* -ENOMEM */
	}
	BUG_ON(!space_info); /* Logic error */

again:
	spin_lock(&space_info->lock);
	if (force < space_info->force_alloc)
		force = space_info->force_alloc;
	if (space_info->full) {
		if (should_alloc_chunk(extent_root, space_info, force))
			ret = -ENOSPC;
		else
			ret = 0;
		spin_unlock(&space_info->lock);
		return ret;
	}

	if (!should_alloc_chunk(extent_root, space_info, force)) {
		spin_unlock(&space_info->lock);
		return 0;
	} else if (space_info->chunk_alloc) {
		wait_for_alloc = 1;
	} else {
		space_info->chunk_alloc = 1;
	}

	spin_unlock(&space_info->lock);

	mutex_lock(&fs_info->chunk_mutex);

	/*
	 * The chunk_mutex is held throughout the entirety of a chunk
	 * allocation, so once we've acquired the chunk_mutex we know that the
	 * other guy is done and we need to recheck and see if we should
	 * allocate.
	 */
	if (wait_for_alloc) {
		mutex_unlock(&fs_info->chunk_mutex);
		wait_for_alloc = 0;
		goto again;
	}

	trans->allocating_chunk = true;

	/*
	 * If we have mixed data/metadata chunks we want to make sure we keep
	 * allocating mixed chunks instead of individual chunks.
	 */
	if (btrfs_mixed_space_info(space_info))
		flags |= (BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA);

	/*
	 * if we're doing a data chunk, go ahead and make sure that
	 * we keep a reasonable number of metadata chunks allocated in the
	 * FS as well.
	 */
	if (flags & BTRFS_BLOCK_GROUP_DATA && fs_info->metadata_ratio) {
		fs_info->data_chunk_allocations++;
		if (!(fs_info->data_chunk_allocations %
		      fs_info->metadata_ratio))
			force_metadata_allocation(fs_info);
	}

	/*
	 * Check if we have enough space in SYSTEM chunk because we may need
	 * to update devices.
	 */
	check_system_chunk(trans, extent_root, flags);

	ret = btrfs_alloc_chunk(trans, extent_root, flags);
	trans->allocating_chunk = false;

	spin_lock(&space_info->lock);
	if (ret < 0 && ret != -ENOSPC)
		goto out;
	if (ret)
		space_info->full = 1;
	else
		ret = 1;

	space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
out:
	space_info->chunk_alloc = 0;
	spin_unlock(&space_info->lock);
	mutex_unlock(&fs_info->chunk_mutex);
	return ret;
}

static int can_overcommit(struct btrfs_root *root,
			  struct btrfs_space_info *space_info, u64 bytes,
			  enum btrfs_reserve_flush_enum flush)
{
	struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
	u64 profile = btrfs_get_alloc_profile(root, 0);
	u64 space_size;
	u64 avail;
	u64 used;

	used = space_info->bytes_used + space_info->bytes_reserved +
		space_info->bytes_pinned + space_info->bytes_readonly;

	/*
	 * We only want to allow over committing if we have lots of actual space
	 * free, but if we don't have enough space to handle the global reserve
	 * space then we could end up having a real enospc problem when trying
	 * to allocate a chunk or some other such important allocation.
	 */
	spin_lock(&global_rsv->lock);
	space_size = calc_global_rsv_need_space(global_rsv);
	spin_unlock(&global_rsv->lock);
	if (used + space_size >= space_info->total_bytes)
		return 0;

	used += space_info->bytes_may_use;

	spin_lock(&root->fs_info->free_chunk_lock);
	avail = root->fs_info->free_chunk_space;
	spin_unlock(&root->fs_info->free_chunk_lock);

	/*
	 * If we have dup, raid1 or raid10 then only half of the free
	 * space is actually usable. For raid56, the space info used
	 * doesn't include the parity drive, so we don't have to
	 * change the math
	 */
	if (profile & (BTRFS_BLOCK_GROUP_DUP |
		       BTRFS_BLOCK_GROUP_RAID1 |
		       BTRFS_BLOCK_GROUP_RAID10))
		avail >>= 1;

	/*
	 * If we can flush everything, don't let us overcommit too much:
	 * allow overcommitting by up to 1/8 of the free space. If we
	 * aren't flushing all things, allow overcommitting by up to 1/2
	 * of it.
	 */
	if (flush == BTRFS_RESERVE_FLUSH_ALL)
		avail >>= 3;
	else
		avail >>= 1;

	if (used + bytes < space_info->total_bytes + avail)
		return 1;
	return 0;
}

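/*
 * Worked example of the overcommit clamp above (illustration only): with
 * 100GB of free chunk space and a RAID1 metadata profile, avail is halved
 * to 50GB, then shifted down to 6.25GB for BTRFS_RESERVE_FLUSH_ALL (>> 3)
 * or 25GB for the less patient flush modes (>> 1).
 */
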
static void btrfs_writeback_inodes_sb_nr(struct btrfs_root *root,
					 unsigned long nr_pages, int nr_items)
{
	struct super_block *sb = root->fs_info->sb;

	if (down_read_trylock(&sb->s_umount)) {
		writeback_inodes_sb_nr(sb, nr_pages, WB_REASON_FS_FREE_SPACE);
		up_read(&sb->s_umount);
	} else {
		/*
		 * We needn't worry about the filesystem going from r/w to r/o
		 * even though we don't acquire the ->s_umount mutex, because
		 * the filesystem should guarantee that the delalloc inode
		 * list is empty once the filesystem is read-only (all dirty
		 * pages have been written to disk).
		 */
		btrfs_start_delalloc_roots(root->fs_info, 0, nr_items);
		if (!current->journal_info)
			btrfs_wait_ordered_roots(root->fs_info, nr_items);
	}
}

static inline int calc_reclaim_items_nr(struct btrfs_root *root, u64 to_reclaim)
{
	u64 bytes;
	int nr;

	bytes = btrfs_calc_trans_metadata_size(root, 1);
	nr = (int)div64_u64(to_reclaim, bytes);
	if (!nr)
		nr = 1;
	return nr;
}

#define EXTENT_SIZE_PER_ITEM	(256 * 1024)

/*
 * shrink metadata reservation for delalloc
 */
static void shrink_delalloc(struct btrfs_root *root, u64 to_reclaim, u64 orig,
			    bool wait_ordered)
{
	struct btrfs_block_rsv *block_rsv;
	struct btrfs_space_info *space_info;
	struct btrfs_trans_handle *trans;
	u64 delalloc_bytes;
	u64 max_reclaim;
	long time_left;
	unsigned long nr_pages;
	int loops;
	int items;
	enum btrfs_reserve_flush_enum flush;

	/* Calc the number of items we need to flush for this space reservation */
	items = calc_reclaim_items_nr(root, to_reclaim);
	to_reclaim = items * EXTENT_SIZE_PER_ITEM;

	trans = (struct btrfs_trans_handle *)current->journal_info;
	block_rsv = &root->fs_info->delalloc_block_rsv;
	space_info = block_rsv->space_info;

	delalloc_bytes = percpu_counter_sum_positive(
						&root->fs_info->delalloc_bytes);
	if (delalloc_bytes == 0) {
		if (trans)
			return;
		if (wait_ordered)
			btrfs_wait_ordered_roots(root->fs_info, items);
		return;
	}

	loops = 0;
	while (delalloc_bytes && loops < 3) {
		max_reclaim = min(delalloc_bytes, to_reclaim);
		nr_pages = max_reclaim >> PAGE_CACHE_SHIFT;
		btrfs_writeback_inodes_sb_nr(root, nr_pages, items);
		/*
		 * We need to wait for the async pages to actually start before
		 * we do anything.
		 */
		max_reclaim = atomic_read(&root->fs_info->async_delalloc_pages);
		if (!max_reclaim)
			goto skip_async;

		if (max_reclaim <= nr_pages)
			max_reclaim = 0;
		else
			max_reclaim -= nr_pages;

		wait_event(root->fs_info->async_submit_wait,
			   atomic_read(&root->fs_info->async_delalloc_pages) <=
			   (int)max_reclaim);
skip_async:
		if (!trans)
			flush = BTRFS_RESERVE_FLUSH_ALL;
		else
			flush = BTRFS_RESERVE_NO_FLUSH;
		spin_lock(&space_info->lock);
		if (can_overcommit(root, space_info, orig, flush)) {
			spin_unlock(&space_info->lock);
			break;
		}
		spin_unlock(&space_info->lock);

		loops++;
		if (wait_ordered && !trans) {
			btrfs_wait_ordered_roots(root->fs_info, items);
		} else {
			time_left = schedule_timeout_killable(1);
			if (time_left)
				break;
		}
		delalloc_bytes = percpu_counter_sum_positive(
						&root->fs_info->delalloc_bytes);
	}
}

/**
 * may_commit_transaction - possibly commit the transaction if it's ok to
 * @root - the root we're allocating for
 * @space_info - the space_info we're allocating from
 * @bytes - the number of bytes we want to reserve
 * @force - force the commit
 *
 * This will check to make sure that committing the transaction will actually
 * get us somewhere and then commit the transaction if it does. Otherwise it
 * will return -ENOSPC.
 */
static int may_commit_transaction(struct btrfs_root *root,
				  struct btrfs_space_info *space_info,
				  u64 bytes, int force)
{
	struct btrfs_block_rsv *delayed_rsv = &root->fs_info->delayed_block_rsv;
	struct btrfs_trans_handle *trans;

	trans = (struct btrfs_trans_handle *)current->journal_info;
	if (trans)
		return -EAGAIN;

	if (force)
		goto commit;

	/* See if there is enough pinned space to make this reservation */
	if (percpu_counter_compare(&space_info->total_bytes_pinned,
				   bytes) >= 0)
		goto commit;

	/*
	 * See if there is some space in the delayed insertion reservation for
	 * this reservation.
	 */
	if (space_info != delayed_rsv->space_info)
		return -ENOSPC;

	spin_lock(&delayed_rsv->lock);
	if (percpu_counter_compare(&space_info->total_bytes_pinned,
				   bytes - delayed_rsv->size) >= 0) {
		spin_unlock(&delayed_rsv->lock);
		return -ENOSPC;
	}
	spin_unlock(&delayed_rsv->lock);

commit:
	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans))
		return -ENOSPC;

	return btrfs_commit_transaction(trans, root);
}

enum flush_state {
	FLUSH_DELAYED_ITEMS_NR	= 1,
	FLUSH_DELAYED_ITEMS	= 2,
	FLUSH_DELALLOC		= 3,
	FLUSH_DELALLOC_WAIT	= 4,
	ALLOC_CHUNK		= 5,
	COMMIT_TRANS		= 6,
};

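/*
 * Run one reclaim step.  Callers walk the states above in ascending order,
 * so reclaim escalates from the cheapest option (flushing a few delayed
 * items) to the most expensive one (committing the transaction).
 */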
static int flush_space(struct btrfs_root *root,
		       struct btrfs_space_info *space_info, u64 num_bytes,
		       u64 orig_bytes, int state)
{
	struct btrfs_trans_handle *trans;
	int nr;
	int ret = 0;

	switch (state) {
	case FLUSH_DELAYED_ITEMS_NR:
	case FLUSH_DELAYED_ITEMS:
		if (state == FLUSH_DELAYED_ITEMS_NR)
			nr = calc_reclaim_items_nr(root, num_bytes) * 2;
		else
			nr = -1;

		trans = btrfs_join_transaction(root);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			break;
		}
		ret = btrfs_run_delayed_items_nr(trans, root, nr);
		btrfs_end_transaction(trans, root);
		break;
	case FLUSH_DELALLOC:
	case FLUSH_DELALLOC_WAIT:
		shrink_delalloc(root, num_bytes * 2, orig_bytes,
				state == FLUSH_DELALLOC_WAIT);
		break;
	case ALLOC_CHUNK:
		trans = btrfs_join_transaction(root);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			break;
		}
		ret = do_chunk_alloc(trans, root->fs_info->extent_root,
				     btrfs_get_alloc_profile(root, 0),
				     CHUNK_ALLOC_NO_FORCE);
		btrfs_end_transaction(trans, root);
		if (ret == -ENOSPC)
			ret = 0;
		break;
	case COMMIT_TRANS:
		ret = may_commit_transaction(root, space_info, orig_bytes, 0);
		break;
	default:
		ret = -ENOSPC;
		break;
	}

	return ret;
}

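/*
 * Estimate how much metadata the async reclaimer should try to free: nothing
 * if a modest overcommit is still possible, otherwise however much usage
 * exceeds the 90% (or 95%) fill mark, capped at what is actually reclaimable
 * (bytes_may_use + bytes_reserved).
 */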
static inline u64
btrfs_calc_reclaim_metadata_size(struct btrfs_root *root,
				 struct btrfs_space_info *space_info)
{
	u64 used;
	u64 expected;
	u64 to_reclaim;

	to_reclaim = min_t(u64, num_online_cpus() * 1024 * 1024,
				16 * 1024 * 1024);
	spin_lock(&space_info->lock);
	if (can_overcommit(root, space_info, to_reclaim,
			   BTRFS_RESERVE_FLUSH_ALL)) {
		to_reclaim = 0;
		goto out;
	}

	used = space_info->bytes_used + space_info->bytes_reserved +
	       space_info->bytes_pinned + space_info->bytes_readonly +
	       space_info->bytes_may_use;
	if (can_overcommit(root, space_info, 1024 * 1024,
			   BTRFS_RESERVE_FLUSH_ALL))
		expected = div_factor_fine(space_info->total_bytes, 95);
	else
		expected = div_factor_fine(space_info->total_bytes, 90);

	if (used > expected)
		to_reclaim = used - expected;
	else
		to_reclaim = 0;
	to_reclaim = min(to_reclaim, space_info->bytes_may_use +
				     space_info->bytes_reserved);
out:
	spin_unlock(&space_info->lock);

	return to_reclaim;
}

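/*
 * Async reclaim is warranted once the space is at least 98% full, unless the
 * filesystem is being unmounted or remounted.
 */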
static inline int need_do_async_reclaim(struct btrfs_space_info *space_info,
					struct btrfs_fs_info *fs_info, u64 used)
{
	return (used >= div_factor_fine(space_info->total_bytes, 98) &&
		!btrfs_fs_closing(fs_info) &&
		!test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state));
}

static int btrfs_need_do_async_reclaim(struct btrfs_space_info *space_info,
				       struct btrfs_fs_info *fs_info)
{
	u64 used;

	spin_lock(&space_info->lock);
	used = space_info->bytes_used + space_info->bytes_reserved +
	       space_info->bytes_pinned + space_info->bytes_readonly +
	       space_info->bytes_may_use;
	if (need_do_async_reclaim(space_info, fs_info, used)) {
		spin_unlock(&space_info->lock);
		return 1;
	}
	spin_unlock(&space_info->lock);

	return 0;
}

static void btrfs_async_reclaim_metadata_space(struct work_struct *work)
{
	struct btrfs_fs_info *fs_info;
	struct btrfs_space_info *space_info;
	u64 to_reclaim;
	int flush_state;

	fs_info = container_of(work, struct btrfs_fs_info, async_reclaim_work);
	space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);

	to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info->fs_root,
						      space_info);
	if (!to_reclaim)
		return;

	flush_state = FLUSH_DELAYED_ITEMS_NR;
	do {
		flush_space(fs_info->fs_root, space_info, to_reclaim,
			    to_reclaim, flush_state);
		flush_state++;
		if (!btrfs_need_do_async_reclaim(space_info, fs_info))
			return;
	} while (flush_state <= COMMIT_TRANS);

	if (btrfs_need_do_async_reclaim(space_info, fs_info))
		queue_work(system_unbound_wq, work);
}

void btrfs_init_async_reclaim_work(struct work_struct *work)
{
	INIT_WORK(work, btrfs_async_reclaim_metadata_space);
}

/**
 * reserve_metadata_bytes - try to reserve bytes from the block_rsv's space
 * @root - the root we're allocating for
 * @block_rsv - the block_rsv we're allocating for
 * @orig_bytes - the number of bytes we want
 * @flush - whether or not we can flush to make our reservation
 *
 * This will reserve orig_bytes number of bytes from the space info associated
 * with the block_rsv.  If there is not enough space it will make an attempt to
 * flush out space to make room.  It will do this by flushing delalloc if
 * possible or committing the transaction.  If flush is 0 then no attempts to
 * regain reservations will be made and this will fail if there is not enough
 * space already.
 */
static int reserve_metadata_bytes(struct btrfs_root *root,
				  struct btrfs_block_rsv *block_rsv,
				  u64 orig_bytes,
				  enum btrfs_reserve_flush_enum flush)
{
	struct btrfs_space_info *space_info = block_rsv->space_info;
	u64 used;
	u64 num_bytes = orig_bytes;
	int flush_state = FLUSH_DELAYED_ITEMS_NR;
	int ret = 0;
	bool flushing = false;

again:
	ret = 0;
	spin_lock(&space_info->lock);
	/*
	 * We only want to wait if somebody other than us is flushing and we
	 * are actually allowed to flush all things.
	 */
	while (flush == BTRFS_RESERVE_FLUSH_ALL && !flushing &&
	       space_info->flush) {
		spin_unlock(&space_info->lock);
		/*
		 * If we have a trans handle we can't wait because the flusher
		 * may have to commit the transaction, which would mean we would
		 * deadlock since we are waiting for the flusher to finish, but
		 * hold the current transaction open.
		 */
		if (current->journal_info)
			return -EAGAIN;
		ret = wait_event_killable(space_info->wait, !space_info->flush);
		/* Must have been killed, return */
		if (ret)
			return -EINTR;

		spin_lock(&space_info->lock);
	}

	ret = -ENOSPC;
	used = space_info->bytes_used + space_info->bytes_reserved +
		space_info->bytes_pinned + space_info->bytes_readonly +
		space_info->bytes_may_use;

	/*
	 * The idea here is that if we've not already over-reserved the block
	 * group then we can go ahead and save our reservation first and then
	 * start flushing if we need to.  Otherwise if we've already
	 * overcommitted let's start flushing stuff first and then come back
	 * and try to make our reservation.
	 */
	if (used <= space_info->total_bytes) {
		if (used + orig_bytes <= space_info->total_bytes) {
			space_info->bytes_may_use += orig_bytes;
			trace_btrfs_space_reservation(root->fs_info,
				"space_info", space_info->flags, orig_bytes, 1);
			ret = 0;
		} else {
			/*
			 * Ok set num_bytes to orig_bytes since we aren't
			 * overcommitted, this way we only try and reclaim what
			 * we need.
			 */
			num_bytes = orig_bytes;
		}
	} else {
		/*
		 * Ok we're over committed, set num_bytes to the overcommitted
		 * amount plus the amount of bytes that we need for this
		 * reservation.
		 */
		num_bytes = used - space_info->total_bytes +
			(orig_bytes * 2);
	}

	if (ret && can_overcommit(root, space_info, orig_bytes, flush)) {
		space_info->bytes_may_use += orig_bytes;
		trace_btrfs_space_reservation(root->fs_info, "space_info",
					      space_info->flags, orig_bytes,
					      1);
		ret = 0;
	}

	/*
	 * Couldn't make our reservation, save our place so while we're trying
	 * to reclaim space we can actually use it instead of somebody else
	 * stealing it from us.
	 *
	 * We make the other tasks wait for the flush only when we can flush
	 * all things.
	 */
	if (ret && flush != BTRFS_RESERVE_NO_FLUSH) {
		flushing = true;
		space_info->flush = 1;
	} else if (!ret && space_info->flags & BTRFS_BLOCK_GROUP_METADATA) {
		used += orig_bytes;
		if (need_do_async_reclaim(space_info, root->fs_info, used) &&
		    !work_busy(&root->fs_info->async_reclaim_work))
			queue_work(system_unbound_wq,
				   &root->fs_info->async_reclaim_work);
	}
	spin_unlock(&space_info->lock);

	if (!ret || flush == BTRFS_RESERVE_NO_FLUSH)
		goto out;

	ret = flush_space(root, space_info, num_bytes, orig_bytes,
			  flush_state);
	flush_state++;

	/*
	 * If we are FLUSH_LIMIT, we cannot flush delalloc, or a deadlock
	 * could happen.  So skip the delalloc flush.
	 */
	if (flush == BTRFS_RESERVE_FLUSH_LIMIT &&
	    (flush_state == FLUSH_DELALLOC ||
	     flush_state == FLUSH_DELALLOC_WAIT))
		flush_state = ALLOC_CHUNK;

	if (!ret)
		goto again;
	else if (flush == BTRFS_RESERVE_FLUSH_LIMIT &&
		 flush_state < COMMIT_TRANS)
		goto again;
	else if (flush == BTRFS_RESERVE_FLUSH_ALL &&
		 flush_state <= COMMIT_TRANS)
		goto again;

out:
	if (ret == -ENOSPC &&
	    unlikely(root->orphan_cleanup_state == ORPHAN_CLEANUP_STARTED)) {
		struct btrfs_block_rsv *global_rsv =
			&root->fs_info->global_block_rsv;

		if (block_rsv != global_rsv &&
		    !block_rsv_use_bytes(global_rsv, orig_bytes))
			ret = 0;
	}
	if (ret == -ENOSPC)
		trace_btrfs_space_reservation(root->fs_info,
					      "space_info:enospc",
					      space_info->flags, orig_bytes, 1);
	if (flushing) {
		spin_lock(&space_info->lock);
		space_info->flush = 0;
		wake_up_all(&space_info->wait);
		spin_unlock(&space_info->lock);
	}
	return ret;
}

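/*
 * Pick the block reservation that allocations for @root should draw from:
 * the transaction's rsv for COW-able roots (and for csum/uuid tree updates
 * made inside a transaction), otherwise the root's own rsv, falling back to
 * the empty rsv so callers always get something non-NULL.
 */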
static struct btrfs_block_rsv *get_block_rsv(
					const struct btrfs_trans_handle *trans,
					const struct btrfs_root *root)
{
	struct btrfs_block_rsv *block_rsv = NULL;

	if (test_bit(BTRFS_ROOT_REF_COWS, &root->state))
		block_rsv = trans->block_rsv;

	if (root == root->fs_info->csum_root && trans->adding_csums)
		block_rsv = trans->block_rsv;

	if (root == root->fs_info->uuid_root)
		block_rsv = trans->block_rsv;

	if (!block_rsv)
		block_rsv = root->block_rsv;

	if (!block_rsv)
		block_rsv = &root->fs_info->empty_block_rsv;

	return block_rsv;
}

static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
			       u64 num_bytes)
{
	int ret = -ENOSPC;

	spin_lock(&block_rsv->lock);
	if (block_rsv->reserved >= num_bytes) {
		block_rsv->reserved -= num_bytes;
		if (block_rsv->reserved < block_rsv->size)
			block_rsv->full = 0;
		ret = 0;
	}
	spin_unlock(&block_rsv->lock);
	return ret;
}

static void block_rsv_add_bytes(struct btrfs_block_rsv *block_rsv,
				u64 num_bytes, int update_size)
{
	spin_lock(&block_rsv->lock);
	block_rsv->reserved += num_bytes;
	if (update_size)
		block_rsv->size += num_bytes;
	else if (block_rsv->reserved >= block_rsv->size)
		block_rsv->full = 1;
	spin_unlock(&block_rsv->lock);
}

int btrfs_cond_migrate_bytes(struct btrfs_fs_info *fs_info,
			     struct btrfs_block_rsv *dest, u64 num_bytes,
			     int min_factor)
{
	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
	u64 min_bytes;

	if (global_rsv->space_info != dest->space_info)
		return -ENOSPC;

	spin_lock(&global_rsv->lock);
	min_bytes = div_factor(global_rsv->size, min_factor);
	if (global_rsv->reserved < min_bytes + num_bytes) {
		spin_unlock(&global_rsv->lock);
		return -ENOSPC;
	}
	global_rsv->reserved -= num_bytes;
	if (global_rsv->reserved < global_rsv->size)
		global_rsv->full = 0;
	spin_unlock(&global_rsv->lock);

	block_rsv_add_bytes(dest, num_bytes, 1);
	return 0;
}

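/*
 * Shrink @block_rsv by @num_bytes ((u64)-1 empties it).  Reserved bytes the
 * rsv no longer needs are first spilled into @dest (typically the global
 * reserve) if it has room, and any remainder is returned to the space_info's
 * bytes_may_use.
 */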
static void block_rsv_release_bytes(struct btrfs_fs_info *fs_info,
				    struct btrfs_block_rsv *block_rsv,
				    struct btrfs_block_rsv *dest, u64 num_bytes)
{
	struct btrfs_space_info *space_info = block_rsv->space_info;

	spin_lock(&block_rsv->lock);
	if (num_bytes == (u64)-1)
		num_bytes = block_rsv->size;
	block_rsv->size -= num_bytes;
	if (block_rsv->reserved >= block_rsv->size) {
		num_bytes = block_rsv->reserved - block_rsv->size;
		block_rsv->reserved = block_rsv->size;
		block_rsv->full = 1;
	} else {
		num_bytes = 0;
	}
	spin_unlock(&block_rsv->lock);

	if (num_bytes > 0) {
		if (dest) {
			spin_lock(&dest->lock);
			if (!dest->full) {
				u64 bytes_to_add;

				bytes_to_add = dest->size - dest->reserved;
				bytes_to_add = min(num_bytes, bytes_to_add);
				dest->reserved += bytes_to_add;
				if (dest->reserved >= dest->size)
					dest->full = 1;
				num_bytes -= bytes_to_add;
			}
			spin_unlock(&dest->lock);
		}
		if (num_bytes) {
			spin_lock(&space_info->lock);
			space_info->bytes_may_use -= num_bytes;
			trace_btrfs_space_reservation(fs_info, "space_info",
					space_info->flags, num_bytes, 0);
			spin_unlock(&space_info->lock);
		}
	}
}

static int block_rsv_migrate_bytes(struct btrfs_block_rsv *src,
				   struct btrfs_block_rsv *dst, u64 num_bytes)
{
	int ret;

	ret = block_rsv_use_bytes(src, num_bytes);
	if (ret)
		return ret;

	block_rsv_add_bytes(dst, num_bytes, 1);
	return 0;
}

void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv, unsigned short type)
{
	memset(rsv, 0, sizeof(*rsv));
	spin_lock_init(&rsv->lock);
	rsv->type = type;
}

struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_root *root,
					      unsigned short type)
{
	struct btrfs_block_rsv *block_rsv;
	struct btrfs_fs_info *fs_info = root->fs_info;

	block_rsv = kmalloc(sizeof(*block_rsv), GFP_NOFS);
	if (!block_rsv)
		return NULL;

	btrfs_init_block_rsv(block_rsv, type);
	block_rsv->space_info = __find_space_info(fs_info,
						  BTRFS_BLOCK_GROUP_METADATA);
	return block_rsv;
}

void btrfs_free_block_rsv(struct btrfs_root *root,
			  struct btrfs_block_rsv *rsv)
{
	if (!rsv)
		return;
	btrfs_block_rsv_release(root, rsv, (u64)-1);
	kfree(rsv);
}

int btrfs_block_rsv_add(struct btrfs_root *root,
			struct btrfs_block_rsv *block_rsv, u64 num_bytes,
			enum btrfs_reserve_flush_enum flush)
{
	int ret;

	if (num_bytes == 0)
		return 0;

	ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
	if (!ret) {
		block_rsv_add_bytes(block_rsv, num_bytes, 1);
		return 0;
	}

	return ret;
}

int btrfs_block_rsv_check(struct btrfs_root *root,
			  struct btrfs_block_rsv *block_rsv, int min_factor)
{
	u64 num_bytes = 0;
	int ret = -ENOSPC;

	if (!block_rsv)
		return 0;

	spin_lock(&block_rsv->lock);
	num_bytes = div_factor(block_rsv->size, min_factor);
	if (block_rsv->reserved >= num_bytes)
		ret = 0;
	spin_unlock(&block_rsv->lock);

	return ret;
}

int btrfs_block_rsv_refill(struct btrfs_root *root,
			   struct btrfs_block_rsv *block_rsv, u64 min_reserved,
			   enum btrfs_reserve_flush_enum flush)
{
	u64 num_bytes = 0;
	int ret = -ENOSPC;

	if (!block_rsv)
		return 0;

	spin_lock(&block_rsv->lock);
	num_bytes = min_reserved;
	if (block_rsv->reserved >= num_bytes)
		ret = 0;
	else
		num_bytes -= block_rsv->reserved;
	spin_unlock(&block_rsv->lock);

	if (!ret)
		return 0;

	ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
	if (!ret) {
		block_rsv_add_bytes(block_rsv, num_bytes, 0);
		return 0;
	}

	return ret;
}

int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv,
			    struct btrfs_block_rsv *dst_rsv,
			    u64 num_bytes)
{
	return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
}

void btrfs_block_rsv_release(struct btrfs_root *root,
			     struct btrfs_block_rsv *block_rsv,
			     u64 num_bytes)
{
	struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;

	if (global_rsv == block_rsv ||
	    block_rsv->space_info != global_rsv->space_info)
		global_rsv = NULL;
	block_rsv_release_bytes(root->fs_info, block_rsv, global_rsv,
				num_bytes);
}

/*
 * Helper to calculate the size of the global block reservation.
 * The desired value is the sum of space used by the extent tree,
 * checksum tree and root tree.
 */
static u64 calc_global_metadata_size(struct btrfs_fs_info *fs_info)
{
	struct btrfs_space_info *sinfo;
	u64 num_bytes;
	u64 meta_used;
	u64 data_used;
	int csum_size = btrfs_super_csum_size(fs_info->super_copy);

	sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_DATA);
	spin_lock(&sinfo->lock);
	data_used = sinfo->bytes_used;
	spin_unlock(&sinfo->lock);

	sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
	spin_lock(&sinfo->lock);
	if (sinfo->flags & BTRFS_BLOCK_GROUP_DATA)
		data_used = 0;
	meta_used = sinfo->bytes_used;
	spin_unlock(&sinfo->lock);

	num_bytes = (data_used >> fs_info->sb->s_blocksize_bits) *
		    csum_size * 2;
	num_bytes += div64_u64(data_used + meta_used, 50);

	if (num_bytes * 3 > meta_used)
		num_bytes = div64_u64(meta_used, 3);

	return ALIGN(num_bytes, fs_info->extent_root->leafsize << 10);
}

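/*
 * Resize the global block reservation to the calculated target (capped at
 * 512MB).  In effect this claims all currently-free metadata space for the
 * reserve and then hands back whatever exceeds the target size.
 */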
static void update_global_block_rsv(struct btrfs_fs_info *fs_info)
{
	struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
	struct btrfs_space_info *sinfo = block_rsv->space_info;
	u64 num_bytes;

	num_bytes = calc_global_metadata_size(fs_info);

	spin_lock(&sinfo->lock);
	spin_lock(&block_rsv->lock);

	block_rsv->size = min_t(u64, num_bytes, 512 * 1024 * 1024);

	num_bytes = sinfo->bytes_used + sinfo->bytes_pinned +
		    sinfo->bytes_reserved + sinfo->bytes_readonly +
		    sinfo->bytes_may_use;

	if (sinfo->total_bytes > num_bytes) {
		num_bytes = sinfo->total_bytes - num_bytes;
		block_rsv->reserved += num_bytes;
		sinfo->bytes_may_use += num_bytes;
		trace_btrfs_space_reservation(fs_info, "space_info",
					      sinfo->flags, num_bytes, 1);
	}

	if (block_rsv->reserved >= block_rsv->size) {
		num_bytes = block_rsv->reserved - block_rsv->size;
		sinfo->bytes_may_use -= num_bytes;
		trace_btrfs_space_reservation(fs_info, "space_info",
					      sinfo->flags, num_bytes, 0);
		block_rsv->reserved = block_rsv->size;
		block_rsv->full = 1;
	}

	spin_unlock(&block_rsv->lock);
	spin_unlock(&sinfo->lock);
}

static void init_global_block_rsv(struct btrfs_fs_info *fs_info)
{
	struct btrfs_space_info *space_info;

	space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
	fs_info->chunk_block_rsv.space_info = space_info;

	space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
	fs_info->global_block_rsv.space_info = space_info;
	fs_info->delalloc_block_rsv.space_info = space_info;
	fs_info->trans_block_rsv.space_info = space_info;
	fs_info->empty_block_rsv.space_info = space_info;
	fs_info->delayed_block_rsv.space_info = space_info;

	fs_info->extent_root->block_rsv = &fs_info->global_block_rsv;
	fs_info->csum_root->block_rsv = &fs_info->global_block_rsv;
	fs_info->dev_root->block_rsv = &fs_info->global_block_rsv;
	fs_info->tree_root->block_rsv = &fs_info->global_block_rsv;
	if (fs_info->quota_root)
		fs_info->quota_root->block_rsv = &fs_info->global_block_rsv;
	fs_info->chunk_root->block_rsv = &fs_info->chunk_block_rsv;

	update_global_block_rsv(fs_info);
}

static void release_global_block_rsv(struct btrfs_fs_info *fs_info)
{
	block_rsv_release_bytes(fs_info, &fs_info->global_block_rsv, NULL,
				(u64)-1);
	WARN_ON(fs_info->delalloc_block_rsv.size > 0);
	WARN_ON(fs_info->delalloc_block_rsv.reserved > 0);
	WARN_ON(fs_info->trans_block_rsv.size > 0);
	WARN_ON(fs_info->trans_block_rsv.reserved > 0);
	WARN_ON(fs_info->chunk_block_rsv.size > 0);
	WARN_ON(fs_info->chunk_block_rsv.reserved > 0);
	WARN_ON(fs_info->delayed_block_rsv.size > 0);
	WARN_ON(fs_info->delayed_block_rsv.reserved > 0);
}

void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root)
{
	if (!trans->block_rsv)
		return;

	if (!trans->bytes_reserved)
		return;

	trace_btrfs_space_reservation(root->fs_info, "transaction",
				      trans->transid, trans->bytes_reserved, 0);
	btrfs_block_rsv_release(root, trans->block_rsv, trans->bytes_reserved);
	trans->bytes_reserved = 0;
}

/* Can only return 0 or -ENOSPC */
int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans,
				  struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_block_rsv *src_rsv = get_block_rsv(trans, root);
	struct btrfs_block_rsv *dst_rsv = root->orphan_block_rsv;

	/*
	 * We need to hold space in order to delete our orphan item once we've
	 * added it, so this takes the reservation so we can release it later
	 * when we are truly done with the orphan item.
	 */
	u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);

	trace_btrfs_space_reservation(root->fs_info, "orphan",
				      btrfs_ino(inode), num_bytes, 1);
	return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
}

void btrfs_orphan_release_metadata(struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);

	trace_btrfs_space_reservation(root->fs_info, "orphan",
				      btrfs_ino(inode), num_bytes, 0);
	btrfs_block_rsv_release(root, root->orphan_block_rsv, num_bytes);
}

/*
 * btrfs_subvolume_reserve_metadata() - reserve space for subvolume operation
 * root: the root of the parent directory
 * rsv: block reservation
 * items: the number of items that we need to reserve
 * qgroup_reserved: used to return the reserved size in qgroup
 *
 * This function is used to reserve the space for snapshot/subvolume
 * creation and deletion.  Those operations are different from the
 * common file/directory operations: they change two fs/file trees
 * and the root tree, and the number of items that the qgroup reserves
 * differs from the free space reservation.  So we can not use
 * the space reservation mechanism in start_transaction().
 */
int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
				     struct btrfs_block_rsv *rsv,
				     int items,
				     u64 *qgroup_reserved,
				     bool use_global_rsv)
{
	u64 num_bytes;
	int ret;
	struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;

	if (root->fs_info->quota_enabled) {
		/* One for parent inode, two for dir entries */
		num_bytes = 3 * root->leafsize;
		ret = btrfs_qgroup_reserve(root, num_bytes);
		if (ret)
			return ret;
	} else {
		num_bytes = 0;
	}

	*qgroup_reserved = num_bytes;

	num_bytes = btrfs_calc_trans_metadata_size(root, items);
	rsv->space_info = __find_space_info(root->fs_info,
					    BTRFS_BLOCK_GROUP_METADATA);
	ret = btrfs_block_rsv_add(root, rsv, num_bytes,
				  BTRFS_RESERVE_FLUSH_ALL);

	if (ret == -ENOSPC && use_global_rsv)
		ret = btrfs_block_rsv_migrate(global_rsv, rsv, num_bytes);

	if (ret) {
		if (*qgroup_reserved)
			btrfs_qgroup_free(root, *qgroup_reserved);
	}

	return ret;
}

void btrfs_subvolume_release_metadata(struct btrfs_root *root,
				      struct btrfs_block_rsv *rsv,
				      u64 qgroup_reserved)
{
	btrfs_block_rsv_release(root, rsv, (u64)-1);
	if (qgroup_reserved)
		btrfs_qgroup_free(root, qgroup_reserved);
}

/**
 * drop_outstanding_extent - drop an outstanding extent
 * @inode: the inode we're dropping the extent for
 *
 * This is called when we are freeing up an outstanding extent, either called
 * after an error or after an extent is written.  This will return the number
 * of reserved extents that need to be freed.  This must be called with
 * BTRFS_I(inode)->lock held.
 */
static unsigned drop_outstanding_extent(struct inode *inode)
{
	unsigned drop_inode_space = 0;
	unsigned dropped_extents = 0;

	BUG_ON(!BTRFS_I(inode)->outstanding_extents);
	BTRFS_I(inode)->outstanding_extents--;

	if (BTRFS_I(inode)->outstanding_extents == 0 &&
	    test_and_clear_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
			       &BTRFS_I(inode)->runtime_flags))
		drop_inode_space = 1;

	/*
	 * If we have more or the same amount of outstanding extents than we
	 * have reserved then we need to leave the reserved extents count
	 * alone.
	 */
	if (BTRFS_I(inode)->outstanding_extents >=
	    BTRFS_I(inode)->reserved_extents)
		return drop_inode_space;

	dropped_extents = BTRFS_I(inode)->reserved_extents -
		BTRFS_I(inode)->outstanding_extents;
	BTRFS_I(inode)->reserved_extents -= dropped_extents;
	return dropped_extents + drop_inode_space;
}

/**
 * calc_csum_metadata_size - return the amount of metadata space that must be
 *	reserved/freed for the given bytes.
 * @inode: the inode we're manipulating
 * @num_bytes: the number of bytes in question
 * @reserve: 1 if we are reserving space, 0 if we are freeing space
 *
 * This adjusts the number of csum_bytes in the inode and then returns the
 * correct amount of metadata that must either be reserved or freed.  We
 * calculate how many checksums we can fit into one leaf and then divide the
 * number of bytes that will need to be checksummed by this value to figure
 * out how many checksums will be required.  If we are adding bytes then the
 * number may go up and we will return the number of additional bytes that
 * must be reserved.  If it is going down we will return the number of bytes
 * that must be freed.
 *
 * This must be called with BTRFS_I(inode)->lock held.
 */
static u64 calc_csum_metadata_size(struct inode *inode, u64 num_bytes,
				   int reserve)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	u64 csum_size;
	int num_csums_per_leaf;
	int num_csums;
	int old_csums;

	if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM &&
	    BTRFS_I(inode)->csum_bytes == 0)
		return 0;

	old_csums = (int)div64_u64(BTRFS_I(inode)->csum_bytes, root->sectorsize);
	if (reserve)
		BTRFS_I(inode)->csum_bytes += num_bytes;
	else
		BTRFS_I(inode)->csum_bytes -= num_bytes;
	csum_size = BTRFS_LEAF_DATA_SIZE(root) - sizeof(struct btrfs_item);
	num_csums_per_leaf = (int)div64_u64(csum_size,
					    sizeof(struct btrfs_csum_item) +
					    sizeof(struct btrfs_disk_key));
	num_csums = (int)div64_u64(BTRFS_I(inode)->csum_bytes, root->sectorsize);
	num_csums = num_csums + num_csums_per_leaf - 1;
	num_csums = num_csums / num_csums_per_leaf;

	old_csums = old_csums + num_csums_per_leaf - 1;
	old_csums = old_csums / num_csums_per_leaf;

	/* No change, no need to reserve more */
	if (old_csums == num_csums)
		return 0;

	if (reserve)
		return btrfs_calc_trans_metadata_size(root,
						      num_csums - old_csums);

	return btrfs_calc_trans_metadata_size(root, old_csums - num_csums);
}

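/*
 * Reserve metadata space for @num_bytes of delalloc: any newly outstanding
 * extents, a possible inode-update item, and the csum items the data will
 * eventually need.
 */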
int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_block_rsv *block_rsv = &root->fs_info->delalloc_block_rsv;
	u64 to_reserve = 0;
	u64 csum_bytes;
	unsigned nr_extents = 0;
	int extra_reserve = 0;
	enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_FLUSH_ALL;
	int ret = 0;
	bool delalloc_lock = true;
	u64 to_free = 0;
	unsigned dropped;

	/*
	 * If we are a free space inode we need to not flush since we will be in
	 * the middle of a transaction commit.  We also don't need the delalloc
	 * mutex since we won't race with anybody.  We need this mostly to make
	 * lockdep shut its filthy mouth.
	 */
	if (btrfs_is_free_space_inode(inode)) {
		flush = BTRFS_RESERVE_NO_FLUSH;
		delalloc_lock = false;
	}

	if (flush != BTRFS_RESERVE_NO_FLUSH &&
	    btrfs_transaction_in_commit(root->fs_info))
		schedule_timeout(1);

	if (delalloc_lock)
		mutex_lock(&BTRFS_I(inode)->delalloc_mutex);

	num_bytes = ALIGN(num_bytes, root->sectorsize);

	spin_lock(&BTRFS_I(inode)->lock);
	BTRFS_I(inode)->outstanding_extents++;

	if (BTRFS_I(inode)->outstanding_extents >
	    BTRFS_I(inode)->reserved_extents)
		nr_extents = BTRFS_I(inode)->outstanding_extents -
			BTRFS_I(inode)->reserved_extents;

	/*
	 * Add an item to reserve for updating the inode when we complete the
	 * delalloc io.
	 */
	if (!test_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
		      &BTRFS_I(inode)->runtime_flags)) {
		nr_extents++;
		extra_reserve = 1;
	}

	to_reserve = btrfs_calc_trans_metadata_size(root, nr_extents);
	to_reserve += calc_csum_metadata_size(inode, num_bytes, 1);
	csum_bytes = BTRFS_I(inode)->csum_bytes;
	spin_unlock(&BTRFS_I(inode)->lock);

	if (root->fs_info->quota_enabled) {
		ret = btrfs_qgroup_reserve(root, num_bytes +
					   nr_extents * root->leafsize);
		if (ret)
			goto out_fail;
	}

	ret = reserve_metadata_bytes(root, block_rsv, to_reserve, flush);
	if (unlikely(ret)) {
		if (root->fs_info->quota_enabled)
			btrfs_qgroup_free(root, num_bytes +
						nr_extents * root->leafsize);
		goto out_fail;
	}

	spin_lock(&BTRFS_I(inode)->lock);
	if (extra_reserve) {
		set_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
			&BTRFS_I(inode)->runtime_flags);
		nr_extents--;
	}
	BTRFS_I(inode)->reserved_extents += nr_extents;
	spin_unlock(&BTRFS_I(inode)->lock);

	if (delalloc_lock)
		mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);

	if (to_reserve)
		trace_btrfs_space_reservation(root->fs_info, "delalloc",
					      btrfs_ino(inode), to_reserve, 1);
	block_rsv_add_bytes(block_rsv, to_reserve, 1);

	return 0;

out_fail:
	spin_lock(&BTRFS_I(inode)->lock);
	dropped = drop_outstanding_extent(inode);
	/*
	 * If the inode's csum_bytes is the same as the original csum_bytes
	 * then we know we haven't raced with any freers, so we can just
	 * reduce the inode's csum bytes and carry on.
	 */
	if (BTRFS_I(inode)->csum_bytes == csum_bytes) {
		calc_csum_metadata_size(inode, num_bytes, 0);
	} else {
		u64 orig_csum_bytes = BTRFS_I(inode)->csum_bytes;
		u64 bytes;

		/*
		 * This is tricky, but first we need to figure out how much we
		 * freed from any freers that occurred during this
		 * reservation, so we reset ->csum_bytes to the csum_bytes
		 * before we dropped our lock, and then call the free for the
		 * number of bytes that were freed while we were trying our
		 * reservation.
		 */
		bytes = csum_bytes - BTRFS_I(inode)->csum_bytes;
		BTRFS_I(inode)->csum_bytes = csum_bytes;
		to_free = calc_csum_metadata_size(inode, bytes, 0);

		/*
		 * Now we need to see how much we would have freed had we not
		 * been making this reservation and our ->csum_bytes were not
		 * artificially inflated.
		 */
		BTRFS_I(inode)->csum_bytes = csum_bytes - num_bytes;
		bytes = csum_bytes - orig_csum_bytes;
		bytes = calc_csum_metadata_size(inode, bytes, 0);

		/*
		 * Now reset ->csum_bytes to what it should be.  If bytes is
		 * more than to_free then we would have freed more space had we
		 * not had an artificially high ->csum_bytes, so we need to
		 * free the remainder.  If bytes is the same or less then we
		 * don't need to do anything, the other freers did the correct
		 * thing.
		 */
		BTRFS_I(inode)->csum_bytes = orig_csum_bytes - num_bytes;
		if (bytes > to_free)
			to_free = bytes - to_free;
		else
			to_free = 0;
	}
	spin_unlock(&BTRFS_I(inode)->lock);
	if (dropped)
		to_free += btrfs_calc_trans_metadata_size(root, dropped);

	if (to_free) {
		btrfs_block_rsv_release(root, block_rsv, to_free);
		trace_btrfs_space_reservation(root->fs_info, "delalloc",
					      btrfs_ino(inode), to_free, 0);
	}
	if (delalloc_lock)
		mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
	return ret;
}

/**
 * btrfs_delalloc_release_metadata - release a metadata reservation for an inode
 * @inode: the inode to release the reservation for
 * @num_bytes: the number of bytes we're releasing
 *
 * This will release the metadata reservation for an inode.  This can be called
 * once we complete IO for a given set of bytes to release their metadata
 * reservations.
 */
void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	u64 to_free = 0;
	unsigned dropped;

	num_bytes = ALIGN(num_bytes, root->sectorsize);
	spin_lock(&BTRFS_I(inode)->lock);
	dropped = drop_outstanding_extent(inode);

	if (num_bytes)
		to_free = calc_csum_metadata_size(inode, num_bytes, 0);
	spin_unlock(&BTRFS_I(inode)->lock);
	if (dropped > 0)
		to_free += btrfs_calc_trans_metadata_size(root, dropped);

	trace_btrfs_space_reservation(root->fs_info, "delalloc",
				      btrfs_ino(inode), to_free, 0);
	if (root->fs_info->quota_enabled) {
		btrfs_qgroup_free(root, num_bytes +
					dropped * root->leafsize);
	}

	btrfs_block_rsv_release(root, &root->fs_info->delalloc_block_rsv,
				to_free);
}

/**
 * btrfs_delalloc_reserve_space - reserve data and metadata space for delalloc
 * @inode: inode we're writing to
 * @num_bytes: the number of bytes we want to allocate
 *
 * This will do the following things
 *
 * o reserve space in the data space info for num_bytes
 * o reserve space in the metadata space info based on the number of
 *   outstanding extents and how many csums will be needed
 * o add to the inode's ->delalloc_bytes
 * o add it to the fs_info's delalloc inodes list.
 *
 * This will return 0 for success and -ENOSPC if there is no space left.
 */
int btrfs_delalloc_reserve_space(struct inode *inode, u64 num_bytes)
{
	int ret;

	ret = btrfs_check_data_free_space(inode, num_bytes);
	if (ret)
		return ret;

	ret = btrfs_delalloc_reserve_metadata(inode, num_bytes);
	if (ret) {
		btrfs_free_reserved_data_space(inode, num_bytes);
		return ret;
	}

	return 0;
}

/**
 * btrfs_delalloc_release_space - release data and metadata space for delalloc
 * @inode: inode we're releasing space for
 * @num_bytes: the number of bytes we want to free up
 *
 * This must be matched with a call to btrfs_delalloc_reserve_space.  This is
 * called when we no longer need the metadata AND data reservations, e.g. when
 * there has been an error or we are inserting an inline extent.
 *
 * This function will release the metadata space that was not used and will
 * decrement ->delalloc_bytes and remove it from the fs_info delalloc_inodes
 * list if there are no delalloc bytes left.
 */
void btrfs_delalloc_release_space(struct inode *inode, u64 num_bytes)
{
	btrfs_delalloc_release_metadata(inode, num_bytes);
	btrfs_free_reserved_data_space(inode, num_bytes);
}

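/*
 * Propagate an allocation (@alloc != 0) or a free of @num_bytes at @bytenr
 * into the superblock totals, the affected block group items and their
 * space_info counters.  Freed ranges are additionally pinned until the
 * transaction commits.
 */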
static int update_block_group(struct btrfs_root *root,
			      u64 bytenr, u64 num_bytes, int alloc)
{
	struct btrfs_block_group_cache *cache = NULL;
	struct btrfs_fs_info *info = root->fs_info;
	u64 total = num_bytes;
	u64 old_val;
	u64 byte_in_group;
	int factor;

	/* block accounting for super block */
	spin_lock(&info->delalloc_root_lock);
	old_val = btrfs_super_bytes_used(info->super_copy);
	if (alloc)
		old_val += num_bytes;
	else
		old_val -= num_bytes;
	btrfs_set_super_bytes_used(info->super_copy, old_val);
	spin_unlock(&info->delalloc_root_lock);

	while (total) {
		cache = btrfs_lookup_block_group(info, bytenr);
		if (!cache)
			return -ENOENT;
		if (cache->flags & (BTRFS_BLOCK_GROUP_DUP |
				    BTRFS_BLOCK_GROUP_RAID1 |
				    BTRFS_BLOCK_GROUP_RAID10))
			factor = 2;
		else
			factor = 1;
		/*
		 * If this block group has free space cache written out, we
		 * need to make sure to load it if we are removing space.  This
		 * is because we need the unpinning stage to actually add the
		 * space back to the block group, otherwise we will leak space.
		 */
		if (!alloc && cache->cached == BTRFS_CACHE_NO)
			cache_block_group(cache, 1);

		byte_in_group = bytenr - cache->key.objectid;
		WARN_ON(byte_in_group > cache->key.offset);

		spin_lock(&cache->space_info->lock);
		spin_lock(&cache->lock);

		if (btrfs_test_opt(root, SPACE_CACHE) &&
		    cache->disk_cache_state < BTRFS_DC_CLEAR)
			cache->disk_cache_state = BTRFS_DC_CLEAR;

		cache->dirty = 1;
		old_val = btrfs_block_group_used(&cache->item);
		num_bytes = min(total, cache->key.offset - byte_in_group);
		if (alloc) {
			old_val += num_bytes;
			btrfs_set_block_group_used(&cache->item, old_val);
			cache->reserved -= num_bytes;
			cache->space_info->bytes_reserved -= num_bytes;
			cache->space_info->bytes_used += num_bytes;
			cache->space_info->disk_used += num_bytes * factor;
			spin_unlock(&cache->lock);
			spin_unlock(&cache->space_info->lock);
		} else {
			old_val -= num_bytes;
			btrfs_set_block_group_used(&cache->item, old_val);
			cache->pinned += num_bytes;
			cache->space_info->bytes_pinned += num_bytes;
			cache->space_info->bytes_used -= num_bytes;
			cache->space_info->disk_used -= num_bytes * factor;
			spin_unlock(&cache->lock);
			spin_unlock(&cache->space_info->lock);

			set_extent_dirty(info->pinned_extents,
					 bytenr, bytenr + num_bytes - 1,
					 GFP_NOFS | __GFP_NOFAIL);
		}
		btrfs_put_block_group(cache);
		total -= num_bytes;
		bytenr += num_bytes;
	}
	return 0;
}

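/*
 * Find the lowest logical byte covered by a block group: the cached fs-wide
 * value when it is valid, otherwise the start of the first block group at or
 * after @search_start.
 */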
static u64 first_logical_byte(struct btrfs_root *root, u64 search_start)
{
	struct btrfs_block_group_cache *cache;
	u64 bytenr;

	spin_lock(&root->fs_info->block_group_cache_lock);
	bytenr = root->fs_info->first_logical_byte;
	spin_unlock(&root->fs_info->block_group_cache_lock);

	if (bytenr < (u64)-1)
		return bytenr;

	cache = btrfs_lookup_first_block_group(root->fs_info, search_start);
	if (!cache)
		return 0;

	bytenr = cache->key.objectid;
	btrfs_put_block_group(cache);

	return bytenr;
}

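/*
 * Move @num_bytes at @bytenr into the pinned state: update the block group
 * and space_info counters (releasing a prior reservation if @reserved) and
 * mark the range in pinned_extents so it is unpinned at transaction commit.
 */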
static int pin_down_extent(struct btrfs_root *root,
			   struct btrfs_block_group_cache *cache,
			   u64 bytenr, u64 num_bytes, int reserved)
{
	spin_lock(&cache->space_info->lock);
	spin_lock(&cache->lock);
	cache->pinned += num_bytes;
	cache->space_info->bytes_pinned += num_bytes;
	if (reserved) {
		cache->reserved -= num_bytes;
		cache->space_info->bytes_reserved -= num_bytes;
	}
	spin_unlock(&cache->lock);
	spin_unlock(&cache->space_info->lock);

	set_extent_dirty(root->fs_info->pinned_extents, bytenr,
			 bytenr + num_bytes - 1, GFP_NOFS | __GFP_NOFAIL);
	if (reserved)
		trace_btrfs_reserved_extent_free(root, bytenr, num_bytes);
	return 0;
}

/*
 * this function must be called within transaction
 */
int btrfs_pin_extent(struct btrfs_root *root,
		     u64 bytenr, u64 num_bytes, int reserved)
{
	struct btrfs_block_group_cache *cache;

	cache = btrfs_lookup_block_group(root->fs_info, bytenr);
	BUG_ON(!cache); /* Logic error */

	pin_down_extent(root, cache, bytenr, num_bytes, reserved);

	btrfs_put_block_group(cache);
	return 0;
}

/*
 * this function must be called within transaction
 */
int btrfs_pin_extent_for_log_replay(struct btrfs_root *root,
				    u64 bytenr, u64 num_bytes)
{
	struct btrfs_block_group_cache *cache;
	int ret;

	cache = btrfs_lookup_block_group(root->fs_info, bytenr);
	if (!cache)
		return -EINVAL;

	/*
	 * pull in the free space cache (if any) so that our pin
	 * removes the free space from the cache.  We have load_only set
	 * to one because the slow code to read in the free extents does check
	 * the pinned extents.
	 */
	cache_block_group(cache, 1);

	pin_down_extent(root, cache, bytenr, num_bytes, 0);

	/* remove us from the free space cache (if we're there at all) */
	ret = btrfs_remove_free_space(cache, bytenr, num_bytes);
	btrfs_put_block_group(cache);
	return ret;
}

static int __exclude_logged_extent(struct btrfs_root *root, u64 start, u64 num_bytes)
{
	int ret;
	struct btrfs_block_group_cache *block_group;
	struct btrfs_caching_control *caching_ctl;

	block_group = btrfs_lookup_block_group(root->fs_info, start);
	if (!block_group)
		return -EINVAL;

	cache_block_group(block_group, 0);
	caching_ctl = get_caching_control(block_group);

	if (!caching_ctl) {
		/* Logic error */
		BUG_ON(!block_group_cache_done(block_group));
		ret = btrfs_remove_free_space(block_group, start, num_bytes);
	} else {
		mutex_lock(&caching_ctl->mutex);

		if (start >= caching_ctl->progress) {
			ret = add_excluded_extent(root, start, num_bytes);
		} else if (start + num_bytes <= caching_ctl->progress) {
			ret = btrfs_remove_free_space(block_group,
						      start, num_bytes);
		} else {
			num_bytes = caching_ctl->progress - start;
			ret = btrfs_remove_free_space(block_group,
						      start, num_bytes);
			if (ret)
				goto out_lock;

			num_bytes = (start + num_bytes) -
				caching_ctl->progress;
			start = caching_ctl->progress;
			ret = add_excluded_extent(root, start, num_bytes);
		}
out_lock:
		mutex_unlock(&caching_ctl->mutex);
		put_caching_control(caching_ctl);
	}
	btrfs_put_block_group(block_group);
	return ret;
}

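/*
 * On filesystems with mixed block groups, walk the file extent items in @eb
 * and keep the referenced ranges out of the free space cache while the log
 * containing them is replayed.
 */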
int btrfs_exclude_logged_extents(struct btrfs_root *log,
				 struct extent_buffer *eb)
{
	struct btrfs_file_extent_item *item;
	struct btrfs_key key;
	int found_type;
	int i;

	if (!btrfs_fs_incompat(log->fs_info, MIXED_GROUPS))
		return 0;

	for (i = 0; i < btrfs_header_nritems(eb); i++) {
		btrfs_item_key_to_cpu(eb, &key, i);
		if (key.type != BTRFS_EXTENT_DATA_KEY)
			continue;
		item = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item);
		found_type = btrfs_file_extent_type(eb, item);
		if (found_type == BTRFS_FILE_EXTENT_INLINE)
			continue;
		if (btrfs_file_extent_disk_bytenr(eb, item) == 0)
			continue;
		key.objectid = btrfs_file_extent_disk_bytenr(eb, item);
		key.offset = btrfs_file_extent_disk_num_bytes(eb, item);
		__exclude_logged_extent(log, key.objectid, key.offset);
	}

	return 0;
}

/**
 * btrfs_update_reserved_bytes - update the block_group and space info counters
 * @cache:	The cache we are manipulating
 * @num_bytes:	The number of bytes in question
 * @reserve:	One of the reservation enums
 * @delalloc:   The blocks are allocated for the delalloc write
 *
 * This is called by the allocator when it reserves space, or by somebody who is
 * freeing space that was never actually used on disk.  For example if you
 * reserve some space for a new leaf in transaction A and before transaction A
 * commits you free that leaf, you call this with reserve set to 0 in order to
 * clear the reservation.
 *
 * Metadata reservations should be called with RESERVE_ALLOC so we do the proper
 * ENOSPC accounting.  For data we handle the reservation through clearing the
 * delalloc bits in the io_tree.  We have to do this since we could end up
 * allocating less disk space for the amount of data we have reserved in the
 * case of compression.
 *
 * If this is a reservation and the block group has become read only we cannot
 * make the reservation and return -EAGAIN, otherwise this function always
 * succeeds.
 */
static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
				       u64 num_bytes, int reserve, int delalloc)
{
	struct btrfs_space_info *space_info = cache->space_info;
	int ret = 0;

	spin_lock(&space_info->lock);
	spin_lock(&cache->lock);
	if (reserve != RESERVE_FREE) {
		if (cache->ro) {
			ret = -EAGAIN;
		} else {
			cache->reserved += num_bytes;
			space_info->bytes_reserved += num_bytes;
			if (reserve == RESERVE_ALLOC) {
				trace_btrfs_space_reservation(cache->fs_info,
						"space_info", space_info->flags,
						num_bytes, 0);
				space_info->bytes_may_use -= num_bytes;
			}

			if (delalloc)
				cache->delalloc_bytes += num_bytes;
		}
	} else {
		if (cache->ro)
			space_info->bytes_readonly += num_bytes;
		cache->reserved -= num_bytes;
		space_info->bytes_reserved -= num_bytes;

		if (delalloc)
			cache->delalloc_bytes -= num_bytes;
	}
	spin_unlock(&cache->lock);
	spin_unlock(&space_info->lock);
	return ret;
}

void btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_caching_control *next;
	struct btrfs_caching_control *caching_ctl;
	struct btrfs_block_group_cache *cache;

	down_write(&fs_info->commit_root_sem);

	list_for_each_entry_safe(caching_ctl, next,
				 &fs_info->caching_block_groups, list) {
		cache = caching_ctl->block_group;
		if (block_group_cache_done(cache)) {
			cache->last_byte_to_unpin = (u64)-1;
			list_del_init(&caching_ctl->list);
			put_caching_control(caching_ctl);
		} else {
			cache->last_byte_to_unpin = caching_ctl->progress;
		}
	}

	if (fs_info->pinned_extents == &fs_info->freed_extents[0])
		fs_info->pinned_extents = &fs_info->freed_extents[1];
	else
		fs_info->pinned_extents = &fs_info->freed_extents[0];

	up_write(&fs_info->commit_root_sem);

	update_global_block_rsv(fs_info);
}

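/*
 * Return the pinned range [start, end] to the space accounting, adding the
 * portion below each block group's last_byte_to_unpin back to its free space
 * cache; the global reserve may top itself up from the freed bytes first.
 */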
static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_block_group_cache *cache = NULL;
	struct btrfs_space_info *space_info;
	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
	u64 len;
	bool readonly;

	while (start <= end) {
		readonly = false;
		if (!cache ||
		    start >= cache->key.objectid + cache->key.offset) {
			if (cache)
				btrfs_put_block_group(cache);
			cache = btrfs_lookup_block_group(fs_info, start);
			BUG_ON(!cache); /* Logic error */
		}

		len = cache->key.objectid + cache->key.offset - start;
		len = min(len, end + 1 - start);

		if (start < cache->last_byte_to_unpin) {
			len = min(len, cache->last_byte_to_unpin - start);
			btrfs_add_free_space(cache, start, len);
		}

		start += len;
		space_info = cache->space_info;

		spin_lock(&space_info->lock);
		spin_lock(&cache->lock);
		cache->pinned -= len;
		space_info->bytes_pinned -= len;
		percpu_counter_add(&space_info->total_bytes_pinned, -len);
		if (cache->ro) {
			space_info->bytes_readonly += len;
			readonly = true;
		}
		spin_unlock(&cache->lock);
		if (!readonly && global_rsv->space_info == space_info) {
			spin_lock(&global_rsv->lock);
			if (!global_rsv->full) {
				len = min(len, global_rsv->size -
					  global_rsv->reserved);
				global_rsv->reserved += len;
				space_info->bytes_may_use += len;
				if (global_rsv->reserved >= global_rsv->size)
					global_rsv->full = 1;
			}
			spin_unlock(&global_rsv->lock);
		}
		spin_unlock(&space_info->lock);
	}

	if (cache)
		btrfs_put_block_group(cache);
	return 0;
}

int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_io_tree *unpin;
	u64 start;
	u64 end;
	int ret;

	if (trans->aborted)
		return 0;

	if (fs_info->pinned_extents == &fs_info->freed_extents[0])
		unpin = &fs_info->freed_extents[1];
	else
		unpin = &fs_info->freed_extents[0];

	while (1) {
		ret = find_first_extent_bit(unpin, 0, &start, &end,
					    EXTENT_DIRTY, NULL);
		if (ret)
			break;

		if (btrfs_test_opt(root, DISCARD))
			ret = btrfs_discard_extent(root, start,
						   end + 1 - start, NULL);

		clear_extent_dirty(unpin, start, end, GFP_NOFS);
		unpin_extent_range(root, start, end);
		cond_resched();
	}

	return 0;
}

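/*
 * Bump the pinned-bytes counter of the space_info backing extents owned by
 * @owner: data for file extents, system or metadata for tree blocks.
 */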
static void add_pinned_bytes(struct btrfs_fs_info *fs_info, u64 num_bytes,
			     u64 owner, u64 root_objectid)
{
	struct btrfs_space_info *space_info;
	u64 flags;

	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		if (root_objectid == BTRFS_CHUNK_TREE_OBJECTID)
			flags = BTRFS_BLOCK_GROUP_SYSTEM;
		else
			flags = BTRFS_BLOCK_GROUP_METADATA;
	} else {
		flags = BTRFS_BLOCK_GROUP_DATA;
	}

	space_info = __find_space_info(fs_info, flags);
	BUG_ON(!space_info); /* Logic bug */
	percpu_counter_add(&space_info->total_bytes_pinned, num_bytes);
}

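/*
 * Drop @refs_to_drop references to the extent at @bytenr, deleting backrefs
 * and, once the last reference is gone, the extent item itself.
 */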
static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				u64 bytenr, u64 num_bytes, u64 parent,
				u64 root_objectid, u64 owner_objectid,
				u64 owner_offset, int refs_to_drop,
				struct btrfs_delayed_extent_op *extent_op,
				int no_quota)
{
	struct btrfs_key key;
	struct btrfs_path *path;
	struct btrfs_fs_info *info = root->fs_info;
	struct btrfs_root *extent_root = info->extent_root;
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	struct btrfs_extent_inline_ref *iref;
	int ret;
	int is_data;
	int extent_slot = 0;
	int found_extent = 0;
	int num_to_del = 1;
	u32 item_size;
	u64 refs;
	int last_ref = 0;
	enum btrfs_qgroup_operation_type type = BTRFS_QGROUP_OPER_SUB_EXCL;
	bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
						 SKINNY_METADATA);

	if (!info->quota_enabled || !is_fstree(root_objectid))
		no_quota = 1;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->reada = 1;
	path->leave_spinning = 1;

	is_data = owner_objectid >= BTRFS_FIRST_FREE_OBJECTID;
	BUG_ON(!is_data && refs_to_drop != 1);

	if (is_data)
		skinny_metadata = 0;

	ret = lookup_extent_backref(trans, extent_root, path, &iref,
				    bytenr, num_bytes, parent,
				    root_objectid, owner_objectid,
				    owner_offset);
	if (ret == 0) {
		extent_slot = path->slots[0];
		while (extent_slot >= 0) {
			btrfs_item_key_to_cpu(path->nodes[0], &key,
					      extent_slot);
			if (key.objectid != bytenr)
				break;
			if (key.type == BTRFS_EXTENT_ITEM_KEY &&
			    key.offset == num_bytes) {
				found_extent = 1;
				break;
			}
			if (key.type == BTRFS_METADATA_ITEM_KEY &&
			    key.offset == owner_objectid) {
				found_extent = 1;
				break;
			}
			if (path->slots[0] - extent_slot > 5)
				break;
			extent_slot--;
		}
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
		item_size = btrfs_item_size_nr(path->nodes[0], extent_slot);
		if (found_extent && item_size < sizeof(*ei))
			found_extent = 0;
#endif
		if (!found_extent) {
			BUG_ON(iref);
			ret = remove_extent_backref(trans, extent_root, path,
						    NULL, refs_to_drop,
						    is_data, &last_ref);
			if (ret) {
				btrfs_abort_transaction(trans, extent_root, ret);
				goto out;
			}
			btrfs_release_path(path);
			path->leave_spinning = 1;

			key.objectid = bytenr;
			key.type = BTRFS_EXTENT_ITEM_KEY;
			key.offset = num_bytes;

			if (!is_data && skinny_metadata) {
				key.type = BTRFS_METADATA_ITEM_KEY;
				key.offset = owner_objectid;
			}

			ret = btrfs_search_slot(trans, extent_root,
						&key, path, -1, 1);
			if (ret > 0 && skinny_metadata && path->slots[0]) {
				/*
				 * Couldn't find our skinny metadata item,
				 * see if we have ye olde extent item.
				 */
				path->slots[0]--;
				btrfs_item_key_to_cpu(path->nodes[0], &key,
						      path->slots[0]);
				if (key.objectid == bytenr &&
				    key.type == BTRFS_EXTENT_ITEM_KEY &&
				    key.offset == num_bytes)
					ret = 0;
			}

			if (ret > 0 && skinny_metadata) {
				skinny_metadata = false;
				key.objectid = bytenr;
				key.type = BTRFS_EXTENT_ITEM_KEY;
				key.offset = num_bytes;
				btrfs_release_path(path);
				ret = btrfs_search_slot(trans, extent_root,
							&key, path, -1, 1);
			}

			if (ret) {
				btrfs_err(info, "umm, got %d back from search, was looking for %llu",
					ret, bytenr);
				if (ret > 0)
					btrfs_print_leaf(extent_root,
							 path->nodes[0]);
			}
			if (ret < 0) {
				btrfs_abort_transaction(trans, extent_root, ret);
				goto out;
			}
			extent_slot = path->slots[0];
		}
	} else if (WARN_ON(ret == -ENOENT)) {
		btrfs_print_leaf(extent_root, path->nodes[0]);
		btrfs_err(info,
			"unable to find ref byte nr %llu parent %llu root %llu owner %llu offset %llu",
			bytenr, parent, root_objectid, owner_objectid,
			owner_offset);
		btrfs_abort_transaction(trans, extent_root, ret);
		goto out;
	} else {
		btrfs_abort_transaction(trans, extent_root, ret);
		goto out;
	}

	leaf = path->nodes[0];
	item_size = btrfs_item_size_nr(leaf, extent_slot);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (item_size < sizeof(*ei)) {
		BUG_ON(found_extent || extent_slot != path->slots[0]);
		ret = convert_extent_item_v0(trans, extent_root, path,
					     owner_objectid, 0);
		if (ret < 0) {
			btrfs_abort_transaction(trans, extent_root, ret);
			goto out;
		}

		btrfs_release_path(path);
		path->leave_spinning = 1;

		key.objectid = bytenr;
		key.type = BTRFS_EXTENT_ITEM_KEY;
		key.offset = num_bytes;

		ret = btrfs_search_slot(trans, extent_root, &key, path,
					-1, 1);
		if (ret) {
			btrfs_err(info, "umm, got %d back from search, was looking for %llu",
				ret, bytenr);
			btrfs_print_leaf(extent_root, path->nodes[0]);
		}
		if (ret < 0) {
			btrfs_abort_transaction(trans, extent_root, ret);
			goto out;
		}

		extent_slot = path->slots[0];
		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, extent_slot);
	}
#endif
	BUG_ON(item_size < sizeof(*ei));
	ei = btrfs_item_ptr(leaf, extent_slot,
			    struct btrfs_extent_item);
	if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID &&
	    key.type == BTRFS_EXTENT_ITEM_KEY) {
		struct btrfs_tree_block_info *bi;
		BUG_ON(item_size < sizeof(*ei) + sizeof(*bi));
		bi = (struct btrfs_tree_block_info *)(ei + 1);
		WARN_ON(owner_objectid != btrfs_tree_block_level(leaf, bi));
	}

	refs = btrfs_extent_refs(leaf, ei);
	if (refs < refs_to_drop) {
		btrfs_err(info, "trying to drop %d refs but we only have %Lu "
			  "for bytenr %Lu", refs_to_drop, refs, bytenr);
		ret = -EINVAL;
		btrfs_abort_transaction(trans, extent_root, ret);
		goto out;
	}
	refs -= refs_to_drop;

	if (refs > 0) {
		type = BTRFS_QGROUP_OPER_SUB_SHARED;
		if (extent_op)
			__run_delayed_extent_op(extent_op, leaf, ei);
		/*
		 * In the case of an inline back ref, the reference count
		 * will be updated by remove_extent_backref
		 */
		if (iref) {
			BUG_ON(!found_extent);
		} else {
			btrfs_set_extent_refs(leaf, ei, refs);
			btrfs_mark_buffer_dirty(leaf);
		}
		if (found_extent) {
			ret = remove_extent_backref(trans, extent_root, path,
						    iref, refs_to_drop,
						    is_data, &last_ref);
			if (ret) {
				btrfs_abort_transaction(trans, extent_root, ret);
				goto out;
			}
		}
		add_pinned_bytes(root->fs_info, -num_bytes, owner_objectid,
				 root_objectid);
	} else {
		if (found_extent) {
			BUG_ON(is_data && refs_to_drop !=
			       extent_data_ref_count(root, path, iref));
			if (iref) {
				BUG_ON(path->slots[0] != extent_slot);
			} else {
				BUG_ON(path->slots[0] != extent_slot + 1);
				path->slots[0] = extent_slot;
				num_to_del = 2;
			}
		}

		last_ref = 1;
		ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
				      num_to_del);
		if (ret) {
			btrfs_abort_transaction(trans, extent_root, ret);
			goto out;
		}
		btrfs_release_path(path);

		if (is_data) {
			ret = btrfs_del_csums(trans, root, bytenr, num_bytes);
			if (ret) {
				btrfs_abort_transaction(trans, extent_root, ret);
				goto out;
			}
		}

		ret = update_block_group(root, bytenr, num_bytes, 0);
		if (ret) {
			btrfs_abort_transaction(trans, extent_root, ret);
			goto out;
		}
	}
	btrfs_release_path(path);

	/* Deal with the quota accounting */
	if (!ret && last_ref && !no_quota) {
		int mod_seq = 0;

		if (owner_objectid >= BTRFS_FIRST_FREE_OBJECTID &&
		    type == BTRFS_QGROUP_OPER_SUB_SHARED)
			mod_seq = 1;
		ret = btrfs_qgroup_record_ref(trans, info, root_objectid,
					      bytenr, num_bytes, type,
					      mod_seq);
	}
out:
	btrfs_free_path(path);
	return ret;
}

/*
 * when we free a block, it is possible (and likely) that we free the last
 * delayed ref for that extent as well.  This searches the delayed ref tree for
 * a given extent, and if there are no other delayed refs to be processed, it
 * removes it from the tree.
 */
static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root, u64 bytenr)
{
	struct btrfs_delayed_ref_head *head;
	struct btrfs_delayed_ref_root *delayed_refs;
	int ret = 0;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);
	head = btrfs_find_delayed_ref_head(trans, bytenr);
	if (!head)
		goto out_delayed_unlock;

	spin_lock(&head->lock);
	if (rb_first(&head->ref_root))
		goto out;

	if (head->extent_op) {
		if (!head->must_insert_reserved)
			goto out;
		btrfs_free_delayed_extent_op(head->extent_op);
		head->extent_op = NULL;
	}

	/*
	 * waiting for the lock here would deadlock.  If someone else has it
	 * locked they are already in the process of dropping it anyway
	 */
	if (!mutex_trylock(&head->mutex))
		goto out;

	/*
	 * at this point we have a head with no other entries.  Go
	 * ahead and process it.
	 */
	head->node.in_tree = 0;
	rb_erase(&head->href_node, &delayed_refs->href_root);

	atomic_dec(&delayed_refs->num_entries);

	/*
	 * we don't take a ref on the node because we're removing it from the
	 * tree, so we just steal the ref the tree was holding.
	 */
	delayed_refs->num_heads--;
	if (head->processing == 0)
		delayed_refs->num_heads_ready--;
	head->processing = 0;
	spin_unlock(&head->lock);
	spin_unlock(&delayed_refs->lock);

	BUG_ON(head->extent_op);
	if (head->must_insert_reserved)
		ret = 1;

	mutex_unlock(&head->mutex);
	btrfs_put_delayed_ref(&head->node);
	return ret;
out:
	spin_unlock(&head->lock);

out_delayed_unlock:
	spin_unlock(&delayed_refs->lock);
	return 0;
}

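/*
 * Free a tree block.  Unless this is the log tree (log blocks never
 * enter the extent allocation tree), a delayed ref is queued to drop
 * the reference.  If this was the last reference and the block was
 * allocated in the current transaction and never written out, it can
 * be handed straight back to the free space cache; otherwise it stays
 * pinned until the transaction commits.
 */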
void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   struct extent_buffer *buf,
			   u64 parent, int last_ref)
{
	struct btrfs_block_group_cache *cache = NULL;
	int pin = 1;
	int ret;

	if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
		ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
					buf->start, buf->len,
					parent, root->root_key.objectid,
					btrfs_header_level(buf),
					BTRFS_DROP_DELAYED_REF, NULL, 0);
		BUG_ON(ret); /* -ENOMEM */
	}

	if (!last_ref)
		return;

	cache = btrfs_lookup_block_group(root->fs_info, buf->start);

	if (btrfs_header_generation(buf) == trans->transid) {
		if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
			ret = check_ref_cleanup(trans, root, buf->start);
			if (!ret)
				goto out;
		}

		if (btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
			pin_down_extent(root, cache, buf->start, buf->len, 1);
			goto out;
		}

		WARN_ON(test_bit(EXTENT_BUFFER_DIRTY, &buf->bflags));

		btrfs_add_free_space(cache, buf->start, buf->len);
		btrfs_update_reserved_bytes(cache, buf->len, RESERVE_FREE, 0);
		trace_btrfs_reserved_extent_free(root, buf->start, buf->len);
		pin = 0;
	}
out:
	if (pin)
		add_pinned_bytes(root->fs_info, buf->len,
				 btrfs_header_level(buf),
				 root->root_key.objectid);

	/*
	 * Deleting the buffer, clear the corrupt flag since it doesn't matter
	 * anymore.
	 */
	clear_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags);
	btrfs_put_block_group(cache);
}

/* Can return -ENOMEM */
int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		      u64 bytenr, u64 num_bytes, u64 parent, u64 root_objectid,
		      u64 owner, u64 offset, int no_quota)
{
	int ret;
	struct btrfs_fs_info *fs_info = root->fs_info;

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
	if (unlikely(test_bit(BTRFS_ROOT_DUMMY_ROOT, &root->state)))
		return 0;
#endif
	add_pinned_bytes(root->fs_info, num_bytes, owner, root_objectid);

	/*
	 * tree log blocks never actually go into the extent allocation
	 * tree, just update pinning info and exit early.
	 */
	if (root_objectid == BTRFS_TREE_LOG_OBJECTID) {
		WARN_ON(owner >= BTRFS_FIRST_FREE_OBJECTID);
		/* unlocks the pinned mutex */
		btrfs_pin_extent(root, bytenr, num_bytes, 1);
		ret = 0;
	} else if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
					num_bytes,
					parent, root_objectid, (int)owner,
					BTRFS_DROP_DELAYED_REF, NULL, no_quota);
	} else {
		ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
						num_bytes,
						parent, root_objectid, owner,
						offset, BTRFS_DROP_DELAYED_REF,
						NULL, no_quota);
	}
	return ret;
}

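/*
 * Round an allocation start up to the stripe boundary.  The cache and
 * num_bytes arguments are currently unused; only the stripe size of
 * the device set matters here.
 */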
static u64 stripe_align(struct btrfs_root *root,
			struct btrfs_block_group_cache *cache,
			u64 val, u64 num_bytes)
{
	u64 ret = ALIGN(val, root->stripesize);
	return ret;
}

/*
 * when we wait for progress in the block group caching, it's because
 * our allocation attempt failed at least once.  So, we must sleep
 * and let some progress happen before we try again.
 *
 * This function will sleep at least once waiting for new free space to
 * show up, and then it will check the block group free space numbers
 * for our min num_bytes.  Another option is to have it go ahead
 * and look in the rbtree for a free extent of a given size, but this
 * is a good start.
 *
 * Callers of this must check if cache->cached == BTRFS_CACHE_ERROR before using
 * any of the information in this block group.
 */
static noinline void
wait_block_group_cache_progress(struct btrfs_block_group_cache *cache,
				u64 num_bytes)
{
	struct btrfs_caching_control *caching_ctl;

	caching_ctl = get_caching_control(cache);
	if (!caching_ctl)
		return;

	wait_event(caching_ctl->wait, block_group_cache_done(cache) ||
		   (cache->free_space_ctl->free_space >= num_bytes));

	put_caching_control(caching_ctl);
}

static noinline int
wait_block_group_cache_done(struct btrfs_block_group_cache *cache)
{
	struct btrfs_caching_control *caching_ctl;
	int ret = 0;

	caching_ctl = get_caching_control(cache);
	if (!caching_ctl)
		return (cache->cached == BTRFS_CACHE_ERROR) ? -EIO : 0;

	wait_event(caching_ctl->wait, block_group_cache_done(cache));
	if (cache->cached == BTRFS_CACHE_ERROR)
		ret = -EIO;
	put_caching_control(caching_ctl);
	return ret;
}

int __get_raid_index(u64 flags)
{
	if (flags & BTRFS_BLOCK_GROUP_RAID10)
		return BTRFS_RAID_RAID10;
	else if (flags & BTRFS_BLOCK_GROUP_RAID1)
		return BTRFS_RAID_RAID1;
	else if (flags & BTRFS_BLOCK_GROUP_DUP)
		return BTRFS_RAID_DUP;
	else if (flags & BTRFS_BLOCK_GROUP_RAID0)
		return BTRFS_RAID_RAID0;
	else if (flags & BTRFS_BLOCK_GROUP_RAID5)
		return BTRFS_RAID_RAID5;
	else if (flags & BTRFS_BLOCK_GROUP_RAID6)
		return BTRFS_RAID_RAID6;

	return BTRFS_RAID_SINGLE; /* BTRFS_BLOCK_GROUP_SINGLE */
}

int get_block_group_index(struct btrfs_block_group_cache *cache)
{
	return __get_raid_index(cache->flags);
}

static const char *btrfs_raid_type_names[BTRFS_NR_RAID_TYPES] = {
	[BTRFS_RAID_RAID10]	= "raid10",
	[BTRFS_RAID_RAID1]	= "raid1",
	[BTRFS_RAID_DUP]	= "dup",
	[BTRFS_RAID_RAID0]	= "raid0",
	[BTRFS_RAID_SINGLE]	= "single",
	[BTRFS_RAID_RAID5]	= "raid5",
	[BTRFS_RAID_RAID6]	= "raid6",
};

static const char *get_raid_name(enum btrfs_raid_types type)
{
	if (type >= BTRFS_NR_RAID_TYPES)
		return NULL;

	return btrfs_raid_type_names[type];
}

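/*
 * Allocation loop states for find_free_extent(), in escalation order:
 * each pass relaxes the search a bit more, from not waiting on caching
 * kthreads at all, to waiting on them, to forcing a chunk allocation,
 * and finally to ignoring the empty_size/empty_cluster padding
 * entirely (see the comment above the loop escalation below).
 */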
enum btrfs_loop_type {
	LOOP_CACHING_NOWAIT = 0,
	LOOP_CACHING_WAIT = 1,
	LOOP_ALLOC_CHUNK = 2,
	LOOP_NO_EMPTY_SIZE = 3,
};

static inline void
btrfs_lock_block_group(struct btrfs_block_group_cache *cache,
		       int delalloc)
{
	if (delalloc)
		down_read(&cache->data_rwsem);
}

static inline void
btrfs_grab_block_group(struct btrfs_block_group_cache *cache,
		       int delalloc)
{
	btrfs_get_block_group(cache);
	if (delalloc)
		down_read(&cache->data_rwsem);
}

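/*
 * Return the block group backing the cluster, grabbed and (for delalloc
 * allocations) read-locked.  Because we cannot block on data_rwsem while
 * holding the cluster's refill_lock, the slow path drops the refill_lock,
 * takes the rwsem, and retries; the "locked" flag remembers a block group
 * locked on a previous iteration so it can be released if the cluster has
 * moved on in the meantime.
 */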
static struct btrfs_block_group_cache *
btrfs_lock_cluster(struct btrfs_block_group_cache *block_group,
		   struct btrfs_free_cluster *cluster,
		   int delalloc)
{
	struct btrfs_block_group_cache *used_bg;
	bool locked = false;
again:
	spin_lock(&cluster->refill_lock);
	if (locked) {
		if (used_bg == cluster->block_group)
			return used_bg;

		up_read(&used_bg->data_rwsem);
		btrfs_put_block_group(used_bg);
	}

	used_bg = cluster->block_group;
	if (!used_bg)
		return NULL;

	if (used_bg == block_group)
		return used_bg;

	btrfs_get_block_group(used_bg);

	if (!delalloc)
		return used_bg;

	if (down_read_trylock(&used_bg->data_rwsem))
		return used_bg;

	spin_unlock(&cluster->refill_lock);
	down_read(&used_bg->data_rwsem);
	locked = true;
	goto again;
}

static inline void
btrfs_release_block_group(struct btrfs_block_group_cache *cache,
			  int delalloc)
{
	if (delalloc)
		up_read(&cache->data_rwsem);

	btrfs_put_block_group(cache);
}

/*
 * walks the btree of allocated extents and finds a hole of a given size.
 * The key ins is changed to record the hole:
 * ins->objectid == start position
 * ins->flags = BTRFS_EXTENT_ITEM_KEY
 * ins->offset == the size of the hole.
 * Any available blocks before search_start are skipped.
 *
 * If there is no suitable free space, we will record the max size of
 * the free space extent currently.
 */
static noinline int find_free_extent(struct btrfs_root *orig_root,
				     u64 num_bytes, u64 empty_size,
				     u64 hint_byte, struct btrfs_key *ins,
				     u64 flags, int delalloc)
{
	int ret = 0;
	struct btrfs_root *root = orig_root->fs_info->extent_root;
	struct btrfs_free_cluster *last_ptr = NULL;
	struct btrfs_block_group_cache *block_group = NULL;
	u64 search_start = 0;
	u64 max_extent_size = 0;
	int empty_cluster = 2 * 1024 * 1024;
	struct btrfs_space_info *space_info;
	int loop = 0;
	int index = __get_raid_index(flags);
	int alloc_type = (flags & BTRFS_BLOCK_GROUP_DATA) ?
		RESERVE_ALLOC_NO_ACCOUNT : RESERVE_ALLOC;
	bool failed_cluster_refill = false;
	bool failed_alloc = false;
	bool use_cluster = true;
	bool have_caching_bg = false;

	WARN_ON(num_bytes < root->sectorsize);
	btrfs_set_key_type(ins, BTRFS_EXTENT_ITEM_KEY);
	ins->objectid = 0;
	ins->offset = 0;

	trace_find_free_extent(orig_root, num_bytes, empty_size, flags);

	space_info = __find_space_info(root->fs_info, flags);
	if (!space_info) {
		btrfs_err(root->fs_info, "No space info for %llu", flags);
		return -ENOSPC;
	}

	/*
	 * If the space info is for both data and metadata it means we have a
	 * small filesystem and we can't use the clustering stuff.
	 */
	if (btrfs_mixed_space_info(space_info))
		use_cluster = false;

	if (flags & BTRFS_BLOCK_GROUP_METADATA && use_cluster) {
		last_ptr = &root->fs_info->meta_alloc_cluster;
		if (!btrfs_test_opt(root, SSD))
			empty_cluster = 64 * 1024;
	}

	if ((flags & BTRFS_BLOCK_GROUP_DATA) && use_cluster &&
	    btrfs_test_opt(root, SSD)) {
		last_ptr = &root->fs_info->data_alloc_cluster;
	}

	if (last_ptr) {
		spin_lock(&last_ptr->lock);
		if (last_ptr->block_group)
			hint_byte = last_ptr->window_start;
		spin_unlock(&last_ptr->lock);
	}

	search_start = max(search_start, first_logical_byte(root, 0));
	search_start = max(search_start, hint_byte);

	if (!last_ptr)
		empty_cluster = 0;

	if (search_start == hint_byte) {
		block_group = btrfs_lookup_block_group(root->fs_info,
						       search_start);
		/*
		 * we don't want to use the block group if it doesn't match our
		 * allocation bits, or if it's not cached.
		 *
		 * However if we are re-searching with an ideal block group
		 * picked out then we don't care that the block group is cached.
		 */
		if (block_group && block_group_bits(block_group, flags) &&
		    block_group->cached != BTRFS_CACHE_NO) {
			down_read(&space_info->groups_sem);
			if (list_empty(&block_group->list) ||
			    block_group->ro) {
				/*
				 * someone is removing this block group,
				 * we can't jump into the have_block_group
				 * target because our list pointers are not
				 * valid
				 */
				btrfs_put_block_group(block_group);
				up_read(&space_info->groups_sem);
			} else {
				index = get_block_group_index(block_group);
				btrfs_lock_block_group(block_group, delalloc);
				goto have_block_group;
			}
		} else if (block_group) {
			btrfs_put_block_group(block_group);
		}
	}
search:
	have_caching_bg = false;
	down_read(&space_info->groups_sem);
	list_for_each_entry(block_group, &space_info->block_groups[index],
			    list) {
		u64 offset;
		int cached;

		btrfs_grab_block_group(block_group, delalloc);
		search_start = block_group->key.objectid;

		/*
		 * this can happen if we end up cycling through all the
		 * raid types, but we want to make sure we only allocate
		 * for the proper type.
		 */
		if (!block_group_bits(block_group, flags)) {
			u64 extra = BTRFS_BLOCK_GROUP_DUP |
				BTRFS_BLOCK_GROUP_RAID1 |
				BTRFS_BLOCK_GROUP_RAID5 |
				BTRFS_BLOCK_GROUP_RAID6 |
				BTRFS_BLOCK_GROUP_RAID10;

			/*
			 * if they asked for extra copies and this block group
			 * doesn't provide them, bail.  This does allow us to
			 * fill raid0 from raid1.
			 */
			if ((flags & extra) && !(block_group->flags & extra))
				goto loop;
		}

have_block_group:
		cached = block_group_cache_done(block_group);
		if (unlikely(!cached)) {
			ret = cache_block_group(block_group, 0);
			BUG_ON(ret < 0);
			ret = 0;
		}

		if (unlikely(block_group->cached == BTRFS_CACHE_ERROR))
			goto loop;
		if (unlikely(block_group->ro))
			goto loop;

		/*
		 * Ok we want to try and use the cluster allocator, so
		 * lets look there
		 */
		if (last_ptr) {
			struct btrfs_block_group_cache *used_block_group;
			unsigned long aligned_cluster;
			/*
			 * the refill lock keeps out other
			 * people trying to start a new cluster
			 */
			used_block_group = btrfs_lock_cluster(block_group,
							      last_ptr,
							      delalloc);
			if (!used_block_group)
				goto refill_cluster;

			if (used_block_group != block_group &&
			    (used_block_group->ro ||
			     !block_group_bits(used_block_group, flags)))
				goto release_cluster;

			offset = btrfs_alloc_from_cluster(used_block_group,
						last_ptr,
						num_bytes,
						used_block_group->key.objectid,
						&max_extent_size);
			if (offset) {
				/* we have a block, we're done */
				spin_unlock(&last_ptr->refill_lock);
				trace_btrfs_reserve_extent_cluster(root,
						used_block_group,
						search_start, num_bytes);
				if (used_block_group != block_group) {
					btrfs_release_block_group(block_group,
								  delalloc);
					block_group = used_block_group;
				}
				goto checks;
			}

			WARN_ON(last_ptr->block_group != used_block_group);
release_cluster:
			/* If we are on LOOP_NO_EMPTY_SIZE, we can't
			 * set up a new cluster, so let's just skip it
			 * and let the allocator find whatever block
			 * it can find.  If we reach this point, we
			 * will have tried the cluster allocator
			 * plenty of times and not have found
			 * anything, so we are likely way too
			 * fragmented for the clustering stuff to find
			 * anything.
			 *
			 * However, if the cluster is taken from the
			 * current block group, release the cluster
			 * first, so that we stand a better chance of
			 * succeeding in the unclustered
			 * allocation.  */
			if (loop >= LOOP_NO_EMPTY_SIZE &&
			    used_block_group != block_group) {
				spin_unlock(&last_ptr->refill_lock);
				btrfs_release_block_group(used_block_group,
							  delalloc);
				goto unclustered_alloc;
			}

			/*
			 * this cluster didn't work out, free it and
			 * start over
			 */
			btrfs_return_cluster_to_free_space(NULL, last_ptr);

			if (used_block_group != block_group)
				btrfs_release_block_group(used_block_group,
							  delalloc);
refill_cluster:
			if (loop >= LOOP_NO_EMPTY_SIZE) {
				spin_unlock(&last_ptr->refill_lock);
				goto unclustered_alloc;
			}

			aligned_cluster = max_t(unsigned long,
						empty_cluster + empty_size,
						block_group->full_stripe_len);

			/* allocate a cluster in this block group */
			ret = btrfs_find_space_cluster(root, block_group,
						       last_ptr, search_start,
						       num_bytes,
						       aligned_cluster);
			if (ret == 0) {
				/*
				 * now pull our allocation out of this
				 * cluster
				 */
				offset = btrfs_alloc_from_cluster(block_group,
							last_ptr,
							num_bytes,
							search_start,
							&max_extent_size);
				if (offset) {
					/* we found one, proceed */
					spin_unlock(&last_ptr->refill_lock);
					trace_btrfs_reserve_extent_cluster(root,
						block_group, search_start,
						num_bytes);
					goto checks;
				}
			} else if (!cached && loop > LOOP_CACHING_NOWAIT
				   && !failed_cluster_refill) {
				spin_unlock(&last_ptr->refill_lock);

				failed_cluster_refill = true;
				wait_block_group_cache_progress(block_group,
				       num_bytes + empty_cluster + empty_size);
				goto have_block_group;
			}

			/*
			 * at this point we either didn't find a cluster
			 * or we weren't able to allocate a block from our
			 * cluster.  Free the cluster we've been trying
			 * to use, and go to the next block group
			 */
			btrfs_return_cluster_to_free_space(NULL, last_ptr);
			spin_unlock(&last_ptr->refill_lock);
			goto loop;
		}

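		/*
		 * The cluster allocator either was not usable or did not
		 * yield anything; fall back to scanning this block group's
		 * free space directly.  The quick free_space check first
		 * saves walking the free space tree when the group clearly
		 * cannot satisfy the request.
		 */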
unclustered_alloc:
		spin_lock(&block_group->free_space_ctl->tree_lock);
		if (cached &&
		    block_group->free_space_ctl->free_space <
		    num_bytes + empty_cluster + empty_size) {
			if (block_group->free_space_ctl->free_space >
			    max_extent_size)
				max_extent_size =
					block_group->free_space_ctl->free_space;
			spin_unlock(&block_group->free_space_ctl->tree_lock);
			goto loop;
		}
		spin_unlock(&block_group->free_space_ctl->tree_lock);

		offset = btrfs_find_space_for_alloc(block_group, search_start,
						    num_bytes, empty_size,
						    &max_extent_size);
		/*
		 * If we didn't find a chunk, and we haven't failed on this
		 * block group before, and this block group is in the middle of
		 * caching and we are ok with waiting, then go ahead and wait
		 * for progress to be made, and set failed_alloc to true.
		 *
		 * If failed_alloc is true then we've already waited on this
		 * block group once and should move on to the next block group.
		 */
		if (!offset && !failed_alloc && !cached &&
		    loop > LOOP_CACHING_NOWAIT) {
			wait_block_group_cache_progress(block_group,
						num_bytes + empty_size);
			failed_alloc = true;
			goto have_block_group;
		} else if (!offset) {
			if (!cached)
				have_caching_bg = true;
			goto loop;
		}
checks:
		search_start = stripe_align(root, block_group,
					    offset, num_bytes);

		/* move on to the next group */
		if (search_start + num_bytes >
		    block_group->key.objectid + block_group->key.offset) {
			btrfs_add_free_space(block_group, offset, num_bytes);
			goto loop;
		}

		if (offset < search_start)
			btrfs_add_free_space(block_group, offset,
					     search_start - offset);
		BUG_ON(offset > search_start);

		ret = btrfs_update_reserved_bytes(block_group, num_bytes,
						  alloc_type, delalloc);
		if (ret == -EAGAIN) {
			btrfs_add_free_space(block_group, offset, num_bytes);
			goto loop;
		}

		/* we are all good, lets return */
		ins->objectid = search_start;
		ins->offset = num_bytes;

		trace_btrfs_reserve_extent(orig_root, block_group,
					   search_start, num_bytes);
		btrfs_release_block_group(block_group, delalloc);
		break;
loop:
		failed_cluster_refill = false;
		failed_alloc = false;
		BUG_ON(index != get_block_group_index(block_group));
		btrfs_release_block_group(block_group, delalloc);
	}
	up_read(&space_info->groups_sem);

	if (!ins->objectid && loop >= LOOP_CACHING_WAIT && have_caching_bg)
		goto search;

	if (!ins->objectid && ++index < BTRFS_NR_RAID_TYPES)
		goto search;

	/*
	 * LOOP_CACHING_NOWAIT, search partially cached block groups, kicking
	 *			caching kthreads as we move along
	 * LOOP_CACHING_WAIT, search everything, and wait if our bg is caching
	 * LOOP_ALLOC_CHUNK, force a chunk allocation and try again
	 * LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try
	 *			again
	 */
	if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE) {
		index = 0;
		loop++;
		if (loop == LOOP_ALLOC_CHUNK) {
			struct btrfs_trans_handle *trans;
			int exist = 0;

			trans = current->journal_info;
			if (trans)
				exist = 1;
			else
				trans = btrfs_join_transaction(root);

			if (IS_ERR(trans)) {
				ret = PTR_ERR(trans);
				goto out;
			}

			ret = do_chunk_alloc(trans, root, flags,
					     CHUNK_ALLOC_FORCE);
			/*
			 * Do not bail out on ENOSPC since we
			 * can do more things.
			 */
			if (ret < 0 && ret != -ENOSPC)
				btrfs_abort_transaction(trans,
							root, ret);
			else
				ret = 0;
			if (!exist)
				btrfs_end_transaction(trans, root);
			if (ret)
				goto out;
		}

		if (loop == LOOP_NO_EMPTY_SIZE) {
			empty_size = 0;
			empty_cluster = 0;
		}

		goto search;
	} else if (!ins->objectid) {
		ret = -ENOSPC;
	} else if (ins->objectid) {
		ret = 0;
	}
out:
	if (ret == -ENOSPC)
		ins->offset = max_extent_size;
	return ret;
}

static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
			    int dump_block_groups)
{
	struct btrfs_block_group_cache *cache;
	int index = 0;

	spin_lock(&info->lock);
	printk(KERN_INFO "BTRFS: space_info %llu has %llu free, is %sfull\n",
	       info->flags,
	       info->total_bytes - info->bytes_used - info->bytes_pinned -
	       info->bytes_reserved - info->bytes_readonly,
	       (info->full) ? "" : "not ");
	printk(KERN_INFO "BTRFS: space_info total=%llu, used=%llu, pinned=%llu, "
	       "reserved=%llu, may_use=%llu, readonly=%llu\n",
	       info->total_bytes, info->bytes_used, info->bytes_pinned,
	       info->bytes_reserved, info->bytes_may_use,
	       info->bytes_readonly);
	spin_unlock(&info->lock);

	if (!dump_block_groups)
		return;

	down_read(&info->groups_sem);
again:
	list_for_each_entry(cache, &info->block_groups[index], list) {
		spin_lock(&cache->lock);
		printk(KERN_INFO "BTRFS: "
		       "block group %llu has %llu bytes, "
		       "%llu used %llu pinned %llu reserved %s\n",
		       cache->key.objectid, cache->key.offset,
		       btrfs_block_group_used(&cache->item), cache->pinned,
		       cache->reserved, cache->ro ? "[readonly]" : "");
		btrfs_dump_free_space(cache, bytes);
		spin_unlock(&cache->lock);
	}
	if (++index < BTRFS_NR_RAID_TYPES)
		goto again;
	up_read(&info->groups_sem);
}

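/*
 * Reserve an extent, preferring num_bytes but accepting as little as
 * min_alloc_size.  On ENOSPC the request is retried with progressively
 * smaller sizes: halved, capped by the largest free extent seen
 * (ins->offset), rounded down to the sector size, and never below
 * min_alloc_size.
 */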
int btrfs_reserve_extent(struct btrfs_root *root,
			 u64 num_bytes, u64 min_alloc_size,
			 u64 empty_size, u64 hint_byte,
			 struct btrfs_key *ins, int is_data, int delalloc)
{
	bool final_tried = false;
	u64 flags;
	int ret;

	flags = btrfs_get_alloc_profile(root, is_data);
again:
	WARN_ON(num_bytes < root->sectorsize);
	ret = find_free_extent(root, num_bytes, empty_size, hint_byte, ins,
			       flags, delalloc);

	if (ret == -ENOSPC) {
		if (!final_tried && ins->offset) {
			num_bytes = min(num_bytes >> 1, ins->offset);
			num_bytes = round_down(num_bytes, root->sectorsize);
			num_bytes = max(num_bytes, min_alloc_size);
			if (num_bytes == min_alloc_size)
				final_tried = true;
			goto again;
		} else if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
			struct btrfs_space_info *sinfo;

			sinfo = __find_space_info(root->fs_info, flags);
			btrfs_err(root->fs_info, "allocation failed flags %llu, wanted %llu",
				  flags, num_bytes);
			if (sinfo)
				dump_space_info(sinfo, num_bytes, 1);
		}
	}

	return ret;
}

static int __btrfs_free_reserved_extent(struct btrfs_root *root,
					u64 start, u64 len,
					int pin, int delalloc)
{
	struct btrfs_block_group_cache *cache;
	int ret = 0;

	cache = btrfs_lookup_block_group(root->fs_info, start);
	if (!cache) {
		btrfs_err(root->fs_info, "Unable to find block group for %llu",
			start);
		return -ENOSPC;
	}

	if (btrfs_test_opt(root, DISCARD))
		ret = btrfs_discard_extent(root, start, len, NULL);

	if (pin)
		pin_down_extent(root, cache, start, len, 1);
	else {
		btrfs_add_free_space(cache, start, len);
		btrfs_update_reserved_bytes(cache, len, RESERVE_FREE, delalloc);
	}
	btrfs_put_block_group(cache);

	trace_btrfs_reserved_extent_free(root, start, len);

	return ret;
}

int btrfs_free_reserved_extent(struct btrfs_root *root,
			       u64 start, u64 len, int delalloc)
{
	return __btrfs_free_reserved_extent(root, start, len, 0, delalloc);
}

int btrfs_free_and_pin_reserved_extent(struct btrfs_root *root,
				       u64 start, u64 len)
{
	return __btrfs_free_reserved_extent(root, start, len, 1, 0);
}

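/*
 * Insert the extent item for a newly allocated data extent with its
 * first backref stored inline.  A shared data ref (keyed on the parent
 * block) is used when parent is set, otherwise a full extent data ref
 * records root/owner/offset.  The on-disk layout written here is the
 * extent item, the inline ref type, then the ref body.
 */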
static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      u64 parent, u64 root_objectid,
				      u64 flags, u64 owner, u64 offset,
				      struct btrfs_key *ins, int ref_mod)
{
	int ret;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_extent_item *extent_item;
	struct btrfs_extent_inline_ref *iref;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	int type;
	u32 size;

	if (parent > 0)
		type = BTRFS_SHARED_DATA_REF_KEY;
	else
		type = BTRFS_EXTENT_DATA_REF_KEY;

	size = sizeof(*extent_item) + btrfs_extent_inline_ref_size(type);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->leave_spinning = 1;
	ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
				      ins, size);
	if (ret) {
		btrfs_free_path(path);
		return ret;
	}

	leaf = path->nodes[0];
	extent_item = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_item);
	btrfs_set_extent_refs(leaf, extent_item, ref_mod);
	btrfs_set_extent_generation(leaf, extent_item, trans->transid);
	btrfs_set_extent_flags(leaf, extent_item,
			       flags | BTRFS_EXTENT_FLAG_DATA);

	iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
	btrfs_set_extent_inline_ref_type(leaf, iref, type);
	if (parent > 0) {
		struct btrfs_shared_data_ref *ref;
		ref = (struct btrfs_shared_data_ref *)(iref + 1);
		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
		btrfs_set_shared_data_ref_count(leaf, ref, ref_mod);
	} else {
		struct btrfs_extent_data_ref *ref;
		ref = (struct btrfs_extent_data_ref *)(&iref->offset);
		btrfs_set_extent_data_ref_root(leaf, ref, root_objectid);
		btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
		btrfs_set_extent_data_ref_offset(leaf, ref, offset);
		btrfs_set_extent_data_ref_count(leaf, ref, ref_mod);
	}

	btrfs_mark_buffer_dirty(path->nodes[0]);
	btrfs_free_path(path);

	/* Always set parent to 0 here since it's exclusive anyway. */
	ret = btrfs_qgroup_record_ref(trans, fs_info, root_objectid,
				      ins->objectid, ins->offset,
				      BTRFS_QGROUP_OPER_ADD_EXCL, 0);
	if (ret)
		return ret;

	ret = update_block_group(root, ins->objectid, ins->offset, 1);
	if (ret) { /* -ENOENT, logic error */
		btrfs_err(fs_info, "update block group failed for %llu %llu",
			ins->objectid, ins->offset);
		BUG();
	}
	trace_btrfs_reserved_extent_alloc(root, ins->objectid, ins->offset);
	return ret;
}

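/*
 * Insert the extent item for a newly allocated tree block.  With the
 * SKINNY_METADATA incompat feature the item omits btrfs_tree_block_info
 * (the key carries the level instead), which keeps metadata extent
 * items smaller.  On failure the reserved range is pinned so it is not
 * leaked.
 */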
static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     u64 parent, u64 root_objectid,
				     u64 flags, struct btrfs_disk_key *key,
				     int level, struct btrfs_key *ins,
				     int no_quota)
{
	int ret;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_extent_item *extent_item;
	struct btrfs_tree_block_info *block_info;
	struct btrfs_extent_inline_ref *iref;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	u32 size = sizeof(*extent_item) + sizeof(*iref);
	u64 num_bytes = ins->offset;
	bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
						 SKINNY_METADATA);

	if (!skinny_metadata)
		size += sizeof(*block_info);

	path = btrfs_alloc_path();
	if (!path) {
		btrfs_free_and_pin_reserved_extent(root, ins->objectid,
						   root->leafsize);
		return -ENOMEM;
	}

	path->leave_spinning = 1;
	ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
				      ins, size);
	if (ret) {
		btrfs_free_and_pin_reserved_extent(root, ins->objectid,
						   root->leafsize);
		btrfs_free_path(path);
		return ret;
	}

	leaf = path->nodes[0];
	extent_item = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_item);
	btrfs_set_extent_refs(leaf, extent_item, 1);
	btrfs_set_extent_generation(leaf, extent_item, trans->transid);
	btrfs_set_extent_flags(leaf, extent_item,
			       flags | BTRFS_EXTENT_FLAG_TREE_BLOCK);

	if (skinny_metadata) {
		iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
		num_bytes = root->leafsize;
	} else {
		block_info = (struct btrfs_tree_block_info *)(extent_item + 1);
		btrfs_set_tree_block_key(leaf, block_info, key);
		btrfs_set_tree_block_level(leaf, block_info, level);
		iref = (struct btrfs_extent_inline_ref *)(block_info + 1);
	}

	if (parent > 0) {
		BUG_ON(!(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
		btrfs_set_extent_inline_ref_type(leaf, iref,
						 BTRFS_SHARED_BLOCK_REF_KEY);
		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
	} else {
		btrfs_set_extent_inline_ref_type(leaf, iref,
						 BTRFS_TREE_BLOCK_REF_KEY);
		btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
	}

	btrfs_mark_buffer_dirty(leaf);
	btrfs_free_path(path);

	if (!no_quota) {
		ret = btrfs_qgroup_record_ref(trans, fs_info, root_objectid,
					      ins->objectid, num_bytes,
					      BTRFS_QGROUP_OPER_ADD_EXCL, 0);
		if (ret)
			return ret;
	}

	ret = update_block_group(root, ins->objectid, root->leafsize, 1);
	if (ret) { /* -ENOENT, logic error */
		btrfs_err(fs_info, "update block group failed for %llu %llu",
			ins->objectid, ins->offset);
		BUG();
	}

	trace_btrfs_reserved_extent_alloc(root, ins->objectid, root->leafsize);
	return ret;
}

int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     u64 root_objectid, u64 owner,
				     u64 offset, struct btrfs_key *ins)
{
	int ret;

	BUG_ON(root_objectid == BTRFS_TREE_LOG_OBJECTID);

	ret = btrfs_add_delayed_data_ref(root->fs_info, trans, ins->objectid,
					 ins->offset, 0,
					 root_objectid, owner, offset,
					 BTRFS_ADD_DELAYED_EXTENT, NULL, 0);
	return ret;
}

/*
 * this is used by the tree logging recovery code.  It records that
 * an extent has been allocated and makes sure to clear the free
 * space cache bits as well
 */
int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   u64 root_objectid, u64 owner, u64 offset,
				   struct btrfs_key *ins)
{
	int ret;
	struct btrfs_block_group_cache *block_group;

	/*
	 * Mixed block groups will exclude before processing the log so we only
	 * need to do the exclude dance if this fs isn't mixed.
	 */
	if (!btrfs_fs_incompat(root->fs_info, MIXED_GROUPS)) {
		ret = __exclude_logged_extent(root, ins->objectid, ins->offset);
		if (ret)
			return ret;
	}

	block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid);
	if (!block_group)
		return -EINVAL;

	ret = btrfs_update_reserved_bytes(block_group, ins->offset,
					  RESERVE_ALLOC_NO_ACCOUNT, 0);
	BUG_ON(ret); /* logic error */
	ret = alloc_reserved_file_extent(trans, root, 0, root_objectid,
					 0, owner, offset, ins, 1);
	btrfs_put_block_group(block_group);
	return ret;
}

static struct extent_buffer *
btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		      u64 bytenr, u32 blocksize, int level)
{
	struct extent_buffer *buf;

	buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
	if (!buf)
		return ERR_PTR(-ENOMEM);
	btrfs_set_header_generation(buf, trans->transid);
	btrfs_set_buffer_lockdep_class(root->root_key.objectid, buf, level);
	btrfs_tree_lock(buf);
	clean_tree_block(trans, root, buf);
	clear_bit(EXTENT_BUFFER_STALE, &buf->bflags);

	btrfs_set_lock_blocking(buf);
	btrfs_set_buffer_uptodate(buf);

	if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
		/*
		 * we allow two log transactions at a time, use different
		 * EXTENT bit to differentiate dirty pages.
		 */
		if (root->log_transid % 2 == 0)
			set_extent_dirty(&root->dirty_log_pages, buf->start,
					buf->start + buf->len - 1, GFP_NOFS);
		else
			set_extent_new(&root->dirty_log_pages, buf->start,
					buf->start + buf->len - 1, GFP_NOFS);
	} else {
		set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
				 buf->start + buf->len - 1, GFP_NOFS);
	}
	trans->blocks_used++;
	/* this returns a buffer locked for blocking */
	return buf;
}

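/*
 * Pick a block reserve to charge a new tree block against.  The
 * fallback order is: the root's own reserve, a refreshed global
 * reserve (for the global type only), a fresh no-flush metadata
 * reservation, and finally stealing from the global reserve when the
 * space types match.
 */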
static struct btrfs_block_rsv *
use_block_rsv(struct btrfs_trans_handle *trans,
	      struct btrfs_root *root, u32 blocksize)
{
	struct btrfs_block_rsv *block_rsv;
	struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
	int ret;
	bool global_updated = false;

	block_rsv = get_block_rsv(trans, root);

	if (unlikely(block_rsv->size == 0))
		goto try_reserve;
again:
	ret = block_rsv_use_bytes(block_rsv, blocksize);
	if (!ret)
		return block_rsv;

	if (block_rsv->failfast)
		return ERR_PTR(ret);

	if (block_rsv->type == BTRFS_BLOCK_RSV_GLOBAL && !global_updated) {
		global_updated = true;
		update_global_block_rsv(root->fs_info);
		goto again;
	}

	if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
		static DEFINE_RATELIMIT_STATE(_rs,
				DEFAULT_RATELIMIT_INTERVAL * 10,
				/*DEFAULT_RATELIMIT_BURST*/ 1);
		if (__ratelimit(&_rs))
			WARN(1, KERN_DEBUG
				"BTRFS: block rsv returned %d\n", ret);
	}
try_reserve:
	ret = reserve_metadata_bytes(root, block_rsv, blocksize,
				     BTRFS_RESERVE_NO_FLUSH);
	if (!ret)
		return block_rsv;
	/*
	 * If we couldn't reserve metadata bytes try and use some from
	 * the global reserve if its space type is the same as the global
	 * reservation.
	 */
	if (block_rsv->type != BTRFS_BLOCK_RSV_GLOBAL &&
	    block_rsv->space_info == global_rsv->space_info) {
		ret = block_rsv_use_bytes(global_rsv, blocksize);
		if (!ret)
			return global_rsv;
	}
	return ERR_PTR(ret);
}

static void unuse_block_rsv(struct btrfs_fs_info *fs_info,
			    struct btrfs_block_rsv *block_rsv, u32 blocksize)
{
	block_rsv_add_bytes(block_rsv, blocksize, 0);
	block_rsv_release_bytes(fs_info, block_rsv, NULL, 0);
}

/*
 * finds a free extent and does all the dirty work required for allocation
 * returns the key for the extent through ins, and a tree buffer for
 * the first block of the extent through buf.
 *
 * returns the tree buffer or an ERR_PTR on error.
 */
struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
					struct btrfs_root *root, u32 blocksize,
					u64 parent, u64 root_objectid,
					struct btrfs_disk_key *key, int level,
					u64 hint, u64 empty_size)
{
	struct btrfs_key ins;
	struct btrfs_block_rsv *block_rsv;
	struct extent_buffer *buf;
	u64 flags = 0;
	int ret;
	bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
						 SKINNY_METADATA);

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
	if (unlikely(test_bit(BTRFS_ROOT_DUMMY_ROOT, &root->state))) {
		buf = btrfs_init_new_buffer(trans, root, root->alloc_bytenr,
					    blocksize, level);
		if (!IS_ERR(buf))
			root->alloc_bytenr += blocksize;
		return buf;
	}
#endif

	block_rsv = use_block_rsv(trans, root, blocksize);
	if (IS_ERR(block_rsv))
		return ERR_CAST(block_rsv);

	ret = btrfs_reserve_extent(root, blocksize, blocksize,
				   empty_size, hint, &ins, 0, 0);
	if (ret) {
		unuse_block_rsv(root->fs_info, block_rsv, blocksize);
		return ERR_PTR(ret);
	}

	buf = btrfs_init_new_buffer(trans, root, ins.objectid,
				    blocksize, level);
	BUG_ON(IS_ERR(buf)); /* -ENOMEM */

	if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) {
		if (parent == 0)
			parent = ins.objectid;
		flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
	} else
		BUG_ON(parent > 0);

	if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {
		struct btrfs_delayed_extent_op *extent_op;
		extent_op = btrfs_alloc_delayed_extent_op();
		BUG_ON(!extent_op); /* -ENOMEM */
		if (key)
			memcpy(&extent_op->key, key, sizeof(extent_op->key));
		else
			memset(&extent_op->key, 0, sizeof(extent_op->key));
		extent_op->flags_to_set = flags;
		if (skinny_metadata)
			extent_op->update_key = 0;
		else
			extent_op->update_key = 1;
		extent_op->update_flags = 1;
		extent_op->is_data = 0;
		extent_op->level = level;

		ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
					ins.objectid,
					ins.offset, parent, root_objectid,
					level, BTRFS_ADD_DELAYED_EXTENT,
					extent_op, 0);
		BUG_ON(ret); /* -ENOMEM */
	}
	return buf;
}

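/*
 * Control state shared by the tree walk helpers below (reada_walk_down,
 * walk_down_proc, do_walk_down).  refs[] and flags[] cache the
 * reference count and backref flags seen at each level of the current
 * path; stage is either DROP_REFERENCE or UPDATE_BACKREF, defined
 * below.
 */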
struct walk_control {
	u64 refs[BTRFS_MAX_LEVEL];
	u64 flags[BTRFS_MAX_LEVEL];
	struct btrfs_key update_progress;
	int stage;
	int level;
	int shared_level;
	int update_ref;
	int keep_locks;
	int reada_slot;
	int reada_count;
	int for_reloc;
};

#define DROP_REFERENCE	1
#define UPDATE_BACKREF	2

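/*
 * Issue readahead for the child blocks of the node we are walking that
 * will be needed next.  reada_count adapts: it shrinks while previously
 * requested blocks are still being consumed and grows once we catch up,
 * and blocks the current stage would skip anyway are not read ahead.
 */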
static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     struct walk_control *wc,
				     struct btrfs_path *path)
{
	u64 bytenr;
	u64 generation;
	u64 refs;
	u64 flags;
	u32 nritems;
	u32 blocksize;
	struct btrfs_key key;
	struct extent_buffer *eb;
	int ret;
	int slot;
	int nread = 0;

	if (path->slots[wc->level] < wc->reada_slot) {
		wc->reada_count = wc->reada_count * 2 / 3;
		wc->reada_count = max(wc->reada_count, 2);
	} else {
		wc->reada_count = wc->reada_count * 3 / 2;
		wc->reada_count = min_t(int, wc->reada_count,
					BTRFS_NODEPTRS_PER_BLOCK(root));
	}

	eb = path->nodes[wc->level];
	nritems = btrfs_header_nritems(eb);
	blocksize = btrfs_level_size(root, wc->level - 1);

	for (slot = path->slots[wc->level]; slot < nritems; slot++) {
		if (nread >= wc->reada_count)
			break;

		cond_resched();
		bytenr = btrfs_node_blockptr(eb, slot);
		generation = btrfs_node_ptr_generation(eb, slot);

		if (slot == path->slots[wc->level])
			goto reada;

		if (wc->stage == UPDATE_BACKREF &&
		    generation <= root->root_key.offset)
			continue;

		/* We don't lock the tree block, it's OK to be racy here */
		ret = btrfs_lookup_extent_info(trans, root, bytenr,
					       wc->level - 1, 1, &refs,
					       &flags);
		/* We don't care about errors in readahead. */
		if (ret < 0)
			continue;
		BUG_ON(refs == 0);

		if (wc->stage == DROP_REFERENCE) {
			if (refs == 1)
				goto reada;

			if (wc->level == 1 &&
			    (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
				continue;
			if (!wc->update_ref ||
			    generation <= root->root_key.offset)
				continue;
			btrfs_node_key_to_cpu(eb, &key, slot);
			ret = btrfs_comp_cpu_keys(&key,
						  &wc->update_progress);
			if (ret < 0)
				continue;
		} else {
			if (wc->level == 1 &&
			    (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
				continue;
		}
reada:
		ret = readahead_tree_block(root, bytenr, blocksize,
					   generation);
		if (ret)
			break;
		nread++;
	}
	wc->reada_slot = slot;
}

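/*
 * Scan one leaf and record a qgroup operation for every regular
 * (non-inline) file extent it references, so subtree accounting sees
 * the data extents the leaf points to.
 */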
static int account_leaf_items(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root,
			      struct extent_buffer *eb)
{
	int nr = btrfs_header_nritems(eb);
	int i, extent_type, ret;
	struct btrfs_key key;
	struct btrfs_file_extent_item *fi;
	u64 bytenr, num_bytes;

	for (i = 0; i < nr; i++) {
		btrfs_item_key_to_cpu(eb, &key, i);

		if (key.type != BTRFS_EXTENT_DATA_KEY)
			continue;

		fi = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item);
		/* filter out non qgroup-accountable extents  */
		extent_type = btrfs_file_extent_type(eb, fi);

		if (extent_type == BTRFS_FILE_EXTENT_INLINE)
			continue;

		bytenr = btrfs_file_extent_disk_bytenr(eb, fi);
		if (!bytenr)
			continue;

		num_bytes = btrfs_file_extent_disk_num_bytes(eb, fi);

		ret = btrfs_qgroup_record_ref(trans, root->fs_info,
					      root->objectid,
					      bytenr, num_bytes,
					      BTRFS_QGROUP_OPER_SUB_SUBTREE, 0);
		if (ret)
			return ret;
	}
	return 0;
}

/*
 * Walk up the tree from the bottom, freeing leaves and any interior
 * nodes which have had all slots visited.  If a node (leaf or
 * interior) is freed, the node above it will have its slot
 * incremented.  The root node will never be freed.
 *
 * At the end of this function, we should have a path which has all
 * slots incremented to the next position for a search.  If we need to
 * read a new node it will be NULL and the node above it will have the
 * correct slot selected for a later read.
 *
 * If we increment the root nodes slot counter past the number of
 * elements, 1 is returned to signal completion of the search.
 */
static int adjust_slots_upwards(struct btrfs_root *root,
				struct btrfs_path *path, int root_level)
{
	int level = 0;
	int nr, slot;
	struct extent_buffer *eb;

	if (root_level == 0)
		return 1;

	while (level <= root_level) {
		eb = path->nodes[level];
		nr = btrfs_header_nritems(eb);
		path->slots[level]++;
		slot = path->slots[level];
		if (slot >= nr || level == 0) {
			/*
			 * Don't free the root - we will detect this
			 * condition after our loop and return a
			 * positive value for caller to stop walking the tree.
			 */
			if (level != root_level) {
				btrfs_tree_unlock_rw(eb, path->locks[level]);
				path->locks[level] = 0;

				free_extent_buffer(eb);
				path->nodes[level] = NULL;
				path->slots[level] = 0;
			}
		} else {
			/*
			 * We have a valid slot to walk back down
			 * from.  Stop here so caller can process these
			 * new nodes.
			 */
			break;
		}

		level++;
	}

	eb = path->nodes[root_level];
	if (path->slots[root_level] >= btrfs_header_nritems(eb))
		return 1;

	return 0;
}

/*
 * root_eb is the subtree root and is locked before this function is called.
 */
static int account_shared_subtree(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct extent_buffer *root_eb,
				  u64 root_gen,
				  int root_level)
{
	int ret = 0;
	int level;
	struct extent_buffer *eb = root_eb;
	struct btrfs_path *path = NULL;

	BUG_ON(root_level < 0 || root_level > BTRFS_MAX_LEVEL);
	BUG_ON(root_eb == NULL);

	if (!root->fs_info->quota_enabled)
		return 0;

	if (!extent_buffer_uptodate(root_eb)) {
		ret = btrfs_read_buffer(root_eb, root_gen);
		if (ret)
			goto out;
	}

	if (root_level == 0) {
		ret = account_leaf_items(trans, root, root_eb);
		goto out;
	}

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/*
	 * Walk down the tree.  Missing extent blocks are filled in as
	 * we go.  Metadata is accounted every time we read a new
	 * extent block.
	 *
	 * When we reach a leaf, we account for file extent items in it,
	 * walk back up the tree (adjusting slot pointers as we go)
	 * and restart the search process.
	 */
	extent_buffer_get(root_eb); /* For path */
	path->nodes[root_level] = root_eb;
	path->slots[root_level] = 0;
	path->locks[root_level] = 0; /* so release_path doesn't try to unlock */
walk_down:
	level = root_level;
	while (level >= 0) {
		if (path->nodes[level] == NULL) {
			int child_bsize = root->nodesize;
			int parent_slot;
			u64 child_gen;
			u64 child_bytenr;

			/* We need to get child blockptr/gen from
			 * parent before we can read it. */
			eb = path->nodes[level + 1];
			parent_slot = path->slots[level + 1];
			child_bytenr = btrfs_node_blockptr(eb, parent_slot);
			child_gen = btrfs_node_ptr_generation(eb, parent_slot);

			eb = read_tree_block(root, child_bytenr, child_bsize,
					     child_gen);
			if (!eb || !extent_buffer_uptodate(eb)) {
				ret = -EIO;
				goto out;
			}

			path->nodes[level] = eb;
			path->slots[level] = 0;

			btrfs_tree_read_lock(eb);
			btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
			path->locks[level] = BTRFS_READ_LOCK_BLOCKING;

			ret = btrfs_qgroup_record_ref(trans, root->fs_info,
						root->objectid,
						child_bytenr,
						child_bsize,
						BTRFS_QGROUP_OPER_SUB_SUBTREE,
						0);
			if (ret)
				goto out;
		}

		if (level == 0) {
			ret = account_leaf_items(trans, root, path->nodes[level]);
			if (ret)
				goto out;

			/* Nonzero return here means we completed our search */
			ret = adjust_slots_upwards(root, path, root_level);
			if (ret)
				break;

			/* Restart search with new slots */
			goto walk_down;
		}

		level--;
	}

	ret = 0;
out:
	btrfs_free_path(path);

	return ret;
}


/*
 * helper to process tree block while walking down the tree.
 *
 * when wc->stage == UPDATE_BACKREF, this function updates
 * back refs for pointers in the block.
 *
 * NOTE: return value 1 means we should stop walking down.
 */
static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct btrfs_path *path,
				   struct walk_control *wc, int lookup_info)
{
	int level = wc->level;
	struct extent_buffer *eb = path->nodes[level];
	u64 flag = BTRFS_BLOCK_FLAG_FULL_BACKREF;
	int ret;

	if (wc->stage == UPDATE_BACKREF &&
	    btrfs_header_owner(eb) != root->root_key.objectid)
		return 1;

	/*
	 * when reference count of tree block is 1, it won't increase
	 * again. once full backref flag is set, we never clear it.
	 */
	if (lookup_info &&
	    ((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) ||
	     (wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag)))) {
		BUG_ON(!path->locks[level]);
		ret = btrfs_lookup_extent_info(trans, root,
					       eb->start, level, 1,
					       &wc->refs[level],
					       &wc->flags[level]);
		BUG_ON(ret == -ENOMEM);
		if (ret)
			return ret;
		BUG_ON(wc->refs[level] == 0);
	}

	if (wc->stage == DROP_REFERENCE) {
		if (wc->refs[level] > 1)
			return 1;

		if (path->locks[level] && !wc->keep_locks) {
			btrfs_tree_unlock_rw(eb, path->locks[level]);
			path->locks[level] = 0;
		}
		return 0;
	}

	/* wc->stage == UPDATE_BACKREF */
	if (!(wc->flags[level] & flag)) {
		BUG_ON(!path->locks[level]);
		ret = btrfs_inc_ref(trans, root, eb, 1);
		BUG_ON(ret); /* -ENOMEM */
		ret = btrfs_dec_ref(trans, root, eb, 0);
		BUG_ON(ret); /* -ENOMEM */
		ret = btrfs_set_disk_extent_flags(trans, root, eb->start,
						  eb->len, flag,
						  btrfs_header_level(eb), 0);
		BUG_ON(ret); /* -ENOMEM */
		wc->flags[level] |= flag;
	}

	/*
	 * the block is shared by multiple trees, so it's not good to
	 * keep the tree lock
	 */
	if (path->locks[level] && level > 0) {
		btrfs_tree_unlock_rw(eb, path->locks[level]);
		path->locks[level] = 0;
	}
	return 0;
}

/*
 * helper to process tree block pointer.
 *
 * when wc->stage == DROP_REFERENCE, this function checks
 * reference count of the block pointed to. if the block
 * is shared and we need to update back refs for the subtree
 * rooted at the block, this function changes wc->stage to
 * UPDATE_BACKREF. if the block is shared and there is no
 * need to update back refs for the subtree, this function
 * drops its reference to the block.
 *
 * NOTE: return value 1 means we should stop walking down.
 */
static noinline int do_walk_down(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct walk_control *wc, int *lookup_info)
{
	u64 bytenr;
	u64 generation;
	u64 parent;
	u32 blocksize;
	struct btrfs_key key;
	struct extent_buffer *next;
	int level = wc->level;
	int reada = 0;
	int ret = 0;
	bool need_account = false;

	generation = btrfs_node_ptr_generation(path->nodes[level],
					       path->slots[level]);
	/*
	 * if the lower level block was created before the snapshot
	 * was created, we know there is no need to update back refs
	 * for the subtree
	 */
	if (wc->stage == UPDATE_BACKREF &&
	    generation <= root->root_key.offset) {
		*lookup_info = 1;
		return 1;
	}

	bytenr = btrfs_node_blockptr(path->nodes[level], path->slots[level]);
	blocksize = btrfs_level_size(root, level - 1);

	next = btrfs_find_tree_block(root, bytenr, blocksize);
	if (!next) {
		next = btrfs_find_create_tree_block(root, bytenr, blocksize);
		if (!next)
			return -ENOMEM;
		btrfs_set_buffer_lockdep_class(root->root_key.objectid, next,
					       level - 1);
		reada = 1;
	}
	btrfs_tree_lock(next);
	btrfs_set_lock_blocking(next);

	ret = btrfs_lookup_extent_info(trans, root, bytenr, level - 1, 1,
				       &wc->refs[level - 1],
				       &wc->flags[level - 1]);
	if (ret < 0) {
		btrfs_tree_unlock(next);
		return ret;
	}

	if (unlikely(wc->refs[level - 1] == 0)) {
		btrfs_err(root->fs_info, "Missing references.");
		BUG();
	}
	*lookup_info = 0;

	if (wc->stage == DROP_REFERENCE) {
		if (wc->refs[level - 1] > 1) {
			need_account = true;
			if (level == 1 &&
			    (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
				goto skip;

			if (!wc->update_ref ||
			    generation <= root->root_key.offset)
				goto skip;

			btrfs_node_key_to_cpu(path->nodes[level], &key,
					      path->slots[level]);
			ret = btrfs_comp_cpu_keys(&key, &wc->update_progress);
			if (ret < 0)
				goto skip;

			wc->stage = UPDATE_BACKREF;
			wc->shared_level = level - 1;
		}
	} else {
		if (level == 1 &&
		    (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
			goto skip;
	}

	if (!btrfs_buffer_uptodate(next, generation, 0)) {
		btrfs_tree_unlock(next);
		free_extent_buffer(next);
		next = NULL;
		*lookup_info = 1;
	}

	if (!next) {
		if (reada && level == 1)
			reada_walk_down(trans, root, wc, path);
		next = read_tree_block(root, bytenr, blocksize, generation);
		if (!next || !extent_buffer_uptodate(next)) {
			free_extent_buffer(next);
			return -EIO;
		}
		btrfs_tree_lock(next);
		btrfs_set_lock_blocking(next);
	}

	level--;
	BUG_ON(level != btrfs_header_level(next));
	path->nodes[level] = next;
	path->slots[level] = 0;
	path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
	wc->level = level;
	if (wc->level == 1)
		wc->reada_slot = 0;
	return 0;
skip:
	wc->refs[level - 1] = 0;
	wc->flags[level - 1] = 0;
	if (wc->stage == DROP_REFERENCE) {
		if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
			parent = path->nodes[level]->start;
		} else {
			BUG_ON(root->root_key.objectid !=
			       btrfs_header_owner(path->nodes[level]));
			parent = 0;
		}

		if (need_account) {
			ret = account_shared_subtree(trans, root, next,
						     generation, level - 1);
			if (ret) {
				printk_ratelimited(KERN_ERR "BTRFS: %s Error "
					"%d accounting shared subtree. Quota "
					"is out of sync, rescan required.\n",
					root->fs_info->sb->s_id, ret);
			}
		}
		ret = btrfs_free_extent(trans, root, bytenr, blocksize, parent,
				root->root_key.objectid, level - 1, 0, 0);
		BUG_ON(ret); /* -ENOMEM */
	}
	btrfs_tree_unlock(next);
	free_extent_buffer(next);
	*lookup_info = 1;
	return 1;
}

/*
 * helper to process tree block while walking up the tree.
 *
 * when wc->stage == DROP_REFERENCE, this function drops
 * reference count on the block.
 *
 * when wc->stage == UPDATE_BACKREF, this function changes
 * wc->stage back to DROP_REFERENCE if we changed wc->stage
 * to UPDATE_BACKREF previously while processing the block.
 *
 * NOTE: return value 1 means we should stop walking up.
 */
static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct walk_control *wc)
{
	int ret;
	int level = wc->level;
	struct extent_buffer *eb = path->nodes[level];
	u64 parent = 0;

	if (wc->stage == UPDATE_BACKREF) {
		BUG_ON(wc->shared_level < level);
		if (level < wc->shared_level)
			goto out;

		ret = find_next_key(path, level + 1, &wc->update_progress);
		if (ret > 0)
			wc->update_ref = 0;

		wc->stage = DROP_REFERENCE;
		wc->shared_level = -1;
		path->slots[level] = 0;

		/*
		 * check reference count again if the block isn't locked.
		 * we should start walking down the tree again if reference
		 * count is one.
		 */
		if (!path->locks[level]) {
			BUG_ON(level == 0);
			btrfs_tree_lock(eb);
			btrfs_set_lock_blocking(eb);
			path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;

			ret = btrfs_lookup_extent_info(trans, root,
						       eb->start, level, 1,
						       &wc->refs[level],
						       &wc->flags[level]);
			if (ret < 0) {
				btrfs_tree_unlock_rw(eb, path->locks[level]);
				path->locks[level] = 0;
				return ret;
			}
			BUG_ON(wc->refs[level] == 0);
			if (wc->refs[level] == 1) {
				btrfs_tree_unlock_rw(eb, path->locks[level]);
				path->locks[level] = 0;
				return 1;
			}
		}
	}

	/* wc->stage == DROP_REFERENCE */
	BUG_ON(wc->refs[level] > 1 && !path->locks[level]);

	if (wc->refs[level] == 1) {
		if (level == 0) {
			if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
				ret = btrfs_dec_ref(trans, root, eb, 1);
			else
				ret = btrfs_dec_ref(trans, root, eb, 0);
			BUG_ON(ret); /* -ENOMEM */
			ret = account_leaf_items(trans, root, eb);
			if (ret) {
				printk_ratelimited(KERN_ERR "BTRFS: %s Error "
					"%d accounting leaf items. Quota "
					"is out of sync, rescan required.\n",
					root->fs_info->sb->s_id, ret);
			}
		}
		/* make block locked assertion in clean_tree_block happy */
		if (!path->locks[level] &&
		    btrfs_header_generation(eb) == trans->transid) {
			btrfs_tree_lock(eb);
			btrfs_set_lock_blocking(eb);
			path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
		}
		clean_tree_block(trans, root, eb);
	}

	if (eb == root->node) {
		if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
			parent = eb->start;
		else
			BUG_ON(root->root_key.objectid !=
			       btrfs_header_owner(eb));
	} else {
		if (wc->flags[level + 1] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
			parent = path->nodes[level + 1]->start;
		else
			BUG_ON(root->root_key.objectid !=
			       btrfs_header_owner(path->nodes[level + 1]));
	}

	btrfs_free_tree_block(trans, root, eb, parent, wc->refs[level] == 1);
out:
	wc->refs[level] = 0;
	wc->flags[level] = 0;
	return 0;
}
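
/*
 * walk down the tree starting at wc->level, processing each block with
 * walk_down_proc() and descending into child blocks with do_walk_down()
 * until we reach a leaf, a block we should not enter, or a node with no
 * more slots to visit.
 */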
static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct btrfs_path *path,
				   struct walk_control *wc)
{
	int level = wc->level;
	int lookup_info = 1;
	int ret;

	while (level >= 0) {
		ret = walk_down_proc(trans, root, path, wc, lookup_info);
		if (ret > 0)
			break;

		if (level == 0)
			break;

		if (path->slots[level] >=
		    btrfs_header_nritems(path->nodes[level]))
			break;

		ret = do_walk_down(trans, root, path, wc, &lookup_info);
		if (ret > 0) {
			path->slots[level]++;
			continue;
		} else if (ret < 0)
			return ret;
		level = wc->level;
	}
	return 0;
}
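
/*
 * walk back up the tree, dropping fully processed blocks with
 * walk_up_proc(). returns 0 if the caller should continue walking down
 * from wc->level, 1 once everything up to max_level has been processed.
 */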
static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct walk_control *wc, int max_level)
{
	int level = wc->level;
	int ret;

	path->slots[level] = btrfs_header_nritems(path->nodes[level]);
	while (level < max_level && path->nodes[level]) {
		wc->level = level;
		if (path->slots[level] + 1 <
		    btrfs_header_nritems(path->nodes[level])) {
			path->slots[level]++;
			return 0;
		} else {
			ret = walk_up_proc(trans, root, path, wc);
			if (ret > 0)
				return 0;

			if (path->locks[level]) {
				btrfs_tree_unlock_rw(path->nodes[level],
						     path->locks[level]);
				path->locks[level] = 0;
			}
			free_extent_buffer(path->nodes[level]);
			path->nodes[level] = NULL;
			level++;
		}
	}
	return 1;
}

/*
 * drop a subvolume tree.
 *
 * this function traverses the tree freeing any blocks that are only
 * referenced by the tree.
 *
 * when a shared tree block is found, this function decreases its
 * reference count by one. if update_ref is true, this function
 * also makes sure backrefs for the shared block and all lower level
 * blocks are properly updated.
 *
 * If called with for_reloc == 0, may exit early with -EAGAIN
 */
int btrfs_drop_snapshot(struct btrfs_root *root,
			 struct btrfs_block_rsv *block_rsv, int update_ref,
			 int for_reloc)
{
	struct btrfs_path *path;
	struct btrfs_trans_handle *trans;
	struct btrfs_root *tree_root = root->fs_info->tree_root;
	struct btrfs_root_item *root_item = &root->root_item;
	struct walk_control *wc;
	struct btrfs_key key;
	int err = 0;
	int ret;
	int level;
	bool root_dropped = false;

	btrfs_debug(root->fs_info, "Drop subvolume %llu", root->objectid);

	path = btrfs_alloc_path();
	if (!path) {
		err = -ENOMEM;
		goto out;
	}

	wc = kzalloc(sizeof(*wc), GFP_NOFS);
	if (!wc) {
		btrfs_free_path(path);
		err = -ENOMEM;
		goto out;
	}

	trans = btrfs_start_transaction(tree_root, 0);
	if (IS_ERR(trans)) {
		err = PTR_ERR(trans);
		goto out_free;
	}

	if (block_rsv)
		trans->block_rsv = block_rsv;

	if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
		level = btrfs_header_level(root->node);
		path->nodes[level] = btrfs_lock_root_node(root);
		btrfs_set_lock_blocking(path->nodes[level]);
		path->slots[level] = 0;
		path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
		memset(&wc->update_progress, 0,
		       sizeof(wc->update_progress));
	} else {
		btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
		memcpy(&wc->update_progress, &key,
		       sizeof(wc->update_progress));

		level = root_item->drop_level;
		BUG_ON(level == 0);
		path->lowest_level = level;
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		path->lowest_level = 0;
		if (ret < 0) {
			err = ret;
			goto out_end_trans;
		}
		WARN_ON(ret > 0);

		/*
		 * unlock our path, this is safe because only this
		 * function is allowed to delete this snapshot
		 */
		btrfs_unlock_up_safe(path, 0);

		level = btrfs_header_level(root->node);
		while (1) {
			btrfs_tree_lock(path->nodes[level]);
			btrfs_set_lock_blocking(path->nodes[level]);
			path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;

			ret = btrfs_lookup_extent_info(trans, root,
						path->nodes[level]->start,
						level, 1, &wc->refs[level],
						&wc->flags[level]);
			if (ret < 0) {
				err = ret;
				goto out_end_trans;
			}
			BUG_ON(wc->refs[level] == 0);

			if (level == root_item->drop_level)
				break;

			btrfs_tree_unlock(path->nodes[level]);
			path->locks[level] = 0;
			WARN_ON(wc->refs[level] != 1);
			level--;
		}
	}

	wc->level = level;
	wc->shared_level = -1;
	wc->stage = DROP_REFERENCE;
	wc->update_ref = update_ref;
	wc->keep_locks = 0;
	wc->for_reloc = for_reloc;
	wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);

	while (1) {
		ret = walk_down_tree(trans, root, path, wc);
		if (ret < 0) {
			err = ret;
			break;
		}

		ret = walk_up_tree(trans, root, path, wc, BTRFS_MAX_LEVEL);
		if (ret < 0) {
			err = ret;
			break;
		}

		if (ret > 0) {
			BUG_ON(wc->stage != DROP_REFERENCE);
			break;
		}

		if (wc->stage == DROP_REFERENCE) {
			level = wc->level;
			btrfs_node_key(path->nodes[level],
				       &root_item->drop_progress,
				       path->slots[level]);
			root_item->drop_level = level;
		}

		BUG_ON(wc->level == 0);
		if (btrfs_should_end_transaction(trans, tree_root) ||
		    (!for_reloc && btrfs_need_cleaner_sleep(root))) {
			ret = btrfs_update_root(trans, tree_root,
						&root->root_key,
						root_item);
			if (ret) {
				btrfs_abort_transaction(trans, tree_root, ret);
				err = ret;
				goto out_end_trans;
			}

			/*
			 * Qgroup update accounting is run from
			 * delayed ref handling. This usually works
			 * out because delayed refs are normally the
			 * only way qgroup updates are added. However,
			 * we may have added updates during our tree
			 * walk so run qgroups here to make sure we
			 * don't lose any updates.
			 */
			ret = btrfs_delayed_qgroup_accounting(trans,
							      root->fs_info);
			if (ret)
				printk_ratelimited(KERN_ERR "BTRFS: Failure %d "
						   "running qgroup updates "
						   "during snapshot delete. "
						   "Quota is out of sync, "
						   "rescan required.\n", ret);

			btrfs_end_transaction_throttle(trans, tree_root);
			if (!for_reloc && btrfs_need_cleaner_sleep(root)) {
				pr_debug("BTRFS: drop snapshot early exit\n");
				err = -EAGAIN;
				goto out_free;
			}

			trans = btrfs_start_transaction(tree_root, 0);
			if (IS_ERR(trans)) {
				err = PTR_ERR(trans);
				goto out_free;
			}
			if (block_rsv)
				trans->block_rsv = block_rsv;
		}
	}
	btrfs_release_path(path);
	if (err)
		goto out_end_trans;

	ret = btrfs_del_root(trans, tree_root, &root->root_key);
	if (ret) {
		btrfs_abort_transaction(trans, tree_root, ret);
		goto out_end_trans;
	}

	if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
		ret = btrfs_find_root(tree_root, &root->root_key, path,
				      NULL, NULL);
		if (ret < 0) {
			btrfs_abort_transaction(trans, tree_root, ret);
			err = ret;
			goto out_end_trans;
		} else if (ret > 0) {
			/* if we fail to delete the orphan item this time
			 * around, it'll get picked up the next time.
			 *
			 * The most common failure here is just -ENOENT.
			 */
			btrfs_del_orphan_item(trans, tree_root,
					      root->root_key.objectid);
		}
	}

	if (test_bit(BTRFS_ROOT_IN_RADIX, &root->state)) {
		btrfs_drop_and_free_fs_root(tree_root->fs_info, root);
	} else {
		free_extent_buffer(root->node);
		free_extent_buffer(root->commit_root);
		btrfs_put_fs_root(root);
	}
	root_dropped = true;
out_end_trans:
	ret = btrfs_delayed_qgroup_accounting(trans, tree_root->fs_info);
	if (ret)
		printk_ratelimited(KERN_ERR "BTRFS: Failure %d "
				   "running qgroup updates "
				   "during snapshot delete. "
				   "Quota is out of sync, "
				   "rescan required.\n", ret);

	btrfs_end_transaction_throttle(trans, tree_root);
out_free:
	kfree(wc);
	btrfs_free_path(path);
out:
	/*
	 * So if we need to stop dropping the snapshot for whatever reason we
	 * need to make sure to add it back to the dead root list so that we
	 * keep trying to do the work later. This also cleans up roots if we
	 * don't have it in the radix (like when we recover after a power fail
	 * or unmount) so we don't leak memory.
	 */
	if (!for_reloc && root_dropped == false)
		btrfs_add_dead_root(root);
	if (err && err != -EAGAIN)
		btrfs_std_error(root->fs_info, err);
	return err;
}

/*
 * drop subtree rooted at tree block 'node'.
 *
 * NOTE: this function will unlock and release tree block 'node'.
 * It is only used by the relocation code.
 */
int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
			struct btrfs_root *root,
			struct extent_buffer *node,
			struct extent_buffer *parent)
{
	struct btrfs_path *path;
	struct walk_control *wc;
	int level;
	int parent_level;
	int ret = 0;
	int wret;

	BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	wc = kzalloc(sizeof(*wc), GFP_NOFS);
	if (!wc) {
		btrfs_free_path(path);
		return -ENOMEM;
	}

	btrfs_assert_tree_locked(parent);
	parent_level = btrfs_header_level(parent);
	extent_buffer_get(parent);
	path->nodes[parent_level] = parent;
	path->slots[parent_level] = btrfs_header_nritems(parent);

	btrfs_assert_tree_locked(node);
	level = btrfs_header_level(node);
	path->nodes[level] = node;
	path->slots[level] = 0;
	path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;

	wc->refs[parent_level] = 1;
	wc->flags[parent_level] = BTRFS_BLOCK_FLAG_FULL_BACKREF;
	wc->level = level;
	wc->shared_level = -1;
	wc->stage = DROP_REFERENCE;
	wc->update_ref = 0;
	wc->keep_locks = 1;
	wc->for_reloc = 1;
	wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);

	while (1) {
		wret = walk_down_tree(trans, root, path, wc);
		if (wret < 0) {
			ret = wret;
			break;
		}

		wret = walk_up_tree(trans, root, path, wc, parent_level);
		if (wret < 0)
			ret = wret;
		if (wret != 0)
			break;
	}

	kfree(wc);
	btrfs_free_path(path);
	return ret;
}

static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
{
	u64 num_devices;
	u64 stripped;

	/*
	 * if restripe for this chunk_type is on, pick target profile and
	 * return, otherwise do the usual balance
	 */
	stripped = get_restripe_target(root->fs_info, flags);
	if (stripped)
		return extended_to_chunk(stripped);

	num_devices = root->fs_info->fs_devices->rw_devices;

	stripped = BTRFS_BLOCK_GROUP_RAID0 |
		BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6 |
		BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;

	if (num_devices == 1) {
		stripped |= BTRFS_BLOCK_GROUP_DUP;
		stripped = flags & ~stripped;

		/* turn raid0 into single device chunks */
		if (flags & BTRFS_BLOCK_GROUP_RAID0)
			return stripped;

		/* turn mirroring into duplication */
		if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
			     BTRFS_BLOCK_GROUP_RAID10))
			return stripped | BTRFS_BLOCK_GROUP_DUP;
	} else {
		/* they already had raid on here, just return */
		if (flags & stripped)
			return flags;

		stripped |= BTRFS_BLOCK_GROUP_DUP;
		stripped = flags & ~stripped;

		/* switch duplicated blocks with raid1 */
		if (flags & BTRFS_BLOCK_GROUP_DUP)
			return stripped | BTRFS_BLOCK_GROUP_RAID1;

		/* this is drive concat, leave it alone */
	}

	return flags;
}
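
/*
 * try to mark a block group read-only. unless 'force' is set, this only
 * succeeds if the space_info can absorb the group's unused bytes without
 * exceeding total_bytes. returns 0 on success (or if the group is
 * already read-only), -ENOSPC otherwise.
 */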
static int set_block_group_ro(struct btrfs_block_group_cache *cache, int force)
{
	struct btrfs_space_info *sinfo = cache->space_info;
	u64 num_bytes;
	u64 min_allocable_bytes;
	int ret = -ENOSPC;

	/*
	 * We need some metadata space and system metadata space for
	 * allocating chunks in some corner cases, so unless we are forced
	 * to, keep a minimum of allocatable bytes free before setting the
	 * block group read-only.
	 */
	if ((sinfo->flags &
	     (BTRFS_BLOCK_GROUP_SYSTEM | BTRFS_BLOCK_GROUP_METADATA)) &&
	    !force)
		min_allocable_bytes = 1 * 1024 * 1024;
	else
		min_allocable_bytes = 0;

	spin_lock(&sinfo->lock);
	spin_lock(&cache->lock);

	if (cache->ro) {
		ret = 0;
		goto out;
	}

	num_bytes = cache->key.offset - cache->reserved - cache->pinned -
		    cache->bytes_super - btrfs_block_group_used(&cache->item);

	if (sinfo->bytes_used + sinfo->bytes_reserved + sinfo->bytes_pinned +
	    sinfo->bytes_may_use + sinfo->bytes_readonly + num_bytes +
	    min_allocable_bytes <= sinfo->total_bytes) {
		sinfo->bytes_readonly += num_bytes;
		cache->ro = 1;
		ret = 0;
	}
out:
	spin_unlock(&cache->lock);
	spin_unlock(&sinfo->lock);
	return ret;
}
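
/*
 * mark a block group read-only, allocating new chunks as needed (and
 * retrying once) so the space the group was providing remains available
 * elsewhere.
 */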
int btrfs_set_block_group_ro(struct btrfs_root *root,
			     struct btrfs_block_group_cache *cache)
{
	struct btrfs_trans_handle *trans;
	u64 alloc_flags;
	int ret;

	BUG_ON(cache->ro);

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	alloc_flags = update_block_group_flags(root, cache->flags);
	if (alloc_flags != cache->flags) {
		ret = do_chunk_alloc(trans, root, alloc_flags,
				     CHUNK_ALLOC_FORCE);
		if (ret < 0)
			goto out;
	}

	ret = set_block_group_ro(cache, 0);
	if (!ret)
		goto out;
	alloc_flags = get_alloc_profile(root, cache->space_info->flags);
	ret = do_chunk_alloc(trans, root, alloc_flags,
			     CHUNK_ALLOC_FORCE);
	if (ret < 0)
		goto out;
	ret = set_block_group_ro(cache, 0);
out:
	btrfs_end_transaction(trans, root);
	return ret;
}

int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root, u64 type)
{
	u64 alloc_flags = get_alloc_profile(root, type);
	return do_chunk_alloc(trans, root, alloc_flags,
			      CHUNK_ALLOC_FORCE);
}

/*
 * helper to account the unused space of all the readonly block groups in
 * the list. takes mirrors into account.
 */
static u64 __btrfs_get_ro_block_group_free_space(struct list_head *groups_list)
{
	struct btrfs_block_group_cache *block_group;
	u64 free_bytes = 0;
	int factor;

	list_for_each_entry(block_group, groups_list, list) {
		spin_lock(&block_group->lock);

		if (!block_group->ro) {
			spin_unlock(&block_group->lock);
			continue;
		}

		if (block_group->flags & (BTRFS_BLOCK_GROUP_RAID1 |
					  BTRFS_BLOCK_GROUP_RAID10 |
					  BTRFS_BLOCK_GROUP_DUP))
			factor = 2;
		else
			factor = 1;

		free_bytes += (block_group->key.offset -
			       btrfs_block_group_used(&block_group->item)) *
			       factor;

		spin_unlock(&block_group->lock);
	}

	return free_bytes;
}

/*
 * helper to account the unused space of all the readonly block groups in
 * the space_info. takes mirrors into account.
 */
u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo)
{
	int i;
	u64 free_bytes = 0;

	spin_lock(&sinfo->lock);

	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
		if (!list_empty(&sinfo->block_groups[i]))
			free_bytes += __btrfs_get_ro_block_group_free_space(
						&sinfo->block_groups[i]);

	spin_unlock(&sinfo->lock);

	return free_bytes;
}

void btrfs_set_block_group_rw(struct btrfs_root *root,
			      struct btrfs_block_group_cache *cache)
{
	struct btrfs_space_info *sinfo = cache->space_info;
	u64 num_bytes;

	BUG_ON(!cache->ro);

	spin_lock(&sinfo->lock);
	spin_lock(&cache->lock);
	num_bytes = cache->key.offset - cache->reserved - cache->pinned -
		    cache->bytes_super - btrfs_block_group_used(&cache->item);
	sinfo->bytes_readonly -= num_bytes;
	cache->ro = 0;
	spin_unlock(&cache->lock);
	spin_unlock(&sinfo->lock);
}

/*
 * checks to see if it's even possible to relocate this block group.
 *
 * @return - -1 if it's not a good idea to relocate this block group, 0 if
 * it's ok to go ahead and try.
 */
int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
{
	struct btrfs_block_group_cache *block_group;
	struct btrfs_space_info *space_info;
	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
	struct btrfs_device *device;
	struct btrfs_trans_handle *trans;
	u64 min_free;
	u64 dev_min = 1;
	u64 dev_nr = 0;
	u64 target;
	int index;
	int full = 0;
	int ret = 0;

	block_group = btrfs_lookup_block_group(root->fs_info, bytenr);

	/* odd, couldn't find the block group, leave it alone */
	if (!block_group)
		return -1;

	min_free = btrfs_block_group_used(&block_group->item);

	/* no bytes used, we're good */
	if (!min_free)
		goto out;

	space_info = block_group->space_info;
	spin_lock(&space_info->lock);

	full = space_info->full;

	/*
	 * if this is the last block group we have in this space, we can't
	 * relocate it unless we're able to allocate a new chunk below.
	 *
	 * Otherwise, we need to make sure we have room in the space to handle
	 * all of the extents from this block group. If we can, we're good
	 */
	if ((space_info->total_bytes != block_group->key.offset) &&
	    (space_info->bytes_used + space_info->bytes_reserved +
	     space_info->bytes_pinned + space_info->bytes_readonly +
	     min_free < space_info->total_bytes)) {
		spin_unlock(&space_info->lock);
		goto out;
	}
	spin_unlock(&space_info->lock);

	/*
	 * ok we don't have enough space, but maybe we have free space on our
	 * devices to allocate new chunks for relocation, so loop through our
	 * alloc devices and guess if we have enough space. if this block
	 * group is going to be restriped, run checks against the target
	 * profile instead of the current one.
	 */
	ret = -1;

	/*
	 * index:
	 *	0: raid10
	 *	1: raid1
	 *	2: dup
	 *	3: raid0
	 *	4: single
	 */
	target = get_restripe_target(root->fs_info, block_group->flags);
	if (target) {
		index = __get_raid_index(extended_to_chunk(target));
	} else {
		/*
		 * this is just a balance, so if we were marked as full
		 * we know there is no space for a new chunk
		 */
		if (full)
			goto out;

		index = get_block_group_index(block_group);
	}

	if (index == BTRFS_RAID_RAID10) {
		dev_min = 4;
		/* Divide by 2 */
		min_free >>= 1;
	} else if (index == BTRFS_RAID_RAID1) {
		dev_min = 2;
	} else if (index == BTRFS_RAID_DUP) {
		/* Multiply by 2 */
		min_free <<= 1;
	} else if (index == BTRFS_RAID_RAID0) {
		dev_min = fs_devices->rw_devices;
		do_div(min_free, dev_min);
	}

	/* We need to do this so that we can look at pending chunks */
	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out;
	}

	mutex_lock(&root->fs_info->chunk_mutex);
	list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
		u64 dev_offset;

		/*
		 * check to make sure we can actually find a chunk with enough
		 * space to fit our block group in.
		 */
		if (device->total_bytes > device->bytes_used + min_free &&
		    !device->is_tgtdev_for_dev_replace) {
			ret = find_free_dev_extent(trans, device, min_free,
						   &dev_offset, NULL);
			if (!ret)
				dev_nr++;

			if (dev_nr >= dev_min)
				break;

			ret = -1;
		}
	}
	mutex_unlock(&root->fs_info->chunk_mutex);
	btrfs_end_transaction(trans, root);
out:
	btrfs_put_block_group(block_group);
	return ret;
}
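
/*
 * find the first block group item at or after *key and leave the path
 * pointing at it. returns 0 if one was found, > 0 if there are no more
 * block group items, < 0 on error.
 */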
static int find_first_block_group(struct btrfs_root *root,
				  struct btrfs_path *path, struct btrfs_key *key)
{
	int ret = 0;
	struct btrfs_key found_key;
	struct extent_buffer *leaf;
	int slot;

	ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
	if (ret < 0)
		goto out;

	while (1) {
		slot = path->slots[0];
		leaf = path->nodes[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto out;
			break;
		}
		btrfs_item_key_to_cpu(leaf, &found_key, slot);

		if (found_key.objectid >= key->objectid &&
		    found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
			ret = 0;
			goto out;
		}
		path->slots[0]++;
	}
out:
	return ret;
}
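
/*
 * release the inode reference that each block group may still hold on
 * its free space cache inode (block_group->inode / ->iref).
 */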
void btrfs_put_block_group_cache(struct btrfs_fs_info *info)
{
	struct btrfs_block_group_cache *block_group;
	u64 last = 0;

	while (1) {
		struct inode *inode;

		block_group = btrfs_lookup_first_block_group(info, last);
		while (block_group) {
			spin_lock(&block_group->lock);
			if (block_group->iref)
				break;
			spin_unlock(&block_group->lock);
			block_group = next_block_group(info->tree_root,
						       block_group);
		}
		if (!block_group) {
			if (last == 0)
				break;
			last = 0;
			continue;
		}

		inode = block_group->inode;
		block_group->iref = 0;
		block_group->inode = NULL;
		spin_unlock(&block_group->lock);
		iput(inode);
		last = block_group->key.objectid + block_group->key.offset;
		btrfs_put_block_group(block_group);
	}
}
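
/*
 * free all in-memory block group and space_info structures. only called
 * during the final stages of unmount, when nothing else can be using
 * them (see the comment above the space_info loop below).
 */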
int btrfs_free_block_groups(struct btrfs_fs_info *info)
{
	struct btrfs_block_group_cache *block_group;
	struct btrfs_space_info *space_info;
	struct btrfs_caching_control *caching_ctl;
	struct rb_node *n;

	down_write(&info->commit_root_sem);
	while (!list_empty(&info->caching_block_groups)) {
		caching_ctl = list_entry(info->caching_block_groups.next,
					 struct btrfs_caching_control, list);
		list_del(&caching_ctl->list);
		put_caching_control(caching_ctl);
	}
	up_write(&info->commit_root_sem);

	spin_lock(&info->block_group_cache_lock);
	while ((n = rb_last(&info->block_group_cache_tree)) != NULL) {
		block_group = rb_entry(n, struct btrfs_block_group_cache,
				       cache_node);
		rb_erase(&block_group->cache_node,
			 &info->block_group_cache_tree);
		spin_unlock(&info->block_group_cache_lock);

		down_write(&block_group->space_info->groups_sem);
		list_del(&block_group->list);
		up_write(&block_group->space_info->groups_sem);

		if (block_group->cached == BTRFS_CACHE_STARTED)
			wait_block_group_cache_done(block_group);

		/*
		 * We haven't cached this block group, which means we could
		 * possibly have excluded extents on this block group.
		 */
		if (block_group->cached == BTRFS_CACHE_NO ||
		    block_group->cached == BTRFS_CACHE_ERROR)
			free_excluded_extents(info->extent_root, block_group);

		btrfs_remove_free_space_cache(block_group);
		btrfs_put_block_group(block_group);

		spin_lock(&info->block_group_cache_lock);
	}
	spin_unlock(&info->block_group_cache_lock);

	/* now that all the block groups are freed, go through and
	 * free all the space_info structs. This is only called during
	 * the final stages of unmount, and so we know nobody is
	 * using them. We call synchronize_rcu() once before we start,
	 * just to be on the safe side.
	 */
	synchronize_rcu();

	release_global_block_rsv(info);

	while (!list_empty(&info->space_info)) {
		int i;

		space_info = list_entry(info->space_info.next,
					struct btrfs_space_info,
					list);
		if (btrfs_test_opt(info->tree_root, ENOSPC_DEBUG)) {
			if (WARN_ON(space_info->bytes_pinned > 0 ||
				    space_info->bytes_reserved > 0 ||
				    space_info->bytes_may_use > 0)) {
				dump_space_info(space_info, 0, 0);
			}
		}
		list_del(&space_info->list);
		for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
			struct kobject *kobj;

			kobj = space_info->block_group_kobjs[i];
			space_info->block_group_kobjs[i] = NULL;
			if (kobj) {
				kobject_del(kobj);
				kobject_put(kobj);
			}
		}
		kobject_del(&space_info->kobj);
		kobject_put(&space_info->kobj);
	}
	return 0;
}
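
/*
 * link a block group into its space_info's per-raid-type list and, if it
 * is the first group of that type, add the matching sysfs kobject.
 */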
static void __link_block_group(struct btrfs_space_info *space_info,
			       struct btrfs_block_group_cache *cache)
{
	int index = get_block_group_index(cache);
	bool first = false;

	down_write(&space_info->groups_sem);
	if (list_empty(&space_info->block_groups[index]))
		first = true;
	list_add_tail(&cache->list, &space_info->block_groups[index]);
	up_write(&space_info->groups_sem);

	if (first) {
		struct raid_kobject *rkobj;
		int ret;

		rkobj = kzalloc(sizeof(*rkobj), GFP_NOFS);
		if (!rkobj)
			goto out_err;
		rkobj->raid_type = index;
		kobject_init(&rkobj->kobj, &btrfs_raid_ktype);
		ret = kobject_add(&rkobj->kobj, &space_info->kobj,
				  "%s", get_raid_name(index));
		if (ret) {
			kobject_put(&rkobj->kobj);
			goto out_err;
		}
		space_info->block_group_kobjs[index] = &rkobj->kobj;
	}

	return;
out_err:
	pr_warn("BTRFS: failed to add kobject for block cache. ignoring.\n");
}
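
/*
 * allocate and initialize an in-memory block group cache structure for
 * the group starting at 'start' and covering 'size' bytes. returns NULL
 * on allocation failure.
 */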
static struct btrfs_block_group_cache *
btrfs_create_block_group_cache(struct btrfs_root *root, u64 start, u64 size)
{
	struct btrfs_block_group_cache *cache;

	cache = kzalloc(sizeof(*cache), GFP_NOFS);
	if (!cache)
		return NULL;

	cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
					GFP_NOFS);
	if (!cache->free_space_ctl) {
		kfree(cache);
		return NULL;
	}

	cache->key.objectid = start;
	cache->key.offset = size;
	cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;

	cache->sectorsize = root->sectorsize;
	cache->fs_info = root->fs_info;
	cache->full_stripe_len = btrfs_full_stripe_len(root,
					       &root->fs_info->mapping_tree,
					       start);
	atomic_set(&cache->count, 1);
	spin_lock_init(&cache->lock);
	init_rwsem(&cache->data_rwsem);
	INIT_LIST_HEAD(&cache->list);
	INIT_LIST_HEAD(&cache->cluster_list);
	INIT_LIST_HEAD(&cache->new_bg_list);
	btrfs_init_free_space_ctl(cache);

	return cache;
}
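
/*
 * read all block group items from the extent tree at mount time and
 * build the in-memory block group caches and space_infos from them.
 */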
int btrfs_read_block_groups(struct btrfs_root *root)
{
	struct btrfs_path *path;
	int ret;
	struct btrfs_block_group_cache *cache;
	struct btrfs_fs_info *info = root->fs_info;
	struct btrfs_space_info *space_info;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *leaf;
	int need_clear = 0;
	u64 cache_gen;

	root = info->extent_root;
	key.objectid = 0;
	key.offset = 0;
	btrfs_set_key_type(&key, BTRFS_BLOCK_GROUP_ITEM_KEY);
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->reada = 1;

	cache_gen = btrfs_super_cache_generation(root->fs_info->super_copy);
	if (btrfs_test_opt(root, SPACE_CACHE) &&
	    btrfs_super_generation(root->fs_info->super_copy) != cache_gen)
		need_clear = 1;
	if (btrfs_test_opt(root, CLEAR_CACHE))
		need_clear = 1;

	while (1) {
		ret = find_first_block_group(root, path, &key);
		if (ret > 0)
			break;
		if (ret != 0)
			goto error;

		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

		cache = btrfs_create_block_group_cache(root, found_key.objectid,
						       found_key.offset);
		if (!cache) {
			ret = -ENOMEM;
			goto error;
		}

		if (need_clear) {
			/*
			 * When we mount with old space cache, we need to
			 * set BTRFS_DC_CLEAR and set dirty flag.
			 *
			 * a) Setting 'BTRFS_DC_CLEAR' makes sure that we
			 *    truncate the old free space cache inode and
			 *    setup a new one.
			 * b) Setting 'dirty flag' makes sure that we flush
			 *    the new space cache info onto disk.
			 */
			cache->disk_cache_state = BTRFS_DC_CLEAR;
			if (btrfs_test_opt(root, SPACE_CACHE))
				cache->dirty = 1;
		}

		read_extent_buffer(leaf, &cache->item,
				   btrfs_item_ptr_offset(leaf, path->slots[0]),
				   sizeof(cache->item));
		cache->flags = btrfs_block_group_flags(&cache->item);

		key.objectid = found_key.objectid + found_key.offset;
		btrfs_release_path(path);

		/*
		 * We need to exclude the super stripes now so that the space
		 * info has super bytes accounted for, otherwise we'll think
		 * we have more space than we actually do.
		 */
		ret = exclude_super_stripes(root, cache);
		if (ret) {
			/*
			 * We may have excluded something, so call this just in
			 * case.
			 */
			free_excluded_extents(root, cache);
			btrfs_put_block_group(cache);
			goto error;
		}

		/*
		 * check for two cases, either we are full, and therefore
		 * don't need to bother with the caching work since we won't
		 * find any space, or we are empty, and we can just add all
		 * the space in and be done with it. This saves us a lot of
		 * time, particularly in the full case.
		 */
		if (found_key.offset == btrfs_block_group_used(&cache->item)) {
			cache->last_byte_to_unpin = (u64)-1;
			cache->cached = BTRFS_CACHE_FINISHED;
			free_excluded_extents(root, cache);
		} else if (btrfs_block_group_used(&cache->item) == 0) {
			cache->last_byte_to_unpin = (u64)-1;
			cache->cached = BTRFS_CACHE_FINISHED;
			add_new_free_space(cache, root->fs_info,
					   found_key.objectid,
					   found_key.objectid +
					   found_key.offset);
			free_excluded_extents(root, cache);
		}

		ret = btrfs_add_block_group_cache(root->fs_info, cache);
		if (ret) {
			btrfs_remove_free_space_cache(cache);
			btrfs_put_block_group(cache);
			goto error;
		}

		ret = update_space_info(info, cache->flags, found_key.offset,
					btrfs_block_group_used(&cache->item),
					&space_info);
		if (ret) {
			btrfs_remove_free_space_cache(cache);
			spin_lock(&info->block_group_cache_lock);
			rb_erase(&cache->cache_node,
				 &info->block_group_cache_tree);
			spin_unlock(&info->block_group_cache_lock);
			btrfs_put_block_group(cache);
			goto error;
		}

		cache->space_info = space_info;
		spin_lock(&cache->space_info->lock);
		cache->space_info->bytes_readonly += cache->bytes_super;
		spin_unlock(&cache->space_info->lock);

		__link_block_group(space_info, cache);

		set_avail_alloc_bits(root->fs_info, cache->flags);
		if (btrfs_chunk_readonly(root, cache->key.objectid))
			set_block_group_ro(cache, 1);
	}

	list_for_each_entry_rcu(space_info, &root->fs_info->space_info, list) {
		if (!(get_alloc_profile(root, space_info->flags) &
		      (BTRFS_BLOCK_GROUP_RAID10 |
		       BTRFS_BLOCK_GROUP_RAID1 |
		       BTRFS_BLOCK_GROUP_RAID5 |
		       BTRFS_BLOCK_GROUP_RAID6 |
		       BTRFS_BLOCK_GROUP_DUP)))
			continue;
		/*
		 * avoid allocating from un-mirrored block group if there are
		 * mirrored block groups.
		 */
		list_for_each_entry(cache,
				&space_info->block_groups[BTRFS_RAID_RAID0],
				list)
			set_block_group_ro(cache, 1);
		list_for_each_entry(cache,
				&space_info->block_groups[BTRFS_RAID_SINGLE],
				list)
			set_block_group_ro(cache, 1);
	}

	init_global_block_rsv(info);
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}

void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root)
{
	struct btrfs_block_group_cache *block_group, *tmp;
	struct btrfs_root *extent_root = root->fs_info->extent_root;
	struct btrfs_block_group_item item;
	struct btrfs_key key;
	int ret = 0;

	list_for_each_entry_safe(block_group, tmp, &trans->new_bgs,
				 new_bg_list) {
		list_del_init(&block_group->new_bg_list);

		if (ret)
			continue;

		spin_lock(&block_group->lock);
		memcpy(&item, &block_group->item, sizeof(item));
		memcpy(&key, &block_group->key, sizeof(key));
		spin_unlock(&block_group->lock);

		ret = btrfs_insert_item(trans, extent_root, &key, &item,
					sizeof(item));
		if (ret)
			btrfs_abort_transaction(trans, extent_root, ret);
		ret = btrfs_finish_chunk_alloc(trans, extent_root,
					       key.objectid, key.offset);
		if (ret)
			btrfs_abort_transaction(trans, extent_root, ret);
	}
}
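
/*
 * create a new block group for a freshly allocated chunk. the group is
 * queued on trans->new_bgs; its item is inserted into the extent tree
 * later by btrfs_create_pending_block_groups().
 */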
int btrfs_make_block_group(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root, u64 bytes_used,
			   u64 type, u64 chunk_objectid, u64 chunk_offset,
			   u64 size)
{
	int ret;
	struct btrfs_root *extent_root;
	struct btrfs_block_group_cache *cache;

	extent_root = root->fs_info->extent_root;

	btrfs_set_log_full_commit(root->fs_info, trans);

	cache = btrfs_create_block_group_cache(root, chunk_offset, size);
	if (!cache)
		return -ENOMEM;

	btrfs_set_block_group_used(&cache->item, bytes_used);
	btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid);
	btrfs_set_block_group_flags(&cache->item, type);

	cache->flags = type;
	cache->last_byte_to_unpin = (u64)-1;
	cache->cached = BTRFS_CACHE_FINISHED;
	ret = exclude_super_stripes(root, cache);
	if (ret) {
		/*
		 * We may have excluded something, so call this just in
		 * case.
		 */
		free_excluded_extents(root, cache);
		btrfs_put_block_group(cache);
		return ret;
	}

	add_new_free_space(cache, root->fs_info, chunk_offset,
			   chunk_offset + size);

	free_excluded_extents(root, cache);

	ret = btrfs_add_block_group_cache(root->fs_info, cache);
	if (ret) {
		btrfs_remove_free_space_cache(cache);
		btrfs_put_block_group(cache);
		return ret;
	}

	ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
				&cache->space_info);
	if (ret) {
		btrfs_remove_free_space_cache(cache);
		spin_lock(&root->fs_info->block_group_cache_lock);
		rb_erase(&cache->cache_node,
			 &root->fs_info->block_group_cache_tree);
		spin_unlock(&root->fs_info->block_group_cache_lock);
		btrfs_put_block_group(cache);
		return ret;
	}
	update_global_block_rsv(root->fs_info);

	spin_lock(&cache->space_info->lock);
	cache->space_info->bytes_readonly += cache->bytes_super;
	spin_unlock(&cache->space_info->lock);

	__link_block_group(cache->space_info, cache);

	list_add_tail(&cache->new_bg_list, &trans->new_bgs);

	set_avail_alloc_bits(extent_root->fs_info, type);

	return 0;
}

static void clear_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
{
	u64 extra_flags = chunk_to_extended(flags) &
				BTRFS_EXTENDED_PROFILE_MASK;

	write_seqlock(&fs_info->profiles_lock);
	if (flags & BTRFS_BLOCK_GROUP_DATA)
		fs_info->avail_data_alloc_bits &= ~extra_flags;
	if (flags & BTRFS_BLOCK_GROUP_METADATA)
		fs_info->avail_metadata_alloc_bits &= ~extra_flags;
	if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
		fs_info->avail_system_alloc_bits &= ~extra_flags;
	write_sequnlock(&fs_info->profiles_lock);
}
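
/*
 * remove a read-only block group: drop its free space cache inode,
 * delete the free space cache item and the block group item, and unlink
 * the group from its space_info.
 */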
int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root, u64 group_start)
{
	struct btrfs_path *path;
	struct btrfs_block_group_cache *block_group;
	struct btrfs_free_cluster *cluster;
	struct btrfs_root *tree_root = root->fs_info->tree_root;
	struct btrfs_key key;
	struct inode *inode;
	struct kobject *kobj = NULL;
	int ret;
	int index;
	int factor;

	root = root->fs_info->extent_root;

	block_group = btrfs_lookup_block_group(root->fs_info, group_start);
	BUG_ON(!block_group);
	BUG_ON(!block_group->ro);

	/*
	 * Free the reserved super bytes from this block group before
	 * removing it.
	 */
	free_excluded_extents(root, block_group);

	memcpy(&key, &block_group->key, sizeof(key));
	index = get_block_group_index(block_group);
	if (block_group->flags & (BTRFS_BLOCK_GROUP_DUP |
				  BTRFS_BLOCK_GROUP_RAID1 |
				  BTRFS_BLOCK_GROUP_RAID10))
		factor = 2;
	else
		factor = 1;

	/* make sure this block group isn't part of an allocation cluster */
	cluster = &root->fs_info->data_alloc_cluster;
	spin_lock(&cluster->refill_lock);
	btrfs_return_cluster_to_free_space(block_group, cluster);
	spin_unlock(&cluster->refill_lock);

	/*
	 * make sure this block group isn't part of a metadata
	 * allocation cluster
	 */
	cluster = &root->fs_info->meta_alloc_cluster;
	spin_lock(&cluster->refill_lock);
	btrfs_return_cluster_to_free_space(block_group, cluster);
	spin_unlock(&cluster->refill_lock);

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	inode = lookup_free_space_inode(tree_root, block_group, path);
	if (!IS_ERR(inode)) {
		ret = btrfs_orphan_add(trans, inode);
		if (ret) {
			btrfs_add_delayed_iput(inode);
			goto out;
		}
		clear_nlink(inode);
		/* One for the block groups ref */
		spin_lock(&block_group->lock);
		if (block_group->iref) {
			block_group->iref = 0;
			block_group->inode = NULL;
			spin_unlock(&block_group->lock);
			iput(inode);
		} else {
			spin_unlock(&block_group->lock);
		}
		/* One for our lookup ref */
		btrfs_add_delayed_iput(inode);
	}

	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.offset = block_group->key.objectid;
	key.type = 0;

	ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1);
	if (ret < 0)
		goto out;
	if (ret > 0)
		btrfs_release_path(path);
	if (ret == 0) {
		ret = btrfs_del_item(trans, tree_root, path);
		if (ret)
			goto out;
		btrfs_release_path(path);
	}

	spin_lock(&root->fs_info->block_group_cache_lock);
	rb_erase(&block_group->cache_node,
		 &root->fs_info->block_group_cache_tree);

	if (root->fs_info->first_logical_byte == block_group->key.objectid)
		root->fs_info->first_logical_byte = (u64)-1;
	spin_unlock(&root->fs_info->block_group_cache_lock);

	down_write(&block_group->space_info->groups_sem);
	/*
	 * we must use list_del_init so people can check to see if they
	 * are still on the list after taking the semaphore
	 */
	list_del_init(&block_group->list);
	if (list_empty(&block_group->space_info->block_groups[index])) {
		kobj = block_group->space_info->block_group_kobjs[index];
		block_group->space_info->block_group_kobjs[index] = NULL;
		clear_avail_alloc_bits(root->fs_info, block_group->flags);
	}
	up_write(&block_group->space_info->groups_sem);
	if (kobj) {
		kobject_del(kobj);
		kobject_put(kobj);
	}

	if (block_group->cached == BTRFS_CACHE_STARTED)
		wait_block_group_cache_done(block_group);

	btrfs_remove_free_space_cache(block_group);

	spin_lock(&block_group->space_info->lock);
	block_group->space_info->total_bytes -= block_group->key.offset;
	block_group->space_info->bytes_readonly -= block_group->key.offset;
	block_group->space_info->disk_total -= block_group->key.offset * factor;
	spin_unlock(&block_group->space_info->lock);

	memcpy(&key, &block_group->key, sizeof(key));

	btrfs_clear_space_info_full(root->fs_info);

	btrfs_put_block_group(block_group);
	btrfs_put_block_group(block_group);

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0)
		ret = -EIO;
	if (ret < 0)
		goto out;

	ret = btrfs_del_item(trans, root, path);
out:
	btrfs_free_path(path);
	return ret;
}
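
/*
 * create empty space_infos for the system, metadata and data profiles
 * (or a single mixed metadata+data one when the MIXED_GROUPS incompat
 * feature is set).
 */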
int btrfs_init_space_info(struct btrfs_fs_info *fs_info)
{
	struct btrfs_space_info *space_info;
	struct btrfs_super_block *disk_super;
	u64 features;
	u64 flags;
	int mixed = 0;
	int ret;

	disk_super = fs_info->super_copy;
	if (!btrfs_super_root(disk_super))
		return 1;

	features = btrfs_super_incompat_flags(disk_super);
	if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
		mixed = 1;

	flags = BTRFS_BLOCK_GROUP_SYSTEM;
	ret = update_space_info(fs_info, flags, 0, 0, &space_info);
	if (ret)
		goto out;

	if (mixed) {
		flags = BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA;
		ret = update_space_info(fs_info, flags, 0, 0, &space_info);
	} else {
		flags = BTRFS_BLOCK_GROUP_METADATA;
		ret = update_space_info(fs_info, flags, 0, 0, &space_info);
		if (ret)
			goto out;

		flags = BTRFS_BLOCK_GROUP_DATA;
		ret = update_space_info(fs_info, flags, 0, 0, &space_info);
	}
out:
	return ret;
}

int btrfs_error_unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
{
	return unpin_extent_range(root, start, end);
}

int btrfs_error_discard_extent(struct btrfs_root *root, u64 bytenr,
			       u64 num_bytes, u64 *actual_bytes)
{
	return btrfs_discard_extent(root, bytenr, num_bytes, actual_bytes);
}
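
/*
 * trim the range described by 'range': walk all block groups that
 * overlap it, make sure each is cached, and discard its free space via
 * btrfs_trim_block_group(). the total trimmed byte count is returned in
 * range->len.
 */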
int btrfs_trim_fs(struct btrfs_root *root, struct fstrim_range *range)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_block_group_cache *cache = NULL;
	u64 group_trimmed;
	u64 start;
	u64 end;
	u64 trimmed = 0;
	u64 total_bytes = btrfs_super_total_bytes(fs_info->super_copy);
	int ret = 0;

	/*
	 * try to trim all FS space; our block group may start from a
	 * non-zero offset.
	 */
	if (range->len == total_bytes)
		cache = btrfs_lookup_first_block_group(fs_info, range->start);
	else
		cache = btrfs_lookup_block_group(fs_info, range->start);

	while (cache) {
		if (cache->key.objectid >= (range->start + range->len)) {
			btrfs_put_block_group(cache);
			break;
		}

		start = max(range->start, cache->key.objectid);
		end = min(range->start + range->len,
				cache->key.objectid + cache->key.offset);

		if (end - start >= range->minlen) {
			if (!block_group_cache_done(cache)) {
				ret = cache_block_group(cache, 0);
				if (ret) {
					btrfs_put_block_group(cache);
					break;
				}
				ret = wait_block_group_cache_done(cache);
				if (ret) {
					btrfs_put_block_group(cache);
					break;
				}
			}
			ret = btrfs_trim_block_group(cache,
						     &group_trimmed,
						     start,
						     end,
						     range->minlen);

			trimmed += group_trimmed;
			if (ret) {
				btrfs_put_block_group(cache);
				break;
			}
		}

		cache = next_block_group(fs_info->tree_root, cache);
	}

	range->len = trimmed;
	return ret;
}

/*
 * btrfs_{start,end}_nocow_write() are similar to mnt_{want,drop}_write().
 * They are used to prevent tasks from writing data into the page cache
 * via nocow before the subvolume is snapshotted, while still flushing
 * that data to disk after the snapshot is created.
 */
void btrfs_end_nocow_write(struct btrfs_root *root)
{
	percpu_counter_dec(&root->subv_writers->counter);
	/*
	 * Make sure counter is updated before we wake up
	 * waiters.
	 */
	smp_mb();
	if (waitqueue_active(&root->subv_writers->wait))
		wake_up(&root->subv_writers->wait);
}

int btrfs_start_nocow_write(struct btrfs_root *root)
{
	if (unlikely(atomic_read(&root->will_be_snapshoted)))
		return 0;

	percpu_counter_inc(&root->subv_writers->counter);
	/*
	 * Make sure counter is updated before we check for snapshot creation.
	 */
	smp_mb();
	if (unlikely(atomic_read(&root->will_be_snapshoted))) {
		btrfs_end_nocow_write(root);
		return 0;
	}
	return 1;
}