extent-tree.c

/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/rcupdate.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/percpu_counter.h>
#include <linux/lockdep.h>
#include "hash.h"
#include "tree-log.h"
#include "disk-io.h"
#include "print-tree.h"
#include "volumes.h"
#include "raid56.h"
#include "locking.h"
#include "free-space-cache.h"
#include "free-space-tree.h"
#include "math.h"
#include "sysfs.h"
#include "qgroup.h"
#include "ref-verify.h"

#undef SCRAMBLE_DELAYED_REFS

/*
 * Control flags for do_chunk_alloc's force field.
 *
 * CHUNK_ALLOC_NO_FORCE means to allocate a chunk only
 * if we really need one.
 *
 * CHUNK_ALLOC_LIMITED means to try to allocate one only
 * if we have very few chunks already allocated.  This is
 * used as part of the clustering code to help make sure
 * we have a good pool of storage to cluster in, without
 * filling the FS with empty chunks.
 *
 * CHUNK_ALLOC_FORCE means we must try to allocate one.
 */
enum {
	CHUNK_ALLOC_NO_FORCE = 0,
	CHUNK_ALLOC_LIMITED = 1,
	CHUNK_ALLOC_FORCE = 2,
};

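/*
 * Illustrative call (editor's sketch, not from the original file): a
 * caller that only wants a new chunk when space is genuinely short
 * would pass CHUNK_ALLOC_NO_FORCE to do_chunk_alloc() declared below:
 *
 *	ret = do_chunk_alloc(trans, fs_info, alloc_flags,
 *			     CHUNK_ALLOC_NO_FORCE);
 *
 * where alloc_flags stands in for a BTRFS_BLOCK_GROUP_* type mask.
 */
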
static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
			       struct btrfs_fs_info *fs_info,
			       struct btrfs_delayed_ref_node *node, u64 parent,
			       u64 root_objectid, u64 owner_objectid,
			       u64 owner_offset, int refs_to_drop,
			       struct btrfs_delayed_extent_op *extra_op);
static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
				    struct extent_buffer *leaf,
				    struct btrfs_extent_item *ei);
static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
				      struct btrfs_fs_info *fs_info,
				      u64 parent, u64 root_objectid,
				      u64 flags, u64 owner, u64 offset,
				      struct btrfs_key *ins, int ref_mod);
static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
				     struct btrfs_fs_info *fs_info,
				     u64 parent, u64 root_objectid,
				     u64 flags, struct btrfs_disk_key *key,
				     int level, struct btrfs_key *ins);
static int do_chunk_alloc(struct btrfs_trans_handle *trans,
			  struct btrfs_fs_info *fs_info, u64 flags,
			  int force);
static int find_next_key(struct btrfs_path *path, int level,
			 struct btrfs_key *key);
static void dump_space_info(struct btrfs_fs_info *fs_info,
			    struct btrfs_space_info *info, u64 bytes,
			    int dump_block_groups);
static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
			       u64 num_bytes);
static void space_info_add_new_bytes(struct btrfs_fs_info *fs_info,
				     struct btrfs_space_info *space_info,
				     u64 num_bytes);
static void space_info_add_old_bytes(struct btrfs_fs_info *fs_info,
				     struct btrfs_space_info *space_info,
				     u64 num_bytes);

static noinline int
block_group_cache_done(struct btrfs_block_group_cache *cache)
{
	smp_mb();
	return cache->cached == BTRFS_CACHE_FINISHED ||
		cache->cached == BTRFS_CACHE_ERROR;
}

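/*
 * Descriptive note (editor's reading): the smp_mb() above orders this
 * load of cache->cached against the caller's earlier accesses; waiters
 * woken via the wake_up(&caching_ctl->wait) calls further down re-check
 * this predicate, so the barrier keeps them from acting on a stale
 * cached state.
 */
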
static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
{
	return (cache->flags & bits) == bits;
}

void btrfs_get_block_group(struct btrfs_block_group_cache *cache)
{
	atomic_inc(&cache->count);
}

void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
{
	if (atomic_dec_and_test(&cache->count)) {
		WARN_ON(cache->pinned > 0);
		WARN_ON(cache->reserved > 0);

		/*
		 * If the tree is not empty, someone is still holding the
		 * mutex of a full_stripe_lock, which can only be released
		 * by that caller, and freeing the cache now would cause a
		 * use-after-free when the caller tries to release the lock.
		 * There is no better way to handle this than to warn.
		 */
		WARN_ON(!RB_EMPTY_ROOT(&cache->full_stripe_locks_root.root));
		kfree(cache->free_space_ctl);
		kfree(cache);
	}
}

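/*
 * Reference-counting sketch (illustrative): every btrfs_get_block_group()
 * must be balanced by a btrfs_put_block_group(); the final put frees the
 * cache and its free_space_ctl.  Lookup helpers such as
 * block_group_cache_tree_search() below take a reference on behalf of
 * the caller.
 */
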
/*
 * this adds the block group to the fs_info rb tree for the block group
 * cache
 */
static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
				       struct btrfs_block_group_cache *block_group)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct btrfs_block_group_cache *cache;

	spin_lock(&info->block_group_cache_lock);
	p = &info->block_group_cache_tree.rb_node;

	while (*p) {
		parent = *p;
		cache = rb_entry(parent, struct btrfs_block_group_cache,
				 cache_node);
		if (block_group->key.objectid < cache->key.objectid) {
			p = &(*p)->rb_left;
		} else if (block_group->key.objectid > cache->key.objectid) {
			p = &(*p)->rb_right;
		} else {
			spin_unlock(&info->block_group_cache_lock);
			return -EEXIST;
		}
	}

	rb_link_node(&block_group->cache_node, parent, p);
	rb_insert_color(&block_group->cache_node,
			&info->block_group_cache_tree);

	if (info->first_logical_byte > block_group->key.objectid)
		info->first_logical_byte = block_group->key.objectid;

	spin_unlock(&info->block_group_cache_lock);

	return 0;
}

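/*
 * Descriptive note: the rb tree is keyed by key.objectid, the logical
 * start byte of the block group, so inserting a block group whose start
 * bytenr is already present fails with -EEXIST rather than silently
 * replacing the existing node.
 */
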
/*
 * This will return the block group at or after bytenr if contains is 0,
 * else it will return the block group that contains the bytenr.
 */
static struct btrfs_block_group_cache *
block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
			      int contains)
{
	struct btrfs_block_group_cache *cache, *ret = NULL;
	struct rb_node *n;
	u64 end, start;

	spin_lock(&info->block_group_cache_lock);
	n = info->block_group_cache_tree.rb_node;

	while (n) {
		cache = rb_entry(n, struct btrfs_block_group_cache,
				 cache_node);
		end = cache->key.objectid + cache->key.offset - 1;
		start = cache->key.objectid;

		if (bytenr < start) {
			if (!contains && (!ret || start < ret->key.objectid))
				ret = cache;
			n = n->rb_left;
		} else if (bytenr > start) {
			if (contains && bytenr <= end) {
				ret = cache;
				break;
			}
			n = n->rb_right;
		} else {
			ret = cache;
			break;
		}
	}
	if (ret) {
		btrfs_get_block_group(ret);
		if (bytenr == 0 && info->first_logical_byte > ret->key.objectid)
			info->first_logical_byte = ret->key.objectid;
	}
	spin_unlock(&info->block_group_cache_lock);

	return ret;
}

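/*
 * Usage sketch (illustrative, not from the original file): find the
 * block group containing a logical address and drop the reference the
 * search took:
 *
 *	struct btrfs_block_group_cache *bg;
 *
 *	bg = block_group_cache_tree_search(info, bytenr, 1);
 *	if (bg) {
 *		... inspect bg->key.objectid / bg->key.offset ...
 *		btrfs_put_block_group(bg);
 *	}
 */
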
static int add_excluded_extent(struct btrfs_fs_info *fs_info,
			       u64 start, u64 num_bytes)
{
	u64 end = start + num_bytes - 1;

	set_extent_bits(&fs_info->freed_extents[0],
			start, end, EXTENT_UPTODATE);
	set_extent_bits(&fs_info->freed_extents[1],
			start, end, EXTENT_UPTODATE);
	return 0;
}

static void free_excluded_extents(struct btrfs_fs_info *fs_info,
				  struct btrfs_block_group_cache *cache)
{
	u64 start, end;

	start = cache->key.objectid;
	end = start + cache->key.offset - 1;

	clear_extent_bits(&fs_info->freed_extents[0],
			  start, end, EXTENT_UPTODATE);
	clear_extent_bits(&fs_info->freed_extents[1],
			  start, end, EXTENT_UPTODATE);
}

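/*
 * Descriptive note (editor's understanding): fs_info->pinned_extents
 * points at one of the two freed_extents trees, and the two are swapped
 * at transaction commit.  Marking the excluded range in both trees keeps
 * the exclusion visible no matter which tree is currently acting as
 * pinned_extents.
 */
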
static int exclude_super_stripes(struct btrfs_fs_info *fs_info,
				 struct btrfs_block_group_cache *cache)
{
	u64 bytenr;
	u64 *logical;
	int stripe_len;
	int i, nr, ret;

	if (cache->key.objectid < BTRFS_SUPER_INFO_OFFSET) {
		stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->key.objectid;
		cache->bytes_super += stripe_len;
		ret = add_excluded_extent(fs_info, cache->key.objectid,
					  stripe_len);
		if (ret)
			return ret;
	}

	for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
		bytenr = btrfs_sb_offset(i);
		ret = btrfs_rmap_block(fs_info, cache->key.objectid,
				       bytenr, 0, &logical, &nr, &stripe_len);
		if (ret)
			return ret;

		while (nr--) {
			u64 start, len;

			if (logical[nr] > cache->key.objectid +
			    cache->key.offset)
				continue;

			if (logical[nr] + stripe_len <= cache->key.objectid)
				continue;

			start = logical[nr];
			if (start < cache->key.objectid) {
				start = cache->key.objectid;
				len = (logical[nr] + stripe_len) - start;
			} else {
				len = min_t(u64, stripe_len,
					    cache->key.objectid +
					    cache->key.offset - start);
			}

			cache->bytes_super += len;
			ret = add_excluded_extent(fs_info, start, len);
			if (ret) {
				kfree(logical);
				return ret;
			}
		}

		kfree(logical);
	}
	return 0;
}

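/*
 * Descriptive note: btrfs keeps up to BTRFS_SUPER_MIRROR_MAX copies of
 * the superblock at fixed logical offsets returned by btrfs_sb_offset()
 * (BTRFS_SUPER_INFO_OFFSET, i.e. 64KiB, for the primary copy, then
 * higher fixed offsets for the mirrors).  Any stripe of a block group
 * that maps over one of those copies is excluded from allocation here
 * and accounted in cache->bytes_super.
 */
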
static struct btrfs_caching_control *
get_caching_control(struct btrfs_block_group_cache *cache)
{
	struct btrfs_caching_control *ctl;

	spin_lock(&cache->lock);
	if (!cache->caching_ctl) {
		spin_unlock(&cache->lock);
		return NULL;
	}

	ctl = cache->caching_ctl;
	refcount_inc(&ctl->count);
	spin_unlock(&cache->lock);
	return ctl;
}

static void put_caching_control(struct btrfs_caching_control *ctl)
{
	if (refcount_dec_and_test(&ctl->count))
		kfree(ctl);
}

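/*
 * Descriptive note: get_caching_control() returns NULL when
 * cache->caching_ctl has been cleared (no caching pass is attached);
 * otherwise it returns the control with an extra reference that the
 * caller must drop with put_caching_control().
 */
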
#ifdef CONFIG_BTRFS_DEBUG
static void fragment_free_space(struct btrfs_block_group_cache *block_group)
{
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	u64 start = block_group->key.objectid;
	u64 len = block_group->key.offset;
	u64 chunk = block_group->flags & BTRFS_BLOCK_GROUP_METADATA ?
		fs_info->nodesize : fs_info->sectorsize;
	u64 step = chunk << 1;

	while (len > chunk) {
		btrfs_remove_free_space(block_group, start, chunk);
		start += step;
		if (len < step)
			len = 0;
		else
			len -= step;
	}
}
#endif

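/*
 * Worked example (illustrative): for a metadata block group with a
 * 16KiB nodesize, chunk = 16KiB and step = 32KiB, so the loop removes
 * free space at offsets 0, 32KiB, 64KiB, ... leaving a checkerboard of
 * 16KiB free / 16KiB removed.  This deliberately fragments the block
 * group so the debug code can exercise allocation under fragmentation.
 */
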
/*
 * This is only called by cache_block_group.  Since we could have freed
 * extents, we need to check the pinned_extents for any extents that
 * cannot be used yet, because their free space will not be released
 * until the transaction commits.
 */
u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
		       struct btrfs_fs_info *info, u64 start, u64 end)
{
	u64 extent_start, extent_end, size, total_added = 0;
	int ret;

	while (start < end) {
		ret = find_first_extent_bit(info->pinned_extents, start,
					    &extent_start, &extent_end,
					    EXTENT_DIRTY | EXTENT_UPTODATE,
					    NULL);
		if (ret)
			break;

		if (extent_start <= start) {
			start = extent_end + 1;
		} else if (extent_start > start && extent_start < end) {
			size = extent_start - start;
			total_added += size;
			ret = btrfs_add_free_space(block_group, start,
						   size);
			BUG_ON(ret); /* -ENOMEM or logic error */
			start = extent_end + 1;
		} else {
			break;
		}
	}

	if (start < end) {
		size = end - start;
		total_added += size;
		ret = btrfs_add_free_space(block_group, start, size);
		BUG_ON(ret); /* -ENOMEM or logic error */
	}

	return total_added;
}

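/*
 * Worked example (illustrative): caching the range [0, 100) while a
 * pinned extent covers [40, 60) adds [0, 40) and [60, 100) as free
 * space and returns 80; the pinned gap stays unavailable until the
 * transaction that freed it commits.
 */
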
static int load_extent_tree_free(struct btrfs_caching_control *caching_ctl)
{
	struct btrfs_block_group_cache *block_group = caching_ctl->block_group;
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	struct btrfs_root *extent_root = fs_info->extent_root;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	u64 total_found = 0;
	u64 last = 0;
	u32 nritems;
	int ret;
	bool wakeup = true;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);

#ifdef CONFIG_BTRFS_DEBUG
	/*
	 * If we're fragmenting we don't want to make anybody think we can
	 * allocate from this block group until we've had a chance to fragment
	 * the free space.
	 */
	if (btrfs_should_fragment_free_space(block_group))
		wakeup = false;
#endif

	/*
	 * We don't want to deadlock with somebody trying to allocate a new
	 * extent for the extent root while also trying to search the extent
	 * root to add free space. So we skip locking and search the commit
	 * root, since it's read-only.
	 */
	path->skip_locking = 1;
	path->search_commit_root = 1;
	path->reada = READA_FORWARD;

	key.objectid = last;
	key.offset = 0;
	key.type = BTRFS_EXTENT_ITEM_KEY;

next:
	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	leaf = path->nodes[0];
	nritems = btrfs_header_nritems(leaf);

	while (1) {
		if (btrfs_fs_closing(fs_info) > 1) {
			last = (u64)-1;
			break;
		}

		if (path->slots[0] < nritems) {
			btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		} else {
			ret = find_next_key(path, 0, &key);
			if (ret)
				break;

			if (need_resched() ||
			    rwsem_is_contended(&fs_info->commit_root_sem)) {
				if (wakeup)
					caching_ctl->progress = last;
				btrfs_release_path(path);
				up_read(&fs_info->commit_root_sem);
				mutex_unlock(&caching_ctl->mutex);
				cond_resched();
				mutex_lock(&caching_ctl->mutex);
				down_read(&fs_info->commit_root_sem);
				goto next;
			}

			ret = btrfs_next_leaf(extent_root, path);
			if (ret < 0)
				goto out;
			if (ret)
				break;
			leaf = path->nodes[0];
			nritems = btrfs_header_nritems(leaf);
			continue;
		}

		if (key.objectid < last) {
			key.objectid = last;
			key.offset = 0;
			key.type = BTRFS_EXTENT_ITEM_KEY;

			if (wakeup)
				caching_ctl->progress = last;
			btrfs_release_path(path);
			goto next;
		}

		if (key.objectid < block_group->key.objectid) {
			path->slots[0]++;
			continue;
		}

		if (key.objectid >= block_group->key.objectid +
		    block_group->key.offset)
			break;

		if (key.type == BTRFS_EXTENT_ITEM_KEY ||
		    key.type == BTRFS_METADATA_ITEM_KEY) {
			total_found += add_new_free_space(block_group,
							  fs_info, last,
							  key.objectid);
			if (key.type == BTRFS_METADATA_ITEM_KEY)
				last = key.objectid +
					fs_info->nodesize;
			else
				last = key.objectid + key.offset;

			if (total_found > CACHING_CTL_WAKE_UP) {
				total_found = 0;
				if (wakeup)
					wake_up(&caching_ctl->wait);
			}
		}
		path->slots[0]++;
	}
	ret = 0;

	total_found += add_new_free_space(block_group, fs_info, last,
					  block_group->key.objectid +
					  block_group->key.offset);
	caching_ctl->progress = (u64)-1;

out:
	btrfs_free_path(path);
	return ret;
}

static noinline void caching_thread(struct btrfs_work *work)
{
	struct btrfs_block_group_cache *block_group;
	struct btrfs_fs_info *fs_info;
	struct btrfs_caching_control *caching_ctl;
	struct btrfs_root *extent_root;
	int ret;

	caching_ctl = container_of(work, struct btrfs_caching_control, work);
	block_group = caching_ctl->block_group;
	fs_info = block_group->fs_info;
	extent_root = fs_info->extent_root;

	mutex_lock(&caching_ctl->mutex);
	down_read(&fs_info->commit_root_sem);

	if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE))
		ret = load_free_space_tree(caching_ctl);
	else
		ret = load_extent_tree_free(caching_ctl);

	spin_lock(&block_group->lock);
	block_group->caching_ctl = NULL;
	block_group->cached = ret ? BTRFS_CACHE_ERROR : BTRFS_CACHE_FINISHED;
	spin_unlock(&block_group->lock);

#ifdef CONFIG_BTRFS_DEBUG
	if (btrfs_should_fragment_free_space(block_group)) {
		u64 bytes_used;

		spin_lock(&block_group->space_info->lock);
		spin_lock(&block_group->lock);
		bytes_used = block_group->key.offset -
			btrfs_block_group_used(&block_group->item);
		block_group->space_info->bytes_used += bytes_used >> 1;
		spin_unlock(&block_group->lock);
		spin_unlock(&block_group->space_info->lock);
		fragment_free_space(block_group);
	}
#endif

	caching_ctl->progress = (u64)-1;

	up_read(&fs_info->commit_root_sem);
	free_excluded_extents(fs_info, block_group);
	mutex_unlock(&caching_ctl->mutex);

	wake_up(&caching_ctl->wait);

	put_caching_control(caching_ctl);
	btrfs_put_block_group(block_group);
}

static int cache_block_group(struct btrfs_block_group_cache *cache,
			     int load_cache_only)
{
	DEFINE_WAIT(wait);
	struct btrfs_fs_info *fs_info = cache->fs_info;
	struct btrfs_caching_control *caching_ctl;
	int ret = 0;

	caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
	if (!caching_ctl)
		return -ENOMEM;

	INIT_LIST_HEAD(&caching_ctl->list);
	mutex_init(&caching_ctl->mutex);
	init_waitqueue_head(&caching_ctl->wait);
	caching_ctl->block_group = cache;
	caching_ctl->progress = cache->key.objectid;
	refcount_set(&caching_ctl->count, 1);
	btrfs_init_work(&caching_ctl->work, btrfs_cache_helper,
			caching_thread, NULL, NULL);

	spin_lock(&cache->lock);
	/*
	 * This should be a rare occasion, but it can happen when one thread
	 * starts to load the space cache info and then some other thread
	 * starts a transaction commit which tries to do an allocation while
	 * the first thread is still loading the space cache info. The
	 * previous loop should have kept us from choosing this block group,
	 * but if we've moved to the state where we will wait on caching
	 * block groups we need to first check if we're doing a fast load
	 * here, so we can wait for it to finish; otherwise we could end up
	 * allocating from a block group whose cache gets evicted for one
	 * reason or another.
	 */
	while (cache->cached == BTRFS_CACHE_FAST) {
		struct btrfs_caching_control *ctl;

		ctl = cache->caching_ctl;
		refcount_inc(&ctl->count);
		prepare_to_wait(&ctl->wait, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock(&cache->lock);

		schedule();

		finish_wait(&ctl->wait, &wait);
		put_caching_control(ctl);
		spin_lock(&cache->lock);
	}

	if (cache->cached != BTRFS_CACHE_NO) {
		spin_unlock(&cache->lock);
		kfree(caching_ctl);
		return 0;
	}
	WARN_ON(cache->caching_ctl);
	cache->caching_ctl = caching_ctl;
	cache->cached = BTRFS_CACHE_FAST;
	spin_unlock(&cache->lock);

	if (btrfs_test_opt(fs_info, SPACE_CACHE)) {
		mutex_lock(&caching_ctl->mutex);
		ret = load_free_space_cache(fs_info, cache);

		spin_lock(&cache->lock);
		if (ret == 1) {
			cache->caching_ctl = NULL;
			cache->cached = BTRFS_CACHE_FINISHED;
			cache->last_byte_to_unpin = (u64)-1;
			caching_ctl->progress = (u64)-1;
		} else {
			if (load_cache_only) {
				cache->caching_ctl = NULL;
				cache->cached = BTRFS_CACHE_NO;
			} else {
				cache->cached = BTRFS_CACHE_STARTED;
				cache->has_caching_ctl = 1;
			}
		}
		spin_unlock(&cache->lock);
#ifdef CONFIG_BTRFS_DEBUG
		if (ret == 1 &&
		    btrfs_should_fragment_free_space(cache)) {
			u64 bytes_used;

			spin_lock(&cache->space_info->lock);
			spin_lock(&cache->lock);
			bytes_used = cache->key.offset -
				btrfs_block_group_used(&cache->item);
			cache->space_info->bytes_used += bytes_used >> 1;
			spin_unlock(&cache->lock);
			spin_unlock(&cache->space_info->lock);
			fragment_free_space(cache);
		}
#endif
		mutex_unlock(&caching_ctl->mutex);

		wake_up(&caching_ctl->wait);
		if (ret == 1) {
			put_caching_control(caching_ctl);
			free_excluded_extents(fs_info, cache);
			return 0;
		}
	} else {
		/*
		 * We're either using the free space tree or no caching at all.
		 * Set cached to the appropriate value and wakeup any waiters.
		 */
		spin_lock(&cache->lock);
		if (load_cache_only) {
			cache->caching_ctl = NULL;
			cache->cached = BTRFS_CACHE_NO;
		} else {
			cache->cached = BTRFS_CACHE_STARTED;
			cache->has_caching_ctl = 1;
		}
		spin_unlock(&cache->lock);
		wake_up(&caching_ctl->wait);
	}

	if (load_cache_only) {
		put_caching_control(caching_ctl);
		return 0;
	}

	down_write(&fs_info->commit_root_sem);
	refcount_inc(&caching_ctl->count);
	list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
	up_write(&fs_info->commit_root_sem);

	btrfs_get_block_group(cache);

	btrfs_queue_work(fs_info->caching_workers, &caching_ctl->work);

	return ret;
}
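
/*
 * Summary of the state transitions driven by cache_block_group() and
 * caching_thread() above (for reference; the authoritative logic is the
 * code itself):
 *
 *	BTRFS_CACHE_NO -> BTRFS_CACHE_FAST        (we own the load attempt)
 *	BTRFS_CACHE_FAST -> BTRFS_CACHE_FINISHED  (space cache hit, ret == 1)
 *	BTRFS_CACHE_FAST -> BTRFS_CACHE_NO        (miss and load_cache_only)
 *	BTRFS_CACHE_FAST -> BTRFS_CACHE_STARTED   (miss, kick caching_thread)
 *	BTRFS_CACHE_STARTED -> BTRFS_CACHE_FINISHED or BTRFS_CACHE_ERROR
 *	                                          (set by caching_thread())
 */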

/*
 * return the block group that starts at or after bytenr
 */
static struct btrfs_block_group_cache *
btrfs_lookup_first_block_group(struct btrfs_fs_info *info, u64 bytenr)
{
	return block_group_cache_tree_search(info, bytenr, 0);
}

/*
 * return the block group that contains the given bytenr
 */
struct btrfs_block_group_cache *btrfs_lookup_block_group(
						 struct btrfs_fs_info *info,
						 u64 bytenr)
{
	return block_group_cache_tree_search(info, bytenr, 1);
}

static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
						  u64 flags)
{
	struct list_head *head = &info->space_info;
	struct btrfs_space_info *found;

	flags &= BTRFS_BLOCK_GROUP_TYPE_MASK;

	rcu_read_lock();
	list_for_each_entry_rcu(found, head, list) {
		if (found->flags & flags) {
			rcu_read_unlock();
			return found;
		}
	}
	rcu_read_unlock();
	return NULL;
}

static void add_pinned_bytes(struct btrfs_fs_info *fs_info, s64 num_bytes,
			     u64 owner, u64 root_objectid)
{
	struct btrfs_space_info *space_info;
	u64 flags;

	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		if (root_objectid == BTRFS_CHUNK_TREE_OBJECTID)
			flags = BTRFS_BLOCK_GROUP_SYSTEM;
		else
			flags = BTRFS_BLOCK_GROUP_METADATA;
	} else {
		flags = BTRFS_BLOCK_GROUP_DATA;
	}

	space_info = __find_space_info(fs_info, flags);
	ASSERT(space_info);
	percpu_counter_add(&space_info->total_bytes_pinned, num_bytes);
}

/*
 * after adding space to the filesystem, we need to clear the full flags
 * on all the space infos.
 */
void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
{
	struct list_head *head = &info->space_info;
	struct btrfs_space_info *found;

	rcu_read_lock();
	list_for_each_entry_rcu(found, head, list)
		found->full = 0;
	rcu_read_unlock();
}

/* simple helper to search for an existing data extent at a given offset */
int btrfs_lookup_data_extent(struct btrfs_fs_info *fs_info, u64 start, u64 len)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_path *path;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = start;
	key.offset = len;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	ret = btrfs_search_slot(NULL, fs_info->extent_root, &key, path, 0, 0);
	btrfs_free_path(path);
	return ret;
}

/*
 * Helper function to look up the reference count and flags of a tree block.
 *
 * The head node for a delayed ref is used to store the sum of all the
 * reference count modifications queued up in the rbtree. The head node
 * may also store the extent flags to set. This way you can check what
 * the reference count and extent flags would be after all of the queued
 * delayed refs are processed, without actually processing them.
 */
int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
			     struct btrfs_fs_info *fs_info, u64 bytenr,
			     u64 offset, int metadata, u64 *refs, u64 *flags)
{
	struct btrfs_delayed_ref_head *head;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_path *path;
	struct btrfs_extent_item *ei;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	u32 item_size;
	u64 num_refs;
	u64 extent_flags;
	int ret;

	/*
	 * If we don't have skinny metadata, don't bother doing anything
	 * different
	 */
	if (metadata && !btrfs_fs_incompat(fs_info, SKINNY_METADATA)) {
		offset = fs_info->nodesize;
		metadata = 0;
	}

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	if (!trans) {
		path->skip_locking = 1;
		path->search_commit_root = 1;
	}

search_again:
	key.objectid = bytenr;
	key.offset = offset;
	if (metadata)
		key.type = BTRFS_METADATA_ITEM_KEY;
	else
		key.type = BTRFS_EXTENT_ITEM_KEY;

	ret = btrfs_search_slot(trans, fs_info->extent_root, &key, path, 0, 0);
	if (ret < 0)
		goto out_free;

	if (ret > 0 && metadata && key.type == BTRFS_METADATA_ITEM_KEY) {
		if (path->slots[0]) {
			path->slots[0]--;
			btrfs_item_key_to_cpu(path->nodes[0], &key,
					      path->slots[0]);
			if (key.objectid == bytenr &&
			    key.type == BTRFS_EXTENT_ITEM_KEY &&
			    key.offset == fs_info->nodesize)
				ret = 0;
		}
	}

	if (ret == 0) {
		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		if (item_size >= sizeof(*ei)) {
			ei = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_extent_item);
			num_refs = btrfs_extent_refs(leaf, ei);
			extent_flags = btrfs_extent_flags(leaf, ei);
		} else {
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
			struct btrfs_extent_item_v0 *ei0;

			BUG_ON(item_size != sizeof(*ei0));
			ei0 = btrfs_item_ptr(leaf, path->slots[0],
					     struct btrfs_extent_item_v0);
			num_refs = btrfs_extent_refs_v0(leaf, ei0);
			/* FIXME: this isn't correct for data */
			extent_flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
#else
			BUG();
#endif
		}
		BUG_ON(num_refs == 0);
	} else {
		num_refs = 0;
		extent_flags = 0;
		ret = 0;
	}

	if (!trans)
		goto out;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);
	head = btrfs_find_delayed_ref_head(delayed_refs, bytenr);
	if (head) {
		if (!mutex_trylock(&head->mutex)) {
			refcount_inc(&head->refs);
			spin_unlock(&delayed_refs->lock);

			btrfs_release_path(path);

			/*
			 * Mutex was contended, block until it's released and try
			 * again
			 */
			mutex_lock(&head->mutex);
			mutex_unlock(&head->mutex);
			btrfs_put_delayed_ref_head(head);
			goto search_again;
		}
		spin_lock(&head->lock);
		if (head->extent_op && head->extent_op->update_flags)
			extent_flags |= head->extent_op->flags_to_set;
		else
			BUG_ON(num_refs == 0);

		num_refs += head->ref_mod;
		spin_unlock(&head->lock);
		mutex_unlock(&head->mutex);
	}
	spin_unlock(&delayed_refs->lock);
out:
	WARN_ON(num_refs == 0);
	if (refs)
		*refs = num_refs;
	if (flags)
		*flags = extent_flags;
out_free:
	btrfs_free_path(path);
	return ret;
}
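
/*
 * Example of the delayed-ref adjustment above (hypothetical numbers): if
 * the extent item on disk records 2 references and the delayed ref head
 * for this bytenr has ref_mod == -1 queued, btrfs_lookup_extent_info()
 * reports *refs == 1, i.e. the count the extent will have once the
 * delayed refs are run.
 */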

/*
 * Back reference rules. Back refs have three main goals:
 *
 * 1) differentiate between all holders of references to an extent so that
 *    when a reference is dropped we can make sure it was a valid reference
 *    before freeing the extent.
 *
 * 2) Provide enough information to quickly find the holders of an extent
 *    if we notice a given block is corrupted or bad.
 *
 * 3) Make it easy to migrate blocks for FS shrinking or storage pool
 *    maintenance. This is actually the same as #2, but with a slightly
 *    different use case.
 *
 * There are two kinds of back refs. The implicit back ref is optimized
 * for pointers in non-shared tree blocks. For a given pointer in a block,
 * back refs of this kind provide information about the block's owner tree
 * and the pointer's key. This information allows us to find the block by
 * b-tree searching. The full back ref is for pointers in tree blocks not
 * referenced by their owner trees. The location of the tree block is
 * recorded in the back ref. Actually the full back ref is generic, and
 * can be used in all cases the implicit back ref is used. The major
 * shortcoming of the full back ref is its overhead. Every time a tree
 * block gets COWed, we have to update the back ref entries for all
 * pointers in it.
 *
 * For a newly allocated tree block, we use implicit back refs for
 * pointers in it. This means most tree related operations only involve
 * implicit back refs. For a tree block created in an old transaction, the
 * only way to drop a reference to it is to COW it. So we can detect the
 * event that a tree block loses its owner tree's reference and do the
 * back ref conversion.
 *
 * When a tree block is COWed through a tree, there are four cases:
 *
 * The reference count of the block is one and the tree is the block's
 * owner tree. Nothing to do in this case.
 *
 * The reference count of the block is one and the tree is not the
 * block's owner tree. In this case, full back refs are used for pointers
 * in the block. Remove these full back refs, add implicit back refs for
 * every pointer in the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * the block's owner tree. In this case, implicit back refs are used for
 * pointers in the block. Add full back refs for every pointer in the
 * block, increase lower level extents' reference counts. The original
 * implicit back refs are carried over to the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * not the block's owner tree. Add implicit back refs for every pointer in
 * the new block, increase lower level extents' reference counts.
 *
 * Back Reference Key composing:
 *
 * The key objectid corresponds to the first byte in the extent,
 * the key type is used to differentiate between types of back refs.
 * There are different meanings of the key offset for different types
 * of back refs.
 *
 * File extents can be referenced by:
 *
 * - multiple snapshots, subvolumes, or different generations in one subvol
 * - different files inside a single subvolume
 * - different offsets inside a file (bookend extents in file.c)
 *
 * The extent ref structure for the implicit back refs has fields for:
 *
 * - Objectid of the subvolume root
 * - objectid of the file holding the reference
 * - original offset in the file
 * - how many bookend extents
 *
 * The key offset for the implicit back refs is the hash of the first
 * three fields.
 *
 * The extent ref structure for the full back refs has a field for:
 *
 * - number of pointers in the tree leaf
 *
 * The key offset for the full back refs is the first byte of
 * the tree leaf.
 *
 * When a file extent is allocated, the implicit back refs are used.
 * The fields are filled in:
 *
 *     (root_key.objectid, inode objectid, offset in file, 1)
 *
 * When a file extent is removed during file truncation, we find the
 * corresponding implicit back refs and check the following fields:
 *
 *     (btrfs_header_owner(leaf), inode objectid, offset in file)
 *
 * Btree extents can be referenced by:
 *
 * - Different subvolumes
 *
 * Both the implicit back refs and the full back refs for tree blocks
 * only consist of a key. The key offset for the implicit back refs is
 * the objectid of the block's owner tree. The key offset for the full
 * back refs is the first byte of the parent block.
 *
 * When implicit back refs are used, information about the lowest key and
 * level of the tree block is required. This information is stored in
 * the tree block info structure.
 */
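
/*
 * Illustrative example of the key layouts described above (made-up
 * numbers): a data extent at bytenr 13631488, len 4096, referenced from
 * inode 257 at file offset 0 in the fs tree (root 5), carries
 *
 *	(13631488, BTRFS_EXTENT_ITEM_KEY, 4096)
 *
 * with an implicit BTRFS_EXTENT_DATA_REF_KEY ref whose key offset is
 * hash_extent_data_ref(5, 257, 0). If the same extent later becomes
 * shared via a snapshotted leaf at bytenr 30408704, the full back ref
 * would instead be keyed
 *
 *	(13631488, BTRFS_SHARED_DATA_REF_KEY, 30408704)
 */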

#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
static int convert_extent_item_v0(struct btrfs_trans_handle *trans,
				  struct btrfs_fs_info *fs_info,
				  struct btrfs_path *path,
				  u64 owner, u32 extra_size)
{
	struct btrfs_root *root = fs_info->extent_root;
	struct btrfs_extent_item *item;
	struct btrfs_extent_item_v0 *ei0;
	struct btrfs_extent_ref_v0 *ref0;
	struct btrfs_tree_block_info *bi;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	u32 new_size = sizeof(*item);
	u64 refs;
	int ret;

	leaf = path->nodes[0];
	BUG_ON(btrfs_item_size_nr(leaf, path->slots[0]) != sizeof(*ei0));

	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	ei0 = btrfs_item_ptr(leaf, path->slots[0],
			     struct btrfs_extent_item_v0);
	refs = btrfs_extent_refs_v0(leaf, ei0);

	if (owner == (u64)-1) {
		while (1) {
			if (path->slots[0] >= btrfs_header_nritems(leaf)) {
				ret = btrfs_next_leaf(root, path);
				if (ret < 0)
					return ret;
				BUG_ON(ret > 0); /* Corruption */
				leaf = path->nodes[0];
			}
			btrfs_item_key_to_cpu(leaf, &found_key,
					      path->slots[0]);
			BUG_ON(key.objectid != found_key.objectid);
			if (found_key.type != BTRFS_EXTENT_REF_V0_KEY) {
				path->slots[0]++;
				continue;
			}
			ref0 = btrfs_item_ptr(leaf, path->slots[0],
					      struct btrfs_extent_ref_v0);
			owner = btrfs_ref_objectid_v0(leaf, ref0);
			break;
		}
	}
	btrfs_release_path(path);

	if (owner < BTRFS_FIRST_FREE_OBJECTID)
		new_size += sizeof(*bi);

	new_size -= sizeof(*ei0);
	ret = btrfs_search_slot(trans, root, &key, path,
				new_size + extra_size, 1);
	if (ret < 0)
		return ret;
	BUG_ON(ret); /* Corruption */

	btrfs_extend_item(fs_info, path, new_size);

	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	btrfs_set_extent_refs(leaf, item, refs);
	/* FIXME: get real generation */
	btrfs_set_extent_generation(leaf, item, 0);
	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		btrfs_set_extent_flags(leaf, item,
				       BTRFS_EXTENT_FLAG_TREE_BLOCK |
				       BTRFS_BLOCK_FLAG_FULL_BACKREF);
		bi = (struct btrfs_tree_block_info *)(item + 1);
		/* FIXME: get first key of the block */
		memzero_extent_buffer(leaf, (unsigned long)bi, sizeof(*bi));
		btrfs_set_tree_block_level(leaf, bi, (int)owner);
	} else {
		btrfs_set_extent_flags(leaf, item, BTRFS_EXTENT_FLAG_DATA);
	}
	btrfs_mark_buffer_dirty(leaf);
	return 0;
}
#endif

/*
 * is_data == BTRFS_REF_TYPE_BLOCK, tree block type is required,
 * is_data == BTRFS_REF_TYPE_DATA,  data type is required,
 * is_data == BTRFS_REF_TYPE_ANY,   either type is OK.
 */
int btrfs_get_extent_inline_ref_type(const struct extent_buffer *eb,
				     struct btrfs_extent_inline_ref *iref,
				     enum btrfs_inline_ref_type is_data)
{
	int type = btrfs_extent_inline_ref_type(eb, iref);
	u64 offset = btrfs_extent_inline_ref_offset(eb, iref);

	if (type == BTRFS_TREE_BLOCK_REF_KEY ||
	    type == BTRFS_SHARED_BLOCK_REF_KEY ||
	    type == BTRFS_SHARED_DATA_REF_KEY ||
	    type == BTRFS_EXTENT_DATA_REF_KEY) {
		if (is_data == BTRFS_REF_TYPE_BLOCK) {
			if (type == BTRFS_TREE_BLOCK_REF_KEY)
				return type;
			if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
				ASSERT(eb->fs_info);
				/*
				 * Every shared one has parent tree
				 * block, which must be aligned to
				 * nodesize.
				 */
				if (offset &&
				    IS_ALIGNED(offset, eb->fs_info->nodesize))
					return type;
			}
		} else if (is_data == BTRFS_REF_TYPE_DATA) {
			if (type == BTRFS_EXTENT_DATA_REF_KEY)
				return type;
			if (type == BTRFS_SHARED_DATA_REF_KEY) {
				ASSERT(eb->fs_info);
				/*
				 * Every shared one has parent tree
				 * block, which must be aligned to
				 * nodesize.
				 */
				if (offset &&
				    IS_ALIGNED(offset, eb->fs_info->nodesize))
					return type;
			}
		} else {
			ASSERT(is_data == BTRFS_REF_TYPE_ANY);
			return type;
		}
	}

	btrfs_print_leaf((struct extent_buffer *)eb);
	btrfs_err(eb->fs_info, "eb %llu invalid extent inline ref type %d",
		  eb->start, type);
	WARN_ON(1);

	return BTRFS_REF_TYPE_INVALID;
}

static u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset)
{
	u32 high_crc = ~(u32)0;
	u32 low_crc = ~(u32)0;
	__le64 lenum;

	lenum = cpu_to_le64(root_objectid);
	high_crc = btrfs_crc32c(high_crc, &lenum, sizeof(lenum));
	lenum = cpu_to_le64(owner);
	low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));
	lenum = cpu_to_le64(offset);
	low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));

	return ((u64)high_crc << 31) ^ (u64)low_crc;
}
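
/*
 * A note on the composition above: the root objectid is folded into
 * high_crc and the (owner, offset) pair into low_crc, and the two are
 * combined with "high_crc << 31". The shift by 31 (rather than 32) is
 * long-standing on-disk behaviour and cannot be changed without breaking
 * existing filesystems, since the result is the key offset of
 * BTRFS_EXTENT_DATA_REF_KEY items. For example (made-up inputs),
 * hash_extent_data_ref(5, 257, 0) must always hash the same way so that
 * lookup and insert agree on where the ref item lives.
 */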

static u64 hash_extent_data_ref_item(struct extent_buffer *leaf,
				     struct btrfs_extent_data_ref *ref)
{
	return hash_extent_data_ref(btrfs_extent_data_ref_root(leaf, ref),
				    btrfs_extent_data_ref_objectid(leaf, ref),
				    btrfs_extent_data_ref_offset(leaf, ref));
}

static int match_extent_data_ref(struct extent_buffer *leaf,
				 struct btrfs_extent_data_ref *ref,
				 u64 root_objectid, u64 owner, u64 offset)
{
	if (btrfs_extent_data_ref_root(leaf, ref) != root_objectid ||
	    btrfs_extent_data_ref_objectid(leaf, ref) != owner ||
	    btrfs_extent_data_ref_offset(leaf, ref) != offset)
		return 0;
	return 1;
}

static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_fs_info *fs_info,
					   struct btrfs_path *path,
					   u64 bytenr, u64 parent,
					   u64 root_objectid,
					   u64 owner, u64 offset)
{
	struct btrfs_root *root = fs_info->extent_root;
	struct btrfs_key key;
	struct btrfs_extent_data_ref *ref;
	struct extent_buffer *leaf;
	u32 nritems;
	int ret;
	int recow;
	int err = -ENOENT;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_DATA_REF_KEY;
		key.offset = parent;
	} else {
		key.type = BTRFS_EXTENT_DATA_REF_KEY;
		key.offset = hash_extent_data_ref(root_objectid,
						  owner, offset);
	}
again:
	recow = 0;
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0) {
		err = ret;
		goto fail;
	}

	if (parent) {
		if (!ret)
			return 0;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
		key.type = BTRFS_EXTENT_REF_V0_KEY;
		btrfs_release_path(path);
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret < 0) {
			err = ret;
			goto fail;
		}
		if (!ret)
			return 0;
#endif
		goto fail;
	}

	leaf = path->nodes[0];
	nritems = btrfs_header_nritems(leaf);
	while (1) {
		if (path->slots[0] >= nritems) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				err = ret;
			if (ret)
				goto fail;

			leaf = path->nodes[0];
			nritems = btrfs_header_nritems(leaf);
			recow = 1;
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid != bytenr ||
		    key.type != BTRFS_EXTENT_DATA_REF_KEY)
			goto fail;

		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_data_ref);

		if (match_extent_data_ref(leaf, ref, root_objectid,
					  owner, offset)) {
			if (recow) {
				btrfs_release_path(path);
				goto again;
			}
			err = 0;
			break;
		}
		path->slots[0]++;
	}
fail:
	return err;
}

static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_fs_info *fs_info,
					   struct btrfs_path *path,
					   u64 bytenr, u64 parent,
					   u64 root_objectid, u64 owner,
					   u64 offset, int refs_to_add)
{
	struct btrfs_root *root = fs_info->extent_root;
	struct btrfs_key key;
	struct extent_buffer *leaf;
	u32 size;
	u32 num_refs;
	int ret;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_DATA_REF_KEY;
		key.offset = parent;
		size = sizeof(struct btrfs_shared_data_ref);
	} else {
		key.type = BTRFS_EXTENT_DATA_REF_KEY;
		key.offset = hash_extent_data_ref(root_objectid,
						  owner, offset);
		size = sizeof(struct btrfs_extent_data_ref);
	}

	ret = btrfs_insert_empty_item(trans, root, path, &key, size);
	if (ret && ret != -EEXIST)
		goto fail;

	leaf = path->nodes[0];
	if (parent) {
		struct btrfs_shared_data_ref *ref;

		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_shared_data_ref);
		if (ret == 0) {
			btrfs_set_shared_data_ref_count(leaf, ref, refs_to_add);
		} else {
			num_refs = btrfs_shared_data_ref_count(leaf, ref);
			num_refs += refs_to_add;
			btrfs_set_shared_data_ref_count(leaf, ref, num_refs);
		}
	} else {
		struct btrfs_extent_data_ref *ref;

		while (ret == -EEXIST) {
			ref = btrfs_item_ptr(leaf, path->slots[0],
					     struct btrfs_extent_data_ref);
			if (match_extent_data_ref(leaf, ref, root_objectid,
						  owner, offset))
				break;
			btrfs_release_path(path);
			key.offset++;
			ret = btrfs_insert_empty_item(trans, root, path, &key,
						      size);
			if (ret && ret != -EEXIST)
				goto fail;

			leaf = path->nodes[0];
		}
		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_data_ref);
		if (ret == 0) {
			btrfs_set_extent_data_ref_root(leaf, ref,
						       root_objectid);
			btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
			btrfs_set_extent_data_ref_offset(leaf, ref, offset);
			btrfs_set_extent_data_ref_count(leaf, ref, refs_to_add);
		} else {
			num_refs = btrfs_extent_data_ref_count(leaf, ref);
			num_refs += refs_to_add;
			btrfs_set_extent_data_ref_count(leaf, ref, num_refs);
		}
	}
	btrfs_mark_buffer_dirty(leaf);
	ret = 0;
fail:
	btrfs_release_path(path);
	return ret;
}

static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_fs_info *fs_info,
					   struct btrfs_path *path,
					   int refs_to_drop, int *last_ref)
{
	struct btrfs_key key;
	struct btrfs_extent_data_ref *ref1 = NULL;
	struct btrfs_shared_data_ref *ref2 = NULL;
	struct extent_buffer *leaf;
	u32 num_refs = 0;
	int ret = 0;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

	if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
		ref1 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_data_ref);
		num_refs = btrfs_extent_data_ref_count(leaf, ref1);
	} else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
		ref2 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_shared_data_ref);
		num_refs = btrfs_shared_data_ref_count(leaf, ref2);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	} else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
		struct btrfs_extent_ref_v0 *ref0;

		ref0 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_ref_v0);
		num_refs = btrfs_ref_count_v0(leaf, ref0);
#endif
	} else {
		BUG();
	}

	BUG_ON(num_refs < refs_to_drop);
	num_refs -= refs_to_drop;

	if (num_refs == 0) {
		ret = btrfs_del_item(trans, fs_info->extent_root, path);
		*last_ref = 1;
	} else {
		if (key.type == BTRFS_EXTENT_DATA_REF_KEY)
			btrfs_set_extent_data_ref_count(leaf, ref1, num_refs);
		else if (key.type == BTRFS_SHARED_DATA_REF_KEY)
			btrfs_set_shared_data_ref_count(leaf, ref2, num_refs);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
		else {
			struct btrfs_extent_ref_v0 *ref0;

			ref0 = btrfs_item_ptr(leaf, path->slots[0],
					      struct btrfs_extent_ref_v0);
			btrfs_set_ref_count_v0(leaf, ref0, num_refs);
		}
#endif
		btrfs_mark_buffer_dirty(leaf);
	}
	return ret;
}

static noinline u32 extent_data_ref_count(struct btrfs_path *path,
					  struct btrfs_extent_inline_ref *iref)
{
	struct btrfs_key key;
	struct extent_buffer *leaf;
	struct btrfs_extent_data_ref *ref1;
	struct btrfs_shared_data_ref *ref2;
	u32 num_refs = 0;
	int type;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	if (iref) {
		/*
		 * If type is invalid, we should have bailed out earlier than
		 * this call.
		 */
		type = btrfs_get_extent_inline_ref_type(leaf, iref, BTRFS_REF_TYPE_DATA);
		ASSERT(type != BTRFS_REF_TYPE_INVALID);
		if (type == BTRFS_EXTENT_DATA_REF_KEY) {
			ref1 = (struct btrfs_extent_data_ref *)(&iref->offset);
			num_refs = btrfs_extent_data_ref_count(leaf, ref1);
		} else {
			ref2 = (struct btrfs_shared_data_ref *)(iref + 1);
			num_refs = btrfs_shared_data_ref_count(leaf, ref2);
		}
	} else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
		ref1 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_data_ref);
		num_refs = btrfs_extent_data_ref_count(leaf, ref1);
	} else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
		ref2 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_shared_data_ref);
		num_refs = btrfs_shared_data_ref_count(leaf, ref2);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	} else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
		struct btrfs_extent_ref_v0 *ref0;

		ref0 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_ref_v0);
		num_refs = btrfs_ref_count_v0(leaf, ref0);
#endif
	} else {
		WARN_ON(1);
	}
	return num_refs;
}

static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans,
					  struct btrfs_fs_info *fs_info,
					  struct btrfs_path *path,
					  u64 bytenr, u64 parent,
					  u64 root_objectid)
{
	struct btrfs_root *root = fs_info->extent_root;
	struct btrfs_key key;
	int ret;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_BLOCK_REF_KEY;
		key.offset = parent;
	} else {
		key.type = BTRFS_TREE_BLOCK_REF_KEY;
		key.offset = root_objectid;
	}

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0)
		ret = -ENOENT;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (ret == -ENOENT && parent) {
		btrfs_release_path(path);
		key.type = BTRFS_EXTENT_REF_V0_KEY;
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret > 0)
			ret = -ENOENT;
	}
#endif
	return ret;
}

static noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans,
					  struct btrfs_fs_info *fs_info,
					  struct btrfs_path *path,
					  u64 bytenr, u64 parent,
					  u64 root_objectid)
{
	struct btrfs_key key;
	int ret;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_BLOCK_REF_KEY;
		key.offset = parent;
	} else {
		key.type = BTRFS_TREE_BLOCK_REF_KEY;
		key.offset = root_objectid;
	}

	ret = btrfs_insert_empty_item(trans, fs_info->extent_root,
				      path, &key, 0);
	btrfs_release_path(path);
	return ret;
}

static inline int extent_ref_type(u64 parent, u64 owner)
{
	int type;

	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		if (parent > 0)
			type = BTRFS_SHARED_BLOCK_REF_KEY;
		else
			type = BTRFS_TREE_BLOCK_REF_KEY;
	} else {
		if (parent > 0)
			type = BTRFS_SHARED_DATA_REF_KEY;
		else
			type = BTRFS_EXTENT_DATA_REF_KEY;
	}
	return type;
}
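
/*
 * The mapping above, in table form (a summary of extent_ref_type(), for
 * reference):
 *
 *	owner				parent == 0		parent > 0
 *	metadata (< FIRST_FREE)		TREE_BLOCK_REF_KEY	SHARED_BLOCK_REF_KEY
 *	data (>= FIRST_FREE)		EXTENT_DATA_REF_KEY	SHARED_DATA_REF_KEY
 *
 * i.e. a nonzero parent always means a full (shared) back ref.
 */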

static int find_next_key(struct btrfs_path *path, int level,
			 struct btrfs_key *key)
{
	for (; level < BTRFS_MAX_LEVEL; level++) {
		if (!path->nodes[level])
			break;
		if (path->slots[level] + 1 >=
		    btrfs_header_nritems(path->nodes[level]))
			continue;
		if (level == 0)
			btrfs_item_key_to_cpu(path->nodes[level], key,
					      path->slots[level] + 1);
		else
			btrfs_node_key_to_cpu(path->nodes[level], key,
					      path->slots[level] + 1);
		return 0;
	}
	return 1;
}

/*
 * Look for an inline back ref. If the back ref is found, *ref_ret is set
 * to the address of the inline back ref, and 0 is returned.
 *
 * If the back ref isn't found, *ref_ret is set to the address where it
 * should be inserted, and -ENOENT is returned.
 *
 * If insert is true and there are too many inline back refs, the path
 * points to the extent item, and -EAGAIN is returned.
 *
 * NOTE: inline back refs are ordered in the same way that back ref
 *	 items in the tree are ordered.
 */
static noinline_for_stack
int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_fs_info *fs_info,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref **ref_ret,
				 u64 bytenr, u64 num_bytes,
				 u64 parent, u64 root_objectid,
				 u64 owner, u64 offset, int insert)
{
	struct btrfs_root *root = fs_info->extent_root;
	struct btrfs_key key;
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	struct btrfs_extent_inline_ref *iref;
	u64 flags;
	u64 item_size;
	unsigned long ptr;
	unsigned long end;
	int extra_size;
	int type;
	int want;
	int ret;
	int err = 0;
	bool skinny_metadata = btrfs_fs_incompat(fs_info, SKINNY_METADATA);
	int needed;

	key.objectid = bytenr;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	key.offset = num_bytes;

	want = extent_ref_type(parent, owner);
	if (insert) {
		extra_size = btrfs_extent_inline_ref_size(want);
		path->keep_locks = 1;
	} else
		extra_size = -1;

	/*
	 * Owner is our parent level, so we can just add one to get the level
	 * for the block we are interested in.
	 */
	if (skinny_metadata && owner < BTRFS_FIRST_FREE_OBJECTID) {
		key.type = BTRFS_METADATA_ITEM_KEY;
		key.offset = owner;
	}

again:
	ret = btrfs_search_slot(trans, root, &key, path, extra_size, 1);
	if (ret < 0) {
		err = ret;
		goto out;
	}

	/*
	 * We may be a newly converted file system which still has the old fat
	 * extent entries for metadata, so try and see if we have one of those.
	 */
	if (ret > 0 && skinny_metadata) {
		skinny_metadata = false;
		if (path->slots[0]) {
			path->slots[0]--;
			btrfs_item_key_to_cpu(path->nodes[0], &key,
					      path->slots[0]);
			if (key.objectid == bytenr &&
			    key.type == BTRFS_EXTENT_ITEM_KEY &&
			    key.offset == num_bytes)
				ret = 0;
		}
		if (ret) {
			key.objectid = bytenr;
			key.type = BTRFS_EXTENT_ITEM_KEY;
			key.offset = num_bytes;
			btrfs_release_path(path);
			goto again;
		}
	}

	if (ret && !insert) {
		err = -ENOENT;
		goto out;
	} else if (WARN_ON(ret)) {
		err = -EIO;
		goto out;
	}

	leaf = path->nodes[0];
	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (item_size < sizeof(*ei)) {
		if (!insert) {
			err = -ENOENT;
			goto out;
		}
		ret = convert_extent_item_v0(trans, fs_info, path, owner,
					     extra_size);
		if (ret < 0) {
			err = ret;
			goto out;
		}
		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
	}
#endif
	BUG_ON(item_size < sizeof(*ei));

	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	flags = btrfs_extent_flags(leaf, ei);

	ptr = (unsigned long)(ei + 1);
	end = (unsigned long)ei + item_size;

	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK && !skinny_metadata) {
		ptr += sizeof(struct btrfs_tree_block_info);
		BUG_ON(ptr > end);
	}

	if (owner >= BTRFS_FIRST_FREE_OBJECTID)
		needed = BTRFS_REF_TYPE_DATA;
	else
		needed = BTRFS_REF_TYPE_BLOCK;

	err = -ENOENT;
	while (1) {
		if (ptr >= end) {
			WARN_ON(ptr > end);
			break;
		}
		iref = (struct btrfs_extent_inline_ref *)ptr;
		type = btrfs_get_extent_inline_ref_type(leaf, iref, needed);
		if (type == BTRFS_REF_TYPE_INVALID) {
			err = -EINVAL;
			goto out;
		}

		if (want < type)
			break;
		if (want > type) {
			ptr += btrfs_extent_inline_ref_size(type);
			continue;
		}

		if (type == BTRFS_EXTENT_DATA_REF_KEY) {
			struct btrfs_extent_data_ref *dref;

			dref = (struct btrfs_extent_data_ref *)(&iref->offset);
			if (match_extent_data_ref(leaf, dref, root_objectid,
						  owner, offset)) {
				err = 0;
				break;
			}
			if (hash_extent_data_ref_item(leaf, dref) <
			    hash_extent_data_ref(root_objectid, owner, offset))
				break;
		} else {
			u64 ref_offset;

			ref_offset = btrfs_extent_inline_ref_offset(leaf, iref);
			if (parent > 0) {
				if (parent == ref_offset) {
					err = 0;
					break;
				}
				if (ref_offset < parent)
					break;
			} else {
				if (root_objectid == ref_offset) {
					err = 0;
					break;
				}
				if (ref_offset < root_objectid)
					break;
			}
		}
		ptr += btrfs_extent_inline_ref_size(type);
	}
	if (err == -ENOENT && insert) {
		if (item_size + extra_size >=
		    BTRFS_MAX_EXTENT_ITEM_SIZE(root)) {
			err = -EAGAIN;
			goto out;
		}
		/*
		 * To add new inline back ref, we have to make sure
		 * there is no corresponding back ref item.
		 * For simplicity, we just do not add new inline back
		 * ref if there is any kind of item for this block
		 */
		if (find_next_key(path, 0, &key) == 0 &&
		    key.objectid == bytenr &&
		    key.type < BTRFS_BLOCK_GROUP_ITEM_KEY) {
			err = -EAGAIN;
			goto out;
		}
	}
	*ref_ret = (struct btrfs_extent_inline_ref *)ptr;
out:
	if (insert) {
		path->keep_locks = 0;
		btrfs_unlock_up_safe(path, 1);
	}
	return err;
}

/*
 * helper to add new inline back ref
 */
static noinline_for_stack
void setup_inline_extent_backref(struct btrfs_fs_info *fs_info,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref *iref,
				 u64 parent, u64 root_objectid,
				 u64 owner, u64 offset, int refs_to_add,
				 struct btrfs_delayed_extent_op *extent_op)
{
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	unsigned long ptr;
	unsigned long end;
	unsigned long item_offset;
	u64 refs;
	int size;
	int type;

	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	item_offset = (unsigned long)iref - (unsigned long)ei;

	type = extent_ref_type(parent, owner);
	size = btrfs_extent_inline_ref_size(type);

	btrfs_extend_item(fs_info, path, size);

	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	refs = btrfs_extent_refs(leaf, ei);
	refs += refs_to_add;
	btrfs_set_extent_refs(leaf, ei, refs);
	if (extent_op)
		__run_delayed_extent_op(extent_op, leaf, ei);

	ptr = (unsigned long)ei + item_offset;
	end = (unsigned long)ei + btrfs_item_size_nr(leaf, path->slots[0]);
	if (ptr < end - size)
		memmove_extent_buffer(leaf, ptr + size, ptr,
				      end - size - ptr);

	iref = (struct btrfs_extent_inline_ref *)ptr;
	btrfs_set_extent_inline_ref_type(leaf, iref, type);
	if (type == BTRFS_EXTENT_DATA_REF_KEY) {
		struct btrfs_extent_data_ref *dref;

		dref = (struct btrfs_extent_data_ref *)(&iref->offset);
		btrfs_set_extent_data_ref_root(leaf, dref, root_objectid);
		btrfs_set_extent_data_ref_objectid(leaf, dref, owner);
		btrfs_set_extent_data_ref_offset(leaf, dref, offset);
		btrfs_set_extent_data_ref_count(leaf, dref, refs_to_add);
	} else if (type == BTRFS_SHARED_DATA_REF_KEY) {
		struct btrfs_shared_data_ref *sref;

		sref = (struct btrfs_shared_data_ref *)(iref + 1);
		btrfs_set_shared_data_ref_count(leaf, sref, refs_to_add);
		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
	} else if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
	} else {
		btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
	}
	btrfs_mark_buffer_dirty(leaf);
}

static int lookup_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_fs_info *fs_info,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref **ref_ret,
				 u64 bytenr, u64 num_bytes, u64 parent,
				 u64 root_objectid, u64 owner, u64 offset)
{
	int ret;

	ret = lookup_inline_extent_backref(trans, fs_info, path, ref_ret,
					   bytenr, num_bytes, parent,
					   root_objectid, owner, offset, 0);
	if (ret != -ENOENT)
		return ret;

	btrfs_release_path(path);
	*ref_ret = NULL;

	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		ret = lookup_tree_block_ref(trans, fs_info, path, bytenr,
					    parent, root_objectid);
	} else {
		ret = lookup_extent_data_ref(trans, fs_info, path, bytenr,
					     parent, root_objectid, owner,
					     offset);
	}
	return ret;
}

/*
 * helper to update/remove inline back ref
 */
static noinline_for_stack
void update_inline_extent_backref(struct btrfs_fs_info *fs_info,
				  struct btrfs_path *path,
				  struct btrfs_extent_inline_ref *iref,
				  int refs_to_mod,
				  struct btrfs_delayed_extent_op *extent_op,
				  int *last_ref)
{
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	struct btrfs_extent_data_ref *dref = NULL;
	struct btrfs_shared_data_ref *sref = NULL;
	unsigned long ptr;
	unsigned long end;
	u32 item_size;
	int size;
	int type;
	u64 refs;

	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	refs = btrfs_extent_refs(leaf, ei);
	WARN_ON(refs_to_mod < 0 && refs + refs_to_mod <= 0);
	refs += refs_to_mod;
	btrfs_set_extent_refs(leaf, ei, refs);
	if (extent_op)
		__run_delayed_extent_op(extent_op, leaf, ei);

	/*
	 * If type is invalid, we should have bailed out after
	 * lookup_inline_extent_backref().
	 */
	type = btrfs_get_extent_inline_ref_type(leaf, iref, BTRFS_REF_TYPE_ANY);
	ASSERT(type != BTRFS_REF_TYPE_INVALID);

	if (type == BTRFS_EXTENT_DATA_REF_KEY) {
		dref = (struct btrfs_extent_data_ref *)(&iref->offset);
		refs = btrfs_extent_data_ref_count(leaf, dref);
	} else if (type == BTRFS_SHARED_DATA_REF_KEY) {
		sref = (struct btrfs_shared_data_ref *)(iref + 1);
		refs = btrfs_shared_data_ref_count(leaf, sref);
	} else {
		refs = 1;
		BUG_ON(refs_to_mod != -1);
	}

	BUG_ON(refs_to_mod < 0 && refs < -refs_to_mod);
	refs += refs_to_mod;

	if (refs > 0) {
		if (type == BTRFS_EXTENT_DATA_REF_KEY)
			btrfs_set_extent_data_ref_count(leaf, dref, refs);
		else
			btrfs_set_shared_data_ref_count(leaf, sref, refs);
	} else {
		*last_ref = 1;
		size = btrfs_extent_inline_ref_size(type);
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		ptr = (unsigned long)iref;
		end = (unsigned long)ei + item_size;
		if (ptr + size < end)
			memmove_extent_buffer(leaf, ptr, ptr + size,
					      end - ptr - size);
		item_size -= size;
		btrfs_truncate_item(fs_info, path, item_size, 1);
	}
	btrfs_mark_buffer_dirty(leaf);
}

static noinline_for_stack
int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_fs_info *fs_info,
				 struct btrfs_path *path,
				 u64 bytenr, u64 num_bytes, u64 parent,
				 u64 root_objectid, u64 owner,
				 u64 offset, int refs_to_add,
				 struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_extent_inline_ref *iref;
	int ret;

	ret = lookup_inline_extent_backref(trans, fs_info, path, &iref,
					   bytenr, num_bytes, parent,
					   root_objectid, owner, offset, 1);
	if (ret == 0) {
		BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID);
		update_inline_extent_backref(fs_info, path, iref,
					     refs_to_add, extent_op, NULL);
	} else if (ret == -ENOENT) {
		setup_inline_extent_backref(fs_info, path, iref, parent,
					    root_objectid, owner, offset,
					    refs_to_add, extent_op);
		ret = 0;
	}
	return ret;
}

static int insert_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_fs_info *fs_info,
				 struct btrfs_path *path,
				 u64 bytenr, u64 parent, u64 root_objectid,
				 u64 owner, u64 offset, int refs_to_add)
{
	int ret;

	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		BUG_ON(refs_to_add != 1);
		ret = insert_tree_block_ref(trans, fs_info, path, bytenr,
					    parent, root_objectid);
	} else {
		ret = insert_extent_data_ref(trans, fs_info, path, bytenr,
					     parent, root_objectid,
					     owner, offset, refs_to_add);
	}
	return ret;
}

static int remove_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_fs_info *fs_info,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref *iref,
				 int refs_to_drop, int is_data, int *last_ref)
{
	int ret = 0;

	BUG_ON(!is_data && refs_to_drop != 1);
	if (iref) {
		update_inline_extent_backref(fs_info, path, iref,
					     -refs_to_drop, NULL, last_ref);
	} else if (is_data) {
		ret = remove_extent_data_ref(trans, fs_info, path, refs_to_drop,
					     last_ref);
	} else {
		*last_ref = 1;
		ret = btrfs_del_item(trans, fs_info->extent_root, path);
	}
	return ret;
}

#define in_range(b, first, len)	((b) >= (first) && (b) < (first) + (len))

static int btrfs_issue_discard(struct block_device *bdev, u64 start, u64 len,
			       u64 *discarded_bytes)
{
	int j, ret = 0;
	u64 bytes_left, end;
	u64 aligned_start = ALIGN(start, 1 << 9);

	if (WARN_ON(start != aligned_start)) {
		len -= aligned_start - start;
		len = round_down(len, 1 << 9);
		start = aligned_start;
	}

	*discarded_bytes = 0;

	if (!len)
		return 0;

	end = start + len;
	bytes_left = len;

	/* Skip any superblocks on this device. */
	for (j = 0; j < BTRFS_SUPER_MIRROR_MAX; j++) {
		u64 sb_start = btrfs_sb_offset(j);
		u64 sb_end = sb_start + BTRFS_SUPER_INFO_SIZE;
		u64 size = sb_start - start;

		if (!in_range(sb_start, start, bytes_left) &&
		    !in_range(sb_end, start, bytes_left) &&
		    !in_range(start, sb_start, BTRFS_SUPER_INFO_SIZE))
			continue;

		/*
		 * Superblock spans beginning of range. Adjust start and
		 * try again.
		 */
		if (sb_start <= start) {
			start += sb_end - start;
			if (start > end) {
				bytes_left = 0;
				break;
			}
			bytes_left = end - start;
			continue;
		}

		if (size) {
			ret = blkdev_issue_discard(bdev, start >> 9, size >> 9,
						   GFP_NOFS, 0);
			if (!ret)
				*discarded_bytes += size;
			else if (ret != -EOPNOTSUPP)
				return ret;
		}

		start = sb_end;
		if (start > end) {
			bytes_left = 0;
			break;
		}
		bytes_left = end - start;
	}

	if (bytes_left) {
		ret = blkdev_issue_discard(bdev, start >> 9, bytes_left >> 9,
					   GFP_NOFS, 0);
		if (!ret)
			*discarded_bytes += bytes_left;
	}
	return ret;
}
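
/*
 * Worked example for the superblock skipping above (hypothetical
 * request): discarding [32K, 160K) on a device overlaps the primary
 * superblock at btrfs_sb_offset(0) = 64K. The loop issues a discard for
 * [32K, 64K), then advances start to sb_end = 68K (the superblock is
 * BTRFS_SUPER_INFO_SIZE = 4K), and the trailing blkdev_issue_discard()
 * covers the remainder, [68K, 160K).
 */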

int btrfs_discard_extent(struct btrfs_fs_info *fs_info, u64 bytenr,
			 u64 num_bytes, u64 *actual_bytes)
{
	int ret;
	u64 discarded_bytes = 0;
	struct btrfs_bio *bbio = NULL;

	/*
	 * Avoid races with device replace and make sure our bbio has devices
	 * associated to its stripes that don't go away while we are discarding.
	 */
	btrfs_bio_counter_inc_blocked(fs_info);
	/* Tell the block device(s) that the sectors can be discarded */
	ret = btrfs_map_block(fs_info, BTRFS_MAP_DISCARD, bytenr, &num_bytes,
			      &bbio, 0);
	/* Error condition is -ENOMEM */
	if (!ret) {
		struct btrfs_bio_stripe *stripe = bbio->stripes;
		int i;

		for (i = 0; i < bbio->num_stripes; i++, stripe++) {
			u64 bytes;
			struct request_queue *req_q;

			req_q = bdev_get_queue(stripe->dev->bdev);
			if (!blk_queue_discard(req_q))
				continue;

			ret = btrfs_issue_discard(stripe->dev->bdev,
						  stripe->physical,
						  stripe->length,
						  &bytes);
			if (!ret)
				discarded_bytes += bytes;
			else if (ret != -EOPNOTSUPP)
				break; /* Logic errors or -ENOMEM, or -EIO but I don't know how that could happen JDM */

			/*
			 * Just in case we get back EOPNOTSUPP for some reason,
			 * just ignore the return value so we don't screw up
			 * people calling discard_extent.
			 */
			ret = 0;
		}
		btrfs_put_bbio(bbio);
	}
	btrfs_bio_counter_dec(fs_info);

	if (actual_bytes)
		*actual_bytes = discarded_bytes;

	if (ret == -EOPNOTSUPP)
		ret = 0;
	return ret;
}
  1959. /* Can return -ENOMEM */
  1960. int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
  1961. struct btrfs_root *root,
  1962. u64 bytenr, u64 num_bytes, u64 parent,
  1963. u64 root_objectid, u64 owner, u64 offset)
  1964. {
  1965. struct btrfs_fs_info *fs_info = root->fs_info;
  1966. int old_ref_mod, new_ref_mod;
  1967. int ret;
  1968. BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID &&
  1969. root_objectid == BTRFS_TREE_LOG_OBJECTID);
  1970. btrfs_ref_tree_mod(root, bytenr, num_bytes, parent, root_objectid,
  1971. owner, offset, BTRFS_ADD_DELAYED_REF);
  1972. if (owner < BTRFS_FIRST_FREE_OBJECTID) {
  1973. ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
  1974. num_bytes, parent,
  1975. root_objectid, (int)owner,
  1976. BTRFS_ADD_DELAYED_REF, NULL,
  1977. &old_ref_mod, &new_ref_mod);
  1978. } else {
  1979. ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
  1980. num_bytes, parent,
  1981. root_objectid, owner, offset,
  1982. 0, BTRFS_ADD_DELAYED_REF,
  1983. &old_ref_mod, &new_ref_mod);
  1984. }
  1985. if (ret == 0 && old_ref_mod < 0 && new_ref_mod >= 0)
  1986. add_pinned_bytes(fs_info, -num_bytes, owner, root_objectid);
  1987. return ret;
  1988. }
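/*
 * Note on the owner parameter (illustrative, not from the original
 * source): for data extents owner is the inode number and offset the
 * file offset, so a hypothetical caller might do
 *
 *   btrfs_inc_extent_ref(trans, root, bytenr, num_bytes, 0,
 *                        root->root_key.objectid,
 *                        btrfs_ino(BTRFS_I(inode)), file_offset);
 *
 * while for tree blocks owner is the block level, always below
 * BTRFS_FIRST_FREE_OBJECTID, which is what the branch above keys off.
 */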
  1989. static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
  1990. struct btrfs_fs_info *fs_info,
  1991. struct btrfs_delayed_ref_node *node,
  1992. u64 parent, u64 root_objectid,
  1993. u64 owner, u64 offset, int refs_to_add,
  1994. struct btrfs_delayed_extent_op *extent_op)
  1995. {
  1996. struct btrfs_path *path;
  1997. struct extent_buffer *leaf;
  1998. struct btrfs_extent_item *item;
  1999. struct btrfs_key key;
  2000. u64 bytenr = node->bytenr;
  2001. u64 num_bytes = node->num_bytes;
  2002. u64 refs;
  2003. int ret;
  2004. path = btrfs_alloc_path();
  2005. if (!path)
  2006. return -ENOMEM;
  2007. path->reada = READA_FORWARD;
  2008. path->leave_spinning = 1;
  2009. /* this will setup the path even if it fails to insert the back ref */
  2010. ret = insert_inline_extent_backref(trans, fs_info, path, bytenr,
  2011. num_bytes, parent, root_objectid,
  2012. owner, offset,
  2013. refs_to_add, extent_op);
  2014. if ((ret < 0 && ret != -EAGAIN) || !ret)
  2015. goto out;
  2016. /*
2017. * Ok we had -EAGAIN which means we didn't have space to insert an
2018. * inline extent ref, so just update the reference count and add a
  2019. * normal backref.
  2020. */
  2021. leaf = path->nodes[0];
  2022. btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
  2023. item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
  2024. refs = btrfs_extent_refs(leaf, item);
  2025. btrfs_set_extent_refs(leaf, item, refs + refs_to_add);
  2026. if (extent_op)
  2027. __run_delayed_extent_op(extent_op, leaf, item);
  2028. btrfs_mark_buffer_dirty(leaf);
  2029. btrfs_release_path(path);
  2030. path->reada = READA_FORWARD;
  2031. path->leave_spinning = 1;
  2032. /* now insert the actual backref */
  2033. ret = insert_extent_backref(trans, fs_info, path, bytenr, parent,
  2034. root_objectid, owner, offset, refs_to_add);
  2035. if (ret)
  2036. btrfs_abort_transaction(trans, ret);
  2037. out:
  2038. btrfs_free_path(path);
  2039. return ret;
  2040. }
  2041. static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
  2042. struct btrfs_fs_info *fs_info,
  2043. struct btrfs_delayed_ref_node *node,
  2044. struct btrfs_delayed_extent_op *extent_op,
  2045. int insert_reserved)
  2046. {
  2047. int ret = 0;
  2048. struct btrfs_delayed_data_ref *ref;
  2049. struct btrfs_key ins;
  2050. u64 parent = 0;
  2051. u64 ref_root = 0;
  2052. u64 flags = 0;
  2053. ins.objectid = node->bytenr;
  2054. ins.offset = node->num_bytes;
  2055. ins.type = BTRFS_EXTENT_ITEM_KEY;
  2056. ref = btrfs_delayed_node_to_data_ref(node);
  2057. trace_run_delayed_data_ref(fs_info, node, ref, node->action);
  2058. if (node->type == BTRFS_SHARED_DATA_REF_KEY)
  2059. parent = ref->parent;
  2060. ref_root = ref->root;
  2061. if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
  2062. if (extent_op)
  2063. flags |= extent_op->flags_to_set;
  2064. ret = alloc_reserved_file_extent(trans, fs_info,
  2065. parent, ref_root, flags,
  2066. ref->objectid, ref->offset,
  2067. &ins, node->ref_mod);
  2068. } else if (node->action == BTRFS_ADD_DELAYED_REF) {
  2069. ret = __btrfs_inc_extent_ref(trans, fs_info, node, parent,
  2070. ref_root, ref->objectid,
  2071. ref->offset, node->ref_mod,
  2072. extent_op);
  2073. } else if (node->action == BTRFS_DROP_DELAYED_REF) {
  2074. ret = __btrfs_free_extent(trans, fs_info, node, parent,
  2075. ref_root, ref->objectid,
  2076. ref->offset, node->ref_mod,
  2077. extent_op);
  2078. } else {
  2079. BUG();
  2080. }
  2081. return ret;
  2082. }
  2083. static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
  2084. struct extent_buffer *leaf,
  2085. struct btrfs_extent_item *ei)
  2086. {
  2087. u64 flags = btrfs_extent_flags(leaf, ei);
  2088. if (extent_op->update_flags) {
  2089. flags |= extent_op->flags_to_set;
  2090. btrfs_set_extent_flags(leaf, ei, flags);
  2091. }
  2092. if (extent_op->update_key) {
  2093. struct btrfs_tree_block_info *bi;
  2094. BUG_ON(!(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK));
  2095. bi = (struct btrfs_tree_block_info *)(ei + 1);
  2096. btrfs_set_tree_block_key(leaf, bi, &extent_op->key);
  2097. }
  2098. }
  2099. static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
  2100. struct btrfs_fs_info *fs_info,
  2101. struct btrfs_delayed_ref_head *head,
  2102. struct btrfs_delayed_extent_op *extent_op)
  2103. {
  2104. struct btrfs_key key;
  2105. struct btrfs_path *path;
  2106. struct btrfs_extent_item *ei;
  2107. struct extent_buffer *leaf;
  2108. u32 item_size;
  2109. int ret;
  2110. int err = 0;
  2111. int metadata = !extent_op->is_data;
  2112. if (trans->aborted)
  2113. return 0;
  2114. if (metadata && !btrfs_fs_incompat(fs_info, SKINNY_METADATA))
  2115. metadata = 0;
  2116. path = btrfs_alloc_path();
  2117. if (!path)
  2118. return -ENOMEM;
  2119. key.objectid = head->bytenr;
  2120. if (metadata) {
  2121. key.type = BTRFS_METADATA_ITEM_KEY;
  2122. key.offset = extent_op->level;
  2123. } else {
  2124. key.type = BTRFS_EXTENT_ITEM_KEY;
  2125. key.offset = head->num_bytes;
  2126. }
  2127. again:
  2128. path->reada = READA_FORWARD;
  2129. path->leave_spinning = 1;
  2130. ret = btrfs_search_slot(trans, fs_info->extent_root, &key, path, 0, 1);
  2131. if (ret < 0) {
  2132. err = ret;
  2133. goto out;
  2134. }
  2135. if (ret > 0) {
  2136. if (metadata) {
  2137. if (path->slots[0] > 0) {
  2138. path->slots[0]--;
  2139. btrfs_item_key_to_cpu(path->nodes[0], &key,
  2140. path->slots[0]);
  2141. if (key.objectid == head->bytenr &&
  2142. key.type == BTRFS_EXTENT_ITEM_KEY &&
  2143. key.offset == head->num_bytes)
  2144. ret = 0;
  2145. }
  2146. if (ret > 0) {
  2147. btrfs_release_path(path);
  2148. metadata = 0;
  2149. key.objectid = head->bytenr;
  2150. key.offset = head->num_bytes;
  2151. key.type = BTRFS_EXTENT_ITEM_KEY;
  2152. goto again;
  2153. }
  2154. } else {
  2155. err = -EIO;
  2156. goto out;
  2157. }
  2158. }
  2159. leaf = path->nodes[0];
  2160. item_size = btrfs_item_size_nr(leaf, path->slots[0]);
  2161. #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
  2162. if (item_size < sizeof(*ei)) {
  2163. ret = convert_extent_item_v0(trans, fs_info, path, (u64)-1, 0);
  2164. if (ret < 0) {
  2165. err = ret;
  2166. goto out;
  2167. }
  2168. leaf = path->nodes[0];
  2169. item_size = btrfs_item_size_nr(leaf, path->slots[0]);
  2170. }
  2171. #endif
  2172. BUG_ON(item_size < sizeof(*ei));
  2173. ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
  2174. __run_delayed_extent_op(extent_op, leaf, ei);
  2175. btrfs_mark_buffer_dirty(leaf);
  2176. out:
  2177. btrfs_free_path(path);
  2178. return err;
  2179. }
  2180. static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
  2181. struct btrfs_fs_info *fs_info,
  2182. struct btrfs_delayed_ref_node *node,
  2183. struct btrfs_delayed_extent_op *extent_op,
  2184. int insert_reserved)
  2185. {
  2186. int ret = 0;
  2187. struct btrfs_delayed_tree_ref *ref;
  2188. struct btrfs_key ins;
  2189. u64 parent = 0;
  2190. u64 ref_root = 0;
  2191. bool skinny_metadata = btrfs_fs_incompat(fs_info, SKINNY_METADATA);
  2192. ref = btrfs_delayed_node_to_tree_ref(node);
  2193. trace_run_delayed_tree_ref(fs_info, node, ref, node->action);
  2194. if (node->type == BTRFS_SHARED_BLOCK_REF_KEY)
  2195. parent = ref->parent;
  2196. ref_root = ref->root;
  2197. ins.objectid = node->bytenr;
  2198. if (skinny_metadata) {
  2199. ins.offset = ref->level;
  2200. ins.type = BTRFS_METADATA_ITEM_KEY;
  2201. } else {
  2202. ins.offset = node->num_bytes;
  2203. ins.type = BTRFS_EXTENT_ITEM_KEY;
  2204. }
  2205. if (node->ref_mod != 1) {
  2206. btrfs_err(fs_info,
  2207. "btree block(%llu) has %d references rather than 1: action %d ref_root %llu parent %llu",
  2208. node->bytenr, node->ref_mod, node->action, ref_root,
  2209. parent);
  2210. return -EIO;
  2211. }
  2212. if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
  2213. BUG_ON(!extent_op || !extent_op->update_flags);
  2214. ret = alloc_reserved_tree_block(trans, fs_info,
  2215. parent, ref_root,
  2216. extent_op->flags_to_set,
  2217. &extent_op->key,
  2218. ref->level, &ins);
  2219. } else if (node->action == BTRFS_ADD_DELAYED_REF) {
  2220. ret = __btrfs_inc_extent_ref(trans, fs_info, node,
  2221. parent, ref_root,
  2222. ref->level, 0, 1,
  2223. extent_op);
  2224. } else if (node->action == BTRFS_DROP_DELAYED_REF) {
  2225. ret = __btrfs_free_extent(trans, fs_info, node,
  2226. parent, ref_root,
  2227. ref->level, 0, 1, extent_op);
  2228. } else {
  2229. BUG();
  2230. }
  2231. return ret;
  2232. }
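/*
 * Key shape example (illustrative): with SKINNY_METADATA the block is
 * keyed (bytenr, BTRFS_METADATA_ITEM_KEY, level), e.g. (12582912, 169, 0)
 * for a leaf, whereas the old format uses
 * (bytenr, BTRFS_EXTENT_ITEM_KEY, nodesize).
 */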
  2233. /* helper function to actually process a single delayed ref entry */
  2234. static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
  2235. struct btrfs_fs_info *fs_info,
  2236. struct btrfs_delayed_ref_node *node,
  2237. struct btrfs_delayed_extent_op *extent_op,
  2238. int insert_reserved)
  2239. {
  2240. int ret = 0;
  2241. if (trans->aborted) {
  2242. if (insert_reserved)
  2243. btrfs_pin_extent(fs_info, node->bytenr,
  2244. node->num_bytes, 1);
  2245. return 0;
  2246. }
  2247. if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
  2248. node->type == BTRFS_SHARED_BLOCK_REF_KEY)
  2249. ret = run_delayed_tree_ref(trans, fs_info, node, extent_op,
  2250. insert_reserved);
  2251. else if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
  2252. node->type == BTRFS_SHARED_DATA_REF_KEY)
  2253. ret = run_delayed_data_ref(trans, fs_info, node, extent_op,
  2254. insert_reserved);
  2255. else
  2256. BUG();
  2257. return ret;
  2258. }
  2259. static inline struct btrfs_delayed_ref_node *
  2260. select_delayed_ref(struct btrfs_delayed_ref_head *head)
  2261. {
  2262. struct btrfs_delayed_ref_node *ref;
  2263. if (RB_EMPTY_ROOT(&head->ref_tree))
  2264. return NULL;
  2265. /*
  2266. * Select a delayed ref of type BTRFS_ADD_DELAYED_REF first.
  2267. * This is to prevent a ref count from going down to zero, which deletes
  2268. * the extent item from the extent tree, when there still are references
  2269. * to add, which would fail because they would not find the extent item.
  2270. */
  2271. if (!list_empty(&head->ref_add_list))
  2272. return list_first_entry(&head->ref_add_list,
  2273. struct btrfs_delayed_ref_node, add_list);
  2274. ref = rb_entry(rb_first(&head->ref_tree),
  2275. struct btrfs_delayed_ref_node, ref_node);
  2276. ASSERT(list_empty(&ref->add_list));
  2277. return ref;
  2278. }
  2279. static void unselect_delayed_ref_head(struct btrfs_delayed_ref_root *delayed_refs,
  2280. struct btrfs_delayed_ref_head *head)
  2281. {
  2282. spin_lock(&delayed_refs->lock);
  2283. head->processing = 0;
  2284. delayed_refs->num_heads_ready++;
  2285. spin_unlock(&delayed_refs->lock);
  2286. btrfs_delayed_ref_unlock(head);
  2287. }
  2288. static int cleanup_extent_op(struct btrfs_trans_handle *trans,
  2289. struct btrfs_fs_info *fs_info,
  2290. struct btrfs_delayed_ref_head *head)
  2291. {
  2292. struct btrfs_delayed_extent_op *extent_op = head->extent_op;
  2293. int ret;
  2294. if (!extent_op)
  2295. return 0;
  2296. head->extent_op = NULL;
  2297. if (head->must_insert_reserved) {
  2298. btrfs_free_delayed_extent_op(extent_op);
  2299. return 0;
  2300. }
  2301. spin_unlock(&head->lock);
  2302. ret = run_delayed_extent_op(trans, fs_info, head, extent_op);
  2303. btrfs_free_delayed_extent_op(extent_op);
  2304. return ret ? ret : 1;
  2305. }
  2306. static int cleanup_ref_head(struct btrfs_trans_handle *trans,
  2307. struct btrfs_fs_info *fs_info,
  2308. struct btrfs_delayed_ref_head *head)
  2309. {
  2310. struct btrfs_delayed_ref_root *delayed_refs;
  2311. int ret;
  2312. delayed_refs = &trans->transaction->delayed_refs;
  2313. ret = cleanup_extent_op(trans, fs_info, head);
  2314. if (ret < 0) {
  2315. unselect_delayed_ref_head(delayed_refs, head);
  2316. btrfs_debug(fs_info, "run_delayed_extent_op returned %d", ret);
  2317. return ret;
  2318. } else if (ret) {
  2319. return ret;
  2320. }
  2321. /*
  2322. * Need to drop our head ref lock and re-acquire the delayed ref lock
  2323. * and then re-check to make sure nobody got added.
  2324. */
  2325. spin_unlock(&head->lock);
  2326. spin_lock(&delayed_refs->lock);
  2327. spin_lock(&head->lock);
  2328. if (!RB_EMPTY_ROOT(&head->ref_tree) || head->extent_op) {
  2329. spin_unlock(&head->lock);
  2330. spin_unlock(&delayed_refs->lock);
  2331. return 1;
  2332. }
  2333. delayed_refs->num_heads--;
  2334. rb_erase(&head->href_node, &delayed_refs->href_root);
  2335. RB_CLEAR_NODE(&head->href_node);
  2336. spin_unlock(&delayed_refs->lock);
  2337. spin_unlock(&head->lock);
  2338. atomic_dec(&delayed_refs->num_entries);
  2339. trace_run_delayed_ref_head(fs_info, head, 0);
  2340. if (head->total_ref_mod < 0) {
  2341. struct btrfs_block_group_cache *cache;
  2342. cache = btrfs_lookup_block_group(fs_info, head->bytenr);
  2343. ASSERT(cache);
  2344. percpu_counter_add(&cache->space_info->total_bytes_pinned,
  2345. -head->num_bytes);
  2346. btrfs_put_block_group(cache);
  2347. if (head->is_data) {
  2348. spin_lock(&delayed_refs->lock);
  2349. delayed_refs->pending_csums -= head->num_bytes;
  2350. spin_unlock(&delayed_refs->lock);
  2351. }
  2352. }
  2353. if (head->must_insert_reserved) {
  2354. btrfs_pin_extent(fs_info, head->bytenr,
  2355. head->num_bytes, 1);
  2356. if (head->is_data) {
  2357. ret = btrfs_del_csums(trans, fs_info, head->bytenr,
  2358. head->num_bytes);
  2359. }
  2360. }
  2361. /* Also free its reserved qgroup space */
  2362. btrfs_qgroup_free_delayed_ref(fs_info, head->qgroup_ref_root,
  2363. head->qgroup_reserved);
  2364. btrfs_delayed_ref_unlock(head);
  2365. btrfs_put_delayed_ref_head(head);
  2366. return 0;
  2367. }
  2368. /*
  2369. * Returns 0 on success or if called with an already aborted transaction.
  2370. * Returns -ENOMEM or -EIO on failure and will abort the transaction.
  2371. */
  2372. static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
  2373. struct btrfs_fs_info *fs_info,
  2374. unsigned long nr)
  2375. {
  2376. struct btrfs_delayed_ref_root *delayed_refs;
  2377. struct btrfs_delayed_ref_node *ref;
  2378. struct btrfs_delayed_ref_head *locked_ref = NULL;
  2379. struct btrfs_delayed_extent_op *extent_op;
  2380. ktime_t start = ktime_get();
  2381. int ret;
  2382. unsigned long count = 0;
  2383. unsigned long actual_count = 0;
  2384. int must_insert_reserved = 0;
  2385. delayed_refs = &trans->transaction->delayed_refs;
  2386. while (1) {
  2387. if (!locked_ref) {
  2388. if (count >= nr)
  2389. break;
  2390. spin_lock(&delayed_refs->lock);
  2391. locked_ref = btrfs_select_ref_head(trans);
  2392. if (!locked_ref) {
  2393. spin_unlock(&delayed_refs->lock);
  2394. break;
  2395. }
  2396. /* grab the lock that says we are going to process
  2397. * all the refs for this head */
  2398. ret = btrfs_delayed_ref_lock(trans, locked_ref);
  2399. spin_unlock(&delayed_refs->lock);
  2400. /*
  2401. * we may have dropped the spin lock to get the head
  2402. * mutex lock, and that might have given someone else
  2403. * time to free the head. If that's true, it has been
  2404. * removed from our list and we can move on.
  2405. */
  2406. if (ret == -EAGAIN) {
  2407. locked_ref = NULL;
  2408. count++;
  2409. continue;
  2410. }
  2411. }
  2412. /*
  2413. * We need to try and merge add/drops of the same ref since we
  2414. * can run into issues with relocate dropping the implicit ref
  2415. * and then it being added back again before the drop can
  2416. * finish. If we merged anything we need to re-loop so we can
  2417. * get a good ref.
  2418. * Or we can get node references of the same type that weren't
  2419. * merged when created due to bumps in the tree mod seq, and
  2420. * we need to merge them to prevent adding an inline extent
  2421. * backref before dropping it (triggering a BUG_ON at
  2422. * insert_inline_extent_backref()).
  2423. */
  2424. spin_lock(&locked_ref->lock);
  2425. btrfs_merge_delayed_refs(trans, fs_info, delayed_refs,
  2426. locked_ref);
  2427. /*
  2428. * locked_ref is the head node, so we have to go one
  2429. * node back for any delayed ref updates
  2430. */
  2431. ref = select_delayed_ref(locked_ref);
  2432. if (ref && ref->seq &&
  2433. btrfs_check_delayed_seq(fs_info, delayed_refs, ref->seq)) {
  2434. spin_unlock(&locked_ref->lock);
  2435. unselect_delayed_ref_head(delayed_refs, locked_ref);
  2436. locked_ref = NULL;
  2437. cond_resched();
  2438. count++;
  2439. continue;
  2440. }
  2441. /*
  2442. * We're done processing refs in this ref_head, clean everything
  2443. * up and move on to the next ref_head.
  2444. */
  2445. if (!ref) {
  2446. ret = cleanup_ref_head(trans, fs_info, locked_ref);
2447. if (ret > 0) {
  2448. /* We dropped our lock, we need to loop. */
  2449. ret = 0;
  2450. continue;
  2451. } else if (ret) {
  2452. return ret;
  2453. }
  2454. locked_ref = NULL;
  2455. count++;
  2456. continue;
  2457. }
  2458. actual_count++;
  2459. ref->in_tree = 0;
  2460. rb_erase(&ref->ref_node, &locked_ref->ref_tree);
  2461. RB_CLEAR_NODE(&ref->ref_node);
  2462. if (!list_empty(&ref->add_list))
  2463. list_del(&ref->add_list);
  2464. /*
  2465. * When we play the delayed ref, also correct the ref_mod on
  2466. * head
  2467. */
  2468. switch (ref->action) {
  2469. case BTRFS_ADD_DELAYED_REF:
  2470. case BTRFS_ADD_DELAYED_EXTENT:
  2471. locked_ref->ref_mod -= ref->ref_mod;
  2472. break;
  2473. case BTRFS_DROP_DELAYED_REF:
  2474. locked_ref->ref_mod += ref->ref_mod;
  2475. break;
  2476. default:
  2477. WARN_ON(1);
  2478. }
  2479. atomic_dec(&delayed_refs->num_entries);
  2480. /*
2481. * Record the must_insert_reserved flag before we drop the spin
  2482. * lock.
  2483. */
  2484. must_insert_reserved = locked_ref->must_insert_reserved;
  2485. locked_ref->must_insert_reserved = 0;
  2486. extent_op = locked_ref->extent_op;
  2487. locked_ref->extent_op = NULL;
  2488. spin_unlock(&locked_ref->lock);
  2489. ret = run_one_delayed_ref(trans, fs_info, ref, extent_op,
  2490. must_insert_reserved);
  2491. btrfs_free_delayed_extent_op(extent_op);
  2492. if (ret) {
  2493. unselect_delayed_ref_head(delayed_refs, locked_ref);
  2494. btrfs_put_delayed_ref(ref);
  2495. btrfs_debug(fs_info, "run_one_delayed_ref returned %d",
  2496. ret);
  2497. return ret;
  2498. }
  2499. btrfs_put_delayed_ref(ref);
  2500. count++;
  2501. cond_resched();
  2502. }
  2503. /*
  2504. * We don't want to include ref heads since we can have empty ref heads
  2505. * and those will drastically skew our runtime down since we just do
  2506. * accounting, no actual extent tree updates.
  2507. */
  2508. if (actual_count > 0) {
  2509. u64 runtime = ktime_to_ns(ktime_sub(ktime_get(), start));
  2510. u64 avg;
  2511. /*
  2512. * We weigh the current average higher than our current runtime
  2513. * to avoid large swings in the average.
  2514. */
  2515. spin_lock(&delayed_refs->lock);
  2516. avg = fs_info->avg_delayed_ref_runtime * 3 + runtime;
  2517. fs_info->avg_delayed_ref_runtime = avg >> 2; /* div by 4 */
  2518. spin_unlock(&delayed_refs->lock);
  2519. }
  2520. return 0;
  2521. }
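/*
 * Illustrative arithmetic for the weighted average above: with a stored
 * avg_delayed_ref_runtime of 100000ns and a fresh runtime of 500000ns,
 * avg = (3 * 100000 + 500000) >> 2 = 200000ns, i.e. each update moves the
 * estimate a quarter of the way toward the new sample.
 */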
  2522. #ifdef SCRAMBLE_DELAYED_REFS
  2523. /*
  2524. * Normally delayed refs get processed in ascending bytenr order. This
  2525. * correlates in most cases to the order added. To expose dependencies on this
  2526. * order, we start to process the tree in the middle instead of the beginning
  2527. */
  2528. static u64 find_middle(struct rb_root *root)
  2529. {
  2530. struct rb_node *n = root->rb_node;
  2531. struct btrfs_delayed_ref_node *entry;
  2532. int alt = 1;
  2533. u64 middle;
  2534. u64 first = 0, last = 0;
  2535. n = rb_first(root);
  2536. if (n) {
  2537. entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
  2538. first = entry->bytenr;
  2539. }
  2540. n = rb_last(root);
  2541. if (n) {
  2542. entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
  2543. last = entry->bytenr;
  2544. }
  2545. n = root->rb_node;
  2546. while (n) {
  2547. entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
  2548. WARN_ON(!entry->in_tree);
  2549. middle = entry->bytenr;
  2550. if (alt)
  2551. n = n->rb_left;
  2552. else
  2553. n = n->rb_right;
  2554. alt = 1 - alt;
  2555. }
  2556. return middle;
  2557. }
  2558. #endif
  2559. static inline u64 heads_to_leaves(struct btrfs_fs_info *fs_info, u64 heads)
  2560. {
  2561. u64 num_bytes;
  2562. num_bytes = heads * (sizeof(struct btrfs_extent_item) +
  2563. sizeof(struct btrfs_extent_inline_ref));
  2564. if (!btrfs_fs_incompat(fs_info, SKINNY_METADATA))
  2565. num_bytes += heads * sizeof(struct btrfs_tree_block_info);
  2566. /*
2567. * We never fill leaves completely; the caller doubles this result to
2568. * get closer to what we're really going to want to use.
  2569. */
  2570. return div_u64(num_bytes, BTRFS_LEAF_DATA_SIZE(fs_info));
  2571. }
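/*
 * Worked example (illustrative; assumes on-disk struct sizes of roughly
 * 50 bytes per non-skinny head): 1000 heads come to ~50KiB of item data,
 * which divides out to about 3 leaves with a 16KiB leaf size, before the
 * caller doubles the estimate.
 */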
  2572. /*
2573. * Takes the number of bytes to be checksummed and figures out how many
2574. * leaves it would require to store the csums for that many bytes.
  2575. */
  2576. u64 btrfs_csum_bytes_to_leaves(struct btrfs_fs_info *fs_info, u64 csum_bytes)
  2577. {
  2578. u64 csum_size;
  2579. u64 num_csums_per_leaf;
  2580. u64 num_csums;
  2581. csum_size = BTRFS_MAX_ITEM_SIZE(fs_info);
  2582. num_csums_per_leaf = div64_u64(csum_size,
  2583. (u64)btrfs_super_csum_size(fs_info->super_copy));
  2584. num_csums = div64_u64(csum_bytes, fs_info->sectorsize);
  2585. num_csums += num_csums_per_leaf - 1;
  2586. num_csums = div64_u64(num_csums, num_csums_per_leaf);
  2587. return num_csums;
  2588. }
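/*
 * Worked example (illustrative, assuming 4KiB sectors, 16KiB nodes and
 * 4-byte crc32c checksums): one leaf holds roughly 16KiB / 4 = ~4000
 * csums, covering ~16MiB of data, so csum_bytes of 16MiB (4096 csums)
 * rounds up to 2 leaves.
 */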
  2589. int btrfs_check_space_for_delayed_refs(struct btrfs_trans_handle *trans,
  2590. struct btrfs_fs_info *fs_info)
  2591. {
  2592. struct btrfs_block_rsv *global_rsv;
  2593. u64 num_heads = trans->transaction->delayed_refs.num_heads_ready;
  2594. u64 csum_bytes = trans->transaction->delayed_refs.pending_csums;
  2595. unsigned int num_dirty_bgs = trans->transaction->num_dirty_bgs;
  2596. u64 num_bytes, num_dirty_bgs_bytes;
  2597. int ret = 0;
  2598. num_bytes = btrfs_calc_trans_metadata_size(fs_info, 1);
  2599. num_heads = heads_to_leaves(fs_info, num_heads);
  2600. if (num_heads > 1)
  2601. num_bytes += (num_heads - 1) * fs_info->nodesize;
  2602. num_bytes <<= 1;
  2603. num_bytes += btrfs_csum_bytes_to_leaves(fs_info, csum_bytes) *
  2604. fs_info->nodesize;
  2605. num_dirty_bgs_bytes = btrfs_calc_trans_metadata_size(fs_info,
  2606. num_dirty_bgs);
  2607. global_rsv = &fs_info->global_block_rsv;
  2608. /*
  2609. * If we can't allocate any more chunks lets make sure we have _lots_ of
  2610. * wiggle room since running delayed refs can create more delayed refs.
  2611. */
  2612. if (global_rsv->space_info->full) {
  2613. num_dirty_bgs_bytes <<= 1;
  2614. num_bytes <<= 1;
  2615. }
  2616. spin_lock(&global_rsv->lock);
  2617. if (global_rsv->reserved <= num_bytes + num_dirty_bgs_bytes)
  2618. ret = 1;
  2619. spin_unlock(&global_rsv->lock);
  2620. return ret;
  2621. }
  2622. int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans,
  2623. struct btrfs_fs_info *fs_info)
  2624. {
  2625. u64 num_entries =
  2626. atomic_read(&trans->transaction->delayed_refs.num_entries);
  2627. u64 avg_runtime;
  2628. u64 val;
  2629. smp_mb();
  2630. avg_runtime = fs_info->avg_delayed_ref_runtime;
  2631. val = num_entries * avg_runtime;
  2632. if (val >= NSEC_PER_SEC)
  2633. return 1;
  2634. if (val >= NSEC_PER_SEC / 2)
  2635. return 2;
  2636. return btrfs_check_space_for_delayed_refs(trans, fs_info);
  2637. }
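/*
 * Threshold example (illustrative): with avg_delayed_ref_runtime at
 * 100000ns, 10000 queued entries estimate to a full second of work and
 * return 1; 5000 entries estimate to half a second and return 2; below
 * that the decision falls through to the space check.
 */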
  2638. struct async_delayed_refs {
  2639. struct btrfs_root *root;
  2640. u64 transid;
  2641. int count;
  2642. int error;
  2643. int sync;
  2644. struct completion wait;
  2645. struct btrfs_work work;
  2646. };
  2647. static inline struct async_delayed_refs *
  2648. to_async_delayed_refs(struct btrfs_work *work)
  2649. {
  2650. return container_of(work, struct async_delayed_refs, work);
  2651. }
  2652. static void delayed_ref_async_start(struct btrfs_work *work)
  2653. {
  2654. struct async_delayed_refs *async = to_async_delayed_refs(work);
  2655. struct btrfs_trans_handle *trans;
  2656. struct btrfs_fs_info *fs_info = async->root->fs_info;
  2657. int ret;
  2658. /* if the commit is already started, we don't need to wait here */
  2659. if (btrfs_transaction_blocked(fs_info))
  2660. goto done;
  2661. trans = btrfs_join_transaction(async->root);
  2662. if (IS_ERR(trans)) {
  2663. async->error = PTR_ERR(trans);
  2664. goto done;
  2665. }
  2666. /*
  2667. * trans->sync means that when we call end_transaction, we won't
  2668. * wait on delayed refs
  2669. */
  2670. trans->sync = true;
  2671. /* Don't bother flushing if we got into a different transaction */
  2672. if (trans->transid > async->transid)
  2673. goto end;
  2674. ret = btrfs_run_delayed_refs(trans, fs_info, async->count);
  2675. if (ret)
  2676. async->error = ret;
  2677. end:
  2678. ret = btrfs_end_transaction(trans);
  2679. if (ret && !async->error)
  2680. async->error = ret;
  2681. done:
  2682. if (async->sync)
  2683. complete(&async->wait);
  2684. else
  2685. kfree(async);
  2686. }
  2687. int btrfs_async_run_delayed_refs(struct btrfs_fs_info *fs_info,
  2688. unsigned long count, u64 transid, int wait)
  2689. {
  2690. struct async_delayed_refs *async;
  2691. int ret;
  2692. async = kmalloc(sizeof(*async), GFP_NOFS);
  2693. if (!async)
  2694. return -ENOMEM;
  2695. async->root = fs_info->tree_root;
  2696. async->count = count;
  2697. async->error = 0;
  2698. async->transid = transid;
  2699. if (wait)
  2700. async->sync = 1;
  2701. else
  2702. async->sync = 0;
  2703. init_completion(&async->wait);
  2704. btrfs_init_work(&async->work, btrfs_extent_refs_helper,
  2705. delayed_ref_async_start, NULL, NULL);
  2706. btrfs_queue_work(fs_info->extent_workers, &async->work);
  2707. if (wait) {
  2708. wait_for_completion(&async->wait);
  2709. ret = async->error;
  2710. kfree(async);
  2711. return ret;
  2712. }
  2713. return 0;
  2714. }
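/*
 * Usage sketch (illustrative): a synchronous caller blocks on the worker,
 *
 *   ret = btrfs_async_run_delayed_refs(fs_info, 64, trans->transid, 1);
 *
 * while wait == 0 queues the work and returns immediately, leaving the
 * async struct to be freed by the worker itself.
 */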
  2715. /*
  2716. * this starts processing the delayed reference count updates and
  2717. * extent insertions we have queued up so far. count can be
  2718. * 0, which means to process everything in the tree at the start
  2719. * of the run (but not newly added entries), or it can be some target
  2720. * number you'd like to process.
  2721. *
  2722. * Returns 0 on success or if called with an aborted transaction
  2723. * Returns <0 on error and aborts the transaction
  2724. */
  2725. int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
  2726. struct btrfs_fs_info *fs_info, unsigned long count)
  2727. {
  2728. struct rb_node *node;
  2729. struct btrfs_delayed_ref_root *delayed_refs;
  2730. struct btrfs_delayed_ref_head *head;
  2731. int ret;
  2732. int run_all = count == (unsigned long)-1;
  2733. bool can_flush_pending_bgs = trans->can_flush_pending_bgs;
  2734. /* We'll clean this up in btrfs_cleanup_transaction */
  2735. if (trans->aborted)
  2736. return 0;
  2737. if (test_bit(BTRFS_FS_CREATING_FREE_SPACE_TREE, &fs_info->flags))
  2738. return 0;
  2739. delayed_refs = &trans->transaction->delayed_refs;
  2740. if (count == 0)
  2741. count = atomic_read(&delayed_refs->num_entries) * 2;
  2742. again:
  2743. #ifdef SCRAMBLE_DELAYED_REFS
  2744. delayed_refs->run_delayed_start = find_middle(&delayed_refs->root);
  2745. #endif
  2746. trans->can_flush_pending_bgs = false;
  2747. ret = __btrfs_run_delayed_refs(trans, fs_info, count);
  2748. if (ret < 0) {
  2749. btrfs_abort_transaction(trans, ret);
  2750. return ret;
  2751. }
  2752. if (run_all) {
  2753. if (!list_empty(&trans->new_bgs))
  2754. btrfs_create_pending_block_groups(trans, fs_info);
  2755. spin_lock(&delayed_refs->lock);
  2756. node = rb_first(&delayed_refs->href_root);
  2757. if (!node) {
  2758. spin_unlock(&delayed_refs->lock);
  2759. goto out;
  2760. }
  2761. head = rb_entry(node, struct btrfs_delayed_ref_head,
  2762. href_node);
  2763. refcount_inc(&head->refs);
  2764. spin_unlock(&delayed_refs->lock);
  2765. /* Mutex was contended, block until it's released and retry. */
  2766. mutex_lock(&head->mutex);
  2767. mutex_unlock(&head->mutex);
  2768. btrfs_put_delayed_ref_head(head);
  2769. cond_resched();
  2770. goto again;
  2771. }
  2772. out:
  2773. trans->can_flush_pending_bgs = can_flush_pending_bgs;
  2774. return 0;
  2775. }
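/*
 * Usage sketch (illustrative): a caller that wants to drain everything,
 * including refs queued while running, passes (unsigned long)-1,
 *
 *   ret = btrfs_run_delayed_refs(trans, fs_info, (unsigned long)-1);
 *
 * whereas count == 0 only targets roughly what was queued at entry
 * (num_entries * 2, per the snapshot above).
 */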
  2776. int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
  2777. struct btrfs_fs_info *fs_info,
  2778. u64 bytenr, u64 num_bytes, u64 flags,
  2779. int level, int is_data)
  2780. {
  2781. struct btrfs_delayed_extent_op *extent_op;
  2782. int ret;
  2783. extent_op = btrfs_alloc_delayed_extent_op();
  2784. if (!extent_op)
  2785. return -ENOMEM;
  2786. extent_op->flags_to_set = flags;
  2787. extent_op->update_flags = true;
  2788. extent_op->update_key = false;
  2789. extent_op->is_data = is_data ? true : false;
  2790. extent_op->level = level;
  2791. ret = btrfs_add_delayed_extent_op(fs_info, trans, bytenr,
  2792. num_bytes, extent_op);
  2793. if (ret)
  2794. btrfs_free_delayed_extent_op(extent_op);
  2795. return ret;
  2796. }
  2797. static noinline int check_delayed_ref(struct btrfs_root *root,
  2798. struct btrfs_path *path,
  2799. u64 objectid, u64 offset, u64 bytenr)
  2800. {
  2801. struct btrfs_delayed_ref_head *head;
  2802. struct btrfs_delayed_ref_node *ref;
  2803. struct btrfs_delayed_data_ref *data_ref;
  2804. struct btrfs_delayed_ref_root *delayed_refs;
  2805. struct btrfs_transaction *cur_trans;
  2806. struct rb_node *node;
  2807. int ret = 0;
  2808. cur_trans = root->fs_info->running_transaction;
  2809. if (!cur_trans)
  2810. return 0;
  2811. delayed_refs = &cur_trans->delayed_refs;
  2812. spin_lock(&delayed_refs->lock);
  2813. head = btrfs_find_delayed_ref_head(delayed_refs, bytenr);
  2814. if (!head) {
  2815. spin_unlock(&delayed_refs->lock);
  2816. return 0;
  2817. }
  2818. if (!mutex_trylock(&head->mutex)) {
  2819. refcount_inc(&head->refs);
  2820. spin_unlock(&delayed_refs->lock);
  2821. btrfs_release_path(path);
  2822. /*
  2823. * Mutex was contended, block until it's released and let
  2824. * caller try again
  2825. */
  2826. mutex_lock(&head->mutex);
  2827. mutex_unlock(&head->mutex);
  2828. btrfs_put_delayed_ref_head(head);
  2829. return -EAGAIN;
  2830. }
  2831. spin_unlock(&delayed_refs->lock);
  2832. spin_lock(&head->lock);
  2833. /*
  2834. * XXX: We should replace this with a proper search function in the
  2835. * future.
  2836. */
  2837. for (node = rb_first(&head->ref_tree); node; node = rb_next(node)) {
  2838. ref = rb_entry(node, struct btrfs_delayed_ref_node, ref_node);
  2839. /* If it's a shared ref we know a cross reference exists */
  2840. if (ref->type != BTRFS_EXTENT_DATA_REF_KEY) {
  2841. ret = 1;
  2842. break;
  2843. }
  2844. data_ref = btrfs_delayed_node_to_data_ref(ref);
  2845. /*
  2846. * If our ref doesn't match the one we're currently looking at
  2847. * then we have a cross reference.
  2848. */
  2849. if (data_ref->root != root->root_key.objectid ||
  2850. data_ref->objectid != objectid ||
  2851. data_ref->offset != offset) {
  2852. ret = 1;
  2853. break;
  2854. }
  2855. }
  2856. spin_unlock(&head->lock);
  2857. mutex_unlock(&head->mutex);
  2858. return ret;
  2859. }
  2860. static noinline int check_committed_ref(struct btrfs_root *root,
  2861. struct btrfs_path *path,
  2862. u64 objectid, u64 offset, u64 bytenr)
  2863. {
  2864. struct btrfs_fs_info *fs_info = root->fs_info;
  2865. struct btrfs_root *extent_root = fs_info->extent_root;
  2866. struct extent_buffer *leaf;
  2867. struct btrfs_extent_data_ref *ref;
  2868. struct btrfs_extent_inline_ref *iref;
  2869. struct btrfs_extent_item *ei;
  2870. struct btrfs_key key;
  2871. u32 item_size;
  2872. int type;
  2873. int ret;
  2874. key.objectid = bytenr;
  2875. key.offset = (u64)-1;
  2876. key.type = BTRFS_EXTENT_ITEM_KEY;
  2877. ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
  2878. if (ret < 0)
  2879. goto out;
  2880. BUG_ON(ret == 0); /* Corruption */
  2881. ret = -ENOENT;
  2882. if (path->slots[0] == 0)
  2883. goto out;
  2884. path->slots[0]--;
  2885. leaf = path->nodes[0];
  2886. btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
  2887. if (key.objectid != bytenr || key.type != BTRFS_EXTENT_ITEM_KEY)
  2888. goto out;
  2889. ret = 1;
  2890. item_size = btrfs_item_size_nr(leaf, path->slots[0]);
  2891. #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
  2892. if (item_size < sizeof(*ei)) {
  2893. WARN_ON(item_size != sizeof(struct btrfs_extent_item_v0));
  2894. goto out;
  2895. }
  2896. #endif
  2897. ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
  2898. if (item_size != sizeof(*ei) +
  2899. btrfs_extent_inline_ref_size(BTRFS_EXTENT_DATA_REF_KEY))
  2900. goto out;
  2901. if (btrfs_extent_generation(leaf, ei) <=
  2902. btrfs_root_last_snapshot(&root->root_item))
  2903. goto out;
  2904. iref = (struct btrfs_extent_inline_ref *)(ei + 1);
  2905. type = btrfs_get_extent_inline_ref_type(leaf, iref, BTRFS_REF_TYPE_DATA);
  2906. if (type != BTRFS_EXTENT_DATA_REF_KEY)
  2907. goto out;
  2908. ref = (struct btrfs_extent_data_ref *)(&iref->offset);
  2909. if (btrfs_extent_refs(leaf, ei) !=
  2910. btrfs_extent_data_ref_count(leaf, ref) ||
  2911. btrfs_extent_data_ref_root(leaf, ref) !=
  2912. root->root_key.objectid ||
  2913. btrfs_extent_data_ref_objectid(leaf, ref) != objectid ||
  2914. btrfs_extent_data_ref_offset(leaf, ref) != offset)
  2915. goto out;
  2916. ret = 0;
  2917. out:
  2918. return ret;
  2919. }
  2920. int btrfs_cross_ref_exist(struct btrfs_root *root, u64 objectid, u64 offset,
  2921. u64 bytenr)
  2922. {
  2923. struct btrfs_path *path;
  2924. int ret;
  2925. int ret2;
  2926. path = btrfs_alloc_path();
  2927. if (!path)
  2928. return -ENOENT;
  2929. do {
  2930. ret = check_committed_ref(root, path, objectid,
  2931. offset, bytenr);
  2932. if (ret && ret != -ENOENT)
  2933. goto out;
  2934. ret2 = check_delayed_ref(root, path, objectid,
  2935. offset, bytenr);
  2936. } while (ret2 == -EAGAIN);
  2937. if (ret2 && ret2 != -ENOENT) {
  2938. ret = ret2;
  2939. goto out;
  2940. }
  2941. if (ret != -ENOENT || ret2 != -ENOENT)
  2942. ret = 0;
  2943. out:
  2944. btrfs_free_path(path);
  2945. if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
  2946. WARN_ON(ret > 0);
  2947. return ret;
  2948. }
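/*
 * Return convention (summarizing the logic above): 0 means only this
 * root/objectid/offset references the extent, a positive value means a
 * cross reference exists in either the committed tree or the delayed
 * refs, and negative values are errors; check_delayed_ref()'s -EAGAIN
 * is retried internally by the loop.
 */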
  2949. static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
  2950. struct btrfs_root *root,
  2951. struct extent_buffer *buf,
  2952. int full_backref, int inc)
  2953. {
  2954. struct btrfs_fs_info *fs_info = root->fs_info;
  2955. u64 bytenr;
  2956. u64 num_bytes;
  2957. u64 parent;
  2958. u64 ref_root;
  2959. u32 nritems;
  2960. struct btrfs_key key;
  2961. struct btrfs_file_extent_item *fi;
  2962. int i;
  2963. int level;
  2964. int ret = 0;
  2965. int (*process_func)(struct btrfs_trans_handle *,
  2966. struct btrfs_root *,
  2967. u64, u64, u64, u64, u64, u64);
  2968. if (btrfs_is_testing(fs_info))
  2969. return 0;
  2970. ref_root = btrfs_header_owner(buf);
  2971. nritems = btrfs_header_nritems(buf);
  2972. level = btrfs_header_level(buf);
  2973. if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state) && level == 0)
  2974. return 0;
  2975. if (inc)
  2976. process_func = btrfs_inc_extent_ref;
  2977. else
  2978. process_func = btrfs_free_extent;
  2979. if (full_backref)
  2980. parent = buf->start;
  2981. else
  2982. parent = 0;
  2983. for (i = 0; i < nritems; i++) {
  2984. if (level == 0) {
  2985. btrfs_item_key_to_cpu(buf, &key, i);
  2986. if (key.type != BTRFS_EXTENT_DATA_KEY)
  2987. continue;
  2988. fi = btrfs_item_ptr(buf, i,
  2989. struct btrfs_file_extent_item);
  2990. if (btrfs_file_extent_type(buf, fi) ==
  2991. BTRFS_FILE_EXTENT_INLINE)
  2992. continue;
  2993. bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
  2994. if (bytenr == 0)
  2995. continue;
  2996. num_bytes = btrfs_file_extent_disk_num_bytes(buf, fi);
  2997. key.offset -= btrfs_file_extent_offset(buf, fi);
  2998. ret = process_func(trans, root, bytenr, num_bytes,
  2999. parent, ref_root, key.objectid,
  3000. key.offset);
  3001. if (ret)
  3002. goto fail;
  3003. } else {
  3004. bytenr = btrfs_node_blockptr(buf, i);
  3005. num_bytes = fs_info->nodesize;
  3006. ret = process_func(trans, root, bytenr, num_bytes,
  3007. parent, ref_root, level - 1, 0);
  3008. if (ret)
  3009. goto fail;
  3010. }
  3011. }
  3012. return 0;
  3013. fail:
  3014. return ret;
  3015. }
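/*
 * Illustrative note on the two modes above: with full_backref set the
 * refs become shared refs keyed by the physical parent
 * (parent == buf->start), as when a subtree turns shared; with
 * full_backref == 0, parent stays 0 and the refs are keyed by ref_root.
 */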
  3016. int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
  3017. struct extent_buffer *buf, int full_backref)
  3018. {
  3019. return __btrfs_mod_ref(trans, root, buf, full_backref, 1);
  3020. }
  3021. int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
  3022. struct extent_buffer *buf, int full_backref)
  3023. {
  3024. return __btrfs_mod_ref(trans, root, buf, full_backref, 0);
  3025. }
  3026. static int write_one_cache_group(struct btrfs_trans_handle *trans,
  3027. struct btrfs_fs_info *fs_info,
  3028. struct btrfs_path *path,
  3029. struct btrfs_block_group_cache *cache)
  3030. {
  3031. int ret;
  3032. struct btrfs_root *extent_root = fs_info->extent_root;
  3033. unsigned long bi;
  3034. struct extent_buffer *leaf;
  3035. ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
  3036. if (ret) {
  3037. if (ret > 0)
  3038. ret = -ENOENT;
  3039. goto fail;
  3040. }
  3041. leaf = path->nodes[0];
  3042. bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
  3043. write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
  3044. btrfs_mark_buffer_dirty(leaf);
  3045. fail:
  3046. btrfs_release_path(path);
  3047. return ret;
  3048. }
  3049. static struct btrfs_block_group_cache *
  3050. next_block_group(struct btrfs_fs_info *fs_info,
  3051. struct btrfs_block_group_cache *cache)
  3052. {
  3053. struct rb_node *node;
  3054. spin_lock(&fs_info->block_group_cache_lock);
  3055. /* If our block group was removed, we need a full search. */
  3056. if (RB_EMPTY_NODE(&cache->cache_node)) {
  3057. const u64 next_bytenr = cache->key.objectid + cache->key.offset;
  3058. spin_unlock(&fs_info->block_group_cache_lock);
  3059. btrfs_put_block_group(cache);
3060. cache = btrfs_lookup_first_block_group(fs_info, next_bytenr);
return cache;
  3061. }
  3062. node = rb_next(&cache->cache_node);
  3063. btrfs_put_block_group(cache);
  3064. if (node) {
  3065. cache = rb_entry(node, struct btrfs_block_group_cache,
  3066. cache_node);
  3067. btrfs_get_block_group(cache);
  3068. } else
  3069. cache = NULL;
  3070. spin_unlock(&fs_info->block_group_cache_lock);
  3071. return cache;
  3072. }
  3073. static int cache_save_setup(struct btrfs_block_group_cache *block_group,
  3074. struct btrfs_trans_handle *trans,
  3075. struct btrfs_path *path)
  3076. {
  3077. struct btrfs_fs_info *fs_info = block_group->fs_info;
  3078. struct btrfs_root *root = fs_info->tree_root;
  3079. struct inode *inode = NULL;
  3080. struct extent_changeset *data_reserved = NULL;
  3081. u64 alloc_hint = 0;
  3082. int dcs = BTRFS_DC_ERROR;
  3083. u64 num_pages = 0;
  3084. int retries = 0;
  3085. int ret = 0;
  3086. /*
3087. * If this block group is smaller than 100 megs, don't bother caching the
  3088. * block group.
  3089. */
  3090. if (block_group->key.offset < (100 * SZ_1M)) {
  3091. spin_lock(&block_group->lock);
  3092. block_group->disk_cache_state = BTRFS_DC_WRITTEN;
  3093. spin_unlock(&block_group->lock);
  3094. return 0;
  3095. }
  3096. if (trans->aborted)
  3097. return 0;
  3098. again:
  3099. inode = lookup_free_space_inode(fs_info, block_group, path);
  3100. if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
  3101. ret = PTR_ERR(inode);
  3102. btrfs_release_path(path);
  3103. goto out;
  3104. }
  3105. if (IS_ERR(inode)) {
  3106. BUG_ON(retries);
  3107. retries++;
  3108. if (block_group->ro)
  3109. goto out_free;
  3110. ret = create_free_space_inode(fs_info, trans, block_group,
  3111. path);
  3112. if (ret)
  3113. goto out_free;
  3114. goto again;
  3115. }
  3116. /*
  3117. * We want to set the generation to 0, that way if anything goes wrong
  3118. * from here on out we know not to trust this cache when we load up next
  3119. * time.
  3120. */
  3121. BTRFS_I(inode)->generation = 0;
  3122. ret = btrfs_update_inode(trans, root, inode);
  3123. if (ret) {
  3124. /*
  3125. * So theoretically we could recover from this, simply set the
  3126. * super cache generation to 0 so we know to invalidate the
  3127. * cache, but then we'd have to keep track of the block groups
  3128. * that fail this way so we know we _have_ to reset this cache
  3129. * before the next commit or risk reading stale cache. So to
  3130. * limit our exposure to horrible edge cases lets just abort the
  3131. * transaction, this only happens in really bad situations
  3132. * anyway.
  3133. */
  3134. btrfs_abort_transaction(trans, ret);
  3135. goto out_put;
  3136. }
  3137. WARN_ON(ret);
3138. /* We've already set up this transaction, go ahead and exit */
  3139. if (block_group->cache_generation == trans->transid &&
  3140. i_size_read(inode)) {
  3141. dcs = BTRFS_DC_SETUP;
  3142. goto out_put;
  3143. }
  3144. if (i_size_read(inode) > 0) {
  3145. ret = btrfs_check_trunc_cache_free_space(fs_info,
  3146. &fs_info->global_block_rsv);
  3147. if (ret)
  3148. goto out_put;
  3149. ret = btrfs_truncate_free_space_cache(trans, NULL, inode);
  3150. if (ret)
  3151. goto out_put;
  3152. }
  3153. spin_lock(&block_group->lock);
  3154. if (block_group->cached != BTRFS_CACHE_FINISHED ||
  3155. !btrfs_test_opt(fs_info, SPACE_CACHE)) {
  3156. /*
  3157. * don't bother trying to write stuff out _if_
  3158. * a) we're not cached,
3159. * b) we mounted with the nospace_cache option,
3160. * c) we're using the v2 space cache (FREE_SPACE_TREE).
  3161. */
  3162. dcs = BTRFS_DC_WRITTEN;
  3163. spin_unlock(&block_group->lock);
  3164. goto out_put;
  3165. }
  3166. spin_unlock(&block_group->lock);
  3167. /*
  3168. * We hit an ENOSPC when setting up the cache in this transaction, just
  3169. * skip doing the setup, we've already cleared the cache so we're safe.
  3170. */
  3171. if (test_bit(BTRFS_TRANS_CACHE_ENOSPC, &trans->transaction->flags)) {
  3172. ret = -ENOSPC;
  3173. goto out_put;
  3174. }
  3175. /*
  3176. * Try to preallocate enough space based on how big the block group is.
  3177. * Keep in mind this has to include any pinned space which could end up
  3178. * taking up quite a bit since it's not folded into the other space
  3179. * cache.
  3180. */
  3181. num_pages = div_u64(block_group->key.offset, SZ_256M);
  3182. if (!num_pages)
  3183. num_pages = 1;
  3184. num_pages *= 16;
  3185. num_pages *= PAGE_SIZE;
  3186. ret = btrfs_check_data_free_space(inode, &data_reserved, 0, num_pages);
  3187. if (ret)
  3188. goto out_put;
  3189. ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, num_pages,
  3190. num_pages, num_pages,
  3191. &alloc_hint);
  3192. /*
  3193. * Our cache requires contiguous chunks so that we don't modify a bunch
  3194. * of metadata or split extents when writing the cache out, which means
  3195. * we can enospc if we are heavily fragmented in addition to just normal
  3196. * out of space conditions. So if we hit this just skip setting up any
  3197. * other block groups for this transaction, maybe we'll unpin enough
  3198. * space the next time around.
  3199. */
  3200. if (!ret)
  3201. dcs = BTRFS_DC_SETUP;
  3202. else if (ret == -ENOSPC)
  3203. set_bit(BTRFS_TRANS_CACHE_ENOSPC, &trans->transaction->flags);
  3204. out_put:
  3205. iput(inode);
  3206. out_free:
  3207. btrfs_release_path(path);
  3208. out:
  3209. spin_lock(&block_group->lock);
  3210. if (!ret && dcs == BTRFS_DC_SETUP)
  3211. block_group->cache_generation = trans->transid;
  3212. block_group->disk_cache_state = dcs;
  3213. spin_unlock(&block_group->lock);
  3214. extent_changeset_free(data_reserved);
  3215. return ret;
  3216. }
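/*
 * Worked example for the sizing above (illustrative, 4KiB pages): a 1GiB
 * block group gives num_pages = div_u64(SZ_1G, SZ_256M) = 4, then
 * 4 * 16 = 64, then 64 * PAGE_SIZE = 256KiB reserved and preallocated
 * for the free space cache file.
 */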
  3217. int btrfs_setup_space_cache(struct btrfs_trans_handle *trans,
  3218. struct btrfs_fs_info *fs_info)
  3219. {
  3220. struct btrfs_block_group_cache *cache, *tmp;
  3221. struct btrfs_transaction *cur_trans = trans->transaction;
  3222. struct btrfs_path *path;
  3223. if (list_empty(&cur_trans->dirty_bgs) ||
  3224. !btrfs_test_opt(fs_info, SPACE_CACHE))
  3225. return 0;
  3226. path = btrfs_alloc_path();
  3227. if (!path)
  3228. return -ENOMEM;
  3229. /* Could add new block groups, use _safe just in case */
  3230. list_for_each_entry_safe(cache, tmp, &cur_trans->dirty_bgs,
  3231. dirty_list) {
  3232. if (cache->disk_cache_state == BTRFS_DC_CLEAR)
  3233. cache_save_setup(cache, trans, path);
  3234. }
  3235. btrfs_free_path(path);
  3236. return 0;
  3237. }
  3238. /*
  3239. * transaction commit does final block group cache writeback during a
  3240. * critical section where nothing is allowed to change the FS. This is
  3241. * required in order for the cache to actually match the block group,
  3242. * but can introduce a lot of latency into the commit.
  3243. *
  3244. * So, btrfs_start_dirty_block_groups is here to kick off block group
  3245. * cache IO. There's a chance we'll have to redo some of it if the
  3246. * block group changes again during the commit, but it greatly reduces
  3247. * the commit latency by getting rid of the easy block groups while
  3248. * we're still allowing others to join the commit.
  3249. */
  3250. int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans,
  3251. struct btrfs_fs_info *fs_info)
  3252. {
  3253. struct btrfs_block_group_cache *cache;
  3254. struct btrfs_transaction *cur_trans = trans->transaction;
  3255. int ret = 0;
  3256. int should_put;
  3257. struct btrfs_path *path = NULL;
  3258. LIST_HEAD(dirty);
  3259. struct list_head *io = &cur_trans->io_bgs;
  3260. int num_started = 0;
  3261. int loops = 0;
  3262. spin_lock(&cur_trans->dirty_bgs_lock);
  3263. if (list_empty(&cur_trans->dirty_bgs)) {
  3264. spin_unlock(&cur_trans->dirty_bgs_lock);
  3265. return 0;
  3266. }
  3267. list_splice_init(&cur_trans->dirty_bgs, &dirty);
  3268. spin_unlock(&cur_trans->dirty_bgs_lock);
  3269. again:
  3270. /*
  3271. * make sure all the block groups on our dirty list actually
  3272. * exist
  3273. */
  3274. btrfs_create_pending_block_groups(trans, fs_info);
  3275. if (!path) {
  3276. path = btrfs_alloc_path();
  3277. if (!path)
  3278. return -ENOMEM;
  3279. }
  3280. /*
  3281. * cache_write_mutex is here only to save us from balance or automatic
  3282. * removal of empty block groups deleting this block group while we are
  3283. * writing out the cache
  3284. */
  3285. mutex_lock(&trans->transaction->cache_write_mutex);
  3286. while (!list_empty(&dirty)) {
  3287. cache = list_first_entry(&dirty,
  3288. struct btrfs_block_group_cache,
  3289. dirty_list);
  3290. /*
  3291. * this can happen if something re-dirties a block
  3292. * group that is already under IO. Just wait for it to
  3293. * finish and then do it all again
  3294. */
  3295. if (!list_empty(&cache->io_list)) {
  3296. list_del_init(&cache->io_list);
  3297. btrfs_wait_cache_io(trans, cache, path);
  3298. btrfs_put_block_group(cache);
  3299. }
  3300. /*
  3301. * btrfs_wait_cache_io uses the cache->dirty_list to decide
  3302. * if it should update the cache_state. Don't delete
  3303. * until after we wait.
  3304. *
  3305. * Since we're not running in the commit critical section
3306. * we need the dirty_bgs_lock to protect us from update_block_group.
  3307. */
  3308. spin_lock(&cur_trans->dirty_bgs_lock);
  3309. list_del_init(&cache->dirty_list);
  3310. spin_unlock(&cur_trans->dirty_bgs_lock);
  3311. should_put = 1;
  3312. cache_save_setup(cache, trans, path);
  3313. if (cache->disk_cache_state == BTRFS_DC_SETUP) {
  3314. cache->io_ctl.inode = NULL;
  3315. ret = btrfs_write_out_cache(fs_info, trans,
  3316. cache, path);
  3317. if (ret == 0 && cache->io_ctl.inode) {
  3318. num_started++;
  3319. should_put = 0;
  3320. /*
  3321. * the cache_write_mutex is protecting
  3322. * the io_list
  3323. */
  3324. list_add_tail(&cache->io_list, io);
  3325. } else {
  3326. /*
  3327. * if we failed to write the cache, the
  3328. * generation will be bad and life goes on
  3329. */
  3330. ret = 0;
  3331. }
  3332. }
  3333. if (!ret) {
  3334. ret = write_one_cache_group(trans, fs_info,
  3335. path, cache);
  3336. /*
  3337. * Our block group might still be attached to the list
  3338. * of new block groups in the transaction handle of some
  3339. * other task (struct btrfs_trans_handle->new_bgs). This
  3340. * means its block group item isn't yet in the extent
  3341. * tree. If this happens ignore the error, as we will
  3342. * try again later in the critical section of the
  3343. * transaction commit.
  3344. */
  3345. if (ret == -ENOENT) {
  3346. ret = 0;
  3347. spin_lock(&cur_trans->dirty_bgs_lock);
  3348. if (list_empty(&cache->dirty_list)) {
  3349. list_add_tail(&cache->dirty_list,
  3350. &cur_trans->dirty_bgs);
  3351. btrfs_get_block_group(cache);
  3352. }
  3353. spin_unlock(&cur_trans->dirty_bgs_lock);
  3354. } else if (ret) {
  3355. btrfs_abort_transaction(trans, ret);
  3356. }
  3357. }
3358. /* if it's not on the io list, we need to put the block group */
  3359. if (should_put)
  3360. btrfs_put_block_group(cache);
  3361. if (ret)
  3362. break;
  3363. /*
  3364. * Avoid blocking other tasks for too long. It might even save
  3365. * us from writing caches for block groups that are going to be
  3366. * removed.
  3367. */
  3368. mutex_unlock(&trans->transaction->cache_write_mutex);
  3369. mutex_lock(&trans->transaction->cache_write_mutex);
  3370. }
  3371. mutex_unlock(&trans->transaction->cache_write_mutex);
  3372. /*
  3373. * go through delayed refs for all the stuff we've just kicked off
  3374. * and then loop back (just once)
  3375. */
  3376. ret = btrfs_run_delayed_refs(trans, fs_info, 0);
  3377. if (!ret && loops == 0) {
  3378. loops++;
  3379. spin_lock(&cur_trans->dirty_bgs_lock);
  3380. list_splice_init(&cur_trans->dirty_bgs, &dirty);
  3381. /*
  3382. * dirty_bgs_lock protects us from concurrent block group
  3383. * deletes too (not just cache_write_mutex).
  3384. */
  3385. if (!list_empty(&dirty)) {
  3386. spin_unlock(&cur_trans->dirty_bgs_lock);
  3387. goto again;
  3388. }
  3389. spin_unlock(&cur_trans->dirty_bgs_lock);
  3390. } else if (ret < 0) {
  3391. btrfs_cleanup_dirty_bgs(cur_trans, fs_info);
  3392. }
  3393. btrfs_free_path(path);
  3394. return ret;
  3395. }
  3396. int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
  3397. struct btrfs_fs_info *fs_info)
  3398. {
  3399. struct btrfs_block_group_cache *cache;
  3400. struct btrfs_transaction *cur_trans = trans->transaction;
  3401. int ret = 0;
  3402. int should_put;
  3403. struct btrfs_path *path;
  3404. struct list_head *io = &cur_trans->io_bgs;
  3405. int num_started = 0;
  3406. path = btrfs_alloc_path();
  3407. if (!path)
  3408. return -ENOMEM;
  3409. /*
  3410. * Even though we are in the critical section of the transaction commit,
  3411. * we can still have concurrent tasks adding elements to this
  3412. * transaction's list of dirty block groups. These tasks correspond to
  3413. * endio free space workers started when writeback finishes for a
  3414. * space cache, which run inode.c:btrfs_finish_ordered_io(), and can
  3415. * allocate new block groups as a result of COWing nodes of the root
  3416. * tree when updating the free space inode. The writeback for the space
  3417. * caches is triggered by an earlier call to
  3418. * btrfs_start_dirty_block_groups() and iterations of the following
  3419. * loop.
  3420. * Also we want to do the cache_save_setup first and then run the
  3421. * delayed refs to make sure we have the best chance at doing this all
  3422. * in one shot.
  3423. */
  3424. spin_lock(&cur_trans->dirty_bgs_lock);
  3425. while (!list_empty(&cur_trans->dirty_bgs)) {
  3426. cache = list_first_entry(&cur_trans->dirty_bgs,
  3427. struct btrfs_block_group_cache,
  3428. dirty_list);
  3429. /*
  3430. * this can happen if cache_save_setup re-dirties a block
  3431. * group that is already under IO. Just wait for it to
  3432. * finish and then do it all again
  3433. */
  3434. if (!list_empty(&cache->io_list)) {
  3435. spin_unlock(&cur_trans->dirty_bgs_lock);
  3436. list_del_init(&cache->io_list);
  3437. btrfs_wait_cache_io(trans, cache, path);
  3438. btrfs_put_block_group(cache);
  3439. spin_lock(&cur_trans->dirty_bgs_lock);
  3440. }
  3441. /*
  3442. * don't remove from the dirty list until after we've waited
  3443. * on any pending IO
  3444. */
  3445. list_del_init(&cache->dirty_list);
  3446. spin_unlock(&cur_trans->dirty_bgs_lock);
  3447. should_put = 1;
  3448. cache_save_setup(cache, trans, path);
  3449. if (!ret)
  3450. ret = btrfs_run_delayed_refs(trans, fs_info,
  3451. (unsigned long) -1);
  3452. if (!ret && cache->disk_cache_state == BTRFS_DC_SETUP) {
  3453. cache->io_ctl.inode = NULL;
  3454. ret = btrfs_write_out_cache(fs_info, trans,
  3455. cache, path);
  3456. if (ret == 0 && cache->io_ctl.inode) {
  3457. num_started++;
  3458. should_put = 0;
  3459. list_add_tail(&cache->io_list, io);
  3460. } else {
  3461. /*
  3462. * if we failed to write the cache, the
  3463. * generation will be bad and life goes on
  3464. */
  3465. ret = 0;
  3466. }
  3467. }
  3468. if (!ret) {
  3469. ret = write_one_cache_group(trans, fs_info,
  3470. path, cache);
  3471. /*
  3472. * One of the free space endio workers might have
  3473. * created a new block group while updating a free space
  3474. * cache's inode (at inode.c:btrfs_finish_ordered_io())
  3475. * and hasn't released its transaction handle yet, in
  3476. * which case the new block group is still attached to
  3477. * its transaction handle and its creation has not
  3478. * finished yet (no block group item in the extent tree
  3479. * yet, etc). If this is the case, wait for all free
  3480. * space endio workers to finish and retry. This is a
  3481. * a very rare case so no need for a more efficient and
  3482. * complex approach.
  3483. */
  3484. if (ret == -ENOENT) {
  3485. wait_event(cur_trans->writer_wait,
  3486. atomic_read(&cur_trans->num_writers) == 1);
  3487. ret = write_one_cache_group(trans, fs_info,
  3488. path, cache);
  3489. }
  3490. if (ret)
  3491. btrfs_abort_transaction(trans, ret);
  3492. }
3493. /* if it's not on the io list, we need to put the block group */
  3494. if (should_put)
  3495. btrfs_put_block_group(cache);
  3496. spin_lock(&cur_trans->dirty_bgs_lock);
  3497. }
  3498. spin_unlock(&cur_trans->dirty_bgs_lock);
  3499. while (!list_empty(io)) {
  3500. cache = list_first_entry(io, struct btrfs_block_group_cache,
  3501. io_list);
  3502. list_del_init(&cache->io_list);
  3503. btrfs_wait_cache_io(trans, cache, path);
  3504. btrfs_put_block_group(cache);
  3505. }
  3506. btrfs_free_path(path);
  3507. return ret;
  3508. }

int btrfs_extent_readonly(struct btrfs_fs_info *fs_info, u64 bytenr)
{
	struct btrfs_block_group_cache *block_group;
	int readonly = 0;

	block_group = btrfs_lookup_block_group(fs_info, bytenr);
	if (!block_group || block_group->ro)
		readonly = 1;
	if (block_group)
		btrfs_put_block_group(block_group);
	return readonly;
}
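
/*
 * Take a "nocow writers" reference on the block group that contains
 * @bytenr, preventing it from being marked read-only while a NOCOW
 * write into it is in flight. Returns false if the group is already
 * read-only (or cannot be found), in which case the caller must fall
 * back to a COW write. Paired with btrfs_dec_nocow_writers() below.
 */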
bool btrfs_inc_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr)
{
	struct btrfs_block_group_cache *bg;
	bool ret = true;

	bg = btrfs_lookup_block_group(fs_info, bytenr);
	if (!bg)
		return false;

	spin_lock(&bg->lock);
	if (bg->ro)
		ret = false;
	else
		atomic_inc(&bg->nocow_writers);
	spin_unlock(&bg->lock);

	/* no put on block group, done by btrfs_dec_nocow_writers */
	if (!ret)
		btrfs_put_block_group(bg);

	return ret;
}

void btrfs_dec_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr)
{
	struct btrfs_block_group_cache *bg;

	bg = btrfs_lookup_block_group(fs_info, bytenr);
	ASSERT(bg);
	if (atomic_dec_and_test(&bg->nocow_writers))
		wake_up_atomic_t(&bg->nocow_writers);
	/*
	 * Once for our lookup and once for the lookup done by a previous call
	 * to btrfs_inc_nocow_writers()
	 */
	btrfs_put_block_group(bg);
	btrfs_put_block_group(bg);
}

void btrfs_wait_nocow_writers(struct btrfs_block_group_cache *bg)
{
	wait_on_atomic_t(&bg->nocow_writers, atomic_t_wait,
			 TASK_UNINTERRUPTIBLE);
}
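
/*
 * Illustrative sketch (not part of the original file): how a writer is
 * expected to pair the nocow helpers above. The example_* name is
 * hypothetical.
 */
static inline bool example_nocow_write(struct btrfs_fs_info *fs_info,
				       u64 bytenr)
{
	if (!btrfs_inc_nocow_writers(fs_info, bytenr))
		return false;	/* group is RO or gone: caller must COW */
	/* ... submit the in-place (NOCOW) write here ... */
	btrfs_dec_nocow_writers(fs_info, bytenr);
	return true;
}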

static const char *alloc_name(u64 flags)
{
	switch (flags) {
	case BTRFS_BLOCK_GROUP_METADATA|BTRFS_BLOCK_GROUP_DATA:
		return "mixed";
	case BTRFS_BLOCK_GROUP_METADATA:
		return "metadata";
	case BTRFS_BLOCK_GROUP_DATA:
		return "data";
	case BTRFS_BLOCK_GROUP_SYSTEM:
		return "system";
	default:
		WARN_ON(1);
		return "invalid-combination";
	}
}

static int create_space_info(struct btrfs_fs_info *info, u64 flags,
			     struct btrfs_space_info **new)
{
	struct btrfs_space_info *space_info;
	int i;
	int ret;

	space_info = kzalloc(sizeof(*space_info), GFP_NOFS);
	if (!space_info)
		return -ENOMEM;

	ret = percpu_counter_init(&space_info->total_bytes_pinned, 0,
				  GFP_KERNEL);
	if (ret) {
		kfree(space_info);
		return ret;
	}

	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
		INIT_LIST_HEAD(&space_info->block_groups[i]);
	init_rwsem(&space_info->groups_sem);
	spin_lock_init(&space_info->lock);
	space_info->flags = flags & BTRFS_BLOCK_GROUP_TYPE_MASK;
	space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
	init_waitqueue_head(&space_info->wait);
	INIT_LIST_HEAD(&space_info->ro_bgs);
	INIT_LIST_HEAD(&space_info->tickets);
	INIT_LIST_HEAD(&space_info->priority_tickets);

	ret = kobject_init_and_add(&space_info->kobj, &space_info_ktype,
				   info->space_info_kobj, "%s",
				   alloc_name(space_info->flags));
	if (ret) {
		percpu_counter_destroy(&space_info->total_bytes_pinned);
		kfree(space_info);
		return ret;
	}

	*new = space_info;
	list_add_rcu(&space_info->list, &info->space_info);

	if (flags & BTRFS_BLOCK_GROUP_DATA)
		info->data_sinfo = space_info;

	return ret;
}
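
/*
 * Fold the size and usage counters of a new block group into the already
 * existing space_info for its flags. The caller must have created that
 * space_info beforehand (see create_space_info() above); the ASSERT
 * below enforces this.
 */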
static void update_space_info(struct btrfs_fs_info *info, u64 flags,
			      u64 total_bytes, u64 bytes_used,
			      u64 bytes_readonly,
			      struct btrfs_space_info **space_info)
{
	struct btrfs_space_info *found;
	int factor;

	if (flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
		     BTRFS_BLOCK_GROUP_RAID10))
		factor = 2;
	else
		factor = 1;

	found = __find_space_info(info, flags);
	ASSERT(found);
	spin_lock(&found->lock);
	found->total_bytes += total_bytes;
	found->disk_total += total_bytes * factor;
	found->bytes_used += bytes_used;
	found->disk_used += bytes_used * factor;
	found->bytes_readonly += bytes_readonly;
	if (total_bytes > 0)
		found->full = 0;
	space_info_add_new_bytes(info, found, total_bytes -
				 bytes_used - bytes_readonly);
	spin_unlock(&found->lock);
	*space_info = found;
}

static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
{
	u64 extra_flags = chunk_to_extended(flags) &
				BTRFS_EXTENDED_PROFILE_MASK;

	write_seqlock(&fs_info->profiles_lock);
	if (flags & BTRFS_BLOCK_GROUP_DATA)
		fs_info->avail_data_alloc_bits |= extra_flags;
	if (flags & BTRFS_BLOCK_GROUP_METADATA)
		fs_info->avail_metadata_alloc_bits |= extra_flags;
	if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
		fs_info->avail_system_alloc_bits |= extra_flags;
	write_sequnlock(&fs_info->profiles_lock);
}

/*
 * returns target flags in extended format or 0 if restripe for this
 * chunk_type is not in progress
 *
 * should be called with either volume_mutex or balance_lock held
 */
static u64 get_restripe_target(struct btrfs_fs_info *fs_info, u64 flags)
{
	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
	u64 target = 0;

	if (!bctl)
		return 0;

	if (flags & BTRFS_BLOCK_GROUP_DATA &&
	    bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) {
		target = BTRFS_BLOCK_GROUP_DATA | bctl->data.target;
	} else if (flags & BTRFS_BLOCK_GROUP_SYSTEM &&
		   bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
		target = BTRFS_BLOCK_GROUP_SYSTEM | bctl->sys.target;
	} else if (flags & BTRFS_BLOCK_GROUP_METADATA &&
		   bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) {
		target = BTRFS_BLOCK_GROUP_METADATA | bctl->meta.target;
	}

	return target;
}

/*
 * @flags: available profiles in extended format (see ctree.h)
 *
 * Returns reduced profile in chunk format. If profile changing is in
 * progress (either running or paused) picks the target profile (if it's
 * already available), otherwise falls back to plain reducing.
 */
static u64 btrfs_reduce_alloc_profile(struct btrfs_fs_info *fs_info, u64 flags)
{
	u64 num_devices = fs_info->fs_devices->rw_devices;
	u64 target;
	u64 raid_type;
	u64 allowed = 0;

	/*
	 * see if restripe for this chunk_type is in progress, if so
	 * try to reduce to the target profile
	 */
	spin_lock(&fs_info->balance_lock);
	target = get_restripe_target(fs_info, flags);
	if (target) {
		/* pick target profile only if it's already available */
		if ((flags & target) & BTRFS_EXTENDED_PROFILE_MASK) {
			spin_unlock(&fs_info->balance_lock);
			return extended_to_chunk(target);
		}
	}
	spin_unlock(&fs_info->balance_lock);

	/* First, mask out the RAID levels which aren't possible */
	for (raid_type = 0; raid_type < BTRFS_NR_RAID_TYPES; raid_type++) {
		if (num_devices >= btrfs_raid_array[raid_type].devs_min)
			allowed |= btrfs_raid_group[raid_type];
	}
	allowed &= flags;

	if (allowed & BTRFS_BLOCK_GROUP_RAID6)
		allowed = BTRFS_BLOCK_GROUP_RAID6;
	else if (allowed & BTRFS_BLOCK_GROUP_RAID5)
		allowed = BTRFS_BLOCK_GROUP_RAID5;
	else if (allowed & BTRFS_BLOCK_GROUP_RAID10)
		allowed = BTRFS_BLOCK_GROUP_RAID10;
	else if (allowed & BTRFS_BLOCK_GROUP_RAID1)
		allowed = BTRFS_BLOCK_GROUP_RAID1;
	else if (allowed & BTRFS_BLOCK_GROUP_RAID0)
		allowed = BTRFS_BLOCK_GROUP_RAID0;

	flags &= ~BTRFS_BLOCK_GROUP_PROFILE_MASK;

	return extended_to_chunk(flags | allowed);
}
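
/*
 * Worked example (illustrative, not part of the original file): with
 * flags = DATA | RAID0 | RAID1 on a two-device filesystem and no
 * restripe in progress, both RAID0 and RAID1 pass the num_devices
 * check, and the if/else ladder keeps the preferred profile
 * (RAID6 > RAID5 > RAID10 > RAID1 > RAID0), so the result is
 * extended_to_chunk(DATA | RAID1).
 */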

static u64 get_alloc_profile(struct btrfs_fs_info *fs_info, u64 orig_flags)
{
	unsigned seq;
	u64 flags;

	do {
		flags = orig_flags;
		seq = read_seqbegin(&fs_info->profiles_lock);

		if (flags & BTRFS_BLOCK_GROUP_DATA)
			flags |= fs_info->avail_data_alloc_bits;
		else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
			flags |= fs_info->avail_system_alloc_bits;
		else if (flags & BTRFS_BLOCK_GROUP_METADATA)
			flags |= fs_info->avail_metadata_alloc_bits;
	} while (read_seqretry(&fs_info->profiles_lock, seq));

	return btrfs_reduce_alloc_profile(fs_info, flags);
}

static u64 get_alloc_profile_by_root(struct btrfs_root *root, int data)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 flags;
	u64 ret;

	if (data)
		flags = BTRFS_BLOCK_GROUP_DATA;
	else if (root == fs_info->chunk_root)
		flags = BTRFS_BLOCK_GROUP_SYSTEM;
	else
		flags = BTRFS_BLOCK_GROUP_METADATA;

	ret = get_alloc_profile(fs_info, flags);
	return ret;
}

u64 btrfs_data_alloc_profile(struct btrfs_fs_info *fs_info)
{
	return get_alloc_profile(fs_info, BTRFS_BLOCK_GROUP_DATA);
}

u64 btrfs_metadata_alloc_profile(struct btrfs_fs_info *fs_info)
{
	return get_alloc_profile(fs_info, BTRFS_BLOCK_GROUP_METADATA);
}

u64 btrfs_system_alloc_profile(struct btrfs_fs_info *fs_info)
{
	return get_alloc_profile(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
}

static u64 btrfs_space_info_used(struct btrfs_space_info *s_info,
				 bool may_use_included)
{
	ASSERT(s_info);
	return s_info->bytes_used + s_info->bytes_reserved +
		s_info->bytes_pinned + s_info->bytes_readonly +
		(may_use_included ? s_info->bytes_may_use : 0);
}
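
/*
 * Reserve @bytes of data space for @inode, allocating a new data chunk
 * or committing the transaction (to reclaim pinned space) when the
 * existing data space_info cannot satisfy the request. On success the
 * bytes are accounted in data_sinfo->bytes_may_use.
 */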
int btrfs_alloc_data_chunk_ondemand(struct btrfs_inode *inode, u64 bytes)
{
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_space_info *data_sinfo = fs_info->data_sinfo;
	u64 used;
	int ret = 0;
	int need_commit = 2;
	int have_pinned_space;

	/* make sure bytes are sectorsize aligned */
	bytes = ALIGN(bytes, fs_info->sectorsize);

	if (btrfs_is_free_space_inode(inode)) {
		need_commit = 0;
		ASSERT(current->journal_info);
	}

again:
	/* make sure we have enough space to handle the data first */
	spin_lock(&data_sinfo->lock);
	used = btrfs_space_info_used(data_sinfo, true);

	if (used + bytes > data_sinfo->total_bytes) {
		struct btrfs_trans_handle *trans;

		/*
		 * if we don't have enough free bytes in this space then we need
		 * to alloc a new chunk.
		 */
		if (!data_sinfo->full) {
			u64 alloc_target;

			data_sinfo->force_alloc = CHUNK_ALLOC_FORCE;
			spin_unlock(&data_sinfo->lock);

			alloc_target = btrfs_data_alloc_profile(fs_info);
			/*
			 * It is ugly that we don't call a nolock join
			 * transaction for the free space inode case here.
			 * But it is safe because we only do the data space
			 * reservation for the free space cache in the
			 * transaction context, and a plain join transaction
			 * just increases the use count of the current
			 * transaction handle without trying to acquire the
			 * fs trans_lock.
			 */
			trans = btrfs_join_transaction(root);
			if (IS_ERR(trans))
				return PTR_ERR(trans);

			ret = do_chunk_alloc(trans, fs_info, alloc_target,
					     CHUNK_ALLOC_NO_FORCE);
			btrfs_end_transaction(trans);
			if (ret < 0) {
				if (ret != -ENOSPC)
					return ret;
				else {
					have_pinned_space = 1;
					goto commit_trans;
				}
			}

			goto again;
		}

		/*
		 * If we don't have enough pinned space to deal with this
		 * allocation, and no chunk was removed in the current
		 * transaction, don't bother committing the transaction.
		 */
		have_pinned_space = percpu_counter_compare(
			&data_sinfo->total_bytes_pinned,
			used + bytes - data_sinfo->total_bytes);
		spin_unlock(&data_sinfo->lock);

		/* commit the current transaction and try again */
commit_trans:
		if (need_commit &&
		    !atomic_read(&fs_info->open_ioctl_trans)) {
			need_commit--;

			if (need_commit > 0) {
				btrfs_start_delalloc_roots(fs_info, 0, -1);
				btrfs_wait_ordered_roots(fs_info, U64_MAX, 0,
							 (u64)-1);
			}

			trans = btrfs_join_transaction(root);
			if (IS_ERR(trans))
				return PTR_ERR(trans);
			if (have_pinned_space >= 0 ||
			    test_bit(BTRFS_TRANS_HAVE_FREE_BGS,
				     &trans->transaction->flags) ||
			    need_commit > 0) {
				ret = btrfs_commit_transaction(trans);
				if (ret)
					return ret;
				/*
				 * The cleaner kthread might still be doing iput
				 * operations. Wait for it to finish so that
				 * more space is released.
				 */
				mutex_lock(&fs_info->cleaner_delayed_iput_mutex);
				mutex_unlock(&fs_info->cleaner_delayed_iput_mutex);
				goto again;
			} else {
				btrfs_end_transaction(trans);
			}
		}

		trace_btrfs_space_reservation(fs_info,
					      "space_info:enospc",
					      data_sinfo->flags, bytes, 1);
		return -ENOSPC;
	}
	data_sinfo->bytes_may_use += bytes;
	trace_btrfs_space_reservation(fs_info, "space_info",
				      data_sinfo->flags, bytes, 1);
	spin_unlock(&data_sinfo->lock);

	return ret;
}

int btrfs_check_data_free_space(struct inode *inode,
			struct extent_changeset **reserved, u64 start, u64 len)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	int ret;

	/* align the range */
	len = round_up(start + len, fs_info->sectorsize) -
	      round_down(start, fs_info->sectorsize);
	start = round_down(start, fs_info->sectorsize);

	ret = btrfs_alloc_data_chunk_ondemand(BTRFS_I(inode), len);
	if (ret < 0)
		return ret;

	/* Use new btrfs_qgroup_reserve_data to reserve precious data space. */
	ret = btrfs_qgroup_reserve_data(inode, reserved, start, len);
	if (ret < 0)
		btrfs_free_reserved_data_space_noquota(inode, start, len);
	else
		ret = 0;
	return ret;
}
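
/*
 * Illustrative sketch (not part of the original file): the expected
 * reserve/release pairing around a write that fails before any extents
 * are allocated. The example_* name and the start/len values are
 * hypothetical, and it assumes extent_changeset_free() is available to
 * drop the changeset returned by the reservation.
 */
static inline int example_reserve_then_fail(struct inode *inode)
{
	struct extent_changeset *reserved = NULL;
	int ret;

	ret = btrfs_check_data_free_space(inode, &reserved, 0, SZ_64K);
	if (ret < 0)
		return ret;
	/* ... the write fails before the extents are allocated ... */
	btrfs_free_reserved_data_space(inode, reserved, 0, SZ_64K);
	extent_changeset_free(reserved);
	return 0;
}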

/*
 * Called if we need to clear a data reservation for this inode
 * Normally in an error case.
 *
 * This one will *NOT* use the accurate qgroup reserved space API, just
 * for the case where we can't sleep and are sure it won't affect qgroup
 * reserved space, like clear_bit_hook().
 */
void btrfs_free_reserved_data_space_noquota(struct inode *inode, u64 start,
					    u64 len)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_space_info *data_sinfo;

	/* Make sure the range is aligned to sectorsize */
	len = round_up(start + len, fs_info->sectorsize) -
	      round_down(start, fs_info->sectorsize);
	start = round_down(start, fs_info->sectorsize);

	data_sinfo = fs_info->data_sinfo;
	spin_lock(&data_sinfo->lock);
	if (WARN_ON(data_sinfo->bytes_may_use < len))
		data_sinfo->bytes_may_use = 0;
	else
		data_sinfo->bytes_may_use -= len;
	trace_btrfs_space_reservation(fs_info, "space_info",
				      data_sinfo->flags, len, 0);
	spin_unlock(&data_sinfo->lock);
}

/*
 * Called if we need to clear a data reservation for this inode
 * Normally in an error case.
 *
 * This one will handle the per-inode data rsv map for accurate reserved
 * space framework.
 */
void btrfs_free_reserved_data_space(struct inode *inode,
				    struct extent_changeset *reserved,
				    u64 start, u64 len)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;

	/* Make sure the range is aligned to sectorsize */
	len = round_up(start + len, root->fs_info->sectorsize) -
	      round_down(start, root->fs_info->sectorsize);
	start = round_down(start, root->fs_info->sectorsize);

	btrfs_free_reserved_data_space_noquota(inode, start, len);
	btrfs_qgroup_free_data(inode, reserved, start, len);
}

static void force_metadata_allocation(struct btrfs_fs_info *info)
{
	struct list_head *head = &info->space_info;
	struct btrfs_space_info *found;

	rcu_read_lock();
	list_for_each_entry_rcu(found, head, list) {
		if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
			found->force_alloc = CHUNK_ALLOC_FORCE;
	}
	rcu_read_unlock();
}

static inline u64 calc_global_rsv_need_space(struct btrfs_block_rsv *global)
{
	return (global->size << 1);
}

static int should_alloc_chunk(struct btrfs_fs_info *fs_info,
			      struct btrfs_space_info *sinfo, int force)
{
	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
	u64 bytes_used = btrfs_space_info_used(sinfo, false);
	u64 thresh;

	if (force == CHUNK_ALLOC_FORCE)
		return 1;

	/*
	 * We need to take into account the global rsv because for all intents
	 * and purposes it's used space. Don't worry about locking the
	 * global_rsv, it doesn't change except when the transaction commits.
	 */
	if (sinfo->flags & BTRFS_BLOCK_GROUP_METADATA)
		bytes_used += calc_global_rsv_need_space(global_rsv);

	/*
	 * in limited mode, we want to have some free space up to
	 * about 1% of the FS size.
	 */
	if (force == CHUNK_ALLOC_LIMITED) {
		thresh = btrfs_super_total_bytes(fs_info->super_copy);
		thresh = max_t(u64, SZ_64M, div_factor_fine(thresh, 1));

		if (sinfo->total_bytes - bytes_used < thresh)
			return 1;
	}

	if (bytes_used + SZ_2M < div_factor(sinfo->total_bytes, 8))
		return 0;
	return 1;
}

static u64 get_profile_num_devs(struct btrfs_fs_info *fs_info, u64 type)
{
	u64 num_dev;

	if (type & (BTRFS_BLOCK_GROUP_RAID10 |
		    BTRFS_BLOCK_GROUP_RAID0 |
		    BTRFS_BLOCK_GROUP_RAID5 |
		    BTRFS_BLOCK_GROUP_RAID6))
		num_dev = fs_info->fs_devices->rw_devices;
	else if (type & BTRFS_BLOCK_GROUP_RAID1)
		num_dev = 2;
	else
		num_dev = 1;	/* DUP or single */

	return num_dev;
}

/*
 * Reserve space in the system space_info for either allocating a new
 * chunk or removing one; both need to update the device items and add
 * or delete a chunk item.
 */
void check_system_chunk(struct btrfs_trans_handle *trans,
			struct btrfs_fs_info *fs_info, u64 type)
{
	struct btrfs_space_info *info;
	u64 left;
	u64 thresh;
	int ret = 0;
	u64 num_devs;

	/*
	 * Needed because we can end up allocating a system chunk and for an
	 * atomic and race free space reservation in the chunk block reserve.
	 */
	ASSERT(mutex_is_locked(&fs_info->chunk_mutex));

	info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
	spin_lock(&info->lock);
	left = info->total_bytes - btrfs_space_info_used(info, true);
	spin_unlock(&info->lock);

	num_devs = get_profile_num_devs(fs_info, type);

	/* num_devs device items to update and 1 chunk item to add or remove */
	thresh = btrfs_calc_trunc_metadata_size(fs_info, num_devs) +
		btrfs_calc_trans_metadata_size(fs_info, 1);

	if (left < thresh && btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
		btrfs_info(fs_info, "left=%llu, need=%llu, flags=%llu",
			   left, thresh, type);
		dump_space_info(fs_info, info, 0, 0);
	}

	if (left < thresh) {
		u64 flags = btrfs_system_alloc_profile(fs_info);

		/*
		 * Ignore failure to create system chunk. We might end up not
		 * needing it, as we might not need to COW all nodes/leafs from
		 * the paths we visit in the chunk tree (they were already COWed
		 * or created in the current transaction for example).
		 */
		ret = btrfs_alloc_chunk(trans, fs_info, flags);
	}

	if (!ret) {
		ret = btrfs_block_rsv_add(fs_info->chunk_root,
					  &fs_info->chunk_block_rsv,
					  thresh, BTRFS_RESERVE_NO_FLUSH);
		if (!ret)
			trans->chunk_bytes_reserved += thresh;
	}
}

/*
 * If force is CHUNK_ALLOC_FORCE:
 *    - return 1 if it successfully allocates a chunk,
 *    - return errors including -ENOSPC otherwise.
 * If force is NOT CHUNK_ALLOC_FORCE:
 *    - return 0 if it doesn't need to allocate a new chunk,
 *    - return 1 if it successfully allocates a chunk,
 *    - return errors including -ENOSPC otherwise.
 */
static int do_chunk_alloc(struct btrfs_trans_handle *trans,
			  struct btrfs_fs_info *fs_info, u64 flags, int force)
{
	struct btrfs_space_info *space_info;
	int wait_for_alloc = 0;
	int ret = 0;

	/* Don't re-enter if we're already allocating a chunk */
	if (trans->allocating_chunk)
		return -ENOSPC;

	space_info = __find_space_info(fs_info, flags);
	if (!space_info) {
		ret = create_space_info(fs_info, flags, &space_info);
		if (ret)
			return ret;
	}

again:
	spin_lock(&space_info->lock);
	if (force < space_info->force_alloc)
		force = space_info->force_alloc;
	if (space_info->full) {
		if (should_alloc_chunk(fs_info, space_info, force))
			ret = -ENOSPC;
		else
			ret = 0;
		spin_unlock(&space_info->lock);
		return ret;
	}

	if (!should_alloc_chunk(fs_info, space_info, force)) {
		spin_unlock(&space_info->lock);
		return 0;
	} else if (space_info->chunk_alloc) {
		wait_for_alloc = 1;
	} else {
		space_info->chunk_alloc = 1;
	}

	spin_unlock(&space_info->lock);

	mutex_lock(&fs_info->chunk_mutex);

	/*
	 * The chunk_mutex is held throughout the entirety of a chunk
	 * allocation, so once we've acquired the chunk_mutex we know that the
	 * other guy is done and we need to recheck and see if we should
	 * allocate.
	 */
	if (wait_for_alloc) {
		mutex_unlock(&fs_info->chunk_mutex);
		wait_for_alloc = 0;
		goto again;
	}

	trans->allocating_chunk = true;

	/*
	 * If we have mixed data/metadata chunks we want to make sure we keep
	 * allocating mixed chunks instead of individual chunks.
	 */
	if (btrfs_mixed_space_info(space_info))
		flags |= (BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA);

	/*
	 * if we're doing a data chunk, go ahead and make sure that
	 * we keep a reasonable number of metadata chunks allocated in the
	 * FS as well.
	 */
	if (flags & BTRFS_BLOCK_GROUP_DATA && fs_info->metadata_ratio) {
		fs_info->data_chunk_allocations++;
		if (!(fs_info->data_chunk_allocations %
		      fs_info->metadata_ratio))
			force_metadata_allocation(fs_info);
	}

	/*
	 * Check if we have enough space in SYSTEM chunk because we may need
	 * to update devices.
	 */
	check_system_chunk(trans, fs_info, flags);

	ret = btrfs_alloc_chunk(trans, fs_info, flags);
	trans->allocating_chunk = false;

	spin_lock(&space_info->lock);
	if (ret < 0 && ret != -ENOSPC)
		goto out;
	if (ret)
		space_info->full = 1;
	else
		ret = 1;

	space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
out:
	space_info->chunk_alloc = 0;
	spin_unlock(&space_info->lock);
	mutex_unlock(&fs_info->chunk_mutex);
	/*
	 * When we allocate a new chunk we reserve space in the chunk block
	 * reserve to make sure we can COW nodes/leafs in the chunk tree or
	 * add new nodes/leafs to it if we end up needing to do it when
	 * inserting the chunk item and updating device items as part of the
	 * second phase of chunk allocation, performed by
	 * btrfs_finish_chunk_alloc(). So make sure we don't accumulate a
	 * large number of new block groups to create in our transaction
	 * handle's new_bgs list to avoid exhausting the chunk block reserve
	 * in extreme cases - like having a single transaction create many new
	 * block groups when starting to write out the free space caches of all
	 * the block groups that were made dirty during the lifetime of the
	 * transaction.
	 */
	if (trans->can_flush_pending_bgs &&
	    trans->chunk_bytes_reserved >= (u64)SZ_2M) {
		btrfs_create_pending_block_groups(trans, fs_info);
		btrfs_trans_release_chunk_metadata(trans);
	}
	return ret;
}
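
/*
 * Decide whether a metadata (or system) reservation of @bytes may
 * overcommit, i.e. be granted beyond space_info->total_bytes, based on
 * the unallocated device space that could still be turned into chunks.
 * Data reservations never overcommit.
 */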
static int can_overcommit(struct btrfs_fs_info *fs_info,
			  struct btrfs_space_info *space_info, u64 bytes,
			  enum btrfs_reserve_flush_enum flush,
			  bool system_chunk)
{
	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
	u64 profile;
	u64 space_size;
	u64 avail;
	u64 used;

	/* Don't overcommit when in mixed mode. */
	if (space_info->flags & BTRFS_BLOCK_GROUP_DATA)
		return 0;

	if (system_chunk)
		profile = btrfs_system_alloc_profile(fs_info);
	else
		profile = btrfs_metadata_alloc_profile(fs_info);

	used = btrfs_space_info_used(space_info, false);

	/*
	 * We only want to allow over committing if we have lots of actual space
	 * free, but if we don't have enough space to handle the global reserve
	 * space then we could end up having a real enospc problem when trying
	 * to allocate a chunk or some other such important allocation.
	 */
	spin_lock(&global_rsv->lock);
	space_size = calc_global_rsv_need_space(global_rsv);
	spin_unlock(&global_rsv->lock);
	if (used + space_size >= space_info->total_bytes)
		return 0;

	used += space_info->bytes_may_use;

	avail = atomic64_read(&fs_info->free_chunk_space);

	/*
	 * If we have dup, raid1 or raid10 then only half of the free
	 * space is actually usable. For raid56, the space info used
	 * doesn't include the parity drive, so we don't have to
	 * change the math
	 */
	if (profile & (BTRFS_BLOCK_GROUP_DUP |
		       BTRFS_BLOCK_GROUP_RAID1 |
		       BTRFS_BLOCK_GROUP_RAID10))
		avail >>= 1;

	/*
	 * If we aren't flushing all things, let us overcommit up to
	 * half of the space. If we can flush, don't let us overcommit
	 * too much, let it overcommit up to 1/8 of the space.
	 */
	if (flush == BTRFS_RESERVE_FLUSH_ALL)
		avail >>= 3;
	else
		avail >>= 1;

	if (used + bytes < space_info->total_bytes + avail)
		return 1;
	return 0;
}

static void btrfs_writeback_inodes_sb_nr(struct btrfs_fs_info *fs_info,
					 unsigned long nr_pages, int nr_items)
{
	struct super_block *sb = fs_info->sb;

	if (down_read_trylock(&sb->s_umount)) {
		writeback_inodes_sb_nr(sb, nr_pages, WB_REASON_FS_FREE_SPACE);
		up_read(&sb->s_umount);
	} else {
		/*
		 * We needn't worry about the filesystem going from r/w to r/o
		 * even though we don't acquire the ->s_umount mutex, because
		 * the filesystem guarantees that the delalloc inode list is
		 * empty once the filesystem is read-only (all dirty pages
		 * have been written to disk).
		 */
		btrfs_start_delalloc_roots(fs_info, 0, nr_items);
		if (!current->journal_info)
			btrfs_wait_ordered_roots(fs_info, nr_items, 0, (u64)-1);
	}
}

static inline u64 calc_reclaim_items_nr(struct btrfs_fs_info *fs_info,
					u64 to_reclaim)
{
	u64 bytes;
	u64 nr;

	bytes = btrfs_calc_trans_metadata_size(fs_info, 1);
	nr = div64_u64(to_reclaim, bytes);
	if (!nr)
		nr = 1;
	return nr;
}
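
/*
 * Worked example (illustrative, not part of the original file; assumes
 * btrfs_calc_trans_metadata_size() charges nodesize * BTRFS_MAX_LEVEL * 2
 * per item and a 16K nodesize, i.e. 256K per item): to_reclaim = 1M maps
 * to nr = 4 items, and anything smaller than one item's worth is rounded
 * up to nr = 1.
 */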

#define EXTENT_SIZE_PER_ITEM	SZ_256K

/*
 * shrink metadata reservation for delalloc
 */
static void shrink_delalloc(struct btrfs_fs_info *fs_info, u64 to_reclaim,
			    u64 orig, bool wait_ordered)
{
	struct btrfs_space_info *space_info;
	struct btrfs_trans_handle *trans;
	u64 delalloc_bytes;
	u64 max_reclaim;
	u64 items;
	long time_left;
	unsigned long nr_pages;
	int loops;
	enum btrfs_reserve_flush_enum flush;

	/* Calculate the number of items we need to flush for this reservation */
	items = calc_reclaim_items_nr(fs_info, to_reclaim);
	to_reclaim = items * EXTENT_SIZE_PER_ITEM;

	trans = (struct btrfs_trans_handle *)current->journal_info;
	space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);

	delalloc_bytes = percpu_counter_sum_positive(
						&fs_info->delalloc_bytes);
	if (delalloc_bytes == 0) {
		if (trans)
			return;
		if (wait_ordered)
			btrfs_wait_ordered_roots(fs_info, items, 0, (u64)-1);
		return;
	}

	loops = 0;
	while (delalloc_bytes && loops < 3) {
		max_reclaim = min(delalloc_bytes, to_reclaim);
		nr_pages = max_reclaim >> PAGE_SHIFT;
		btrfs_writeback_inodes_sb_nr(fs_info, nr_pages, items);
		/*
		 * We need to wait for the async pages to actually start before
		 * we do anything.
		 */
		max_reclaim = atomic_read(&fs_info->async_delalloc_pages);
		if (!max_reclaim)
			goto skip_async;

		if (max_reclaim <= nr_pages)
			max_reclaim = 0;
		else
			max_reclaim -= nr_pages;

		wait_event(fs_info->async_submit_wait,
			   atomic_read(&fs_info->async_delalloc_pages) <=
			   (int)max_reclaim);
skip_async:
		if (!trans)
			flush = BTRFS_RESERVE_FLUSH_ALL;
		else
			flush = BTRFS_RESERVE_NO_FLUSH;
		spin_lock(&space_info->lock);
		if (list_empty(&space_info->tickets) &&
		    list_empty(&space_info->priority_tickets)) {
			spin_unlock(&space_info->lock);
			break;
		}
		spin_unlock(&space_info->lock);

		loops++;
		if (wait_ordered && !trans) {
			btrfs_wait_ordered_roots(fs_info, items, 0, (u64)-1);
		} else {
			time_left = schedule_timeout_killable(1);
			if (time_left)
				break;
		}
		delalloc_bytes = percpu_counter_sum_positive(
						&fs_info->delalloc_bytes);
	}
}

struct reserve_ticket {
	u64 bytes;
	int error;
	struct list_head list;
	wait_queue_head_t wait;
};
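
/*
 * A reserve_ticket represents one pending metadata reservation. It is
 * queued on space_info->tickets (or ->priority_tickets), its ->bytes is
 * decremented as flushing frees up space, and the waiter is woken once
 * ->bytes reaches zero or ->error is set (see wake_all_tickets() and
 * the space_info_add_*_bytes() helpers below).
 */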

/**
 * may_commit_transaction - possibly commit the transaction if it's OK to
 * @fs_info - the fs we are working on
 * @space_info - the space_info we want to allocate from
 *
 * This will check to make sure that committing the transaction will actually
 * get us somewhere and then commit the transaction if it does. Otherwise it
 * will return -ENOSPC.
 */
static int may_commit_transaction(struct btrfs_fs_info *fs_info,
				  struct btrfs_space_info *space_info)
{
	struct reserve_ticket *ticket = NULL;
	struct btrfs_block_rsv *delayed_rsv = &fs_info->delayed_block_rsv;
	struct btrfs_trans_handle *trans;
	u64 bytes;

	trans = (struct btrfs_trans_handle *)current->journal_info;
	if (trans)
		return -EAGAIN;

	spin_lock(&space_info->lock);
	if (!list_empty(&space_info->priority_tickets))
		ticket = list_first_entry(&space_info->priority_tickets,
					  struct reserve_ticket, list);
	else if (!list_empty(&space_info->tickets))
		ticket = list_first_entry(&space_info->tickets,
					  struct reserve_ticket, list);
	bytes = (ticket) ? ticket->bytes : 0;
	spin_unlock(&space_info->lock);

	if (!bytes)
		return 0;

	/* See if there is enough pinned space to make this reservation */
	if (percpu_counter_compare(&space_info->total_bytes_pinned,
				   bytes) >= 0)
		goto commit;

	/*
	 * See if there is some space in the delayed insertion reservation for
	 * this reservation.
	 */
	if (space_info != delayed_rsv->space_info)
		return -ENOSPC;

	spin_lock(&delayed_rsv->lock);
	if (delayed_rsv->size > bytes)
		bytes = 0;
	else
		bytes -= delayed_rsv->size;
	spin_unlock(&delayed_rsv->lock);

	if (percpu_counter_compare(&space_info->total_bytes_pinned,
				   bytes) < 0) {
		return -ENOSPC;
	}

commit:
	trans = btrfs_join_transaction(fs_info->extent_root);
	if (IS_ERR(trans))
		return -ENOSPC;

	return btrfs_commit_transaction(trans);
}

/*
 * Try to flush some data based on policy set by @state. This is only advisory
 * and may fail for various reasons. The caller is supposed to examine the
 * state of @space_info to detect the outcome.
 */
static void flush_space(struct btrfs_fs_info *fs_info,
			struct btrfs_space_info *space_info, u64 num_bytes,
			int state)
{
	struct btrfs_root *root = fs_info->extent_root;
	struct btrfs_trans_handle *trans;
	int nr;
	int ret = 0;

	switch (state) {
	case FLUSH_DELAYED_ITEMS_NR:
	case FLUSH_DELAYED_ITEMS:
		if (state == FLUSH_DELAYED_ITEMS_NR)
			nr = calc_reclaim_items_nr(fs_info, num_bytes) * 2;
		else
			nr = -1;

		trans = btrfs_join_transaction(root);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			break;
		}
		ret = btrfs_run_delayed_items_nr(trans, fs_info, nr);
		btrfs_end_transaction(trans);
		break;
	case FLUSH_DELALLOC:
	case FLUSH_DELALLOC_WAIT:
		shrink_delalloc(fs_info, num_bytes * 2, num_bytes,
				state == FLUSH_DELALLOC_WAIT);
		break;
	case ALLOC_CHUNK:
		trans = btrfs_join_transaction(root);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			break;
		}
		ret = do_chunk_alloc(trans, fs_info,
				     btrfs_metadata_alloc_profile(fs_info),
				     CHUNK_ALLOC_NO_FORCE);
		btrfs_end_transaction(trans);
		if (ret > 0 || ret == -ENOSPC)
			ret = 0;
		break;
	case COMMIT_TRANS:
		ret = may_commit_transaction(fs_info, space_info);
		break;
	default:
		ret = -ENOSPC;
		break;
	}

	trace_btrfs_flush_space(fs_info, space_info->flags, num_bytes, state,
				ret);
	return;
}
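
/*
 * The reclaim loops below walk the states in increasing order of cost:
 * FLUSH_DELAYED_ITEMS_NR, FLUSH_DELAYED_ITEMS, FLUSH_DELALLOC,
 * FLUSH_DELALLOC_WAIT, ALLOC_CHUNK and finally COMMIT_TRANS. They start
 * at FLUSH_DELAYED_ITEMS_NR and increment flush_state until either the
 * reservation is satisfied or COMMIT_TRANS has been tried.
 */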

static inline u64
btrfs_calc_reclaim_metadata_size(struct btrfs_fs_info *fs_info,
				 struct btrfs_space_info *space_info,
				 bool system_chunk)
{
	struct reserve_ticket *ticket;
	u64 used;
	u64 expected;
	u64 to_reclaim = 0;

	list_for_each_entry(ticket, &space_info->tickets, list)
		to_reclaim += ticket->bytes;
	list_for_each_entry(ticket, &space_info->priority_tickets, list)
		to_reclaim += ticket->bytes;
	if (to_reclaim)
		return to_reclaim;

	to_reclaim = min_t(u64, num_online_cpus() * SZ_1M, SZ_16M);
	if (can_overcommit(fs_info, space_info, to_reclaim,
			   BTRFS_RESERVE_FLUSH_ALL, system_chunk))
		return 0;

	used = btrfs_space_info_used(space_info, true);

	if (can_overcommit(fs_info, space_info, SZ_1M,
			   BTRFS_RESERVE_FLUSH_ALL, system_chunk))
		expected = div_factor_fine(space_info->total_bytes, 95);
	else
		expected = div_factor_fine(space_info->total_bytes, 90);

	if (used > expected)
		to_reclaim = used - expected;
	else
		to_reclaim = 0;
	to_reclaim = min(to_reclaim, space_info->bytes_may_use +
				     space_info->bytes_reserved);
	return to_reclaim;
}

static inline int need_do_async_reclaim(struct btrfs_fs_info *fs_info,
					struct btrfs_space_info *space_info,
					u64 used, bool system_chunk)
{
	u64 thresh = div_factor_fine(space_info->total_bytes, 98);

	/* If we're just plain full then async reclaim just slows us down. */
	if ((space_info->bytes_used + space_info->bytes_reserved) >= thresh)
		return 0;

	if (!btrfs_calc_reclaim_metadata_size(fs_info, space_info,
					      system_chunk))
		return 0;

	return (used >= thresh && !btrfs_fs_closing(fs_info) &&
		!test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state));
}

static void wake_all_tickets(struct list_head *head)
{
	struct reserve_ticket *ticket;

	while (!list_empty(head)) {
		ticket = list_first_entry(head, struct reserve_ticket, list);
		list_del_init(&ticket->list);
		ticket->error = -ENOSPC;
		wake_up(&ticket->wait);
	}
}

/*
 * This is for normal flushers, we can wait all goddamned day if we want to. We
 * will loop and continuously try to flush as long as we are making progress.
 * We count progress as clearing off tickets each time we have to loop.
 */
static void btrfs_async_reclaim_metadata_space(struct work_struct *work)
{
	struct btrfs_fs_info *fs_info;
	struct btrfs_space_info *space_info;
	u64 to_reclaim;
	int flush_state;
	int commit_cycles = 0;
	u64 last_tickets_id;

	fs_info = container_of(work, struct btrfs_fs_info, async_reclaim_work);
	space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);

	spin_lock(&space_info->lock);
	to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info, space_info,
						      false);
	if (!to_reclaim) {
		space_info->flush = 0;
		spin_unlock(&space_info->lock);
		return;
	}
	last_tickets_id = space_info->tickets_id;
	spin_unlock(&space_info->lock);

	flush_state = FLUSH_DELAYED_ITEMS_NR;
	do {
		flush_space(fs_info, space_info, to_reclaim, flush_state);
		spin_lock(&space_info->lock);
		if (list_empty(&space_info->tickets)) {
			space_info->flush = 0;
			spin_unlock(&space_info->lock);
			return;
		}
		to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info,
							      space_info,
							      false);
		if (last_tickets_id == space_info->tickets_id) {
			flush_state++;
		} else {
			last_tickets_id = space_info->tickets_id;
			flush_state = FLUSH_DELAYED_ITEMS_NR;
			if (commit_cycles)
				commit_cycles--;
		}

		if (flush_state > COMMIT_TRANS) {
			commit_cycles++;
			if (commit_cycles > 2) {
				wake_all_tickets(&space_info->tickets);
				space_info->flush = 0;
			} else {
				flush_state = FLUSH_DELAYED_ITEMS_NR;
			}
		}
		spin_unlock(&space_info->lock);
	} while (flush_state <= COMMIT_TRANS);
}

void btrfs_init_async_reclaim_work(struct work_struct *work)
{
	INIT_WORK(work, btrfs_async_reclaim_metadata_space);
}

static void priority_reclaim_metadata_space(struct btrfs_fs_info *fs_info,
					    struct btrfs_space_info *space_info,
					    struct reserve_ticket *ticket)
{
	u64 to_reclaim;
	int flush_state = FLUSH_DELAYED_ITEMS_NR;

	spin_lock(&space_info->lock);
	to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info, space_info,
						      false);
	if (!to_reclaim) {
		spin_unlock(&space_info->lock);
		return;
	}
	spin_unlock(&space_info->lock);

	do {
		flush_space(fs_info, space_info, to_reclaim, flush_state);
		flush_state++;
		spin_lock(&space_info->lock);
		if (ticket->bytes == 0) {
			spin_unlock(&space_info->lock);
			return;
		}
		spin_unlock(&space_info->lock);

		/*
		 * Priority flushers can't wait on delalloc without
		 * deadlocking.
		 */
		if (flush_state == FLUSH_DELALLOC ||
		    flush_state == FLUSH_DELALLOC_WAIT)
			flush_state = ALLOC_CHUNK;
	} while (flush_state < COMMIT_TRANS);
}

static int wait_reserve_ticket(struct btrfs_fs_info *fs_info,
			       struct btrfs_space_info *space_info,
			       struct reserve_ticket *ticket, u64 orig_bytes)
{
	DEFINE_WAIT(wait);
	int ret = 0;

	spin_lock(&space_info->lock);
	while (ticket->bytes > 0 && ticket->error == 0) {
		ret = prepare_to_wait_event(&ticket->wait, &wait, TASK_KILLABLE);
		if (ret) {
			ret = -EINTR;
			break;
		}
		spin_unlock(&space_info->lock);

		schedule();

		finish_wait(&ticket->wait, &wait);
		spin_lock(&space_info->lock);
	}
	if (!ret)
		ret = ticket->error;
	if (!list_empty(&ticket->list))
		list_del_init(&ticket->list);
	if (ticket->bytes && ticket->bytes < orig_bytes) {
		u64 num_bytes = orig_bytes - ticket->bytes;
		space_info->bytes_may_use -= num_bytes;
		trace_btrfs_space_reservation(fs_info, "space_info",
					      space_info->flags, num_bytes, 0);
	}
	spin_unlock(&space_info->lock);

	return ret;
}

/**
 * __reserve_metadata_bytes - try to reserve bytes from a space_info
 * @fs_info - the fs we are working on
 * @space_info - the space info we want to allocate from
 * @orig_bytes - the number of bytes we want
 * @flush - whether or not we can flush to make our reservation
 * @system_chunk - true if this reservation is for system chunk space
 *
 * This will reserve orig_bytes number of bytes from the space info associated
 * with the block_rsv. If there is not enough space it will make an attempt to
 * flush out space to make room. It will do this by flushing delalloc if
 * possible or committing the transaction. If flush is BTRFS_RESERVE_NO_FLUSH
 * then no attempts to regain reservations will be made and this will fail if
 * there is not enough space already.
 */
static int __reserve_metadata_bytes(struct btrfs_fs_info *fs_info,
				    struct btrfs_space_info *space_info,
				    u64 orig_bytes,
				    enum btrfs_reserve_flush_enum flush,
				    bool system_chunk)
{
	struct reserve_ticket ticket;
	u64 used;
	int ret = 0;

	ASSERT(orig_bytes);
	ASSERT(!current->journal_info || flush != BTRFS_RESERVE_FLUSH_ALL);

	spin_lock(&space_info->lock);
	ret = -ENOSPC;
	used = btrfs_space_info_used(space_info, true);

	/*
	 * If we have enough space then hooray, make our reservation and carry
	 * on. If not see if we can overcommit, and if we can, hooray carry on.
	 * If not things get more complicated.
	 */
	if (used + orig_bytes <= space_info->total_bytes) {
		space_info->bytes_may_use += orig_bytes;
		trace_btrfs_space_reservation(fs_info, "space_info",
					      space_info->flags, orig_bytes, 1);
		ret = 0;
	} else if (can_overcommit(fs_info, space_info, orig_bytes, flush,
				  system_chunk)) {
		space_info->bytes_may_use += orig_bytes;
		trace_btrfs_space_reservation(fs_info, "space_info",
					      space_info->flags, orig_bytes, 1);
		ret = 0;
	}

	/*
	 * If we couldn't make a reservation then setup our reservation ticket
	 * and kick the async worker if it's not already running.
	 *
	 * If we are a priority flusher then we just need to add our ticket to
	 * the list and we will do our own flushing further down.
	 */
	if (ret && flush != BTRFS_RESERVE_NO_FLUSH) {
		ticket.bytes = orig_bytes;
		ticket.error = 0;
		init_waitqueue_head(&ticket.wait);
		if (flush == BTRFS_RESERVE_FLUSH_ALL) {
			list_add_tail(&ticket.list, &space_info->tickets);
			if (!space_info->flush) {
				space_info->flush = 1;
				trace_btrfs_trigger_flush(fs_info,
							  space_info->flags,
							  orig_bytes, flush,
							  "enospc");
				queue_work(system_unbound_wq,
					   &fs_info->async_reclaim_work);
			}
		} else {
			list_add_tail(&ticket.list,
				      &space_info->priority_tickets);
		}
	} else if (!ret && space_info->flags & BTRFS_BLOCK_GROUP_METADATA) {
		used += orig_bytes;
		/*
		 * We will do the space reservation dance during log replay,
		 * which means we won't have fs_info->fs_root set, so don't do
		 * the async reclaim as we will panic.
		 */
		if (!test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags) &&
		    need_do_async_reclaim(fs_info, space_info,
					  used, system_chunk) &&
		    !work_busy(&fs_info->async_reclaim_work)) {
			trace_btrfs_trigger_flush(fs_info, space_info->flags,
						  orig_bytes, flush, "preempt");
			queue_work(system_unbound_wq,
				   &fs_info->async_reclaim_work);
		}
	}
	spin_unlock(&space_info->lock);
	if (!ret || flush == BTRFS_RESERVE_NO_FLUSH)
		return ret;

	if (flush == BTRFS_RESERVE_FLUSH_ALL)
		return wait_reserve_ticket(fs_info, space_info, &ticket,
					   orig_bytes);

	ret = 0;
	priority_reclaim_metadata_space(fs_info, space_info, &ticket);
	spin_lock(&space_info->lock);
	if (ticket.bytes) {
		if (ticket.bytes < orig_bytes) {
			u64 num_bytes = orig_bytes - ticket.bytes;
			space_info->bytes_may_use -= num_bytes;
			trace_btrfs_space_reservation(fs_info, "space_info",
						      space_info->flags,
						      num_bytes, 0);
		}
		list_del_init(&ticket.list);
		ret = -ENOSPC;
	}
	spin_unlock(&space_info->lock);
	ASSERT(list_empty(&ticket.list));
	return ret;
}

/**
 * reserve_metadata_bytes - try to reserve bytes from the block_rsv's space
 * @root - the root we're allocating for
 * @block_rsv - the block_rsv we're allocating for
 * @orig_bytes - the number of bytes we want
 * @flush - whether or not we can flush to make our reservation
 *
 * This will reserve orig_bytes number of bytes from the space info associated
 * with the block_rsv. If there is not enough space it will make an attempt to
 * flush out space to make room. It will do this by flushing delalloc if
 * possible or committing the transaction. If flush is 0 then no attempts to
 * regain reservations will be made and this will fail if there is not enough
 * space already.
 */
static int reserve_metadata_bytes(struct btrfs_root *root,
				  struct btrfs_block_rsv *block_rsv,
				  u64 orig_bytes,
				  enum btrfs_reserve_flush_enum flush)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
	int ret;
	bool system_chunk = (root == fs_info->chunk_root);

	ret = __reserve_metadata_bytes(fs_info, block_rsv->space_info,
				       orig_bytes, flush, system_chunk);
	if (ret == -ENOSPC &&
	    unlikely(root->orphan_cleanup_state == ORPHAN_CLEANUP_STARTED)) {
		if (block_rsv != global_rsv &&
		    !block_rsv_use_bytes(global_rsv, orig_bytes))
			ret = 0;
	}
	if (ret == -ENOSPC)
		trace_btrfs_space_reservation(fs_info, "space_info:enospc",
					      block_rsv->space_info->flags,
					      orig_bytes, 1);
	return ret;
}

static struct btrfs_block_rsv *get_block_rsv(
					const struct btrfs_trans_handle *trans,
					const struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_block_rsv *block_rsv = NULL;

	if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) ||
	    (root == fs_info->csum_root && trans->adding_csums) ||
	    (root == fs_info->uuid_root))
		block_rsv = trans->block_rsv;

	if (!block_rsv)
		block_rsv = root->block_rsv;

	if (!block_rsv)
		block_rsv = &fs_info->empty_block_rsv;

	return block_rsv;
}

static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
			       u64 num_bytes)
{
	int ret = -ENOSPC;

	spin_lock(&block_rsv->lock);
	if (block_rsv->reserved >= num_bytes) {
		block_rsv->reserved -= num_bytes;
		if (block_rsv->reserved < block_rsv->size)
			block_rsv->full = 0;
		ret = 0;
	}
	spin_unlock(&block_rsv->lock);
	return ret;
}

static void block_rsv_add_bytes(struct btrfs_block_rsv *block_rsv,
				u64 num_bytes, int update_size)
{
	spin_lock(&block_rsv->lock);
	block_rsv->reserved += num_bytes;
	if (update_size)
		block_rsv->size += num_bytes;
	else if (block_rsv->reserved >= block_rsv->size)
		block_rsv->full = 1;
	spin_unlock(&block_rsv->lock);
}

int btrfs_cond_migrate_bytes(struct btrfs_fs_info *fs_info,
			     struct btrfs_block_rsv *dest, u64 num_bytes,
			     int min_factor)
{
	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
	u64 min_bytes;

	if (global_rsv->space_info != dest->space_info)
		return -ENOSPC;

	spin_lock(&global_rsv->lock);
	min_bytes = div_factor(global_rsv->size, min_factor);
	if (global_rsv->reserved < min_bytes + num_bytes) {
		spin_unlock(&global_rsv->lock);
		return -ENOSPC;
	}
	global_rsv->reserved -= num_bytes;
	if (global_rsv->reserved < global_rsv->size)
		global_rsv->full = 0;
	spin_unlock(&global_rsv->lock);

	block_rsv_add_bytes(dest, num_bytes, 1);
	return 0;
}

/*
 * This is for space we already have accounted in space_info->bytes_may_use, so
 * basically when we're returning space from block_rsv's.
 */
static void space_info_add_old_bytes(struct btrfs_fs_info *fs_info,
				     struct btrfs_space_info *space_info,
				     u64 num_bytes)
{
	struct reserve_ticket *ticket;
	struct list_head *head;
	u64 used;
	enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_NO_FLUSH;
	bool check_overcommit = false;

	spin_lock(&space_info->lock);
	head = &space_info->priority_tickets;

	/*
	 * If we are over our limit then we need to check and see if we can
	 * overcommit, and if we can't then we just need to free up our space
	 * and not satisfy any requests.
	 */
	used = btrfs_space_info_used(space_info, true);
	if (used - num_bytes >= space_info->total_bytes)
		check_overcommit = true;
again:
	while (!list_empty(head) && num_bytes) {
		ticket = list_first_entry(head, struct reserve_ticket,
					  list);
		/*
		 * We use 0 bytes because this space is already reserved, so
		 * adding the ticket space would be a double count.
		 */
		if (check_overcommit &&
		    !can_overcommit(fs_info, space_info, 0, flush, false))
			break;
		if (num_bytes >= ticket->bytes) {
			list_del_init(&ticket->list);
			num_bytes -= ticket->bytes;
			ticket->bytes = 0;
			space_info->tickets_id++;
			wake_up(&ticket->wait);
		} else {
			ticket->bytes -= num_bytes;
			num_bytes = 0;
		}
	}

	if (num_bytes && head == &space_info->priority_tickets) {
		head = &space_info->tickets;
		flush = BTRFS_RESERVE_FLUSH_ALL;
		goto again;
	}
	space_info->bytes_may_use -= num_bytes;
	trace_btrfs_space_reservation(fs_info, "space_info",
				      space_info->flags, num_bytes, 0);
	spin_unlock(&space_info->lock);
}

/*
 * This is for newly allocated space that isn't accounted in
 * space_info->bytes_may_use yet. So if we allocate a chunk or unpin an extent
 * we use this helper.
 */
static void space_info_add_new_bytes(struct btrfs_fs_info *fs_info,
				     struct btrfs_space_info *space_info,
				     u64 num_bytes)
{
	struct reserve_ticket *ticket;
	struct list_head *head = &space_info->priority_tickets;

again:
	while (!list_empty(head) && num_bytes) {
		ticket = list_first_entry(head, struct reserve_ticket,
					  list);
		if (num_bytes >= ticket->bytes) {
			trace_btrfs_space_reservation(fs_info, "space_info",
						      space_info->flags,
						      ticket->bytes, 1);
			list_del_init(&ticket->list);
			num_bytes -= ticket->bytes;
			space_info->bytes_may_use += ticket->bytes;
			ticket->bytes = 0;
			space_info->tickets_id++;
			wake_up(&ticket->wait);
		} else {
			trace_btrfs_space_reservation(fs_info, "space_info",
						      space_info->flags,
						      num_bytes, 1);
			space_info->bytes_may_use += num_bytes;
			ticket->bytes -= num_bytes;
			num_bytes = 0;
		}
	}

	if (num_bytes && head == &space_info->priority_tickets) {
		head = &space_info->tickets;
		goto again;
	}
}

static u64 block_rsv_release_bytes(struct btrfs_fs_info *fs_info,
				   struct btrfs_block_rsv *block_rsv,
				   struct btrfs_block_rsv *dest, u64 num_bytes)
{
	struct btrfs_space_info *space_info = block_rsv->space_info;
	u64 ret;

	spin_lock(&block_rsv->lock);
	if (num_bytes == (u64)-1)
		num_bytes = block_rsv->size;
	block_rsv->size -= num_bytes;
	if (block_rsv->reserved >= block_rsv->size) {
		num_bytes = block_rsv->reserved - block_rsv->size;
		block_rsv->reserved = block_rsv->size;
		block_rsv->full = 1;
	} else {
		num_bytes = 0;
	}
	spin_unlock(&block_rsv->lock);

	ret = num_bytes;
	if (num_bytes > 0) {
		if (dest) {
			spin_lock(&dest->lock);
			if (!dest->full) {
				u64 bytes_to_add;

				bytes_to_add = dest->size - dest->reserved;
				bytes_to_add = min(num_bytes, bytes_to_add);
				dest->reserved += bytes_to_add;
				if (dest->reserved >= dest->size)
					dest->full = 1;
				num_bytes -= bytes_to_add;
			}
			spin_unlock(&dest->lock);
		}
		if (num_bytes)
			space_info_add_old_bytes(fs_info, space_info,
						 num_bytes);
	}
	return ret;
}
  4970. int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src,
  4971. struct btrfs_block_rsv *dst, u64 num_bytes,
  4972. int update_size)
  4973. {
  4974. int ret;
  4975. ret = block_rsv_use_bytes(src, num_bytes);
  4976. if (ret)
  4977. return ret;
  4978. block_rsv_add_bytes(dst, num_bytes, update_size);
  4979. return 0;
  4980. }
  4981. void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv, unsigned short type)
  4982. {
  4983. memset(rsv, 0, sizeof(*rsv));
  4984. spin_lock_init(&rsv->lock);
  4985. rsv->type = type;
  4986. }
  4987. void btrfs_init_metadata_block_rsv(struct btrfs_fs_info *fs_info,
  4988. struct btrfs_block_rsv *rsv,
  4989. unsigned short type)
  4990. {
  4991. btrfs_init_block_rsv(rsv, type);
  4992. rsv->space_info = __find_space_info(fs_info,
  4993. BTRFS_BLOCK_GROUP_METADATA);
  4994. }
  4995. struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_fs_info *fs_info,
  4996. unsigned short type)
  4997. {
  4998. struct btrfs_block_rsv *block_rsv;
  4999. block_rsv = kmalloc(sizeof(*block_rsv), GFP_NOFS);
  5000. if (!block_rsv)
  5001. return NULL;
  5002. btrfs_init_metadata_block_rsv(fs_info, block_rsv, type);
  5003. return block_rsv;
  5004. }
  5005. void btrfs_free_block_rsv(struct btrfs_fs_info *fs_info,
  5006. struct btrfs_block_rsv *rsv)
  5007. {
  5008. if (!rsv)
  5009. return;
  5010. btrfs_block_rsv_release(fs_info, rsv, (u64)-1);
  5011. kfree(rsv);
  5012. }
  5013. void __btrfs_free_block_rsv(struct btrfs_block_rsv *rsv)
  5014. {
  5015. kfree(rsv);
  5016. }
  5017. int btrfs_block_rsv_add(struct btrfs_root *root,
  5018. struct btrfs_block_rsv *block_rsv, u64 num_bytes,
  5019. enum btrfs_reserve_flush_enum flush)
  5020. {
  5021. int ret;
  5022. if (num_bytes == 0)
  5023. return 0;
  5024. ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
  5025. if (!ret) {
  5026. block_rsv_add_bytes(block_rsv, num_bytes, 1);
  5027. return 0;
  5028. }
  5029. return ret;
  5030. }
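
/*
 * Illustrative sketch, not part of the original file: the typical life
 * cycle of a private block reservation built from the helpers above.
 * The function name and the one-item sizing are assumptions made for
 * the example.
 */
static int __maybe_unused example_block_rsv_lifecycle(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_block_rsv *rsv;
	u64 num_bytes = btrfs_calc_trans_metadata_size(fs_info, 1);
	int ret;

	rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP);
	if (!rsv)
		return -ENOMEM;

	/* Reserve enough metadata space for one tree item. */
	ret = btrfs_block_rsv_add(root, rsv, num_bytes,
				  BTRFS_RESERVE_FLUSH_ALL);
	if (!ret) {
		/* ... consume the reservation here ... */
		btrfs_block_rsv_release(fs_info, rsv, num_bytes);
	}

	/* Releases any remainder ((u64)-1) and frees the rsv itself. */
	btrfs_free_block_rsv(fs_info, rsv);
	return ret;
}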
int btrfs_block_rsv_check(struct btrfs_block_rsv *block_rsv, int min_factor)
{
	u64 num_bytes = 0;
	int ret = -ENOSPC;

	if (!block_rsv)
		return 0;

	spin_lock(&block_rsv->lock);
	num_bytes = div_factor(block_rsv->size, min_factor);
	if (block_rsv->reserved >= num_bytes)
		ret = 0;
	spin_unlock(&block_rsv->lock);

	return ret;
}
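
/*
 * Illustrative sketch, not part of the original file: min_factor is in
 * tenths (div_factor), so passing 5 asks whether at least half of
 * ->size is still reserved.  The helper name is an assumption.
 */
static bool __maybe_unused example_rsv_half_full(struct btrfs_block_rsv *rsv)
{
	/* 0 means "reserved >= 5/10 of size", -ENOSPC otherwise. */
	return btrfs_block_rsv_check(rsv, 5) == 0;
}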
int btrfs_block_rsv_refill(struct btrfs_root *root,
			   struct btrfs_block_rsv *block_rsv, u64 min_reserved,
			   enum btrfs_reserve_flush_enum flush)
{
	u64 num_bytes = 0;
	int ret = -ENOSPC;

	if (!block_rsv)
		return 0;

	spin_lock(&block_rsv->lock);
	num_bytes = min_reserved;
	if (block_rsv->reserved >= num_bytes)
		ret = 0;
	else
		num_bytes -= block_rsv->reserved;
	spin_unlock(&block_rsv->lock);

	if (!ret)
		return 0;

	ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
	if (!ret) {
		block_rsv_add_bytes(block_rsv, num_bytes, 0);
		return 0;
	}

	return ret;
}
/**
 * btrfs_inode_rsv_refill - refill the inode block rsv.
 * @inode - the inode we are refilling.
 * @flush - the flushing restriction.
 *
 * Essentially the same as btrfs_block_rsv_refill, except it uses the
 * block_rsv->size as the minimum size. We'll either refill the missing amount
 * or return if we already have enough space. This will also handle the
 * reserve tracepoint for the reserved amount.
 */
static int btrfs_inode_rsv_refill(struct btrfs_inode *inode,
				  enum btrfs_reserve_flush_enum flush)
{
	struct btrfs_root *root = inode->root;
	struct btrfs_block_rsv *block_rsv = &inode->block_rsv;
	u64 num_bytes = 0;
	int ret = -ENOSPC;

	spin_lock(&block_rsv->lock);
	if (block_rsv->reserved < block_rsv->size)
		num_bytes = block_rsv->size - block_rsv->reserved;
	spin_unlock(&block_rsv->lock);

	if (num_bytes == 0)
		return 0;

	ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
	if (!ret) {
		block_rsv_add_bytes(block_rsv, num_bytes, 0);
		trace_btrfs_space_reservation(root->fs_info, "delalloc",
					      btrfs_ino(inode), num_bytes, 1);
	}
	return ret;
}
/**
 * btrfs_inode_rsv_release - release any excessive reservation.
 * @inode - the inode we need to release from.
 *
 * This is the same as btrfs_block_rsv_release, except that it handles the
 * tracepoint for the reservation.
 */
void btrfs_inode_rsv_release(struct btrfs_inode *inode)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
	struct btrfs_block_rsv *block_rsv = &inode->block_rsv;
	u64 released = 0;

	/*
	 * Since we statically set the block_rsv->size we just want to say we
	 * are releasing 0 bytes, and then we'll just get the reservation over
	 * the size free'd.
	 */
	released = block_rsv_release_bytes(fs_info, block_rsv, global_rsv, 0);
	if (released > 0)
		trace_btrfs_space_reservation(fs_info, "delalloc",
					      btrfs_ino(inode), released, 0);
}
void btrfs_block_rsv_release(struct btrfs_fs_info *fs_info,
			     struct btrfs_block_rsv *block_rsv,
			     u64 num_bytes)
{
	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;

	if (global_rsv == block_rsv ||
	    block_rsv->space_info != global_rsv->space_info)
		global_rsv = NULL;
	block_rsv_release_bytes(fs_info, block_rsv, global_rsv, num_bytes);
}

static void update_global_block_rsv(struct btrfs_fs_info *fs_info)
{
	struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
	struct btrfs_space_info *sinfo = block_rsv->space_info;
	u64 num_bytes;

	/*
	 * The global block rsv is based on the size of the extent tree, the
	 * checksum tree and the root tree. If the fs is empty we want to set
	 * it to a minimal amount for safety.
	 */
	num_bytes = btrfs_root_used(&fs_info->extent_root->root_item) +
		btrfs_root_used(&fs_info->csum_root->root_item) +
		btrfs_root_used(&fs_info->tree_root->root_item);
	num_bytes = max_t(u64, num_bytes, SZ_16M);

	spin_lock(&sinfo->lock);
	spin_lock(&block_rsv->lock);

	block_rsv->size = min_t(u64, num_bytes, SZ_512M);

	if (block_rsv->reserved < block_rsv->size) {
		num_bytes = btrfs_space_info_used(sinfo, true);
		if (sinfo->total_bytes > num_bytes) {
			num_bytes = sinfo->total_bytes - num_bytes;
			num_bytes = min(num_bytes,
					block_rsv->size - block_rsv->reserved);
			block_rsv->reserved += num_bytes;
			sinfo->bytes_may_use += num_bytes;
			trace_btrfs_space_reservation(fs_info, "space_info",
						      sinfo->flags, num_bytes,
						      1);
		}
	} else if (block_rsv->reserved > block_rsv->size) {
		num_bytes = block_rsv->reserved - block_rsv->size;
		sinfo->bytes_may_use -= num_bytes;
		trace_btrfs_space_reservation(fs_info, "space_info",
					      sinfo->flags, num_bytes, 0);
		block_rsv->reserved = block_rsv->size;
	}

	if (block_rsv->reserved == block_rsv->size)
		block_rsv->full = 1;
	else
		block_rsv->full = 0;

	spin_unlock(&block_rsv->lock);
	spin_unlock(&sinfo->lock);
}
static void init_global_block_rsv(struct btrfs_fs_info *fs_info)
{
	struct btrfs_space_info *space_info;

	space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
	fs_info->chunk_block_rsv.space_info = space_info;

	space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
	fs_info->global_block_rsv.space_info = space_info;
	fs_info->trans_block_rsv.space_info = space_info;
	fs_info->empty_block_rsv.space_info = space_info;
	fs_info->delayed_block_rsv.space_info = space_info;

	fs_info->extent_root->block_rsv = &fs_info->global_block_rsv;
	fs_info->csum_root->block_rsv = &fs_info->global_block_rsv;
	fs_info->dev_root->block_rsv = &fs_info->global_block_rsv;
	fs_info->tree_root->block_rsv = &fs_info->global_block_rsv;
	if (fs_info->quota_root)
		fs_info->quota_root->block_rsv = &fs_info->global_block_rsv;
	fs_info->chunk_root->block_rsv = &fs_info->chunk_block_rsv;

	update_global_block_rsv(fs_info);
}

static void release_global_block_rsv(struct btrfs_fs_info *fs_info)
{
	block_rsv_release_bytes(fs_info, &fs_info->global_block_rsv, NULL,
				(u64)-1);
	WARN_ON(fs_info->trans_block_rsv.size > 0);
	WARN_ON(fs_info->trans_block_rsv.reserved > 0);
	WARN_ON(fs_info->chunk_block_rsv.size > 0);
	WARN_ON(fs_info->chunk_block_rsv.reserved > 0);
	WARN_ON(fs_info->delayed_block_rsv.size > 0);
	WARN_ON(fs_info->delayed_block_rsv.reserved > 0);
}

void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans,
				  struct btrfs_fs_info *fs_info)
{
	if (!trans->block_rsv) {
		ASSERT(!trans->bytes_reserved);
		return;
	}

	if (!trans->bytes_reserved)
		return;

	ASSERT(trans->block_rsv == &fs_info->trans_block_rsv);
	trace_btrfs_space_reservation(fs_info, "transaction",
				      trans->transid, trans->bytes_reserved, 0);
	btrfs_block_rsv_release(fs_info, trans->block_rsv,
				trans->bytes_reserved);
	trans->bytes_reserved = 0;
}

/*
 * To be called after all the new block groups attached to the transaction
 * handle have been created (btrfs_create_pending_block_groups()).
 */
void btrfs_trans_release_chunk_metadata(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;

	if (!trans->chunk_bytes_reserved)
		return;

	WARN_ON_ONCE(!list_empty(&trans->new_bgs));

	block_rsv_release_bytes(fs_info, &fs_info->chunk_block_rsv, NULL,
				trans->chunk_bytes_reserved);
	trans->chunk_bytes_reserved = 0;
}
/* Can only return 0 or -ENOSPC */
int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans,
				  struct btrfs_inode *inode)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
	struct btrfs_root *root = inode->root;
	/*
	 * We always use trans->block_rsv here as we will have reserved space
	 * for our orphan when starting the transaction, using get_block_rsv()
	 * here will sometimes make us choose the wrong block rsv as we could
	 * be doing a reloc inode for a non refcounted root.
	 */
	struct btrfs_block_rsv *src_rsv = trans->block_rsv;
	struct btrfs_block_rsv *dst_rsv = root->orphan_block_rsv;

	/*
	 * We need to hold space in order to delete our orphan item once we've
	 * added it, so this takes the reservation so we can release it later
	 * when we are truly done with the orphan item.
	 */
	u64 num_bytes = btrfs_calc_trans_metadata_size(fs_info, 1);

	trace_btrfs_space_reservation(fs_info, "orphan", btrfs_ino(inode),
				      num_bytes, 1);
	return btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, 1);
}

void btrfs_orphan_release_metadata(struct btrfs_inode *inode)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
	struct btrfs_root *root = inode->root;
	u64 num_bytes = btrfs_calc_trans_metadata_size(fs_info, 1);

	trace_btrfs_space_reservation(fs_info, "orphan", btrfs_ino(inode),
				      num_bytes, 0);
	btrfs_block_rsv_release(fs_info, root->orphan_block_rsv, num_bytes);
}
/*
 * btrfs_subvolume_reserve_metadata() - reserve space for subvolume operation
 * root: the root of the parent directory
 * rsv: block reservation
 * items: the number of items that we need to reserve
 * qgroup_reserved: used to return the reserved size in qgroup
 *
 * This function is used to reserve the space for snapshot/subvolume
 * creation and deletion. Those operations differ from the common
 * file/directory operations: they change two fs/file trees and the root
 * tree, and the number of items that the qgroup reserves differs from
 * the free space reservation. So we can not use the space reservation
 * mechanism in start_transaction().
 */
int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
				     struct btrfs_block_rsv *rsv,
				     int items,
				     u64 *qgroup_reserved,
				     bool use_global_rsv)
{
	u64 num_bytes;
	int ret;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;

	if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)) {
		/* One for parent inode, two for dir entries */
		num_bytes = 3 * fs_info->nodesize;
		ret = btrfs_qgroup_reserve_meta(root, num_bytes, true);
		if (ret)
			return ret;
	} else {
		num_bytes = 0;
	}

	*qgroup_reserved = num_bytes;

	num_bytes = btrfs_calc_trans_metadata_size(fs_info, items);
	rsv->space_info = __find_space_info(fs_info,
					    BTRFS_BLOCK_GROUP_METADATA);
	ret = btrfs_block_rsv_add(root, rsv, num_bytes,
				  BTRFS_RESERVE_FLUSH_ALL);

	if (ret == -ENOSPC && use_global_rsv)
		ret = btrfs_block_rsv_migrate(global_rsv, rsv, num_bytes, 1);

	if (ret && *qgroup_reserved)
		btrfs_qgroup_free_meta(root, *qgroup_reserved);

	return ret;
}

void btrfs_subvolume_release_metadata(struct btrfs_fs_info *fs_info,
				      struct btrfs_block_rsv *rsv)
{
	btrfs_block_rsv_release(fs_info, rsv, (u64)-1);
}
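
/*
 * Illustrative sketch, not part of the original file: pairing
 * btrfs_subvolume_reserve_metadata() with its release.  The function
 * name and the item count are assumptions; real callers size @items
 * from the operation they are about to perform.
 */
static int __maybe_unused example_subvolume_rsv(struct btrfs_root *root)
{
	struct btrfs_block_rsv rsv;
	u64 qgroup_reserved = 0;
	int ret;

	btrfs_init_block_rsv(&rsv, BTRFS_BLOCK_RSV_TEMP);
	ret = btrfs_subvolume_reserve_metadata(root, &rsv, 8,
					       &qgroup_reserved, true);
	if (ret)
		return ret;

	/* ... create or delete the snapshot/subvolume ... */

	btrfs_subvolume_release_metadata(root->fs_info, &rsv);
	return 0;
}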
static void btrfs_calculate_inode_block_rsv_size(struct btrfs_fs_info *fs_info,
						 struct btrfs_inode *inode)
{
	struct btrfs_block_rsv *block_rsv = &inode->block_rsv;
	u64 reserve_size = 0;
	u64 csum_leaves;
	unsigned outstanding_extents;

	lockdep_assert_held(&inode->lock);
	outstanding_extents = inode->outstanding_extents;
	if (outstanding_extents)
		reserve_size = btrfs_calc_trans_metadata_size(fs_info,
						outstanding_extents + 1);
	csum_leaves = btrfs_csum_bytes_to_leaves(fs_info,
						 inode->csum_bytes);
	reserve_size += btrfs_calc_trans_metadata_size(fs_info,
						       csum_leaves);

	spin_lock(&block_rsv->lock);
	block_rsv->size = reserve_size;
	spin_unlock(&block_rsv->lock);
}

int btrfs_delalloc_reserve_metadata(struct btrfs_inode *inode, u64 num_bytes)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
	struct btrfs_root *root = inode->root;
	unsigned nr_extents;
	enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_FLUSH_ALL;
	int ret = 0;
	bool delalloc_lock = true;

	/*
	 * If we are a free space inode we need to not flush since we will be
	 * in the middle of a transaction commit. We also don't need the
	 * delalloc mutex since we won't race with anybody. We need this
	 * mostly to make lockdep shut its filthy mouth.
	 *
	 * If we have a transaction open (can happen if we call truncate_block
	 * from truncate), then we need FLUSH_LIMIT so we don't deadlock.
	 */
	if (btrfs_is_free_space_inode(inode)) {
		flush = BTRFS_RESERVE_NO_FLUSH;
		delalloc_lock = false;
	} else if (current->journal_info) {
		flush = BTRFS_RESERVE_FLUSH_LIMIT;
	}

	if (flush != BTRFS_RESERVE_NO_FLUSH &&
	    btrfs_transaction_in_commit(fs_info))
		schedule_timeout(1);

	if (delalloc_lock)
		mutex_lock(&inode->delalloc_mutex);

	num_bytes = ALIGN(num_bytes, fs_info->sectorsize);

	/* Add our new extents and calculate the new rsv size. */
	spin_lock(&inode->lock);
	nr_extents = count_max_extents(num_bytes);
	btrfs_mod_outstanding_extents(inode, nr_extents);
	inode->csum_bytes += num_bytes;
	btrfs_calculate_inode_block_rsv_size(fs_info, inode);
	spin_unlock(&inode->lock);

	if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)) {
		ret = btrfs_qgroup_reserve_meta(root,
				nr_extents * fs_info->nodesize, true);
		if (ret)
			goto out_fail;
	}

	ret = btrfs_inode_rsv_refill(inode, flush);
	if (unlikely(ret)) {
		btrfs_qgroup_free_meta(root,
				       nr_extents * fs_info->nodesize);
		goto out_fail;
	}

	if (delalloc_lock)
		mutex_unlock(&inode->delalloc_mutex);
	return 0;

out_fail:
	spin_lock(&inode->lock);
	nr_extents = count_max_extents(num_bytes);
	btrfs_mod_outstanding_extents(inode, -nr_extents);
	inode->csum_bytes -= num_bytes;
	btrfs_calculate_inode_block_rsv_size(fs_info, inode);
	spin_unlock(&inode->lock);

	btrfs_inode_rsv_release(inode);
	if (delalloc_lock)
		mutex_unlock(&inode->delalloc_mutex);
	return ret;
}
/**
 * btrfs_delalloc_release_metadata - release a metadata reservation for an inode
 * @inode: the inode to release the reservation for.
 * @num_bytes: the number of bytes we are releasing.
 *
 * This will release the metadata reservation for an inode. This can be called
 * once we complete IO for a given set of bytes to release their metadata
 * reservations, or on error for the same reason.
 */
void btrfs_delalloc_release_metadata(struct btrfs_inode *inode, u64 num_bytes)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);

	num_bytes = ALIGN(num_bytes, fs_info->sectorsize);
	spin_lock(&inode->lock);
	inode->csum_bytes -= num_bytes;
	btrfs_calculate_inode_block_rsv_size(fs_info, inode);
	spin_unlock(&inode->lock);

	if (btrfs_is_testing(fs_info))
		return;

	btrfs_inode_rsv_release(inode);
}

/**
 * btrfs_delalloc_release_extents - release our outstanding_extents
 * @inode: the inode to balance the reservation for.
 * @num_bytes: the number of bytes we originally reserved with
 *
 * When we reserve space we increase outstanding_extents for the extents we may
 * add. Once we've set the range as delalloc or created our ordered extents we
 * have outstanding_extents to track the real usage, so we use this to free our
 * temporarily tracked outstanding_extents. This _must_ be used in conjunction
 * with btrfs_delalloc_reserve_metadata.
 */
void btrfs_delalloc_release_extents(struct btrfs_inode *inode, u64 num_bytes)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
	unsigned num_extents;

	spin_lock(&inode->lock);
	num_extents = count_max_extents(num_bytes);
	btrfs_mod_outstanding_extents(inode, -num_extents);
	btrfs_calculate_inode_block_rsv_size(fs_info, inode);
	spin_unlock(&inode->lock);

	if (btrfs_is_testing(fs_info))
		return;

	btrfs_inode_rsv_release(inode);
}
/**
 * btrfs_delalloc_reserve_space - reserve data and metadata space for
 * delalloc
 * @inode: inode we're writing to
 * @start: start range we are writing to
 * @len: length of the range we are writing to
 * @reserved: mandatory parameter, record the actually reserved qgroup ranges
 * of the current reservation.
 *
 * This will do the following things
 *
 * o reserve space in data space info for num bytes
 *   and reserve precious corresponding qgroup space
 *   (Done in check_data_free_space)
 *
 * o reserve space for metadata space, based on the number of outstanding
 *   extents and how much csums will be needed
 *   also reserve metadata space in a per root over-reserve method.
 * o add to the inodes->delalloc_bytes
 * o add it to the fs_info's delalloc inodes list.
 *   (Above 3 all done in delalloc_reserve_metadata)
 *
 * Return 0 for success
 * Return <0 for error (-ENOSPC or -EDQUOT)
 */
int btrfs_delalloc_reserve_space(struct inode *inode,
			struct extent_changeset **reserved, u64 start, u64 len)
{
	int ret;

	ret = btrfs_check_data_free_space(inode, reserved, start, len);
	if (ret < 0)
		return ret;
	ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode), len);
	if (ret < 0)
		btrfs_free_reserved_data_space(inode, *reserved, start, len);
	return ret;
}

/**
 * btrfs_delalloc_release_space - release data and metadata space for delalloc
 * @inode: inode we're releasing space for
 * @reserved: the qgroup ranges recorded at reservation time
 * @start: start position of the space already reserved
 * @len: length of the space already reserved
 *
 * This function will release the metadata space that was not used and will
 * decrement ->delalloc_bytes and remove it from the fs_info delalloc_inodes
 * list if there are no delalloc bytes left.
 * Also it will handle the qgroup reserved space.
 */
void btrfs_delalloc_release_space(struct inode *inode,
				  struct extent_changeset *reserved,
				  u64 start, u64 len)
{
	btrfs_delalloc_release_metadata(BTRFS_I(inode), len);
	btrfs_free_reserved_data_space(inode, reserved, start, len);
}
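
/*
 * Illustrative sketch, not part of the original file: the reserve/release
 * pairing a buffered-write style path would use around the two helpers
 * above.  Error handling is trimmed, the function name is an assumption,
 * and extent_changeset_free() is assumed available from the extent_io
 * headers.
 */
static int __maybe_unused example_delalloc_write(struct inode *inode,
						 u64 start, u64 len)
{
	struct extent_changeset *reserved = NULL;
	int ret;

	ret = btrfs_delalloc_reserve_space(inode, &reserved, start, len);
	if (ret)
		return ret;

	/* ... dirty the pages and mark the range delalloc ... */

	/* Drop the temporary outstanding_extents bump either way. */
	btrfs_delalloc_release_extents(BTRFS_I(inode), len);
	/* On failure we would call btrfs_delalloc_release_space() instead. */
	extent_changeset_free(reserved);
	return 0;
}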
static int update_block_group(struct btrfs_trans_handle *trans,
			      struct btrfs_fs_info *info, u64 bytenr,
			      u64 num_bytes, int alloc)
{
	struct btrfs_block_group_cache *cache = NULL;
	u64 total = num_bytes;
	u64 old_val;
	u64 byte_in_group;
	int factor;

	/* block accounting for super block */
	spin_lock(&info->delalloc_root_lock);
	old_val = btrfs_super_bytes_used(info->super_copy);
	if (alloc)
		old_val += num_bytes;
	else
		old_val -= num_bytes;
	btrfs_set_super_bytes_used(info->super_copy, old_val);
	spin_unlock(&info->delalloc_root_lock);

	while (total) {
		cache = btrfs_lookup_block_group(info, bytenr);
		if (!cache)
			return -ENOENT;
		if (cache->flags & (BTRFS_BLOCK_GROUP_DUP |
				    BTRFS_BLOCK_GROUP_RAID1 |
				    BTRFS_BLOCK_GROUP_RAID10))
			factor = 2;
		else
			factor = 1;
		/*
		 * If this block group has free space cache written out, we
		 * need to make sure to load it if we are removing space. This
		 * is because we need the unpinning stage to actually add the
		 * space back to the block group, otherwise we will leak space.
		 */
		if (!alloc && cache->cached == BTRFS_CACHE_NO)
			cache_block_group(cache, 1);

		byte_in_group = bytenr - cache->key.objectid;
		WARN_ON(byte_in_group > cache->key.offset);

		spin_lock(&cache->space_info->lock);
		spin_lock(&cache->lock);

		if (btrfs_test_opt(info, SPACE_CACHE) &&
		    cache->disk_cache_state < BTRFS_DC_CLEAR)
			cache->disk_cache_state = BTRFS_DC_CLEAR;

		old_val = btrfs_block_group_used(&cache->item);
		num_bytes = min(total, cache->key.offset - byte_in_group);
		if (alloc) {
			old_val += num_bytes;
			btrfs_set_block_group_used(&cache->item, old_val);
			cache->reserved -= num_bytes;
			cache->space_info->bytes_reserved -= num_bytes;
			cache->space_info->bytes_used += num_bytes;
			cache->space_info->disk_used += num_bytes * factor;
			spin_unlock(&cache->lock);
			spin_unlock(&cache->space_info->lock);
		} else {
			old_val -= num_bytes;
			btrfs_set_block_group_used(&cache->item, old_val);
			cache->pinned += num_bytes;
			cache->space_info->bytes_pinned += num_bytes;
			cache->space_info->bytes_used -= num_bytes;
			cache->space_info->disk_used -= num_bytes * factor;
			spin_unlock(&cache->lock);
			spin_unlock(&cache->space_info->lock);

			trace_btrfs_space_reservation(info, "pinned",
						      cache->space_info->flags,
						      num_bytes, 1);
			percpu_counter_add(&cache->space_info->total_bytes_pinned,
					   num_bytes);
			set_extent_dirty(info->pinned_extents,
					 bytenr, bytenr + num_bytes - 1,
					 GFP_NOFS | __GFP_NOFAIL);
		}

		spin_lock(&trans->transaction->dirty_bgs_lock);
		if (list_empty(&cache->dirty_list)) {
			list_add_tail(&cache->dirty_list,
				      &trans->transaction->dirty_bgs);
			trans->transaction->num_dirty_bgs++;
			btrfs_get_block_group(cache);
		}
		spin_unlock(&trans->transaction->dirty_bgs_lock);

		/*
		 * No longer have used bytes in this block group, queue it for
		 * deletion. We do this after adding the block group to the
		 * dirty list to avoid races between cleaner kthread and space
		 * cache writeout.
		 */
		if (!alloc && old_val == 0) {
			spin_lock(&info->unused_bgs_lock);
			if (list_empty(&cache->bg_list)) {
				btrfs_get_block_group(cache);
				list_add_tail(&cache->bg_list,
					      &info->unused_bgs);
			}
			spin_unlock(&info->unused_bgs_lock);
		}
		btrfs_put_block_group(cache);
		total -= num_bytes;
		bytenr += num_bytes;
	}
	return 0;
}
static u64 first_logical_byte(struct btrfs_fs_info *fs_info, u64 search_start)
{
	struct btrfs_block_group_cache *cache;
	u64 bytenr;

	spin_lock(&fs_info->block_group_cache_lock);
	bytenr = fs_info->first_logical_byte;
	spin_unlock(&fs_info->block_group_cache_lock);

	if (bytenr < (u64)-1)
		return bytenr;

	cache = btrfs_lookup_first_block_group(fs_info, search_start);
	if (!cache)
		return 0;

	bytenr = cache->key.objectid;
	btrfs_put_block_group(cache);

	return bytenr;
}

static int pin_down_extent(struct btrfs_fs_info *fs_info,
			   struct btrfs_block_group_cache *cache,
			   u64 bytenr, u64 num_bytes, int reserved)
{
	spin_lock(&cache->space_info->lock);
	spin_lock(&cache->lock);
	cache->pinned += num_bytes;
	cache->space_info->bytes_pinned += num_bytes;
	if (reserved) {
		cache->reserved -= num_bytes;
		cache->space_info->bytes_reserved -= num_bytes;
	}
	spin_unlock(&cache->lock);
	spin_unlock(&cache->space_info->lock);

	trace_btrfs_space_reservation(fs_info, "pinned",
				      cache->space_info->flags, num_bytes, 1);
	percpu_counter_add(&cache->space_info->total_bytes_pinned, num_bytes);
	set_extent_dirty(fs_info->pinned_extents, bytenr,
			 bytenr + num_bytes - 1, GFP_NOFS | __GFP_NOFAIL);
	return 0;
}

/*
 * this function must be called within transaction
 */
int btrfs_pin_extent(struct btrfs_fs_info *fs_info,
		     u64 bytenr, u64 num_bytes, int reserved)
{
	struct btrfs_block_group_cache *cache;

	cache = btrfs_lookup_block_group(fs_info, bytenr);
	BUG_ON(!cache); /* Logic error */

	pin_down_extent(fs_info, cache, bytenr, num_bytes, reserved);

	btrfs_put_block_group(cache);
	return 0;
}
/*
 * this function must be called within transaction
 */
int btrfs_pin_extent_for_log_replay(struct btrfs_fs_info *fs_info,
				    u64 bytenr, u64 num_bytes)
{
	struct btrfs_block_group_cache *cache;
	int ret;

	cache = btrfs_lookup_block_group(fs_info, bytenr);
	if (!cache)
		return -EINVAL;

	/*
	 * pull in the free space cache (if any) so that our pin
	 * removes the free space from the cache. We have load_only set
	 * to one because the slow code to read in the free extents does check
	 * the pinned extents.
	 */
	cache_block_group(cache, 1);

	pin_down_extent(fs_info, cache, bytenr, num_bytes, 0);

	/* remove us from the free space cache (if we're there at all) */
	ret = btrfs_remove_free_space(cache, bytenr, num_bytes);
	btrfs_put_block_group(cache);
	return ret;
}

static int __exclude_logged_extent(struct btrfs_fs_info *fs_info,
				   u64 start, u64 num_bytes)
{
	int ret;
	struct btrfs_block_group_cache *block_group;
	struct btrfs_caching_control *caching_ctl;

	block_group = btrfs_lookup_block_group(fs_info, start);
	if (!block_group)
		return -EINVAL;

	cache_block_group(block_group, 0);
	caching_ctl = get_caching_control(block_group);

	if (!caching_ctl) {
		/* Logic error */
		BUG_ON(!block_group_cache_done(block_group));
		ret = btrfs_remove_free_space(block_group, start, num_bytes);
	} else {
		mutex_lock(&caching_ctl->mutex);

		if (start >= caching_ctl->progress) {
			ret = add_excluded_extent(fs_info, start, num_bytes);
		} else if (start + num_bytes <= caching_ctl->progress) {
			ret = btrfs_remove_free_space(block_group,
						      start, num_bytes);
		} else {
			num_bytes = caching_ctl->progress - start;
			ret = btrfs_remove_free_space(block_group,
						      start, num_bytes);
			if (ret)
				goto out_lock;

			num_bytes = (start + num_bytes) -
				caching_ctl->progress;
			start = caching_ctl->progress;
			ret = add_excluded_extent(fs_info, start, num_bytes);
		}
out_lock:
		mutex_unlock(&caching_ctl->mutex);
		put_caching_control(caching_ctl);
	}
	btrfs_put_block_group(block_group);
	return ret;
}
int btrfs_exclude_logged_extents(struct btrfs_fs_info *fs_info,
				 struct extent_buffer *eb)
{
	struct btrfs_file_extent_item *item;
	struct btrfs_key key;
	int found_type;
	int i;

	if (!btrfs_fs_incompat(fs_info, MIXED_GROUPS))
		return 0;

	for (i = 0; i < btrfs_header_nritems(eb); i++) {
		btrfs_item_key_to_cpu(eb, &key, i);
		if (key.type != BTRFS_EXTENT_DATA_KEY)
			continue;
		item = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item);
		found_type = btrfs_file_extent_type(eb, item);
		if (found_type == BTRFS_FILE_EXTENT_INLINE)
			continue;
		if (btrfs_file_extent_disk_bytenr(eb, item) == 0)
			continue;
		key.objectid = btrfs_file_extent_disk_bytenr(eb, item);
		key.offset = btrfs_file_extent_disk_num_bytes(eb, item);
		__exclude_logged_extent(fs_info, key.objectid, key.offset);
	}

	return 0;
}

static void
btrfs_inc_block_group_reservations(struct btrfs_block_group_cache *bg)
{
	atomic_inc(&bg->reservations);
}

void btrfs_dec_block_group_reservations(struct btrfs_fs_info *fs_info,
					const u64 start)
{
	struct btrfs_block_group_cache *bg;

	bg = btrfs_lookup_block_group(fs_info, start);
	ASSERT(bg);
	if (atomic_dec_and_test(&bg->reservations))
		wake_up_atomic_t(&bg->reservations);
	btrfs_put_block_group(bg);
}

void btrfs_wait_block_group_reservations(struct btrfs_block_group_cache *bg)
{
	struct btrfs_space_info *space_info = bg->space_info;

	ASSERT(bg->ro);

	if (!(bg->flags & BTRFS_BLOCK_GROUP_DATA))
		return;

	/*
	 * Our block group is read only but before we set it to read only,
	 * some task might have allocated an extent from it already, but it
	 * has not yet created a respective ordered extent (and added it to a
	 * root's list of ordered extents).
	 * Therefore wait for any task currently allocating extents, since the
	 * block group's reservations counter is incremented while a read lock
	 * on the groups' semaphore is held and decremented after releasing
	 * the read access on that semaphore and creating the ordered extent.
	 */
	down_write(&space_info->groups_sem);
	up_write(&space_info->groups_sem);

	wait_on_atomic_t(&bg->reservations, atomic_t_wait,
			 TASK_UNINTERRUPTIBLE);
}
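
/*
 * Illustrative sketch, not part of the original file: how an allocation
 * path brackets its work with the reservations counter so that
 * btrfs_wait_block_group_reservations() has something to wait on.  The
 * function name is an assumption.
 */
static void __maybe_unused
example_bg_reservation_window(struct btrfs_fs_info *fs_info,
			      struct btrfs_block_group_cache *bg, u64 start)
{
	btrfs_inc_block_group_reservations(bg);
	/* ... allocate an extent at @start and create its ordered extent ... */
	btrfs_dec_block_group_reservations(fs_info, start);
}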
/**
 * btrfs_add_reserved_bytes - update the block_group and space info counters
 * @cache:	The cache we are manipulating
 * @ram_bytes:	The number of bytes of file content, and will be the same as
 *		@num_bytes except for the compress path.
 * @num_bytes:	The number of bytes in question
 * @delalloc:	The blocks are allocated for the delalloc write
 *
 * This is called by the allocator when it reserves space. If this is a
 * reservation and the block group has become read only we cannot make the
 * reservation and return -EAGAIN, otherwise this function always succeeds.
 */
static int btrfs_add_reserved_bytes(struct btrfs_block_group_cache *cache,
				    u64 ram_bytes, u64 num_bytes, int delalloc)
{
	struct btrfs_space_info *space_info = cache->space_info;
	int ret = 0;

	spin_lock(&space_info->lock);
	spin_lock(&cache->lock);
	if (cache->ro) {
		ret = -EAGAIN;
	} else {
		cache->reserved += num_bytes;
		space_info->bytes_reserved += num_bytes;

		trace_btrfs_space_reservation(cache->fs_info,
				"space_info", space_info->flags,
				ram_bytes, 0);
		space_info->bytes_may_use -= ram_bytes;
		if (delalloc)
			cache->delalloc_bytes += num_bytes;
	}
	spin_unlock(&cache->lock);
	spin_unlock(&space_info->lock);
	return ret;
}

/**
 * btrfs_free_reserved_bytes - update the block_group and space info counters
 * @cache:	The cache we are manipulating
 * @num_bytes:	The number of bytes in question
 * @delalloc:	The blocks are allocated for the delalloc write
 *
 * This is called by somebody who is freeing space that was never actually used
 * on disk. For example if you reserve some space for a new leaf in transaction
 * A and before transaction A commits you free that leaf, you call this with
 * reserve set to 0 in order to clear the reservation.
 */
static int btrfs_free_reserved_bytes(struct btrfs_block_group_cache *cache,
				     u64 num_bytes, int delalloc)
{
	struct btrfs_space_info *space_info = cache->space_info;
	int ret = 0;

	spin_lock(&space_info->lock);
	spin_lock(&cache->lock);
	if (cache->ro)
		space_info->bytes_readonly += num_bytes;
	cache->reserved -= num_bytes;
	space_info->bytes_reserved -= num_bytes;

	if (delalloc)
		cache->delalloc_bytes -= num_bytes;
	spin_unlock(&cache->lock);
	spin_unlock(&space_info->lock);
	return ret;
}
void btrfs_prepare_extent_commit(struct btrfs_fs_info *fs_info)
{
	struct btrfs_caching_control *next;
	struct btrfs_caching_control *caching_ctl;
	struct btrfs_block_group_cache *cache;

	down_write(&fs_info->commit_root_sem);

	list_for_each_entry_safe(caching_ctl, next,
				 &fs_info->caching_block_groups, list) {
		cache = caching_ctl->block_group;
		if (block_group_cache_done(cache)) {
			cache->last_byte_to_unpin = (u64)-1;
			list_del_init(&caching_ctl->list);
			put_caching_control(caching_ctl);
		} else {
			cache->last_byte_to_unpin = caching_ctl->progress;
		}
	}

	if (fs_info->pinned_extents == &fs_info->freed_extents[0])
		fs_info->pinned_extents = &fs_info->freed_extents[1];
	else
		fs_info->pinned_extents = &fs_info->freed_extents[0];

	up_write(&fs_info->commit_root_sem);

	update_global_block_rsv(fs_info);
}

/*
 * Returns the free cluster for the given space info and sets empty_cluster to
 * what it should be based on the mount options.
 */
static struct btrfs_free_cluster *
fetch_cluster_info(struct btrfs_fs_info *fs_info,
		   struct btrfs_space_info *space_info, u64 *empty_cluster)
{
	struct btrfs_free_cluster *ret = NULL;

	*empty_cluster = 0;
	if (btrfs_mixed_space_info(space_info))
		return ret;

	if (space_info->flags & BTRFS_BLOCK_GROUP_METADATA) {
		ret = &fs_info->meta_alloc_cluster;
		if (btrfs_test_opt(fs_info, SSD))
			*empty_cluster = SZ_2M;
		else
			*empty_cluster = SZ_64K;
	} else if ((space_info->flags & BTRFS_BLOCK_GROUP_DATA) &&
		   btrfs_test_opt(fs_info, SSD_SPREAD)) {
		*empty_cluster = SZ_2M;
		ret = &fs_info->data_alloc_cluster;
	}

	return ret;
}
static int unpin_extent_range(struct btrfs_fs_info *fs_info,
			      u64 start, u64 end,
			      const bool return_free_space)
{
	struct btrfs_block_group_cache *cache = NULL;
	struct btrfs_space_info *space_info;
	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
	struct btrfs_free_cluster *cluster = NULL;
	u64 len;
	u64 total_unpinned = 0;
	u64 empty_cluster = 0;
	bool readonly;

	while (start <= end) {
		readonly = false;
		if (!cache ||
		    start >= cache->key.objectid + cache->key.offset) {
			if (cache)
				btrfs_put_block_group(cache);
			total_unpinned = 0;
			cache = btrfs_lookup_block_group(fs_info, start);
			BUG_ON(!cache); /* Logic error */

			cluster = fetch_cluster_info(fs_info,
						     cache->space_info,
						     &empty_cluster);
			empty_cluster <<= 1;
		}

		len = cache->key.objectid + cache->key.offset - start;
		len = min(len, end + 1 - start);

		if (start < cache->last_byte_to_unpin) {
			len = min(len, cache->last_byte_to_unpin - start);
			if (return_free_space)
				btrfs_add_free_space(cache, start, len);
		}

		start += len;
		total_unpinned += len;
		space_info = cache->space_info;

		/*
		 * If this space cluster has been marked as fragmented and we've
		 * unpinned enough in this block group to potentially allow a
		 * cluster to be created inside of it go ahead and clear the
		 * fragmented check.
		 */
		if (cluster && cluster->fragmented &&
		    total_unpinned > empty_cluster) {
			spin_lock(&cluster->lock);
			cluster->fragmented = 0;
			spin_unlock(&cluster->lock);
		}

		spin_lock(&space_info->lock);
		spin_lock(&cache->lock);
		cache->pinned -= len;
		space_info->bytes_pinned -= len;

		trace_btrfs_space_reservation(fs_info, "pinned",
					      space_info->flags, len, 0);
		space_info->max_extent_size = 0;
		percpu_counter_add(&space_info->total_bytes_pinned, -len);
		if (cache->ro) {
			space_info->bytes_readonly += len;
			readonly = true;
		}
		spin_unlock(&cache->lock);
		if (!readonly && return_free_space &&
		    global_rsv->space_info == space_info) {
			u64 to_add = len;

			spin_lock(&global_rsv->lock);
			if (!global_rsv->full) {
				to_add = min(len, global_rsv->size -
					     global_rsv->reserved);
				global_rsv->reserved += to_add;
				space_info->bytes_may_use += to_add;
				if (global_rsv->reserved >= global_rsv->size)
					global_rsv->full = 1;
				trace_btrfs_space_reservation(fs_info,
							      "space_info",
							      space_info->flags,
							      to_add, 1);
				len -= to_add;
			}
			spin_unlock(&global_rsv->lock);
			/* Add to any tickets we may have */
			if (len)
				space_info_add_new_bytes(fs_info, space_info,
							 len);
		}
		spin_unlock(&space_info->lock);
	}

	if (cache)
		btrfs_put_block_group(cache);
	return 0;
}
int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
			       struct btrfs_fs_info *fs_info)
{
	struct btrfs_block_group_cache *block_group, *tmp;
	struct list_head *deleted_bgs;
	struct extent_io_tree *unpin;
	u64 start;
	u64 end;
	int ret;

	if (fs_info->pinned_extents == &fs_info->freed_extents[0])
		unpin = &fs_info->freed_extents[1];
	else
		unpin = &fs_info->freed_extents[0];

	while (!trans->aborted) {
		mutex_lock(&fs_info->unused_bg_unpin_mutex);
		ret = find_first_extent_bit(unpin, 0, &start, &end,
					    EXTENT_DIRTY, NULL);
		if (ret) {
			mutex_unlock(&fs_info->unused_bg_unpin_mutex);
			break;
		}

		if (btrfs_test_opt(fs_info, DISCARD))
			ret = btrfs_discard_extent(fs_info, start,
						   end + 1 - start, NULL);

		clear_extent_dirty(unpin, start, end);
		unpin_extent_range(fs_info, start, end, true);
		mutex_unlock(&fs_info->unused_bg_unpin_mutex);
		cond_resched();
	}

	/*
	 * Transaction is finished. We don't need the lock anymore. We
	 * do need to clean up the block groups in case of a transaction
	 * abort.
	 */
	deleted_bgs = &trans->transaction->deleted_bgs;
	list_for_each_entry_safe(block_group, tmp, deleted_bgs, bg_list) {
		u64 trimmed = 0;

		ret = -EROFS;
		if (!trans->aborted)
			ret = btrfs_discard_extent(fs_info,
						   block_group->key.objectid,
						   block_group->key.offset,
						   &trimmed);

		list_del_init(&block_group->bg_list);
		btrfs_put_block_group_trimming(block_group);
		btrfs_put_block_group(block_group);

		if (ret) {
			const char *errstr = btrfs_decode_error(ret);
			btrfs_warn(fs_info,
			   "discard failed while removing blockgroup: errno=%d %s",
				   ret, errstr);
		}
	}

	return 0;
}
static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
			       struct btrfs_fs_info *info,
			       struct btrfs_delayed_ref_node *node, u64 parent,
			       u64 root_objectid, u64 owner_objectid,
			       u64 owner_offset, int refs_to_drop,
			       struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_key key;
	struct btrfs_path *path;
	struct btrfs_root *extent_root = info->extent_root;
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	struct btrfs_extent_inline_ref *iref;
	int ret;
	int is_data;
	int extent_slot = 0;
	int found_extent = 0;
	int num_to_del = 1;
	u32 item_size;
	u64 refs;
	u64 bytenr = node->bytenr;
	u64 num_bytes = node->num_bytes;
	int last_ref = 0;
	bool skinny_metadata = btrfs_fs_incompat(info, SKINNY_METADATA);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->reada = READA_FORWARD;
	path->leave_spinning = 1;

	is_data = owner_objectid >= BTRFS_FIRST_FREE_OBJECTID;
	BUG_ON(!is_data && refs_to_drop != 1);

	if (is_data)
		skinny_metadata = false;

	ret = lookup_extent_backref(trans, info, path, &iref,
				    bytenr, num_bytes, parent,
				    root_objectid, owner_objectid,
				    owner_offset);
	if (ret == 0) {
		extent_slot = path->slots[0];
		while (extent_slot >= 0) {
			btrfs_item_key_to_cpu(path->nodes[0], &key,
					      extent_slot);
			if (key.objectid != bytenr)
				break;
			if (key.type == BTRFS_EXTENT_ITEM_KEY &&
			    key.offset == num_bytes) {
				found_extent = 1;
				break;
			}
			if (key.type == BTRFS_METADATA_ITEM_KEY &&
			    key.offset == owner_objectid) {
				found_extent = 1;
				break;
			}
			if (path->slots[0] - extent_slot > 5)
				break;
			extent_slot--;
		}
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
		item_size = btrfs_item_size_nr(path->nodes[0], extent_slot);
		if (found_extent && item_size < sizeof(*ei))
			found_extent = 0;
#endif
		if (!found_extent) {
			BUG_ON(iref);
			ret = remove_extent_backref(trans, info, path, NULL,
						    refs_to_drop,
						    is_data, &last_ref);
			if (ret) {
				btrfs_abort_transaction(trans, ret);
				goto out;
			}
			btrfs_release_path(path);
			path->leave_spinning = 1;

			key.objectid = bytenr;
			key.type = BTRFS_EXTENT_ITEM_KEY;
			key.offset = num_bytes;

			if (!is_data && skinny_metadata) {
				key.type = BTRFS_METADATA_ITEM_KEY;
				key.offset = owner_objectid;
			}

			ret = btrfs_search_slot(trans, extent_root,
						&key, path, -1, 1);
			if (ret > 0 && skinny_metadata && path->slots[0]) {
				/*
				 * Couldn't find our skinny metadata item,
				 * see if we have ye olde extent item.
				 */
				path->slots[0]--;
				btrfs_item_key_to_cpu(path->nodes[0], &key,
						      path->slots[0]);
				if (key.objectid == bytenr &&
				    key.type == BTRFS_EXTENT_ITEM_KEY &&
				    key.offset == num_bytes)
					ret = 0;
			}

			if (ret > 0 && skinny_metadata) {
				skinny_metadata = false;
				key.objectid = bytenr;
				key.type = BTRFS_EXTENT_ITEM_KEY;
				key.offset = num_bytes;
				btrfs_release_path(path);
				ret = btrfs_search_slot(trans, extent_root,
							&key, path, -1, 1);
			}

			if (ret) {
				btrfs_err(info,
					  "umm, got %d back from search, was looking for %llu",
					  ret, bytenr);
				if (ret > 0)
					btrfs_print_leaf(path->nodes[0]);
			}
			if (ret < 0) {
				btrfs_abort_transaction(trans, ret);
				goto out;
			}
			extent_slot = path->slots[0];
		}
	} else if (WARN_ON(ret == -ENOENT)) {
		btrfs_print_leaf(path->nodes[0]);
		btrfs_err(info,
			"unable to find ref byte nr %llu parent %llu root %llu owner %llu offset %llu",
			bytenr, parent, root_objectid, owner_objectid,
			owner_offset);
		btrfs_abort_transaction(trans, ret);
		goto out;
	} else {
		btrfs_abort_transaction(trans, ret);
		goto out;
	}

	leaf = path->nodes[0];
	item_size = btrfs_item_size_nr(leaf, extent_slot);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (item_size < sizeof(*ei)) {
		BUG_ON(found_extent || extent_slot != path->slots[0]);
		ret = convert_extent_item_v0(trans, info, path, owner_objectid,
					     0);
		if (ret < 0) {
			btrfs_abort_transaction(trans, ret);
			goto out;
		}

		btrfs_release_path(path);
		path->leave_spinning = 1;

		key.objectid = bytenr;
		key.type = BTRFS_EXTENT_ITEM_KEY;
		key.offset = num_bytes;

		ret = btrfs_search_slot(trans, extent_root, &key, path,
					-1, 1);
		if (ret) {
			btrfs_err(info,
				  "umm, got %d back from search, was looking for %llu",
				  ret, bytenr);
			btrfs_print_leaf(path->nodes[0]);
		}
		if (ret < 0) {
			btrfs_abort_transaction(trans, ret);
			goto out;
		}

		extent_slot = path->slots[0];
		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, extent_slot);
	}
#endif
	BUG_ON(item_size < sizeof(*ei));
	ei = btrfs_item_ptr(leaf, extent_slot,
			    struct btrfs_extent_item);
	if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID &&
	    key.type == BTRFS_EXTENT_ITEM_KEY) {
		struct btrfs_tree_block_info *bi;

		BUG_ON(item_size < sizeof(*ei) + sizeof(*bi));
		bi = (struct btrfs_tree_block_info *)(ei + 1);
		WARN_ON(owner_objectid != btrfs_tree_block_level(leaf, bi));
	}

	refs = btrfs_extent_refs(leaf, ei);
	if (refs < refs_to_drop) {
		btrfs_err(info,
			  "trying to drop %d refs but we only have %Lu for bytenr %Lu",
			  refs_to_drop, refs, bytenr);
		ret = -EINVAL;
		btrfs_abort_transaction(trans, ret);
		goto out;
	}
	refs -= refs_to_drop;

	if (refs > 0) {
		if (extent_op)
			__run_delayed_extent_op(extent_op, leaf, ei);
		/*
		 * In the case of inline back ref, reference count will
		 * be updated by remove_extent_backref
		 */
		if (iref) {
			BUG_ON(!found_extent);
		} else {
			btrfs_set_extent_refs(leaf, ei, refs);
			btrfs_mark_buffer_dirty(leaf);
		}
		if (found_extent) {
			ret = remove_extent_backref(trans, info, path,
						    iref, refs_to_drop,
						    is_data, &last_ref);
			if (ret) {
				btrfs_abort_transaction(trans, ret);
				goto out;
			}
		}
	} else {
		if (found_extent) {
			BUG_ON(is_data && refs_to_drop !=
			       extent_data_ref_count(path, iref));
			if (iref) {
				BUG_ON(path->slots[0] != extent_slot);
			} else {
				BUG_ON(path->slots[0] != extent_slot + 1);
				path->slots[0] = extent_slot;
				num_to_del = 2;
			}
		}

		last_ref = 1;
		ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
				      num_to_del);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto out;
		}
		btrfs_release_path(path);

		if (is_data) {
			ret = btrfs_del_csums(trans, info, bytenr, num_bytes);
			if (ret) {
				btrfs_abort_transaction(trans, ret);
				goto out;
			}
		}

		ret = add_to_free_space_tree(trans, info, bytenr, num_bytes);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto out;
		}

		ret = update_block_group(trans, info, bytenr, num_bytes, 0);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto out;
		}
	}
	btrfs_release_path(path);

out:
	btrfs_free_path(path);
	return ret;
}
/*
 * when we free a block, it is possible (and likely) that we free the last
 * delayed ref for that extent as well. This searches the delayed ref tree for
 * a given extent, and if there are no other delayed refs to be processed, it
 * removes it from the tree.
 */
static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
				      u64 bytenr)
{
	struct btrfs_delayed_ref_head *head;
	struct btrfs_delayed_ref_root *delayed_refs;
	int ret = 0;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);
	head = btrfs_find_delayed_ref_head(delayed_refs, bytenr);
	if (!head)
		goto out_delayed_unlock;

	spin_lock(&head->lock);
	if (!RB_EMPTY_ROOT(&head->ref_tree))
		goto out;

	if (head->extent_op) {
		if (!head->must_insert_reserved)
			goto out;
		btrfs_free_delayed_extent_op(head->extent_op);
		head->extent_op = NULL;
	}

	/*
	 * waiting for the lock here would deadlock. If someone else has it
	 * locked they are already in the process of dropping it anyway
	 */
	if (!mutex_trylock(&head->mutex))
		goto out;

	/*
	 * at this point we have a head with no other entries. Go
	 * ahead and process it.
	 */
	rb_erase(&head->href_node, &delayed_refs->href_root);
	RB_CLEAR_NODE(&head->href_node);
	atomic_dec(&delayed_refs->num_entries);

	/*
	 * we don't take a ref on the node because we're removing it from the
	 * tree, so we just steal the ref the tree was holding.
	 */
	delayed_refs->num_heads--;
	if (head->processing == 0)
		delayed_refs->num_heads_ready--;
	head->processing = 0;
	spin_unlock(&head->lock);
	spin_unlock(&delayed_refs->lock);

	BUG_ON(head->extent_op);
	if (head->must_insert_reserved)
		ret = 1;

	mutex_unlock(&head->mutex);
	btrfs_put_delayed_ref_head(head);
	return ret;
out:
	spin_unlock(&head->lock);

out_delayed_unlock:
	spin_unlock(&delayed_refs->lock);
	return 0;
}
void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   struct extent_buffer *buf,
			   u64 parent, int last_ref)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int pin = 1;
	int ret;

	if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
		int old_ref_mod, new_ref_mod;

		btrfs_ref_tree_mod(root, buf->start, buf->len, parent,
				   root->root_key.objectid,
				   btrfs_header_level(buf), 0,
				   BTRFS_DROP_DELAYED_REF);
		ret = btrfs_add_delayed_tree_ref(fs_info, trans, buf->start,
						 buf->len, parent,
						 root->root_key.objectid,
						 btrfs_header_level(buf),
						 BTRFS_DROP_DELAYED_REF, NULL,
						 &old_ref_mod, &new_ref_mod);
		BUG_ON(ret); /* -ENOMEM */
		pin = old_ref_mod >= 0 && new_ref_mod < 0;
	}

	if (last_ref && btrfs_header_generation(buf) == trans->transid) {
		struct btrfs_block_group_cache *cache;

		if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
			ret = check_ref_cleanup(trans, buf->start);
			if (!ret)
				goto out;
		}

		pin = 0;
		cache = btrfs_lookup_block_group(fs_info, buf->start);

		if (btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
			pin_down_extent(fs_info, cache, buf->start,
					buf->len, 1);
			btrfs_put_block_group(cache);
			goto out;
		}

		WARN_ON(test_bit(EXTENT_BUFFER_DIRTY, &buf->bflags));

		btrfs_add_free_space(cache, buf->start, buf->len);
		btrfs_free_reserved_bytes(cache, buf->len, 0);
		btrfs_put_block_group(cache);
		trace_btrfs_reserved_extent_free(fs_info, buf->start, buf->len);
	}
out:
	if (pin)
		add_pinned_bytes(fs_info, buf->len, btrfs_header_level(buf),
				 root->root_key.objectid);

	if (last_ref) {
		/*
		 * Deleting the buffer, clear the corrupt flag since it doesn't
		 * matter anymore.
		 */
		clear_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags);
	}
}
/* Can return -ENOMEM */
int btrfs_free_extent(struct btrfs_trans_handle *trans,
		      struct btrfs_root *root,
		      u64 bytenr, u64 num_bytes, u64 parent, u64 root_objectid,
		      u64 owner, u64 offset)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int old_ref_mod, new_ref_mod;
	int ret;

	if (btrfs_is_testing(fs_info))
		return 0;

	if (root_objectid != BTRFS_TREE_LOG_OBJECTID)
		btrfs_ref_tree_mod(root, bytenr, num_bytes, parent,
				   root_objectid, owner, offset,
				   BTRFS_DROP_DELAYED_REF);

	/*
	 * tree log blocks never actually go into the extent allocation
	 * tree, just update pinning info and exit early.
	 */
	if (root_objectid == BTRFS_TREE_LOG_OBJECTID) {
		WARN_ON(owner >= BTRFS_FIRST_FREE_OBJECTID);
		/* unlocks the pinned mutex */
		btrfs_pin_extent(fs_info, bytenr, num_bytes, 1);
		old_ref_mod = new_ref_mod = 0;
		ret = 0;
	} else if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
						 num_bytes, parent,
						 root_objectid, (int)owner,
						 BTRFS_DROP_DELAYED_REF, NULL,
						 &old_ref_mod, &new_ref_mod);
	} else {
		ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
						 num_bytes, parent,
						 root_objectid, owner, offset,
						 0, BTRFS_DROP_DELAYED_REF,
						 &old_ref_mod, &new_ref_mod);
	}

	if (ret == 0 && old_ref_mod >= 0 && new_ref_mod < 0)
		add_pinned_bytes(fs_info, num_bytes, owner, root_objectid);

	return ret;
}
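
/*
 * Illustrative sketch, not part of the original file: dropping one
 * reference on a data extent via btrfs_free_extent().  The function name
 * is an assumption; real callers take bytenr/num_bytes/ino/offset from
 * the file extent item they are removing.
 */
static int __maybe_unused
example_drop_data_ref(struct btrfs_trans_handle *trans,
		      struct btrfs_root *root, u64 bytenr, u64 num_bytes,
		      u64 ino, u64 file_offset)
{
	/* parent == 0: an indirect back ref keyed by root/owner/offset. */
	return btrfs_free_extent(trans, root, bytenr, num_bytes, 0,
				 root->root_key.objectid, ino, file_offset);
}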
/*
 * when we wait for progress in the block group caching, it's because
 * our allocation attempt failed at least once. So, we must sleep
 * and let some progress happen before we try again.
 *
 * This function will sleep at least once waiting for new free space to
 * show up, and then it will check the block group free space numbers
 * for our min num_bytes. Another option is to have it go ahead
 * and look in the rbtree for a free extent of a given size, but this
 * is a good start.
 *
 * Callers of this must check if cache->cached == BTRFS_CACHE_ERROR before using
 * any of the information in this block group.
 */
static noinline void
wait_block_group_cache_progress(struct btrfs_block_group_cache *cache,
				u64 num_bytes)
{
	struct btrfs_caching_control *caching_ctl;

	caching_ctl = get_caching_control(cache);
	if (!caching_ctl)
		return;

	wait_event(caching_ctl->wait, block_group_cache_done(cache) ||
		   (cache->free_space_ctl->free_space >= num_bytes));

	put_caching_control(caching_ctl);
}
static noinline int
wait_block_group_cache_done(struct btrfs_block_group_cache *cache)
{
	struct btrfs_caching_control *caching_ctl;
	int ret = 0;

	caching_ctl = get_caching_control(cache);
	if (!caching_ctl)
		return (cache->cached == BTRFS_CACHE_ERROR) ? -EIO : 0;

	wait_event(caching_ctl->wait, block_group_cache_done(cache));
	if (cache->cached == BTRFS_CACHE_ERROR)
		ret = -EIO;
	put_caching_control(caching_ctl);
	return ret;
}
int __get_raid_index(u64 flags)
{
	if (flags & BTRFS_BLOCK_GROUP_RAID10)
		return BTRFS_RAID_RAID10;
	else if (flags & BTRFS_BLOCK_GROUP_RAID1)
		return BTRFS_RAID_RAID1;
	else if (flags & BTRFS_BLOCK_GROUP_DUP)
		return BTRFS_RAID_DUP;
	else if (flags & BTRFS_BLOCK_GROUP_RAID0)
		return BTRFS_RAID_RAID0;
	else if (flags & BTRFS_BLOCK_GROUP_RAID5)
		return BTRFS_RAID_RAID5;
	else if (flags & BTRFS_BLOCK_GROUP_RAID6)
		return BTRFS_RAID_RAID6;

	return BTRFS_RAID_SINGLE; /* BTRFS_BLOCK_GROUP_SINGLE */
}

int get_block_group_index(struct btrfs_block_group_cache *cache)
{
	return __get_raid_index(cache->flags);
}

static const char *btrfs_raid_type_names[BTRFS_NR_RAID_TYPES] = {
	[BTRFS_RAID_RAID10]	= "raid10",
	[BTRFS_RAID_RAID1]	= "raid1",
	[BTRFS_RAID_DUP]	= "dup",
	[BTRFS_RAID_RAID0]	= "raid0",
	[BTRFS_RAID_SINGLE]	= "single",
	[BTRFS_RAID_RAID5]	= "raid5",
	[BTRFS_RAID_RAID6]	= "raid6",
};

static const char *get_raid_name(enum btrfs_raid_types type)
{
	if (type >= BTRFS_NR_RAID_TYPES)
		return NULL;

	return btrfs_raid_type_names[type];
}
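/*
 * Illustrative aside (hypothetical, not part of this file): the pair
 * above is a bounds-checked table lookup -- a dense enum indexes a
 * string table, and out-of-range values yield NULL instead of reading
 * past the array. A standalone user-space model of the same shape:
 */
#if 0	/* demo only; compile separately: cc -o raid_name raid_name.c */
#include <stdio.h>

static const char *names[] = {
	"raid10", "raid1", "dup", "raid0", "single", "raid5", "raid6",
};

static const char *name_of(unsigned int idx)
{
	if (idx >= sizeof(names) / sizeof(names[0]))
		return NULL;			/* reject bad indices */
	return names[idx];
}

int main(void)
{
	printf("%s\n", name_of(2));		/* prints "dup" */
	printf("%d\n", name_of(99) == NULL);	/* prints 1: bounds hit */
	return 0;
}
#endif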
enum btrfs_loop_type {
	LOOP_CACHING_NOWAIT = 0,
	LOOP_CACHING_WAIT = 1,
	LOOP_ALLOC_CHUNK = 2,
	LOOP_NO_EMPTY_SIZE = 3,
};
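/*
 * Illustrative aside (hypothetical, not kernel code): find_free_extent()
 * below walks these stages in order, escalating only after a full pass
 * over the candidate block groups fails. A standalone model of that
 * escalation:
 */
#if 0	/* demo only; compile separately */
#include <stdio.h>

enum loop_stage { CACHING_NOWAIT, CACHING_WAIT, ALLOC_CHUNK, NO_EMPTY_SIZE };

/* pretend allocator: succeeds only once a chunk allocation was forced */
static int try_alloc(enum loop_stage s)
{
	return s >= ALLOC_CHUNK ? 0 : -1;
}

int main(void)
{
	enum loop_stage s;

	for (s = CACHING_NOWAIT; s <= NO_EMPTY_SIZE; s++) {
		if (try_alloc(s) == 0) {
			printf("allocated at stage %d\n", (int)s);
			return 0;
		}
		/*
		 * Escalate: wait on caching, then force a chunk
		 * allocation, and finally retry with empty_size and
		 * empty_cluster forced to zero.
		 */
	}
	return 1;	/* the -ENOSPC analogue */
}
#endif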
static inline void
btrfs_lock_block_group(struct btrfs_block_group_cache *cache,
		       int delalloc)
{
	if (delalloc)
		down_read(&cache->data_rwsem);
}

static inline void
btrfs_grab_block_group(struct btrfs_block_group_cache *cache,
		       int delalloc)
{
	btrfs_get_block_group(cache);
	if (delalloc)
		down_read(&cache->data_rwsem);
}

static struct btrfs_block_group_cache *
btrfs_lock_cluster(struct btrfs_block_group_cache *block_group,
		   struct btrfs_free_cluster *cluster,
		   int delalloc)
{
	struct btrfs_block_group_cache *used_bg = NULL;

	spin_lock(&cluster->refill_lock);
	while (1) {
		used_bg = cluster->block_group;
		if (!used_bg)
			return NULL;

		if (used_bg == block_group)
			return used_bg;

		btrfs_get_block_group(used_bg);

		if (!delalloc)
			return used_bg;

		if (down_read_trylock(&used_bg->data_rwsem))
			return used_bg;

		spin_unlock(&cluster->refill_lock);

		/* We should only have one level of nesting. */
		down_read_nested(&used_bg->data_rwsem, SINGLE_DEPTH_NESTING);

		spin_lock(&cluster->refill_lock);
		if (used_bg == cluster->block_group)
			return used_bg;

		up_read(&used_bg->data_rwsem);
		btrfs_put_block_group(used_bg);
	}
}
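/*
 * Illustrative aside (hypothetical): btrfs_lock_cluster() above is a
 * trylock-then-revalidate pattern -- never block on a sleeping lock
 * while holding a spinlock. On contention it drops the spinlock, takes
 * the sleeping lock, re-takes the spinlock and checks that the object
 * is still the one it wanted. A user-space pthread model (refcounting,
 * which the kernel code uses to keep the group alive across the unlock,
 * is elided):
 */
#if 0	/* demo only; link with -lpthread */
#include <pthread.h>
#include <stddef.h>

struct bg {
	pthread_rwlock_t data_rwsem;	/* the sleeping lock */
};

struct cluster {
	pthread_mutex_t refill_lock;	/* stands in for the spinlock */
	struct bg *group;		/* protected by refill_lock */
};

/* on success (and on NULL) the "spinlock" is still held, as above */
static struct bg *lock_cluster_group(struct cluster *c)
{
	struct bg *g;

	pthread_mutex_lock(&c->refill_lock);
	while (1) {
		g = c->group;
		if (!g)
			return NULL;
		if (pthread_rwlock_tryrdlock(&g->data_rwsem) == 0)
			return g;	/* fast path, no blocking */

		/* slow path: drop, block, re-take, revalidate */
		pthread_mutex_unlock(&c->refill_lock);
		pthread_rwlock_rdlock(&g->data_rwsem);
		pthread_mutex_lock(&c->refill_lock);
		if (g == c->group)
			return g;	/* still current */
		pthread_rwlock_unlock(&g->data_rwsem);	/* raced, retry */
	}
}
#endif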
static inline void
btrfs_release_block_group(struct btrfs_block_group_cache *cache,
			  int delalloc)
{
	if (delalloc)
		up_read(&cache->data_rwsem);
	btrfs_put_block_group(cache);
}

/*
 * walks the btree of allocated extents and finds a hole of a given size.
 * The key ins is changed to record the hole:
 * ins->objectid == start position
 * ins->flags = BTRFS_EXTENT_ITEM_KEY
 * ins->offset == the size of the hole.
 * Any available blocks before search_start are skipped.
 *
 * If there is no suitable free space, we record the max size of the
 * largest free space extent seen during the search.
 */
static noinline int find_free_extent(struct btrfs_fs_info *fs_info,
				u64 ram_bytes, u64 num_bytes, u64 empty_size,
				u64 hint_byte, struct btrfs_key *ins,
				u64 flags, int delalloc)
{
	int ret = 0;
	struct btrfs_root *root = fs_info->extent_root;
	struct btrfs_free_cluster *last_ptr = NULL;
	struct btrfs_block_group_cache *block_group = NULL;
	u64 search_start = 0;
	u64 max_extent_size = 0;
	u64 empty_cluster = 0;
	struct btrfs_space_info *space_info;
	int loop = 0;
	int index = __get_raid_index(flags);
	bool failed_cluster_refill = false;
	bool failed_alloc = false;
	bool use_cluster = true;
	bool have_caching_bg = false;
	bool orig_have_caching_bg = false;
	bool full_search = false;

	WARN_ON(num_bytes < fs_info->sectorsize);
	ins->type = BTRFS_EXTENT_ITEM_KEY;
	ins->objectid = 0;
	ins->offset = 0;

	trace_find_free_extent(fs_info, num_bytes, empty_size, flags);

	space_info = __find_space_info(fs_info, flags);
	if (!space_info) {
		btrfs_err(fs_info, "No space info for %llu", flags);
		return -ENOSPC;
	}

	/*
	 * If our free space is heavily fragmented we may not be able to make
	 * big contiguous allocations, so instead of doing the expensive search
	 * for free space, simply return ENOSPC with our max_extent_size so we
	 * can go ahead and search for a more manageable chunk.
	 *
	 * If our max_extent_size is large enough for our allocation simply
	 * disable clustering since we will likely not be able to find enough
	 * space to create a cluster and induce latency trying.
	 */
	if (unlikely(space_info->max_extent_size)) {
		spin_lock(&space_info->lock);
		if (space_info->max_extent_size &&
		    num_bytes > space_info->max_extent_size) {
			ins->offset = space_info->max_extent_size;
			spin_unlock(&space_info->lock);
			return -ENOSPC;
		} else if (space_info->max_extent_size) {
			use_cluster = false;
		}
		spin_unlock(&space_info->lock);
	}

	last_ptr = fetch_cluster_info(fs_info, space_info, &empty_cluster);
	if (last_ptr) {
		spin_lock(&last_ptr->lock);
		if (last_ptr->block_group)
			hint_byte = last_ptr->window_start;
		if (last_ptr->fragmented) {
			/*
			 * We still set window_start so we can keep track of the
			 * last place we found an allocation to try and save
			 * some time.
			 */
			hint_byte = last_ptr->window_start;
			use_cluster = false;
		}
		spin_unlock(&last_ptr->lock);
	}

	search_start = max(search_start, first_logical_byte(fs_info, 0));
	search_start = max(search_start, hint_byte);
	if (search_start == hint_byte) {
		block_group = btrfs_lookup_block_group(fs_info, search_start);
		/*
		 * we don't want to use the block group if it doesn't match our
		 * allocation bits, or if it's not cached.
		 *
		 * However if we are re-searching with an ideal block group
		 * picked out then we don't care that the block group is cached.
		 */
		if (block_group && block_group_bits(block_group, flags) &&
		    block_group->cached != BTRFS_CACHE_NO) {
			down_read(&space_info->groups_sem);
			if (list_empty(&block_group->list) ||
			    block_group->ro) {
				/*
				 * someone is removing this block group,
				 * we can't jump into the have_block_group
				 * target because our list pointers are not
				 * valid
				 */
				btrfs_put_block_group(block_group);
				up_read(&space_info->groups_sem);
			} else {
				index = get_block_group_index(block_group);
				btrfs_lock_block_group(block_group, delalloc);
				goto have_block_group;
			}
		} else if (block_group) {
			btrfs_put_block_group(block_group);
		}
	}
search:
	have_caching_bg = false;
	if (index == 0 || index == __get_raid_index(flags))
		full_search = true;
	down_read(&space_info->groups_sem);
	list_for_each_entry(block_group, &space_info->block_groups[index],
			    list) {
		u64 offset;
		int cached;

		/* If the block group is read-only, we can skip it entirely. */
		if (unlikely(block_group->ro))
			continue;

		btrfs_grab_block_group(block_group, delalloc);
		search_start = block_group->key.objectid;

		/*
		 * this can happen if we end up cycling through all the
		 * raid types, but we want to make sure we only allocate
		 * for the proper type.
		 */
		if (!block_group_bits(block_group, flags)) {
			u64 extra = BTRFS_BLOCK_GROUP_DUP |
				BTRFS_BLOCK_GROUP_RAID1 |
				BTRFS_BLOCK_GROUP_RAID5 |
				BTRFS_BLOCK_GROUP_RAID6 |
				BTRFS_BLOCK_GROUP_RAID10;

			/*
			 * if they asked for extra copies and this block group
			 * doesn't provide them, bail.  This does allow us to
			 * fill raid0 from raid1.
			 */
			if ((flags & extra) && !(block_group->flags & extra))
				goto loop;
		}

have_block_group:
		cached = block_group_cache_done(block_group);
		if (unlikely(!cached)) {
			have_caching_bg = true;
			ret = cache_block_group(block_group, 0);
			BUG_ON(ret < 0);
			ret = 0;
		}

		if (unlikely(block_group->cached == BTRFS_CACHE_ERROR))
			goto loop;

		/*
		 * Ok we want to try and use the cluster allocator, so
		 * let's look there
		 */
		if (last_ptr && use_cluster) {
			struct btrfs_block_group_cache *used_block_group;
			unsigned long aligned_cluster;
			/*
			 * the refill lock keeps out other
			 * people trying to start a new cluster
			 */
			used_block_group = btrfs_lock_cluster(block_group,
							      last_ptr,
							      delalloc);
			if (!used_block_group)
				goto refill_cluster;

			if (used_block_group != block_group &&
			    (used_block_group->ro ||
			     !block_group_bits(used_block_group, flags)))
				goto release_cluster;

			offset = btrfs_alloc_from_cluster(used_block_group,
						last_ptr,
						num_bytes,
						used_block_group->key.objectid,
						&max_extent_size);
			if (offset) {
				/* we have a block, we're done */
				spin_unlock(&last_ptr->refill_lock);
				trace_btrfs_reserve_extent_cluster(fs_info,
						used_block_group,
						search_start, num_bytes);
				if (used_block_group != block_group) {
					btrfs_release_block_group(block_group,
								  delalloc);
					block_group = used_block_group;
				}
				goto checks;
			}

			WARN_ON(last_ptr->block_group != used_block_group);
release_cluster:
			/* If we are on LOOP_NO_EMPTY_SIZE, we can't
			 * set up a new cluster, so let's just skip it
			 * and let the allocator find whatever block
			 * it can find. If we reach this point, we
			 * will have tried the cluster allocator
			 * plenty of times and not have found
			 * anything, so we are likely way too
			 * fragmented for the clustering stuff to find
			 * anything.
			 *
			 * However, if the cluster is taken from the
			 * current block group, release the cluster
			 * first, so that we stand a better chance of
			 * succeeding in the unclustered
			 * allocation.  */
			if (loop >= LOOP_NO_EMPTY_SIZE &&
			    used_block_group != block_group) {
				spin_unlock(&last_ptr->refill_lock);
				btrfs_release_block_group(used_block_group,
							  delalloc);
				goto unclustered_alloc;
			}

			/*
			 * this cluster didn't work out, free it and
			 * start over
			 */
			btrfs_return_cluster_to_free_space(NULL, last_ptr);

			if (used_block_group != block_group)
				btrfs_release_block_group(used_block_group,
							  delalloc);
refill_cluster:
			if (loop >= LOOP_NO_EMPTY_SIZE) {
				spin_unlock(&last_ptr->refill_lock);
				goto unclustered_alloc;
			}

			aligned_cluster = max_t(unsigned long,
						empty_cluster + empty_size,
						block_group->full_stripe_len);

			/* allocate a cluster in this block group */
			ret = btrfs_find_space_cluster(fs_info, block_group,
						       last_ptr, search_start,
						       num_bytes,
						       aligned_cluster);
			if (ret == 0) {
				/*
				 * now pull our allocation out of this
				 * cluster
				 */
				offset = btrfs_alloc_from_cluster(block_group,
							last_ptr,
							num_bytes,
							search_start,
							&max_extent_size);
				if (offset) {
					/* we found one, proceed */
					spin_unlock(&last_ptr->refill_lock);
					trace_btrfs_reserve_extent_cluster(fs_info,
						block_group, search_start,
						num_bytes);
					goto checks;
				}
			} else if (!cached && loop > LOOP_CACHING_NOWAIT
				   && !failed_cluster_refill) {
				spin_unlock(&last_ptr->refill_lock);

				failed_cluster_refill = true;
				wait_block_group_cache_progress(block_group,
				       num_bytes + empty_cluster + empty_size);
				goto have_block_group;
			}

			/*
			 * at this point we either didn't find a cluster
			 * or we weren't able to allocate a block from our
			 * cluster.  Free the cluster we've been trying
			 * to use, and go to the next block group
			 */
			btrfs_return_cluster_to_free_space(NULL, last_ptr);
			spin_unlock(&last_ptr->refill_lock);
			goto loop;
		}

unclustered_alloc:
		/*
		 * We are doing an unclustered alloc, set the fragmented flag so
		 * we don't bother trying to set up a cluster again until we get
		 * more space.
		 */
		if (unlikely(last_ptr)) {
			spin_lock(&last_ptr->lock);
			last_ptr->fragmented = 1;
			spin_unlock(&last_ptr->lock);
		}
		if (cached) {
			struct btrfs_free_space_ctl *ctl =
				block_group->free_space_ctl;

			spin_lock(&ctl->tree_lock);
			if (ctl->free_space <
			    num_bytes + empty_cluster + empty_size) {
				if (ctl->free_space > max_extent_size)
					max_extent_size = ctl->free_space;
				spin_unlock(&ctl->tree_lock);
				goto loop;
			}
			spin_unlock(&ctl->tree_lock);
		}

		offset = btrfs_find_space_for_alloc(block_group, search_start,
						    num_bytes, empty_size,
						    &max_extent_size);
		/*
		 * If we didn't find a chunk, and we haven't failed on this
		 * block group before, and this block group is in the middle of
		 * caching and we are ok with waiting, then go ahead and wait
		 * for progress to be made, and set failed_alloc to true.
		 *
		 * If failed_alloc is true then we've already waited on this
		 * block group once and should move on to the next block group.
		 */
		if (!offset && !failed_alloc && !cached &&
		    loop > LOOP_CACHING_NOWAIT) {
			wait_block_group_cache_progress(block_group,
						num_bytes + empty_size);
			failed_alloc = true;
			goto have_block_group;
		} else if (!offset) {
			goto loop;
		}
checks:
		search_start = ALIGN(offset, fs_info->stripesize);

		/* move on to the next group */
		if (search_start + num_bytes >
		    block_group->key.objectid + block_group->key.offset) {
			btrfs_add_free_space(block_group, offset, num_bytes);
			goto loop;
		}

		if (offset < search_start)
			btrfs_add_free_space(block_group, offset,
					     search_start - offset);
		BUG_ON(offset > search_start);

		ret = btrfs_add_reserved_bytes(block_group, ram_bytes,
					       num_bytes, delalloc);
		if (ret == -EAGAIN) {
			btrfs_add_free_space(block_group, offset, num_bytes);
			goto loop;
		}
		btrfs_inc_block_group_reservations(block_group);

		/* we are all good, let's return */
		ins->objectid = search_start;
		ins->offset = num_bytes;

		trace_btrfs_reserve_extent(fs_info, block_group,
					   search_start, num_bytes);
		btrfs_release_block_group(block_group, delalloc);
		break;
loop:
		failed_cluster_refill = false;
		failed_alloc = false;
		BUG_ON(index != get_block_group_index(block_group));
		btrfs_release_block_group(block_group, delalloc);
		cond_resched();
	}
	up_read(&space_info->groups_sem);

	if ((loop == LOOP_CACHING_NOWAIT) && have_caching_bg
		&& !orig_have_caching_bg)
		orig_have_caching_bg = true;

	if (!ins->objectid && loop >= LOOP_CACHING_WAIT && have_caching_bg)
		goto search;

	if (!ins->objectid && ++index < BTRFS_NR_RAID_TYPES)
		goto search;

	/*
	 * LOOP_CACHING_NOWAIT, search partially cached block groups, kicking
	 *			caching kthreads as we move along
	 * LOOP_CACHING_WAIT, search everything, and wait if our bg is caching
	 * LOOP_ALLOC_CHUNK, force a chunk allocation and try again
	 * LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try
	 *		       again
	 */
	if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE) {
		index = 0;
		if (loop == LOOP_CACHING_NOWAIT) {
			/*
			 * We want to skip the LOOP_CACHING_WAIT step if we
			 * don't have any uncached bgs and we've already done a
			 * full search through.
			 */
			if (orig_have_caching_bg || !full_search)
				loop = LOOP_CACHING_WAIT;
			else
				loop = LOOP_ALLOC_CHUNK;
		} else {
			loop++;
		}

		if (loop == LOOP_ALLOC_CHUNK) {
			struct btrfs_trans_handle *trans;
			int exist = 0;

			trans = current->journal_info;
			if (trans)
				exist = 1;
			else
				trans = btrfs_join_transaction(root);

			if (IS_ERR(trans)) {
				ret = PTR_ERR(trans);
				goto out;
			}

			ret = do_chunk_alloc(trans, fs_info, flags,
					     CHUNK_ALLOC_FORCE);

			/*
			 * If we can't allocate a new chunk we've already looped
			 * through at least once, move on to the NO_EMPTY_SIZE
			 * case.
			 */
			if (ret == -ENOSPC)
				loop = LOOP_NO_EMPTY_SIZE;

			/*
			 * Do not bail out on ENOSPC since we
			 * can do more things.
			 */
			if (ret < 0 && ret != -ENOSPC)
				btrfs_abort_transaction(trans, ret);
			else
				ret = 0;
			if (!exist)
				btrfs_end_transaction(trans);
			if (ret)
				goto out;
		}

		if (loop == LOOP_NO_EMPTY_SIZE) {
			/*
			 * Don't loop again if we already have no empty_size and
			 * no empty_cluster.
			 */
			if (empty_size == 0 &&
			    empty_cluster == 0) {
				ret = -ENOSPC;
				goto out;
			}
			empty_size = 0;
			empty_cluster = 0;
		}

		goto search;
	} else if (!ins->objectid) {
		ret = -ENOSPC;
	} else if (ins->objectid) {
		if (!use_cluster && last_ptr) {
			spin_lock(&last_ptr->lock);
			last_ptr->window_start = ins->objectid;
			spin_unlock(&last_ptr->lock);
		}
		ret = 0;
	}
out:
	if (ret == -ENOSPC) {
		spin_lock(&space_info->lock);
		space_info->max_extent_size = max_extent_size;
		spin_unlock(&space_info->lock);
		ins->offset = max_extent_size;
	}
	return ret;
}
static void dump_space_info(struct btrfs_fs_info *fs_info,
			    struct btrfs_space_info *info, u64 bytes,
			    int dump_block_groups)
{
	struct btrfs_block_group_cache *cache;
	int index = 0;

	spin_lock(&info->lock);
	btrfs_info(fs_info, "space_info %llu has %llu free, is %sfull",
		   info->flags,
		   info->total_bytes - btrfs_space_info_used(info, true),
		   info->full ? "" : "not ");
	btrfs_info(fs_info,
		"space_info total=%llu, used=%llu, pinned=%llu, reserved=%llu, may_use=%llu, readonly=%llu",
		info->total_bytes, info->bytes_used, info->bytes_pinned,
		info->bytes_reserved, info->bytes_may_use,
		info->bytes_readonly);
	spin_unlock(&info->lock);

	if (!dump_block_groups)
		return;

	down_read(&info->groups_sem);
again:
	list_for_each_entry(cache, &info->block_groups[index], list) {
		spin_lock(&cache->lock);
		btrfs_info(fs_info,
			"block group %llu has %llu bytes, %llu used %llu pinned %llu reserved %s",
			cache->key.objectid, cache->key.offset,
			btrfs_block_group_used(&cache->item), cache->pinned,
			cache->reserved, cache->ro ? "[readonly]" : "");
		btrfs_dump_free_space(cache, bytes);
		spin_unlock(&cache->lock);
	}
	if (++index < BTRFS_NR_RAID_TYPES)
		goto again;
	up_read(&info->groups_sem);
}
int btrfs_reserve_extent(struct btrfs_root *root, u64 ram_bytes,
			 u64 num_bytes, u64 min_alloc_size,
			 u64 empty_size, u64 hint_byte,
			 struct btrfs_key *ins, int is_data, int delalloc)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	bool final_tried = num_bytes == min_alloc_size;
	u64 flags;
	int ret;

	flags = get_alloc_profile_by_root(root, is_data);
again:
	WARN_ON(num_bytes < fs_info->sectorsize);
	ret = find_free_extent(fs_info, ram_bytes, num_bytes, empty_size,
			       hint_byte, ins, flags, delalloc);
	if (!ret && !is_data) {
		btrfs_dec_block_group_reservations(fs_info, ins->objectid);
	} else if (ret == -ENOSPC) {
		if (!final_tried && ins->offset) {
			num_bytes = min(num_bytes >> 1, ins->offset);
			num_bytes = round_down(num_bytes,
					       fs_info->sectorsize);
			num_bytes = max(num_bytes, min_alloc_size);
			ram_bytes = num_bytes;
			if (num_bytes == min_alloc_size)
				final_tried = true;
			goto again;
		} else if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
			struct btrfs_space_info *sinfo;

			sinfo = __find_space_info(fs_info, flags);
			btrfs_err(fs_info,
				  "allocation failed flags %llu, wanted %llu",
				  flags, num_bytes);
			if (sinfo)
				dump_space_info(fs_info, sinfo, num_bytes, 1);
		}
	}

	return ret;
}
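/*
 * Illustrative aside (hypothetical): the -ENOSPC retry above halves the
 * request, caps it at the largest free extent the search reported back
 * through ins->offset, rounds down to the sector size and clamps to
 * min_alloc_size. A standalone model of that shrinking sequence:
 */
#if 0	/* demo only; compile separately */
#include <stdio.h>

static unsigned long long next_try(unsigned long long num_bytes,
				   unsigned long long max_found,
				   unsigned long long sectorsize,
				   unsigned long long min_alloc)
{
	unsigned long long n = num_bytes >> 1;

	if (n > max_found)
		n = max_found;		/* don't ask for more than exists */
	n &= ~(sectorsize - 1);		/* round down; sectorsize is 2^k */
	if (n < min_alloc)
		n = min_alloc;		/* the final attempt size */
	return n;
}

int main(void)
{
	unsigned long long n = 1ULL << 20;	/* start by asking for 1 MiB */

	while (n > 4096) {
		n = next_try(n, 300000, 4096, 4096);
		printf("retrying with %llu bytes\n", n);
	}
	return 0;
}
#endif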
static int __btrfs_free_reserved_extent(struct btrfs_fs_info *fs_info,
					u64 start, u64 len,
					int pin, int delalloc)
{
	struct btrfs_block_group_cache *cache;
	int ret = 0;

	cache = btrfs_lookup_block_group(fs_info, start);
	if (!cache) {
		btrfs_err(fs_info, "Unable to find block group for %llu",
			  start);
		return -ENOSPC;
	}

	if (pin)
		pin_down_extent(fs_info, cache, start, len, 1);
	else {
		if (btrfs_test_opt(fs_info, DISCARD))
			ret = btrfs_discard_extent(fs_info, start, len, NULL);
		btrfs_add_free_space(cache, start, len);
		btrfs_free_reserved_bytes(cache, len, delalloc);
		trace_btrfs_reserved_extent_free(fs_info, start, len);
	}

	btrfs_put_block_group(cache);
	return ret;
}

int btrfs_free_reserved_extent(struct btrfs_fs_info *fs_info,
			       u64 start, u64 len, int delalloc)
{
	return __btrfs_free_reserved_extent(fs_info, start, len, 0, delalloc);
}

int btrfs_free_and_pin_reserved_extent(struct btrfs_fs_info *fs_info,
				       u64 start, u64 len)
{
	return __btrfs_free_reserved_extent(fs_info, start, len, 1, 0);
}
static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
				      struct btrfs_fs_info *fs_info,
				      u64 parent, u64 root_objectid,
				      u64 flags, u64 owner, u64 offset,
				      struct btrfs_key *ins, int ref_mod)
{
	int ret;
	struct btrfs_extent_item *extent_item;
	struct btrfs_extent_inline_ref *iref;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	int type;
	u32 size;

	if (parent > 0)
		type = BTRFS_SHARED_DATA_REF_KEY;
	else
		type = BTRFS_EXTENT_DATA_REF_KEY;

	size = sizeof(*extent_item) + btrfs_extent_inline_ref_size(type);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->leave_spinning = 1;
	ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
				      ins, size);
	if (ret) {
		btrfs_free_path(path);
		return ret;
	}

	leaf = path->nodes[0];
	extent_item = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_item);
	btrfs_set_extent_refs(leaf, extent_item, ref_mod);
	btrfs_set_extent_generation(leaf, extent_item, trans->transid);
	btrfs_set_extent_flags(leaf, extent_item,
			       flags | BTRFS_EXTENT_FLAG_DATA);

	iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
	btrfs_set_extent_inline_ref_type(leaf, iref, type);
	if (parent > 0) {
		struct btrfs_shared_data_ref *ref;
		ref = (struct btrfs_shared_data_ref *)(iref + 1);
		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
		btrfs_set_shared_data_ref_count(leaf, ref, ref_mod);
	} else {
		struct btrfs_extent_data_ref *ref;
		ref = (struct btrfs_extent_data_ref *)(&iref->offset);
		btrfs_set_extent_data_ref_root(leaf, ref, root_objectid);
		btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
		btrfs_set_extent_data_ref_offset(leaf, ref, offset);
		btrfs_set_extent_data_ref_count(leaf, ref, ref_mod);
	}

	btrfs_mark_buffer_dirty(path->nodes[0]);
	btrfs_free_path(path);

	ret = remove_from_free_space_tree(trans, fs_info, ins->objectid,
					  ins->offset);
	if (ret)
		return ret;

	ret = update_block_group(trans, fs_info, ins->objectid, ins->offset, 1);
	if (ret) { /* -ENOENT, logic error */
		btrfs_err(fs_info, "update block group failed for %llu %llu",
			  ins->objectid, ins->offset);
		BUG();
	}
	trace_btrfs_reserved_extent_alloc(fs_info, ins->objectid, ins->offset);
	return ret;
}
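/*
 * Illustrative aside (hypothetical): alloc_reserved_file_extent() above
 * stores a variable-sized inline ref directly behind the fixed
 * btrfs_extent_item header within a single item, choosing the ref
 * struct by type. A standalone user-space model of that "fixed header
 * plus inline record" layout:
 */
#if 0	/* demo only; compile separately */
#include <stdio.h>
#include <stdlib.h>

struct hdr  { unsigned long long refs; };		/* fixed part */
struct sref { unsigned int count; };			/* "shared" flavour */
struct kref { unsigned int root, objectid, count; };	/* "keyed" flavour */

int main(void)
{
	int shared = 0;
	size_t size = sizeof(struct hdr) +
		      (shared ? sizeof(struct sref) : sizeof(struct kref));
	unsigned char *item = calloc(1, size);
	struct hdr *h = (struct hdr *)item;

	h->refs = 1;
	if (shared) {
		struct sref *r = (struct sref *)(h + 1);
		r->count = 1;
	} else {
		struct kref *r = (struct kref *)(h + 1);
		r->root = 5;		/* hypothetical tree id */
		r->objectid = 257;	/* hypothetical inode */
		r->count = 1;
	}
	printf("item size %zu\n", size);
	free(item);
	return 0;
}
#endif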
static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
				     struct btrfs_fs_info *fs_info,
				     u64 parent, u64 root_objectid,
				     u64 flags, struct btrfs_disk_key *key,
				     int level, struct btrfs_key *ins)
{
	int ret;
	struct btrfs_extent_item *extent_item;
	struct btrfs_tree_block_info *block_info;
	struct btrfs_extent_inline_ref *iref;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	u32 size = sizeof(*extent_item) + sizeof(*iref);
	u64 num_bytes = ins->offset;
	bool skinny_metadata = btrfs_fs_incompat(fs_info, SKINNY_METADATA);

	if (!skinny_metadata)
		size += sizeof(*block_info);

	path = btrfs_alloc_path();
	if (!path) {
		btrfs_free_and_pin_reserved_extent(fs_info, ins->objectid,
						   fs_info->nodesize);
		return -ENOMEM;
	}

	path->leave_spinning = 1;
	ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
				      ins, size);
	if (ret) {
		btrfs_free_path(path);
		btrfs_free_and_pin_reserved_extent(fs_info, ins->objectid,
						   fs_info->nodesize);
		return ret;
	}

	leaf = path->nodes[0];
	extent_item = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_item);
	btrfs_set_extent_refs(leaf, extent_item, 1);
	btrfs_set_extent_generation(leaf, extent_item, trans->transid);
	btrfs_set_extent_flags(leaf, extent_item,
			       flags | BTRFS_EXTENT_FLAG_TREE_BLOCK);

	if (skinny_metadata) {
		iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
		num_bytes = fs_info->nodesize;
	} else {
		block_info = (struct btrfs_tree_block_info *)(extent_item + 1);
		btrfs_set_tree_block_key(leaf, block_info, key);
		btrfs_set_tree_block_level(leaf, block_info, level);
		iref = (struct btrfs_extent_inline_ref *)(block_info + 1);
	}

	if (parent > 0) {
		BUG_ON(!(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
		btrfs_set_extent_inline_ref_type(leaf, iref,
						 BTRFS_SHARED_BLOCK_REF_KEY);
		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
	} else {
		btrfs_set_extent_inline_ref_type(leaf, iref,
						 BTRFS_TREE_BLOCK_REF_KEY);
		btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
	}

	btrfs_mark_buffer_dirty(leaf);
	btrfs_free_path(path);

	ret = remove_from_free_space_tree(trans, fs_info, ins->objectid,
					  num_bytes);
	if (ret)
		return ret;

	ret = update_block_group(trans, fs_info, ins->objectid,
				 fs_info->nodesize, 1);
	if (ret) { /* -ENOENT, logic error */
		btrfs_err(fs_info, "update block group failed for %llu %llu",
			  ins->objectid, ins->offset);
		BUG();
	}

	trace_btrfs_reserved_extent_alloc(fs_info, ins->objectid,
					  fs_info->nodesize);
	return ret;
}
int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root, u64 owner,
				     u64 offset, u64 ram_bytes,
				     struct btrfs_key *ins)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret;

	BUG_ON(root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID);

	btrfs_ref_tree_mod(root, ins->objectid, ins->offset, 0,
			   root->root_key.objectid, owner, offset,
			   BTRFS_ADD_DELAYED_EXTENT);

	ret = btrfs_add_delayed_data_ref(fs_info, trans, ins->objectid,
					 ins->offset, 0,
					 root->root_key.objectid, owner,
					 offset, ram_bytes,
					 BTRFS_ADD_DELAYED_EXTENT, NULL, NULL);
	return ret;
}

/*
 * this is used by the tree logging recovery code.  It records that
 * an extent has been allocated and makes sure to clear the free
 * space cache bits as well
 */
int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
				   struct btrfs_fs_info *fs_info,
				   u64 root_objectid, u64 owner, u64 offset,
				   struct btrfs_key *ins)
{
	int ret;
	struct btrfs_block_group_cache *block_group;
	struct btrfs_space_info *space_info;

	/*
	 * Mixed block groups will exclude before processing the log so we only
	 * need to do the exclude dance if this fs isn't mixed.
	 */
	if (!btrfs_fs_incompat(fs_info, MIXED_GROUPS)) {
		ret = __exclude_logged_extent(fs_info, ins->objectid,
					      ins->offset);
		if (ret)
			return ret;
	}

	block_group = btrfs_lookup_block_group(fs_info, ins->objectid);
	if (!block_group)
		return -EINVAL;

	space_info = block_group->space_info;
	spin_lock(&space_info->lock);
	spin_lock(&block_group->lock);
	space_info->bytes_reserved += ins->offset;
	block_group->reserved += ins->offset;
	spin_unlock(&block_group->lock);
	spin_unlock(&space_info->lock);

	ret = alloc_reserved_file_extent(trans, fs_info, 0, root_objectid,
					 0, owner, offset, ins, 1);
	btrfs_put_block_group(block_group);
	return ret;
}
static struct extent_buffer *
btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		      u64 bytenr, int level)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_buffer *buf;

	buf = btrfs_find_create_tree_block(fs_info, bytenr);
	if (IS_ERR(buf))
		return buf;

	btrfs_set_header_generation(buf, trans->transid);
	btrfs_set_buffer_lockdep_class(root->root_key.objectid, buf, level);
	btrfs_tree_lock(buf);
	clean_tree_block(fs_info, buf);
	clear_bit(EXTENT_BUFFER_STALE, &buf->bflags);

	btrfs_set_lock_blocking(buf);
	set_extent_buffer_uptodate(buf);

	if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
		buf->log_index = root->log_transid % 2;
		/*
		 * we allow two log transactions at a time, use different
		 * EXTENT bits to differentiate dirty pages.
		 */
		if (buf->log_index == 0)
			set_extent_dirty(&root->dirty_log_pages, buf->start,
					 buf->start + buf->len - 1, GFP_NOFS);
		else
			set_extent_new(&root->dirty_log_pages, buf->start,
				       buf->start + buf->len - 1);
	} else {
		buf->log_index = -1;
		set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
				 buf->start + buf->len - 1, GFP_NOFS);
	}
	trans->dirty = true;
	/* this returns a buffer locked for blocking */
	return buf;
}
static struct btrfs_block_rsv *
use_block_rsv(struct btrfs_trans_handle *trans,
	      struct btrfs_root *root, u32 blocksize)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_block_rsv *block_rsv;
	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
	int ret;
	bool global_updated = false;

	block_rsv = get_block_rsv(trans, root);

	if (unlikely(block_rsv->size == 0))
		goto try_reserve;
again:
	ret = block_rsv_use_bytes(block_rsv, blocksize);
	if (!ret)
		return block_rsv;

	if (block_rsv->failfast)
		return ERR_PTR(ret);

	if (block_rsv->type == BTRFS_BLOCK_RSV_GLOBAL && !global_updated) {
		global_updated = true;
		update_global_block_rsv(fs_info);
		goto again;
	}

	if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
		static DEFINE_RATELIMIT_STATE(_rs,
				DEFAULT_RATELIMIT_INTERVAL * 10,
				/*DEFAULT_RATELIMIT_BURST*/ 1);
		if (__ratelimit(&_rs))
			WARN(1, KERN_DEBUG
				"BTRFS: block rsv returned %d\n", ret);
	}
try_reserve:
	ret = reserve_metadata_bytes(root, block_rsv, blocksize,
				     BTRFS_RESERVE_NO_FLUSH);
	if (!ret)
		return block_rsv;
	/*
	 * If we couldn't reserve metadata bytes, try to use some from
	 * the global reserve if its space type is the same as the global
	 * reservation's.
	 */
	if (block_rsv->type != BTRFS_BLOCK_RSV_GLOBAL &&
	    block_rsv->space_info == global_rsv->space_info) {
		ret = block_rsv_use_bytes(global_rsv, blocksize);
		if (!ret)
			return global_rsv;
	}
	return ERR_PTR(ret);
}

static void unuse_block_rsv(struct btrfs_fs_info *fs_info,
			    struct btrfs_block_rsv *block_rsv, u32 blocksize)
{
	block_rsv_add_bytes(block_rsv, blocksize, 0);
	block_rsv_release_bytes(fs_info, block_rsv, NULL, 0);
}
/*
 * finds a free extent and does all the dirty work required for allocation.
 * returns the tree buffer or an ERR_PTR on error.
 */
struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
					     struct btrfs_root *root,
					     u64 parent, u64 root_objectid,
					     const struct btrfs_disk_key *key,
					     int level, u64 hint,
					     u64 empty_size)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_key ins;
	struct btrfs_block_rsv *block_rsv;
	struct extent_buffer *buf;
	struct btrfs_delayed_extent_op *extent_op;
	u64 flags = 0;
	int ret;
	u32 blocksize = fs_info->nodesize;
	bool skinny_metadata = btrfs_fs_incompat(fs_info, SKINNY_METADATA);

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
	if (btrfs_is_testing(fs_info)) {
		buf = btrfs_init_new_buffer(trans, root, root->alloc_bytenr,
					    level);
		if (!IS_ERR(buf))
			root->alloc_bytenr += blocksize;
		return buf;
	}
#endif

	block_rsv = use_block_rsv(trans, root, blocksize);
	if (IS_ERR(block_rsv))
		return ERR_CAST(block_rsv);

	ret = btrfs_reserve_extent(root, blocksize, blocksize, blocksize,
				   empty_size, hint, &ins, 0, 0);
	if (ret)
		goto out_unuse;

	buf = btrfs_init_new_buffer(trans, root, ins.objectid, level);
	if (IS_ERR(buf)) {
		ret = PTR_ERR(buf);
		goto out_free_reserved;
	}

	if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) {
		if (parent == 0)
			parent = ins.objectid;
		flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
	} else
		BUG_ON(parent > 0);

	if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {
		extent_op = btrfs_alloc_delayed_extent_op();
		if (!extent_op) {
			ret = -ENOMEM;
			goto out_free_buf;
		}
		if (key)
			memcpy(&extent_op->key, key, sizeof(extent_op->key));
		else
			memset(&extent_op->key, 0, sizeof(extent_op->key));
		extent_op->flags_to_set = flags;
		extent_op->update_key = skinny_metadata ? false : true;
		extent_op->update_flags = true;
		extent_op->is_data = false;
		extent_op->level = level;

		btrfs_ref_tree_mod(root, ins.objectid, ins.offset, parent,
				   root_objectid, level, 0,
				   BTRFS_ADD_DELAYED_EXTENT);
		ret = btrfs_add_delayed_tree_ref(fs_info, trans, ins.objectid,
						 ins.offset, parent,
						 root_objectid, level,
						 BTRFS_ADD_DELAYED_EXTENT,
						 extent_op, NULL, NULL);
		if (ret)
			goto out_free_delayed;
	}
	return buf;

out_free_delayed:
	btrfs_free_delayed_extent_op(extent_op);
out_free_buf:
	free_extent_buffer(buf);
out_free_reserved:
	btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 0);
out_unuse:
	unuse_block_rsv(fs_info, block_rsv, blocksize);
	return ERR_PTR(ret);
}
struct walk_control {
	u64 refs[BTRFS_MAX_LEVEL];
	u64 flags[BTRFS_MAX_LEVEL];
	struct btrfs_key update_progress;
	int stage;
	int level;
	int shared_level;
	int update_ref;
	int keep_locks;
	int reada_slot;
	int reada_count;
	int for_reloc;
};

#define DROP_REFERENCE	1
#define UPDATE_BACKREF	2
static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     struct walk_control *wc,
				     struct btrfs_path *path)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 bytenr;
	u64 generation;
	u64 refs;
	u64 flags;
	u32 nritems;
	struct btrfs_key key;
	struct extent_buffer *eb;
	int ret;
	int slot;
	int nread = 0;

	if (path->slots[wc->level] < wc->reada_slot) {
		wc->reada_count = wc->reada_count * 2 / 3;
		wc->reada_count = max(wc->reada_count, 2);
	} else {
		wc->reada_count = wc->reada_count * 3 / 2;
		wc->reada_count = min_t(int, wc->reada_count,
					BTRFS_NODEPTRS_PER_BLOCK(fs_info));
	}

	eb = path->nodes[wc->level];
	nritems = btrfs_header_nritems(eb);

	for (slot = path->slots[wc->level]; slot < nritems; slot++) {
		if (nread >= wc->reada_count)
			break;

		cond_resched();
		bytenr = btrfs_node_blockptr(eb, slot);
		generation = btrfs_node_ptr_generation(eb, slot);

		if (slot == path->slots[wc->level])
			goto reada;

		if (wc->stage == UPDATE_BACKREF &&
		    generation <= root->root_key.offset)
			continue;

		/* We don't lock the tree block, it's OK to be racy here */
		ret = btrfs_lookup_extent_info(trans, fs_info, bytenr,
					       wc->level - 1, 1, &refs,
					       &flags);
		/* We don't care about errors in readahead. */
		if (ret < 0)
			continue;
		BUG_ON(refs == 0);

		if (wc->stage == DROP_REFERENCE) {
			if (refs == 1)
				goto reada;

			if (wc->level == 1 &&
			    (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
				continue;
			if (!wc->update_ref ||
			    generation <= root->root_key.offset)
				continue;
			btrfs_node_key_to_cpu(eb, &key, slot);
			ret = btrfs_comp_cpu_keys(&key,
						  &wc->update_progress);
			if (ret < 0)
				continue;
		} else {
			if (wc->level == 1 &&
			    (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
				continue;
		}
reada:
		readahead_tree_block(fs_info, bytenr);
		nread++;
	}
	wc->reada_slot = slot;
}
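/*
 * Illustrative aside (hypothetical): reada_walk_down() above keeps an
 * adaptive readahead window -- grow it by 3/2 while the walk keeps
 * consuming it, shrink it by 2/3 when the walk falls behind, clamped to
 * [2, pointers-per-block]. A standalone model:
 */
#if 0	/* demo only; compile separately */
#include <stdio.h>

static int resize_window(int count, int fully_consumed, int max)
{
	if (!fully_consumed) {
		count = count * 2 / 3;		/* walker fell behind */
		return count < 2 ? 2 : count;
	}
	count = count * 3 / 2;			/* window exhausted */
	return count > max ? max : count;
}

int main(void)
{
	int w = 4, i;

	for (i = 0; i < 5; i++) {
		w = resize_window(w, 1, 493);	/* 493: made-up fanout */
		printf("window=%d\n", w);
	}
	printf("after a stall: %d\n", resize_window(w, 0, 493));
	return 0;
}
#endif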
/*
 * helper to process tree block while walking down the tree.
 *
 * when wc->stage == UPDATE_BACKREF, this function updates
 * back refs for pointers in the block.
 *
 * NOTE: return value 1 means we should stop walking down.
 */
static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct btrfs_path *path,
				   struct walk_control *wc, int lookup_info)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int level = wc->level;
	struct extent_buffer *eb = path->nodes[level];
	u64 flag = BTRFS_BLOCK_FLAG_FULL_BACKREF;
	int ret;

	if (wc->stage == UPDATE_BACKREF &&
	    btrfs_header_owner(eb) != root->root_key.objectid)
		return 1;

	/*
	 * when reference count of tree block is 1, it won't increase
	 * again. once full backref flag is set, we never clear it.
	 */
	if (lookup_info &&
	    ((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) ||
	     (wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag)))) {
		BUG_ON(!path->locks[level]);
		ret = btrfs_lookup_extent_info(trans, fs_info,
					       eb->start, level, 1,
					       &wc->refs[level],
					       &wc->flags[level]);
		BUG_ON(ret == -ENOMEM);
		if (ret)
			return ret;
		BUG_ON(wc->refs[level] == 0);
	}

	if (wc->stage == DROP_REFERENCE) {
		if (wc->refs[level] > 1)
			return 1;

		if (path->locks[level] && !wc->keep_locks) {
			btrfs_tree_unlock_rw(eb, path->locks[level]);
			path->locks[level] = 0;
		}
		return 0;
	}

	/* wc->stage == UPDATE_BACKREF */
	if (!(wc->flags[level] & flag)) {
		BUG_ON(!path->locks[level]);
		ret = btrfs_inc_ref(trans, root, eb, 1);
		BUG_ON(ret); /* -ENOMEM */
		ret = btrfs_dec_ref(trans, root, eb, 0);
		BUG_ON(ret); /* -ENOMEM */
		ret = btrfs_set_disk_extent_flags(trans, fs_info, eb->start,
						  eb->len, flag,
						  btrfs_header_level(eb), 0);
		BUG_ON(ret); /* -ENOMEM */
		wc->flags[level] |= flag;
	}

	/*
	 * the block is shared by multiple trees, so it's not good to
	 * keep the tree lock
	 */
	if (path->locks[level] && level > 0) {
		btrfs_tree_unlock_rw(eb, path->locks[level]);
		path->locks[level] = 0;
	}
	return 0;
}
/*
 * helper to process tree block pointer.
 *
 * when wc->stage == DROP_REFERENCE, this function checks
 * reference count of the block pointed to. if the block
 * is shared and we need to update back refs for the subtree
 * rooted at the block, this function changes wc->stage to
 * UPDATE_BACKREF. if the block is shared and there is no
 * need to update back refs, this function drops the reference
 * to the block.
 *
 * NOTE: return value 1 means we should stop walking down.
 */
static noinline int do_walk_down(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct walk_control *wc, int *lookup_info)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 bytenr;
	u64 generation;
	u64 parent;
	u32 blocksize;
	struct btrfs_key key;
	struct extent_buffer *next;
	int level = wc->level;
	int reada = 0;
	int ret = 0;
	bool need_account = false;

	generation = btrfs_node_ptr_generation(path->nodes[level],
					       path->slots[level]);
	/*
	 * if the lower level block was created before the snapshot
	 * was created, we know there is no need to update back refs
	 * for the subtree
	 */
	if (wc->stage == UPDATE_BACKREF &&
	    generation <= root->root_key.offset) {
		*lookup_info = 1;
		return 1;
	}

	bytenr = btrfs_node_blockptr(path->nodes[level], path->slots[level]);
	blocksize = fs_info->nodesize;

	next = find_extent_buffer(fs_info, bytenr);
	if (!next) {
		next = btrfs_find_create_tree_block(fs_info, bytenr);
		if (IS_ERR(next))
			return PTR_ERR(next);

		btrfs_set_buffer_lockdep_class(root->root_key.objectid, next,
					       level - 1);
		reada = 1;
	}
	btrfs_tree_lock(next);
	btrfs_set_lock_blocking(next);

	ret = btrfs_lookup_extent_info(trans, fs_info, bytenr, level - 1, 1,
				       &wc->refs[level - 1],
				       &wc->flags[level - 1]);
	if (ret < 0)
		goto out_unlock;

	if (unlikely(wc->refs[level - 1] == 0)) {
		btrfs_err(fs_info, "Missing references.");
		ret = -EIO;
		goto out_unlock;
	}
	*lookup_info = 0;

	if (wc->stage == DROP_REFERENCE) {
		if (wc->refs[level - 1] > 1) {
			need_account = true;
			if (level == 1 &&
			    (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
				goto skip;

			if (!wc->update_ref ||
			    generation <= root->root_key.offset)
				goto skip;

			btrfs_node_key_to_cpu(path->nodes[level], &key,
					      path->slots[level]);
			ret = btrfs_comp_cpu_keys(&key, &wc->update_progress);
			if (ret < 0)
				goto skip;

			wc->stage = UPDATE_BACKREF;
			wc->shared_level = level - 1;
		}
	} else {
		if (level == 1 &&
		    (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
			goto skip;
	}

	if (!btrfs_buffer_uptodate(next, generation, 0)) {
		btrfs_tree_unlock(next);
		free_extent_buffer(next);
		next = NULL;
		*lookup_info = 1;
	}

	if (!next) {
		if (reada && level == 1)
			reada_walk_down(trans, root, wc, path);
		next = read_tree_block(fs_info, bytenr, generation);
		if (IS_ERR(next)) {
			return PTR_ERR(next);
		} else if (!extent_buffer_uptodate(next)) {
			free_extent_buffer(next);
			return -EIO;
		}
		btrfs_tree_lock(next);
		btrfs_set_lock_blocking(next);
	}

	level--;
	ASSERT(level == btrfs_header_level(next));
	if (level != btrfs_header_level(next)) {
		btrfs_err(root->fs_info, "mismatched level");
		ret = -EIO;
		goto out_unlock;
	}
	path->nodes[level] = next;
	path->slots[level] = 0;
	path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
	wc->level = level;
	if (wc->level == 1)
		wc->reada_slot = 0;
	return 0;
skip:
	wc->refs[level - 1] = 0;
	wc->flags[level - 1] = 0;
	if (wc->stage == DROP_REFERENCE) {
		if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
			parent = path->nodes[level]->start;
		} else {
			ASSERT(root->root_key.objectid ==
			       btrfs_header_owner(path->nodes[level]));
			if (root->root_key.objectid !=
			    btrfs_header_owner(path->nodes[level])) {
				btrfs_err(root->fs_info,
					  "mismatched block owner");
				ret = -EIO;
				goto out_unlock;
			}
			parent = 0;
		}

		if (need_account) {
			ret = btrfs_qgroup_trace_subtree(trans, root, next,
							 generation, level - 1);
			if (ret) {
				btrfs_err_rl(fs_info,
					     "Error %d accounting shared subtree. Quota is out of sync, rescan required.",
					     ret);
			}
		}
		ret = btrfs_free_extent(trans, root, bytenr, blocksize,
					parent, root->root_key.objectid,
					level - 1, 0);
		if (ret)
			goto out_unlock;
	}

	*lookup_info = 1;
	ret = 1;

out_unlock:
	btrfs_tree_unlock(next);
	free_extent_buffer(next);

	return ret;
}
/*
 * helper to process tree block while walking up the tree.
 *
 * when wc->stage == DROP_REFERENCE, this function drops
 * reference count on the block.
 *
 * when wc->stage == UPDATE_BACKREF, this function changes
 * wc->stage back to DROP_REFERENCE if we changed wc->stage
 * to UPDATE_BACKREF previously while processing the block.
 *
 * NOTE: return value 1 means we should stop walking up.
 */
static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct walk_control *wc)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret;
	int level = wc->level;
	struct extent_buffer *eb = path->nodes[level];
	u64 parent = 0;

	if (wc->stage == UPDATE_BACKREF) {
		BUG_ON(wc->shared_level < level);
		if (level < wc->shared_level)
			goto out;

		ret = find_next_key(path, level + 1, &wc->update_progress);
		if (ret > 0)
			wc->update_ref = 0;

		wc->stage = DROP_REFERENCE;
		wc->shared_level = -1;
		path->slots[level] = 0;

		/*
		 * check reference count again if the block isn't locked.
		 * we should start walking down the tree again if reference
		 * count is one.
		 */
		if (!path->locks[level]) {
			BUG_ON(level == 0);
			btrfs_tree_lock(eb);
			btrfs_set_lock_blocking(eb);
			path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;

			ret = btrfs_lookup_extent_info(trans, fs_info,
						       eb->start, level, 1,
						       &wc->refs[level],
						       &wc->flags[level]);
			if (ret < 0) {
				btrfs_tree_unlock_rw(eb, path->locks[level]);
				path->locks[level] = 0;
				return ret;
			}
			BUG_ON(wc->refs[level] == 0);
			if (wc->refs[level] == 1) {
				btrfs_tree_unlock_rw(eb, path->locks[level]);
				path->locks[level] = 0;
				return 1;
			}
		}
	}

	/* wc->stage == DROP_REFERENCE */
	BUG_ON(wc->refs[level] > 1 && !path->locks[level]);

	if (wc->refs[level] == 1) {
		if (level == 0) {
			if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
				ret = btrfs_dec_ref(trans, root, eb, 1);
			else
				ret = btrfs_dec_ref(trans, root, eb, 0);
			BUG_ON(ret); /* -ENOMEM */
			ret = btrfs_qgroup_trace_leaf_items(trans, fs_info, eb);
			if (ret) {
				btrfs_err_rl(fs_info,
					     "error %d accounting leaf items. Quota is out of sync, rescan required.",
					     ret);
			}
		}
		/* make block locked assertion in clean_tree_block happy */
		if (!path->locks[level] &&
		    btrfs_header_generation(eb) == trans->transid) {
			btrfs_tree_lock(eb);
			btrfs_set_lock_blocking(eb);
			path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
		}
		clean_tree_block(fs_info, eb);
	}

	if (eb == root->node) {
		if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
			parent = eb->start;
		else
			BUG_ON(root->root_key.objectid !=
			       btrfs_header_owner(eb));
	} else {
		if (wc->flags[level + 1] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
			parent = path->nodes[level + 1]->start;
		else
			BUG_ON(root->root_key.objectid !=
			       btrfs_header_owner(path->nodes[level + 1]));
	}

	btrfs_free_tree_block(trans, root, eb, parent, wc->refs[level] == 1);
out:
	wc->refs[level] = 0;
	wc->flags[level] = 0;
	return 0;
}
static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct btrfs_path *path,
				   struct walk_control *wc)
{
	int level = wc->level;
	int lookup_info = 1;
	int ret;

	while (level >= 0) {
		ret = walk_down_proc(trans, root, path, wc, lookup_info);
		if (ret > 0)
			break;

		if (level == 0)
			break;

		if (path->slots[level] >=
		    btrfs_header_nritems(path->nodes[level]))
			break;

		ret = do_walk_down(trans, root, path, wc, &lookup_info);
		if (ret > 0) {
			path->slots[level]++;
			continue;
		} else if (ret < 0)
			return ret;
		level = wc->level;
	}
	return 0;
}

static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct walk_control *wc, int max_level)
{
	int level = wc->level;
	int ret;

	path->slots[level] = btrfs_header_nritems(path->nodes[level]);
	while (level < max_level && path->nodes[level]) {
		wc->level = level;
		if (path->slots[level] + 1 <
		    btrfs_header_nritems(path->nodes[level])) {
			path->slots[level]++;
			return 0;
		} else {
			ret = walk_up_proc(trans, root, path, wc);
			if (ret > 0)
				return 0;

			if (path->locks[level]) {
				btrfs_tree_unlock_rw(path->nodes[level],
						     path->locks[level]);
				path->locks[level] = 0;
			}
			free_extent_buffer(path->nodes[level]);
			path->nodes[level] = NULL;
			level++;
		}
	}
	return 1;
}
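/*
 * Illustrative aside (hypothetical): walk_down_tree()/walk_up_tree()
 * above form an iterative depth-first traversal with an explicit level
 * cursor instead of recursion: descend while a node still has unvisited
 * children, then either step to the next sibling or free the node and
 * pop a level. A standalone skeleton with the same shape (every node at
 * one level shares the same fanout, for brevity):
 */
#if 0	/* demo only; compile separately */
#include <stdio.h>

#define MAXLVL 8

struct cursor {
	int level;
	int slot[MAXLVL];
	int nritems[MAXLVL];	/* fanout per level; 0 for leaves */
};

/* walk_down_tree() analogue: descend until we hit a leaf */
static void step_down(struct cursor *c)
{
	while (c->level > 0 && c->slot[c->level] < c->nritems[c->level]) {
		printf("down to level %d via slot %d\n",
		       c->level - 1, c->slot[c->level]);
		c->level--;
		c->slot[c->level] = 0;
	}
}

/* walk_up_tree() analogue: next sibling, or free the node and pop */
static int step_up(struct cursor *c, int max_level)
{
	while (c->level < max_level) {
		if (c->slot[c->level] + 1 < c->nritems[c->level]) {
			c->slot[c->level]++;
			return 0;	/* more siblings: walk down again */
		}
		printf("free node at level %d\n", c->level);
		c->level++;
	}
	return 1;			/* root processed: done */
}

int main(void)
{
	struct cursor c = { .level = 2, .nritems = { 0, 2, 3 } };

	while (1) {
		step_down(&c);
		if (step_up(&c, 3))
			break;
	}
	return 0;
}
#endif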
/*
 * drop a subvolume tree.
 *
 * this function traverses the tree freeing any blocks that are
 * only referenced by the tree.
 *
 * when a shared tree block is found, this function decreases its
 * reference count by one. if update_ref is true, this function
 * also makes sure backrefs for the shared block and all lower level
 * blocks are properly updated.
 *
 * If called with for_reloc == 0, may exit early with -EAGAIN
 */
int btrfs_drop_snapshot(struct btrfs_root *root,
                        struct btrfs_block_rsv *block_rsv, int update_ref,
                        int for_reloc)
{
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct btrfs_path *path;
        struct btrfs_trans_handle *trans;
        struct btrfs_root *tree_root = fs_info->tree_root;
        struct btrfs_root_item *root_item = &root->root_item;
        struct walk_control *wc;
        struct btrfs_key key;
        int err = 0;
        int ret;
        int level;
        bool root_dropped = false;

        btrfs_debug(fs_info, "Drop subvolume %llu", root->objectid);

        path = btrfs_alloc_path();
        if (!path) {
                err = -ENOMEM;
                goto out;
        }

        wc = kzalloc(sizeof(*wc), GFP_NOFS);
        if (!wc) {
                btrfs_free_path(path);
                err = -ENOMEM;
                goto out;
        }

        trans = btrfs_start_transaction(tree_root, 0);
        if (IS_ERR(trans)) {
                err = PTR_ERR(trans);
                goto out_free;
        }

        if (block_rsv)
                trans->block_rsv = block_rsv;

        if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
                level = btrfs_header_level(root->node);
                path->nodes[level] = btrfs_lock_root_node(root);
                btrfs_set_lock_blocking(path->nodes[level]);
                path->slots[level] = 0;
                path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
                memset(&wc->update_progress, 0,
                       sizeof(wc->update_progress));
        } else {
                btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
                memcpy(&wc->update_progress, &key,
                       sizeof(wc->update_progress));

                level = root_item->drop_level;
                BUG_ON(level == 0);
                path->lowest_level = level;
                ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
                path->lowest_level = 0;
                if (ret < 0) {
                        err = ret;
                        goto out_end_trans;
                }
                WARN_ON(ret > 0);

                /*
                 * unlock our path, this is safe because only this
                 * function is allowed to delete this snapshot
                 */
                btrfs_unlock_up_safe(path, 0);

                level = btrfs_header_level(root->node);
                while (1) {
                        btrfs_tree_lock(path->nodes[level]);
                        btrfs_set_lock_blocking(path->nodes[level]);
                        path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;

                        ret = btrfs_lookup_extent_info(trans, fs_info,
                                                path->nodes[level]->start,
                                                level, 1, &wc->refs[level],
                                                &wc->flags[level]);
                        if (ret < 0) {
                                err = ret;
                                goto out_end_trans;
                        }
                        BUG_ON(wc->refs[level] == 0);

                        if (level == root_item->drop_level)
                                break;

                        btrfs_tree_unlock(path->nodes[level]);
                        path->locks[level] = 0;
                        WARN_ON(wc->refs[level] != 1);
                        level--;
                }
        }

        wc->level = level;
        wc->shared_level = -1;
        wc->stage = DROP_REFERENCE;
        wc->update_ref = update_ref;
        wc->keep_locks = 0;
        wc->for_reloc = for_reloc;
        wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(fs_info);

        while (1) {
                ret = walk_down_tree(trans, root, path, wc);
                if (ret < 0) {
                        err = ret;
                        break;
                }

                ret = walk_up_tree(trans, root, path, wc, BTRFS_MAX_LEVEL);
                if (ret < 0) {
                        err = ret;
                        break;
                }

                if (ret > 0) {
                        BUG_ON(wc->stage != DROP_REFERENCE);
                        break;
                }

                if (wc->stage == DROP_REFERENCE) {
                        level = wc->level;
                        btrfs_node_key(path->nodes[level],
                                       &root_item->drop_progress,
                                       path->slots[level]);
                        root_item->drop_level = level;
                }

                BUG_ON(wc->level == 0);
                if (btrfs_should_end_transaction(trans) ||
                    (!for_reloc && btrfs_need_cleaner_sleep(fs_info))) {
                        ret = btrfs_update_root(trans, tree_root,
                                                &root->root_key,
                                                root_item);
                        if (ret) {
                                btrfs_abort_transaction(trans, ret);
                                err = ret;
                                goto out_end_trans;
                        }

                        btrfs_end_transaction_throttle(trans);
                        if (!for_reloc && btrfs_need_cleaner_sleep(fs_info)) {
                                btrfs_debug(fs_info,
                                            "drop snapshot early exit");
                                err = -EAGAIN;
                                goto out_free;
                        }

                        trans = btrfs_start_transaction(tree_root, 0);
                        if (IS_ERR(trans)) {
                                err = PTR_ERR(trans);
                                goto out_free;
                        }
                        if (block_rsv)
                                trans->block_rsv = block_rsv;
                }
        }
        btrfs_release_path(path);
        if (err)
                goto out_end_trans;

        ret = btrfs_del_root(trans, fs_info, &root->root_key);
        if (ret) {
                btrfs_abort_transaction(trans, ret);
                err = ret;
                goto out_end_trans;
        }

        if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
                ret = btrfs_find_root(tree_root, &root->root_key, path,
                                      NULL, NULL);
                if (ret < 0) {
                        btrfs_abort_transaction(trans, ret);
                        err = ret;
                        goto out_end_trans;
                } else if (ret > 0) {
                        /* if we fail to delete the orphan item this time
                         * around, it'll get picked up the next time.
                         *
                         * The most common failure here is just -ENOENT.
                         */
                        btrfs_del_orphan_item(trans, tree_root,
                                              root->root_key.objectid);
                }
        }

        if (test_bit(BTRFS_ROOT_IN_RADIX, &root->state)) {
                btrfs_add_dropped_root(trans, root);
        } else {
                free_extent_buffer(root->node);
                free_extent_buffer(root->commit_root);
                btrfs_put_fs_root(root);
        }
        root_dropped = true;
out_end_trans:
        btrfs_end_transaction_throttle(trans);
out_free:
        kfree(wc);
        btrfs_free_path(path);
out:
        /*
         * So if we need to stop dropping the snapshot for whatever reason we
         * need to make sure to add it back to the dead root list so that we
         * keep trying to do the work later. This also cleans up roots if we
         * don't have it in the radix (like when we recover after a power fail
         * or unmount) so we don't leak memory.
         */
        if (!for_reloc && !root_dropped)
                btrfs_add_dead_root(root);
        if (err && err != -EAGAIN)
                btrfs_handle_fs_error(fs_info, err, NULL);
        return err;
}
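/*
 * btrfs_drop_snapshot() checkpoints its position (the drop_progress key
 * and drop_level) into the root item before ending each transaction, so
 * a crash or an -EAGAIN early exit simply resumes from the saved
 * cursor.  The sketch below isolates that "work in bounded batches,
 * persist a cursor between batches" pattern; everything in it
 * (demo_cursor, persist_cursor, the record table) is hypothetical.
 */
#if 0   /* illustrative sketch, not part of the kernel build */
#include <stdio.h>
#include <stdint.h>

struct demo_cursor {
        uint64_t next_key;      /* analogous to drop_progress */
};

static const uint64_t records[10] = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 };
static const size_t nr_records = 10;

/* stand-in for updating the root item inside the transaction */
static void persist_cursor(const struct demo_cursor *cur)
{
        printf("checkpoint: next_key=%llu\n",
               (unsigned long long)cur->next_key);
}

static void resumable_delete(struct demo_cursor *cur)
{
        const size_t budget = 3;        /* work per "transaction" */
        size_t i, done_now;

        while (cur->next_key <= records[nr_records - 1]) {
                done_now = 0;
                for (i = 0; i < nr_records && done_now < budget; i++) {
                        if (records[i] >= cur->next_key) {
                                /* "delete" records[i] */
                                cur->next_key = records[i] + 1;
                                done_now++;
                        }
                }
                persist_cursor(cur);    /* safe to stop from here on */
        }
}

int main(void)
{
        struct demo_cursor cur = { .next_key = 0 };

        resumable_delete(&cur);
        return 0;
}
#endif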
/*
 * drop subtree rooted at tree block 'node'.
 *
 * NOTE: this function will unlock and release tree block 'node'.
 * Only used by the relocation code.
 */
int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
                       struct btrfs_root *root,
                       struct extent_buffer *node,
                       struct extent_buffer *parent)
{
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct btrfs_path *path;
        struct walk_control *wc;
        int level;
        int parent_level;
        int ret = 0;
        int wret;

        BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        wc = kzalloc(sizeof(*wc), GFP_NOFS);
        if (!wc) {
                btrfs_free_path(path);
                return -ENOMEM;
        }

        btrfs_assert_tree_locked(parent);
        parent_level = btrfs_header_level(parent);
        extent_buffer_get(parent);
        path->nodes[parent_level] = parent;
        path->slots[parent_level] = btrfs_header_nritems(parent);

        btrfs_assert_tree_locked(node);
        level = btrfs_header_level(node);
        path->nodes[level] = node;
        path->slots[level] = 0;
        path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;

        wc->refs[parent_level] = 1;
        wc->flags[parent_level] = BTRFS_BLOCK_FLAG_FULL_BACKREF;
        wc->level = level;
        wc->shared_level = -1;
        wc->stage = DROP_REFERENCE;
        wc->update_ref = 0;
        wc->keep_locks = 1;
        wc->for_reloc = 1;
        wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(fs_info);

        while (1) {
                wret = walk_down_tree(trans, root, path, wc);
                if (wret < 0) {
                        ret = wret;
                        break;
                }

                wret = walk_up_tree(trans, root, path, wc, parent_level);
                if (wret < 0)
                        ret = wret;
                if (wret != 0)
                        break;
        }

        kfree(wc);
        btrfs_free_path(path);
        return ret;
}

static u64 update_block_group_flags(struct btrfs_fs_info *fs_info, u64 flags)
{
        u64 num_devices;
        u64 stripped;

        /*
         * if restripe for this chunk_type is on, pick the target profile and
         * return, otherwise do the usual balance
         */
        stripped = get_restripe_target(fs_info, flags);
        if (stripped)
                return extended_to_chunk(stripped);

        num_devices = fs_info->fs_devices->rw_devices;

        stripped = BTRFS_BLOCK_GROUP_RAID0 |
                BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6 |
                BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;

        if (num_devices == 1) {
                stripped |= BTRFS_BLOCK_GROUP_DUP;
                stripped = flags & ~stripped;

                /* turn raid0 into single device chunks */
                if (flags & BTRFS_BLOCK_GROUP_RAID0)
                        return stripped;

                /* turn mirroring into duplication */
                if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
                             BTRFS_BLOCK_GROUP_RAID10))
                        return stripped | BTRFS_BLOCK_GROUP_DUP;
        } else {
                /* they already had raid on here, just return */
                if (flags & stripped)
                        return flags;

                stripped |= BTRFS_BLOCK_GROUP_DUP;
                stripped = flags & ~stripped;

                /* switch duplicated blocks with raid1 */
                if (flags & BTRFS_BLOCK_GROUP_DUP)
                        return stripped | BTRFS_BLOCK_GROUP_RAID1;

                /* this is drive concat, leave it alone */
        }

        return flags;
}
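/*
 * update_block_group_flags() picks the profile a chunk can be rewritten
 * with given the current rw device count: on a single device RAID0
 * degrades to single and mirroring (RAID1/RAID10) becomes DUP, while
 * with several devices DUP is promoted to RAID1.  A compact user-space
 * rendering of that decision table follows; the DEMO_* bits are
 * hypothetical stand-ins for the BTRFS_BLOCK_GROUP_* flags and the
 * sketch ignores the non-profile bits the kernel code preserves.
 */
#if 0   /* illustrative sketch, not part of the kernel build */
#include <stdint.h>

#define DEMO_RAID0      (1u << 0)
#define DEMO_RAID1      (1u << 1)
#define DEMO_RAID10     (1u << 2)
#define DEMO_DUP        (1u << 3)
#define DEMO_SINGLE     0u              /* no profile bit set */

static uint32_t demo_fallback_profile(uint32_t flags, unsigned int num_devices)
{
        if (num_devices == 1) {
                if (flags & DEMO_RAID0)
                        return DEMO_SINGLE;     /* stripes -> single */
                if (flags & (DEMO_RAID1 | DEMO_RAID10))
                        return DEMO_DUP;        /* mirror -> duplicate */
        } else {
                if (flags & DEMO_DUP)
                        return DEMO_RAID1;      /* duplicate -> mirror */
        }
        return flags;   /* already fine, leave it alone */
}
#endif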
static int inc_block_group_ro(struct btrfs_block_group_cache *cache, int force)
{
        struct btrfs_space_info *sinfo = cache->space_info;
        u64 num_bytes;
        u64 min_allocable_bytes;
        int ret = -ENOSPC;

        /*
         * We still need some metadata space and system metadata space for
         * allocating chunks in some corner cases, so unless the caller is
         * forcing the block group read-only, keep a small cushion available.
         */
        if ((sinfo->flags &
             (BTRFS_BLOCK_GROUP_SYSTEM | BTRFS_BLOCK_GROUP_METADATA)) &&
            !force)
                min_allocable_bytes = SZ_1M;
        else
                min_allocable_bytes = 0;

        spin_lock(&sinfo->lock);
        spin_lock(&cache->lock);

        if (cache->ro) {
                cache->ro++;
                ret = 0;
                goto out;
        }

        num_bytes = cache->key.offset - cache->reserved - cache->pinned -
                    cache->bytes_super - btrfs_block_group_used(&cache->item);

        if (btrfs_space_info_used(sinfo, true) + num_bytes +
            min_allocable_bytes <= sinfo->total_bytes) {
                sinfo->bytes_readonly += num_bytes;
                cache->ro++;
                list_add_tail(&cache->ro_list, &sinfo->ro_bgs);
                ret = 0;
        }
out:
        spin_unlock(&cache->lock);
        spin_unlock(&sinfo->lock);
        return ret;
}
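/*
 * The admission check in inc_block_group_ro() is plain arithmetic: the
 * space currently accounted as used, plus the unused bytes this block
 * group would take out of circulation (num_bytes), plus a cushion for
 * future metadata/system chunk allocations, must still fit inside the
 * space_info's total.  A standalone restatement with hypothetical
 * names makes the inequality easy to test on its own:
 */
#if 0   /* illustrative sketch, not part of the kernel build */
#include <stdbool.h>
#include <stdint.h>

static bool demo_can_set_ro(uint64_t total_bytes, uint64_t used_bytes,
                            uint64_t bg_unused_bytes, bool metadata_or_system,
                            bool force)
{
        /* mirror the SZ_1M cushion the kernel keeps unless forced */
        uint64_t cushion = (metadata_or_system && !force) ? (1024 * 1024) : 0;

        return used_bytes + bg_unused_bytes + cushion <= total_bytes;
}
#endif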
int btrfs_inc_block_group_ro(struct btrfs_fs_info *fs_info,
                             struct btrfs_block_group_cache *cache)
{
        struct btrfs_trans_handle *trans;
        u64 alloc_flags;
        int ret;

again:
        trans = btrfs_join_transaction(fs_info->extent_root);
        if (IS_ERR(trans))
                return PTR_ERR(trans);

        /*
         * we're not allowed to set block groups readonly after the dirty
         * block groups cache has started writing. If it already started,
         * back off and let this transaction commit
         */
        mutex_lock(&fs_info->ro_block_group_mutex);
        if (test_bit(BTRFS_TRANS_DIRTY_BG_RUN, &trans->transaction->flags)) {
                u64 transid = trans->transid;

                mutex_unlock(&fs_info->ro_block_group_mutex);
                btrfs_end_transaction(trans);

                ret = btrfs_wait_for_commit(fs_info, transid);
                if (ret)
                        return ret;
                goto again;
        }

        /*
         * if we are changing raid levels, try to allocate a corresponding
         * block group with the new raid level.
         */
        alloc_flags = update_block_group_flags(fs_info, cache->flags);
        if (alloc_flags != cache->flags) {
                ret = do_chunk_alloc(trans, fs_info, alloc_flags,
                                     CHUNK_ALLOC_FORCE);
                /*
                 * ENOSPC is allowed here, we may have enough space
                 * already allocated at the new raid level to
                 * carry on
                 */
                if (ret == -ENOSPC)
                        ret = 0;
                if (ret < 0)
                        goto out;
        }

        ret = inc_block_group_ro(cache, 0);
        if (!ret)
                goto out;
        alloc_flags = get_alloc_profile(fs_info, cache->space_info->flags);
        ret = do_chunk_alloc(trans, fs_info, alloc_flags,
                             CHUNK_ALLOC_FORCE);
        if (ret < 0)
                goto out;
        ret = inc_block_group_ro(cache, 0);
out:
        if (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM) {
                alloc_flags = update_block_group_flags(fs_info, cache->flags);
                mutex_lock(&fs_info->chunk_mutex);
                check_system_chunk(trans, fs_info, alloc_flags);
                mutex_unlock(&fs_info->chunk_mutex);
        }
        mutex_unlock(&fs_info->ro_block_group_mutex);

        btrfs_end_transaction(trans);
        return ret;
}
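/*
 * btrfs_inc_block_group_ro() cannot flip a block group read-only once
 * the current transaction started writing dirty block groups, so it
 * backs off: end the handle, wait for that transaction to commit, and
 * retry from scratch.  The loop below shows the same backoff shape
 * with hypothetical primitives (demo_join, demo_phase_started, etc.);
 * only the control flow is the point.
 */
#if 0   /* illustrative sketch, not part of the kernel build */
#include <stdbool.h>
#include <stdint.h>

extern int demo_join(uint64_t *id);             /* join the running txn */
extern bool demo_phase_started(uint64_t id);    /* conflicting phase? */
extern int demo_wait_commit(uint64_t id);
extern int demo_do_work(uint64_t id);
extern void demo_end(uint64_t id);

static int demo_with_retry(void)
{
        uint64_t id;
        int ret;

again:
        ret = demo_join(&id);
        if (ret)
                return ret;
        if (demo_phase_started(id)) {
                demo_end(id);
                ret = demo_wait_commit(id);     /* let it finish... */
                if (ret)
                        return ret;
                goto again;                     /* ...then try the next one */
        }
        ret = demo_do_work(id);
        demo_end(id);
        return ret;
}
#endif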
int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans,
                            struct btrfs_fs_info *fs_info, u64 type)
{
        u64 alloc_flags = get_alloc_profile(fs_info, type);

        return do_chunk_alloc(trans, fs_info, alloc_flags, CHUNK_ALLOC_FORCE);
}

/*
 * helper to account the unused space of all the readonly block groups in the
 * space_info. takes mirrors into account.
 */
u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo)
{
        struct btrfs_block_group_cache *block_group;
        u64 free_bytes = 0;
        int factor;

        /* It's df, we don't care if it's racy */
        if (list_empty(&sinfo->ro_bgs))
                return 0;

        spin_lock(&sinfo->lock);
        list_for_each_entry(block_group, &sinfo->ro_bgs, ro_list) {
                spin_lock(&block_group->lock);

                if (!block_group->ro) {
                        spin_unlock(&block_group->lock);
                        continue;
                }

                if (block_group->flags & (BTRFS_BLOCK_GROUP_RAID1 |
                                          BTRFS_BLOCK_GROUP_RAID10 |
                                          BTRFS_BLOCK_GROUP_DUP))
                        factor = 2;
                else
                        factor = 1;

                free_bytes += (block_group->key.offset -
                               btrfs_block_group_used(&block_group->item)) *
                               factor;

                spin_unlock(&block_group->lock);
        }
        spin_unlock(&sinfo->lock);

        return free_bytes;
}
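/*
 * btrfs_account_ro_block_groups_free_space() converts each read-only
 * block group's unused logical bytes into raw disk bytes by applying a
 * mirror factor: profiles that keep two copies (RAID1, RAID10, DUP)
 * consume two raw bytes per logical byte.  A minimal restatement of
 * that accounting, with hypothetical names:
 */
#if 0   /* illustrative sketch, not part of the kernel build */
#include <stdint.h>

struct demo_ro_group {
        uint64_t size;          /* key.offset: logical size of the group */
        uint64_t used;          /* btrfs_block_group_used() */
        int      two_copies;    /* RAID1 / RAID10 / DUP */
};

static uint64_t demo_ro_free_space(const struct demo_ro_group *groups,
                                   unsigned int nr)
{
        uint64_t free_bytes = 0;
        unsigned int i;

        for (i = 0; i < nr; i++) {
                unsigned int factor = groups[i].two_copies ? 2 : 1;

                free_bytes += (groups[i].size - groups[i].used) * factor;
        }
        return free_bytes;
}
#endif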
void btrfs_dec_block_group_ro(struct btrfs_block_group_cache *cache)
{
        struct btrfs_space_info *sinfo = cache->space_info;
        u64 num_bytes;

        BUG_ON(!cache->ro);

        spin_lock(&sinfo->lock);
        spin_lock(&cache->lock);
        if (!--cache->ro) {
                num_bytes = cache->key.offset - cache->reserved -
                            cache->pinned - cache->bytes_super -
                            btrfs_block_group_used(&cache->item);
                sinfo->bytes_readonly -= num_bytes;
                list_del_init(&cache->ro_list);
        }
        spin_unlock(&cache->lock);
        spin_unlock(&sinfo->lock);
}

/*
 * checks to see if it's even possible to relocate this block group.
 *
 * @return - -1 if it's not a good idea to relocate this block group, 0 if it's
 * ok to go ahead and try.
 */
int btrfs_can_relocate(struct btrfs_fs_info *fs_info, u64 bytenr)
{
        struct btrfs_root *root = fs_info->extent_root;
        struct btrfs_block_group_cache *block_group;
        struct btrfs_space_info *space_info;
        struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
        struct btrfs_device *device;
        struct btrfs_trans_handle *trans;
        u64 min_free;
        u64 dev_min = 1;
        u64 dev_nr = 0;
        u64 target;
        int debug;
        int index;
        int full = 0;
        int ret = 0;

        debug = btrfs_test_opt(fs_info, ENOSPC_DEBUG);

        block_group = btrfs_lookup_block_group(fs_info, bytenr);

        /* odd, couldn't find the block group, leave it alone */
        if (!block_group) {
                if (debug)
                        btrfs_warn(fs_info,
                                   "can't find block group for bytenr %llu",
                                   bytenr);
                return -1;
        }

        min_free = btrfs_block_group_used(&block_group->item);

        /* no bytes used, we're good */
        if (!min_free)
                goto out;

        space_info = block_group->space_info;
        spin_lock(&space_info->lock);

        full = space_info->full;

        /*
         * if this is the last block group we have in this space, we can't
         * relocate it unless we're able to allocate a new chunk below.
         *
         * Otherwise, we need to make sure we have room in the space to handle
         * all of the extents from this block group. If we can, we're good
         */
        if ((space_info->total_bytes != block_group->key.offset) &&
            (btrfs_space_info_used(space_info, false) + min_free <
             space_info->total_bytes)) {
                spin_unlock(&space_info->lock);
                goto out;
        }
        spin_unlock(&space_info->lock);

        /*
         * ok we don't have enough space, but maybe we have free space on our
         * devices to allocate new chunks for relocation, so loop through our
         * alloc devices and guess if we have enough space. if this block
         * group is going to be restriped, run checks against the target
         * profile instead of the current one.
         */
        ret = -1;

        /*
         * index:
         *      0: raid10
         *      1: raid1
         *      2: dup
         *      3: raid0
         *      4: single
         */
        target = get_restripe_target(fs_info, block_group->flags);
        if (target) {
                index = __get_raid_index(extended_to_chunk(target));
        } else {
                /*
                 * this is just a balance, so if we were marked as full
                 * we know there is no space for a new chunk
                 */
                if (full) {
                        if (debug)
                                btrfs_warn(fs_info,
                                           "no space to alloc new chunk for block group %llu",
                                           block_group->key.objectid);
                        goto out;
                }

                index = get_block_group_index(block_group);
        }

        if (index == BTRFS_RAID_RAID10) {
                dev_min = 4;
                /* Divide by 2 */
                min_free >>= 1;
        } else if (index == BTRFS_RAID_RAID1) {
                dev_min = 2;
        } else if (index == BTRFS_RAID_DUP) {
                /* Multiply by 2 */
                min_free <<= 1;
        } else if (index == BTRFS_RAID_RAID0) {
                dev_min = fs_devices->rw_devices;
                min_free = div64_u64(min_free, dev_min);
        }

        /* We need to do this so that we can look at pending chunks */
        trans = btrfs_join_transaction(root);
        if (IS_ERR(trans)) {
                ret = PTR_ERR(trans);
                goto out;
        }

        mutex_lock(&fs_info->chunk_mutex);
        list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
                u64 dev_offset;

                /*
                 * check to make sure we can actually find a chunk with enough
                 * space to fit our block group in.
                 */
                if (device->total_bytes > device->bytes_used + min_free &&
                    !device->is_tgtdev_for_dev_replace) {
                        ret = find_free_dev_extent(trans, device, min_free,
                                                   &dev_offset, NULL);
                        if (!ret)
                                dev_nr++;

                        if (dev_nr >= dev_min)
                                break;

                        ret = -1;
                }
        }
        if (debug && ret == -1)
                btrfs_warn(fs_info,
                           "no space to allocate a new chunk for block group %llu",
                           block_group->key.objectid);
        mutex_unlock(&fs_info->chunk_mutex);
        btrfs_end_transaction(trans);
out:
        btrfs_put_block_group(block_group);
        return ret;
}
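/*
 * btrfs_can_relocate() estimates whether the used bytes of a block
 * group could be re-chunked elsewhere: RAID10 needs 4 devices but each
 * holds half the data, DUP doubles the per-device requirement, RAID0
 * spreads it across every rw device, and so on.  The per-profile
 * adjustment reduces to the small table below (hypothetical enum and
 * names; assumes rw_devices >= 1 for the RAID0 case):
 */
#if 0   /* illustrative sketch, not part of the kernel build */
#include <stdint.h>

enum demo_raid { DEMO_RAID10, DEMO_RAID1, DEMO_DUP, DEMO_RAID0, DEMO_SINGLE };

struct demo_req {
        uint64_t min_free_per_dev;      /* space needed on each device */
        uint64_t dev_min;               /* devices that must provide it */
};

static struct demo_req demo_relocate_req(enum demo_raid idx, uint64_t used,
                                         uint64_t rw_devices)
{
        struct demo_req req = { .min_free_per_dev = used, .dev_min = 1 };

        switch (idx) {
        case DEMO_RAID10:
                req.dev_min = 4;
                req.min_free_per_dev = used >> 1;       /* striped mirrors */
                break;
        case DEMO_RAID1:
                req.dev_min = 2;                        /* full copy on each */
                break;
        case DEMO_DUP:
                req.min_free_per_dev = used << 1;       /* two copies, one dev */
                break;
        case DEMO_RAID0:
                req.dev_min = rw_devices;
                req.min_free_per_dev = used / rw_devices;
                break;
        case DEMO_SINGLE:
                break;
        }
        return req;
}
#endif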
static int find_first_block_group(struct btrfs_fs_info *fs_info,
                                  struct btrfs_path *path,
                                  struct btrfs_key *key)
{
        struct btrfs_root *root = fs_info->extent_root;
        int ret = 0;
        struct btrfs_key found_key;
        struct extent_buffer *leaf;
        int slot;

        ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
        if (ret < 0)
                goto out;

        while (1) {
                slot = path->slots[0];
                leaf = path->nodes[0];
                if (slot >= btrfs_header_nritems(leaf)) {
                        ret = btrfs_next_leaf(root, path);
                        if (ret == 0)
                                continue;
                        if (ret < 0)
                                goto out;
                        break;
                }
                btrfs_item_key_to_cpu(leaf, &found_key, slot);

                if (found_key.objectid >= key->objectid &&
                    found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
                        struct extent_map_tree *em_tree;
                        struct extent_map *em;

                        em_tree = &root->fs_info->mapping_tree.map_tree;
                        read_lock(&em_tree->lock);
                        em = lookup_extent_mapping(em_tree, found_key.objectid,
                                                   found_key.offset);
                        read_unlock(&em_tree->lock);
                        if (!em) {
                                btrfs_err(fs_info,
                        "logical %llu len %llu found bg but no related chunk",
                                          found_key.objectid, found_key.offset);
                                ret = -ENOENT;
                        } else {
                                ret = 0;
                        }
                        free_extent_map(em);
                        goto out;
                }
                path->slots[0]++;
        }
out:
        return ret;
}

void btrfs_put_block_group_cache(struct btrfs_fs_info *info)
{
        struct btrfs_block_group_cache *block_group;
        u64 last = 0;

        while (1) {
                struct inode *inode;

                block_group = btrfs_lookup_first_block_group(info, last);
                while (block_group) {
                        spin_lock(&block_group->lock);
                        if (block_group->iref)
                                break;
                        spin_unlock(&block_group->lock);
                        block_group = next_block_group(info, block_group);
                }
                if (!block_group) {
                        if (last == 0)
                                break;
                        last = 0;
                        continue;
                }

                inode = block_group->inode;
                block_group->iref = 0;
                block_group->inode = NULL;
                spin_unlock(&block_group->lock);
                ASSERT(block_group->io_ctl.inode == NULL);
                iput(inode);
                last = block_group->key.objectid + block_group->key.offset;
                btrfs_put_block_group(block_group);
        }
}

/*
 * Must be called only after stopping all workers, since we could have block
 * group caching kthreads running, and therefore they could race with us if we
 * freed the block groups before stopping them.
 */
int btrfs_free_block_groups(struct btrfs_fs_info *info)
{
        struct btrfs_block_group_cache *block_group;
        struct btrfs_space_info *space_info;
        struct btrfs_caching_control *caching_ctl;
        struct rb_node *n;

        down_write(&info->commit_root_sem);
        while (!list_empty(&info->caching_block_groups)) {
                caching_ctl = list_entry(info->caching_block_groups.next,
                                         struct btrfs_caching_control, list);
                list_del(&caching_ctl->list);
                put_caching_control(caching_ctl);
        }
        up_write(&info->commit_root_sem);

        spin_lock(&info->unused_bgs_lock);
        while (!list_empty(&info->unused_bgs)) {
                block_group = list_first_entry(&info->unused_bgs,
                                               struct btrfs_block_group_cache,
                                               bg_list);
                list_del_init(&block_group->bg_list);
                btrfs_put_block_group(block_group);
        }
        spin_unlock(&info->unused_bgs_lock);

        spin_lock(&info->block_group_cache_lock);
        while ((n = rb_last(&info->block_group_cache_tree)) != NULL) {
                block_group = rb_entry(n, struct btrfs_block_group_cache,
                                       cache_node);
                rb_erase(&block_group->cache_node,
                         &info->block_group_cache_tree);
                RB_CLEAR_NODE(&block_group->cache_node);
                spin_unlock(&info->block_group_cache_lock);

                down_write(&block_group->space_info->groups_sem);
                list_del(&block_group->list);
                up_write(&block_group->space_info->groups_sem);

                /*
                 * We haven't cached this block group, which means we could
                 * possibly have excluded extents on this block group.
                 */
                if (block_group->cached == BTRFS_CACHE_NO ||
                    block_group->cached == BTRFS_CACHE_ERROR)
                        free_excluded_extents(info, block_group);

                btrfs_remove_free_space_cache(block_group);
                ASSERT(block_group->cached != BTRFS_CACHE_STARTED);
                ASSERT(list_empty(&block_group->dirty_list));
                ASSERT(list_empty(&block_group->io_list));
                ASSERT(list_empty(&block_group->bg_list));
                ASSERT(atomic_read(&block_group->count) == 1);
                btrfs_put_block_group(block_group);

                spin_lock(&info->block_group_cache_lock);
        }
        spin_unlock(&info->block_group_cache_lock);

        /* now that all the block groups are freed, go through and
         * free all the space_info structs.  This is only called during
         * the final stages of unmount, and so we know nobody is
         * using them.  We call synchronize_rcu() once before we start,
         * just to be on the safe side.
         */
        synchronize_rcu();

        release_global_block_rsv(info);

        while (!list_empty(&info->space_info)) {
                int i;

                space_info = list_entry(info->space_info.next,
                                        struct btrfs_space_info,
                                        list);

                /*
                 * Do not hide this behind enospc_debug, this is actually
                 * important and indicates a real bug if this happens.
                 */
                if (WARN_ON(space_info->bytes_pinned > 0 ||
                            space_info->bytes_reserved > 0 ||
                            space_info->bytes_may_use > 0))
                        dump_space_info(info, space_info, 0, 0);
                list_del(&space_info->list);
                for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
                        struct kobject *kobj;

                        kobj = space_info->block_group_kobjs[i];
                        space_info->block_group_kobjs[i] = NULL;
                        if (kobj) {
                                kobject_del(kobj);
                                kobject_put(kobj);
                        }
                }
                kobject_del(&space_info->kobj);
                kobject_put(&space_info->kobj);
        }
        return 0;
}

static void link_block_group(struct btrfs_block_group_cache *cache)
{
        struct btrfs_space_info *space_info = cache->space_info;
        int index = get_block_group_index(cache);
        bool first = false;

        down_write(&space_info->groups_sem);
        if (list_empty(&space_info->block_groups[index]))
                first = true;
        list_add_tail(&cache->list, &space_info->block_groups[index]);
        up_write(&space_info->groups_sem);

        if (first) {
                struct raid_kobject *rkobj;
                int ret;

                rkobj = kzalloc(sizeof(*rkobj), GFP_NOFS);
                if (!rkobj)
                        goto out_err;
                rkobj->raid_type = index;
                kobject_init(&rkobj->kobj, &btrfs_raid_ktype);
                ret = kobject_add(&rkobj->kobj, &space_info->kobj,
                                  "%s", get_raid_name(index));
                if (ret) {
                        kobject_put(&rkobj->kobj);
                        goto out_err;
                }
                space_info->block_group_kobjs[index] = &rkobj->kobj;
        }

        return;
out_err:
        btrfs_warn(cache->fs_info,
                   "failed to add kobject for block cache, ignoring");
}

static struct btrfs_block_group_cache *
btrfs_create_block_group_cache(struct btrfs_fs_info *fs_info,
                               u64 start, u64 size)
{
        struct btrfs_block_group_cache *cache;

        cache = kzalloc(sizeof(*cache), GFP_NOFS);
        if (!cache)
                return NULL;

        cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
                                        GFP_NOFS);
        if (!cache->free_space_ctl) {
                kfree(cache);
                return NULL;
        }

        cache->key.objectid = start;
        cache->key.offset = size;
        cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;

        cache->fs_info = fs_info;
        cache->full_stripe_len = btrfs_full_stripe_len(fs_info, start);
        set_free_space_tree_thresholds(cache);

        atomic_set(&cache->count, 1);
        spin_lock_init(&cache->lock);
        init_rwsem(&cache->data_rwsem);
        INIT_LIST_HEAD(&cache->list);
        INIT_LIST_HEAD(&cache->cluster_list);
        INIT_LIST_HEAD(&cache->bg_list);
        INIT_LIST_HEAD(&cache->ro_list);
        INIT_LIST_HEAD(&cache->dirty_list);
        INIT_LIST_HEAD(&cache->io_list);
        btrfs_init_free_space_ctl(cache);
        atomic_set(&cache->trimming, 0);
        mutex_init(&cache->free_space_lock);
        btrfs_init_full_stripe_locks_tree(&cache->full_stripe_locks_root);

        return cache;
}
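/*
 * btrfs_create_block_group_cache() is a standard two-step constructor:
 * allocate the outer object, then the embedded free_space_ctl, and on
 * the second failure free the first allocation before returning NULL so
 * the partial-failure path leaks nothing.  The same shape in a
 * self-contained form (hypothetical structs):
 */
#if 0   /* illustrative sketch, not part of the kernel build */
#include <stdlib.h>

struct demo_ctl { int dummy; };

struct demo_cache {
        struct demo_ctl *ctl;
        unsigned long long start, size;
};

static struct demo_cache *demo_create_cache(unsigned long long start,
                                            unsigned long long size)
{
        struct demo_cache *cache;

        cache = calloc(1, sizeof(*cache));
        if (!cache)
                return NULL;

        cache->ctl = calloc(1, sizeof(*cache->ctl));
        if (!cache->ctl) {
                free(cache);            /* unwind the partial construction */
                return NULL;
        }

        cache->start = start;
        cache->size = size;
        return cache;
}
#endif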
int btrfs_read_block_groups(struct btrfs_fs_info *info)
{
        struct btrfs_path *path;
        int ret;
        struct btrfs_block_group_cache *cache;
        struct btrfs_space_info *space_info;
        struct btrfs_key key;
        struct btrfs_key found_key;
        struct extent_buffer *leaf;
        int need_clear = 0;
        u64 cache_gen;
        u64 feature;
        int mixed;

        feature = btrfs_super_incompat_flags(info->super_copy);
        mixed = !!(feature & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS);

        key.objectid = 0;
        key.offset = 0;
        key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;
        path->reada = READA_FORWARD;

        cache_gen = btrfs_super_cache_generation(info->super_copy);
        if (btrfs_test_opt(info, SPACE_CACHE) &&
            btrfs_super_generation(info->super_copy) != cache_gen)
                need_clear = 1;
        if (btrfs_test_opt(info, CLEAR_CACHE))
                need_clear = 1;

        while (1) {
                ret = find_first_block_group(info, path, &key);
                if (ret > 0)
                        break;
                if (ret != 0)
                        goto error;

                leaf = path->nodes[0];
                btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

                cache = btrfs_create_block_group_cache(info, found_key.objectid,
                                                       found_key.offset);
                if (!cache) {
                        ret = -ENOMEM;
                        goto error;
                }

                if (need_clear) {
                        /*
                         * When we mount with old space cache, we need to
                         * set BTRFS_DC_CLEAR and set dirty flag.
                         *
                         * a) Setting 'BTRFS_DC_CLEAR' makes sure that we
                         *    truncate the old free space cache inode and
                         *    setup a new one.
                         * b) Setting 'dirty flag' makes sure that we flush
                         *    the new space cache info onto disk.
                         */
                        if (btrfs_test_opt(info, SPACE_CACHE))
                                cache->disk_cache_state = BTRFS_DC_CLEAR;
                }

                read_extent_buffer(leaf, &cache->item,
                                   btrfs_item_ptr_offset(leaf, path->slots[0]),
                                   sizeof(cache->item));
                cache->flags = btrfs_block_group_flags(&cache->item);
                if (!mixed &&
                    ((cache->flags & BTRFS_BLOCK_GROUP_METADATA) &&
                     (cache->flags & BTRFS_BLOCK_GROUP_DATA))) {
                        btrfs_err(info,
"bg %llu is a mixed block group but filesystem hasn't enabled mixed block groups",
                                  cache->key.objectid);
                        ret = -EINVAL;
                        goto error;
                }

                key.objectid = found_key.objectid + found_key.offset;
                btrfs_release_path(path);

                /*
                 * We need to exclude the super stripes now so that the space
                 * info has super bytes accounted for, otherwise we'll think
                 * we have more space than we actually do.
                 */
                ret = exclude_super_stripes(info, cache);
                if (ret) {
                        /*
                         * We may have excluded something, so call this just in
                         * case.
                         */
                        free_excluded_extents(info, cache);
                        btrfs_put_block_group(cache);
                        goto error;
                }

                /*
                 * check for two cases, either we are full, and therefore
                 * don't need to bother with the caching work since we won't
                 * find any space, or we are empty, and we can just add all
                 * the space in and be done with it.  This saves us _a lot_ of
                 * time, particularly in the full case.
                 */
                if (found_key.offset == btrfs_block_group_used(&cache->item)) {
                        cache->last_byte_to_unpin = (u64)-1;
                        cache->cached = BTRFS_CACHE_FINISHED;
                        free_excluded_extents(info, cache);
                } else if (btrfs_block_group_used(&cache->item) == 0) {
                        cache->last_byte_to_unpin = (u64)-1;
                        cache->cached = BTRFS_CACHE_FINISHED;
                        add_new_free_space(cache, info,
                                           found_key.objectid,
                                           found_key.objectid +
                                           found_key.offset);
                        free_excluded_extents(info, cache);
                }

                ret = btrfs_add_block_group_cache(info, cache);
                if (ret) {
                        btrfs_remove_free_space_cache(cache);
                        btrfs_put_block_group(cache);
                        goto error;
                }

                trace_btrfs_add_block_group(info, cache, 0);
                update_space_info(info, cache->flags, found_key.offset,
                                  btrfs_block_group_used(&cache->item),
                                  cache->bytes_super, &space_info);

                cache->space_info = space_info;

                link_block_group(cache);

                set_avail_alloc_bits(info, cache->flags);
                if (btrfs_chunk_readonly(info, cache->key.objectid)) {
                        inc_block_group_ro(cache, 1);
                } else if (btrfs_block_group_used(&cache->item) == 0) {
                        spin_lock(&info->unused_bgs_lock);
                        /* Should always be true but just in case. */
                        if (list_empty(&cache->bg_list)) {
                                btrfs_get_block_group(cache);
                                list_add_tail(&cache->bg_list,
                                              &info->unused_bgs);
                        }
                        spin_unlock(&info->unused_bgs_lock);
                }
        }

        list_for_each_entry_rcu(space_info, &info->space_info, list) {
                if (!(get_alloc_profile(info, space_info->flags) &
                      (BTRFS_BLOCK_GROUP_RAID10 |
                       BTRFS_BLOCK_GROUP_RAID1 |
                       BTRFS_BLOCK_GROUP_RAID5 |
                       BTRFS_BLOCK_GROUP_RAID6 |
                       BTRFS_BLOCK_GROUP_DUP)))
                        continue;
                /*
                 * avoid allocating from un-mirrored block group if there are
                 * mirrored block groups.
                 */
                list_for_each_entry(cache,
                                &space_info->block_groups[BTRFS_RAID_RAID0],
                                list)
                        inc_block_group_ro(cache, 1);
                list_for_each_entry(cache,
                                &space_info->block_groups[BTRFS_RAID_SINGLE],
                                list)
                        inc_block_group_ro(cache, 1);
        }

        init_global_block_rsv(info);
        ret = 0;
error:
        btrfs_free_path(path);
        return ret;
}
void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans,
                                       struct btrfs_fs_info *fs_info)
{
        struct btrfs_block_group_cache *block_group, *tmp;
        struct btrfs_root *extent_root = fs_info->extent_root;
        struct btrfs_block_group_item item;
        struct btrfs_key key;
        int ret = 0;
        bool can_flush_pending_bgs = trans->can_flush_pending_bgs;

        trans->can_flush_pending_bgs = false;
        list_for_each_entry_safe(block_group, tmp, &trans->new_bgs, bg_list) {
                if (ret)
                        goto next;

                spin_lock(&block_group->lock);
                memcpy(&item, &block_group->item, sizeof(item));
                memcpy(&key, &block_group->key, sizeof(key));
                spin_unlock(&block_group->lock);

                ret = btrfs_insert_item(trans, extent_root, &key, &item,
                                        sizeof(item));
                if (ret)
                        btrfs_abort_transaction(trans, ret);
                ret = btrfs_finish_chunk_alloc(trans, fs_info, key.objectid,
                                               key.offset);
                if (ret)
                        btrfs_abort_transaction(trans, ret);
                add_block_group_free_space(trans, fs_info, block_group);
                /* already aborted the transaction if it failed. */
next:
                list_del_init(&block_group->bg_list);
        }
        trans->can_flush_pending_bgs = can_flush_pending_bgs;
}
int btrfs_make_block_group(struct btrfs_trans_handle *trans,
                           struct btrfs_fs_info *fs_info, u64 bytes_used,
                           u64 type, u64 chunk_offset, u64 size)
{
        struct btrfs_block_group_cache *cache;
        int ret;

        btrfs_set_log_full_commit(fs_info, trans);

        cache = btrfs_create_block_group_cache(fs_info, chunk_offset, size);
        if (!cache)
                return -ENOMEM;

        btrfs_set_block_group_used(&cache->item, bytes_used);
        btrfs_set_block_group_chunk_objectid(&cache->item,
                                             BTRFS_FIRST_CHUNK_TREE_OBJECTID);
        btrfs_set_block_group_flags(&cache->item, type);

        cache->flags = type;
        cache->last_byte_to_unpin = (u64)-1;
        cache->cached = BTRFS_CACHE_FINISHED;
        cache->needs_free_space = 1;
        ret = exclude_super_stripes(fs_info, cache);
        if (ret) {
                /*
                 * We may have excluded something, so call this just in
                 * case.
                 */
                free_excluded_extents(fs_info, cache);
                btrfs_put_block_group(cache);
                return ret;
        }

        add_new_free_space(cache, fs_info, chunk_offset, chunk_offset + size);

        free_excluded_extents(fs_info, cache);

#ifdef CONFIG_BTRFS_DEBUG
        if (btrfs_should_fragment_free_space(cache)) {
                u64 new_bytes_used = size - bytes_used;

                bytes_used += new_bytes_used >> 1;
                fragment_free_space(cache);
        }
#endif
        /*
         * Ensure the corresponding space_info object is created and
         * assigned to our block group. We want our bg to be added to the rbtree
         * with its ->space_info set.
         */
        cache->space_info = __find_space_info(fs_info, cache->flags);
        if (!cache->space_info) {
                ret = create_space_info(fs_info, cache->flags,
                                        &cache->space_info);
                if (ret) {
                        btrfs_remove_free_space_cache(cache);
                        btrfs_put_block_group(cache);
                        return ret;
                }
        }

        ret = btrfs_add_block_group_cache(fs_info, cache);
        if (ret) {
                btrfs_remove_free_space_cache(cache);
                btrfs_put_block_group(cache);
                return ret;
        }

        /*
         * Now that our block group has its ->space_info set and is inserted in
         * the rbtree, update the space info's counters.
         */
        trace_btrfs_add_block_group(fs_info, cache, 1);
        update_space_info(fs_info, cache->flags, size, bytes_used,
                          cache->bytes_super, &cache->space_info);
        update_global_block_rsv(fs_info);

        link_block_group(cache);

        list_add_tail(&cache->bg_list, &trans->new_bgs);

        set_avail_alloc_bits(fs_info, type);
        return 0;
}
static void clear_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
{
        u64 extra_flags = chunk_to_extended(flags) &
                                BTRFS_EXTENDED_PROFILE_MASK;

        write_seqlock(&fs_info->profiles_lock);
        if (flags & BTRFS_BLOCK_GROUP_DATA)
                fs_info->avail_data_alloc_bits &= ~extra_flags;
        if (flags & BTRFS_BLOCK_GROUP_METADATA)
                fs_info->avail_metadata_alloc_bits &= ~extra_flags;
        if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
                fs_info->avail_system_alloc_bits &= ~extra_flags;
        write_sequnlock(&fs_info->profiles_lock);
}
int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
                             struct btrfs_fs_info *fs_info, u64 group_start,
                             struct extent_map *em)
{
        struct btrfs_root *root = fs_info->extent_root;
        struct btrfs_path *path;
        struct btrfs_block_group_cache *block_group;
        struct btrfs_free_cluster *cluster;
        struct btrfs_root *tree_root = fs_info->tree_root;
        struct btrfs_key key;
        struct inode *inode;
        struct kobject *kobj = NULL;
        int ret;
        int index;
        int factor;
        struct btrfs_caching_control *caching_ctl = NULL;
        bool remove_em;

        block_group = btrfs_lookup_block_group(fs_info, group_start);
        BUG_ON(!block_group);
        BUG_ON(!block_group->ro);

        /*
         * Free the reserved super bytes from this block group before
         * removing it.
         */
        free_excluded_extents(fs_info, block_group);
        btrfs_free_ref_tree_range(fs_info, block_group->key.objectid,
                                  block_group->key.offset);

        memcpy(&key, &block_group->key, sizeof(key));
        index = get_block_group_index(block_group);
        if (block_group->flags & (BTRFS_BLOCK_GROUP_DUP |
                                  BTRFS_BLOCK_GROUP_RAID1 |
                                  BTRFS_BLOCK_GROUP_RAID10))
                factor = 2;
        else
                factor = 1;

        /* make sure this block group isn't part of an allocation cluster */
        cluster = &fs_info->data_alloc_cluster;
        spin_lock(&cluster->refill_lock);
        btrfs_return_cluster_to_free_space(block_group, cluster);
        spin_unlock(&cluster->refill_lock);

        /*
         * make sure this block group isn't part of a metadata
         * allocation cluster
         */
        cluster = &fs_info->meta_alloc_cluster;
        spin_lock(&cluster->refill_lock);
        btrfs_return_cluster_to_free_space(block_group, cluster);
        spin_unlock(&cluster->refill_lock);

        path = btrfs_alloc_path();
        if (!path) {
                ret = -ENOMEM;
                goto out;
        }

        /*
         * get the inode first so any iput calls done for the io_list
         * aren't the final iput (no unlinks allowed now)
         */
        inode = lookup_free_space_inode(fs_info, block_group, path);

        mutex_lock(&trans->transaction->cache_write_mutex);
        /*
         * make sure our free space cache IO is done before removing the
         * free space inode
         */
        spin_lock(&trans->transaction->dirty_bgs_lock);
        if (!list_empty(&block_group->io_list)) {
                list_del_init(&block_group->io_list);

                WARN_ON(!IS_ERR(inode) && inode != block_group->io_ctl.inode);

                spin_unlock(&trans->transaction->dirty_bgs_lock);
                btrfs_wait_cache_io(trans, block_group, path);
                btrfs_put_block_group(block_group);
                spin_lock(&trans->transaction->dirty_bgs_lock);
        }

        if (!list_empty(&block_group->dirty_list)) {
                list_del_init(&block_group->dirty_list);
                btrfs_put_block_group(block_group);
        }
        spin_unlock(&trans->transaction->dirty_bgs_lock);
        mutex_unlock(&trans->transaction->cache_write_mutex);

        if (!IS_ERR(inode)) {
                ret = btrfs_orphan_add(trans, BTRFS_I(inode));
                if (ret) {
                        btrfs_add_delayed_iput(inode);
                        goto out;
                }
                clear_nlink(inode);
                /* One for the block groups ref */
                spin_lock(&block_group->lock);
                if (block_group->iref) {
                        block_group->iref = 0;
                        block_group->inode = NULL;
                        spin_unlock(&block_group->lock);
                        iput(inode);
                } else {
                        spin_unlock(&block_group->lock);
                }
                /* One for our lookup ref */
                btrfs_add_delayed_iput(inode);
        }

        key.objectid = BTRFS_FREE_SPACE_OBJECTID;
        key.offset = block_group->key.objectid;
        key.type = 0;

        ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1);
        if (ret < 0)
                goto out;
        if (ret > 0)
                btrfs_release_path(path);
        if (ret == 0) {
                ret = btrfs_del_item(trans, tree_root, path);
                if (ret)
                        goto out;
                btrfs_release_path(path);
        }

        spin_lock(&fs_info->block_group_cache_lock);
        rb_erase(&block_group->cache_node,
                 &fs_info->block_group_cache_tree);
        RB_CLEAR_NODE(&block_group->cache_node);

        if (fs_info->first_logical_byte == block_group->key.objectid)
                fs_info->first_logical_byte = (u64)-1;
        spin_unlock(&fs_info->block_group_cache_lock);

        down_write(&block_group->space_info->groups_sem);
        /*
         * we must use list_del_init so people can check to see if they
         * are still on the list after taking the semaphore
         */
        list_del_init(&block_group->list);
        if (list_empty(&block_group->space_info->block_groups[index])) {
                kobj = block_group->space_info->block_group_kobjs[index];
                block_group->space_info->block_group_kobjs[index] = NULL;
                clear_avail_alloc_bits(fs_info, block_group->flags);
        }
        up_write(&block_group->space_info->groups_sem);
        if (kobj) {
                kobject_del(kobj);
                kobject_put(kobj);
        }

        if (block_group->has_caching_ctl)
                caching_ctl = get_caching_control(block_group);
        if (block_group->cached == BTRFS_CACHE_STARTED)
                wait_block_group_cache_done(block_group);
        if (block_group->has_caching_ctl) {
                down_write(&fs_info->commit_root_sem);
                if (!caching_ctl) {
                        struct btrfs_caching_control *ctl;

                        list_for_each_entry(ctl,
                                    &fs_info->caching_block_groups, list)
                                if (ctl->block_group == block_group) {
                                        caching_ctl = ctl;
                                        refcount_inc(&caching_ctl->count);
                                        break;
                                }
                }
                if (caching_ctl)
                        list_del_init(&caching_ctl->list);
                up_write(&fs_info->commit_root_sem);
                if (caching_ctl) {
                        /* Once for the caching bgs list and once for us. */
                        put_caching_control(caching_ctl);
                        put_caching_control(caching_ctl);
                }
        }

        spin_lock(&trans->transaction->dirty_bgs_lock);
        if (!list_empty(&block_group->dirty_list)) {
                WARN_ON(1);
        }
        if (!list_empty(&block_group->io_list)) {
                WARN_ON(1);
        }
        spin_unlock(&trans->transaction->dirty_bgs_lock);
        btrfs_remove_free_space_cache(block_group);

        spin_lock(&block_group->space_info->lock);
        list_del_init(&block_group->ro_list);

        if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
                WARN_ON(block_group->space_info->total_bytes
                        < block_group->key.offset);
                WARN_ON(block_group->space_info->bytes_readonly
                        < block_group->key.offset);
                WARN_ON(block_group->space_info->disk_total
                        < block_group->key.offset * factor);
        }
        block_group->space_info->total_bytes -= block_group->key.offset;
        block_group->space_info->bytes_readonly -= block_group->key.offset;
        block_group->space_info->disk_total -= block_group->key.offset * factor;

        spin_unlock(&block_group->space_info->lock);

        memcpy(&key, &block_group->key, sizeof(key));

        mutex_lock(&fs_info->chunk_mutex);
        if (!list_empty(&em->list)) {
                /* We're in the transaction->pending_chunks list. */
                free_extent_map(em);
        }
        spin_lock(&block_group->lock);
        block_group->removed = 1;
        /*
         * At this point trimming can't start on this block group, because we
         * removed the block group from the tree fs_info->block_group_cache_tree
         * so no one can find it anymore and even if someone already got this
         * block group before we removed it from the rbtree, they have already
         * incremented block_group->trimming - if they didn't, they won't find
         * any free space entries because we already removed them all when we
         * called btrfs_remove_free_space_cache().
         *
         * And we must not remove the extent map from the fs_info->mapping_tree
         * to prevent the same logical address range and physical device space
         * ranges from being reused for a new block group. This is because our
         * fs trim operation (btrfs_trim_fs() / btrfs_ioctl_fitrim()) is
         * completely transactionless, so while it is trimming a range the
         * currently running transaction might finish and a new one start,
         * allowing for new block groups to be created that can reuse the same
         * physical device locations unless we take this special care.
         *
         * There may also be an implicit trim operation if the file system
         * is mounted with -odiscard. The same protections must remain
         * in place until the extents have been discarded completely when
         * the transaction commit has completed.
         */
        remove_em = (atomic_read(&block_group->trimming) == 0);
        /*
         * Make sure a trimmer task always sees the em in the pinned_chunks list
         * if it sees block_group->removed == 1 (needs to lock block_group->lock
         * before checking block_group->removed).
         */
        if (!remove_em) {
                /*
                 * Our em might be in trans->transaction->pending_chunks which
                 * is protected by fs_info->chunk_mutex ([lock|unlock]_chunks),
                 * and so is the fs_info->pinned_chunks list.
                 *
                 * So at this point we must be holding the chunk_mutex to avoid
                 * any races with chunk allocation (more specifically at
                 * volumes.c:contains_pending_extent()), to ensure it always
                 * sees the em, either in the pending_chunks list or in the
                 * pinned_chunks list.
                 */
                list_move_tail(&em->list, &fs_info->pinned_chunks);
        }
        spin_unlock(&block_group->lock);

        if (remove_em) {
                struct extent_map_tree *em_tree;

                em_tree = &fs_info->mapping_tree.map_tree;
                write_lock(&em_tree->lock);
                /*
                 * The em might be in the pending_chunks list, so make sure the
                 * chunk mutex is locked, since remove_extent_mapping() will
                 * delete us from that list.
                 */
                remove_extent_mapping(em_tree, em);
                write_unlock(&em_tree->lock);
                /* once for the tree */
                free_extent_map(em);
        }

        mutex_unlock(&fs_info->chunk_mutex);

        ret = remove_block_group_free_space(trans, fs_info, block_group);
        if (ret)
                goto out;

        btrfs_put_block_group(block_group);
        btrfs_put_block_group(block_group);

        ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
        if (ret > 0)
                ret = -EIO;
        if (ret < 0)
                goto out;

        ret = btrfs_del_item(trans, root, path);
out:
        btrfs_free_path(path);
        return ret;
}
struct btrfs_trans_handle *
btrfs_start_trans_remove_block_group(struct btrfs_fs_info *fs_info,
                                     const u64 chunk_offset)
{
        struct extent_map_tree *em_tree = &fs_info->mapping_tree.map_tree;
        struct extent_map *em;
        struct map_lookup *map;
        unsigned int num_items;

        read_lock(&em_tree->lock);
        em = lookup_extent_mapping(em_tree, chunk_offset, 1);
        read_unlock(&em_tree->lock);
        ASSERT(em && em->start == chunk_offset);

        /*
         * We need to reserve 3 + N units from the metadata space info in order
         * to remove a block group (done at btrfs_remove_chunk() and at
         * btrfs_remove_block_group()), which are used for:
         *
         * 1 unit for adding the free space inode's orphan (located in the tree
         * of tree roots).
         * 1 unit for deleting the block group item (located in the extent
         * tree).
         * 1 unit for deleting the free space item (located in tree of tree
         * roots).
         * N units for deleting N device extent items corresponding to each
         * stripe (located in the device tree).
         *
         * In order to remove a block group we also need to reserve units in the
         * system space info in order to update the chunk tree (update one or
         * more device items and remove one chunk item), but this is done at
         * btrfs_remove_chunk() through a call to check_system_chunk().
         */
        map = em->map_lookup;
        num_items = 3 + map->num_stripes;
        free_extent_map(em);

        return btrfs_start_transaction_fallback_global_rsv(fs_info->extent_root,
                                                           num_items, 1);
}
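/*
 * The reservation above is just counting btree modifications: one item
 * for the free space inode's orphan, one for the block group item, one
 * for the free space item, plus one device extent item per stripe.
 * Restated as arithmetic (hypothetical helper name):
 */
#if 0   /* illustrative sketch, not part of the kernel build */
static unsigned int demo_remove_bg_items(unsigned int num_stripes)
{
        /* orphan + block group item + free space item + N dev extents */
        return 3 + num_stripes;
}
#endif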
/*
 * Process the unused_bgs list and remove any that don't have any allocated
 * space inside of them.
 */
void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
{
        struct btrfs_block_group_cache *block_group;
        struct btrfs_space_info *space_info;
        struct btrfs_trans_handle *trans;
        int ret = 0;

        if (!test_bit(BTRFS_FS_OPEN, &fs_info->flags))
                return;

        spin_lock(&fs_info->unused_bgs_lock);
        while (!list_empty(&fs_info->unused_bgs)) {
                u64 start, end;
                int trimming;

                block_group = list_first_entry(&fs_info->unused_bgs,
                                               struct btrfs_block_group_cache,
                                               bg_list);
                list_del_init(&block_group->bg_list);

                space_info = block_group->space_info;

                if (ret || btrfs_mixed_space_info(space_info)) {
                        btrfs_put_block_group(block_group);
                        continue;
                }
                spin_unlock(&fs_info->unused_bgs_lock);

                mutex_lock(&fs_info->delete_unused_bgs_mutex);

                /* Don't want to race with allocators so take the groups_sem */
                down_write(&space_info->groups_sem);
                spin_lock(&block_group->lock);
                if (block_group->reserved ||
                    btrfs_block_group_used(&block_group->item) ||
                    block_group->ro ||
                    list_is_singular(&block_group->list)) {
                        /*
                         * We want to bail if we made new allocations or have
                         * outstanding allocations in this block group.  We do
                         * the ro check in case balance is currently acting on
                         * this block group.
                         */
                        spin_unlock(&block_group->lock);
                        up_write(&space_info->groups_sem);
                        goto next;
                }
                spin_unlock(&block_group->lock);

                /* We don't want to force the issue, only flip if it's ok. */
                ret = inc_block_group_ro(block_group, 0);
                up_write(&space_info->groups_sem);
                if (ret < 0) {
                        ret = 0;
                        goto next;
                }

                /*
                 * Want to do this before we do anything else so we can recover
                 * properly if we fail to join the transaction.
                 */
                trans = btrfs_start_trans_remove_block_group(fs_info,
                                                     block_group->key.objectid);
                if (IS_ERR(trans)) {
                        btrfs_dec_block_group_ro(block_group);
                        ret = PTR_ERR(trans);
                        goto next;
                }

                /*
                 * We could have pending pinned extents for this block group,
                 * just delete them, we don't care about them anymore.
                 */
                start = block_group->key.objectid;
                end = start + block_group->key.offset - 1;
                /*
                 * Hold the unused_bg_unpin_mutex lock to avoid racing with
                 * btrfs_finish_extent_commit(). If we are at transaction N,
                 * another task might be running finish_extent_commit() for the
                 * previous transaction N - 1, and have seen a range belonging
                 * to the block group in freed_extents[] before we were able to
                 * clear the whole block group range from freed_extents[]. This
                 * means that task can lookup for the block group after we
                 * unpinned it from freed_extents[] and removed it, leading to
                 * a BUG_ON() at btrfs_unpin_extent_range().
                 */
                mutex_lock(&fs_info->unused_bg_unpin_mutex);
                ret = clear_extent_bits(&fs_info->freed_extents[0], start, end,
                                        EXTENT_DIRTY);
                if (ret) {
                        mutex_unlock(&fs_info->unused_bg_unpin_mutex);
                        btrfs_dec_block_group_ro(block_group);
                        goto end_trans;
                }
                ret = clear_extent_bits(&fs_info->freed_extents[1], start, end,
                                        EXTENT_DIRTY);
                if (ret) {
                        mutex_unlock(&fs_info->unused_bg_unpin_mutex);
                        btrfs_dec_block_group_ro(block_group);
                        goto end_trans;
                }
                mutex_unlock(&fs_info->unused_bg_unpin_mutex);

                /* Reset pinned so btrfs_put_block_group doesn't complain */
                spin_lock(&space_info->lock);
                spin_lock(&block_group->lock);

                space_info->bytes_pinned -= block_group->pinned;
                space_info->bytes_readonly += block_group->pinned;
                percpu_counter_add(&space_info->total_bytes_pinned,
                                   -block_group->pinned);
                block_group->pinned = 0;

                spin_unlock(&block_group->lock);
                spin_unlock(&space_info->lock);

                /* DISCARD can flip during remount */
                trimming = btrfs_test_opt(fs_info, DISCARD);

                /* Implicit trim during transaction commit. */
                if (trimming)
                        btrfs_get_block_group_trimming(block_group);

                /*
                 * Btrfs_remove_chunk will abort the transaction if things go
                 * horribly wrong.
                 */
                ret = btrfs_remove_chunk(trans, fs_info,
                                         block_group->key.objectid);

                if (ret) {
                        if (trimming)
                                btrfs_put_block_group_trimming(block_group);
                        goto end_trans;
                }

                /*
                 * If we're not mounted with -odiscard, we can just forget
                 * about this block group. Otherwise we'll need to wait
                 * until transaction commit to do the actual discard.
                 */
                if (trimming) {
                        spin_lock(&fs_info->unused_bgs_lock);
                        /*
                         * A concurrent scrub might have added us to the list
                         * fs_info->unused_bgs, so use a list_move operation
                         * to add the block group to the deleted_bgs list.
                         */
                        list_move(&block_group->bg_list,
                                  &trans->transaction->deleted_bgs);
                        spin_unlock(&fs_info->unused_bgs_lock);
                        btrfs_get_block_group(block_group);
                }
end_trans:
                btrfs_end_transaction(trans);
next:
                mutex_unlock(&fs_info->delete_unused_bgs_mutex);
                btrfs_put_block_group(block_group);
                spin_lock(&fs_info->unused_bgs_lock);
        }
        spin_unlock(&fs_info->unused_bgs_lock);
}
int btrfs_init_space_info(struct btrfs_fs_info *fs_info)
{
	struct btrfs_space_info *space_info;
	struct btrfs_super_block *disk_super;
	u64 features;
	u64 flags;
	int mixed = 0;
	int ret;

	disk_super = fs_info->super_copy;
	if (!btrfs_super_root(disk_super))
		return -EINVAL;

	features = btrfs_super_incompat_flags(disk_super);
	if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
		mixed = 1;

	flags = BTRFS_BLOCK_GROUP_SYSTEM;
	ret = create_space_info(fs_info, flags, &space_info);
	if (ret)
		goto out;

	if (mixed) {
		flags = BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA;
		ret = create_space_info(fs_info, flags, &space_info);
	} else {
		flags = BTRFS_BLOCK_GROUP_METADATA;
		ret = create_space_info(fs_info, flags, &space_info);
		if (ret)
			goto out;

		flags = BTRFS_BLOCK_GROUP_DATA;
		ret = create_space_info(fs_info, flags, &space_info);
	}
out:
	return ret;
}
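
/*
 * Illustrative sketch (not part of the original file): the mixed-groups
 * test done above via the raw incompat flags can also be expressed with
 * the btrfs_fs_incompat() helper. example_is_mixed() is a hypothetical
 * name used only for this sketch.
 */
static inline bool example_is_mixed(struct btrfs_fs_info *fs_info)
{
	/* True when data and metadata share the same block groups. */
	return btrfs_fs_incompat(fs_info, MIXED_GROUPS);
}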
int btrfs_error_unpin_extent_range(struct btrfs_fs_info *fs_info,
				   u64 start, u64 end)
{
	return unpin_extent_range(fs_info, start, end, false);
}
/*
 * It used to be that old block groups would be left around forever.
 * Iterating over them would be enough to trim unused space. Since we
 * now automatically remove them, we also need to iterate over unallocated
 * space.
 *
 * We don't want a transaction for this since the discard may take a
 * substantial amount of time. We don't require that a transaction be
 * running, but we do need to take a running transaction into account
 * to ensure that we're not discarding chunks that were released in
 * the current transaction.
 *
 * Holding the chunks lock will prevent other threads from allocating
 * or releasing chunks, but it won't prevent a running transaction
 * from committing and releasing the memory that the pending chunks
 * list head uses. For that, we need to take a reference to the
 * transaction.
 */
static int btrfs_trim_free_extents(struct btrfs_device *device,
				   u64 minlen, u64 *trimmed)
{
	u64 start = 0, len = 0;
	int ret;

	*trimmed = 0;

	/* Not writeable = nothing to do. */
	if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
		return 0;

	/* No free space = nothing to do. */
	if (device->total_bytes <= device->bytes_used)
		return 0;

	ret = 0;

	while (1) {
		struct btrfs_fs_info *fs_info = device->fs_info;
		struct btrfs_transaction *trans;
		u64 bytes;

		ret = mutex_lock_interruptible(&fs_info->chunk_mutex);
		if (ret)
			return ret;

		down_read(&fs_info->commit_root_sem);

		spin_lock(&fs_info->trans_lock);
		trans = fs_info->running_transaction;
		if (trans)
			refcount_inc(&trans->use_count);
		spin_unlock(&fs_info->trans_lock);

		ret = find_free_dev_extent_start(trans, device, minlen, start,
						 &start, &len);
		if (trans)
			btrfs_put_transaction(trans);

		if (ret) {
			up_read(&fs_info->commit_root_sem);
			mutex_unlock(&fs_info->chunk_mutex);
			if (ret == -ENOSPC)
				ret = 0;
			break;
		}

		ret = btrfs_issue_discard(device->bdev, start, len, &bytes);
		up_read(&fs_info->commit_root_sem);
		mutex_unlock(&fs_info->chunk_mutex);

		if (ret)
			break;

		start += len;
		*trimmed += bytes;

		if (fatal_signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}

		cond_resched();
	}

	return ret;
}
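
/*
 * Illustrative sketch (not part of the original file): the
 * transaction-pinning step used inside btrfs_trim_free_extents() above,
 * shown in isolation. example_grab_running_trans() is a hypothetical
 * helper name; the locking and refcounting mirror the loop body.
 */
static struct btrfs_transaction *
example_grab_running_trans(struct btrfs_fs_info *fs_info)
{
	struct btrfs_transaction *trans;

	spin_lock(&fs_info->trans_lock);
	trans = fs_info->running_transaction;
	if (trans)
		refcount_inc(&trans->use_count);
	spin_unlock(&fs_info->trans_lock);

	/* Caller must drop the reference with btrfs_put_transaction(). */
	return trans;
}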
int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range)
{
	struct btrfs_block_group_cache *cache = NULL;
	struct btrfs_device *device;
	struct list_head *devices;
	u64 group_trimmed;
	u64 start;
	u64 end;
	u64 trimmed = 0;
	u64 total_bytes = btrfs_super_total_bytes(fs_info->super_copy);
	int ret = 0;

	/*
	 * try to trim all FS space, our block group may start from non-zero.
	 */
	if (range->len == total_bytes)
		cache = btrfs_lookup_first_block_group(fs_info, range->start);
	else
		cache = btrfs_lookup_block_group(fs_info, range->start);

	while (cache) {
		if (cache->key.objectid >= (range->start + range->len)) {
			btrfs_put_block_group(cache);
			break;
		}

		start = max(range->start, cache->key.objectid);
		end = min(range->start + range->len,
			  cache->key.objectid + cache->key.offset);

		if (end - start >= range->minlen) {
			if (!block_group_cache_done(cache)) {
				ret = cache_block_group(cache, 0);
				if (ret) {
					btrfs_put_block_group(cache);
					break;
				}
				ret = wait_block_group_cache_done(cache);
				if (ret) {
					btrfs_put_block_group(cache);
					break;
				}
			}
			ret = btrfs_trim_block_group(cache,
						     &group_trimmed,
						     start,
						     end,
						     range->minlen);

			trimmed += group_trimmed;
			if (ret) {
				btrfs_put_block_group(cache);
				break;
			}
		}

		cache = next_block_group(fs_info, cache);
	}

	mutex_lock(&fs_info->fs_devices->device_list_mutex);
	devices = &fs_info->fs_devices->alloc_list;
	list_for_each_entry(device, devices, dev_alloc_list) {
		ret = btrfs_trim_free_extents(device, range->minlen,
					      &group_trimmed);
		if (ret)
			break;

		trimmed += group_trimmed;
	}
	mutex_unlock(&fs_info->fs_devices->device_list_mutex);

	range->len = trimmed;
	return ret;
}
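
/*
 * Illustrative sketch (not part of the original file): a FITRIM-style
 * caller fills an fstrim_range and hands it to btrfs_trim_fs(); on
 * return, range.len holds the number of bytes actually trimmed.
 * example_trim_whole_fs() is a hypothetical name for this sketch.
 */
static int example_trim_whole_fs(struct btrfs_fs_info *fs_info, u64 minlen)
{
	struct fstrim_range range = {
		.start = 0,
		.len = btrfs_super_total_bytes(fs_info->super_copy),
		.minlen = minlen,
	};
	int ret;

	ret = btrfs_trim_fs(fs_info, &range);
	if (!ret)
		pr_info("trimmed %llu bytes\n", range.len);
	return ret;
}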
/*
 * btrfs_{start,end}_write_no_snapshotting() are similar to
 * mnt_{want,drop}_write(): they are used to prevent some tasks from
 * writing data into the page cache through nocow before the subvolume is
 * snapshotted (forcing the data to be flushed to disk only after the
 * snapshot is created), and to prevent operations while snapshotting is
 * ongoing that would make the snapshot inconsistent (writes followed by
 * expanding truncates, for example).
 */
void btrfs_end_write_no_snapshotting(struct btrfs_root *root)
{
	percpu_counter_dec(&root->subv_writers->counter);
	/*
	 * Make sure counter is updated before we wake up waiters.
	 */
	smp_mb();
	if (waitqueue_active(&root->subv_writers->wait))
		wake_up(&root->subv_writers->wait);
}
int btrfs_start_write_no_snapshotting(struct btrfs_root *root)
{
	if (atomic_read(&root->will_be_snapshotted))
		return 0;

	percpu_counter_inc(&root->subv_writers->counter);
	/*
	 * Make sure counter is updated before we check for snapshot creation.
	 */
	smp_mb();
	if (atomic_read(&root->will_be_snapshotted)) {
		btrfs_end_write_no_snapshotting(root);
		return 0;
	}
	return 1;
}
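
/*
 * Illustrative usage sketch (not part of the original file): nocow
 * writers are expected to bracket their writes with the pair above,
 * falling back to cow when a snapshot is pending. example_nocow_write()
 * is a hypothetical name used only for this sketch.
 */
static int example_nocow_write(struct btrfs_root *root)
{
	if (!btrfs_start_write_no_snapshotting(root))
		return -EAGAIN;	/* snapshot pending, caller must cow */

	/* ... write data through the nocow path here ... */

	btrfs_end_write_no_snapshotting(root);
	return 0;
}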
void btrfs_wait_for_snapshot_creation(struct btrfs_root *root)
{
	while (true) {
		int ret;

		ret = btrfs_start_write_no_snapshotting(root);
		if (ret)
			break;
		wait_on_atomic_t(&root->will_be_snapshotted, atomic_t_wait,
				 TASK_UNINTERRUPTIBLE);
	}
}
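
/*
 * Illustrative sketch (not part of the original file): the wake side of
 * the wait above. Snapshot creation is expected to drop its claim on
 * will_be_snapshotted and wake any waiters; this mirrors the pattern in
 * the snapshot ioctl path, but example_snapshot_finished() is a
 * hypothetical name and not a verbatim copy.
 */
static void example_snapshot_finished(struct btrfs_root *root)
{
	atomic_dec(&root->will_be_snapshotted);
	/* Pairs with wait_on_atomic_t() in btrfs_wait_for_snapshot_creation(). */
	wake_up_atomic_t(&root->will_be_snapshotted);
}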