extent-tree.c

/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/rcupdate.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/percpu_counter.h>
#include <linux/lockdep.h>
#include "hash.h"
#include "tree-log.h"
#include "disk-io.h"
#include "print-tree.h"
#include "volumes.h"
#include "raid56.h"
#include "locking.h"
#include "free-space-cache.h"
#include "free-space-tree.h"
#include "math.h"
#include "sysfs.h"
#include "qgroup.h"
#include "ref-verify.h"
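
/*
 * SCRAMBLE_DELAYED_REFS is a debugging aid: when defined, delayed refs are
 * processed in a scrambled order to expose hidden ordering dependencies.
 * It is kept disabled here via the #undef below.
 */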
#undef SCRAMBLE_DELAYED_REFS

/*
 * Control flags for do_chunk_alloc()'s force field.
 *
 * CHUNK_ALLOC_NO_FORCE means to only allocate a chunk
 * if we really need one.
 *
 * CHUNK_ALLOC_LIMITED means to only try and allocate one
 * if we have very few chunks already allocated.  This is
 * used as part of the clustering code to help make sure
 * we have a good pool of storage to cluster in, without
 * filling the FS with empty chunks.
 *
 * CHUNK_ALLOC_FORCE means it must try to allocate one.
 */
enum {
	CHUNK_ALLOC_NO_FORCE = 0,
	CHUNK_ALLOC_LIMITED = 1,
	CHUNK_ALLOC_FORCE = 2,
};

static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
			       struct btrfs_fs_info *fs_info,
			       struct btrfs_delayed_ref_node *node, u64 parent,
			       u64 root_objectid, u64 owner_objectid,
			       u64 owner_offset, int refs_to_drop,
			       struct btrfs_delayed_extent_op *extra_op);
static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
				    struct extent_buffer *leaf,
				    struct btrfs_extent_item *ei);
static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
				      struct btrfs_fs_info *fs_info,
				      u64 parent, u64 root_objectid,
				      u64 flags, u64 owner, u64 offset,
				      struct btrfs_key *ins, int ref_mod);
static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
				     struct btrfs_fs_info *fs_info,
				     u64 parent, u64 root_objectid,
				     u64 flags, struct btrfs_disk_key *key,
				     int level, struct btrfs_key *ins);
static int do_chunk_alloc(struct btrfs_trans_handle *trans,
			  struct btrfs_fs_info *fs_info, u64 flags,
			  int force);
static int find_next_key(struct btrfs_path *path, int level,
			 struct btrfs_key *key);
static void dump_space_info(struct btrfs_fs_info *fs_info,
			    struct btrfs_space_info *info, u64 bytes,
			    int dump_block_groups);
static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
			       u64 num_bytes);
static void space_info_add_new_bytes(struct btrfs_fs_info *fs_info,
				     struct btrfs_space_info *space_info,
				     u64 num_bytes);
static void space_info_add_old_bytes(struct btrfs_fs_info *fs_info,
				     struct btrfs_space_info *space_info,
				     u64 num_bytes);
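
/*
 * Return nonzero once caching of this block group has terminated, whether
 * it completed successfully or stopped with an error.  The barrier orders
 * this read of ->cached against the caching thread's earlier stores, so a
 * FINISHED result implies the cached free space entries are visible too.
 */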
static noinline int
block_group_cache_done(struct btrfs_block_group_cache *cache)
{
	smp_mb();
	return cache->cached == BTRFS_CACHE_FINISHED ||
		cache->cached == BTRFS_CACHE_ERROR;
}

static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
{
	return (cache->flags & bits) == bits;
}
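
/*
 * Block group caches are reference counted: btrfs_get_block_group() takes
 * a reference and btrfs_put_block_group() drops one, freeing the cache
 * (and its free space ctl) when the count reaches zero.
 */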
void btrfs_get_block_group(struct btrfs_block_group_cache *cache)
{
	atomic_inc(&cache->count);
}

void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
{
	if (atomic_dec_and_test(&cache->count)) {
		WARN_ON(cache->pinned > 0);
		WARN_ON(cache->reserved > 0);

		/*
		 * If not empty, someone is still holding mutex of
		 * full_stripe_lock, which can only be released by caller.
		 * And it will definitely cause use-after-free when caller
		 * tries to release full stripe lock.
		 *
		 * No better way to resolve, but only to warn.
		 */
		WARN_ON(!RB_EMPTY_ROOT(&cache->full_stripe_locks_root.root));
		kfree(cache->free_space_ctl);
		kfree(cache);
	}
}

/*
 * This adds the block group to the fs_info rb tree for the block group
 * cache.
 */
static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
				       struct btrfs_block_group_cache *block_group)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct btrfs_block_group_cache *cache;

	spin_lock(&info->block_group_cache_lock);
	p = &info->block_group_cache_tree.rb_node;

	while (*p) {
		parent = *p;
		cache = rb_entry(parent, struct btrfs_block_group_cache,
				 cache_node);
		if (block_group->key.objectid < cache->key.objectid) {
			p = &(*p)->rb_left;
		} else if (block_group->key.objectid > cache->key.objectid) {
			p = &(*p)->rb_right;
		} else {
			spin_unlock(&info->block_group_cache_lock);
			return -EEXIST;
		}
	}

	rb_link_node(&block_group->cache_node, parent, p);
	rb_insert_color(&block_group->cache_node,
			&info->block_group_cache_tree);

	if (info->first_logical_byte > block_group->key.objectid)
		info->first_logical_byte = block_group->key.objectid;

	spin_unlock(&info->block_group_cache_lock);

	return 0;
}

/*
 * This will return the block group at or after bytenr if contains is 0, else
 * it will return the block group that contains the bytenr.
 */
static struct btrfs_block_group_cache *
block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
			      int contains)
{
	struct btrfs_block_group_cache *cache, *ret = NULL;
	struct rb_node *n;
	u64 end, start;

	spin_lock(&info->block_group_cache_lock);
	n = info->block_group_cache_tree.rb_node;

	while (n) {
		cache = rb_entry(n, struct btrfs_block_group_cache,
				 cache_node);
		end = cache->key.objectid + cache->key.offset - 1;
		start = cache->key.objectid;

		if (bytenr < start) {
			if (!contains && (!ret || start < ret->key.objectid))
				ret = cache;
			n = n->rb_left;
		} else if (bytenr > start) {
			if (contains && bytenr <= end) {
				ret = cache;
				break;
			}
			n = n->rb_right;
		} else {
			ret = cache;
			break;
		}
	}
	if (ret) {
		btrfs_get_block_group(ret);
		if (bytenr == 0 && info->first_logical_byte > ret->key.objectid)
			info->first_logical_byte = ret->key.objectid;
	}
	spin_unlock(&info->block_group_cache_lock);

	return ret;
}
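
/*
 * Mark [start, start + num_bytes) with EXTENT_UPTODATE in both
 * freed_extents trees so the range is treated as excluded and is never
 * handed to the free space cache while the block group is being cached.
 */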
static int add_excluded_extent(struct btrfs_fs_info *fs_info,
			       u64 start, u64 num_bytes)
{
	u64 end = start + num_bytes - 1;
	set_extent_bits(&fs_info->freed_extents[0],
			start, end, EXTENT_UPTODATE);
	set_extent_bits(&fs_info->freed_extents[1],
			start, end, EXTENT_UPTODATE);
	return 0;
}

static void free_excluded_extents(struct btrfs_fs_info *fs_info,
				  struct btrfs_block_group_cache *cache)
{
	u64 start, end;

	start = cache->key.objectid;
	end = start + cache->key.offset - 1;

	clear_extent_bits(&fs_info->freed_extents[0],
			  start, end, EXTENT_UPTODATE);
	clear_extent_bits(&fs_info->freed_extents[1],
			  start, end, EXTENT_UPTODATE);
}
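
/*
 * Exclude the superblock mirrors that fall inside this block group from
 * free space: btrfs_rmap_block() maps each mirror's offset to the logical
 * ranges it occupies in the chunk, and every overlapping range is clamped
 * to the block group, accounted in ->bytes_super and marked excluded.
 */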
static int exclude_super_stripes(struct btrfs_fs_info *fs_info,
				 struct btrfs_block_group_cache *cache)
{
	u64 bytenr;
	u64 *logical;
	int stripe_len;
	int i, nr, ret;

	if (cache->key.objectid < BTRFS_SUPER_INFO_OFFSET) {
		stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->key.objectid;
		cache->bytes_super += stripe_len;
		ret = add_excluded_extent(fs_info, cache->key.objectid,
					  stripe_len);
		if (ret)
			return ret;
	}

	for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
		bytenr = btrfs_sb_offset(i);
		ret = btrfs_rmap_block(fs_info, cache->key.objectid,
				       bytenr, 0, &logical, &nr, &stripe_len);
		if (ret)
			return ret;

		while (nr--) {
			u64 start, len;

			if (logical[nr] > cache->key.objectid +
			    cache->key.offset)
				continue;

			if (logical[nr] + stripe_len <= cache->key.objectid)
				continue;

			start = logical[nr];
			if (start < cache->key.objectid) {
				start = cache->key.objectid;
				len = (logical[nr] + stripe_len) - start;
			} else {
				len = min_t(u64, stripe_len,
					    cache->key.objectid +
					    cache->key.offset - start);
			}

			cache->bytes_super += len;
			ret = add_excluded_extent(fs_info, start, len);
			if (ret) {
				kfree(logical);
				return ret;
			}
		}

		kfree(logical);
	}
	return 0;
}
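
/*
 * Grab a reference to the block group's caching control, or NULL if no
 * caching is in flight.  Callers pair this with put_caching_control().
 */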
static struct btrfs_caching_control *
get_caching_control(struct btrfs_block_group_cache *cache)
{
	struct btrfs_caching_control *ctl;

	spin_lock(&cache->lock);
	if (!cache->caching_ctl) {
		spin_unlock(&cache->lock);
		return NULL;
	}

	ctl = cache->caching_ctl;
	refcount_inc(&ctl->count);
	spin_unlock(&cache->lock);
	return ctl;
}

static void put_caching_control(struct btrfs_caching_control *ctl)
{
	if (refcount_dec_and_test(&ctl->count))
		kfree(ctl);
}

#ifdef CONFIG_BTRFS_DEBUG
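/*
 * Debug helper: deliberately fragment the block group's free space by
 * removing every other chunk-sized range, so allocator behavior under
 * fragmentation can be exercised.
 */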
static void fragment_free_space(struct btrfs_block_group_cache *block_group)
{
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	u64 start = block_group->key.objectid;
	u64 len = block_group->key.offset;
	u64 chunk = block_group->flags & BTRFS_BLOCK_GROUP_METADATA ?
		fs_info->nodesize : fs_info->sectorsize;
	u64 step = chunk << 1;

	while (len > chunk) {
		btrfs_remove_free_space(block_group, start, chunk);
		start += step;
		if (len < step)
			len = 0;
		else
			len -= step;
	}
}
#endif

/*
 * This is only called by cache_block_group(); since we could have freed
 * extents, we need to check the pinned_extents tree for any ranges that
 * can't be used yet, because their free space is only released once the
 * transaction commits.
 */
u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
		       struct btrfs_fs_info *info, u64 start, u64 end)
{
	u64 extent_start, extent_end, size, total_added = 0;
	int ret;

	while (start < end) {
		ret = find_first_extent_bit(info->pinned_extents, start,
					    &extent_start, &extent_end,
					    EXTENT_DIRTY | EXTENT_UPTODATE,
					    NULL);
		if (ret)
			break;

		if (extent_start <= start) {
			start = extent_end + 1;
		} else if (extent_start > start && extent_start < end) {
			size = extent_start - start;
			total_added += size;
			ret = btrfs_add_free_space(block_group, start,
						   size);
			BUG_ON(ret); /* -ENOMEM or logic error */
			start = extent_end + 1;
		} else {
			break;
		}
	}

	if (start < end) {
		size = end - start;
		total_added += size;
		ret = btrfs_add_free_space(block_group, start, size);
		BUG_ON(ret); /* -ENOMEM or logic error */
	}

	return total_added;
}
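
/*
 * Walk the extent tree for this block group, using the commit root so no
 * tree locks are required, and add every gap between allocated extents to
 * the free space cache, waking any waiters as progress is made.
 */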
static int load_extent_tree_free(struct btrfs_caching_control *caching_ctl)
{
	struct btrfs_block_group_cache *block_group = caching_ctl->block_group;
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	struct btrfs_root *extent_root = fs_info->extent_root;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	u64 total_found = 0;
	u64 last = 0;
	u32 nritems;
	int ret;
	bool wakeup = true;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);

#ifdef CONFIG_BTRFS_DEBUG
	/*
	 * If we're fragmenting we don't want to make anybody think we can
	 * allocate from this block group until we've had a chance to fragment
	 * the free space.
	 */
	if (btrfs_should_fragment_free_space(block_group))
		wakeup = false;
#endif
  376. /*
  377. * We don't want to deadlock with somebody trying to allocate a new
  378. * extent for the extent root while also trying to search the extent
  379. * root to add free space. So we skip locking and search the commit
  380. * root, since its read-only
  381. */
  382. path->skip_locking = 1;
  383. path->search_commit_root = 1;
  384. path->reada = READA_FORWARD;
  385. key.objectid = last;
  386. key.offset = 0;
  387. key.type = BTRFS_EXTENT_ITEM_KEY;
  388. next:
  389. ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
  390. if (ret < 0)
  391. goto out;
  392. leaf = path->nodes[0];
  393. nritems = btrfs_header_nritems(leaf);
  394. while (1) {
  395. if (btrfs_fs_closing(fs_info) > 1) {
  396. last = (u64)-1;
  397. break;
  398. }
  399. if (path->slots[0] < nritems) {
  400. btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
  401. } else {
  402. ret = find_next_key(path, 0, &key);
  403. if (ret)
  404. break;
  405. if (need_resched() ||
  406. rwsem_is_contended(&fs_info->commit_root_sem)) {
  407. if (wakeup)
  408. caching_ctl->progress = last;
  409. btrfs_release_path(path);
  410. up_read(&fs_info->commit_root_sem);
  411. mutex_unlock(&caching_ctl->mutex);
  412. cond_resched();
  413. mutex_lock(&caching_ctl->mutex);
  414. down_read(&fs_info->commit_root_sem);
  415. goto next;
  416. }
  417. ret = btrfs_next_leaf(extent_root, path);
  418. if (ret < 0)
  419. goto out;
  420. if (ret)
  421. break;
  422. leaf = path->nodes[0];
  423. nritems = btrfs_header_nritems(leaf);
  424. continue;
  425. }
  426. if (key.objectid < last) {
  427. key.objectid = last;
  428. key.offset = 0;
  429. key.type = BTRFS_EXTENT_ITEM_KEY;
  430. if (wakeup)
  431. caching_ctl->progress = last;
  432. btrfs_release_path(path);
  433. goto next;
  434. }
  435. if (key.objectid < block_group->key.objectid) {
  436. path->slots[0]++;
  437. continue;
  438. }
  439. if (key.objectid >= block_group->key.objectid +
  440. block_group->key.offset)
  441. break;
  442. if (key.type == BTRFS_EXTENT_ITEM_KEY ||
  443. key.type == BTRFS_METADATA_ITEM_KEY) {
  444. total_found += add_new_free_space(block_group,
  445. fs_info, last,
  446. key.objectid);
  447. if (key.type == BTRFS_METADATA_ITEM_KEY)
  448. last = key.objectid +
  449. fs_info->nodesize;
  450. else
  451. last = key.objectid + key.offset;
  452. if (total_found > CACHING_CTL_WAKE_UP) {
  453. total_found = 0;
  454. if (wakeup)
  455. wake_up(&caching_ctl->wait);
  456. }
  457. }
  458. path->slots[0]++;
  459. }
  460. ret = 0;
  461. total_found += add_new_free_space(block_group, fs_info, last,
  462. block_group->key.objectid +
  463. block_group->key.offset);
  464. caching_ctl->progress = (u64)-1;
  465. out:
  466. btrfs_free_path(path);
  467. return ret;
  468. }
static noinline void caching_thread(struct btrfs_work *work)
{
	struct btrfs_block_group_cache *block_group;
	struct btrfs_fs_info *fs_info;
	struct btrfs_caching_control *caching_ctl;
	struct btrfs_root *extent_root;
	int ret;

	caching_ctl = container_of(work, struct btrfs_caching_control, work);
	block_group = caching_ctl->block_group;
	fs_info = block_group->fs_info;
	extent_root = fs_info->extent_root;

	mutex_lock(&caching_ctl->mutex);
	down_read(&fs_info->commit_root_sem);

	if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE))
		ret = load_free_space_tree(caching_ctl);
	else
		ret = load_extent_tree_free(caching_ctl);

	spin_lock(&block_group->lock);
	block_group->caching_ctl = NULL;
	block_group->cached = ret ? BTRFS_CACHE_ERROR : BTRFS_CACHE_FINISHED;
	spin_unlock(&block_group->lock);

#ifdef CONFIG_BTRFS_DEBUG
	if (btrfs_should_fragment_free_space(block_group)) {
		u64 bytes_used;

		spin_lock(&block_group->space_info->lock);
		spin_lock(&block_group->lock);
		bytes_used = block_group->key.offset -
			btrfs_block_group_used(&block_group->item);
		block_group->space_info->bytes_used += bytes_used >> 1;
		spin_unlock(&block_group->lock);
		spin_unlock(&block_group->space_info->lock);
		fragment_free_space(block_group);
	}
#endif

	caching_ctl->progress = (u64)-1;

	up_read(&fs_info->commit_root_sem);
	free_excluded_extents(fs_info, block_group);
	mutex_unlock(&caching_ctl->mutex);

	wake_up(&caching_ctl->wait);

	put_caching_control(caching_ctl);
	btrfs_put_block_group(block_group);
}
static int cache_block_group(struct btrfs_block_group_cache *cache,
			     int load_cache_only)
{
	DEFINE_WAIT(wait);
	struct btrfs_fs_info *fs_info = cache->fs_info;
	struct btrfs_caching_control *caching_ctl;
	int ret = 0;

	caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
	if (!caching_ctl)
		return -ENOMEM;

	INIT_LIST_HEAD(&caching_ctl->list);
	mutex_init(&caching_ctl->mutex);
	init_waitqueue_head(&caching_ctl->wait);
	caching_ctl->block_group = cache;
	caching_ctl->progress = cache->key.objectid;
	refcount_set(&caching_ctl->count, 1);
	btrfs_init_work(&caching_ctl->work, btrfs_cache_helper,
			caching_thread, NULL, NULL);

	spin_lock(&cache->lock);
	/*
	 * This should be a rare occasion, but it could happen in the case
	 * where one thread starts to load the space cache info, and then
	 * some other thread starts a transaction commit which tries to do an
	 * allocation while the first thread is still loading the space cache
	 * info.  The previous loop should have kept us from choosing this
	 * block group, but if we've moved to the state where we will wait on
	 * caching block groups we need to first check if we're doing a fast
	 * load here, so we can wait for it to finish, otherwise we could end
	 * up allocating from a block group whose cache gets evicted for one
	 * reason or another.
	 */
	while (cache->cached == BTRFS_CACHE_FAST) {
		struct btrfs_caching_control *ctl;

		ctl = cache->caching_ctl;
		refcount_inc(&ctl->count);
		prepare_to_wait(&ctl->wait, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock(&cache->lock);

		schedule();

		finish_wait(&ctl->wait, &wait);
		put_caching_control(ctl);
		spin_lock(&cache->lock);
	}

	if (cache->cached != BTRFS_CACHE_NO) {
		spin_unlock(&cache->lock);
		kfree(caching_ctl);
		return 0;
	}
	WARN_ON(cache->caching_ctl);
	cache->caching_ctl = caching_ctl;
	cache->cached = BTRFS_CACHE_FAST;
	spin_unlock(&cache->lock);

	if (btrfs_test_opt(fs_info, SPACE_CACHE)) {
		mutex_lock(&caching_ctl->mutex);
		ret = load_free_space_cache(fs_info, cache);

		spin_lock(&cache->lock);
		if (ret == 1) {
			cache->caching_ctl = NULL;
			cache->cached = BTRFS_CACHE_FINISHED;
			cache->last_byte_to_unpin = (u64)-1;
			caching_ctl->progress = (u64)-1;
		} else {
			if (load_cache_only) {
				cache->caching_ctl = NULL;
				cache->cached = BTRFS_CACHE_NO;
			} else {
				cache->cached = BTRFS_CACHE_STARTED;
				cache->has_caching_ctl = 1;
			}
		}
		spin_unlock(&cache->lock);
#ifdef CONFIG_BTRFS_DEBUG
		if (ret == 1 &&
		    btrfs_should_fragment_free_space(cache)) {
			u64 bytes_used;

			spin_lock(&cache->space_info->lock);
			spin_lock(&cache->lock);
			bytes_used = cache->key.offset -
				btrfs_block_group_used(&cache->item);
			cache->space_info->bytes_used += bytes_used >> 1;
			spin_unlock(&cache->lock);
			spin_unlock(&cache->space_info->lock);
			fragment_free_space(cache);
		}
#endif
		mutex_unlock(&caching_ctl->mutex);

		wake_up(&caching_ctl->wait);
		if (ret == 1) {
			put_caching_control(caching_ctl);
			free_excluded_extents(fs_info, cache);
			return 0;
		}
	} else {
		/*
		 * We're either using the free space tree or no caching at all.
		 * Set cached to the appropriate value and wakeup any waiters.
		 */
		spin_lock(&cache->lock);
		if (load_cache_only) {
			cache->caching_ctl = NULL;
			cache->cached = BTRFS_CACHE_NO;
		} else {
			cache->cached = BTRFS_CACHE_STARTED;
			cache->has_caching_ctl = 1;
		}
		spin_unlock(&cache->lock);
		wake_up(&caching_ctl->wait);
	}

	if (load_cache_only) {
		put_caching_control(caching_ctl);
		return 0;
	}

	down_write(&fs_info->commit_root_sem);
	refcount_inc(&caching_ctl->count);
	list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
	up_write(&fs_info->commit_root_sem);

	btrfs_get_block_group(cache);

	btrfs_queue_work(fs_info->caching_workers, &caching_ctl->work);

	return ret;
}
/*
 * return the block group that starts at or after bytenr
 */
static struct btrfs_block_group_cache *
btrfs_lookup_first_block_group(struct btrfs_fs_info *info, u64 bytenr)
{
	return block_group_cache_tree_search(info, bytenr, 0);
}

/*
 * return the block group that contains the given bytenr
 */
struct btrfs_block_group_cache *btrfs_lookup_block_group(
						 struct btrfs_fs_info *info,
						 u64 bytenr)
{
	return block_group_cache_tree_search(info, bytenr, 1);
}

static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
						  u64 flags)
{
	struct list_head *head = &info->space_info;
	struct btrfs_space_info *found;

	flags &= BTRFS_BLOCK_GROUP_TYPE_MASK;

	rcu_read_lock();
	list_for_each_entry_rcu(found, head, list) {
		if (found->flags & flags) {
			rcu_read_unlock();
			return found;
		}
	}
	rcu_read_unlock();
	return NULL;
}

static void add_pinned_bytes(struct btrfs_fs_info *fs_info, s64 num_bytes,
			     u64 owner, u64 root_objectid)
{
	struct btrfs_space_info *space_info;
	u64 flags;

	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		if (root_objectid == BTRFS_CHUNK_TREE_OBJECTID)
			flags = BTRFS_BLOCK_GROUP_SYSTEM;
		else
			flags = BTRFS_BLOCK_GROUP_METADATA;
	} else {
		flags = BTRFS_BLOCK_GROUP_DATA;
	}

	space_info = __find_space_info(fs_info, flags);
	ASSERT(space_info);
	percpu_counter_add(&space_info->total_bytes_pinned, num_bytes);
}

/*
 * after adding space to the filesystem, we need to clear the full flags
 * on all the space infos.
 */
void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
{
	struct list_head *head = &info->space_info;
	struct btrfs_space_info *found;

	rcu_read_lock();
	list_for_each_entry_rcu(found, head, list)
		found->full = 0;
	rcu_read_unlock();
}

/* simple helper to search for an existing data extent at a given offset */
int btrfs_lookup_data_extent(struct btrfs_fs_info *fs_info, u64 start, u64 len)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_path *path;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = start;
	key.offset = len;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	ret = btrfs_search_slot(NULL, fs_info->extent_root, &key, path, 0, 0);
	btrfs_free_path(path);
	return ret;
}
/*
 * helper function to look up the reference count and flags of a tree block.
 *
 * The head node for a delayed ref is used to store the sum of all the
 * reference count modifications queued up in the rbtree.  The head node
 * may also store the extent flags to set.  This way you can check to see
 * what the reference count and extent flags would be if all of the
 * delayed refs had been processed.
 */
int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
			     struct btrfs_fs_info *fs_info, u64 bytenr,
			     u64 offset, int metadata, u64 *refs, u64 *flags)
{
	struct btrfs_delayed_ref_head *head;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_path *path;
	struct btrfs_extent_item *ei;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	u32 item_size;
	u64 num_refs;
	u64 extent_flags;
	int ret;

	/*
	 * If we don't have skinny metadata, don't bother doing anything
	 * different
	 */
	if (metadata && !btrfs_fs_incompat(fs_info, SKINNY_METADATA)) {
		offset = fs_info->nodesize;
		metadata = 0;
	}

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	if (!trans) {
		path->skip_locking = 1;
		path->search_commit_root = 1;
	}

search_again:
	key.objectid = bytenr;
	key.offset = offset;
	if (metadata)
		key.type = BTRFS_METADATA_ITEM_KEY;
	else
		key.type = BTRFS_EXTENT_ITEM_KEY;

	ret = btrfs_search_slot(trans, fs_info->extent_root, &key, path, 0, 0);
	if (ret < 0)
		goto out_free;

	if (ret > 0 && metadata && key.type == BTRFS_METADATA_ITEM_KEY) {
		if (path->slots[0]) {
			path->slots[0]--;
			btrfs_item_key_to_cpu(path->nodes[0], &key,
					      path->slots[0]);
			if (key.objectid == bytenr &&
			    key.type == BTRFS_EXTENT_ITEM_KEY &&
			    key.offset == fs_info->nodesize)
				ret = 0;
		}
	}

	if (ret == 0) {
		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		if (item_size >= sizeof(*ei)) {
			ei = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_extent_item);
			num_refs = btrfs_extent_refs(leaf, ei);
			extent_flags = btrfs_extent_flags(leaf, ei);
		} else {
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
			struct btrfs_extent_item_v0 *ei0;
			BUG_ON(item_size != sizeof(*ei0));
			ei0 = btrfs_item_ptr(leaf, path->slots[0],
					     struct btrfs_extent_item_v0);
			num_refs = btrfs_extent_refs_v0(leaf, ei0);
			/* FIXME: this isn't correct for data */
			extent_flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
#else
			BUG();
#endif
		}
		BUG_ON(num_refs == 0);
	} else {
		num_refs = 0;
		extent_flags = 0;
		ret = 0;
	}

	if (!trans)
		goto out;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);
	head = btrfs_find_delayed_ref_head(delayed_refs, bytenr);
	if (head) {
		if (!mutex_trylock(&head->mutex)) {
			refcount_inc(&head->refs);
			spin_unlock(&delayed_refs->lock);

			btrfs_release_path(path);

			/*
			 * Mutex was contended, block until it's released and
			 * try again
			 */
			mutex_lock(&head->mutex);
			mutex_unlock(&head->mutex);
			btrfs_put_delayed_ref_head(head);
			goto search_again;
		}
		spin_lock(&head->lock);
		if (head->extent_op && head->extent_op->update_flags)
			extent_flags |= head->extent_op->flags_to_set;
		else
			BUG_ON(num_refs == 0);

		num_refs += head->ref_mod;
		spin_unlock(&head->lock);
		mutex_unlock(&head->mutex);
	}
	spin_unlock(&delayed_refs->lock);
out:
	WARN_ON(num_refs == 0);
	if (refs)
		*refs = num_refs;
	if (flags)
		*flags = extent_flags;
out_free:
	btrfs_free_path(path);
	return ret;
}
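
/*
 * Worked example (illustrative, not part of the original file): if the
 * extent item on disk records num_refs == 3 and the delayed ref head has
 * ref_mod == -2 queued, btrfs_lookup_extent_info() above reports
 * num_refs == 1, i.e. the value the extent would have once the delayed
 * refs are run.
 */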
/*
 * Back reference rules.  Back refs have three main goals:
 *
 * 1) differentiate between all holders of references to an extent so that
 *    when a reference is dropped we can make sure it was a valid reference
 *    before freeing the extent.
 *
 * 2) Provide enough information to quickly find the holders of an extent
 *    if we notice a given block is corrupted or bad.
 *
 * 3) Make it easy to migrate blocks for FS shrinking or storage pool
 *    maintenance.  This is actually the same as #2, but with a slightly
 *    different use case.
 *
 * There are two kinds of back refs.  The implicit back refs are optimized
 * for pointers in non-shared tree blocks.  For a given pointer in a block,
 * back refs of this kind provide information about the block's owner tree
 * and the pointer's key.  This information allows us to find the block by
 * b-tree searching.  The full back refs are for pointers in tree blocks not
 * referenced by their owner trees.  The location of the tree block is
 * recorded in the back refs.  Actually the full back refs are generic, and
 * can be used in all cases where the implicit back refs are used.  The
 * major shortcoming of the full back refs is their overhead.  Every time a
 * tree block gets COWed, we have to update back refs entries for all
 * pointers in it.
 *
 * For a newly allocated tree block, we use implicit back refs for
 * pointers in it.  This means most tree related operations only involve
 * implicit back refs.  For a tree block created in an old transaction, the
 * only way to drop a reference to it is to COW it.  So we can detect the
 * event that a tree block loses its owner tree's reference and do the
 * back refs conversion.
 *
 * When a tree block is COWed through a tree, there are four cases:
 *
 * The reference count of the block is one and the tree is the block's
 * owner tree.  Nothing to do in this case.
 *
 * The reference count of the block is one and the tree is not the
 * block's owner tree.  In this case, full back refs are used for pointers
 * in the block.  Remove these full back refs, add implicit back refs for
 * every pointer in the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * the block's owner tree.  In this case, implicit back refs are used for
 * pointers in the block.  Add full back refs for every pointer in the
 * block, increase lower level extents' reference counts.  The original
 * implicit back refs are entailed to the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * not the block's owner tree.  Add implicit back refs for every pointer in
 * the new block, increase lower level extents' reference count.
 *
 * Back Reference Key composing:
 *
 * The key objectid corresponds to the first byte in the extent,
 * the key type is used to differentiate between types of back refs.
 * There are different meanings of the key offset for different types
 * of back refs.
 *
 * File extents can be referenced by:
 *
 * - multiple snapshots, subvolumes, or different generations in one subvol
 * - different files inside a single subvolume
 * - different offsets inside a file (bookend extents in file.c)
 *
 * The extent ref structure for the implicit back refs has fields for:
 *
 * - Objectid of the subvolume root
 * - objectid of the file holding the reference
 * - original offset in the file
 * - how many bookend extents
 *
 * The key offset for the implicit back refs is the hash of the first
 * three fields.
 *
 * The extent ref structure for the full back refs has a field for:
 *
 * - number of pointers in the tree leaf
 *
 * The key offset for the full back refs is the first byte of
 * the tree leaf.
 *
 * When a file extent is allocated, the implicit back refs are used.
 * The fields are filled in:
 *
 *     (root_key.objectid, inode objectid, offset in file, 1)
 *
 * When a file extent is removed by file truncation, we find the
 * corresponding implicit back refs and check the following fields:
 *
 *     (btrfs_header_owner(leaf), inode objectid, offset in file)
 *
 * Btree extents can be referenced by:
 *
 * - Different subvolumes
 *
 * Both the implicit back refs and the full back refs for tree blocks
 * only consist of a key.  The key offset for the implicit back refs is
 * the objectid of the block's owner tree.  The key offset for the full
 * back refs is the first byte of the parent block.
 *
 * When implicit back refs are used, information about the lowest key and
 * level of the tree block is required.  This information is stored in
 * the tree block info structure.
 */
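
/*
 * Illustrative sketch (not part of the original file): how the back ref
 * keys described above are composed, modeled in userspace with a
 * simplified key struct.  The numeric key types are assumptions taken
 * from the on-disk format definitions; the hash value is the one computed
 * by hash_extent_data_ref() further down.
 */
#if 0
#include <stdint.h>

struct model_key { uint64_t objectid; uint8_t type; uint64_t offset; };

#define MODEL_TREE_BLOCK_REF_KEY	176	/* implicit tree back ref */
#define MODEL_EXTENT_DATA_REF_KEY	178	/* implicit data back ref */
#define MODEL_SHARED_BLOCK_REF_KEY	182	/* full tree back ref */
#define MODEL_SHARED_DATA_REF_KEY	184	/* full data back ref */

/* Implicit data ref: offset is hash(root, inode objectid, file offset). */
static struct model_key implicit_data_ref(uint64_t bytenr, uint64_t hash)
{
	return (struct model_key){ bytenr, MODEL_EXTENT_DATA_REF_KEY, hash };
}

/* Full refs: offset is the first byte of the parent tree block. */
static struct model_key shared_ref(uint64_t bytenr, uint8_t type,
				   uint64_t parent)
{
	return (struct model_key){ bytenr, type, parent };
}
#endif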
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
static int convert_extent_item_v0(struct btrfs_trans_handle *trans,
				  struct btrfs_fs_info *fs_info,
				  struct btrfs_path *path,
				  u64 owner, u32 extra_size)
{
	struct btrfs_root *root = fs_info->extent_root;
	struct btrfs_extent_item *item;
	struct btrfs_extent_item_v0 *ei0;
	struct btrfs_extent_ref_v0 *ref0;
	struct btrfs_tree_block_info *bi;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	u32 new_size = sizeof(*item);
	u64 refs;
	int ret;

	leaf = path->nodes[0];
	BUG_ON(btrfs_item_size_nr(leaf, path->slots[0]) != sizeof(*ei0));

	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	ei0 = btrfs_item_ptr(leaf, path->slots[0],
			     struct btrfs_extent_item_v0);
	refs = btrfs_extent_refs_v0(leaf, ei0);

	if (owner == (u64)-1) {
		while (1) {
			if (path->slots[0] >= btrfs_header_nritems(leaf)) {
				ret = btrfs_next_leaf(root, path);
				if (ret < 0)
					return ret;
				BUG_ON(ret > 0); /* Corruption */
				leaf = path->nodes[0];
			}
			btrfs_item_key_to_cpu(leaf, &found_key,
					      path->slots[0]);
			BUG_ON(key.objectid != found_key.objectid);
			if (found_key.type != BTRFS_EXTENT_REF_V0_KEY) {
				path->slots[0]++;
				continue;
			}
			ref0 = btrfs_item_ptr(leaf, path->slots[0],
					      struct btrfs_extent_ref_v0);
			owner = btrfs_ref_objectid_v0(leaf, ref0);
			break;
		}
	}
	btrfs_release_path(path);

	if (owner < BTRFS_FIRST_FREE_OBJECTID)
		new_size += sizeof(*bi);

	new_size -= sizeof(*ei0);
	ret = btrfs_search_slot(trans, root, &key, path,
				new_size + extra_size, 1);
	if (ret < 0)
		return ret;
	BUG_ON(ret); /* Corruption */

	btrfs_extend_item(fs_info, path, new_size);

	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	btrfs_set_extent_refs(leaf, item, refs);
	/* FIXME: get real generation */
	btrfs_set_extent_generation(leaf, item, 0);
	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		btrfs_set_extent_flags(leaf, item,
				       BTRFS_EXTENT_FLAG_TREE_BLOCK |
				       BTRFS_BLOCK_FLAG_FULL_BACKREF);
		bi = (struct btrfs_tree_block_info *)(item + 1);
		/* FIXME: get first key of the block */
		memzero_extent_buffer(leaf, (unsigned long)bi, sizeof(*bi));
		btrfs_set_tree_block_level(leaf, bi, (int)owner);
	} else {
		btrfs_set_extent_flags(leaf, item, BTRFS_EXTENT_FLAG_DATA);
	}
	btrfs_mark_buffer_dirty(leaf);
	return 0;
}
#endif
/*
 * is_data == BTRFS_REF_TYPE_BLOCK, tree block type is required,
 * is_data == BTRFS_REF_TYPE_DATA, data type is required,
 * is_data == BTRFS_REF_TYPE_ANY, either type is OK.
 */
int btrfs_get_extent_inline_ref_type(const struct extent_buffer *eb,
				     struct btrfs_extent_inline_ref *iref,
				     enum btrfs_inline_ref_type is_data)
{
	int type = btrfs_extent_inline_ref_type(eb, iref);
	u64 offset = btrfs_extent_inline_ref_offset(eb, iref);

	if (type == BTRFS_TREE_BLOCK_REF_KEY ||
	    type == BTRFS_SHARED_BLOCK_REF_KEY ||
	    type == BTRFS_SHARED_DATA_REF_KEY ||
	    type == BTRFS_EXTENT_DATA_REF_KEY) {
		if (is_data == BTRFS_REF_TYPE_BLOCK) {
			if (type == BTRFS_TREE_BLOCK_REF_KEY)
				return type;
			if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
				ASSERT(eb->fs_info);
				/*
				 * Every shared one has a parent tree block,
				 * which must be aligned to nodesize.
				 */
				if (offset &&
				    IS_ALIGNED(offset, eb->fs_info->nodesize))
					return type;
			}
		} else if (is_data == BTRFS_REF_TYPE_DATA) {
			if (type == BTRFS_EXTENT_DATA_REF_KEY)
				return type;
			if (type == BTRFS_SHARED_DATA_REF_KEY) {
				ASSERT(eb->fs_info);
				/*
				 * Every shared one has a parent tree block,
				 * which must be aligned to nodesize.
				 */
				if (offset &&
				    IS_ALIGNED(offset, eb->fs_info->nodesize))
					return type;
			}
		} else {
			ASSERT(is_data == BTRFS_REF_TYPE_ANY);
			return type;
		}
	}

	btrfs_print_leaf((struct extent_buffer *)eb);
	btrfs_err(eb->fs_info, "eb %llu invalid extent inline ref type %d",
		  eb->start, type);
	WARN_ON(1);

	return BTRFS_REF_TYPE_INVALID;
}
static u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset)
{
	u32 high_crc = ~(u32)0;
	u32 low_crc = ~(u32)0;
	__le64 lenum;

	lenum = cpu_to_le64(root_objectid);
	high_crc = btrfs_crc32c(high_crc, &lenum, sizeof(lenum));
	lenum = cpu_to_le64(owner);
	low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));
	lenum = cpu_to_le64(offset);
	low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));

	return ((u64)high_crc << 31) ^ (u64)low_crc;
}
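
/*
 * A minimal userspace model of the hash above (illustrative, not part of
 * the original file): a plain bitwise CRC-32C stands in for btrfs_crc32c(),
 * and values are serialized little-endian by hand so the result does not
 * depend on host byte order.
 */
#if 0
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t crc32c(uint32_t crc, const uint8_t *p, size_t len)
{
	while (len--) {
		crc ^= *p++;
		for (int i = 0; i < 8; i++)	/* reflected poly 0x82F63B78 */
			crc = (crc >> 1) ^ (0x82F63B78 & -(crc & 1));
	}
	return crc;
}

static void put_le64(uint8_t out[8], uint64_t v)
{
	for (int i = 0; i < 8; i++)
		out[i] = v >> (8 * i);
}

static uint64_t model_hash(uint64_t root, uint64_t owner, uint64_t offset)
{
	uint32_t high_crc = ~(uint32_t)0;
	uint32_t low_crc = ~(uint32_t)0;
	uint8_t buf[8];

	put_le64(buf, root);
	high_crc = crc32c(high_crc, buf, 8);
	put_le64(buf, owner);
	low_crc = crc32c(low_crc, buf, 8);
	put_le64(buf, offset);
	low_crc = crc32c(low_crc, buf, 8);

	/* 31-bit shift, matching hash_extent_data_ref() above */
	return ((uint64_t)high_crc << 31) ^ (uint64_t)low_crc;
}

int main(void)
{
	/* e.g. subvolume 5, inode 257, file offset 0 */
	printf("%llu\n", (unsigned long long)model_hash(5, 257, 0));
	return 0;
}
#endif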
static u64 hash_extent_data_ref_item(struct extent_buffer *leaf,
				     struct btrfs_extent_data_ref *ref)
{
	return hash_extent_data_ref(btrfs_extent_data_ref_root(leaf, ref),
				    btrfs_extent_data_ref_objectid(leaf, ref),
				    btrfs_extent_data_ref_offset(leaf, ref));
}

static int match_extent_data_ref(struct extent_buffer *leaf,
				 struct btrfs_extent_data_ref *ref,
				 u64 root_objectid, u64 owner, u64 offset)
{
	if (btrfs_extent_data_ref_root(leaf, ref) != root_objectid ||
	    btrfs_extent_data_ref_objectid(leaf, ref) != owner ||
	    btrfs_extent_data_ref_offset(leaf, ref) != offset)
		return 0;
	return 1;
}

static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_fs_info *fs_info,
					   struct btrfs_path *path,
					   u64 bytenr, u64 parent,
					   u64 root_objectid,
					   u64 owner, u64 offset)
{
	struct btrfs_root *root = fs_info->extent_root;
	struct btrfs_key key;
	struct btrfs_extent_data_ref *ref;
	struct extent_buffer *leaf;
	u32 nritems;
	int ret;
	int recow;
	int err = -ENOENT;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_DATA_REF_KEY;
		key.offset = parent;
	} else {
		key.type = BTRFS_EXTENT_DATA_REF_KEY;
		key.offset = hash_extent_data_ref(root_objectid,
						  owner, offset);
	}
again:
	recow = 0;
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0) {
		err = ret;
		goto fail;
	}

	if (parent) {
		if (!ret)
			return 0;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
		key.type = BTRFS_EXTENT_REF_V0_KEY;
		btrfs_release_path(path);
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret < 0) {
			err = ret;
			goto fail;
		}
		if (!ret)
			return 0;
#endif
		goto fail;
	}

	leaf = path->nodes[0];
	nritems = btrfs_header_nritems(leaf);
	while (1) {
		if (path->slots[0] >= nritems) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				err = ret;
			if (ret)
				goto fail;

			leaf = path->nodes[0];
			nritems = btrfs_header_nritems(leaf);
			recow = 1;
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid != bytenr ||
		    key.type != BTRFS_EXTENT_DATA_REF_KEY)
			goto fail;

		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_data_ref);

		if (match_extent_data_ref(leaf, ref, root_objectid,
					  owner, offset)) {
			if (recow) {
				btrfs_release_path(path);
				goto again;
			}
			err = 0;
			break;
		}
		path->slots[0]++;
	}
fail:
	return err;
}
static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_fs_info *fs_info,
					   struct btrfs_path *path,
					   u64 bytenr, u64 parent,
					   u64 root_objectid, u64 owner,
					   u64 offset, int refs_to_add)
{
	struct btrfs_root *root = fs_info->extent_root;
	struct btrfs_key key;
	struct extent_buffer *leaf;
	u32 size;
	u32 num_refs;
	int ret;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_DATA_REF_KEY;
		key.offset = parent;
		size = sizeof(struct btrfs_shared_data_ref);
	} else {
		key.type = BTRFS_EXTENT_DATA_REF_KEY;
		key.offset = hash_extent_data_ref(root_objectid,
						  owner, offset);
		size = sizeof(struct btrfs_extent_data_ref);
	}

	ret = btrfs_insert_empty_item(trans, root, path, &key, size);
	if (ret && ret != -EEXIST)
		goto fail;

	leaf = path->nodes[0];
	if (parent) {
		struct btrfs_shared_data_ref *ref;
		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_shared_data_ref);
		if (ret == 0) {
			btrfs_set_shared_data_ref_count(leaf, ref, refs_to_add);
		} else {
			num_refs = btrfs_shared_data_ref_count(leaf, ref);
			num_refs += refs_to_add;
			btrfs_set_shared_data_ref_count(leaf, ref, num_refs);
		}
	} else {
		struct btrfs_extent_data_ref *ref;
		while (ret == -EEXIST) {
			ref = btrfs_item_ptr(leaf, path->slots[0],
					     struct btrfs_extent_data_ref);
			if (match_extent_data_ref(leaf, ref, root_objectid,
						  owner, offset))
				break;
			btrfs_release_path(path);
			key.offset++;
			ret = btrfs_insert_empty_item(trans, root, path, &key,
						      size);
			if (ret && ret != -EEXIST)
				goto fail;

			leaf = path->nodes[0];
		}
		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_data_ref);
		if (ret == 0) {
			btrfs_set_extent_data_ref_root(leaf, ref,
						       root_objectid);
			btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
			btrfs_set_extent_data_ref_offset(leaf, ref, offset);
			btrfs_set_extent_data_ref_count(leaf, ref, refs_to_add);
		} else {
			num_refs = btrfs_extent_data_ref_count(leaf, ref);
			num_refs += refs_to_add;
			btrfs_set_extent_data_ref_count(leaf, ref, num_refs);
		}
	}
	btrfs_mark_buffer_dirty(leaf);
	ret = 0;
fail:
	btrfs_release_path(path);
	return ret;
}
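
/*
 * Illustrative sketch (not part of the original file): the -EEXIST loop
 * above is open addressing by key offset.  Two different (root, owner,
 * offset) triples can hash to the same key offset, so on collision the
 * offset is bumped until a free slot or the matching ref is found.  A
 * userspace model of the same probing over a toy table (a full toy table
 * would loop forever; the real tree can always take one more item):
 */
#if 0
#include <stdint.h>

#define SLOTS 8

struct toy_ref { int used; uint64_t root, owner, offset; };

/* Insert by hash, probing forward on collision, as the loop above does. */
static int toy_insert(struct toy_ref table[SLOTS], uint64_t hash,
		      uint64_t root, uint64_t owner, uint64_t offset)
{
	for (uint64_t slot = hash % SLOTS; ; slot = (slot + 1) % SLOTS) {
		struct toy_ref *r = &table[slot];

		if (!r->used) {			/* free slot: insert */
			*r = (struct toy_ref){ 1, root, owner, offset };
			return 0;
		}
		if (r->root == root && r->owner == owner &&
		    r->offset == offset)	/* existing ref: merge */
			return 1;
	}
}
#endif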
static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_fs_info *fs_info,
					   struct btrfs_path *path,
					   int refs_to_drop, int *last_ref)
{
	struct btrfs_key key;
	struct btrfs_extent_data_ref *ref1 = NULL;
	struct btrfs_shared_data_ref *ref2 = NULL;
	struct extent_buffer *leaf;
	u32 num_refs = 0;
	int ret = 0;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

	if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
		ref1 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_data_ref);
		num_refs = btrfs_extent_data_ref_count(leaf, ref1);
	} else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
		ref2 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_shared_data_ref);
		num_refs = btrfs_shared_data_ref_count(leaf, ref2);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	} else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
		struct btrfs_extent_ref_v0 *ref0;
		ref0 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_ref_v0);
		num_refs = btrfs_ref_count_v0(leaf, ref0);
#endif
	} else {
		BUG();
	}

	BUG_ON(num_refs < refs_to_drop);
	num_refs -= refs_to_drop;

	if (num_refs == 0) {
		ret = btrfs_del_item(trans, fs_info->extent_root, path);
		*last_ref = 1;
	} else {
		if (key.type == BTRFS_EXTENT_DATA_REF_KEY)
			btrfs_set_extent_data_ref_count(leaf, ref1, num_refs);
		else if (key.type == BTRFS_SHARED_DATA_REF_KEY)
			btrfs_set_shared_data_ref_count(leaf, ref2, num_refs);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
		else {
			struct btrfs_extent_ref_v0 *ref0;
			ref0 = btrfs_item_ptr(leaf, path->slots[0],
					      struct btrfs_extent_ref_v0);
			btrfs_set_ref_count_v0(leaf, ref0, num_refs);
		}
#endif
		btrfs_mark_buffer_dirty(leaf);
	}
	return ret;
}

static noinline u32 extent_data_ref_count(struct btrfs_path *path,
					  struct btrfs_extent_inline_ref *iref)
{
	struct btrfs_key key;
	struct extent_buffer *leaf;
	struct btrfs_extent_data_ref *ref1;
	struct btrfs_shared_data_ref *ref2;
	u32 num_refs = 0;
	int type;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	if (iref) {
		/*
		 * If type is invalid, we should have bailed out earlier than
		 * this call.
		 */
		type = btrfs_get_extent_inline_ref_type(leaf, iref, BTRFS_REF_TYPE_DATA);
		ASSERT(type != BTRFS_REF_TYPE_INVALID);
		if (type == BTRFS_EXTENT_DATA_REF_KEY) {
			ref1 = (struct btrfs_extent_data_ref *)(&iref->offset);
			num_refs = btrfs_extent_data_ref_count(leaf, ref1);
		} else {
			ref2 = (struct btrfs_shared_data_ref *)(iref + 1);
			num_refs = btrfs_shared_data_ref_count(leaf, ref2);
		}
	} else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
		ref1 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_data_ref);
		num_refs = btrfs_extent_data_ref_count(leaf, ref1);
	} else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
		ref2 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_shared_data_ref);
		num_refs = btrfs_shared_data_ref_count(leaf, ref2);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	} else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
		struct btrfs_extent_ref_v0 *ref0;
		ref0 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_ref_v0);
		num_refs = btrfs_ref_count_v0(leaf, ref0);
#endif
	} else {
		WARN_ON(1);
	}
	return num_refs;
}
static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans,
					  struct btrfs_fs_info *fs_info,
					  struct btrfs_path *path,
					  u64 bytenr, u64 parent,
					  u64 root_objectid)
{
	struct btrfs_root *root = fs_info->extent_root;
	struct btrfs_key key;
	int ret;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_BLOCK_REF_KEY;
		key.offset = parent;
	} else {
		key.type = BTRFS_TREE_BLOCK_REF_KEY;
		key.offset = root_objectid;
	}

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0)
		ret = -ENOENT;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (ret == -ENOENT && parent) {
		btrfs_release_path(path);
		key.type = BTRFS_EXTENT_REF_V0_KEY;
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret > 0)
			ret = -ENOENT;
	}
#endif
	return ret;
}

static noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans,
					  struct btrfs_fs_info *fs_info,
					  struct btrfs_path *path,
					  u64 bytenr, u64 parent,
					  u64 root_objectid)
{
	struct btrfs_key key;
	int ret;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_BLOCK_REF_KEY;
		key.offset = parent;
	} else {
		key.type = BTRFS_TREE_BLOCK_REF_KEY;
		key.offset = root_objectid;
	}

	ret = btrfs_insert_empty_item(trans, fs_info->extent_root,
				      path, &key, 0);
	btrfs_release_path(path);
	return ret;
}

static inline int extent_ref_type(u64 parent, u64 owner)
{
	int type;

	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		if (parent > 0)
			type = BTRFS_SHARED_BLOCK_REF_KEY;
		else
			type = BTRFS_TREE_BLOCK_REF_KEY;
	} else {
		if (parent > 0)
			type = BTRFS_SHARED_DATA_REF_KEY;
		else
			type = BTRFS_EXTENT_DATA_REF_KEY;
	}
	return type;
}

static int find_next_key(struct btrfs_path *path, int level,
			 struct btrfs_key *key)
{
	for (; level < BTRFS_MAX_LEVEL; level++) {
		if (!path->nodes[level])
			break;
		if (path->slots[level] + 1 >=
		    btrfs_header_nritems(path->nodes[level]))
			continue;
		if (level == 0)
			btrfs_item_key_to_cpu(path->nodes[level], key,
					      path->slots[level] + 1);
		else
			btrfs_node_key_to_cpu(path->nodes[level], key,
					      path->slots[level] + 1);
		return 0;
	}
	return 1;
}
/*
 * look for inline back ref. if back ref is found, *ref_ret is set
 * to the address of inline back ref, and 0 is returned.
 *
 * if back ref isn't found, *ref_ret is set to the address where it
 * should be inserted, and -ENOENT is returned.
 *
 * if insert is true and there are too many inline back refs, the path
 * points to the extent item, and -EAGAIN is returned.
 *
 * NOTE: inline back refs are ordered in the same way that back ref
 *	 items in the tree are ordered.
 */
static noinline_for_stack
int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_fs_info *fs_info,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref **ref_ret,
				 u64 bytenr, u64 num_bytes,
				 u64 parent, u64 root_objectid,
				 u64 owner, u64 offset, int insert)
{
	struct btrfs_root *root = fs_info->extent_root;
	struct btrfs_key key;
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	struct btrfs_extent_inline_ref *iref;
	u64 flags;
	u64 item_size;
	unsigned long ptr;
	unsigned long end;
	int extra_size;
	int type;
	int want;
	int ret;
	int err = 0;
	bool skinny_metadata = btrfs_fs_incompat(fs_info, SKINNY_METADATA);
	int needed;

	key.objectid = bytenr;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	key.offset = num_bytes;

	want = extent_ref_type(parent, owner);
	if (insert) {
		extra_size = btrfs_extent_inline_ref_size(want);
		path->keep_locks = 1;
	} else
		extra_size = -1;

	/*
	 * Owner is our parent level, so we can just add one to get the level
	 * for the block we are interested in.
	 */
	if (skinny_metadata && owner < BTRFS_FIRST_FREE_OBJECTID) {
		key.type = BTRFS_METADATA_ITEM_KEY;
		key.offset = owner;
	}

again:
	ret = btrfs_search_slot(trans, root, &key, path, extra_size, 1);
	if (ret < 0) {
		err = ret;
		goto out;
	}

	/*
	 * We may be a newly converted file system which still has the old fat
	 * extent entries for metadata, so try and see if we have one of those.
	 */
	if (ret > 0 && skinny_metadata) {
		skinny_metadata = false;
		if (path->slots[0]) {
			path->slots[0]--;
			btrfs_item_key_to_cpu(path->nodes[0], &key,
					      path->slots[0]);
			if (key.objectid == bytenr &&
			    key.type == BTRFS_EXTENT_ITEM_KEY &&
			    key.offset == num_bytes)
				ret = 0;
		}
		if (ret) {
			key.objectid = bytenr;
			key.type = BTRFS_EXTENT_ITEM_KEY;
			key.offset = num_bytes;
			btrfs_release_path(path);
			goto again;
		}
	}

	if (ret && !insert) {
		err = -ENOENT;
		goto out;
	} else if (WARN_ON(ret)) {
		err = -EIO;
		goto out;
	}

	leaf = path->nodes[0];
	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (item_size < sizeof(*ei)) {
		if (!insert) {
			err = -ENOENT;
			goto out;
		}
		ret = convert_extent_item_v0(trans, fs_info, path, owner,
					     extra_size);
		if (ret < 0) {
			err = ret;
			goto out;
		}
		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
	}
#endif
	BUG_ON(item_size < sizeof(*ei));

	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	flags = btrfs_extent_flags(leaf, ei);

	ptr = (unsigned long)(ei + 1);
	end = (unsigned long)ei + item_size;

	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK && !skinny_metadata) {
		ptr += sizeof(struct btrfs_tree_block_info);
		BUG_ON(ptr > end);
	}

	if (owner >= BTRFS_FIRST_FREE_OBJECTID)
		needed = BTRFS_REF_TYPE_DATA;
	else
		needed = BTRFS_REF_TYPE_BLOCK;

	err = -ENOENT;
	while (1) {
		if (ptr >= end) {
			WARN_ON(ptr > end);
			break;
		}
		iref = (struct btrfs_extent_inline_ref *)ptr;
		type = btrfs_get_extent_inline_ref_type(leaf, iref, needed);
		if (type == BTRFS_REF_TYPE_INVALID) {
			err = -EINVAL;
			goto out;
		}

		if (want < type)
			break;
		if (want > type) {
			ptr += btrfs_extent_inline_ref_size(type);
			continue;
		}

		if (type == BTRFS_EXTENT_DATA_REF_KEY) {
			struct btrfs_extent_data_ref *dref;
			dref = (struct btrfs_extent_data_ref *)(&iref->offset);
			if (match_extent_data_ref(leaf, dref, root_objectid,
						  owner, offset)) {
				err = 0;
				break;
			}
			if (hash_extent_data_ref_item(leaf, dref) <
			    hash_extent_data_ref(root_objectid, owner, offset))
				break;
		} else {
			u64 ref_offset;
			ref_offset = btrfs_extent_inline_ref_offset(leaf, iref);
			if (parent > 0) {
				if (parent == ref_offset) {
					err = 0;
					break;
				}
				if (ref_offset < parent)
					break;
			} else {
				if (root_objectid == ref_offset) {
					err = 0;
					break;
				}
				if (ref_offset < root_objectid)
					break;
			}
		}
		ptr += btrfs_extent_inline_ref_size(type);
	}
	if (err == -ENOENT && insert) {
		if (item_size + extra_size >=
		    BTRFS_MAX_EXTENT_ITEM_SIZE(root)) {
			err = -EAGAIN;
			goto out;
		}
		/*
		 * To add new inline back ref, we have to make sure
		 * there is no corresponding back ref item.
		 * For simplicity, we just do not add new inline back
		 * ref if there is any kind of item for this block
		 */
		if (find_next_key(path, 0, &key) == 0 &&
		    key.objectid == bytenr &&
		    key.type < BTRFS_BLOCK_GROUP_ITEM_KEY) {
			err = -EAGAIN;
			goto out;
		}
	}
	*ref_ret = (struct btrfs_extent_inline_ref *)ptr;
out:
	if (insert) {
		path->keep_locks = 0;
		btrfs_unlock_up_safe(path, 1);
	}
	return err;
}
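
/*
 * Illustrative sketch (not part of the original file): the return-code
 * contract of lookup_inline_extent_backref() as a caller sees it.  This
 * is the same pattern insert_inline_extent_backref() and
 * __btrfs_inc_extent_ref() below implement for real.
 */
#if 0
	ret = lookup_inline_extent_backref(trans, fs_info, path, &iref,
					   bytenr, num_bytes, parent,
					   root_objectid, owner, offset, 1);
	if (ret == 0) {
		/* found: iref points at the existing inline back ref */
	} else if (ret == -ENOENT) {
		/* not found: iref points where a new inline ref may go */
	} else if (ret == -EAGAIN) {
		/*
		 * item too large for another inline ref: fall back to a
		 * keyed back ref item (see __btrfs_inc_extent_ref below)
		 */
	}
#endif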
/*
 * helper to add new inline back ref
 */
static noinline_for_stack
void setup_inline_extent_backref(struct btrfs_fs_info *fs_info,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref *iref,
				 u64 parent, u64 root_objectid,
				 u64 owner, u64 offset, int refs_to_add,
				 struct btrfs_delayed_extent_op *extent_op)
{
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	unsigned long ptr;
	unsigned long end;
	unsigned long item_offset;
	u64 refs;
	int size;
	int type;

	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	item_offset = (unsigned long)iref - (unsigned long)ei;

	type = extent_ref_type(parent, owner);
	size = btrfs_extent_inline_ref_size(type);

	btrfs_extend_item(fs_info, path, size);

	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	refs = btrfs_extent_refs(leaf, ei);
	refs += refs_to_add;
	btrfs_set_extent_refs(leaf, ei, refs);
	if (extent_op)
		__run_delayed_extent_op(extent_op, leaf, ei);

	ptr = (unsigned long)ei + item_offset;
	end = (unsigned long)ei + btrfs_item_size_nr(leaf, path->slots[0]);
	if (ptr < end - size)
		memmove_extent_buffer(leaf, ptr + size, ptr,
				      end - size - ptr);

	iref = (struct btrfs_extent_inline_ref *)ptr;
	btrfs_set_extent_inline_ref_type(leaf, iref, type);
	if (type == BTRFS_EXTENT_DATA_REF_KEY) {
		struct btrfs_extent_data_ref *dref;
		dref = (struct btrfs_extent_data_ref *)(&iref->offset);
		btrfs_set_extent_data_ref_root(leaf, dref, root_objectid);
		btrfs_set_extent_data_ref_objectid(leaf, dref, owner);
		btrfs_set_extent_data_ref_offset(leaf, dref, offset);
		btrfs_set_extent_data_ref_count(leaf, dref, refs_to_add);
	} else if (type == BTRFS_SHARED_DATA_REF_KEY) {
		struct btrfs_shared_data_ref *sref;
		sref = (struct btrfs_shared_data_ref *)(iref + 1);
		btrfs_set_shared_data_ref_count(leaf, sref, refs_to_add);
		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
	} else if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
	} else {
		btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
	}
	btrfs_mark_buffer_dirty(leaf);
}

static int lookup_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_fs_info *fs_info,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref **ref_ret,
				 u64 bytenr, u64 num_bytes, u64 parent,
				 u64 root_objectid, u64 owner, u64 offset)
{
	int ret;

	ret = lookup_inline_extent_backref(trans, fs_info, path, ref_ret,
					   bytenr, num_bytes, parent,
					   root_objectid, owner, offset, 0);
	if (ret != -ENOENT)
		return ret;

	btrfs_release_path(path);
	*ref_ret = NULL;

	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		ret = lookup_tree_block_ref(trans, fs_info, path, bytenr,
					    parent, root_objectid);
	} else {
		ret = lookup_extent_data_ref(trans, fs_info, path, bytenr,
					     parent, root_objectid, owner,
					     offset);
	}
	return ret;
}
/*
 * helper to update/remove inline back ref
 */
static noinline_for_stack
void update_inline_extent_backref(struct btrfs_fs_info *fs_info,
				  struct btrfs_path *path,
				  struct btrfs_extent_inline_ref *iref,
				  int refs_to_mod,
				  struct btrfs_delayed_extent_op *extent_op,
				  int *last_ref)
{
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	struct btrfs_extent_data_ref *dref = NULL;
	struct btrfs_shared_data_ref *sref = NULL;
	unsigned long ptr;
	unsigned long end;
	u32 item_size;
	int size;
	int type;
	u64 refs;

	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	refs = btrfs_extent_refs(leaf, ei);
	WARN_ON(refs_to_mod < 0 && refs + refs_to_mod <= 0);
	refs += refs_to_mod;
	btrfs_set_extent_refs(leaf, ei, refs);
	if (extent_op)
		__run_delayed_extent_op(extent_op, leaf, ei);

	/*
	 * If type is invalid, we should have bailed out after
	 * lookup_inline_extent_backref().
	 */
	type = btrfs_get_extent_inline_ref_type(leaf, iref, BTRFS_REF_TYPE_ANY);
	ASSERT(type != BTRFS_REF_TYPE_INVALID);

	if (type == BTRFS_EXTENT_DATA_REF_KEY) {
		dref = (struct btrfs_extent_data_ref *)(&iref->offset);
		refs = btrfs_extent_data_ref_count(leaf, dref);
	} else if (type == BTRFS_SHARED_DATA_REF_KEY) {
		sref = (struct btrfs_shared_data_ref *)(iref + 1);
		refs = btrfs_shared_data_ref_count(leaf, sref);
	} else {
		refs = 1;
		BUG_ON(refs_to_mod != -1);
	}

	BUG_ON(refs_to_mod < 0 && refs < -refs_to_mod);
	refs += refs_to_mod;

	if (refs > 0) {
		if (type == BTRFS_EXTENT_DATA_REF_KEY)
			btrfs_set_extent_data_ref_count(leaf, dref, refs);
		else
			btrfs_set_shared_data_ref_count(leaf, sref, refs);
	} else {
		*last_ref = 1;
		size = btrfs_extent_inline_ref_size(type);
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		ptr = (unsigned long)iref;
		end = (unsigned long)ei + item_size;
		if (ptr + size < end)
			memmove_extent_buffer(leaf, ptr, ptr + size,
					      end - ptr - size);
		item_size -= size;
		btrfs_truncate_item(fs_info, path, item_size, 1);
	}
	btrfs_mark_buffer_dirty(leaf);
}
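
/*
 * Illustrative sketch (not part of the original file): removing an inline
 * ref above is just deleting an element from a packed byte array -- shift
 * the tail left over the hole, then shrink the item.  A userspace model:
 */
#if 0
#include <stddef.h>
#include <string.h>

/* Remove size bytes at pos from a packed buffer of item_size bytes. */
static size_t packed_remove(unsigned char *item, size_t item_size,
			    size_t pos, size_t size)
{
	if (pos + size < item_size)
		memmove(item + pos, item + pos + size,
			item_size - pos - size);
	return item_size - size;	/* new, truncated item size */
}
#endif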
static noinline_for_stack
int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_fs_info *fs_info,
				 struct btrfs_path *path,
				 u64 bytenr, u64 num_bytes, u64 parent,
				 u64 root_objectid, u64 owner,
				 u64 offset, int refs_to_add,
				 struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_extent_inline_ref *iref;
	int ret;

	ret = lookup_inline_extent_backref(trans, fs_info, path, &iref,
					   bytenr, num_bytes, parent,
					   root_objectid, owner, offset, 1);
	if (ret == 0) {
		BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID);
		update_inline_extent_backref(fs_info, path, iref,
					     refs_to_add, extent_op, NULL);
	} else if (ret == -ENOENT) {
		setup_inline_extent_backref(fs_info, path, iref, parent,
					    root_objectid, owner, offset,
					    refs_to_add, extent_op);
		ret = 0;
	}
	return ret;
}

static int insert_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_fs_info *fs_info,
				 struct btrfs_path *path,
				 u64 bytenr, u64 parent, u64 root_objectid,
				 u64 owner, u64 offset, int refs_to_add)
{
	int ret;

	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		BUG_ON(refs_to_add != 1);
		ret = insert_tree_block_ref(trans, fs_info, path, bytenr,
					    parent, root_objectid);
	} else {
		ret = insert_extent_data_ref(trans, fs_info, path, bytenr,
					     parent, root_objectid,
					     owner, offset, refs_to_add);
	}
	return ret;
}

static int remove_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_fs_info *fs_info,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref *iref,
				 int refs_to_drop, int is_data, int *last_ref)
{
	int ret = 0;

	BUG_ON(!is_data && refs_to_drop != 1);
	if (iref) {
		update_inline_extent_backref(fs_info, path, iref,
					     -refs_to_drop, NULL, last_ref);
	} else if (is_data) {
		ret = remove_extent_data_ref(trans, fs_info, path, refs_to_drop,
					     last_ref);
	} else {
		*last_ref = 1;
		ret = btrfs_del_item(trans, fs_info->extent_root, path);
	}
	return ret;
}
#define in_range(b, first, len)	((b) >= (first) && (b) < (first) + (len))

static int btrfs_issue_discard(struct block_device *bdev, u64 start, u64 len,
			       u64 *discarded_bytes)
{
	int j, ret = 0;
	u64 bytes_left, end;
	u64 aligned_start = ALIGN(start, 1 << 9);

	if (WARN_ON(start != aligned_start)) {
		len -= aligned_start - start;
		len = round_down(len, 1 << 9);
		start = aligned_start;
	}

	*discarded_bytes = 0;

	if (!len)
		return 0;

	end = start + len;
	bytes_left = len;

	/* Skip any superblocks on this device. */
	for (j = 0; j < BTRFS_SUPER_MIRROR_MAX; j++) {
		u64 sb_start = btrfs_sb_offset(j);
		u64 sb_end = sb_start + BTRFS_SUPER_INFO_SIZE;
		u64 size = sb_start - start;

		if (!in_range(sb_start, start, bytes_left) &&
		    !in_range(sb_end, start, bytes_left) &&
		    !in_range(start, sb_start, BTRFS_SUPER_INFO_SIZE))
			continue;

		/*
		 * Superblock spans beginning of range.  Adjust start and
		 * try again.
		 */
		if (sb_start <= start) {
			start += sb_end - start;
			if (start > end) {
				bytes_left = 0;
				break;
			}
			bytes_left = end - start;
			continue;
		}

		if (size) {
			ret = blkdev_issue_discard(bdev, start >> 9, size >> 9,
						   GFP_NOFS, 0);
			if (!ret)
				*discarded_bytes += size;
			else if (ret != -EOPNOTSUPP)
				return ret;
		}

		start = sb_end;
		if (start > end) {
			bytes_left = 0;
			break;
		}
		bytes_left = end - start;
	}

	if (bytes_left) {
		ret = blkdev_issue_discard(bdev, start >> 9, bytes_left >> 9,
					   GFP_NOFS, 0);
		if (!ret)
			*discarded_bytes += bytes_left;
	}
	return ret;
}
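
/*
 * Illustrative sketch (not part of the original file): the superblock
 * skipping above is "subtract reserved windows from [start, end)".  A
 * userspace model with a single reserved region, assuming nothing beyond
 * stdint/stdio:
 */
#if 0
#include <stdint.h>
#include <stdio.h>

/* Print the sub-ranges of [start, end) that avoid [rsv, rsv + rsv_len). */
static void split_around(uint64_t start, uint64_t end,
			 uint64_t rsv, uint64_t rsv_len)
{
	uint64_t rsv_end = rsv + rsv_len;

	if (rsv > start)		/* piece before the reserved window */
		printf("discard [%llu, %llu)\n",
		       (unsigned long long)start,
		       (unsigned long long)(rsv < end ? rsv : end));
	if (rsv_end < end)		/* piece after the reserved window */
		printf("discard [%llu, %llu)\n",
		       (unsigned long long)(rsv_end > start ? rsv_end : start),
		       (unsigned long long)end);
}
#endif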
int btrfs_discard_extent(struct btrfs_fs_info *fs_info, u64 bytenr,
			 u64 num_bytes, u64 *actual_bytes)
{
	int ret;
	u64 discarded_bytes = 0;
	struct btrfs_bio *bbio = NULL;

	/*
	 * Avoid races with device replace and make sure our bbio has devices
	 * associated to its stripes that don't go away while we are
	 * discarding.
	 */
	btrfs_bio_counter_inc_blocked(fs_info);
	/* Tell the block device(s) that the sectors can be discarded */
	ret = btrfs_map_block(fs_info, BTRFS_MAP_DISCARD, bytenr, &num_bytes,
			      &bbio, 0);
	/* Error condition is -ENOMEM */
	if (!ret) {
		struct btrfs_bio_stripe *stripe = bbio->stripes;
		int i;

		for (i = 0; i < bbio->num_stripes; i++, stripe++) {
			u64 bytes;

			if (!stripe->dev->can_discard)
				continue;

			ret = btrfs_issue_discard(stripe->dev->bdev,
						  stripe->physical,
						  stripe->length,
						  &bytes);
			if (!ret)
				discarded_bytes += bytes;
			else if (ret != -EOPNOTSUPP)
				break; /* Logic errors or -ENOMEM, or -EIO but
					  I don't know how that could happen
					  JDM */

			/*
			 * Just in case we get back EOPNOTSUPP for some reason,
			 * just ignore the return value so we don't screw up
			 * people calling discard_extent.
			 */
			ret = 0;
		}
		btrfs_put_bbio(bbio);
	}
	btrfs_bio_counter_dec(fs_info);

	if (actual_bytes)
		*actual_bytes = discarded_bytes;

	if (ret == -EOPNOTSUPP)
		ret = 0;
	return ret;
}
  1957. /* Can return -ENOMEM */
  1958. int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
  1959. struct btrfs_root *root,
  1960. u64 bytenr, u64 num_bytes, u64 parent,
  1961. u64 root_objectid, u64 owner, u64 offset)
  1962. {
  1963. struct btrfs_fs_info *fs_info = root->fs_info;
  1964. int old_ref_mod, new_ref_mod;
  1965. int ret;
  1966. BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID &&
  1967. root_objectid == BTRFS_TREE_LOG_OBJECTID);
  1968. btrfs_ref_tree_mod(root, bytenr, num_bytes, parent, root_objectid,
  1969. owner, offset, BTRFS_ADD_DELAYED_REF);
  1970. if (owner < BTRFS_FIRST_FREE_OBJECTID) {
  1971. ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
  1972. num_bytes, parent,
  1973. root_objectid, (int)owner,
  1974. BTRFS_ADD_DELAYED_REF, NULL,
  1975. &old_ref_mod, &new_ref_mod);
  1976. } else {
  1977. ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
  1978. num_bytes, parent,
  1979. root_objectid, owner, offset,
  1980. 0, BTRFS_ADD_DELAYED_REF,
  1981. &old_ref_mod, &new_ref_mod);
  1982. }
  1983. if (ret == 0 && old_ref_mod < 0 && new_ref_mod >= 0)
  1984. add_pinned_bytes(fs_info, -num_bytes, owner, root_objectid);
  1985. return ret;
  1986. }
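/*
 * Worker for the delayed ref machinery: first try to insert an inline
 * backref for the extent. If there is no room to inline it (-EAGAIN),
 * bump the reference count on the extent item and insert a separate
 * keyed backref item instead.
 */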
  1987. static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
  1988. struct btrfs_fs_info *fs_info,
  1989. struct btrfs_delayed_ref_node *node,
  1990. u64 parent, u64 root_objectid,
  1991. u64 owner, u64 offset, int refs_to_add,
  1992. struct btrfs_delayed_extent_op *extent_op)
  1993. {
  1994. struct btrfs_path *path;
  1995. struct extent_buffer *leaf;
  1996. struct btrfs_extent_item *item;
  1997. struct btrfs_key key;
  1998. u64 bytenr = node->bytenr;
  1999. u64 num_bytes = node->num_bytes;
  2000. u64 refs;
  2001. int ret;
  2002. path = btrfs_alloc_path();
  2003. if (!path)
  2004. return -ENOMEM;
  2005. path->reada = READA_FORWARD;
  2006. path->leave_spinning = 1;
  2007. /* this will setup the path even if it fails to insert the back ref */
  2008. ret = insert_inline_extent_backref(trans, fs_info, path, bytenr,
  2009. num_bytes, parent, root_objectid,
  2010. owner, offset,
  2011. refs_to_add, extent_op);
  2012. if ((ret < 0 && ret != -EAGAIN) || !ret)
  2013. goto out;
  2014. /*
2015. * Ok we had -EAGAIN which means we didn't have space to insert an
  2016. * inline extent ref, so just update the reference count and add a
  2017. * normal backref.
  2018. */
  2019. leaf = path->nodes[0];
  2020. btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
  2021. item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
  2022. refs = btrfs_extent_refs(leaf, item);
  2023. btrfs_set_extent_refs(leaf, item, refs + refs_to_add);
  2024. if (extent_op)
  2025. __run_delayed_extent_op(extent_op, leaf, item);
  2026. btrfs_mark_buffer_dirty(leaf);
  2027. btrfs_release_path(path);
  2028. path->reada = READA_FORWARD;
  2029. path->leave_spinning = 1;
  2030. /* now insert the actual backref */
  2031. ret = insert_extent_backref(trans, fs_info, path, bytenr, parent,
  2032. root_objectid, owner, offset, refs_to_add);
  2033. if (ret)
  2034. btrfs_abort_transaction(trans, ret);
  2035. out:
  2036. btrfs_free_path(path);
  2037. return ret;
  2038. }
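/*
 * Apply a single delayed data ref: allocate the reserved file extent for
 * a brand new extent, or add/drop one reference on an existing extent,
 * depending on the node's action.
 */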
  2039. static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
  2040. struct btrfs_fs_info *fs_info,
  2041. struct btrfs_delayed_ref_node *node,
  2042. struct btrfs_delayed_extent_op *extent_op,
  2043. int insert_reserved)
  2044. {
  2045. int ret = 0;
  2046. struct btrfs_delayed_data_ref *ref;
  2047. struct btrfs_key ins;
  2048. u64 parent = 0;
  2049. u64 ref_root = 0;
  2050. u64 flags = 0;
  2051. ins.objectid = node->bytenr;
  2052. ins.offset = node->num_bytes;
  2053. ins.type = BTRFS_EXTENT_ITEM_KEY;
  2054. ref = btrfs_delayed_node_to_data_ref(node);
  2055. trace_run_delayed_data_ref(fs_info, node, ref, node->action);
  2056. if (node->type == BTRFS_SHARED_DATA_REF_KEY)
  2057. parent = ref->parent;
  2058. ref_root = ref->root;
  2059. if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
  2060. if (extent_op)
  2061. flags |= extent_op->flags_to_set;
  2062. ret = alloc_reserved_file_extent(trans, fs_info,
  2063. parent, ref_root, flags,
  2064. ref->objectid, ref->offset,
  2065. &ins, node->ref_mod);
  2066. } else if (node->action == BTRFS_ADD_DELAYED_REF) {
  2067. ret = __btrfs_inc_extent_ref(trans, fs_info, node, parent,
  2068. ref_root, ref->objectid,
  2069. ref->offset, node->ref_mod,
  2070. extent_op);
  2071. } else if (node->action == BTRFS_DROP_DELAYED_REF) {
  2072. ret = __btrfs_free_extent(trans, fs_info, node, parent,
  2073. ref_root, ref->objectid,
  2074. ref->offset, node->ref_mod,
  2075. extent_op);
  2076. } else {
  2077. BUG();
  2078. }
  2079. return ret;
  2080. }
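/*
 * Apply a delayed extent op to an extent item in place: set any requested
 * flags and, for tree blocks, update the stored first key.
 */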
  2081. static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
  2082. struct extent_buffer *leaf,
  2083. struct btrfs_extent_item *ei)
  2084. {
  2085. u64 flags = btrfs_extent_flags(leaf, ei);
  2086. if (extent_op->update_flags) {
  2087. flags |= extent_op->flags_to_set;
  2088. btrfs_set_extent_flags(leaf, ei, flags);
  2089. }
  2090. if (extent_op->update_key) {
  2091. struct btrfs_tree_block_info *bi;
  2092. BUG_ON(!(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK));
  2093. bi = (struct btrfs_tree_block_info *)(ei + 1);
  2094. btrfs_set_tree_block_key(leaf, bi, &extent_op->key);
  2095. }
  2096. }
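/*
 * Find the extent item for a delayed ref head and apply its pending
 * extent op. On skinny-metadata filesystems the lookup tries the
 * METADATA_ITEM key first and falls back to the old-style EXTENT_ITEM
 * key if that fails.
 */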
  2097. static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
  2098. struct btrfs_fs_info *fs_info,
  2099. struct btrfs_delayed_ref_head *head,
  2100. struct btrfs_delayed_extent_op *extent_op)
  2101. {
  2102. struct btrfs_key key;
  2103. struct btrfs_path *path;
  2104. struct btrfs_extent_item *ei;
  2105. struct extent_buffer *leaf;
  2106. u32 item_size;
  2107. int ret;
  2108. int err = 0;
  2109. int metadata = !extent_op->is_data;
  2110. if (trans->aborted)
  2111. return 0;
  2112. if (metadata && !btrfs_fs_incompat(fs_info, SKINNY_METADATA))
  2113. metadata = 0;
  2114. path = btrfs_alloc_path();
  2115. if (!path)
  2116. return -ENOMEM;
  2117. key.objectid = head->bytenr;
  2118. if (metadata) {
  2119. key.type = BTRFS_METADATA_ITEM_KEY;
  2120. key.offset = extent_op->level;
  2121. } else {
  2122. key.type = BTRFS_EXTENT_ITEM_KEY;
  2123. key.offset = head->num_bytes;
  2124. }
  2125. again:
  2126. path->reada = READA_FORWARD;
  2127. path->leave_spinning = 1;
  2128. ret = btrfs_search_slot(trans, fs_info->extent_root, &key, path, 0, 1);
  2129. if (ret < 0) {
  2130. err = ret;
  2131. goto out;
  2132. }
  2133. if (ret > 0) {
  2134. if (metadata) {
  2135. if (path->slots[0] > 0) {
  2136. path->slots[0]--;
  2137. btrfs_item_key_to_cpu(path->nodes[0], &key,
  2138. path->slots[0]);
  2139. if (key.objectid == head->bytenr &&
  2140. key.type == BTRFS_EXTENT_ITEM_KEY &&
  2141. key.offset == head->num_bytes)
  2142. ret = 0;
  2143. }
  2144. if (ret > 0) {
  2145. btrfs_release_path(path);
  2146. metadata = 0;
  2147. key.objectid = head->bytenr;
  2148. key.offset = head->num_bytes;
  2149. key.type = BTRFS_EXTENT_ITEM_KEY;
  2150. goto again;
  2151. }
  2152. } else {
  2153. err = -EIO;
  2154. goto out;
  2155. }
  2156. }
  2157. leaf = path->nodes[0];
  2158. item_size = btrfs_item_size_nr(leaf, path->slots[0]);
  2159. #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
  2160. if (item_size < sizeof(*ei)) {
  2161. ret = convert_extent_item_v0(trans, fs_info, path, (u64)-1, 0);
  2162. if (ret < 0) {
  2163. err = ret;
  2164. goto out;
  2165. }
  2166. leaf = path->nodes[0];
  2167. item_size = btrfs_item_size_nr(leaf, path->slots[0]);
  2168. }
  2169. #endif
  2170. BUG_ON(item_size < sizeof(*ei));
  2171. ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
  2172. __run_delayed_extent_op(extent_op, leaf, ei);
  2173. btrfs_mark_buffer_dirty(leaf);
  2174. out:
  2175. btrfs_free_path(path);
  2176. return err;
  2177. }
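/*
 * Apply a single delayed tree block ref. Tree blocks carry exactly one
 * reference per node, so a ref_mod other than 1 is treated as corruption
 * and fails with -EIO.
 */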
  2178. static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
  2179. struct btrfs_fs_info *fs_info,
  2180. struct btrfs_delayed_ref_node *node,
  2181. struct btrfs_delayed_extent_op *extent_op,
  2182. int insert_reserved)
  2183. {
  2184. int ret = 0;
  2185. struct btrfs_delayed_tree_ref *ref;
  2186. struct btrfs_key ins;
  2187. u64 parent = 0;
  2188. u64 ref_root = 0;
  2189. bool skinny_metadata = btrfs_fs_incompat(fs_info, SKINNY_METADATA);
  2190. ref = btrfs_delayed_node_to_tree_ref(node);
  2191. trace_run_delayed_tree_ref(fs_info, node, ref, node->action);
  2192. if (node->type == BTRFS_SHARED_BLOCK_REF_KEY)
  2193. parent = ref->parent;
  2194. ref_root = ref->root;
  2195. ins.objectid = node->bytenr;
  2196. if (skinny_metadata) {
  2197. ins.offset = ref->level;
  2198. ins.type = BTRFS_METADATA_ITEM_KEY;
  2199. } else {
  2200. ins.offset = node->num_bytes;
  2201. ins.type = BTRFS_EXTENT_ITEM_KEY;
  2202. }
  2203. if (node->ref_mod != 1) {
  2204. btrfs_err(fs_info,
  2205. "btree block(%llu) has %d references rather than 1: action %d ref_root %llu parent %llu",
  2206. node->bytenr, node->ref_mod, node->action, ref_root,
  2207. parent);
  2208. return -EIO;
  2209. }
  2210. if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
  2211. BUG_ON(!extent_op || !extent_op->update_flags);
  2212. ret = alloc_reserved_tree_block(trans, fs_info,
  2213. parent, ref_root,
  2214. extent_op->flags_to_set,
  2215. &extent_op->key,
  2216. ref->level, &ins);
  2217. } else if (node->action == BTRFS_ADD_DELAYED_REF) {
  2218. ret = __btrfs_inc_extent_ref(trans, fs_info, node,
  2219. parent, ref_root,
  2220. ref->level, 0, 1,
  2221. extent_op);
  2222. } else if (node->action == BTRFS_DROP_DELAYED_REF) {
  2223. ret = __btrfs_free_extent(trans, fs_info, node,
  2224. parent, ref_root,
  2225. ref->level, 0, 1, extent_op);
  2226. } else {
  2227. BUG();
  2228. }
  2229. return ret;
  2230. }
  2231. /* helper function to actually process a single delayed ref entry */
  2232. static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
  2233. struct btrfs_fs_info *fs_info,
  2234. struct btrfs_delayed_ref_node *node,
  2235. struct btrfs_delayed_extent_op *extent_op,
  2236. int insert_reserved)
  2237. {
  2238. int ret = 0;
  2239. if (trans->aborted) {
  2240. if (insert_reserved)
  2241. btrfs_pin_extent(fs_info, node->bytenr,
  2242. node->num_bytes, 1);
  2243. return 0;
  2244. }
  2245. if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
  2246. node->type == BTRFS_SHARED_BLOCK_REF_KEY)
  2247. ret = run_delayed_tree_ref(trans, fs_info, node, extent_op,
  2248. insert_reserved);
  2249. else if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
  2250. node->type == BTRFS_SHARED_DATA_REF_KEY)
  2251. ret = run_delayed_data_ref(trans, fs_info, node, extent_op,
  2252. insert_reserved);
  2253. else
  2254. BUG();
  2255. return ret;
  2256. }
  2257. static inline struct btrfs_delayed_ref_node *
  2258. select_delayed_ref(struct btrfs_delayed_ref_head *head)
  2259. {
  2260. struct btrfs_delayed_ref_node *ref;
  2261. if (RB_EMPTY_ROOT(&head->ref_tree))
  2262. return NULL;
  2263. /*
  2264. * Select a delayed ref of type BTRFS_ADD_DELAYED_REF first.
  2265. * This is to prevent a ref count from going down to zero, which deletes
  2266. * the extent item from the extent tree, when there still are references
  2267. * to add, which would fail because they would not find the extent item.
  2268. */
  2269. if (!list_empty(&head->ref_add_list))
  2270. return list_first_entry(&head->ref_add_list,
  2271. struct btrfs_delayed_ref_node, add_list);
  2272. ref = rb_entry(rb_first(&head->ref_tree),
  2273. struct btrfs_delayed_ref_node, ref_node);
  2274. ASSERT(list_empty(&ref->add_list));
  2275. return ref;
  2276. }
  2277. static void unselect_delayed_ref_head(struct btrfs_delayed_ref_root *delayed_refs,
  2278. struct btrfs_delayed_ref_head *head)
  2279. {
  2280. spin_lock(&delayed_refs->lock);
  2281. head->processing = 0;
  2282. delayed_refs->num_heads_ready++;
  2283. spin_unlock(&delayed_refs->lock);
  2284. btrfs_delayed_ref_unlock(head);
  2285. }
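/*
 * Run and free the head's pending extent op, if any. Returns 0 when there
 * was nothing to run, 1 when the op was run (head->lock is dropped in
 * that case), or a negative error.
 */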
  2286. static int cleanup_extent_op(struct btrfs_trans_handle *trans,
  2287. struct btrfs_fs_info *fs_info,
  2288. struct btrfs_delayed_ref_head *head)
  2289. {
  2290. struct btrfs_delayed_extent_op *extent_op = head->extent_op;
  2291. int ret;
  2292. if (!extent_op)
  2293. return 0;
  2294. head->extent_op = NULL;
  2295. if (head->must_insert_reserved) {
  2296. btrfs_free_delayed_extent_op(extent_op);
  2297. return 0;
  2298. }
  2299. spin_unlock(&head->lock);
  2300. ret = run_delayed_extent_op(trans, fs_info, head, extent_op);
  2301. btrfs_free_delayed_extent_op(extent_op);
  2302. return ret ? ret : 1;
  2303. }
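/*
 * Final step for a delayed ref head whose ref list has been emptied: run
 * any leftover extent op, unlink the head from the rbtree and settle the
 * pinned-byte, csum and qgroup accounting. Returns 1 if new refs or a new
 * extent op snuck in while the locks were dropped, in which case the
 * caller must loop and process the head again.
 */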
  2304. static int cleanup_ref_head(struct btrfs_trans_handle *trans,
  2305. struct btrfs_fs_info *fs_info,
  2306. struct btrfs_delayed_ref_head *head)
  2307. {
  2308. struct btrfs_delayed_ref_root *delayed_refs;
  2309. int ret;
  2310. delayed_refs = &trans->transaction->delayed_refs;
  2311. ret = cleanup_extent_op(trans, fs_info, head);
  2312. if (ret < 0) {
  2313. unselect_delayed_ref_head(delayed_refs, head);
  2314. btrfs_debug(fs_info, "run_delayed_extent_op returned %d", ret);
  2315. return ret;
  2316. } else if (ret) {
  2317. return ret;
  2318. }
  2319. /*
  2320. * Need to drop our head ref lock and re-acquire the delayed ref lock
  2321. * and then re-check to make sure nobody got added.
  2322. */
  2323. spin_unlock(&head->lock);
  2324. spin_lock(&delayed_refs->lock);
  2325. spin_lock(&head->lock);
  2326. if (!RB_EMPTY_ROOT(&head->ref_tree) || head->extent_op) {
  2327. spin_unlock(&head->lock);
  2328. spin_unlock(&delayed_refs->lock);
  2329. return 1;
  2330. }
  2331. delayed_refs->num_heads--;
  2332. rb_erase(&head->href_node, &delayed_refs->href_root);
  2333. RB_CLEAR_NODE(&head->href_node);
  2334. spin_unlock(&delayed_refs->lock);
  2335. spin_unlock(&head->lock);
  2336. atomic_dec(&delayed_refs->num_entries);
  2337. trace_run_delayed_ref_head(fs_info, head, 0);
  2338. if (head->total_ref_mod < 0) {
  2339. struct btrfs_block_group_cache *cache;
  2340. cache = btrfs_lookup_block_group(fs_info, head->bytenr);
  2341. ASSERT(cache);
  2342. percpu_counter_add(&cache->space_info->total_bytes_pinned,
  2343. -head->num_bytes);
  2344. btrfs_put_block_group(cache);
  2345. if (head->is_data) {
  2346. spin_lock(&delayed_refs->lock);
  2347. delayed_refs->pending_csums -= head->num_bytes;
  2348. spin_unlock(&delayed_refs->lock);
  2349. }
  2350. }
  2351. if (head->must_insert_reserved) {
  2352. btrfs_pin_extent(fs_info, head->bytenr,
  2353. head->num_bytes, 1);
  2354. if (head->is_data) {
  2355. ret = btrfs_del_csums(trans, fs_info, head->bytenr,
  2356. head->num_bytes);
  2357. }
  2358. }
  2359. /* Also free its reserved qgroup space */
  2360. btrfs_qgroup_free_delayed_ref(fs_info, head->qgroup_ref_root,
  2361. head->qgroup_reserved);
  2362. btrfs_delayed_ref_unlock(head);
  2363. btrfs_put_delayed_ref_head(head);
  2364. return 0;
  2365. }
  2366. /*
  2367. * Returns 0 on success or if called with an already aborted transaction.
  2368. * Returns -ENOMEM or -EIO on failure and will abort the transaction.
  2369. */
  2370. static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
  2371. struct btrfs_fs_info *fs_info,
  2372. unsigned long nr)
  2373. {
  2374. struct btrfs_delayed_ref_root *delayed_refs;
  2375. struct btrfs_delayed_ref_node *ref;
  2376. struct btrfs_delayed_ref_head *locked_ref = NULL;
  2377. struct btrfs_delayed_extent_op *extent_op;
  2378. ktime_t start = ktime_get();
  2379. int ret;
  2380. unsigned long count = 0;
  2381. unsigned long actual_count = 0;
  2382. int must_insert_reserved = 0;
  2383. delayed_refs = &trans->transaction->delayed_refs;
  2384. while (1) {
  2385. if (!locked_ref) {
  2386. if (count >= nr)
  2387. break;
  2388. spin_lock(&delayed_refs->lock);
  2389. locked_ref = btrfs_select_ref_head(trans);
  2390. if (!locked_ref) {
  2391. spin_unlock(&delayed_refs->lock);
  2392. break;
  2393. }
  2394. /* grab the lock that says we are going to process
  2395. * all the refs for this head */
  2396. ret = btrfs_delayed_ref_lock(trans, locked_ref);
  2397. spin_unlock(&delayed_refs->lock);
  2398. /*
  2399. * we may have dropped the spin lock to get the head
  2400. * mutex lock, and that might have given someone else
  2401. * time to free the head. If that's true, it has been
  2402. * removed from our list and we can move on.
  2403. */
  2404. if (ret == -EAGAIN) {
  2405. locked_ref = NULL;
  2406. count++;
  2407. continue;
  2408. }
  2409. }
  2410. /*
  2411. * We need to try and merge add/drops of the same ref since we
  2412. * can run into issues with relocate dropping the implicit ref
  2413. * and then it being added back again before the drop can
  2414. * finish. If we merged anything we need to re-loop so we can
  2415. * get a good ref.
  2416. * Or we can get node references of the same type that weren't
  2417. * merged when created due to bumps in the tree mod seq, and
  2418. * we need to merge them to prevent adding an inline extent
  2419. * backref before dropping it (triggering a BUG_ON at
  2420. * insert_inline_extent_backref()).
  2421. */
  2422. spin_lock(&locked_ref->lock);
  2423. btrfs_merge_delayed_refs(trans, fs_info, delayed_refs,
  2424. locked_ref);
  2425. /*
  2426. * locked_ref is the head node, so we have to go one
  2427. * node back for any delayed ref updates
  2428. */
  2429. ref = select_delayed_ref(locked_ref);
  2430. if (ref && ref->seq &&
  2431. btrfs_check_delayed_seq(fs_info, delayed_refs, ref->seq)) {
  2432. spin_unlock(&locked_ref->lock);
  2433. unselect_delayed_ref_head(delayed_refs, locked_ref);
  2434. locked_ref = NULL;
  2435. cond_resched();
  2436. count++;
  2437. continue;
  2438. }
  2439. /*
  2440. * We're done processing refs in this ref_head, clean everything
  2441. * up and move on to the next ref_head.
  2442. */
  2443. if (!ref) {
  2444. ret = cleanup_ref_head(trans, fs_info, locked_ref);
2445. if (ret > 0) {
  2446. /* We dropped our lock, we need to loop. */
  2447. ret = 0;
  2448. continue;
  2449. } else if (ret) {
  2450. return ret;
  2451. }
  2452. locked_ref = NULL;
  2453. count++;
  2454. continue;
  2455. }
  2456. actual_count++;
  2457. ref->in_tree = 0;
  2458. rb_erase(&ref->ref_node, &locked_ref->ref_tree);
  2459. RB_CLEAR_NODE(&ref->ref_node);
  2460. if (!list_empty(&ref->add_list))
  2461. list_del(&ref->add_list);
  2462. /*
  2463. * When we play the delayed ref, also correct the ref_mod on
  2464. * head
  2465. */
  2466. switch (ref->action) {
  2467. case BTRFS_ADD_DELAYED_REF:
  2468. case BTRFS_ADD_DELAYED_EXTENT:
  2469. locked_ref->ref_mod -= ref->ref_mod;
  2470. break;
  2471. case BTRFS_DROP_DELAYED_REF:
  2472. locked_ref->ref_mod += ref->ref_mod;
  2473. break;
  2474. default:
  2475. WARN_ON(1);
  2476. }
  2477. atomic_dec(&delayed_refs->num_entries);
  2478. /*
2479. * Record the must_insert_reserved flag before we drop the spin
  2480. * lock.
  2481. */
  2482. must_insert_reserved = locked_ref->must_insert_reserved;
  2483. locked_ref->must_insert_reserved = 0;
  2484. extent_op = locked_ref->extent_op;
  2485. locked_ref->extent_op = NULL;
  2486. spin_unlock(&locked_ref->lock);
  2487. ret = run_one_delayed_ref(trans, fs_info, ref, extent_op,
  2488. must_insert_reserved);
  2489. btrfs_free_delayed_extent_op(extent_op);
  2490. if (ret) {
  2491. unselect_delayed_ref_head(delayed_refs, locked_ref);
  2492. btrfs_put_delayed_ref(ref);
  2493. btrfs_debug(fs_info, "run_one_delayed_ref returned %d",
  2494. ret);
  2495. return ret;
  2496. }
  2497. btrfs_put_delayed_ref(ref);
  2498. count++;
  2499. cond_resched();
  2500. }
  2501. /*
  2502. * We don't want to include ref heads since we can have empty ref heads
  2503. * and those will drastically skew our runtime down since we just do
  2504. * accounting, no actual extent tree updates.
  2505. */
  2506. if (actual_count > 0) {
  2507. u64 runtime = ktime_to_ns(ktime_sub(ktime_get(), start));
  2508. u64 avg;
  2509. /*
  2510. * We weigh the current average higher than our current runtime
  2511. * to avoid large swings in the average.
  2512. */
  2513. spin_lock(&delayed_refs->lock);
  2514. avg = fs_info->avg_delayed_ref_runtime * 3 + runtime;
  2515. fs_info->avg_delayed_ref_runtime = avg >> 2; /* div by 4 */
  2516. spin_unlock(&delayed_refs->lock);
  2517. }
  2518. return 0;
  2519. }
  2520. #ifdef SCRAMBLE_DELAYED_REFS
  2521. /*
  2522. * Normally delayed refs get processed in ascending bytenr order. This
  2523. * correlates in most cases to the order added. To expose dependencies on this
2524. * order, we start to process the tree in the middle instead of the beginning.
  2525. */
  2526. static u64 find_middle(struct rb_root *root)
  2527. {
  2528. struct rb_node *n = root->rb_node;
  2529. struct btrfs_delayed_ref_node *entry;
  2530. int alt = 1;
  2531. u64 middle;
  2532. u64 first = 0, last = 0;
  2533. n = rb_first(root);
  2534. if (n) {
  2535. entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
  2536. first = entry->bytenr;
  2537. }
  2538. n = rb_last(root);
  2539. if (n) {
  2540. entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
  2541. last = entry->bytenr;
  2542. }
  2543. n = root->rb_node;
  2544. while (n) {
  2545. entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
  2546. WARN_ON(!entry->in_tree);
  2547. middle = entry->bytenr;
  2548. if (alt)
  2549. n = n->rb_left;
  2550. else
  2551. n = n->rb_right;
  2552. alt = 1 - alt;
  2553. }
  2554. return middle;
  2555. }
  2556. #endif
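/*
 * Estimate how many extent tree leaves updating the given number of
 * delayed ref heads can dirty, using the worst-case item size per head.
 * Callers add further slack on top of this estimate.
 */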
  2557. static inline u64 heads_to_leaves(struct btrfs_fs_info *fs_info, u64 heads)
  2558. {
  2559. u64 num_bytes;
  2560. num_bytes = heads * (sizeof(struct btrfs_extent_item) +
  2561. sizeof(struct btrfs_extent_inline_ref));
  2562. if (!btrfs_fs_incompat(fs_info, SKINNY_METADATA))
  2563. num_bytes += heads * sizeof(struct btrfs_tree_block_info);
  2564. /*
  2565. * We don't ever fill up leaves all the way so multiply by 2 just to be
  2566. * closer to what we're really going to want to use.
  2567. */
  2568. return div_u64(num_bytes, BTRFS_LEAF_DATA_SIZE(fs_info));
  2569. }
  2570. /*
2571. * Takes the number of bytes to be checksummed and figures out how many leaves it
  2572. * would require to store the csums for that many bytes.
  2573. */
  2574. u64 btrfs_csum_bytes_to_leaves(struct btrfs_fs_info *fs_info, u64 csum_bytes)
  2575. {
  2576. u64 csum_size;
  2577. u64 num_csums_per_leaf;
  2578. u64 num_csums;
  2579. csum_size = BTRFS_MAX_ITEM_SIZE(fs_info);
  2580. num_csums_per_leaf = div64_u64(csum_size,
  2581. (u64)btrfs_super_csum_size(fs_info->super_copy));
  2582. num_csums = div64_u64(csum_bytes, fs_info->sectorsize);
  2583. num_csums += num_csums_per_leaf - 1;
  2584. num_csums = div64_u64(num_csums, num_csums_per_leaf);
  2585. return num_csums;
  2586. }
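/*
 * Check whether the global block reserve is likely to cover the metadata
 * updates that the queued delayed refs, pending csum deletions and dirty
 * block groups will generate. Returns 1 when the reserve looks too small,
 * 0 otherwise.
 */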
  2587. int btrfs_check_space_for_delayed_refs(struct btrfs_trans_handle *trans,
  2588. struct btrfs_fs_info *fs_info)
  2589. {
  2590. struct btrfs_block_rsv *global_rsv;
  2591. u64 num_heads = trans->transaction->delayed_refs.num_heads_ready;
  2592. u64 csum_bytes = trans->transaction->delayed_refs.pending_csums;
  2593. u64 num_dirty_bgs = trans->transaction->num_dirty_bgs;
  2594. u64 num_bytes, num_dirty_bgs_bytes;
  2595. int ret = 0;
  2596. num_bytes = btrfs_calc_trans_metadata_size(fs_info, 1);
  2597. num_heads = heads_to_leaves(fs_info, num_heads);
  2598. if (num_heads > 1)
  2599. num_bytes += (num_heads - 1) * fs_info->nodesize;
  2600. num_bytes <<= 1;
  2601. num_bytes += btrfs_csum_bytes_to_leaves(fs_info, csum_bytes) *
  2602. fs_info->nodesize;
  2603. num_dirty_bgs_bytes = btrfs_calc_trans_metadata_size(fs_info,
  2604. num_dirty_bgs);
  2605. global_rsv = &fs_info->global_block_rsv;
  2606. /*
2607. * If we can't allocate any more chunks, let's make sure we have _lots_ of
  2608. * wiggle room since running delayed refs can create more delayed refs.
  2609. */
  2610. if (global_rsv->space_info->full) {
  2611. num_dirty_bgs_bytes <<= 1;
  2612. num_bytes <<= 1;
  2613. }
  2614. spin_lock(&global_rsv->lock);
  2615. if (global_rsv->reserved <= num_bytes + num_dirty_bgs_bytes)
  2616. ret = 1;
  2617. spin_unlock(&global_rsv->lock);
  2618. return ret;
  2619. }
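/*
 * Throttling heuristic: based on the average runtime of one delayed ref,
 * return 1 if the backlog amounts to roughly a second or more of work,
 * 2 if it amounts to at least half a second, and otherwise fall back to
 * the global reserve check above.
 */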
  2620. int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans,
  2621. struct btrfs_fs_info *fs_info)
  2622. {
  2623. u64 num_entries =
  2624. atomic_read(&trans->transaction->delayed_refs.num_entries);
  2625. u64 avg_runtime;
  2626. u64 val;
  2627. smp_mb();
  2628. avg_runtime = fs_info->avg_delayed_ref_runtime;
  2629. val = num_entries * avg_runtime;
  2630. if (val >= NSEC_PER_SEC)
  2631. return 1;
  2632. if (val >= NSEC_PER_SEC / 2)
  2633. return 2;
  2634. return btrfs_check_space_for_delayed_refs(trans, fs_info);
  2635. }
  2636. struct async_delayed_refs {
  2637. struct btrfs_root *root;
  2638. u64 transid;
  2639. int count;
  2640. int error;
  2641. int sync;
  2642. struct completion wait;
  2643. struct btrfs_work work;
  2644. };
  2645. static inline struct async_delayed_refs *
  2646. to_async_delayed_refs(struct btrfs_work *work)
  2647. {
  2648. return container_of(work, struct async_delayed_refs, work);
  2649. }
  2650. static void delayed_ref_async_start(struct btrfs_work *work)
  2651. {
  2652. struct async_delayed_refs *async = to_async_delayed_refs(work);
  2653. struct btrfs_trans_handle *trans;
  2654. struct btrfs_fs_info *fs_info = async->root->fs_info;
  2655. int ret;
  2656. /* if the commit is already started, we don't need to wait here */
  2657. if (btrfs_transaction_blocked(fs_info))
  2658. goto done;
  2659. trans = btrfs_join_transaction(async->root);
  2660. if (IS_ERR(trans)) {
  2661. async->error = PTR_ERR(trans);
  2662. goto done;
  2663. }
  2664. /*
  2665. * trans->sync means that when we call end_transaction, we won't
  2666. * wait on delayed refs
  2667. */
  2668. trans->sync = true;
  2669. /* Don't bother flushing if we got into a different transaction */
  2670. if (trans->transid > async->transid)
  2671. goto end;
  2672. ret = btrfs_run_delayed_refs(trans, fs_info, async->count);
  2673. if (ret)
  2674. async->error = ret;
  2675. end:
  2676. ret = btrfs_end_transaction(trans);
  2677. if (ret && !async->error)
  2678. async->error = ret;
  2679. done:
  2680. if (async->sync)
  2681. complete(&async->wait);
  2682. else
  2683. kfree(async);
  2684. }
  2685. int btrfs_async_run_delayed_refs(struct btrfs_fs_info *fs_info,
  2686. unsigned long count, u64 transid, int wait)
  2687. {
  2688. struct async_delayed_refs *async;
  2689. int ret;
  2690. async = kmalloc(sizeof(*async), GFP_NOFS);
  2691. if (!async)
  2692. return -ENOMEM;
  2693. async->root = fs_info->tree_root;
  2694. async->count = count;
  2695. async->error = 0;
  2696. async->transid = transid;
  2697. if (wait)
  2698. async->sync = 1;
  2699. else
  2700. async->sync = 0;
  2701. init_completion(&async->wait);
  2702. btrfs_init_work(&async->work, btrfs_extent_refs_helper,
  2703. delayed_ref_async_start, NULL, NULL);
  2704. btrfs_queue_work(fs_info->extent_workers, &async->work);
  2705. if (wait) {
  2706. wait_for_completion(&async->wait);
  2707. ret = async->error;
  2708. kfree(async);
  2709. return ret;
  2710. }
  2711. return 0;
  2712. }
  2713. /*
2714. * This starts processing the delayed reference count updates and
  2715. * extent insertions we have queued up so far. count can be
  2716. * 0, which means to process everything in the tree at the start
  2717. * of the run (but not newly added entries), or it can be some target
  2718. * number you'd like to process.
  2719. *
  2720. * Returns 0 on success or if called with an aborted transaction
  2721. * Returns <0 on error and aborts the transaction
  2722. */
  2723. int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
  2724. struct btrfs_fs_info *fs_info, unsigned long count)
  2725. {
  2726. struct rb_node *node;
  2727. struct btrfs_delayed_ref_root *delayed_refs;
  2728. struct btrfs_delayed_ref_head *head;
  2729. int ret;
  2730. int run_all = count == (unsigned long)-1;
  2731. bool can_flush_pending_bgs = trans->can_flush_pending_bgs;
  2732. /* We'll clean this up in btrfs_cleanup_transaction */
  2733. if (trans->aborted)
  2734. return 0;
  2735. if (test_bit(BTRFS_FS_CREATING_FREE_SPACE_TREE, &fs_info->flags))
  2736. return 0;
  2737. delayed_refs = &trans->transaction->delayed_refs;
  2738. if (count == 0)
  2739. count = atomic_read(&delayed_refs->num_entries) * 2;
  2740. again:
  2741. #ifdef SCRAMBLE_DELAYED_REFS
  2742. delayed_refs->run_delayed_start = find_middle(&delayed_refs->root);
  2743. #endif
  2744. trans->can_flush_pending_bgs = false;
  2745. ret = __btrfs_run_delayed_refs(trans, fs_info, count);
  2746. if (ret < 0) {
  2747. btrfs_abort_transaction(trans, ret);
  2748. return ret;
  2749. }
  2750. if (run_all) {
  2751. if (!list_empty(&trans->new_bgs))
  2752. btrfs_create_pending_block_groups(trans, fs_info);
  2753. spin_lock(&delayed_refs->lock);
  2754. node = rb_first(&delayed_refs->href_root);
  2755. if (!node) {
  2756. spin_unlock(&delayed_refs->lock);
  2757. goto out;
  2758. }
  2759. head = rb_entry(node, struct btrfs_delayed_ref_head,
  2760. href_node);
  2761. refcount_inc(&head->refs);
  2762. spin_unlock(&delayed_refs->lock);
  2763. /* Mutex was contended, block until it's released and retry. */
  2764. mutex_lock(&head->mutex);
  2765. mutex_unlock(&head->mutex);
  2766. btrfs_put_delayed_ref_head(head);
  2767. cond_resched();
  2768. goto again;
  2769. }
  2770. out:
  2771. trans->can_flush_pending_bgs = can_flush_pending_bgs;
  2772. return 0;
  2773. }
  2774. int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
  2775. struct btrfs_fs_info *fs_info,
  2776. u64 bytenr, u64 num_bytes, u64 flags,
  2777. int level, int is_data)
  2778. {
  2779. struct btrfs_delayed_extent_op *extent_op;
  2780. int ret;
  2781. extent_op = btrfs_alloc_delayed_extent_op();
  2782. if (!extent_op)
  2783. return -ENOMEM;
  2784. extent_op->flags_to_set = flags;
  2785. extent_op->update_flags = true;
  2786. extent_op->update_key = false;
  2787. extent_op->is_data = is_data ? true : false;
  2788. extent_op->level = level;
  2789. ret = btrfs_add_delayed_extent_op(fs_info, trans, bytenr,
  2790. num_bytes, extent_op);
  2791. if (ret)
  2792. btrfs_free_delayed_extent_op(extent_op);
  2793. return ret;
  2794. }
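/*
 * Look at the delayed ref head for bytenr, if one exists, for references
 * that don't match the given root/objectid/offset triplet. Returns 1 if a
 * cross reference exists, 0 if not, and -EAGAIN if the head's mutex was
 * contended and the caller should retry.
 */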
  2795. static noinline int check_delayed_ref(struct btrfs_root *root,
  2796. struct btrfs_path *path,
  2797. u64 objectid, u64 offset, u64 bytenr)
  2798. {
  2799. struct btrfs_delayed_ref_head *head;
  2800. struct btrfs_delayed_ref_node *ref;
  2801. struct btrfs_delayed_data_ref *data_ref;
  2802. struct btrfs_delayed_ref_root *delayed_refs;
  2803. struct btrfs_transaction *cur_trans;
  2804. struct rb_node *node;
  2805. int ret = 0;
  2806. cur_trans = root->fs_info->running_transaction;
  2807. if (!cur_trans)
  2808. return 0;
  2809. delayed_refs = &cur_trans->delayed_refs;
  2810. spin_lock(&delayed_refs->lock);
  2811. head = btrfs_find_delayed_ref_head(delayed_refs, bytenr);
  2812. if (!head) {
  2813. spin_unlock(&delayed_refs->lock);
  2814. return 0;
  2815. }
  2816. if (!mutex_trylock(&head->mutex)) {
  2817. refcount_inc(&head->refs);
  2818. spin_unlock(&delayed_refs->lock);
  2819. btrfs_release_path(path);
  2820. /*
  2821. * Mutex was contended, block until it's released and let
  2822. * caller try again
  2823. */
  2824. mutex_lock(&head->mutex);
  2825. mutex_unlock(&head->mutex);
  2826. btrfs_put_delayed_ref_head(head);
  2827. return -EAGAIN;
  2828. }
  2829. spin_unlock(&delayed_refs->lock);
  2830. spin_lock(&head->lock);
  2831. /*
  2832. * XXX: We should replace this with a proper search function in the
  2833. * future.
  2834. */
  2835. for (node = rb_first(&head->ref_tree); node; node = rb_next(node)) {
  2836. ref = rb_entry(node, struct btrfs_delayed_ref_node, ref_node);
  2837. /* If it's a shared ref we know a cross reference exists */
  2838. if (ref->type != BTRFS_EXTENT_DATA_REF_KEY) {
  2839. ret = 1;
  2840. break;
  2841. }
  2842. data_ref = btrfs_delayed_node_to_data_ref(ref);
  2843. /*
  2844. * If our ref doesn't match the one we're currently looking at
  2845. * then we have a cross reference.
  2846. */
  2847. if (data_ref->root != root->root_key.objectid ||
  2848. data_ref->objectid != objectid ||
  2849. data_ref->offset != offset) {
  2850. ret = 1;
  2851. break;
  2852. }
  2853. }
  2854. spin_unlock(&head->lock);
  2855. mutex_unlock(&head->mutex);
  2856. return ret;
  2857. }
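/*
 * Check the committed extent tree for cross references on a data extent.
 * Returns 0 if the extent is provably owned by the given
 * root/objectid/offset alone, 1 if it is (or may be) shared, and -ENOENT
 * if no matching extent item was found.
 */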
  2858. static noinline int check_committed_ref(struct btrfs_root *root,
  2859. struct btrfs_path *path,
  2860. u64 objectid, u64 offset, u64 bytenr)
  2861. {
  2862. struct btrfs_fs_info *fs_info = root->fs_info;
  2863. struct btrfs_root *extent_root = fs_info->extent_root;
  2864. struct extent_buffer *leaf;
  2865. struct btrfs_extent_data_ref *ref;
  2866. struct btrfs_extent_inline_ref *iref;
  2867. struct btrfs_extent_item *ei;
  2868. struct btrfs_key key;
  2869. u32 item_size;
  2870. int type;
  2871. int ret;
  2872. key.objectid = bytenr;
  2873. key.offset = (u64)-1;
  2874. key.type = BTRFS_EXTENT_ITEM_KEY;
  2875. ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
  2876. if (ret < 0)
  2877. goto out;
  2878. BUG_ON(ret == 0); /* Corruption */
  2879. ret = -ENOENT;
  2880. if (path->slots[0] == 0)
  2881. goto out;
  2882. path->slots[0]--;
  2883. leaf = path->nodes[0];
  2884. btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
  2885. if (key.objectid != bytenr || key.type != BTRFS_EXTENT_ITEM_KEY)
  2886. goto out;
  2887. ret = 1;
  2888. item_size = btrfs_item_size_nr(leaf, path->slots[0]);
  2889. #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
  2890. if (item_size < sizeof(*ei)) {
  2891. WARN_ON(item_size != sizeof(struct btrfs_extent_item_v0));
  2892. goto out;
  2893. }
  2894. #endif
  2895. ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
  2896. if (item_size != sizeof(*ei) +
  2897. btrfs_extent_inline_ref_size(BTRFS_EXTENT_DATA_REF_KEY))
  2898. goto out;
  2899. if (btrfs_extent_generation(leaf, ei) <=
  2900. btrfs_root_last_snapshot(&root->root_item))
  2901. goto out;
  2902. iref = (struct btrfs_extent_inline_ref *)(ei + 1);
  2903. type = btrfs_get_extent_inline_ref_type(leaf, iref, BTRFS_REF_TYPE_DATA);
  2904. if (type != BTRFS_EXTENT_DATA_REF_KEY)
  2905. goto out;
  2906. ref = (struct btrfs_extent_data_ref *)(&iref->offset);
  2907. if (btrfs_extent_refs(leaf, ei) !=
  2908. btrfs_extent_data_ref_count(leaf, ref) ||
  2909. btrfs_extent_data_ref_root(leaf, ref) !=
  2910. root->root_key.objectid ||
  2911. btrfs_extent_data_ref_objectid(leaf, ref) != objectid ||
  2912. btrfs_extent_data_ref_offset(leaf, ref) != offset)
  2913. goto out;
  2914. ret = 0;
  2915. out:
  2916. return ret;
  2917. }
  2918. int btrfs_cross_ref_exist(struct btrfs_root *root, u64 objectid, u64 offset,
  2919. u64 bytenr)
  2920. {
  2921. struct btrfs_path *path;
  2922. int ret;
  2923. int ret2;
  2924. path = btrfs_alloc_path();
  2925. if (!path)
  2926. return -ENOENT;
  2927. do {
  2928. ret = check_committed_ref(root, path, objectid,
  2929. offset, bytenr);
  2930. if (ret && ret != -ENOENT)
  2931. goto out;
  2932. ret2 = check_delayed_ref(root, path, objectid,
  2933. offset, bytenr);
  2934. } while (ret2 == -EAGAIN);
  2935. if (ret2 && ret2 != -ENOENT) {
  2936. ret = ret2;
  2937. goto out;
  2938. }
  2939. if (ret != -ENOENT || ret2 != -ENOENT)
  2940. ret = 0;
  2941. out:
  2942. btrfs_free_path(path);
  2943. if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
  2944. WARN_ON(ret > 0);
  2945. return ret;
  2946. }
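/*
 * Walk every item in a leaf or node and add or drop (according to inc)
 * one reference for each file extent or child block it points to. With
 * full_backref set, the references use the buffer's own bytenr as the
 * parent, i.e. they are shared backrefs.
 */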
  2947. static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
  2948. struct btrfs_root *root,
  2949. struct extent_buffer *buf,
  2950. int full_backref, int inc)
  2951. {
  2952. struct btrfs_fs_info *fs_info = root->fs_info;
  2953. u64 bytenr;
  2954. u64 num_bytes;
  2955. u64 parent;
  2956. u64 ref_root;
  2957. u32 nritems;
  2958. struct btrfs_key key;
  2959. struct btrfs_file_extent_item *fi;
  2960. int i;
  2961. int level;
  2962. int ret = 0;
  2963. int (*process_func)(struct btrfs_trans_handle *,
  2964. struct btrfs_root *,
  2965. u64, u64, u64, u64, u64, u64);
  2966. if (btrfs_is_testing(fs_info))
  2967. return 0;
  2968. ref_root = btrfs_header_owner(buf);
  2969. nritems = btrfs_header_nritems(buf);
  2970. level = btrfs_header_level(buf);
  2971. if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state) && level == 0)
  2972. return 0;
  2973. if (inc)
  2974. process_func = btrfs_inc_extent_ref;
  2975. else
  2976. process_func = btrfs_free_extent;
  2977. if (full_backref)
  2978. parent = buf->start;
  2979. else
  2980. parent = 0;
  2981. for (i = 0; i < nritems; i++) {
  2982. if (level == 0) {
  2983. btrfs_item_key_to_cpu(buf, &key, i);
  2984. if (key.type != BTRFS_EXTENT_DATA_KEY)
  2985. continue;
  2986. fi = btrfs_item_ptr(buf, i,
  2987. struct btrfs_file_extent_item);
  2988. if (btrfs_file_extent_type(buf, fi) ==
  2989. BTRFS_FILE_EXTENT_INLINE)
  2990. continue;
  2991. bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
  2992. if (bytenr == 0)
  2993. continue;
  2994. num_bytes = btrfs_file_extent_disk_num_bytes(buf, fi);
  2995. key.offset -= btrfs_file_extent_offset(buf, fi);
  2996. ret = process_func(trans, root, bytenr, num_bytes,
  2997. parent, ref_root, key.objectid,
  2998. key.offset);
  2999. if (ret)
  3000. goto fail;
  3001. } else {
  3002. bytenr = btrfs_node_blockptr(buf, i);
  3003. num_bytes = fs_info->nodesize;
  3004. ret = process_func(trans, root, bytenr, num_bytes,
  3005. parent, ref_root, level - 1, 0);
  3006. if (ret)
  3007. goto fail;
  3008. }
  3009. }
  3010. return 0;
  3011. fail:
  3012. return ret;
  3013. }
  3014. int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
  3015. struct extent_buffer *buf, int full_backref)
  3016. {
  3017. return __btrfs_mod_ref(trans, root, buf, full_backref, 1);
  3018. }
  3019. int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
  3020. struct extent_buffer *buf, int full_backref)
  3021. {
  3022. return __btrfs_mod_ref(trans, root, buf, full_backref, 0);
  3023. }
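/*
 * Copy the in-memory block group item for cache into its slot in the
 * extent tree. A search miss is converted to -ENOENT.
 */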
  3024. static int write_one_cache_group(struct btrfs_trans_handle *trans,
  3025. struct btrfs_fs_info *fs_info,
  3026. struct btrfs_path *path,
  3027. struct btrfs_block_group_cache *cache)
  3028. {
  3029. int ret;
  3030. struct btrfs_root *extent_root = fs_info->extent_root;
  3031. unsigned long bi;
  3032. struct extent_buffer *leaf;
  3033. ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
  3034. if (ret) {
  3035. if (ret > 0)
  3036. ret = -ENOENT;
  3037. goto fail;
  3038. }
  3039. leaf = path->nodes[0];
  3040. bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
  3041. write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
  3042. btrfs_mark_buffer_dirty(leaf);
  3043. fail:
  3044. btrfs_release_path(path);
  3045. return ret;
  3046. }
  3047. static struct btrfs_block_group_cache *
  3048. next_block_group(struct btrfs_fs_info *fs_info,
  3049. struct btrfs_block_group_cache *cache)
  3050. {
  3051. struct rb_node *node;
  3052. spin_lock(&fs_info->block_group_cache_lock);
  3053. /* If our block group was removed, we need a full search. */
  3054. if (RB_EMPTY_NODE(&cache->cache_node)) {
  3055. const u64 next_bytenr = cache->key.objectid + cache->key.offset;
  3056. spin_unlock(&fs_info->block_group_cache_lock);
  3057. btrfs_put_block_group(cache);
3058. cache = btrfs_lookup_first_block_group(fs_info, next_bytenr);
return cache;
  3059. }
  3060. node = rb_next(&cache->cache_node);
  3061. btrfs_put_block_group(cache);
  3062. if (node) {
  3063. cache = rb_entry(node, struct btrfs_block_group_cache,
  3064. cache_node);
  3065. btrfs_get_block_group(cache);
  3066. } else
  3067. cache = NULL;
  3068. spin_unlock(&fs_info->block_group_cache_lock);
  3069. return cache;
  3070. }
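/*
 * Get the free space cache inode for this block group ready for the
 * current transaction: create the inode if needed, truncate stale
 * contents, and preallocate space for the cache sized by the block group.
 * The resulting state (BTRFS_DC_SETUP, BTRFS_DC_WRITTEN or
 * BTRFS_DC_ERROR) is stored in block_group->disk_cache_state.
 */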
  3071. static int cache_save_setup(struct btrfs_block_group_cache *block_group,
  3072. struct btrfs_trans_handle *trans,
  3073. struct btrfs_path *path)
  3074. {
  3075. struct btrfs_fs_info *fs_info = block_group->fs_info;
  3076. struct btrfs_root *root = fs_info->tree_root;
  3077. struct inode *inode = NULL;
  3078. struct extent_changeset *data_reserved = NULL;
  3079. u64 alloc_hint = 0;
  3080. int dcs = BTRFS_DC_ERROR;
  3081. u64 num_pages = 0;
  3082. int retries = 0;
  3083. int ret = 0;
  3084. /*
3085. * If this block group is smaller than 100 MiB, don't bother caching
3086. * the block group.
  3087. */
  3088. if (block_group->key.offset < (100 * SZ_1M)) {
  3089. spin_lock(&block_group->lock);
  3090. block_group->disk_cache_state = BTRFS_DC_WRITTEN;
  3091. spin_unlock(&block_group->lock);
  3092. return 0;
  3093. }
  3094. if (trans->aborted)
  3095. return 0;
  3096. again:
  3097. inode = lookup_free_space_inode(fs_info, block_group, path);
  3098. if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
  3099. ret = PTR_ERR(inode);
  3100. btrfs_release_path(path);
  3101. goto out;
  3102. }
  3103. if (IS_ERR(inode)) {
  3104. BUG_ON(retries);
  3105. retries++;
  3106. if (block_group->ro)
  3107. goto out_free;
  3108. ret = create_free_space_inode(fs_info, trans, block_group,
  3109. path);
  3110. if (ret)
  3111. goto out_free;
  3112. goto again;
  3113. }
  3114. /*
  3115. * We want to set the generation to 0, that way if anything goes wrong
  3116. * from here on out we know not to trust this cache when we load up next
  3117. * time.
  3118. */
  3119. BTRFS_I(inode)->generation = 0;
  3120. ret = btrfs_update_inode(trans, root, inode);
  3121. if (ret) {
  3122. /*
  3123. * So theoretically we could recover from this, simply set the
  3124. * super cache generation to 0 so we know to invalidate the
  3125. * cache, but then we'd have to keep track of the block groups
  3126. * that fail this way so we know we _have_ to reset this cache
  3127. * before the next commit or risk reading stale cache. So to
3128. * limit our exposure to horrible edge cases, let's just abort the
3129. * transaction; this only happens in really bad situations
  3130. * anyway.
  3131. */
  3132. btrfs_abort_transaction(trans, ret);
  3133. goto out_put;
  3134. }
  3135. WARN_ON(ret);
  3136. /* We've already setup this transaction, go ahead and exit */
  3137. if (block_group->cache_generation == trans->transid &&
  3138. i_size_read(inode)) {
  3139. dcs = BTRFS_DC_SETUP;
  3140. goto out_put;
  3141. }
  3142. if (i_size_read(inode) > 0) {
  3143. ret = btrfs_check_trunc_cache_free_space(fs_info,
  3144. &fs_info->global_block_rsv);
  3145. if (ret)
  3146. goto out_put;
  3147. ret = btrfs_truncate_free_space_cache(trans, NULL, inode);
  3148. if (ret)
  3149. goto out_put;
  3150. }
  3151. spin_lock(&block_group->lock);
  3152. if (block_group->cached != BTRFS_CACHE_FINISHED ||
  3153. !btrfs_test_opt(fs_info, SPACE_CACHE)) {
  3154. /*
  3155. * don't bother trying to write stuff out _if_
  3156. * a) we're not cached,
  3157. * b) we're with nospace_cache mount option,
  3158. * c) we're with v2 space_cache (FREE_SPACE_TREE).
  3159. */
  3160. dcs = BTRFS_DC_WRITTEN;
  3161. spin_unlock(&block_group->lock);
  3162. goto out_put;
  3163. }
  3164. spin_unlock(&block_group->lock);
  3165. /*
  3166. * We hit an ENOSPC when setting up the cache in this transaction, just
  3167. * skip doing the setup, we've already cleared the cache so we're safe.
  3168. */
  3169. if (test_bit(BTRFS_TRANS_CACHE_ENOSPC, &trans->transaction->flags)) {
  3170. ret = -ENOSPC;
  3171. goto out_put;
  3172. }
  3173. /*
  3174. * Try to preallocate enough space based on how big the block group is.
  3175. * Keep in mind this has to include any pinned space which could end up
  3176. * taking up quite a bit since it's not folded into the other space
  3177. * cache.
  3178. */
  3179. num_pages = div_u64(block_group->key.offset, SZ_256M);
  3180. if (!num_pages)
  3181. num_pages = 1;
  3182. num_pages *= 16;
  3183. num_pages *= PAGE_SIZE;
  3184. ret = btrfs_check_data_free_space(inode, &data_reserved, 0, num_pages);
  3185. if (ret)
  3186. goto out_put;
  3187. ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, num_pages,
  3188. num_pages, num_pages,
  3189. &alloc_hint);
  3190. /*
  3191. * Our cache requires contiguous chunks so that we don't modify a bunch
  3192. * of metadata or split extents when writing the cache out, which means
3193. * we can hit ENOSPC if we are heavily fragmented in addition to just normal
  3194. * out of space conditions. So if we hit this just skip setting up any
  3195. * other block groups for this transaction, maybe we'll unpin enough
  3196. * space the next time around.
  3197. */
  3198. if (!ret)
  3199. dcs = BTRFS_DC_SETUP;
  3200. else if (ret == -ENOSPC)
  3201. set_bit(BTRFS_TRANS_CACHE_ENOSPC, &trans->transaction->flags);
  3202. out_put:
  3203. iput(inode);
  3204. out_free:
  3205. btrfs_release_path(path);
  3206. out:
  3207. spin_lock(&block_group->lock);
  3208. if (!ret && dcs == BTRFS_DC_SETUP)
  3209. block_group->cache_generation = trans->transid;
  3210. block_group->disk_cache_state = dcs;
  3211. spin_unlock(&block_group->lock);
  3212. extent_changeset_free(data_reserved);
  3213. return ret;
  3214. }
  3215. int btrfs_setup_space_cache(struct btrfs_trans_handle *trans,
  3216. struct btrfs_fs_info *fs_info)
  3217. {
  3218. struct btrfs_block_group_cache *cache, *tmp;
  3219. struct btrfs_transaction *cur_trans = trans->transaction;
  3220. struct btrfs_path *path;
  3221. if (list_empty(&cur_trans->dirty_bgs) ||
  3222. !btrfs_test_opt(fs_info, SPACE_CACHE))
  3223. return 0;
  3224. path = btrfs_alloc_path();
  3225. if (!path)
  3226. return -ENOMEM;
  3227. /* Could add new block groups, use _safe just in case */
  3228. list_for_each_entry_safe(cache, tmp, &cur_trans->dirty_bgs,
  3229. dirty_list) {
  3230. if (cache->disk_cache_state == BTRFS_DC_CLEAR)
  3231. cache_save_setup(cache, trans, path);
  3232. }
  3233. btrfs_free_path(path);
  3234. return 0;
  3235. }
  3236. /*
  3237. * transaction commit does final block group cache writeback during a
  3238. * critical section where nothing is allowed to change the FS. This is
  3239. * required in order for the cache to actually match the block group,
  3240. * but can introduce a lot of latency into the commit.
  3241. *
  3242. * So, btrfs_start_dirty_block_groups is here to kick off block group
  3243. * cache IO. There's a chance we'll have to redo some of it if the
  3244. * block group changes again during the commit, but it greatly reduces
  3245. * the commit latency by getting rid of the easy block groups while
  3246. * we're still allowing others to join the commit.
  3247. */
  3248. int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans,
  3249. struct btrfs_fs_info *fs_info)
  3250. {
  3251. struct btrfs_block_group_cache *cache;
  3252. struct btrfs_transaction *cur_trans = trans->transaction;
  3253. int ret = 0;
  3254. int should_put;
  3255. struct btrfs_path *path = NULL;
  3256. LIST_HEAD(dirty);
  3257. struct list_head *io = &cur_trans->io_bgs;
  3258. int num_started = 0;
  3259. int loops = 0;
  3260. spin_lock(&cur_trans->dirty_bgs_lock);
  3261. if (list_empty(&cur_trans->dirty_bgs)) {
  3262. spin_unlock(&cur_trans->dirty_bgs_lock);
  3263. return 0;
  3264. }
  3265. list_splice_init(&cur_trans->dirty_bgs, &dirty);
  3266. spin_unlock(&cur_trans->dirty_bgs_lock);
  3267. again:
  3268. /*
  3269. * make sure all the block groups on our dirty list actually
  3270. * exist
  3271. */
  3272. btrfs_create_pending_block_groups(trans, fs_info);
  3273. if (!path) {
  3274. path = btrfs_alloc_path();
  3275. if (!path)
  3276. return -ENOMEM;
  3277. }
  3278. /*
  3279. * cache_write_mutex is here only to save us from balance or automatic
  3280. * removal of empty block groups deleting this block group while we are
  3281. * writing out the cache
  3282. */
  3283. mutex_lock(&trans->transaction->cache_write_mutex);
  3284. while (!list_empty(&dirty)) {
  3285. cache = list_first_entry(&dirty,
  3286. struct btrfs_block_group_cache,
  3287. dirty_list);
  3288. /*
  3289. * this can happen if something re-dirties a block
  3290. * group that is already under IO. Just wait for it to
  3291. * finish and then do it all again
  3292. */
  3293. if (!list_empty(&cache->io_list)) {
  3294. list_del_init(&cache->io_list);
  3295. btrfs_wait_cache_io(trans, cache, path);
  3296. btrfs_put_block_group(cache);
  3297. }
  3298. /*
  3299. * btrfs_wait_cache_io uses the cache->dirty_list to decide
  3300. * if it should update the cache_state. Don't delete
  3301. * until after we wait.
  3302. *
  3303. * Since we're not running in the commit critical section
  3304. * we need the dirty_bgs_lock to protect from update_block_group
  3305. */
  3306. spin_lock(&cur_trans->dirty_bgs_lock);
  3307. list_del_init(&cache->dirty_list);
  3308. spin_unlock(&cur_trans->dirty_bgs_lock);
  3309. should_put = 1;
  3310. cache_save_setup(cache, trans, path);
  3311. if (cache->disk_cache_state == BTRFS_DC_SETUP) {
  3312. cache->io_ctl.inode = NULL;
  3313. ret = btrfs_write_out_cache(fs_info, trans,
  3314. cache, path);
  3315. if (ret == 0 && cache->io_ctl.inode) {
  3316. num_started++;
  3317. should_put = 0;
  3318. /*
  3319. * the cache_write_mutex is protecting
  3320. * the io_list
  3321. */
  3322. list_add_tail(&cache->io_list, io);
  3323. } else {
  3324. /*
  3325. * if we failed to write the cache, the
  3326. * generation will be bad and life goes on
  3327. */
  3328. ret = 0;
  3329. }
  3330. }
  3331. if (!ret) {
  3332. ret = write_one_cache_group(trans, fs_info,
  3333. path, cache);
  3334. /*
  3335. * Our block group might still be attached to the list
  3336. * of new block groups in the transaction handle of some
  3337. * other task (struct btrfs_trans_handle->new_bgs). This
  3338. * means its block group item isn't yet in the extent
  3339. * tree. If this happens ignore the error, as we will
  3340. * try again later in the critical section of the
  3341. * transaction commit.
  3342. */
  3343. if (ret == -ENOENT) {
  3344. ret = 0;
  3345. spin_lock(&cur_trans->dirty_bgs_lock);
  3346. if (list_empty(&cache->dirty_list)) {
  3347. list_add_tail(&cache->dirty_list,
  3348. &cur_trans->dirty_bgs);
  3349. btrfs_get_block_group(cache);
  3350. }
  3351. spin_unlock(&cur_trans->dirty_bgs_lock);
  3352. } else if (ret) {
  3353. btrfs_abort_transaction(trans, ret);
  3354. }
  3355. }
3356. /* if it's not on the io list, we need to put the block group */
  3357. if (should_put)
  3358. btrfs_put_block_group(cache);
  3359. if (ret)
  3360. break;
  3361. /*
  3362. * Avoid blocking other tasks for too long. It might even save
  3363. * us from writing caches for block groups that are going to be
  3364. * removed.
  3365. */
  3366. mutex_unlock(&trans->transaction->cache_write_mutex);
  3367. mutex_lock(&trans->transaction->cache_write_mutex);
  3368. }
  3369. mutex_unlock(&trans->transaction->cache_write_mutex);
  3370. /*
  3371. * go through delayed refs for all the stuff we've just kicked off
  3372. * and then loop back (just once)
  3373. */
  3374. ret = btrfs_run_delayed_refs(trans, fs_info, 0);
  3375. if (!ret && loops == 0) {
  3376. loops++;
  3377. spin_lock(&cur_trans->dirty_bgs_lock);
  3378. list_splice_init(&cur_trans->dirty_bgs, &dirty);
  3379. /*
  3380. * dirty_bgs_lock protects us from concurrent block group
  3381. * deletes too (not just cache_write_mutex).
  3382. */
  3383. if (!list_empty(&dirty)) {
  3384. spin_unlock(&cur_trans->dirty_bgs_lock);
  3385. goto again;
  3386. }
  3387. spin_unlock(&cur_trans->dirty_bgs_lock);
  3388. } else if (ret < 0) {
  3389. btrfs_cleanup_dirty_bgs(cur_trans, fs_info);
  3390. }
  3391. btrfs_free_path(path);
  3392. return ret;
  3393. }
  3394. int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
  3395. struct btrfs_fs_info *fs_info)
  3396. {
  3397. struct btrfs_block_group_cache *cache;
  3398. struct btrfs_transaction *cur_trans = trans->transaction;
  3399. int ret = 0;
  3400. int should_put;
  3401. struct btrfs_path *path;
  3402. struct list_head *io = &cur_trans->io_bgs;
  3403. int num_started = 0;
  3404. path = btrfs_alloc_path();
  3405. if (!path)
  3406. return -ENOMEM;
  3407. /*
  3408. * Even though we are in the critical section of the transaction commit,
  3409. * we can still have concurrent tasks adding elements to this
  3410. * transaction's list of dirty block groups. These tasks correspond to
  3411. * endio free space workers started when writeback finishes for a
  3412. * space cache, which run inode.c:btrfs_finish_ordered_io(), and can
  3413. * allocate new block groups as a result of COWing nodes of the root
  3414. * tree when updating the free space inode. The writeback for the space
  3415. * caches is triggered by an earlier call to
  3416. * btrfs_start_dirty_block_groups() and iterations of the following
  3417. * loop.
  3418. * Also we want to do the cache_save_setup first and then run the
  3419. * delayed refs to make sure we have the best chance at doing this all
  3420. * in one shot.
  3421. */
  3422. spin_lock(&cur_trans->dirty_bgs_lock);
  3423. while (!list_empty(&cur_trans->dirty_bgs)) {
  3424. cache = list_first_entry(&cur_trans->dirty_bgs,
  3425. struct btrfs_block_group_cache,
  3426. dirty_list);
  3427. /*
  3428. * this can happen if cache_save_setup re-dirties a block
  3429. * group that is already under IO. Just wait for it to
  3430. * finish and then do it all again
  3431. */
  3432. if (!list_empty(&cache->io_list)) {
  3433. spin_unlock(&cur_trans->dirty_bgs_lock);
  3434. list_del_init(&cache->io_list);
  3435. btrfs_wait_cache_io(trans, cache, path);
  3436. btrfs_put_block_group(cache);
  3437. spin_lock(&cur_trans->dirty_bgs_lock);
  3438. }
  3439. /*
  3440. * don't remove from the dirty list until after we've waited
  3441. * on any pending IO
  3442. */
  3443. list_del_init(&cache->dirty_list);
  3444. spin_unlock(&cur_trans->dirty_bgs_lock);
  3445. should_put = 1;
  3446. cache_save_setup(cache, trans, path);
  3447. if (!ret)
  3448. ret = btrfs_run_delayed_refs(trans, fs_info,
  3449. (unsigned long) -1);
  3450. if (!ret && cache->disk_cache_state == BTRFS_DC_SETUP) {
  3451. cache->io_ctl.inode = NULL;
  3452. ret = btrfs_write_out_cache(fs_info, trans,
  3453. cache, path);
  3454. if (ret == 0 && cache->io_ctl.inode) {
  3455. num_started++;
  3456. should_put = 0;
  3457. list_add_tail(&cache->io_list, io);
  3458. } else {
  3459. /*
  3460. * if we failed to write the cache, the
  3461. * generation will be bad and life goes on
  3462. */
  3463. ret = 0;
  3464. }
  3465. }
  3466. if (!ret) {
  3467. ret = write_one_cache_group(trans, fs_info,
  3468. path, cache);
  3469. /*
  3470. * One of the free space endio workers might have
  3471. * created a new block group while updating a free space
  3472. * cache's inode (at inode.c:btrfs_finish_ordered_io())
  3473. * and hasn't released its transaction handle yet, in
  3474. * which case the new block group is still attached to
  3475. * its transaction handle and its creation has not
  3476. * finished yet (no block group item in the extent tree
  3477. * yet, etc). If this is the case, wait for all free
3478. * space endio workers to finish and retry. This is a
3479. * very rare case so no need for a more efficient and
  3480. * complex approach.
  3481. */
  3482. if (ret == -ENOENT) {
  3483. wait_event(cur_trans->writer_wait,
  3484. atomic_read(&cur_trans->num_writers) == 1);
  3485. ret = write_one_cache_group(trans, fs_info,
  3486. path, cache);
  3487. }
  3488. if (ret)
  3489. btrfs_abort_transaction(trans, ret);
  3490. }
3491. /* if it's not on the io list, we need to put the block group */
  3492. if (should_put)
  3493. btrfs_put_block_group(cache);
  3494. spin_lock(&cur_trans->dirty_bgs_lock);
  3495. }
  3496. spin_unlock(&cur_trans->dirty_bgs_lock);
  3497. while (!list_empty(io)) {
  3498. cache = list_first_entry(io, struct btrfs_block_group_cache,
  3499. io_list);
  3500. list_del_init(&cache->io_list);
  3501. btrfs_wait_cache_io(trans, cache, path);
  3502. btrfs_put_block_group(cache);
  3503. }
  3504. btrfs_free_path(path);
  3505. return ret;
  3506. }
int btrfs_extent_readonly(struct btrfs_fs_info *fs_info, u64 bytenr)
{
	struct btrfs_block_group_cache *block_group;
	int readonly = 0;

	block_group = btrfs_lookup_block_group(fs_info, bytenr);
	if (!block_group || block_group->ro)
		readonly = 1;
	if (block_group)
		btrfs_put_block_group(block_group);
	return readonly;
}
bool btrfs_inc_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr)
{
	struct btrfs_block_group_cache *bg;
	bool ret = true;

	bg = btrfs_lookup_block_group(fs_info, bytenr);
	if (!bg)
		return false;

	spin_lock(&bg->lock);
	if (bg->ro)
		ret = false;
	else
		atomic_inc(&bg->nocow_writers);
	spin_unlock(&bg->lock);

	/* No put on block group, done by btrfs_dec_nocow_writers(). */
	if (!ret)
		btrfs_put_block_group(bg);

	return ret;
}

void btrfs_dec_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr)
{
	struct btrfs_block_group_cache *bg;

	bg = btrfs_lookup_block_group(fs_info, bytenr);
	ASSERT(bg);
	if (atomic_dec_and_test(&bg->nocow_writers))
		wake_up_atomic_t(&bg->nocow_writers);
	/*
	 * Once for our lookup and once for the lookup done by a previous call
	 * to btrfs_inc_nocow_writers().
	 */
	btrfs_put_block_group(bg);
	btrfs_put_block_group(bg);
}

static int btrfs_wait_nocow_writers_atomic_t(atomic_t *a)
{
	schedule();
	return 0;
}

void btrfs_wait_nocow_writers(struct btrfs_block_group_cache *bg)
{
	wait_on_atomic_t(&bg->nocow_writers,
			 btrfs_wait_nocow_writers_atomic_t,
			 TASK_UNINTERRUPTIBLE);
}
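
/*
 * Illustrative sketch (annotation, not part of the original file): how a
 * caller is expected to pair the nocow writer helpers above. The helper
 * do_the_nocow_write() is hypothetical; in the real tree this pattern
 * lives in the NOCOW write path (e.g. inode.c).
 */
#if 0
static int example_nocow_write(struct btrfs_fs_info *fs_info, u64 bytenr)
{
	int ret;

	/* Fails if the block group is RO (or gone); fall back to COW then. */
	if (!btrfs_inc_nocow_writers(fs_info, bytenr))
		return -EAGAIN;

	ret = do_the_nocow_write();	/* hypothetical */

	/* Drops the lookup refs taken here and in btrfs_inc_nocow_writers(). */
	btrfs_dec_nocow_writers(fs_info, bytenr);
	return ret;
}
#endif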
static const char *alloc_name(u64 flags)
{
	switch (flags) {
	case BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA:
		return "mixed";
	case BTRFS_BLOCK_GROUP_METADATA:
		return "metadata";
	case BTRFS_BLOCK_GROUP_DATA:
		return "data";
	case BTRFS_BLOCK_GROUP_SYSTEM:
		return "system";
	default:
		WARN_ON(1);
		return "invalid-combination";
	}
}
static int create_space_info(struct btrfs_fs_info *info, u64 flags,
			     struct btrfs_space_info **new)
{
	struct btrfs_space_info *space_info;
	int i;
	int ret;

	space_info = kzalloc(sizeof(*space_info), GFP_NOFS);
	if (!space_info)
		return -ENOMEM;

	ret = percpu_counter_init(&space_info->total_bytes_pinned, 0,
				  GFP_KERNEL);
	if (ret) {
		kfree(space_info);
		return ret;
	}

	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
		INIT_LIST_HEAD(&space_info->block_groups[i]);
	init_rwsem(&space_info->groups_sem);
	spin_lock_init(&space_info->lock);
	space_info->flags = flags & BTRFS_BLOCK_GROUP_TYPE_MASK;
	space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
	init_waitqueue_head(&space_info->wait);
	INIT_LIST_HEAD(&space_info->ro_bgs);
	INIT_LIST_HEAD(&space_info->tickets);
	INIT_LIST_HEAD(&space_info->priority_tickets);

	ret = kobject_init_and_add(&space_info->kobj, &space_info_ktype,
				   info->space_info_kobj, "%s",
				   alloc_name(space_info->flags));
	if (ret) {
		percpu_counter_destroy(&space_info->total_bytes_pinned);
		kfree(space_info);
		return ret;
	}

	*new = space_info;
	list_add_rcu(&space_info->list, &info->space_info);
	if (flags & BTRFS_BLOCK_GROUP_DATA)
		info->data_sinfo = space_info;

	return ret;
}
static void update_space_info(struct btrfs_fs_info *info, u64 flags,
			      u64 total_bytes, u64 bytes_used,
			      u64 bytes_readonly,
			      struct btrfs_space_info **space_info)
{
	struct btrfs_space_info *found;
	int factor;

	if (flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
		     BTRFS_BLOCK_GROUP_RAID10))
		factor = 2;
	else
		factor = 1;

	found = __find_space_info(info, flags);
	ASSERT(found);
	spin_lock(&found->lock);
	found->total_bytes += total_bytes;
	found->disk_total += total_bytes * factor;
	found->bytes_used += bytes_used;
	found->disk_used += bytes_used * factor;
	found->bytes_readonly += bytes_readonly;
	if (total_bytes > 0)
		found->full = 0;
	space_info_add_new_bytes(info, found, total_bytes -
				 bytes_used - bytes_readonly);
	spin_unlock(&found->lock);
	*space_info = found;
}
static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
{
	u64 extra_flags = chunk_to_extended(flags) &
				BTRFS_EXTENDED_PROFILE_MASK;

	write_seqlock(&fs_info->profiles_lock);
	if (flags & BTRFS_BLOCK_GROUP_DATA)
		fs_info->avail_data_alloc_bits |= extra_flags;
	if (flags & BTRFS_BLOCK_GROUP_METADATA)
		fs_info->avail_metadata_alloc_bits |= extra_flags;
	if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
		fs_info->avail_system_alloc_bits |= extra_flags;
	write_sequnlock(&fs_info->profiles_lock);
}
/*
 * Returns target flags in extended format or 0 if restripe for this
 * chunk_type is not in progress.
 *
 * Should be called with either volume_mutex or balance_lock held.
 */
static u64 get_restripe_target(struct btrfs_fs_info *fs_info, u64 flags)
{
	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
	u64 target = 0;

	if (!bctl)
		return 0;

	if (flags & BTRFS_BLOCK_GROUP_DATA &&
	    bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) {
		target = BTRFS_BLOCK_GROUP_DATA | bctl->data.target;
	} else if (flags & BTRFS_BLOCK_GROUP_SYSTEM &&
		   bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
		target = BTRFS_BLOCK_GROUP_SYSTEM | bctl->sys.target;
	} else if (flags & BTRFS_BLOCK_GROUP_METADATA &&
		   bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) {
		target = BTRFS_BLOCK_GROUP_METADATA | bctl->meta.target;
	}

	return target;
}
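
/*
 * Worked example (annotation, not in the original source): with a balance
 * running that converts data chunks to RAID1 (bctl->data.flags has
 * BTRFS_BALANCE_ARGS_CONVERT and bctl->data.target is the extended RAID1
 * bit), a call with flags == BTRFS_BLOCK_GROUP_DATA returns
 * BTRFS_BLOCK_GROUP_DATA | bctl->data.target; metadata and system lookups
 * still return 0.
 */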
/*
 * @flags: available profiles in extended format (see ctree.h)
 *
 * Returns reduced profile in chunk format. If profile changing is in
 * progress (either running or paused) picks the target profile (if it's
 * already available), otherwise falls back to plain reducing.
 */
static u64 btrfs_reduce_alloc_profile(struct btrfs_fs_info *fs_info, u64 flags)
{
	u64 num_devices = fs_info->fs_devices->rw_devices;
	u64 target;
	u64 raid_type;
	u64 allowed = 0;

	/*
	 * See if restripe for this chunk_type is in progress, if so
	 * try to reduce to the target profile.
	 */
	spin_lock(&fs_info->balance_lock);
	target = get_restripe_target(fs_info, flags);
	if (target) {
		/* Pick target profile only if it's already available. */
		if ((flags & target) & BTRFS_EXTENDED_PROFILE_MASK) {
			spin_unlock(&fs_info->balance_lock);
			return extended_to_chunk(target);
		}
	}
	spin_unlock(&fs_info->balance_lock);

	/* First, mask out the RAID levels which aren't possible. */
	for (raid_type = 0; raid_type < BTRFS_NR_RAID_TYPES; raid_type++) {
		if (num_devices >= btrfs_raid_array[raid_type].devs_min)
			allowed |= btrfs_raid_group[raid_type];
	}
	allowed &= flags;

	if (allowed & BTRFS_BLOCK_GROUP_RAID6)
		allowed = BTRFS_BLOCK_GROUP_RAID6;
	else if (allowed & BTRFS_BLOCK_GROUP_RAID5)
		allowed = BTRFS_BLOCK_GROUP_RAID5;
	else if (allowed & BTRFS_BLOCK_GROUP_RAID10)
		allowed = BTRFS_BLOCK_GROUP_RAID10;
	else if (allowed & BTRFS_BLOCK_GROUP_RAID1)
		allowed = BTRFS_BLOCK_GROUP_RAID1;
	else if (allowed & BTRFS_BLOCK_GROUP_RAID0)
		allowed = BTRFS_BLOCK_GROUP_RAID0;

	flags &= ~BTRFS_BLOCK_GROUP_PROFILE_MASK;

	return extended_to_chunk(flags | allowed);
}
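
/*
 * Worked example (annotation, not in the original source): on a filesystem
 * with two rw devices, the loop above marks every profile whose devs_min
 * is <= 2 as possible; after "allowed &= flags", if @flags offered both
 * RAID0 and RAID1 for this block group type, the if/else ladder keeps only
 * RAID1, so the result is the type bits plus RAID1, converted to chunk
 * format.
 */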
static u64 get_alloc_profile(struct btrfs_fs_info *fs_info, u64 orig_flags)
{
	unsigned seq;
	u64 flags;

	do {
		flags = orig_flags;
		seq = read_seqbegin(&fs_info->profiles_lock);

		if (flags & BTRFS_BLOCK_GROUP_DATA)
			flags |= fs_info->avail_data_alloc_bits;
		else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
			flags |= fs_info->avail_system_alloc_bits;
		else if (flags & BTRFS_BLOCK_GROUP_METADATA)
			flags |= fs_info->avail_metadata_alloc_bits;
	} while (read_seqretry(&fs_info->profiles_lock, seq));

	return btrfs_reduce_alloc_profile(fs_info, flags);
}

static u64 get_alloc_profile_by_root(struct btrfs_root *root, int data)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 flags;
	u64 ret;

	if (data)
		flags = BTRFS_BLOCK_GROUP_DATA;
	else if (root == fs_info->chunk_root)
		flags = BTRFS_BLOCK_GROUP_SYSTEM;
	else
		flags = BTRFS_BLOCK_GROUP_METADATA;

	ret = get_alloc_profile(fs_info, flags);
	return ret;
}

u64 btrfs_data_alloc_profile(struct btrfs_fs_info *fs_info)
{
	return get_alloc_profile(fs_info, BTRFS_BLOCK_GROUP_DATA);
}

u64 btrfs_metadata_alloc_profile(struct btrfs_fs_info *fs_info)
{
	return get_alloc_profile(fs_info, BTRFS_BLOCK_GROUP_METADATA);
}

u64 btrfs_system_alloc_profile(struct btrfs_fs_info *fs_info)
{
	return get_alloc_profile(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
}
static u64 btrfs_space_info_used(struct btrfs_space_info *s_info,
				 bool may_use_included)
{
	ASSERT(s_info);
	return s_info->bytes_used + s_info->bytes_reserved +
		s_info->bytes_pinned + s_info->bytes_readonly +
		(may_use_included ? s_info->bytes_may_use : 0);
}
int btrfs_alloc_data_chunk_ondemand(struct btrfs_inode *inode, u64 bytes)
{
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_space_info *data_sinfo = fs_info->data_sinfo;
	u64 used;
	int ret = 0;
	int need_commit = 2;
	int have_pinned_space;

	/* Make sure bytes are sectorsize aligned. */
	bytes = ALIGN(bytes, fs_info->sectorsize);

	if (btrfs_is_free_space_inode(inode)) {
		need_commit = 0;
		ASSERT(current->journal_info);
	}

again:
	/* Make sure we have enough space to handle the data first. */
	spin_lock(&data_sinfo->lock);
	used = btrfs_space_info_used(data_sinfo, true);

	if (used + bytes > data_sinfo->total_bytes) {
		struct btrfs_trans_handle *trans;

		/*
		 * If we don't have enough free bytes in this space then we
		 * need to alloc a new chunk.
		 */
		if (!data_sinfo->full) {
			u64 alloc_target;

			data_sinfo->force_alloc = CHUNK_ALLOC_FORCE;
			spin_unlock(&data_sinfo->lock);

			alloc_target = btrfs_data_alloc_profile(fs_info);
			/*
			 * It is ugly that we don't call nolock join
			 * transaction for the free space inode case here.
			 * But it is safe because we only do the data space
			 * reservation for the free space cache in the
			 * transaction context; the common join transaction
			 * just increases the counter of the current
			 * transaction handler, it doesn't try to acquire the
			 * trans_lock of the fs.
			 */
			trans = btrfs_join_transaction(root);
			if (IS_ERR(trans))
				return PTR_ERR(trans);

			ret = do_chunk_alloc(trans, fs_info, alloc_target,
					     CHUNK_ALLOC_NO_FORCE);
			btrfs_end_transaction(trans);
			if (ret < 0) {
				if (ret != -ENOSPC)
					return ret;
				else {
					have_pinned_space = 1;
					goto commit_trans;
				}
			}

			goto again;
		}

		/*
		 * If we don't have enough pinned space to deal with this
		 * allocation, and no removed chunk in current transaction,
		 * don't bother committing the transaction.
		 */
		have_pinned_space = percpu_counter_compare(
			&data_sinfo->total_bytes_pinned,
			used + bytes - data_sinfo->total_bytes);
		spin_unlock(&data_sinfo->lock);

		/* Commit the current transaction and try again. */
commit_trans:
		if (need_commit &&
		    !atomic_read(&fs_info->open_ioctl_trans)) {
			need_commit--;

			if (need_commit > 0) {
				btrfs_start_delalloc_roots(fs_info, 0, -1);
				btrfs_wait_ordered_roots(fs_info, U64_MAX, 0,
							 (u64)-1);
			}

			trans = btrfs_join_transaction(root);
			if (IS_ERR(trans))
				return PTR_ERR(trans);
			if (have_pinned_space >= 0 ||
			    test_bit(BTRFS_TRANS_HAVE_FREE_BGS,
				     &trans->transaction->flags) ||
			    need_commit > 0) {
				ret = btrfs_commit_transaction(trans);
				if (ret)
					return ret;
				/*
				 * The cleaner kthread might still be doing
				 * iput operations. Wait for it to finish so
				 * that more space is released.
				 */
				mutex_lock(&fs_info->cleaner_delayed_iput_mutex);
				mutex_unlock(&fs_info->cleaner_delayed_iput_mutex);
				goto again;
			} else {
				btrfs_end_transaction(trans);
			}
		}

		trace_btrfs_space_reservation(fs_info,
					      "space_info:enospc",
					      data_sinfo->flags, bytes, 1);
		return -ENOSPC;
	}
	data_sinfo->bytes_may_use += bytes;
	trace_btrfs_space_reservation(fs_info, "space_info",
				      data_sinfo->flags, bytes, 1);
	spin_unlock(&data_sinfo->lock);

	return ret;
}
int btrfs_check_data_free_space(struct inode *inode,
			struct extent_changeset **reserved, u64 start, u64 len)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	int ret;

	/* Align the range. */
	len = round_up(start + len, fs_info->sectorsize) -
	      round_down(start, fs_info->sectorsize);
	start = round_down(start, fs_info->sectorsize);

	ret = btrfs_alloc_data_chunk_ondemand(BTRFS_I(inode), len);
	if (ret < 0)
		return ret;

	/* Use new btrfs_qgroup_reserve_data to reserve precious data space. */
	ret = btrfs_qgroup_reserve_data(inode, reserved, start, len);
	if (ret < 0)
		btrfs_free_reserved_data_space_noquota(inode, start, len);
	else
		ret = 0;
	return ret;
}
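
/*
 * Illustrative sketch (annotation, not part of the original file): the
 * expected reserve/free pairing around a data write. do_the_write() is
 * hypothetical; on success the reservation is consumed later by the
 * ordered extent machinery rather than freed here.
 */
#if 0
static int example_reserve_for_write(struct inode *inode, u64 start, u64 len)
{
	struct extent_changeset *reserved = NULL;
	int ret;

	ret = btrfs_check_data_free_space(inode, &reserved, start, len);
	if (ret < 0)
		return ret;

	ret = do_the_write(inode, start, len);	/* hypothetical */
	if (ret < 0)
		/* Undo both the space_info and the qgroup reservation. */
		btrfs_free_reserved_data_space(inode, reserved, start, len);

	extent_changeset_free(reserved);
	return ret;
}
#endif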
/*
 * Called if we need to clear a data reservation for this inode,
 * normally in an error case.
 *
 * This one will *NOT* use the accurate qgroup reserved space API, just for
 * cases where we can't sleep and are sure it won't affect qgroup reserved
 * space, like clear_bit_hook().
 */
void btrfs_free_reserved_data_space_noquota(struct inode *inode, u64 start,
					    u64 len)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_space_info *data_sinfo;

	/* Make sure the range is aligned to sectorsize. */
	len = round_up(start + len, fs_info->sectorsize) -
	      round_down(start, fs_info->sectorsize);
	start = round_down(start, fs_info->sectorsize);

	data_sinfo = fs_info->data_sinfo;
	spin_lock(&data_sinfo->lock);
	if (WARN_ON(data_sinfo->bytes_may_use < len))
		data_sinfo->bytes_may_use = 0;
	else
		data_sinfo->bytes_may_use -= len;
	trace_btrfs_space_reservation(fs_info, "space_info",
				      data_sinfo->flags, len, 0);
	spin_unlock(&data_sinfo->lock);
}
/*
 * Called if we need to clear a data reservation for this inode,
 * normally in an error case.
 *
 * This one will handle the per-inode data rsv map for the accurate reserved
 * space framework.
 */
void btrfs_free_reserved_data_space(struct inode *inode,
			struct extent_changeset *reserved, u64 start, u64 len)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;

	/* Make sure the range is aligned to sectorsize. */
	len = round_up(start + len, root->fs_info->sectorsize) -
	      round_down(start, root->fs_info->sectorsize);
	start = round_down(start, root->fs_info->sectorsize);

	btrfs_free_reserved_data_space_noquota(inode, start, len);
	btrfs_qgroup_free_data(inode, reserved, start, len);
}
static void force_metadata_allocation(struct btrfs_fs_info *info)
{
	struct list_head *head = &info->space_info;
	struct btrfs_space_info *found;

	rcu_read_lock();
	list_for_each_entry_rcu(found, head, list) {
		if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
			found->force_alloc = CHUNK_ALLOC_FORCE;
	}
	rcu_read_unlock();
}
static inline u64 calc_global_rsv_need_space(struct btrfs_block_rsv *global)
{
	return (global->size << 1);
}
static int should_alloc_chunk(struct btrfs_fs_info *fs_info,
			      struct btrfs_space_info *sinfo, int force)
{
	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
	u64 bytes_used = btrfs_space_info_used(sinfo, false);
	u64 thresh;

	if (force == CHUNK_ALLOC_FORCE)
		return 1;

	/*
	 * We need to take into account the global rsv because for all intents
	 * and purposes it's used space. Don't worry about locking the
	 * global_rsv, it doesn't change except when the transaction commits.
	 */
	if (sinfo->flags & BTRFS_BLOCK_GROUP_METADATA)
		bytes_used += calc_global_rsv_need_space(global_rsv);

	/*
	 * In limited mode, we want to have some free space up to
	 * about 1% of the FS size.
	 */
	if (force == CHUNK_ALLOC_LIMITED) {
		thresh = btrfs_super_total_bytes(fs_info->super_copy);
		thresh = max_t(u64, SZ_64M, div_factor_fine(thresh, 1));

		if (sinfo->total_bytes - bytes_used < thresh)
			return 1;
	}

	if (bytes_used + SZ_2M < div_factor(sinfo->total_bytes, 8))
		return 0;
	return 1;
}
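
/*
 * Worked example (annotation, not in the original source): with
 * CHUNK_ALLOC_LIMITED on a 1TiB filesystem, thresh = max(64MiB, 1% of the
 * super's total bytes) ~= 10GiB, so the function returns 1 while less than
 * ~10GiB of this space_info is still unused. Outside limited mode a chunk
 * is only allocated once bytes_used (plus 2MiB of slack) reaches 80% of
 * total_bytes, per the final div_factor(..., 8) check.
 */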
static u64 get_profile_num_devs(struct btrfs_fs_info *fs_info, u64 type)
{
	u64 num_dev;

	if (type & (BTRFS_BLOCK_GROUP_RAID10 |
		    BTRFS_BLOCK_GROUP_RAID0 |
		    BTRFS_BLOCK_GROUP_RAID5 |
		    BTRFS_BLOCK_GROUP_RAID6))
		num_dev = fs_info->fs_devices->rw_devices;
	else if (type & BTRFS_BLOCK_GROUP_RAID1)
		num_dev = 2;
	else
		num_dev = 1;	/* DUP or single */

	return num_dev;
}
/*
 * Reserve space in the system space_info, if necessary, for allocating or
 * removing a chunk: @num_devs device items may need updating and one chunk
 * item is added or removed.
 */
void check_system_chunk(struct btrfs_trans_handle *trans,
			struct btrfs_fs_info *fs_info, u64 type)
{
	struct btrfs_space_info *info;
	u64 left;
	u64 thresh;
	int ret = 0;
	u64 num_devs;

	/*
	 * Needed because we can end up allocating a system chunk and for an
	 * atomic and race free space reservation in the chunk block reserve.
	 */
	ASSERT(mutex_is_locked(&fs_info->chunk_mutex));

	info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
	spin_lock(&info->lock);
	left = info->total_bytes - btrfs_space_info_used(info, true);
	spin_unlock(&info->lock);

	num_devs = get_profile_num_devs(fs_info, type);

	/* num_devs device items to update and 1 chunk item to add or remove. */
	thresh = btrfs_calc_trunc_metadata_size(fs_info, num_devs) +
		btrfs_calc_trans_metadata_size(fs_info, 1);

	if (left < thresh && btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
		btrfs_info(fs_info, "left=%llu, need=%llu, flags=%llu",
			   left, thresh, type);
		dump_space_info(fs_info, info, 0, 0);
	}

	if (left < thresh) {
		u64 flags = btrfs_system_alloc_profile(fs_info);

		/*
		 * Ignore failure to create system chunk. We might end up not
		 * needing it, as we might not need to COW all nodes/leafs from
		 * the paths we visit in the chunk tree (they were already COWed
		 * or created in the current transaction for example).
		 */
		ret = btrfs_alloc_chunk(trans, fs_info, flags);
	}

	if (!ret) {
		ret = btrfs_block_rsv_add(fs_info->chunk_root,
					  &fs_info->chunk_block_rsv,
					  thresh, BTRFS_RESERVE_NO_FLUSH);
		if (!ret)
			trans->chunk_bytes_reserved += thresh;
	}
}
/*
 * If force is CHUNK_ALLOC_FORCE:
 *    - return 1 if it successfully allocates a chunk,
 *    - return errors including -ENOSPC otherwise.
 * If force is NOT CHUNK_ALLOC_FORCE:
 *    - return 0 if it doesn't need to allocate a new chunk,
 *    - return 1 if it successfully allocates a chunk,
 *    - return errors including -ENOSPC otherwise.
 */
static int do_chunk_alloc(struct btrfs_trans_handle *trans,
			  struct btrfs_fs_info *fs_info, u64 flags, int force)
{
	struct btrfs_space_info *space_info;
	int wait_for_alloc = 0;
	int ret = 0;

	/* Don't re-enter if we're already allocating a chunk. */
	if (trans->allocating_chunk)
		return -ENOSPC;

	space_info = __find_space_info(fs_info, flags);
	if (!space_info) {
		ret = create_space_info(fs_info, flags, &space_info);
		if (ret)
			return ret;
	}

again:
	spin_lock(&space_info->lock);
	if (force < space_info->force_alloc)
		force = space_info->force_alloc;
	if (space_info->full) {
		if (should_alloc_chunk(fs_info, space_info, force))
			ret = -ENOSPC;
		else
			ret = 0;
		spin_unlock(&space_info->lock);
		return ret;
	}

	if (!should_alloc_chunk(fs_info, space_info, force)) {
		spin_unlock(&space_info->lock);
		return 0;
	} else if (space_info->chunk_alloc) {
		wait_for_alloc = 1;
	} else {
		space_info->chunk_alloc = 1;
	}

	spin_unlock(&space_info->lock);

	mutex_lock(&fs_info->chunk_mutex);

	/*
	 * The chunk_mutex is held throughout the entirety of a chunk
	 * allocation, so once we've acquired the chunk_mutex we know that the
	 * other guy is done and we need to recheck and see if we should
	 * allocate.
	 */
	if (wait_for_alloc) {
		mutex_unlock(&fs_info->chunk_mutex);
		wait_for_alloc = 0;
		goto again;
	}

	trans->allocating_chunk = true;

	/*
	 * If we have mixed data/metadata chunks we want to make sure we keep
	 * allocating mixed chunks instead of individual chunks.
	 */
	if (btrfs_mixed_space_info(space_info))
		flags |= (BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA);

	/*
	 * If we're doing a data chunk, go ahead and make sure that
	 * we keep a reasonable number of metadata chunks allocated in the
	 * FS as well.
	 */
	if (flags & BTRFS_BLOCK_GROUP_DATA && fs_info->metadata_ratio) {
		fs_info->data_chunk_allocations++;
		if (!(fs_info->data_chunk_allocations %
		      fs_info->metadata_ratio))
			force_metadata_allocation(fs_info);
	}

	/*
	 * Check if we have enough space in the SYSTEM chunk because we may
	 * need to update devices.
	 */
	check_system_chunk(trans, fs_info, flags);

	ret = btrfs_alloc_chunk(trans, fs_info, flags);
	trans->allocating_chunk = false;

	spin_lock(&space_info->lock);
	if (ret < 0 && ret != -ENOSPC)
		goto out;
	if (ret)
		space_info->full = 1;
	else
		ret = 1;

	space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
out:
	space_info->chunk_alloc = 0;
	spin_unlock(&space_info->lock);
	mutex_unlock(&fs_info->chunk_mutex);
	/*
	 * When we allocate a new chunk we reserve space in the chunk block
	 * reserve to make sure we can COW nodes/leafs in the chunk tree or
	 * add new nodes/leafs to it if we end up needing to do it when
	 * inserting the chunk item and updating device items as part of the
	 * second phase of chunk allocation, performed by
	 * btrfs_finish_chunk_alloc(). So make sure we don't accumulate a
	 * large number of new block groups to create in our transaction
	 * handle's new_bgs list to avoid exhausting the chunk block reserve
	 * in extreme cases - like having a single transaction create many new
	 * block groups when starting to write out the free space caches of all
	 * the block groups that were made dirty during the lifetime of the
	 * transaction.
	 */
	if (trans->can_flush_pending_bgs &&
	    trans->chunk_bytes_reserved >= (u64)SZ_2M) {
		btrfs_create_pending_block_groups(trans, fs_info);
		btrfs_trans_release_chunk_metadata(trans);
	}
	return ret;
}
static int can_overcommit(struct btrfs_fs_info *fs_info,
			  struct btrfs_space_info *space_info, u64 bytes,
			  enum btrfs_reserve_flush_enum flush,
			  bool system_chunk)
{
	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
	u64 profile;
	u64 space_size;
	u64 avail;
	u64 used;

	/* Don't overcommit when in mixed mode. */
	if (space_info->flags & BTRFS_BLOCK_GROUP_DATA)
		return 0;

	if (system_chunk)
		profile = btrfs_system_alloc_profile(fs_info);
	else
		profile = btrfs_metadata_alloc_profile(fs_info);

	used = btrfs_space_info_used(space_info, false);

	/*
	 * We only want to allow over committing if we have lots of actual
	 * space free, but if we don't have enough space to handle the global
	 * reserve space then we could end up having a real enospc problem
	 * when trying to allocate a chunk or some other such important
	 * allocation.
	 */
	spin_lock(&global_rsv->lock);
	space_size = calc_global_rsv_need_space(global_rsv);
	spin_unlock(&global_rsv->lock);
	if (used + space_size >= space_info->total_bytes)
		return 0;

	used += space_info->bytes_may_use;

	avail = atomic64_read(&fs_info->free_chunk_space);

	/*
	 * If we have dup, raid1 or raid10 then only half of the free
	 * space is actually usable. For raid56, the space info used
	 * doesn't include the parity drive, so we don't have to
	 * change the math.
	 */
	if (profile & (BTRFS_BLOCK_GROUP_DUP |
		       BTRFS_BLOCK_GROUP_RAID1 |
		       BTRFS_BLOCK_GROUP_RAID10))
		avail >>= 1;

	/*
	 * If we aren't flushing all things, let us overcommit up to
	 * half of the space. If we can flush, don't let us overcommit
	 * too much, let it overcommit up to 1/8 of the space.
	 */
	if (flush == BTRFS_RESERVE_FLUSH_ALL)
		avail >>= 3;
	else
		avail >>= 1;

	if (used + bytes < space_info->total_bytes + avail)
		return 1;
	return 0;
}
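
/*
 * Worked example (annotation, not in the original source): say metadata
 * total_bytes is 1GiB, used excluding bytes_may_use is 256MiB, the doubled
 * global rsv is 128MiB (so the early bail-out does not trigger),
 * bytes_may_use is 256MiB and free_chunk_space is 8GiB on RAID1 with
 * flush == BTRFS_RESERVE_FLUSH_ALL. Then avail = 8GiB / 2 (RAID1) / 8
 * (FLUSH_ALL) = 512MiB, and the reservation is allowed iff
 * 512MiB + bytes < 1GiB + 512MiB, i.e. bytes < 1GiB.
 */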
static void btrfs_writeback_inodes_sb_nr(struct btrfs_fs_info *fs_info,
					 unsigned long nr_pages, int nr_items)
{
	struct super_block *sb = fs_info->sb;

	if (down_read_trylock(&sb->s_umount)) {
		writeback_inodes_sb_nr(sb, nr_pages, WB_REASON_FS_FREE_SPACE);
		up_read(&sb->s_umount);
	} else {
		/*
		 * We needn't worry about the filesystem going from r/w to r/o
		 * even though we don't acquire the ->s_umount mutex, because
		 * the filesystem should guarantee the delalloc inodes list is
		 * empty after the filesystem becomes read-only (all dirty
		 * pages have been written to disk).
		 */
		btrfs_start_delalloc_roots(fs_info, 0, nr_items);
		if (!current->journal_info)
			btrfs_wait_ordered_roots(fs_info, nr_items, 0, (u64)-1);
	}
}
static inline u64 calc_reclaim_items_nr(struct btrfs_fs_info *fs_info,
					u64 to_reclaim)
{
	u64 bytes;
	u64 nr;

	bytes = btrfs_calc_trans_metadata_size(fs_info, 1);
	nr = div64_u64(to_reclaim, bytes);
	if (!nr)
		nr = 1;
	return nr;
}
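
/*
 * Worked example (annotation, not in the original source): assuming the
 * usual btrfs_calc_trans_metadata_size() of nodesize * 2 * BTRFS_MAX_LEVEL,
 * a 16KiB nodesize gives 256KiB per item, so to_reclaim = 1MiB maps to
 * four items, and anything smaller than one item is rounded up to 1.
 */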
#define EXTENT_SIZE_PER_ITEM	SZ_256K

/*
 * shrink metadata reservation for delalloc
 */
static void shrink_delalloc(struct btrfs_fs_info *fs_info, u64 to_reclaim,
			    u64 orig, bool wait_ordered)
{
	struct btrfs_space_info *space_info;
	struct btrfs_trans_handle *trans;
	u64 delalloc_bytes;
	u64 max_reclaim;
	u64 items;
	long time_left;
	unsigned long nr_pages;
	int loops;
	enum btrfs_reserve_flush_enum flush;

	/* Calc the number of pages we need to flush for space reservation. */
	items = calc_reclaim_items_nr(fs_info, to_reclaim);
	to_reclaim = items * EXTENT_SIZE_PER_ITEM;

	trans = (struct btrfs_trans_handle *)current->journal_info;
	space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);

	delalloc_bytes = percpu_counter_sum_positive(
						&fs_info->delalloc_bytes);
	if (delalloc_bytes == 0) {
		if (trans)
			return;
		if (wait_ordered)
			btrfs_wait_ordered_roots(fs_info, items, 0, (u64)-1);
		return;
	}

	loops = 0;
	while (delalloc_bytes && loops < 3) {
		max_reclaim = min(delalloc_bytes, to_reclaim);
		nr_pages = max_reclaim >> PAGE_SHIFT;
		btrfs_writeback_inodes_sb_nr(fs_info, nr_pages, items);
		/*
		 * We need to wait for the async pages to actually start
		 * before we do anything.
		 */
		max_reclaim = atomic_read(&fs_info->async_delalloc_pages);
		if (!max_reclaim)
			goto skip_async;

		if (max_reclaim <= nr_pages)
			max_reclaim = 0;
		else
			max_reclaim -= nr_pages;

		wait_event(fs_info->async_submit_wait,
			   atomic_read(&fs_info->async_delalloc_pages) <=
			   (int)max_reclaim);
skip_async:
		if (!trans)
			flush = BTRFS_RESERVE_FLUSH_ALL;
		else
			flush = BTRFS_RESERVE_NO_FLUSH;
		spin_lock(&space_info->lock);
		if (list_empty(&space_info->tickets) &&
		    list_empty(&space_info->priority_tickets)) {
			spin_unlock(&space_info->lock);
			break;
		}
		spin_unlock(&space_info->lock);

		loops++;
		if (wait_ordered && !trans) {
			btrfs_wait_ordered_roots(fs_info, items, 0, (u64)-1);
		} else {
			time_left = schedule_timeout_killable(1);
			if (time_left)
				break;
		}
		delalloc_bytes = percpu_counter_sum_positive(
						&fs_info->delalloc_bytes);
	}
}
struct reserve_ticket {
	u64 bytes;
	int error;
	struct list_head list;
	wait_queue_head_t wait;
};
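
/*
 * Annotation (not in the original source): the lifecycle of a
 * reserve_ticket, as implemented by __reserve_metadata_bytes() and the
 * flushing code below:
 *
 *   1. the reserver fills in ticket.bytes and inits ticket.wait;
 *   2. the ticket is queued on space_info->tickets (FLUSH_ALL) or
 *      space_info->priority_tickets (priority flushers);
 *   3. space_info_add_old_bytes()/space_info_add_new_bytes() hand freed
 *      space to queued tickets, decrementing ticket->bytes and waking
 *      ticket->wait once it hits zero;
 *   4. if flushing gives up, wake_all_tickets() sets ticket->error to
 *      -ENOSPC and wakes the ticket.
 */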
/**
 * may_commit_transaction - possibly commit the transaction if it's ok to
 * @fs_info - the fs_info for this filesystem
 * @space_info - the space_info we're trying to satisfy a reservation for
 *
 * This will check to make sure that committing the transaction will actually
 * get us somewhere and then commit the transaction if it does. Otherwise it
 * will return -ENOSPC.
 */
static int may_commit_transaction(struct btrfs_fs_info *fs_info,
				  struct btrfs_space_info *space_info)
{
	struct reserve_ticket *ticket = NULL;
	struct btrfs_block_rsv *delayed_rsv = &fs_info->delayed_block_rsv;
	struct btrfs_trans_handle *trans;
	u64 bytes;

	trans = (struct btrfs_trans_handle *)current->journal_info;
	if (trans)
		return -EAGAIN;

	spin_lock(&space_info->lock);
	if (!list_empty(&space_info->priority_tickets))
		ticket = list_first_entry(&space_info->priority_tickets,
					  struct reserve_ticket, list);
	else if (!list_empty(&space_info->tickets))
		ticket = list_first_entry(&space_info->tickets,
					  struct reserve_ticket, list);
	bytes = (ticket) ? ticket->bytes : 0;
	spin_unlock(&space_info->lock);

	if (!bytes)
		return 0;

	/* See if there is enough pinned space to make this reservation. */
	if (percpu_counter_compare(&space_info->total_bytes_pinned,
				   bytes) >= 0)
		goto commit;

	/*
	 * See if there is some space in the delayed insertion reservation for
	 * this reservation.
	 */
	if (space_info != delayed_rsv->space_info)
		return -ENOSPC;

	spin_lock(&delayed_rsv->lock);
	if (delayed_rsv->size > bytes)
		bytes = 0;
	else
		bytes -= delayed_rsv->size;
	if (percpu_counter_compare(&space_info->total_bytes_pinned,
				   bytes) < 0) {
		spin_unlock(&delayed_rsv->lock);
		return -ENOSPC;
	}
	spin_unlock(&delayed_rsv->lock);

commit:
	trans = btrfs_join_transaction(fs_info->extent_root);
	if (IS_ERR(trans))
		return -ENOSPC;

	return btrfs_commit_transaction(trans);
}
/*
 * Try to flush some data based on policy set by @state. This is only advisory
 * and may fail for various reasons. The caller is supposed to examine the
 * state of @space_info to detect the outcome.
 */
static void flush_space(struct btrfs_fs_info *fs_info,
			struct btrfs_space_info *space_info, u64 num_bytes,
			int state)
{
	struct btrfs_root *root = fs_info->extent_root;
	struct btrfs_trans_handle *trans;
	int nr;
	int ret = 0;

	switch (state) {
	case FLUSH_DELAYED_ITEMS_NR:
	case FLUSH_DELAYED_ITEMS:
		if (state == FLUSH_DELAYED_ITEMS_NR)
			nr = calc_reclaim_items_nr(fs_info, num_bytes) * 2;
		else
			nr = -1;

		trans = btrfs_join_transaction(root);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			break;
		}
		ret = btrfs_run_delayed_items_nr(trans, fs_info, nr);
		btrfs_end_transaction(trans);
		break;
	case FLUSH_DELALLOC:
	case FLUSH_DELALLOC_WAIT:
		shrink_delalloc(fs_info, num_bytes * 2, num_bytes,
				state == FLUSH_DELALLOC_WAIT);
		break;
	case ALLOC_CHUNK:
		trans = btrfs_join_transaction(root);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			break;
		}
		ret = do_chunk_alloc(trans, fs_info,
				     btrfs_metadata_alloc_profile(fs_info),
				     CHUNK_ALLOC_NO_FORCE);
		btrfs_end_transaction(trans);
		if (ret > 0 || ret == -ENOSPC)
			ret = 0;
		break;
	case COMMIT_TRANS:
		ret = may_commit_transaction(fs_info, space_info);
		break;
	default:
		ret = -ENOSPC;
		break;
	}

	trace_btrfs_flush_space(fs_info, space_info->flags, num_bytes, state,
				ret);
	return;
}
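
/*
 * Annotation (not in the original source): the reclaim loops below rely on
 * the flush states forming an increasing sequence, assumed here (from
 * ctree.h) to be FLUSH_DELAYED_ITEMS_NR < FLUSH_DELAYED_ITEMS <
 * FLUSH_DELALLOC < FLUSH_DELALLOC_WAIT < ALLOC_CHUNK < COMMIT_TRANS,
 * escalating from the cheapest action to the most expensive one.
 */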
static inline u64
btrfs_calc_reclaim_metadata_size(struct btrfs_fs_info *fs_info,
				 struct btrfs_space_info *space_info,
				 bool system_chunk)
{
	struct reserve_ticket *ticket;
	u64 used;
	u64 expected;
	u64 to_reclaim = 0;

	list_for_each_entry(ticket, &space_info->tickets, list)
		to_reclaim += ticket->bytes;
	list_for_each_entry(ticket, &space_info->priority_tickets, list)
		to_reclaim += ticket->bytes;
	if (to_reclaim)
		return to_reclaim;

	to_reclaim = min_t(u64, num_online_cpus() * SZ_1M, SZ_16M);
	if (can_overcommit(fs_info, space_info, to_reclaim,
			   BTRFS_RESERVE_FLUSH_ALL, system_chunk))
		return 0;

	used = btrfs_space_info_used(space_info, true);

	if (can_overcommit(fs_info, space_info, SZ_1M,
			   BTRFS_RESERVE_FLUSH_ALL, system_chunk))
		expected = div_factor_fine(space_info->total_bytes, 95);
	else
		expected = div_factor_fine(space_info->total_bytes, 90);

	if (used > expected)
		to_reclaim = used - expected;
	else
		to_reclaim = 0;
	to_reclaim = min(to_reclaim, space_info->bytes_may_use +
				     space_info->bytes_reserved);
	return to_reclaim;
}
static inline int need_do_async_reclaim(struct btrfs_fs_info *fs_info,
					struct btrfs_space_info *space_info,
					u64 used, bool system_chunk)
{
	u64 thresh = div_factor_fine(space_info->total_bytes, 98);

	/* If we're just plain full then async reclaim just slows us down. */
	if ((space_info->bytes_used + space_info->bytes_reserved) >= thresh)
		return 0;

	if (!btrfs_calc_reclaim_metadata_size(fs_info, space_info,
					      system_chunk))
		return 0;

	return (used >= thresh && !btrfs_fs_closing(fs_info) &&
		!test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state));
}
static void wake_all_tickets(struct list_head *head)
{
	struct reserve_ticket *ticket;

	while (!list_empty(head)) {
		ticket = list_first_entry(head, struct reserve_ticket, list);
		list_del_init(&ticket->list);
		ticket->error = -ENOSPC;
		wake_up(&ticket->wait);
	}
}
/*
 * This is for normal flushers, we can wait all goddamned day if we want to. We
 * will loop and continuously try to flush as long as we are making progress.
 * We count progress as clearing off tickets each time we have to loop.
 */
static void btrfs_async_reclaim_metadata_space(struct work_struct *work)
{
	struct btrfs_fs_info *fs_info;
	struct btrfs_space_info *space_info;
	u64 to_reclaim;
	int flush_state;
	int commit_cycles = 0;
	u64 last_tickets_id;

	fs_info = container_of(work, struct btrfs_fs_info, async_reclaim_work);
	space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);

	spin_lock(&space_info->lock);
	to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info, space_info,
						      false);
	if (!to_reclaim) {
		space_info->flush = 0;
		spin_unlock(&space_info->lock);
		return;
	}
	last_tickets_id = space_info->tickets_id;
	spin_unlock(&space_info->lock);

	flush_state = FLUSH_DELAYED_ITEMS_NR;
	do {
		flush_space(fs_info, space_info, to_reclaim, flush_state);
		spin_lock(&space_info->lock);
		if (list_empty(&space_info->tickets)) {
			space_info->flush = 0;
			spin_unlock(&space_info->lock);
			return;
		}
		to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info,
							      space_info,
							      false);
		if (last_tickets_id == space_info->tickets_id) {
			flush_state++;
		} else {
			last_tickets_id = space_info->tickets_id;
			flush_state = FLUSH_DELAYED_ITEMS_NR;
			if (commit_cycles)
				commit_cycles--;
		}

		if (flush_state > COMMIT_TRANS) {
			commit_cycles++;
			if (commit_cycles > 2) {
				wake_all_tickets(&space_info->tickets);
				space_info->flush = 0;
			} else {
				flush_state = FLUSH_DELAYED_ITEMS_NR;
			}
		}
		spin_unlock(&space_info->lock);
	} while (flush_state <= COMMIT_TRANS);
}
void btrfs_init_async_reclaim_work(struct work_struct *work)
{
	INIT_WORK(work, btrfs_async_reclaim_metadata_space);
}
static void priority_reclaim_metadata_space(struct btrfs_fs_info *fs_info,
					    struct btrfs_space_info *space_info,
					    struct reserve_ticket *ticket)
{
	u64 to_reclaim;
	int flush_state = FLUSH_DELAYED_ITEMS_NR;

	spin_lock(&space_info->lock);
	to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info, space_info,
						      false);
	if (!to_reclaim) {
		spin_unlock(&space_info->lock);
		return;
	}
	spin_unlock(&space_info->lock);

	do {
		flush_space(fs_info, space_info, to_reclaim, flush_state);
		flush_state++;
		spin_lock(&space_info->lock);
		if (ticket->bytes == 0) {
			spin_unlock(&space_info->lock);
			return;
		}
		spin_unlock(&space_info->lock);

		/*
		 * Priority flushers can't wait on delalloc without
		 * deadlocking.
		 */
		if (flush_state == FLUSH_DELALLOC ||
		    flush_state == FLUSH_DELALLOC_WAIT)
			flush_state = ALLOC_CHUNK;
	} while (flush_state < COMMIT_TRANS);
}
static int wait_reserve_ticket(struct btrfs_fs_info *fs_info,
			       struct btrfs_space_info *space_info,
			       struct reserve_ticket *ticket, u64 orig_bytes)
{
	DEFINE_WAIT(wait);
	int ret = 0;

	spin_lock(&space_info->lock);
	while (ticket->bytes > 0 && ticket->error == 0) {
		ret = prepare_to_wait_event(&ticket->wait, &wait, TASK_KILLABLE);
		if (ret) {
			ret = -EINTR;
			break;
		}
		spin_unlock(&space_info->lock);

		schedule();

		finish_wait(&ticket->wait, &wait);
		spin_lock(&space_info->lock);
	}
	if (!ret)
		ret = ticket->error;
	if (!list_empty(&ticket->list))
		list_del_init(&ticket->list);
	if (ticket->bytes && ticket->bytes < orig_bytes) {
		u64 num_bytes = orig_bytes - ticket->bytes;
		space_info->bytes_may_use -= num_bytes;
		trace_btrfs_space_reservation(fs_info, "space_info",
					      space_info->flags, num_bytes, 0);
	}
	spin_unlock(&space_info->lock);

	return ret;
}
/**
 * __reserve_metadata_bytes - try to reserve bytes from a space_info
 * @fs_info - the fs_info for this filesystem
 * @space_info - the space info we want to allocate from
 * @orig_bytes - the number of bytes we want
 * @flush - whether or not we can flush to make our reservation
 * @system_chunk - whether we are reserving on behalf of the chunk root
 *
 * This will reserve orig_bytes number of bytes from the space info associated
 * with the block_rsv. If there is not enough space it will make an attempt to
 * flush out space to make room. It will do this by flushing delalloc if
 * possible or committing the transaction. If flush is BTRFS_RESERVE_NO_FLUSH
 * then no attempts to regain reservations will be made and this will fail if
 * there is not enough space already.
 */
static int __reserve_metadata_bytes(struct btrfs_fs_info *fs_info,
				    struct btrfs_space_info *space_info,
				    u64 orig_bytes,
				    enum btrfs_reserve_flush_enum flush,
				    bool system_chunk)
{
	struct reserve_ticket ticket;
	u64 used;
	int ret = 0;

	ASSERT(orig_bytes);
	ASSERT(!current->journal_info || flush != BTRFS_RESERVE_FLUSH_ALL);

	spin_lock(&space_info->lock);
	ret = -ENOSPC;
	used = btrfs_space_info_used(space_info, true);

	/*
	 * If we have enough space then hooray, make our reservation and carry
	 * on. If not see if we can overcommit, and if we can, hooray carry on.
	 * If not things get more complicated.
	 */
	if (used + orig_bytes <= space_info->total_bytes) {
		space_info->bytes_may_use += orig_bytes;
		trace_btrfs_space_reservation(fs_info, "space_info",
					      space_info->flags, orig_bytes, 1);
		ret = 0;
	} else if (can_overcommit(fs_info, space_info, orig_bytes, flush,
				  system_chunk)) {
		space_info->bytes_may_use += orig_bytes;
		trace_btrfs_space_reservation(fs_info, "space_info",
					      space_info->flags, orig_bytes, 1);
		ret = 0;
	}

	/*
	 * If we couldn't make a reservation then setup our reservation ticket
	 * and kick the async worker if it's not already running.
	 *
	 * If we are a priority flusher then we just need to add our ticket to
	 * the list and we will do our own flushing further down.
	 */
	if (ret && flush != BTRFS_RESERVE_NO_FLUSH) {
		ticket.bytes = orig_bytes;
		ticket.error = 0;
		init_waitqueue_head(&ticket.wait);
		if (flush == BTRFS_RESERVE_FLUSH_ALL) {
			list_add_tail(&ticket.list, &space_info->tickets);
			if (!space_info->flush) {
				space_info->flush = 1;
				trace_btrfs_trigger_flush(fs_info,
							  space_info->flags,
							  orig_bytes, flush,
							  "enospc");
				queue_work(system_unbound_wq,
					   &fs_info->async_reclaim_work);
			}
		} else {
			list_add_tail(&ticket.list,
				      &space_info->priority_tickets);
		}
	} else if (!ret && space_info->flags & BTRFS_BLOCK_GROUP_METADATA) {
		used += orig_bytes;
		/*
		 * We will do the space reservation dance during log replay,
		 * which means we won't have fs_info->fs_root set, so don't do
		 * the async reclaim as we will panic.
		 */
		if (!test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags) &&
		    need_do_async_reclaim(fs_info, space_info,
					  used, system_chunk) &&
		    !work_busy(&fs_info->async_reclaim_work)) {
			trace_btrfs_trigger_flush(fs_info, space_info->flags,
						  orig_bytes, flush, "preempt");
			queue_work(system_unbound_wq,
				   &fs_info->async_reclaim_work);
		}
	}
	spin_unlock(&space_info->lock);
	if (!ret || flush == BTRFS_RESERVE_NO_FLUSH)
		return ret;

	if (flush == BTRFS_RESERVE_FLUSH_ALL)
		return wait_reserve_ticket(fs_info, space_info, &ticket,
					   orig_bytes);

	ret = 0;
	priority_reclaim_metadata_space(fs_info, space_info, &ticket);
	spin_lock(&space_info->lock);
	if (ticket.bytes) {
		if (ticket.bytes < orig_bytes) {
			u64 num_bytes = orig_bytes - ticket.bytes;
			space_info->bytes_may_use -= num_bytes;
			trace_btrfs_space_reservation(fs_info, "space_info",
						      space_info->flags,
						      num_bytes, 0);
		}
		list_del_init(&ticket.list);
		ret = -ENOSPC;
	}
	spin_unlock(&space_info->lock);
	ASSERT(list_empty(&ticket.list));
	return ret;
}
/**
 * reserve_metadata_bytes - try to reserve bytes from the block_rsv's space
 * @root - the root we're allocating for
 * @block_rsv - the block_rsv we're allocating for
 * @orig_bytes - the number of bytes we want
 * @flush - whether or not we can flush to make our reservation
 *
 * This will reserve orig_bytes number of bytes from the space info associated
 * with the block_rsv. If there is not enough space it will make an attempt to
 * flush out space to make room. It will do this by flushing delalloc if
 * possible or committing the transaction. If flush is BTRFS_RESERVE_NO_FLUSH
 * then no attempts to regain reservations will be made and this will fail if
 * there is not enough space already.
 */
static int reserve_metadata_bytes(struct btrfs_root *root,
				  struct btrfs_block_rsv *block_rsv,
				  u64 orig_bytes,
				  enum btrfs_reserve_flush_enum flush)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
	int ret;
	bool system_chunk = (root == fs_info->chunk_root);

	ret = __reserve_metadata_bytes(fs_info, block_rsv->space_info,
				       orig_bytes, flush, system_chunk);
	if (ret == -ENOSPC &&
	    unlikely(root->orphan_cleanup_state == ORPHAN_CLEANUP_STARTED)) {
		if (block_rsv != global_rsv &&
		    !block_rsv_use_bytes(global_rsv, orig_bytes))
			ret = 0;
	}
	if (ret == -ENOSPC)
		trace_btrfs_space_reservation(fs_info, "space_info:enospc",
					      block_rsv->space_info->flags,
					      orig_bytes, 1);
	return ret;
}
static struct btrfs_block_rsv *get_block_rsv(
					const struct btrfs_trans_handle *trans,
					const struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_block_rsv *block_rsv = NULL;

	if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) ||
	    (root == fs_info->csum_root && trans->adding_csums) ||
	    (root == fs_info->uuid_root))
		block_rsv = trans->block_rsv;

	if (!block_rsv)
		block_rsv = root->block_rsv;

	if (!block_rsv)
		block_rsv = &fs_info->empty_block_rsv;

	return block_rsv;
}
static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
			       u64 num_bytes)
{
	int ret = -ENOSPC;

	spin_lock(&block_rsv->lock);
	if (block_rsv->reserved >= num_bytes) {
		block_rsv->reserved -= num_bytes;
		if (block_rsv->reserved < block_rsv->size)
			block_rsv->full = 0;
		ret = 0;
	}
	spin_unlock(&block_rsv->lock);
	return ret;
}
static void block_rsv_add_bytes(struct btrfs_block_rsv *block_rsv,
				u64 num_bytes, int update_size)
{
	spin_lock(&block_rsv->lock);
	block_rsv->reserved += num_bytes;
	if (update_size)
		block_rsv->size += num_bytes;
	else if (block_rsv->reserved >= block_rsv->size)
		block_rsv->full = 1;
	spin_unlock(&block_rsv->lock);
}
int btrfs_cond_migrate_bytes(struct btrfs_fs_info *fs_info,
			     struct btrfs_block_rsv *dest, u64 num_bytes,
			     int min_factor)
{
	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
	u64 min_bytes;

	if (global_rsv->space_info != dest->space_info)
		return -ENOSPC;

	spin_lock(&global_rsv->lock);
	min_bytes = div_factor(global_rsv->size, min_factor);
	if (global_rsv->reserved < min_bytes + num_bytes) {
		spin_unlock(&global_rsv->lock);
		return -ENOSPC;
	}
	global_rsv->reserved -= num_bytes;
	if (global_rsv->reserved < global_rsv->size)
		global_rsv->full = 0;
	spin_unlock(&global_rsv->lock);

	block_rsv_add_bytes(dest, num_bytes, 1);
	return 0;
}
/*
 * This is for space we already have accounted in space_info->bytes_may_use,
 * so basically when we're returning space from block_rsv's.
 */
static void space_info_add_old_bytes(struct btrfs_fs_info *fs_info,
				     struct btrfs_space_info *space_info,
				     u64 num_bytes)
{
	struct reserve_ticket *ticket;
	struct list_head *head;
	u64 used;
	enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_NO_FLUSH;
	bool check_overcommit = false;

	spin_lock(&space_info->lock);
	head = &space_info->priority_tickets;

	/*
	 * If we are over our limit then we need to check and see if we can
	 * overcommit, and if we can't then we just need to free up our space
	 * and not satisfy any requests.
	 */
	used = btrfs_space_info_used(space_info, true);
	if (used - num_bytes >= space_info->total_bytes)
		check_overcommit = true;
again:
	while (!list_empty(head) && num_bytes) {
		ticket = list_first_entry(head, struct reserve_ticket,
					  list);
		/*
		 * We use 0 bytes because this space is already reserved, so
		 * adding the ticket space would be a double count.
		 */
		if (check_overcommit &&
		    !can_overcommit(fs_info, space_info, 0, flush, false))
			break;
		if (num_bytes >= ticket->bytes) {
			list_del_init(&ticket->list);
			num_bytes -= ticket->bytes;
			ticket->bytes = 0;
			space_info->tickets_id++;
			wake_up(&ticket->wait);
		} else {
			ticket->bytes -= num_bytes;
			num_bytes = 0;
		}
	}

	if (num_bytes && head == &space_info->priority_tickets) {
		head = &space_info->tickets;
		flush = BTRFS_RESERVE_FLUSH_ALL;
		goto again;
	}
	space_info->bytes_may_use -= num_bytes;
	trace_btrfs_space_reservation(fs_info, "space_info",
				      space_info->flags, num_bytes, 0);
	spin_unlock(&space_info->lock);
}
/*
 * This is for newly allocated space that isn't accounted in
 * space_info->bytes_may_use yet. So if we allocate a chunk or unpin an
 * extent we use this helper.
 */
static void space_info_add_new_bytes(struct btrfs_fs_info *fs_info,
				     struct btrfs_space_info *space_info,
				     u64 num_bytes)
{
	struct reserve_ticket *ticket;
	struct list_head *head = &space_info->priority_tickets;

again:
	while (!list_empty(head) && num_bytes) {
		ticket = list_first_entry(head, struct reserve_ticket,
					  list);
		if (num_bytes >= ticket->bytes) {
			trace_btrfs_space_reservation(fs_info, "space_info",
						      space_info->flags,
						      ticket->bytes, 1);
			list_del_init(&ticket->list);
			num_bytes -= ticket->bytes;
			space_info->bytes_may_use += ticket->bytes;
			ticket->bytes = 0;
			space_info->tickets_id++;
			wake_up(&ticket->wait);
		} else {
			trace_btrfs_space_reservation(fs_info, "space_info",
						      space_info->flags,
						      num_bytes, 1);
			space_info->bytes_may_use += num_bytes;
			ticket->bytes -= num_bytes;
			num_bytes = 0;
		}
	}

	if (num_bytes && head == &space_info->priority_tickets) {
		head = &space_info->tickets;
		goto again;
	}
}
static u64 block_rsv_release_bytes(struct btrfs_fs_info *fs_info,
				   struct btrfs_block_rsv *block_rsv,
				   struct btrfs_block_rsv *dest, u64 num_bytes)
{
	struct btrfs_space_info *space_info = block_rsv->space_info;
	u64 ret;

	spin_lock(&block_rsv->lock);
	if (num_bytes == (u64)-1)
		num_bytes = block_rsv->size;
	block_rsv->size -= num_bytes;
	if (block_rsv->reserved >= block_rsv->size) {
		num_bytes = block_rsv->reserved - block_rsv->size;
		block_rsv->reserved = block_rsv->size;
		block_rsv->full = 1;
	} else {
		num_bytes = 0;
	}
	spin_unlock(&block_rsv->lock);

	ret = num_bytes;
	if (num_bytes > 0) {
		if (dest) {
			spin_lock(&dest->lock);
			if (!dest->full) {
				u64 bytes_to_add;

				bytes_to_add = dest->size - dest->reserved;
				bytes_to_add = min(num_bytes, bytes_to_add);
				dest->reserved += bytes_to_add;
				if (dest->reserved >= dest->size)
					dest->full = 1;
				num_bytes -= bytes_to_add;
			}
			spin_unlock(&dest->lock);
		}
		if (num_bytes)
			space_info_add_old_bytes(fs_info, space_info,
						 num_bytes);
	}
	return ret;
}
int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src,
			    struct btrfs_block_rsv *dst, u64 num_bytes,
			    int update_size)
{
	int ret;

	ret = block_rsv_use_bytes(src, num_bytes);
	if (ret)
		return ret;

	block_rsv_add_bytes(dst, num_bytes, update_size);
	return 0;
}
void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv, unsigned short type)
{
	memset(rsv, 0, sizeof(*rsv));
	spin_lock_init(&rsv->lock);
	rsv->type = type;
}

void btrfs_init_metadata_block_rsv(struct btrfs_fs_info *fs_info,
				   struct btrfs_block_rsv *rsv,
				   unsigned short type)
{
	btrfs_init_block_rsv(rsv, type);
	rsv->space_info = __find_space_info(fs_info,
					    BTRFS_BLOCK_GROUP_METADATA);
}

struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_fs_info *fs_info,
					      unsigned short type)
{
	struct btrfs_block_rsv *block_rsv;

	block_rsv = kmalloc(sizeof(*block_rsv), GFP_NOFS);
	if (!block_rsv)
		return NULL;

	btrfs_init_metadata_block_rsv(fs_info, block_rsv, type);
	return block_rsv;
}

void btrfs_free_block_rsv(struct btrfs_fs_info *fs_info,
			  struct btrfs_block_rsv *rsv)
{
	if (!rsv)
		return;
	btrfs_block_rsv_release(fs_info, rsv, (u64)-1);
	kfree(rsv);
}

void __btrfs_free_block_rsv(struct btrfs_block_rsv *rsv)
{
	kfree(rsv);
}

int btrfs_block_rsv_add(struct btrfs_root *root,
			struct btrfs_block_rsv *block_rsv, u64 num_bytes,
			enum btrfs_reserve_flush_enum flush)
{
	int ret;

	if (num_bytes == 0)
		return 0;

	ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
	if (!ret) {
		block_rsv_add_bytes(block_rsv, num_bytes, 1);
		return 0;
	}

	return ret;
}

int btrfs_block_rsv_check(struct btrfs_block_rsv *block_rsv, int min_factor)
{
	u64 num_bytes = 0;
	int ret = -ENOSPC;

	if (!block_rsv)
		return 0;

	spin_lock(&block_rsv->lock);
	num_bytes = div_factor(block_rsv->size, min_factor);
	if (block_rsv->reserved >= num_bytes)
		ret = 0;
	spin_unlock(&block_rsv->lock);

	return ret;
}

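/*
 * div_factor() scales by tenths, so a caller passing min_factor == 5 is
 * asking "is at least 50% of this rsv's size currently reserved?"; e.g.
 * size 10M and reserved 4M with min_factor 5 yields -ENOSPC.
 */
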
int btrfs_block_rsv_refill(struct btrfs_root *root,
			   struct btrfs_block_rsv *block_rsv, u64 min_reserved,
			   enum btrfs_reserve_flush_enum flush)
{
	u64 num_bytes = 0;
	int ret = -ENOSPC;

	if (!block_rsv)
		return 0;

	spin_lock(&block_rsv->lock);
	num_bytes = min_reserved;
	if (block_rsv->reserved >= num_bytes)
		ret = 0;
	else
		num_bytes -= block_rsv->reserved;
	spin_unlock(&block_rsv->lock);

	if (!ret)
		return 0;

	ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
	if (!ret) {
		block_rsv_add_bytes(block_rsv, num_bytes, 0);
		return 0;
	}

	return ret;
}

/**
 * btrfs_inode_rsv_refill - refill the inode block rsv.
 * @inode - the inode we are refilling.
 * @flush - the flushing restriction.
 *
 * Essentially the same as btrfs_block_rsv_refill, except it uses the
 * block_rsv->size as the minimum size. We'll either refill the missing amount
 * or return if we already have enough space. This will also handle the
 * reserve tracepoint for the reserved amount.
 */
int btrfs_inode_rsv_refill(struct btrfs_inode *inode,
			   enum btrfs_reserve_flush_enum flush)
{
	struct btrfs_root *root = inode->root;
	struct btrfs_block_rsv *block_rsv = &inode->block_rsv;
	u64 num_bytes = 0;
	int ret = -ENOSPC;

	spin_lock(&block_rsv->lock);
	if (block_rsv->reserved < block_rsv->size)
		num_bytes = block_rsv->size - block_rsv->reserved;
	spin_unlock(&block_rsv->lock);

	if (num_bytes == 0)
		return 0;

	ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
	if (!ret) {
		block_rsv_add_bytes(block_rsv, num_bytes, 0);
		trace_btrfs_space_reservation(root->fs_info, "delalloc",
					      btrfs_ino(inode), num_bytes, 1);
	}
	return ret;
}

/**
 * btrfs_inode_rsv_release - release any excessive reservation.
 * @inode - the inode we need to release from.
 *
 * This is the same as btrfs_block_rsv_release, except that it handles the
 * tracepoint for the reservation.
 */
void btrfs_inode_rsv_release(struct btrfs_inode *inode)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
	struct btrfs_block_rsv *block_rsv = &inode->block_rsv;
	u64 released = 0;

	/*
	 * Since we statically set the block_rsv->size we just want to say we
	 * are releasing 0 bytes, and then we'll just get the reservation over
	 * the size freed.
	 */
	released = block_rsv_release_bytes(fs_info, block_rsv, global_rsv, 0);
	if (released > 0)
		trace_btrfs_space_reservation(fs_info, "delalloc",
					      btrfs_ino(inode), released, 0);
}

void btrfs_block_rsv_release(struct btrfs_fs_info *fs_info,
			     struct btrfs_block_rsv *block_rsv,
			     u64 num_bytes)
{
	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;

	if (global_rsv == block_rsv ||
	    block_rsv->space_info != global_rsv->space_info)
		global_rsv = NULL;
	block_rsv_release_bytes(fs_info, block_rsv, global_rsv, num_bytes);
}

static void update_global_block_rsv(struct btrfs_fs_info *fs_info)
{
	struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
	struct btrfs_space_info *sinfo = block_rsv->space_info;
	u64 num_bytes;

	/*
	 * The global block rsv is based on the size of the extent tree, the
	 * checksum tree and the root tree. If the fs is empty we want to set
	 * it to a minimal amount for safety.
	 */
	num_bytes = btrfs_root_used(&fs_info->extent_root->root_item) +
		btrfs_root_used(&fs_info->csum_root->root_item) +
		btrfs_root_used(&fs_info->tree_root->root_item);
	num_bytes = max_t(u64, num_bytes, SZ_16M);

	spin_lock(&sinfo->lock);
	spin_lock(&block_rsv->lock);

	block_rsv->size = min_t(u64, num_bytes, SZ_512M);

	if (block_rsv->reserved < block_rsv->size) {
		num_bytes = btrfs_space_info_used(sinfo, true);
		if (sinfo->total_bytes > num_bytes) {
			num_bytes = sinfo->total_bytes - num_bytes;
			num_bytes = min(num_bytes,
					block_rsv->size - block_rsv->reserved);
			block_rsv->reserved += num_bytes;
			sinfo->bytes_may_use += num_bytes;
			trace_btrfs_space_reservation(fs_info, "space_info",
						      sinfo->flags, num_bytes,
						      1);
		}
	} else if (block_rsv->reserved > block_rsv->size) {
		num_bytes = block_rsv->reserved - block_rsv->size;
		sinfo->bytes_may_use -= num_bytes;
		trace_btrfs_space_reservation(fs_info, "space_info",
					      sinfo->flags, num_bytes, 0);
		block_rsv->reserved = block_rsv->size;
	}

	if (block_rsv->reserved == block_rsv->size)
		block_rsv->full = 1;
	else
		block_rsv->full = 0;

	spin_unlock(&block_rsv->lock);
	spin_unlock(&sinfo->lock);
}

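/*
 * Sizing example: the target is the summed on-disk footprint of the
 * extent, csum and root trees, clamped to [16M, 512M]; three tiny trees
 * using 4M total still get a 16M rsv, while 2G of tree metadata is
 * capped at 512M.
 */
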
static void init_global_block_rsv(struct btrfs_fs_info *fs_info)
{
	struct btrfs_space_info *space_info;

	space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
	fs_info->chunk_block_rsv.space_info = space_info;

	space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
	fs_info->global_block_rsv.space_info = space_info;
	fs_info->trans_block_rsv.space_info = space_info;
	fs_info->empty_block_rsv.space_info = space_info;
	fs_info->delayed_block_rsv.space_info = space_info;

	fs_info->extent_root->block_rsv = &fs_info->global_block_rsv;
	fs_info->csum_root->block_rsv = &fs_info->global_block_rsv;
	fs_info->dev_root->block_rsv = &fs_info->global_block_rsv;
	fs_info->tree_root->block_rsv = &fs_info->global_block_rsv;
	if (fs_info->quota_root)
		fs_info->quota_root->block_rsv = &fs_info->global_block_rsv;
	fs_info->chunk_root->block_rsv = &fs_info->chunk_block_rsv;

	update_global_block_rsv(fs_info);
}

static void release_global_block_rsv(struct btrfs_fs_info *fs_info)
{
	block_rsv_release_bytes(fs_info, &fs_info->global_block_rsv, NULL,
				(u64)-1);
	WARN_ON(fs_info->trans_block_rsv.size > 0);
	WARN_ON(fs_info->trans_block_rsv.reserved > 0);
	WARN_ON(fs_info->chunk_block_rsv.size > 0);
	WARN_ON(fs_info->chunk_block_rsv.reserved > 0);
	WARN_ON(fs_info->delayed_block_rsv.size > 0);
	WARN_ON(fs_info->delayed_block_rsv.reserved > 0);
}

void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans,
				  struct btrfs_fs_info *fs_info)
{
	if (!trans->block_rsv) {
		ASSERT(!trans->bytes_reserved);
		return;
	}
	if (!trans->bytes_reserved)
		return;

	ASSERT(trans->block_rsv == &fs_info->trans_block_rsv);
	trace_btrfs_space_reservation(fs_info, "transaction",
				      trans->transid, trans->bytes_reserved, 0);
	btrfs_block_rsv_release(fs_info, trans->block_rsv,
				trans->bytes_reserved);
	trans->bytes_reserved = 0;
}

/*
 * To be called after all the new block groups attached to the transaction
 * handle have been created (btrfs_create_pending_block_groups()).
 */
void btrfs_trans_release_chunk_metadata(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;

	if (!trans->chunk_bytes_reserved)
		return;

	WARN_ON_ONCE(!list_empty(&trans->new_bgs));

	block_rsv_release_bytes(fs_info, &fs_info->chunk_block_rsv, NULL,
				trans->chunk_bytes_reserved);
	trans->chunk_bytes_reserved = 0;
}

/* Can only return 0 or -ENOSPC */
int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans,
				  struct btrfs_inode *inode)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
	struct btrfs_root *root = inode->root;
	/*
	 * We always use trans->block_rsv here as we will have reserved space
	 * for our orphan when starting the transaction; using get_block_rsv()
	 * here would sometimes make us choose the wrong block rsv, as we could
	 * be doing a reloc inode for a non-refcounted root.
	 */
	struct btrfs_block_rsv *src_rsv = trans->block_rsv;
	struct btrfs_block_rsv *dst_rsv = root->orphan_block_rsv;

	/*
	 * We need to hold space in order to delete our orphan item once we've
	 * added it, so this takes the reservation so we can release it later
	 * when we are truly done with the orphan item.
	 */
	u64 num_bytes = btrfs_calc_trans_metadata_size(fs_info, 1);

	trace_btrfs_space_reservation(fs_info, "orphan", btrfs_ino(inode),
				      num_bytes, 1);
	return btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, 1);
}

void btrfs_orphan_release_metadata(struct btrfs_inode *inode)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
	struct btrfs_root *root = inode->root;
	u64 num_bytes = btrfs_calc_trans_metadata_size(fs_info, 1);

	trace_btrfs_space_reservation(fs_info, "orphan", btrfs_ino(inode),
				      num_bytes, 0);
	btrfs_block_rsv_release(fs_info, root->orphan_block_rsv, num_bytes);
}

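/*
 * btrfs_calc_trans_metadata_size(fs_info, n) charges the worst-case cost
 * of CoWing a full-height path for n items (2 * nodesize * BTRFS_MAX_LEVEL
 * * n), so the single orphan item above reserves 256K on a 16K-nodesize
 * filesystem.
 */
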
/*
 * btrfs_subvolume_reserve_metadata() - reserve space for subvolume operation
 * root: the root of the parent directory
 * rsv: block reservation
 * items: the number of items that we need to reserve
 * qgroup_reserved: used to return the reserved size in qgroup
 *
 * This function is used to reserve the space for snapshot/subvolume
 * creation and deletion. Those operations differ from the common
 * file/directory operations: they change two fs/file trees
 * and the root tree, so the number of items that the qgroup reserves
 * differs from the free space reservation. So we can not use
 * the space reservation mechanism in start_transaction().
 */
int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
				     struct btrfs_block_rsv *rsv,
				     int items,
				     u64 *qgroup_reserved,
				     bool use_global_rsv)
{
	u64 num_bytes;
	int ret;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;

	if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)) {
		/* One for parent inode, two for dir entries */
		num_bytes = 3 * fs_info->nodesize;
		ret = btrfs_qgroup_reserve_meta(root, num_bytes, true);
		if (ret)
			return ret;
	} else {
		num_bytes = 0;
	}

	*qgroup_reserved = num_bytes;

	num_bytes = btrfs_calc_trans_metadata_size(fs_info, items);
	rsv->space_info = __find_space_info(fs_info,
					    BTRFS_BLOCK_GROUP_METADATA);
	ret = btrfs_block_rsv_add(root, rsv, num_bytes,
				  BTRFS_RESERVE_FLUSH_ALL);

	if (ret == -ENOSPC && use_global_rsv)
		ret = btrfs_block_rsv_migrate(global_rsv, rsv, num_bytes, 1);

	if (ret && *qgroup_reserved)
		btrfs_qgroup_free_meta(root, *qgroup_reserved);

	return ret;
}

void btrfs_subvolume_release_metadata(struct btrfs_fs_info *fs_info,
				      struct btrfs_block_rsv *rsv)
{
	btrfs_block_rsv_release(fs_info, rsv, (u64)-1);
}

static void btrfs_calculate_inode_block_rsv_size(struct btrfs_fs_info *fs_info,
						 struct btrfs_inode *inode)
{
	struct btrfs_block_rsv *block_rsv = &inode->block_rsv;
	u64 reserve_size = 0;
	u64 csum_leaves;
	unsigned outstanding_extents;

	lockdep_assert_held(&inode->lock);
	outstanding_extents = inode->outstanding_extents;
	if (outstanding_extents)
		reserve_size = btrfs_calc_trans_metadata_size(fs_info,
						outstanding_extents + 1);
	csum_leaves = btrfs_csum_bytes_to_leaves(fs_info,
						 inode->csum_bytes);
	reserve_size += btrfs_calc_trans_metadata_size(fs_info,
						       csum_leaves);

	spin_lock(&block_rsv->lock);
	block_rsv->size = reserve_size;
	spin_unlock(&block_rsv->lock);
}

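/*
 * The resulting size covers (outstanding_extents + 1) tree operations plus
 * however many csum leaves inode->csum_bytes worth of data can occupy, so
 * recomputing it here shrinks the rsv automatically as extents complete
 * and csums are written out.
 */
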
int btrfs_delalloc_reserve_metadata(struct btrfs_inode *inode, u64 num_bytes)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
	struct btrfs_root *root = inode->root;
	unsigned nr_extents;
	enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_FLUSH_ALL;
	int ret = 0;
	bool delalloc_lock = true;

	/*
	 * If we are a free space inode we need to not flush since we will be
	 * in the middle of a transaction commit. We also don't need the
	 * delalloc mutex since we won't race with anybody. We need this
	 * mostly to make lockdep shut its filthy mouth.
	 *
	 * If we have a transaction open (can happen if we call truncate_block
	 * from truncate), then we need FLUSH_LIMIT so we don't deadlock.
	 */
	if (btrfs_is_free_space_inode(inode)) {
		flush = BTRFS_RESERVE_NO_FLUSH;
		delalloc_lock = false;
	} else if (current->journal_info) {
		flush = BTRFS_RESERVE_FLUSH_LIMIT;
	}

	if (flush != BTRFS_RESERVE_NO_FLUSH &&
	    btrfs_transaction_in_commit(fs_info))
		schedule_timeout(1);

	if (delalloc_lock)
		mutex_lock(&inode->delalloc_mutex);

	num_bytes = ALIGN(num_bytes, fs_info->sectorsize);

	/* Add our new extents and calculate the new rsv size. */
	spin_lock(&inode->lock);
	nr_extents = count_max_extents(num_bytes);
	btrfs_mod_outstanding_extents(inode, nr_extents);
	inode->csum_bytes += num_bytes;
	btrfs_calculate_inode_block_rsv_size(fs_info, inode);
	spin_unlock(&inode->lock);

	if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)) {
		ret = btrfs_qgroup_reserve_meta(root,
				nr_extents * fs_info->nodesize, true);
		if (ret)
			goto out_fail;
	}

	ret = btrfs_inode_rsv_refill(inode, flush);
	if (unlikely(ret)) {
		btrfs_qgroup_free_meta(root,
				       nr_extents * fs_info->nodesize);
		goto out_fail;
	}

	if (delalloc_lock)
		mutex_unlock(&inode->delalloc_mutex);
	return 0;

out_fail:
	spin_lock(&inode->lock);
	nr_extents = count_max_extents(num_bytes);
	btrfs_mod_outstanding_extents(inode, -nr_extents);
	inode->csum_bytes -= num_bytes;
	btrfs_calculate_inode_block_rsv_size(fs_info, inode);
	spin_unlock(&inode->lock);

	btrfs_inode_rsv_release(inode);
	if (delalloc_lock)
		mutex_unlock(&inode->delalloc_mutex);
	return ret;
}

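/*
 * count_max_extents() assumes the worst case of one extent per
 * BTRFS_MAX_EXTENT_SIZE (128M) chunk, so e.g. a 200M reservation adds two
 * outstanding extents here and must hand the same length back to
 * btrfs_delalloc_release_extents() later.
 */
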
/**
 * btrfs_delalloc_release_metadata - release a metadata reservation for an inode
 * @inode: the inode to release the reservation for.
 * @num_bytes: the number of bytes we are releasing.
 *
 * This will release the metadata reservation for an inode. This can be called
 * once we complete IO for a given set of bytes to release their metadata
 * reservations, or on error for the same reason.
 */
void btrfs_delalloc_release_metadata(struct btrfs_inode *inode, u64 num_bytes)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);

	num_bytes = ALIGN(num_bytes, fs_info->sectorsize);
	spin_lock(&inode->lock);
	inode->csum_bytes -= num_bytes;
	btrfs_calculate_inode_block_rsv_size(fs_info, inode);
	spin_unlock(&inode->lock);

	if (btrfs_is_testing(fs_info))
		return;

	btrfs_inode_rsv_release(inode);
}

/**
 * btrfs_delalloc_release_extents - release our outstanding_extents
 * @inode: the inode to balance the reservation for.
 * @num_bytes: the number of bytes we originally reserved.
 *
 * When we reserve space we increase outstanding_extents for the extents we
 * may add. Once we've set the range as delalloc or created our ordered
 * extents we have outstanding_extents to track the real usage, so we use
 * this to free our temporarily tracked outstanding_extents. This _must_ be
 * used in conjunction with btrfs_delalloc_reserve_metadata.
 */
void btrfs_delalloc_release_extents(struct btrfs_inode *inode, u64 num_bytes)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
	unsigned num_extents;

	spin_lock(&inode->lock);
	num_extents = count_max_extents(num_bytes);
	btrfs_mod_outstanding_extents(inode, -num_extents);
	btrfs_calculate_inode_block_rsv_size(fs_info, inode);
	spin_unlock(&inode->lock);

	if (btrfs_is_testing(fs_info))
		return;

	btrfs_inode_rsv_release(inode);
}

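/*
 * Illustrative pairing (simplified, error handling omitted):
 *
 *	btrfs_delalloc_reserve_metadata(inode, len);
 *	... mark the range delalloc / create the ordered extent ...
 *	btrfs_delalloc_release_extents(inode, len);
 *
 * The release drops only the temporary outstanding_extents bump; the real
 * usage is tracked by the delalloc/ordered extent state from then on.
 */
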
/**
 * btrfs_delalloc_reserve_space - reserve data and metadata space for
 * delalloc
 * @inode: inode we're writing to
 * @start: start range we are writing to
 * @len: the length of the range we are writing to
 * @reserved: mandatory parameter, record actually reserved qgroup ranges of
 *	      current reservation.
 *
 * This will do the following things
 *
 * o reserve space in data space info for num bytes
 *   and reserve precious corresponding qgroup space
 *   (Done in check_data_free_space)
 *
 * o reserve space for metadata space, based on the number of outstanding
 *   extents and how many csums will be needed
 *   also reserve metadata space in a per root over-reserve method.
 * o add to the inodes->delalloc_bytes
 * o add it to the fs_info's delalloc inodes list.
 *   (Above 3 all done in delalloc_reserve_metadata)
 *
 * Return 0 for success
 * Return <0 for error (-ENOSPC or -EDQUOT)
 */
int btrfs_delalloc_reserve_space(struct inode *inode,
			struct extent_changeset **reserved, u64 start, u64 len)
{
	int ret;

	ret = btrfs_check_data_free_space(inode, reserved, start, len);
	if (ret < 0)
		return ret;
	ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode), len);
	if (ret < 0)
		btrfs_free_reserved_data_space(inode, *reserved, start, len);
	return ret;
}

/**
 * btrfs_delalloc_release_space - release data and metadata space for delalloc
 * @inode: inode we're releasing space for
 * @reserved: the qgroup ranges recorded at reservation time
 * @start: start position of the space already reserved
 * @len: the length of the space already reserved
 *
 * This function will release the metadata space that was not used and will
 * decrement ->delalloc_bytes and remove it from the fs_info delalloc_inodes
 * list if there are no delalloc bytes left.
 * Also it will handle the qgroup reserved space.
 */
void btrfs_delalloc_release_space(struct inode *inode,
				  struct extent_changeset *reserved,
				  u64 start, u64 len)
{
	btrfs_delalloc_release_metadata(BTRFS_I(inode), len);
	btrfs_free_reserved_data_space(inode, reserved, start, len);
}

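/*
 * Illustrative write-path usage (hypothetical caller, error handling
 * omitted):
 *
 *	struct extent_changeset *reserved = NULL;
 *
 *	ret = btrfs_delalloc_reserve_space(inode, &reserved, pos, len);
 *	... copy data into the page cache ...
 *	if (something_failed)
 *		btrfs_delalloc_release_space(inode, reserved, pos, len);
 */
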
static int update_block_group(struct btrfs_trans_handle *trans,
			      struct btrfs_fs_info *info, u64 bytenr,
			      u64 num_bytes, int alloc)
{
	struct btrfs_block_group_cache *cache = NULL;
	u64 total = num_bytes;
	u64 old_val;
	u64 byte_in_group;
	int factor;

	/* block accounting for super block */
	spin_lock(&info->delalloc_root_lock);
	old_val = btrfs_super_bytes_used(info->super_copy);
	if (alloc)
		old_val += num_bytes;
	else
		old_val -= num_bytes;
	btrfs_set_super_bytes_used(info->super_copy, old_val);
	spin_unlock(&info->delalloc_root_lock);

	while (total) {
		cache = btrfs_lookup_block_group(info, bytenr);
		if (!cache)
			return -ENOENT;
		if (cache->flags & (BTRFS_BLOCK_GROUP_DUP |
				    BTRFS_BLOCK_GROUP_RAID1 |
				    BTRFS_BLOCK_GROUP_RAID10))
			factor = 2;
		else
			factor = 1;
		/*
		 * If this block group has free space cache written out, we
		 * need to make sure to load it if we are removing space. This
		 * is because we need the unpinning stage to actually add the
		 * space back to the block group, otherwise we will leak space.
		 */
		if (!alloc && cache->cached == BTRFS_CACHE_NO)
			cache_block_group(cache, 1);

		byte_in_group = bytenr - cache->key.objectid;
		WARN_ON(byte_in_group > cache->key.offset);

		spin_lock(&cache->space_info->lock);
		spin_lock(&cache->lock);

		if (btrfs_test_opt(info, SPACE_CACHE) &&
		    cache->disk_cache_state < BTRFS_DC_CLEAR)
			cache->disk_cache_state = BTRFS_DC_CLEAR;

		old_val = btrfs_block_group_used(&cache->item);
		num_bytes = min(total, cache->key.offset - byte_in_group);
		if (alloc) {
			old_val += num_bytes;
			btrfs_set_block_group_used(&cache->item, old_val);
			cache->reserved -= num_bytes;
			cache->space_info->bytes_reserved -= num_bytes;
			cache->space_info->bytes_used += num_bytes;
			cache->space_info->disk_used += num_bytes * factor;
			spin_unlock(&cache->lock);
			spin_unlock(&cache->space_info->lock);
		} else {
			old_val -= num_bytes;
			btrfs_set_block_group_used(&cache->item, old_val);
			cache->pinned += num_bytes;
			cache->space_info->bytes_pinned += num_bytes;
			cache->space_info->bytes_used -= num_bytes;
			cache->space_info->disk_used -= num_bytes * factor;
			spin_unlock(&cache->lock);
			spin_unlock(&cache->space_info->lock);

			trace_btrfs_space_reservation(info, "pinned",
						      cache->space_info->flags,
						      num_bytes, 1);
			percpu_counter_add(&cache->space_info->total_bytes_pinned,
					   num_bytes);
			set_extent_dirty(info->pinned_extents,
					 bytenr, bytenr + num_bytes - 1,
					 GFP_NOFS | __GFP_NOFAIL);
		}

		spin_lock(&trans->transaction->dirty_bgs_lock);
		if (list_empty(&cache->dirty_list)) {
			list_add_tail(&cache->dirty_list,
				      &trans->transaction->dirty_bgs);
			trans->transaction->num_dirty_bgs++;
			btrfs_get_block_group(cache);
		}
		spin_unlock(&trans->transaction->dirty_bgs_lock);

		/*
		 * No longer have used bytes in this block group, queue it for
		 * deletion. We do this after adding the block group to the
		 * dirty list to avoid races between cleaner kthread and space
		 * cache writeout.
		 */
		if (!alloc && old_val == 0) {
			spin_lock(&info->unused_bgs_lock);
			if (list_empty(&cache->bg_list)) {
				btrfs_get_block_group(cache);
				list_add_tail(&cache->bg_list,
					      &info->unused_bgs);
			}
			spin_unlock(&info->unused_bgs_lock);
		}
		btrfs_put_block_group(cache);
		total -= num_bytes;
		bytenr += num_bytes;
	}
	return 0;
}

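/*
 * The factor above accounts for on-disk duplication: allocating 1M in a
 * DUP/RAID1/RAID10 block group moves bytes_used by 1M but disk_used by 2M,
 * while single and RAID0 groups use factor 1.
 */
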
static u64 first_logical_byte(struct btrfs_fs_info *fs_info, u64 search_start)
{
	struct btrfs_block_group_cache *cache;
	u64 bytenr;

	spin_lock(&fs_info->block_group_cache_lock);
	bytenr = fs_info->first_logical_byte;
	spin_unlock(&fs_info->block_group_cache_lock);

	if (bytenr < (u64)-1)
		return bytenr;

	cache = btrfs_lookup_first_block_group(fs_info, search_start);
	if (!cache)
		return 0;

	bytenr = cache->key.objectid;
	btrfs_put_block_group(cache);

	return bytenr;
}

static int pin_down_extent(struct btrfs_fs_info *fs_info,
			   struct btrfs_block_group_cache *cache,
			   u64 bytenr, u64 num_bytes, int reserved)
{
	spin_lock(&cache->space_info->lock);
	spin_lock(&cache->lock);
	cache->pinned += num_bytes;
	cache->space_info->bytes_pinned += num_bytes;
	if (reserved) {
		cache->reserved -= num_bytes;
		cache->space_info->bytes_reserved -= num_bytes;
	}
	spin_unlock(&cache->lock);
	spin_unlock(&cache->space_info->lock);

	trace_btrfs_space_reservation(fs_info, "pinned",
				      cache->space_info->flags, num_bytes, 1);
	percpu_counter_add(&cache->space_info->total_bytes_pinned, num_bytes);
	set_extent_dirty(fs_info->pinned_extents, bytenr,
			 bytenr + num_bytes - 1, GFP_NOFS | __GFP_NOFAIL);
	return 0;
}

/*
 * this function must be called within transaction
 */
int btrfs_pin_extent(struct btrfs_fs_info *fs_info,
		     u64 bytenr, u64 num_bytes, int reserved)
{
	struct btrfs_block_group_cache *cache;

	cache = btrfs_lookup_block_group(fs_info, bytenr);
	BUG_ON(!cache); /* Logic error */

	pin_down_extent(fs_info, cache, bytenr, num_bytes, reserved);

	btrfs_put_block_group(cache);
	return 0;
}

/*
 * this function must be called within transaction
 */
int btrfs_pin_extent_for_log_replay(struct btrfs_fs_info *fs_info,
				    u64 bytenr, u64 num_bytes)
{
	struct btrfs_block_group_cache *cache;
	int ret;

	cache = btrfs_lookup_block_group(fs_info, bytenr);
	if (!cache)
		return -EINVAL;

	/*
	 * pull in the free space cache (if any) so that our pin
	 * removes the free space from the cache. We have load_only set
	 * to one because the slow code to read in the free extents does check
	 * the pinned extents.
	 */
	cache_block_group(cache, 1);

	pin_down_extent(fs_info, cache, bytenr, num_bytes, 0);

	/* remove us from the free space cache (if we're there at all) */
	ret = btrfs_remove_free_space(cache, bytenr, num_bytes);
	btrfs_put_block_group(cache);
	return ret;
}

static int __exclude_logged_extent(struct btrfs_fs_info *fs_info,
				   u64 start, u64 num_bytes)
{
	int ret;
	struct btrfs_block_group_cache *block_group;
	struct btrfs_caching_control *caching_ctl;

	block_group = btrfs_lookup_block_group(fs_info, start);
	if (!block_group)
		return -EINVAL;

	cache_block_group(block_group, 0);
	caching_ctl = get_caching_control(block_group);

	if (!caching_ctl) {
		/* Logic error */
		BUG_ON(!block_group_cache_done(block_group));
		ret = btrfs_remove_free_space(block_group, start, num_bytes);
	} else {
		mutex_lock(&caching_ctl->mutex);

		if (start >= caching_ctl->progress) {
			ret = add_excluded_extent(fs_info, start, num_bytes);
		} else if (start + num_bytes <= caching_ctl->progress) {
			ret = btrfs_remove_free_space(block_group,
						      start, num_bytes);
		} else {
			num_bytes = caching_ctl->progress - start;
			ret = btrfs_remove_free_space(block_group,
						      start, num_bytes);
			if (ret)
				goto out_lock;

			num_bytes = (start + num_bytes) -
				caching_ctl->progress;
			start = caching_ctl->progress;
			ret = add_excluded_extent(fs_info, start, num_bytes);
		}
out_lock:
		mutex_unlock(&caching_ctl->mutex);
		put_caching_control(caching_ctl);
	}
	btrfs_put_block_group(block_group);
	return ret;
}

int btrfs_exclude_logged_extents(struct btrfs_fs_info *fs_info,
				 struct extent_buffer *eb)
{
	struct btrfs_file_extent_item *item;
	struct btrfs_key key;
	int found_type;
	int i;

	if (!btrfs_fs_incompat(fs_info, MIXED_GROUPS))
		return 0;

	for (i = 0; i < btrfs_header_nritems(eb); i++) {
		btrfs_item_key_to_cpu(eb, &key, i);
		if (key.type != BTRFS_EXTENT_DATA_KEY)
			continue;
		item = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item);
		found_type = btrfs_file_extent_type(eb, item);
		if (found_type == BTRFS_FILE_EXTENT_INLINE)
			continue;
		if (btrfs_file_extent_disk_bytenr(eb, item) == 0)
			continue;
		key.objectid = btrfs_file_extent_disk_bytenr(eb, item);
		key.offset = btrfs_file_extent_disk_num_bytes(eb, item);
		__exclude_logged_extent(fs_info, key.objectid, key.offset);
	}

	return 0;
}

static void
btrfs_inc_block_group_reservations(struct btrfs_block_group_cache *bg)
{
	atomic_inc(&bg->reservations);
}

void btrfs_dec_block_group_reservations(struct btrfs_fs_info *fs_info,
					const u64 start)
{
	struct btrfs_block_group_cache *bg;

	bg = btrfs_lookup_block_group(fs_info, start);
	ASSERT(bg);
	if (atomic_dec_and_test(&bg->reservations))
		wake_up_atomic_t(&bg->reservations);
	btrfs_put_block_group(bg);
}

static int btrfs_wait_bg_reservations_atomic_t(atomic_t *a)
{
	schedule();
	return 0;
}

void btrfs_wait_block_group_reservations(struct btrfs_block_group_cache *bg)
{
	struct btrfs_space_info *space_info = bg->space_info;

	ASSERT(bg->ro);

	if (!(bg->flags & BTRFS_BLOCK_GROUP_DATA))
		return;

	/*
	 * Our block group is read only but before we set it to read only,
	 * some task might have allocated an extent from it already, but it
	 * has not yet created a respective ordered extent (and added it to a
	 * root's list of ordered extents).
	 * Therefore wait for any task currently allocating extents, since the
	 * block group's reservations counter is incremented while a read lock
	 * on the groups' semaphore is held and decremented after releasing
	 * the read access on that semaphore and creating the ordered extent.
	 */
	down_write(&space_info->groups_sem);
	up_write(&space_info->groups_sem);

	wait_on_atomic_t(&bg->reservations,
			 btrfs_wait_bg_reservations_atomic_t,
			 TASK_UNINTERRUPTIBLE);
}

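/*
 * The back-to-back down_write()/up_write() above acts as a barrier: the
 * write lock cannot be taken until every reader that was inside groups_sem
 * when we arrived has left, so any racing allocation has already bumped
 * bg->reservations by the time we start waiting on it.
 */
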
/**
 * btrfs_add_reserved_bytes - update the block_group and space info counters
 * @cache:	The cache we are manipulating
 * @ram_bytes:	The number of bytes of file content, which will be the same
 *		as @num_bytes except for the compress path.
 * @num_bytes:	The number of bytes in question
 * @delalloc:	The blocks are allocated for the delalloc write
 *
 * This is called by the allocator when it reserves space. If this is a
 * reservation and the block group has become read only we cannot make the
 * reservation and return -EAGAIN, otherwise this function always succeeds.
 */
static int btrfs_add_reserved_bytes(struct btrfs_block_group_cache *cache,
				    u64 ram_bytes, u64 num_bytes, int delalloc)
{
	struct btrfs_space_info *space_info = cache->space_info;
	int ret = 0;

	spin_lock(&space_info->lock);
	spin_lock(&cache->lock);
	if (cache->ro) {
		ret = -EAGAIN;
	} else {
		cache->reserved += num_bytes;
		space_info->bytes_reserved += num_bytes;

		trace_btrfs_space_reservation(cache->fs_info,
				"space_info", space_info->flags,
				ram_bytes, 0);
		space_info->bytes_may_use -= ram_bytes;
		if (delalloc)
			cache->delalloc_bytes += num_bytes;
	}
	spin_unlock(&cache->lock);
	spin_unlock(&space_info->lock);
	return ret;
}

/**
 * btrfs_free_reserved_bytes - update the block_group and space info counters
 * @cache:	The cache we are manipulating
 * @num_bytes:	The number of bytes in question
 * @delalloc:	The blocks are allocated for the delalloc write
 *
 * This is called by somebody who is freeing space that was never actually
 * used on disk. For example if you reserve some space for a new leaf in
 * transaction A and before transaction A commits you free that leaf, you
 * call this to clear the reservation.
 */
static int btrfs_free_reserved_bytes(struct btrfs_block_group_cache *cache,
				     u64 num_bytes, int delalloc)
{
	struct btrfs_space_info *space_info = cache->space_info;
	int ret = 0;

	spin_lock(&space_info->lock);
	spin_lock(&cache->lock);
	if (cache->ro)
		space_info->bytes_readonly += num_bytes;
	cache->reserved -= num_bytes;
	space_info->bytes_reserved -= num_bytes;

	if (delalloc)
		cache->delalloc_bytes -= num_bytes;
	spin_unlock(&cache->lock);
	spin_unlock(&space_info->lock);
	return ret;
}

void btrfs_prepare_extent_commit(struct btrfs_fs_info *fs_info)
{
	struct btrfs_caching_control *next;
	struct btrfs_caching_control *caching_ctl;
	struct btrfs_block_group_cache *cache;

	down_write(&fs_info->commit_root_sem);

	list_for_each_entry_safe(caching_ctl, next,
				 &fs_info->caching_block_groups, list) {
		cache = caching_ctl->block_group;
		if (block_group_cache_done(cache)) {
			cache->last_byte_to_unpin = (u64)-1;
			list_del_init(&caching_ctl->list);
			put_caching_control(caching_ctl);
		} else {
			cache->last_byte_to_unpin = caching_ctl->progress;
		}
	}

	if (fs_info->pinned_extents == &fs_info->freed_extents[0])
		fs_info->pinned_extents = &fs_info->freed_extents[1];
	else
		fs_info->pinned_extents = &fs_info->freed_extents[0];

	up_write(&fs_info->commit_root_sem);

	update_global_block_rsv(fs_info);
}

/*
 * Returns the free cluster for the given space info and sets empty_cluster to
 * what it should be based on the mount options.
 */
static struct btrfs_free_cluster *
fetch_cluster_info(struct btrfs_fs_info *fs_info,
		   struct btrfs_space_info *space_info, u64 *empty_cluster)
{
	struct btrfs_free_cluster *ret = NULL;

	*empty_cluster = 0;
	if (btrfs_mixed_space_info(space_info))
		return ret;

	if (space_info->flags & BTRFS_BLOCK_GROUP_METADATA) {
		ret = &fs_info->meta_alloc_cluster;
		if (btrfs_test_opt(fs_info, SSD))
			*empty_cluster = SZ_2M;
		else
			*empty_cluster = SZ_64K;
	} else if ((space_info->flags & BTRFS_BLOCK_GROUP_DATA) &&
		   btrfs_test_opt(fs_info, SSD_SPREAD)) {
		*empty_cluster = SZ_2M;
		ret = &fs_info->data_alloc_cluster;
	}

	return ret;
}

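/*
 * Summary of the branches above: metadata on SSD clusters 2M, metadata on
 * rotational media 64K, data clusters 2M only under ssd_spread, and mixed
 * space infos never use a cluster at all.
 */
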
static int unpin_extent_range(struct btrfs_fs_info *fs_info,
			      u64 start, u64 end,
			      const bool return_free_space)
{
	struct btrfs_block_group_cache *cache = NULL;
	struct btrfs_space_info *space_info;
	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
	struct btrfs_free_cluster *cluster = NULL;
	u64 len;
	u64 total_unpinned = 0;
	u64 empty_cluster = 0;
	bool readonly;

	while (start <= end) {
		readonly = false;
		if (!cache ||
		    start >= cache->key.objectid + cache->key.offset) {
			if (cache)
				btrfs_put_block_group(cache);
			total_unpinned = 0;
			cache = btrfs_lookup_block_group(fs_info, start);
			BUG_ON(!cache); /* Logic error */

			cluster = fetch_cluster_info(fs_info,
						     cache->space_info,
						     &empty_cluster);
			empty_cluster <<= 1;
		}

		len = cache->key.objectid + cache->key.offset - start;
		len = min(len, end + 1 - start);

		if (start < cache->last_byte_to_unpin) {
			len = min(len, cache->last_byte_to_unpin - start);
			if (return_free_space)
				btrfs_add_free_space(cache, start, len);
		}

		start += len;
		total_unpinned += len;
		space_info = cache->space_info;

		/*
		 * If this space cluster has been marked as fragmented and we've
		 * unpinned enough in this block group to potentially allow a
		 * cluster to be created inside of it go ahead and clear the
		 * fragmented check.
		 */
		if (cluster && cluster->fragmented &&
		    total_unpinned > empty_cluster) {
			spin_lock(&cluster->lock);
			cluster->fragmented = 0;
			spin_unlock(&cluster->lock);
		}

		spin_lock(&space_info->lock);
		spin_lock(&cache->lock);
		cache->pinned -= len;
		space_info->bytes_pinned -= len;

		trace_btrfs_space_reservation(fs_info, "pinned",
					      space_info->flags, len, 0);
		space_info->max_extent_size = 0;
		percpu_counter_add(&space_info->total_bytes_pinned, -len);
		if (cache->ro) {
			space_info->bytes_readonly += len;
			readonly = true;
		}
		spin_unlock(&cache->lock);
		if (!readonly && return_free_space &&
		    global_rsv->space_info == space_info) {
			u64 to_add = len;

			spin_lock(&global_rsv->lock);
			if (!global_rsv->full) {
				to_add = min(len, global_rsv->size -
					     global_rsv->reserved);
				global_rsv->reserved += to_add;
				space_info->bytes_may_use += to_add;
				if (global_rsv->reserved >= global_rsv->size)
					global_rsv->full = 1;
				trace_btrfs_space_reservation(fs_info,
							      "space_info",
							      space_info->flags,
							      to_add, 1);
				len -= to_add;
			}
			spin_unlock(&global_rsv->lock);
			/* Add to any tickets we may have */
			if (len)
				space_info_add_new_bytes(fs_info, space_info,
							 len);
		}
		spin_unlock(&space_info->lock);
	}

	if (cache)
		btrfs_put_block_group(cache);
	return 0;
}

int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
			       struct btrfs_fs_info *fs_info)
{
	struct btrfs_block_group_cache *block_group, *tmp;
	struct list_head *deleted_bgs;
	struct extent_io_tree *unpin;
	u64 start;
	u64 end;
	int ret;

	if (fs_info->pinned_extents == &fs_info->freed_extents[0])
		unpin = &fs_info->freed_extents[1];
	else
		unpin = &fs_info->freed_extents[0];

	while (!trans->aborted) {
		mutex_lock(&fs_info->unused_bg_unpin_mutex);
		ret = find_first_extent_bit(unpin, 0, &start, &end,
					    EXTENT_DIRTY, NULL);
		if (ret) {
			mutex_unlock(&fs_info->unused_bg_unpin_mutex);
			break;
		}

		if (btrfs_test_opt(fs_info, DISCARD))
			ret = btrfs_discard_extent(fs_info, start,
						   end + 1 - start, NULL);

		clear_extent_dirty(unpin, start, end);
		unpin_extent_range(fs_info, start, end, true);
		mutex_unlock(&fs_info->unused_bg_unpin_mutex);
		cond_resched();
	}

	/*
	 * Transaction is finished. We don't need the lock anymore. We
	 * do need to clean up the block groups in case of a transaction
	 * abort.
	 */
	deleted_bgs = &trans->transaction->deleted_bgs;
	list_for_each_entry_safe(block_group, tmp, deleted_bgs, bg_list) {
		u64 trimmed = 0;

		ret = -EROFS;
		if (!trans->aborted)
			ret = btrfs_discard_extent(fs_info,
						   block_group->key.objectid,
						   block_group->key.offset,
						   &trimmed);

		list_del_init(&block_group->bg_list);
		btrfs_put_block_group_trimming(block_group);
		btrfs_put_block_group(block_group);

		if (ret) {
			const char *errstr = btrfs_decode_error(ret);
			btrfs_warn(fs_info,
			   "discard failed while removing blockgroup: errno=%d %s",
				   ret, errstr);
		}
	}

	return 0;
}

static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
			       struct btrfs_fs_info *info,
			       struct btrfs_delayed_ref_node *node, u64 parent,
			       u64 root_objectid, u64 owner_objectid,
			       u64 owner_offset, int refs_to_drop,
			       struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_key key;
	struct btrfs_path *path;
	struct btrfs_root *extent_root = info->extent_root;
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	struct btrfs_extent_inline_ref *iref;
	int ret;
	int is_data;
	int extent_slot = 0;
	int found_extent = 0;
	int num_to_del = 1;
	u32 item_size;
	u64 refs;
	u64 bytenr = node->bytenr;
	u64 num_bytes = node->num_bytes;
	int last_ref = 0;
	bool skinny_metadata = btrfs_fs_incompat(info, SKINNY_METADATA);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->reada = READA_FORWARD;
	path->leave_spinning = 1;

	is_data = owner_objectid >= BTRFS_FIRST_FREE_OBJECTID;
	BUG_ON(!is_data && refs_to_drop != 1);

	if (is_data)
		skinny_metadata = false;

	ret = lookup_extent_backref(trans, info, path, &iref,
				    bytenr, num_bytes, parent,
				    root_objectid, owner_objectid,
				    owner_offset);
	if (ret == 0) {
		extent_slot = path->slots[0];
		while (extent_slot >= 0) {
			btrfs_item_key_to_cpu(path->nodes[0], &key,
					      extent_slot);
			if (key.objectid != bytenr)
				break;
			if (key.type == BTRFS_EXTENT_ITEM_KEY &&
			    key.offset == num_bytes) {
				found_extent = 1;
				break;
			}
			if (key.type == BTRFS_METADATA_ITEM_KEY &&
			    key.offset == owner_objectid) {
				found_extent = 1;
				break;
			}
			if (path->slots[0] - extent_slot > 5)
				break;
			extent_slot--;
		}
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
		item_size = btrfs_item_size_nr(path->nodes[0], extent_slot);
		if (found_extent && item_size < sizeof(*ei))
			found_extent = 0;
#endif
		if (!found_extent) {
			BUG_ON(iref);
			ret = remove_extent_backref(trans, info, path, NULL,
						    refs_to_drop,
						    is_data, &last_ref);
			if (ret) {
				btrfs_abort_transaction(trans, ret);
				goto out;
			}
			btrfs_release_path(path);
			path->leave_spinning = 1;

			key.objectid = bytenr;
			key.type = BTRFS_EXTENT_ITEM_KEY;
			key.offset = num_bytes;

			if (!is_data && skinny_metadata) {
				key.type = BTRFS_METADATA_ITEM_KEY;
				key.offset = owner_objectid;
			}

			ret = btrfs_search_slot(trans, extent_root,
						&key, path, -1, 1);
			if (ret > 0 && skinny_metadata && path->slots[0]) {
				/*
				 * Couldn't find our skinny metadata item,
				 * see if we have ye olde extent item.
				 */
				path->slots[0]--;
				btrfs_item_key_to_cpu(path->nodes[0], &key,
						      path->slots[0]);
				if (key.objectid == bytenr &&
				    key.type == BTRFS_EXTENT_ITEM_KEY &&
				    key.offset == num_bytes)
					ret = 0;
			}

			if (ret > 0 && skinny_metadata) {
				skinny_metadata = false;
				key.objectid = bytenr;
				key.type = BTRFS_EXTENT_ITEM_KEY;
				key.offset = num_bytes;
				btrfs_release_path(path);
				ret = btrfs_search_slot(trans, extent_root,
							&key, path, -1, 1);
			}

			if (ret) {
				btrfs_err(info,
					  "umm, got %d back from search, was looking for %llu",
					  ret, bytenr);
				if (ret > 0)
					btrfs_print_leaf(path->nodes[0]);
			}
			if (ret < 0) {
				btrfs_abort_transaction(trans, ret);
				goto out;
			}
			extent_slot = path->slots[0];
		}
	} else if (WARN_ON(ret == -ENOENT)) {
		btrfs_print_leaf(path->nodes[0]);
		btrfs_err(info,
			"unable to find ref byte nr %llu parent %llu root %llu owner %llu offset %llu",
			bytenr, parent, root_objectid, owner_objectid,
			owner_offset);
		btrfs_abort_transaction(trans, ret);
		goto out;
	} else {
		btrfs_abort_transaction(trans, ret);
		goto out;
	}

	leaf = path->nodes[0];
	item_size = btrfs_item_size_nr(leaf, extent_slot);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (item_size < sizeof(*ei)) {
		BUG_ON(found_extent || extent_slot != path->slots[0]);
		ret = convert_extent_item_v0(trans, info, path, owner_objectid,
					     0);
		if (ret < 0) {
			btrfs_abort_transaction(trans, ret);
			goto out;
		}

		btrfs_release_path(path);
		path->leave_spinning = 1;

		key.objectid = bytenr;
		key.type = BTRFS_EXTENT_ITEM_KEY;
		key.offset = num_bytes;

		ret = btrfs_search_slot(trans, extent_root, &key, path,
					-1, 1);
		if (ret) {
			btrfs_err(info,
				  "umm, got %d back from search, was looking for %llu",
				  ret, bytenr);
			btrfs_print_leaf(path->nodes[0]);
		}
		if (ret < 0) {
			btrfs_abort_transaction(trans, ret);
			goto out;
		}

		extent_slot = path->slots[0];
		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, extent_slot);
	}
#endif
	BUG_ON(item_size < sizeof(*ei));
	ei = btrfs_item_ptr(leaf, extent_slot,
			    struct btrfs_extent_item);
	if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID &&
	    key.type == BTRFS_EXTENT_ITEM_KEY) {
		struct btrfs_tree_block_info *bi;
		BUG_ON(item_size < sizeof(*ei) + sizeof(*bi));
		bi = (struct btrfs_tree_block_info *)(ei + 1);
		WARN_ON(owner_objectid != btrfs_tree_block_level(leaf, bi));
	}

	refs = btrfs_extent_refs(leaf, ei);
	if (refs < refs_to_drop) {
		btrfs_err(info,
			  "trying to drop %d refs but we only have %Lu for bytenr %Lu",
			  refs_to_drop, refs, bytenr);
		ret = -EINVAL;
		btrfs_abort_transaction(trans, ret);
		goto out;
	}
	refs -= refs_to_drop;

	if (refs > 0) {
		if (extent_op)
			__run_delayed_extent_op(extent_op, leaf, ei);
		/*
		 * In the case of inline back ref, reference count will
		 * be updated by remove_extent_backref
		 */
		if (iref) {
			BUG_ON(!found_extent);
		} else {
			btrfs_set_extent_refs(leaf, ei, refs);
			btrfs_mark_buffer_dirty(leaf);
		}
		if (found_extent) {
			ret = remove_extent_backref(trans, info, path,
						    iref, refs_to_drop,
						    is_data, &last_ref);
			if (ret) {
				btrfs_abort_transaction(trans, ret);
				goto out;
			}
		}
	} else {
		if (found_extent) {
			BUG_ON(is_data && refs_to_drop !=
			       extent_data_ref_count(path, iref));
			if (iref) {
				BUG_ON(path->slots[0] != extent_slot);
			} else {
				BUG_ON(path->slots[0] != extent_slot + 1);
				path->slots[0] = extent_slot;
				num_to_del = 2;
			}
		}

		last_ref = 1;
		ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
				      num_to_del);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto out;
		}
		btrfs_release_path(path);

		if (is_data) {
			ret = btrfs_del_csums(trans, info, bytenr, num_bytes);
			if (ret) {
				btrfs_abort_transaction(trans, ret);
				goto out;
			}
		}

		ret = add_to_free_space_tree(trans, info, bytenr, num_bytes);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto out;
		}

		ret = update_block_group(trans, info, bytenr, num_bytes, 0);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto out;
		}
	}
	btrfs_release_path(path);

out:
	btrfs_free_path(path);
	return ret;
}

/*
 * when we free a block, it is possible (and likely) that we free the last
 * delayed ref for that extent as well. This searches the delayed ref tree for
 * a given extent, and if there are no other delayed refs to be processed, it
 * removes it from the tree.
 */
static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
				      u64 bytenr)
{
	struct btrfs_delayed_ref_head *head;
	struct btrfs_delayed_ref_root *delayed_refs;
	int ret = 0;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);
	head = btrfs_find_delayed_ref_head(delayed_refs, bytenr);
	if (!head)
		goto out_delayed_unlock;

	spin_lock(&head->lock);
	if (!RB_EMPTY_ROOT(&head->ref_tree))
		goto out;

	if (head->extent_op) {
		if (!head->must_insert_reserved)
			goto out;
		btrfs_free_delayed_extent_op(head->extent_op);
		head->extent_op = NULL;
	}

	/*
	 * waiting for the lock here would deadlock. If someone else has it
	 * locked they are already in the process of dropping it anyway
	 */
	if (!mutex_trylock(&head->mutex))
		goto out;

	/*
	 * at this point we have a head with no other entries. Go
	 * ahead and process it.
	 */
	rb_erase(&head->href_node, &delayed_refs->href_root);
	RB_CLEAR_NODE(&head->href_node);
	atomic_dec(&delayed_refs->num_entries);

	/*
	 * we don't take a ref on the node because we're removing it from the
	 * tree, so we just steal the ref the tree was holding.
	 */
	delayed_refs->num_heads--;
	if (head->processing == 0)
		delayed_refs->num_heads_ready--;
	head->processing = 0;
	spin_unlock(&head->lock);
	spin_unlock(&delayed_refs->lock);

	BUG_ON(head->extent_op);
	if (head->must_insert_reserved)
		ret = 1;

	mutex_unlock(&head->mutex);
	btrfs_put_delayed_ref_head(head);
	return ret;
out:
	spin_unlock(&head->lock);

out_delayed_unlock:
	spin_unlock(&delayed_refs->lock);
	return 0;
}

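/*
 * A return of 1 tells the caller the head was must_insert_reserved: the
 * extent item was never inserted, so the caller still owns the reserved
 * space and must free it itself. 0 means either there was nothing to clean
 * up or other delayed refs still reference the extent.
 */
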
void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   struct extent_buffer *buf,
			   u64 parent, int last_ref)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int pin = 1;
	int ret;

	if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
		int old_ref_mod, new_ref_mod;

		btrfs_ref_tree_mod(root, buf->start, buf->len, parent,
				   root->root_key.objectid,
				   btrfs_header_level(buf), 0,
				   BTRFS_DROP_DELAYED_REF);
		ret = btrfs_add_delayed_tree_ref(fs_info, trans, buf->start,
						 buf->len, parent,
						 root->root_key.objectid,
						 btrfs_header_level(buf),
						 BTRFS_DROP_DELAYED_REF, NULL,
						 &old_ref_mod, &new_ref_mod);
		BUG_ON(ret); /* -ENOMEM */
		pin = old_ref_mod >= 0 && new_ref_mod < 0;
	}

	if (last_ref && btrfs_header_generation(buf) == trans->transid) {
		struct btrfs_block_group_cache *cache;

		if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
			ret = check_ref_cleanup(trans, buf->start);
			if (!ret)
				goto out;
		}

		pin = 0;
		cache = btrfs_lookup_block_group(fs_info, buf->start);

		if (btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
			pin_down_extent(fs_info, cache, buf->start,
					buf->len, 1);
			btrfs_put_block_group(cache);
			goto out;
		}

		WARN_ON(test_bit(EXTENT_BUFFER_DIRTY, &buf->bflags));

		btrfs_add_free_space(cache, buf->start, buf->len);
		btrfs_free_reserved_bytes(cache, buf->len, 0);
		btrfs_put_block_group(cache);
		trace_btrfs_reserved_extent_free(fs_info, buf->start, buf->len);
	}
out:
	if (pin)
		add_pinned_bytes(fs_info, buf->len, btrfs_header_level(buf),
				 root->root_key.objectid);

	if (last_ref) {
		/*
		 * Deleting the buffer, clear the corrupt flag since it doesn't
		 * matter anymore.
		 */
		clear_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags);
	}
}

/* Can return -ENOMEM */
int btrfs_free_extent(struct btrfs_trans_handle *trans,
		      struct btrfs_root *root,
		      u64 bytenr, u64 num_bytes, u64 parent, u64 root_objectid,
		      u64 owner, u64 offset)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int old_ref_mod, new_ref_mod;
	int ret;

	if (btrfs_is_testing(fs_info))
		return 0;

	if (root_objectid != BTRFS_TREE_LOG_OBJECTID)
		btrfs_ref_tree_mod(root, bytenr, num_bytes, parent,
				   root_objectid, owner, offset,
				   BTRFS_DROP_DELAYED_REF);

	/*
	 * tree log blocks never actually go into the extent allocation
	 * tree, just update pinning info and exit early.
	 */
	if (root_objectid == BTRFS_TREE_LOG_OBJECTID) {
		WARN_ON(owner >= BTRFS_FIRST_FREE_OBJECTID);
		/* unlocks the pinned mutex */
		btrfs_pin_extent(fs_info, bytenr, num_bytes, 1);
		old_ref_mod = new_ref_mod = 0;
		ret = 0;
	} else if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
						 num_bytes, parent,
						 root_objectid, (int)owner,
						 BTRFS_DROP_DELAYED_REF, NULL,
						 &old_ref_mod, &new_ref_mod);
	} else {
		ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
						 num_bytes, parent,
						 root_objectid, owner, offset,
						 0, BTRFS_DROP_DELAYED_REF,
						 &old_ref_mod, &new_ref_mod);
	}

	if (ret == 0 && old_ref_mod >= 0 && new_ref_mod < 0)
		add_pinned_bytes(fs_info, num_bytes, owner, root_objectid);

	return ret;
}

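/*
 * The old_ref_mod >= 0 && new_ref_mod < 0 test catches the drop that takes
 * the delayed head's net ref count negative, i.e. the extent really will
 * be freed once the delayed refs run, so its bytes are accounted as pinned
 * from this point.
 */
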
/*
 * When we wait for progress in the block group caching, it's because
 * our allocation attempt failed at least once. So, we must sleep
 * and let some progress happen before we try again.
 *
 * This function will sleep at least once waiting for new free space to
 * show up, and then it will check the block group free space numbers
 * for our min num_bytes. Another option is to have it go ahead
 * and look in the rbtree for a free extent of a given size, but this
 * is a good start.
 *
 * Callers of this must check if cache->cached == BTRFS_CACHE_ERROR before using
 * any of the information in this block group.
 */
static noinline void
wait_block_group_cache_progress(struct btrfs_block_group_cache *cache,
				u64 num_bytes)
{
	struct btrfs_caching_control *caching_ctl;

	caching_ctl = get_caching_control(cache);
	if (!caching_ctl)
		return;

	wait_event(caching_ctl->wait, block_group_cache_done(cache) ||
		   (cache->free_space_ctl->free_space >= num_bytes));

	put_caching_control(caching_ctl);
}

static noinline int
wait_block_group_cache_done(struct btrfs_block_group_cache *cache)
{
	struct btrfs_caching_control *caching_ctl;
	int ret = 0;

	caching_ctl = get_caching_control(cache);
	if (!caching_ctl)
		return (cache->cached == BTRFS_CACHE_ERROR) ? -EIO : 0;

	wait_event(caching_ctl->wait, block_group_cache_done(cache));
	if (cache->cached == BTRFS_CACHE_ERROR)
		ret = -EIO;
	put_caching_control(caching_ctl);
	return ret;
}
int __get_raid_index(u64 flags)
{
	if (flags & BTRFS_BLOCK_GROUP_RAID10)
		return BTRFS_RAID_RAID10;
	else if (flags & BTRFS_BLOCK_GROUP_RAID1)
		return BTRFS_RAID_RAID1;
	else if (flags & BTRFS_BLOCK_GROUP_DUP)
		return BTRFS_RAID_DUP;
	else if (flags & BTRFS_BLOCK_GROUP_RAID0)
		return BTRFS_RAID_RAID0;
	else if (flags & BTRFS_BLOCK_GROUP_RAID5)
		return BTRFS_RAID_RAID5;
	else if (flags & BTRFS_BLOCK_GROUP_RAID6)
		return BTRFS_RAID_RAID6;

	return BTRFS_RAID_SINGLE; /* BTRFS_BLOCK_GROUP_SINGLE */
}

int get_block_group_index(struct btrfs_block_group_cache *cache)
{
	return __get_raid_index(cache->flags);
}
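
/*
 * For example, __get_raid_index(BTRFS_BLOCK_GROUP_METADATA |
 * BTRFS_BLOCK_GROUP_RAID1) returns BTRFS_RAID_RAID1: only the profile bits
 * are inspected, the data/metadata/system type bits fall through, and a
 * profile with no redundancy bits set at all is reported as
 * BTRFS_RAID_SINGLE.
 */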
static const char *btrfs_raid_type_names[BTRFS_NR_RAID_TYPES] = {
	[BTRFS_RAID_RAID10]	= "raid10",
	[BTRFS_RAID_RAID1]	= "raid1",
	[BTRFS_RAID_DUP]	= "dup",
	[BTRFS_RAID_RAID0]	= "raid0",
	[BTRFS_RAID_SINGLE]	= "single",
	[BTRFS_RAID_RAID5]	= "raid5",
	[BTRFS_RAID_RAID6]	= "raid6",
};

static const char *get_raid_name(enum btrfs_raid_types type)
{
	if (type >= BTRFS_NR_RAID_TYPES)
		return NULL;

	return btrfs_raid_type_names[type];
}
enum btrfs_loop_type {
	LOOP_CACHING_NOWAIT = 0,
	LOOP_CACHING_WAIT = 1,
	LOOP_ALLOC_CHUNK = 2,
	LOOP_NO_EMPTY_SIZE = 3,
};
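
/*
 * These stages escalate in order inside find_free_extent() below. As a
 * sketch: a LOOP_CACHING_NOWAIT pass only takes what already-cached block
 * groups offer, LOOP_CACHING_WAIT also waits on the caching kthreads,
 * LOOP_ALLOC_CHUNK forces allocation of a brand new chunk, and
 * LOOP_NO_EMPTY_SIZE retries once more with empty_size and empty_cluster
 * dropped to 0 before giving up with ENOSPC.
 */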
static inline void
btrfs_lock_block_group(struct btrfs_block_group_cache *cache,
		       int delalloc)
{
	if (delalloc)
		down_read(&cache->data_rwsem);
}

static inline void
btrfs_grab_block_group(struct btrfs_block_group_cache *cache,
		       int delalloc)
{
	btrfs_get_block_group(cache);
	if (delalloc)
		down_read(&cache->data_rwsem);
}

static struct btrfs_block_group_cache *
btrfs_lock_cluster(struct btrfs_block_group_cache *block_group,
		   struct btrfs_free_cluster *cluster,
		   int delalloc)
{
	struct btrfs_block_group_cache *used_bg = NULL;

	spin_lock(&cluster->refill_lock);
	while (1) {
		used_bg = cluster->block_group;
		if (!used_bg)
			return NULL;

		if (used_bg == block_group)
			return used_bg;

		btrfs_get_block_group(used_bg);

		if (!delalloc)
			return used_bg;

		if (down_read_trylock(&used_bg->data_rwsem))
			return used_bg;

		spin_unlock(&cluster->refill_lock);

		/* We should only have one-level nested. */
		down_read_nested(&used_bg->data_rwsem, SINGLE_DEPTH_NESTING);

		spin_lock(&cluster->refill_lock);
		if (used_bg == cluster->block_group)
			return used_bg;

		up_read(&used_bg->data_rwsem);
		btrfs_put_block_group(used_bg);
	}
}

static inline void
btrfs_release_block_group(struct btrfs_block_group_cache *cache,
			  int delalloc)
{
	if (delalloc)
		up_read(&cache->data_rwsem);
	btrfs_put_block_group(cache);
}
/*
 * Walks the btree of allocated extents and finds a hole of a given size.
 * The key ins is changed to record the hole:
 * ins->objectid == start position
 * ins->flags = BTRFS_EXTENT_ITEM_KEY
 * ins->offset == the size of the hole.
 * Any available blocks before search_start are skipped.
 *
 * If there is no suitable free space, we will record the max size of
 * the free space extent currently available.
 */
static noinline int find_free_extent(struct btrfs_fs_info *fs_info,
				u64 ram_bytes, u64 num_bytes, u64 empty_size,
				u64 hint_byte, struct btrfs_key *ins,
				u64 flags, int delalloc)
{
	int ret = 0;
	struct btrfs_root *root = fs_info->extent_root;
	struct btrfs_free_cluster *last_ptr = NULL;
	struct btrfs_block_group_cache *block_group = NULL;
	u64 search_start = 0;
	u64 max_extent_size = 0;
	u64 empty_cluster = 0;
	struct btrfs_space_info *space_info;
	int loop = 0;
	int index = __get_raid_index(flags);
	bool failed_cluster_refill = false;
	bool failed_alloc = false;
	bool use_cluster = true;
	bool have_caching_bg = false;
	bool orig_have_caching_bg = false;
	bool full_search = false;

	WARN_ON(num_bytes < fs_info->sectorsize);
	ins->type = BTRFS_EXTENT_ITEM_KEY;
	ins->objectid = 0;
	ins->offset = 0;

	trace_find_free_extent(fs_info, num_bytes, empty_size, flags);

	space_info = __find_space_info(fs_info, flags);
	if (!space_info) {
		btrfs_err(fs_info, "No space info for %llu", flags);
		return -ENOSPC;
	}

	/*
	 * If our free space is heavily fragmented we may not be able to make
	 * big contiguous allocations, so instead of doing the expensive search
	 * for free space, simply return ENOSPC with our max_extent_size so we
	 * can go ahead and search for a more manageable chunk.
	 *
	 * If our max_extent_size is large enough for our allocation simply
	 * disable clustering since we will likely not be able to find enough
	 * space to create a cluster and induce latency trying.
	 */
	if (unlikely(space_info->max_extent_size)) {
		spin_lock(&space_info->lock);
		if (space_info->max_extent_size &&
		    num_bytes > space_info->max_extent_size) {
			ins->offset = space_info->max_extent_size;
			spin_unlock(&space_info->lock);
			return -ENOSPC;
		} else if (space_info->max_extent_size) {
			use_cluster = false;
		}
		spin_unlock(&space_info->lock);
	}

	last_ptr = fetch_cluster_info(fs_info, space_info, &empty_cluster);
	if (last_ptr) {
		spin_lock(&last_ptr->lock);
		if (last_ptr->block_group)
			hint_byte = last_ptr->window_start;
		if (last_ptr->fragmented) {
			/*
			 * We still set window_start so we can keep track of the
			 * last place we found an allocation to try and save
			 * some time.
			 */
			hint_byte = last_ptr->window_start;
			use_cluster = false;
		}
		spin_unlock(&last_ptr->lock);
	}

	search_start = max(search_start, first_logical_byte(fs_info, 0));
	search_start = max(search_start, hint_byte);
	if (search_start == hint_byte) {
		block_group = btrfs_lookup_block_group(fs_info, search_start);
		/*
		 * we don't want to use the block group if it doesn't match our
		 * allocation bits, or if it's not cached.
		 *
		 * However if we are re-searching with an ideal block group
		 * picked out then we don't care that the block group is cached.
		 */
		if (block_group && block_group_bits(block_group, flags) &&
		    block_group->cached != BTRFS_CACHE_NO) {
			down_read(&space_info->groups_sem);
			if (list_empty(&block_group->list) ||
			    block_group->ro) {
				/*
				 * someone is removing this block group,
				 * we can't jump into the have_block_group
				 * target because our list pointers are not
				 * valid
				 */
				btrfs_put_block_group(block_group);
				up_read(&space_info->groups_sem);
			} else {
				index = get_block_group_index(block_group);
				btrfs_lock_block_group(block_group, delalloc);
				goto have_block_group;
			}
		} else if (block_group) {
			btrfs_put_block_group(block_group);
		}
	}
search:
	have_caching_bg = false;
	if (index == 0 || index == __get_raid_index(flags))
		full_search = true;
	down_read(&space_info->groups_sem);
	list_for_each_entry(block_group, &space_info->block_groups[index],
			    list) {
		u64 offset;
		int cached;

		/* If the block group is read-only, we can skip it entirely. */
		if (unlikely(block_group->ro))
			continue;

		btrfs_grab_block_group(block_group, delalloc);
		search_start = block_group->key.objectid;

		/*
		 * this can happen if we end up cycling through all the
		 * raid types, but we want to make sure we only allocate
		 * for the proper type.
		 */
		if (!block_group_bits(block_group, flags)) {
			u64 extra = BTRFS_BLOCK_GROUP_DUP |
				BTRFS_BLOCK_GROUP_RAID1 |
				BTRFS_BLOCK_GROUP_RAID5 |
				BTRFS_BLOCK_GROUP_RAID6 |
				BTRFS_BLOCK_GROUP_RAID10;

			/*
			 * if they asked for extra copies and this block group
			 * doesn't provide them, bail. This does allow us to
			 * fill raid0 from raid1.
			 */
			if ((flags & extra) && !(block_group->flags & extra))
				goto loop;
		}
have_block_group:
		cached = block_group_cache_done(block_group);
		if (unlikely(!cached)) {
			have_caching_bg = true;
			ret = cache_block_group(block_group, 0);
			BUG_ON(ret < 0);
			ret = 0;
		}

		if (unlikely(block_group->cached == BTRFS_CACHE_ERROR))
			goto loop;

		/*
		 * Ok we want to try and use the cluster allocator, so
		 * let's look there
		 */
		if (last_ptr && use_cluster) {
			struct btrfs_block_group_cache *used_block_group;
			unsigned long aligned_cluster;
			/*
			 * the refill lock keeps out other
			 * people trying to start a new cluster
			 */
			used_block_group = btrfs_lock_cluster(block_group,
							      last_ptr,
							      delalloc);
			if (!used_block_group)
				goto refill_cluster;

			if (used_block_group != block_group &&
			    (used_block_group->ro ||
			     !block_group_bits(used_block_group, flags)))
				goto release_cluster;

			offset = btrfs_alloc_from_cluster(used_block_group,
						last_ptr,
						num_bytes,
						used_block_group->key.objectid,
						&max_extent_size);
			if (offset) {
				/* we have a block, we're done */
				spin_unlock(&last_ptr->refill_lock);
				trace_btrfs_reserve_extent_cluster(fs_info,
						used_block_group,
						search_start, num_bytes);
				if (used_block_group != block_group) {
					btrfs_release_block_group(block_group,
								  delalloc);
					block_group = used_block_group;
				}
				goto checks;
			}

			WARN_ON(last_ptr->block_group != used_block_group);
release_cluster:
			/* If we are on LOOP_NO_EMPTY_SIZE, we can't
			 * set up a new cluster, so let's just skip it
			 * and let the allocator find whatever block
			 * it can find. If we reach this point, we
			 * will have tried the cluster allocator
			 * plenty of times and not have found
			 * anything, so we are likely way too
			 * fragmented for the clustering stuff to find
			 * anything.
			 *
			 * However, if the cluster is taken from the
			 * current block group, release the cluster
			 * first, so that we stand a better chance of
			 * succeeding in the unclustered
			 * allocation.  */
			if (loop >= LOOP_NO_EMPTY_SIZE &&
			    used_block_group != block_group) {
				spin_unlock(&last_ptr->refill_lock);
				btrfs_release_block_group(used_block_group,
							  delalloc);
				goto unclustered_alloc;
			}

			/*
			 * this cluster didn't work out, free it and
			 * start over
			 */
			btrfs_return_cluster_to_free_space(NULL, last_ptr);

			if (used_block_group != block_group)
				btrfs_release_block_group(used_block_group,
							  delalloc);
refill_cluster:
			if (loop >= LOOP_NO_EMPTY_SIZE) {
				spin_unlock(&last_ptr->refill_lock);
				goto unclustered_alloc;
			}

			aligned_cluster = max_t(unsigned long,
						empty_cluster + empty_size,
						block_group->full_stripe_len);

			/* allocate a cluster in this block group */
			ret = btrfs_find_space_cluster(fs_info, block_group,
						       last_ptr, search_start,
						       num_bytes,
						       aligned_cluster);
			if (ret == 0) {
				/*
				 * now pull our allocation out of this
				 * cluster
				 */
				offset = btrfs_alloc_from_cluster(block_group,
							last_ptr,
							num_bytes,
							search_start,
							&max_extent_size);
				if (offset) {
					/* we found one, proceed */
					spin_unlock(&last_ptr->refill_lock);
					trace_btrfs_reserve_extent_cluster(fs_info,
						block_group, search_start,
						num_bytes);
					goto checks;
				}
			} else if (!cached && loop > LOOP_CACHING_NOWAIT &&
				   !failed_cluster_refill) {
				spin_unlock(&last_ptr->refill_lock);

				failed_cluster_refill = true;
				wait_block_group_cache_progress(block_group,
				       num_bytes + empty_cluster + empty_size);
				goto have_block_group;
			}

			/*
			 * at this point we either didn't find a cluster
			 * or we weren't able to allocate a block from our
			 * cluster.  Free the cluster we've been trying
			 * to use, and go to the next block group
			 */
			btrfs_return_cluster_to_free_space(NULL, last_ptr);
			spin_unlock(&last_ptr->refill_lock);
			goto loop;
		}
unclustered_alloc:
		/*
		 * We are doing an unclustered alloc, set the fragmented flag so
		 * we don't bother trying to setup a cluster again until we get
		 * more space.
		 */
		if (unlikely(last_ptr)) {
			spin_lock(&last_ptr->lock);
			last_ptr->fragmented = 1;
			spin_unlock(&last_ptr->lock);
		}
		if (cached) {
			struct btrfs_free_space_ctl *ctl =
				block_group->free_space_ctl;

			spin_lock(&ctl->tree_lock);
			if (ctl->free_space <
			    num_bytes + empty_cluster + empty_size) {
				if (ctl->free_space > max_extent_size)
					max_extent_size = ctl->free_space;
				spin_unlock(&ctl->tree_lock);
				goto loop;
			}
			spin_unlock(&ctl->tree_lock);
		}

		offset = btrfs_find_space_for_alloc(block_group, search_start,
						    num_bytes, empty_size,
						    &max_extent_size);
		/*
		 * If we didn't find a chunk, and we haven't failed on this
		 * block group before, and this block group is in the middle of
		 * caching and we are ok with waiting, then go ahead and wait
		 * for progress to be made, and set failed_alloc to true.
		 *
		 * If failed_alloc is true then we've already waited on this
		 * block group once and should move on to the next block group.
		 */
		if (!offset && !failed_alloc && !cached &&
		    loop > LOOP_CACHING_NOWAIT) {
			wait_block_group_cache_progress(block_group,
							num_bytes + empty_size);
			failed_alloc = true;
			goto have_block_group;
		} else if (!offset) {
			goto loop;
		}
checks:
		search_start = ALIGN(offset, fs_info->stripesize);

		/* move on to the next group */
		if (search_start + num_bytes >
		    block_group->key.objectid + block_group->key.offset) {
			btrfs_add_free_space(block_group, offset, num_bytes);
			goto loop;
		}

		if (offset < search_start)
			btrfs_add_free_space(block_group, offset,
					     search_start - offset);
		BUG_ON(offset > search_start);

		ret = btrfs_add_reserved_bytes(block_group, ram_bytes,
					       num_bytes, delalloc);
		if (ret == -EAGAIN) {
			btrfs_add_free_space(block_group, offset, num_bytes);
			goto loop;
		}
		btrfs_inc_block_group_reservations(block_group);

		/* we are all good, let's return */
		ins->objectid = search_start;
		ins->offset = num_bytes;

		trace_btrfs_reserve_extent(fs_info, block_group,
					   search_start, num_bytes);
		btrfs_release_block_group(block_group, delalloc);
		break;
loop:
		failed_cluster_refill = false;
		failed_alloc = false;
		BUG_ON(index != get_block_group_index(block_group));
		btrfs_release_block_group(block_group, delalloc);
		cond_resched();
	}
	up_read(&space_info->groups_sem);

	if ((loop == LOOP_CACHING_NOWAIT) && have_caching_bg &&
	    !orig_have_caching_bg)
		orig_have_caching_bg = true;

	if (!ins->objectid && loop >= LOOP_CACHING_WAIT && have_caching_bg)
		goto search;

	if (!ins->objectid && ++index < BTRFS_NR_RAID_TYPES)
		goto search;

	/*
	 * LOOP_CACHING_NOWAIT, search partially cached block groups, kicking
	 *			caching kthreads as we move along
	 * LOOP_CACHING_WAIT, search everything, and wait if our bg is caching
	 * LOOP_ALLOC_CHUNK, force a chunk allocation and try again
	 * LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try
	 *			again
	 */
	if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE) {
		index = 0;
		if (loop == LOOP_CACHING_NOWAIT) {
			/*
			 * We want to skip the LOOP_CACHING_WAIT step if we
			 * don't have any uncached bgs and we've already done a
			 * full search through.
			 */
			if (orig_have_caching_bg || !full_search)
				loop = LOOP_CACHING_WAIT;
			else
				loop = LOOP_ALLOC_CHUNK;
		} else {
			loop++;
		}

		if (loop == LOOP_ALLOC_CHUNK) {
			struct btrfs_trans_handle *trans;
			int exist = 0;

			trans = current->journal_info;
			if (trans)
				exist = 1;
			else
				trans = btrfs_join_transaction(root);

			if (IS_ERR(trans)) {
				ret = PTR_ERR(trans);
				goto out;
			}

			ret = do_chunk_alloc(trans, fs_info, flags,
					     CHUNK_ALLOC_FORCE);

			/*
			 * If we can't allocate a new chunk we've already looped
			 * through at least once, move on to the NO_EMPTY_SIZE
			 * case.
			 */
			if (ret == -ENOSPC)
				loop = LOOP_NO_EMPTY_SIZE;

			/*
			 * Do not bail out on ENOSPC since we
			 * can do more things.
			 */
			if (ret < 0 && ret != -ENOSPC)
				btrfs_abort_transaction(trans, ret);
			else
				ret = 0;
			if (!exist)
				btrfs_end_transaction(trans);
			if (ret)
				goto out;
		}

		if (loop == LOOP_NO_EMPTY_SIZE) {
			/*
			 * Don't loop again if we already have no empty_size and
			 * no empty_cluster.
			 */
			if (empty_size == 0 &&
			    empty_cluster == 0) {
				ret = -ENOSPC;
				goto out;
			}
			empty_size = 0;
			empty_cluster = 0;
		}

		goto search;
	} else if (!ins->objectid) {
		ret = -ENOSPC;
	} else if (ins->objectid) {
		if (!use_cluster && last_ptr) {
			spin_lock(&last_ptr->lock);
			last_ptr->window_start = ins->objectid;
			spin_unlock(&last_ptr->lock);
		}
		ret = 0;
	}
out:
	if (ret == -ENOSPC) {
		spin_lock(&space_info->lock);
		space_info->max_extent_size = max_extent_size;
		spin_unlock(&space_info->lock);
		ins->offset = max_extent_size;
	}
	return ret;
}
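
/*
 * A minimal caller sketch (values are illustrative only):
 *
 *	struct btrfs_key ins = {};
 *	int ret = find_free_extent(fs_info, SZ_1M, SZ_1M, 0, 0, &ins,
 *				   BTRFS_BLOCK_GROUP_DATA, 0);
 *
 * On success ins.objectid holds the start of the reserved range and
 * ins.offset its length; on -ENOSPC ins.offset instead reports the largest
 * free extent seen, which btrfs_reserve_extent() below uses to size its
 * retry.
 */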
static void dump_space_info(struct btrfs_fs_info *fs_info,
			    struct btrfs_space_info *info, u64 bytes,
			    int dump_block_groups)
{
	struct btrfs_block_group_cache *cache;
	int index = 0;

	spin_lock(&info->lock);
	btrfs_info(fs_info, "space_info %llu has %llu free, is %sfull",
		   info->flags,
		   info->total_bytes - btrfs_space_info_used(info, true),
		   info->full ? "" : "not ");
	btrfs_info(fs_info,
		"space_info total=%llu, used=%llu, pinned=%llu, reserved=%llu, may_use=%llu, readonly=%llu",
		info->total_bytes, info->bytes_used, info->bytes_pinned,
		info->bytes_reserved, info->bytes_may_use,
		info->bytes_readonly);
	spin_unlock(&info->lock);

	if (!dump_block_groups)
		return;

	down_read(&info->groups_sem);
again:
	list_for_each_entry(cache, &info->block_groups[index], list) {
		spin_lock(&cache->lock);
		btrfs_info(fs_info,
			"block group %llu has %llu bytes, %llu used %llu pinned %llu reserved %s",
			cache->key.objectid, cache->key.offset,
			btrfs_block_group_used(&cache->item), cache->pinned,
			cache->reserved, cache->ro ? "[readonly]" : "");
		btrfs_dump_free_space(cache, bytes);
		spin_unlock(&cache->lock);
	}
	if (++index < BTRFS_NR_RAID_TYPES)
		goto again;
	up_read(&info->groups_sem);
}
int btrfs_reserve_extent(struct btrfs_root *root, u64 ram_bytes,
			 u64 num_bytes, u64 min_alloc_size,
			 u64 empty_size, u64 hint_byte,
			 struct btrfs_key *ins, int is_data, int delalloc)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	bool final_tried = num_bytes == min_alloc_size;
	u64 flags;
	int ret;

	flags = get_alloc_profile_by_root(root, is_data);
again:
	WARN_ON(num_bytes < fs_info->sectorsize);
	ret = find_free_extent(fs_info, ram_bytes, num_bytes, empty_size,
			       hint_byte, ins, flags, delalloc);
	if (!ret && !is_data) {
		btrfs_dec_block_group_reservations(fs_info, ins->objectid);
	} else if (ret == -ENOSPC) {
		if (!final_tried && ins->offset) {
			num_bytes = min(num_bytes >> 1, ins->offset);
			num_bytes = round_down(num_bytes,
					       fs_info->sectorsize);
			num_bytes = max(num_bytes, min_alloc_size);
			ram_bytes = num_bytes;
			if (num_bytes == min_alloc_size)
				final_tried = true;
			goto again;
		} else if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
			struct btrfs_space_info *sinfo;

			sinfo = __find_space_info(fs_info, flags);
			btrfs_err(fs_info,
				  "allocation failed flags %llu, wanted %llu",
				  flags, num_bytes);
			if (sinfo)
				dump_space_info(fs_info, sinfo, num_bytes, 1);
		}
	}

	return ret;
}
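
/*
 * The -ENOSPC retry above halves num_bytes but never asks for more than the
 * largest hole the failed search reported in ins->offset. For example, a 1M
 * request with min_alloc_size = 64K that fails while the biggest free
 * extent is 300K retries at min(512K, 300K) = 300K (rounded down to the
 * sectorsize), and keeps shrinking until either an allocation succeeds or
 * the 64K floor has been tried once, at which point final_tried is set.
 */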
static int __btrfs_free_reserved_extent(struct btrfs_fs_info *fs_info,
					u64 start, u64 len,
					int pin, int delalloc)
{
	struct btrfs_block_group_cache *cache;
	int ret = 0;

	cache = btrfs_lookup_block_group(fs_info, start);
	if (!cache) {
		btrfs_err(fs_info, "Unable to find block group for %llu",
			  start);
		return -ENOSPC;
	}

	if (pin)
		pin_down_extent(fs_info, cache, start, len, 1);
	else {
		if (btrfs_test_opt(fs_info, DISCARD))
			ret = btrfs_discard_extent(fs_info, start, len, NULL);
		btrfs_add_free_space(cache, start, len);
		btrfs_free_reserved_bytes(cache, len, delalloc);
		trace_btrfs_reserved_extent_free(fs_info, start, len);
	}

	btrfs_put_block_group(cache);
	return ret;
}

int btrfs_free_reserved_extent(struct btrfs_fs_info *fs_info,
			       u64 start, u64 len, int delalloc)
{
	return __btrfs_free_reserved_extent(fs_info, start, len, 0, delalloc);
}

int btrfs_free_and_pin_reserved_extent(struct btrfs_fs_info *fs_info,
				       u64 start, u64 len)
{
	return __btrfs_free_reserved_extent(fs_info, start, len, 1, 0);
}
static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
				      struct btrfs_fs_info *fs_info,
				      u64 parent, u64 root_objectid,
				      u64 flags, u64 owner, u64 offset,
				      struct btrfs_key *ins, int ref_mod)
{
	int ret;
	struct btrfs_extent_item *extent_item;
	struct btrfs_extent_inline_ref *iref;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	int type;
	u32 size;

	if (parent > 0)
		type = BTRFS_SHARED_DATA_REF_KEY;
	else
		type = BTRFS_EXTENT_DATA_REF_KEY;

	size = sizeof(*extent_item) + btrfs_extent_inline_ref_size(type);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->leave_spinning = 1;
	ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
				      ins, size);
	if (ret) {
		btrfs_free_path(path);
		return ret;
	}

	leaf = path->nodes[0];
	extent_item = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_item);
	btrfs_set_extent_refs(leaf, extent_item, ref_mod);
	btrfs_set_extent_generation(leaf, extent_item, trans->transid);
	btrfs_set_extent_flags(leaf, extent_item,
			       flags | BTRFS_EXTENT_FLAG_DATA);

	iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
	btrfs_set_extent_inline_ref_type(leaf, iref, type);
	if (parent > 0) {
		struct btrfs_shared_data_ref *ref;
		ref = (struct btrfs_shared_data_ref *)(iref + 1);
		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
		btrfs_set_shared_data_ref_count(leaf, ref, ref_mod);
	} else {
		struct btrfs_extent_data_ref *ref;
		ref = (struct btrfs_extent_data_ref *)(&iref->offset);
		btrfs_set_extent_data_ref_root(leaf, ref, root_objectid);
		btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
		btrfs_set_extent_data_ref_offset(leaf, ref, offset);
		btrfs_set_extent_data_ref_count(leaf, ref, ref_mod);
	}

	btrfs_mark_buffer_dirty(path->nodes[0]);
	btrfs_free_path(path);

	ret = remove_from_free_space_tree(trans, fs_info, ins->objectid,
					  ins->offset);
	if (ret)
		return ret;

	ret = update_block_group(trans, fs_info, ins->objectid, ins->offset, 1);
	if (ret) { /* -ENOENT, logic error */
		btrfs_err(fs_info, "update block group failed for %llu %llu",
			  ins->objectid, ins->offset);
		BUG();
	}
	trace_btrfs_reserved_extent_alloc(fs_info, ins->objectid, ins->offset);
	return ret;
}
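
/*
 * Layout sketch of the item written above, for the non-shared case
 * (parent == 0):
 *
 *	key (objectid = extent start, type = EXTENT_ITEM, offset = length)
 *	struct btrfs_extent_item       { refs, generation, FLAG_DATA }
 *	struct btrfs_extent_inline_ref { type = EXTENT_DATA_REF_KEY, ... }
 *	struct btrfs_extent_data_ref   { root, objectid, offset, count }
 *
 * With a parent the inline ref is instead a SHARED_DATA_REF keyed by the
 * parent block's bytenr and carrying only a count.
 */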
static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
				     struct btrfs_fs_info *fs_info,
				     u64 parent, u64 root_objectid,
				     u64 flags, struct btrfs_disk_key *key,
				     int level, struct btrfs_key *ins)
{
	int ret;
	struct btrfs_extent_item *extent_item;
	struct btrfs_tree_block_info *block_info;
	struct btrfs_extent_inline_ref *iref;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	u32 size = sizeof(*extent_item) + sizeof(*iref);
	u64 num_bytes = ins->offset;
	bool skinny_metadata = btrfs_fs_incompat(fs_info, SKINNY_METADATA);

	if (!skinny_metadata)
		size += sizeof(*block_info);

	path = btrfs_alloc_path();
	if (!path) {
		btrfs_free_and_pin_reserved_extent(fs_info, ins->objectid,
						   fs_info->nodesize);
		return -ENOMEM;
	}

	path->leave_spinning = 1;
	ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
				      ins, size);
	if (ret) {
		btrfs_free_path(path);
		btrfs_free_and_pin_reserved_extent(fs_info, ins->objectid,
						   fs_info->nodesize);
		return ret;
	}

	leaf = path->nodes[0];
	extent_item = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_item);
	btrfs_set_extent_refs(leaf, extent_item, 1);
	btrfs_set_extent_generation(leaf, extent_item, trans->transid);
	btrfs_set_extent_flags(leaf, extent_item,
			       flags | BTRFS_EXTENT_FLAG_TREE_BLOCK);

	if (skinny_metadata) {
		iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
		num_bytes = fs_info->nodesize;
	} else {
		block_info = (struct btrfs_tree_block_info *)(extent_item + 1);
		btrfs_set_tree_block_key(leaf, block_info, key);
		btrfs_set_tree_block_level(leaf, block_info, level);
		iref = (struct btrfs_extent_inline_ref *)(block_info + 1);
	}

	if (parent > 0) {
		BUG_ON(!(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
		btrfs_set_extent_inline_ref_type(leaf, iref,
						 BTRFS_SHARED_BLOCK_REF_KEY);
		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
	} else {
		btrfs_set_extent_inline_ref_type(leaf, iref,
						 BTRFS_TREE_BLOCK_REF_KEY);
		btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
	}

	btrfs_mark_buffer_dirty(leaf);
	btrfs_free_path(path);

	ret = remove_from_free_space_tree(trans, fs_info, ins->objectid,
					  num_bytes);
	if (ret)
		return ret;

	ret = update_block_group(trans, fs_info, ins->objectid,
				 fs_info->nodesize, 1);
	if (ret) { /* -ENOENT, logic error */
		btrfs_err(fs_info, "update block group failed for %llu %llu",
			  ins->objectid, ins->offset);
		BUG();
	}

	trace_btrfs_reserved_extent_alloc(fs_info, ins->objectid,
					  fs_info->nodesize);
	return ret;
}
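
/*
 * The skinny metadata case above relies on the key itself describing the
 * block: with SKINNY_METADATA the ins key it is handed is (bytenr,
 * METADATA_ITEM, level) and no btrfs_tree_block_info is stored, so
 * num_bytes must be taken from fs_info->nodesize rather than ins->offset.
 * Without the incompat flag the key is (bytenr, EXTENT_ITEM, nodesize) and
 * the first key plus the level are recorded in the tree_block_info.
 */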
int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root, u64 owner,
				     u64 offset, u64 ram_bytes,
				     struct btrfs_key *ins)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret;

	BUG_ON(root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID);

	btrfs_ref_tree_mod(root, ins->objectid, ins->offset, 0,
			   root->root_key.objectid, owner, offset,
			   BTRFS_ADD_DELAYED_EXTENT);

	ret = btrfs_add_delayed_data_ref(fs_info, trans, ins->objectid,
					 ins->offset, 0,
					 root->root_key.objectid, owner,
					 offset, ram_bytes,
					 BTRFS_ADD_DELAYED_EXTENT, NULL, NULL);
	return ret;
}

/*
 * this is used by the tree logging recovery code.  It records that
 * an extent has been allocated and makes sure to clear the free
 * space cache bits as well
 */
int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
				   struct btrfs_fs_info *fs_info,
				   u64 root_objectid, u64 owner, u64 offset,
				   struct btrfs_key *ins)
{
	int ret;
	struct btrfs_block_group_cache *block_group;
	struct btrfs_space_info *space_info;

	/*
	 * Mixed block groups will exclude before processing the log so we only
	 * need to do the exclude dance if this fs isn't mixed.
	 */
	if (!btrfs_fs_incompat(fs_info, MIXED_GROUPS)) {
		ret = __exclude_logged_extent(fs_info, ins->objectid,
					      ins->offset);
		if (ret)
			return ret;
	}

	block_group = btrfs_lookup_block_group(fs_info, ins->objectid);
	if (!block_group)
		return -EINVAL;

	space_info = block_group->space_info;
	spin_lock(&space_info->lock);
	spin_lock(&block_group->lock);
	space_info->bytes_reserved += ins->offset;
	block_group->reserved += ins->offset;
	spin_unlock(&block_group->lock);
	spin_unlock(&space_info->lock);

	ret = alloc_reserved_file_extent(trans, fs_info, 0, root_objectid,
					 0, owner, offset, ins, 1);
	btrfs_put_block_group(block_group);
	return ret;
}
static struct extent_buffer *
btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		      u64 bytenr, int level)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_buffer *buf;

	buf = btrfs_find_create_tree_block(fs_info, bytenr);
	if (IS_ERR(buf))
		return buf;

	btrfs_set_header_generation(buf, trans->transid);
	btrfs_set_buffer_lockdep_class(root->root_key.objectid, buf, level);
	btrfs_tree_lock(buf);
	clean_tree_block(fs_info, buf);
	clear_bit(EXTENT_BUFFER_STALE, &buf->bflags);

	btrfs_set_lock_blocking(buf);
	set_extent_buffer_uptodate(buf);

	if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
		buf->log_index = root->log_transid % 2;
		/*
		 * we allow two log transactions at a time, use different
		 * EXTENT bits to differentiate dirty pages.
		 */
		if (buf->log_index == 0)
			set_extent_dirty(&root->dirty_log_pages, buf->start,
					 buf->start + buf->len - 1, GFP_NOFS);
		else
			set_extent_new(&root->dirty_log_pages, buf->start,
				       buf->start + buf->len - 1);
	} else {
		buf->log_index = -1;
		set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
				 buf->start + buf->len - 1, GFP_NOFS);
	}
	trans->dirty = true;
	/* this returns a buffer locked for blocking */
	return buf;
}
static struct btrfs_block_rsv *
use_block_rsv(struct btrfs_trans_handle *trans,
	      struct btrfs_root *root, u32 blocksize)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_block_rsv *block_rsv;
	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
	int ret;
	bool global_updated = false;

	block_rsv = get_block_rsv(trans, root);

	if (unlikely(block_rsv->size == 0))
		goto try_reserve;
again:
	ret = block_rsv_use_bytes(block_rsv, blocksize);
	if (!ret)
		return block_rsv;

	if (block_rsv->failfast)
		return ERR_PTR(ret);

	if (block_rsv->type == BTRFS_BLOCK_RSV_GLOBAL && !global_updated) {
		global_updated = true;
		update_global_block_rsv(fs_info);
		goto again;
	}

	if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
		static DEFINE_RATELIMIT_STATE(_rs,
				DEFAULT_RATELIMIT_INTERVAL * 10,
				/*DEFAULT_RATELIMIT_BURST*/ 1);
		if (__ratelimit(&_rs))
			WARN(1, KERN_DEBUG
				"BTRFS: block rsv returned %d\n", ret);
	}
try_reserve:
	ret = reserve_metadata_bytes(root, block_rsv, blocksize,
				     BTRFS_RESERVE_NO_FLUSH);
	if (!ret)
		return block_rsv;
	/*
	 * If we couldn't reserve metadata bytes try and use some from
	 * the global reserve if its space type is the same as the global
	 * reservation.
	 */
	if (block_rsv->type != BTRFS_BLOCK_RSV_GLOBAL &&
	    block_rsv->space_info == global_rsv->space_info) {
		ret = block_rsv_use_bytes(global_rsv, blocksize);
		if (!ret)
			return global_rsv;
	}
	return ERR_PTR(ret);
}

static void unuse_block_rsv(struct btrfs_fs_info *fs_info,
			    struct btrfs_block_rsv *block_rsv, u32 blocksize)
{
	block_rsv_add_bytes(block_rsv, blocksize, 0);
	block_rsv_release_bytes(fs_info, block_rsv, NULL, 0);
}
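
/*
 * use_block_rsv() and unuse_block_rsv() roughly pair around the lifetime of
 * a tree block allocation: btrfs_alloc_tree_block() below charges one
 * nodesize worth of metadata up front and, on any failure path, returns
 * those bytes via unuse_block_rsv(), which re-adds the blocksize to the rsv
 * and then lets block_rsv_release_bytes() push any excess back to the
 * space_info.
 */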
/*
 * finds a free extent and does all the dirty work required for allocation;
 * returns the tree buffer or an ERR_PTR on error.
 */
struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
					     struct btrfs_root *root,
					     u64 parent, u64 root_objectid,
					     const struct btrfs_disk_key *key,
					     int level, u64 hint,
					     u64 empty_size)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_key ins;
	struct btrfs_block_rsv *block_rsv;
	struct extent_buffer *buf;
	struct btrfs_delayed_extent_op *extent_op;
	u64 flags = 0;
	int ret;
	u32 blocksize = fs_info->nodesize;
	bool skinny_metadata = btrfs_fs_incompat(fs_info, SKINNY_METADATA);

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
	if (btrfs_is_testing(fs_info)) {
		buf = btrfs_init_new_buffer(trans, root, root->alloc_bytenr,
					    level);
		if (!IS_ERR(buf))
			root->alloc_bytenr += blocksize;
		return buf;
	}
#endif

	block_rsv = use_block_rsv(trans, root, blocksize);
	if (IS_ERR(block_rsv))
		return ERR_CAST(block_rsv);

	ret = btrfs_reserve_extent(root, blocksize, blocksize, blocksize,
				   empty_size, hint, &ins, 0, 0);
	if (ret)
		goto out_unuse;

	buf = btrfs_init_new_buffer(trans, root, ins.objectid, level);
	if (IS_ERR(buf)) {
		ret = PTR_ERR(buf);
		goto out_free_reserved;
	}

	if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) {
		if (parent == 0)
			parent = ins.objectid;
		flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
	} else
		BUG_ON(parent > 0);

	if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {
		extent_op = btrfs_alloc_delayed_extent_op();
		if (!extent_op) {
			ret = -ENOMEM;
			goto out_free_buf;
		}
		if (key)
			memcpy(&extent_op->key, key, sizeof(extent_op->key));
		else
			memset(&extent_op->key, 0, sizeof(extent_op->key));
		extent_op->flags_to_set = flags;
		extent_op->update_key = skinny_metadata ? false : true;
		extent_op->update_flags = true;
		extent_op->is_data = false;
		extent_op->level = level;

		btrfs_ref_tree_mod(root, ins.objectid, ins.offset, parent,
				   root_objectid, level, 0,
				   BTRFS_ADD_DELAYED_EXTENT);
		ret = btrfs_add_delayed_tree_ref(fs_info, trans, ins.objectid,
						 ins.offset, parent,
						 root_objectid, level,
						 BTRFS_ADD_DELAYED_EXTENT,
						 extent_op, NULL, NULL);
		if (ret)
			goto out_free_delayed;
	}
	return buf;

out_free_delayed:
	btrfs_free_delayed_extent_op(extent_op);
out_free_buf:
	free_extent_buffer(buf);
out_free_reserved:
	btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 0);
out_unuse:
	unuse_block_rsv(fs_info, block_rsv, blocksize);
	return ERR_PTR(ret);
}
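
/*
 * Note the unwind order of the error labels above, which releases resources
 * in reverse order of acquisition: the delayed extent op first, then the
 * extent buffer, then the reserved extent itself, and finally the block
 * reservation taken from use_block_rsv().
 */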
struct walk_control {
	u64 refs[BTRFS_MAX_LEVEL];
	u64 flags[BTRFS_MAX_LEVEL];
	struct btrfs_key update_progress;
	int stage;
	int level;
	int shared_level;
	int update_ref;
	int keep_locks;
	int reada_slot;
	int reada_count;
	int for_reloc;
};

#define DROP_REFERENCE	1
#define UPDATE_BACKREF	2
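
/*
 * Sketch of the two stages used by the walk below: DROP_REFERENCE descends
 * dropping the tree's reference on every block it owns outright, while
 * UPDATE_BACKREF is entered when a shared subtree must first be converted
 * to full backrefs; walk_up_proc() flips the stage back to DROP_REFERENCE
 * once the shared level has been processed.
 */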
static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     struct walk_control *wc,
				     struct btrfs_path *path)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 bytenr;
	u64 generation;
	u64 refs;
	u64 flags;
	u32 nritems;
	struct btrfs_key key;
	struct extent_buffer *eb;
	int ret;
	int slot;
	int nread = 0;

	if (path->slots[wc->level] < wc->reada_slot) {
		wc->reada_count = wc->reada_count * 2 / 3;
		wc->reada_count = max(wc->reada_count, 2);
	} else {
		wc->reada_count = wc->reada_count * 3 / 2;
		wc->reada_count = min_t(int, wc->reada_count,
					BTRFS_NODEPTRS_PER_BLOCK(fs_info));
	}

	eb = path->nodes[wc->level];
	nritems = btrfs_header_nritems(eb);

	for (slot = path->slots[wc->level]; slot < nritems; slot++) {
		if (nread >= wc->reada_count)
			break;

		cond_resched();
		bytenr = btrfs_node_blockptr(eb, slot);
		generation = btrfs_node_ptr_generation(eb, slot);

		if (slot == path->slots[wc->level])
			goto reada;

		if (wc->stage == UPDATE_BACKREF &&
		    generation <= root->root_key.offset)
			continue;

		/* We don't lock the tree block, it's OK to be racy here */
		ret = btrfs_lookup_extent_info(trans, fs_info, bytenr,
					       wc->level - 1, 1, &refs,
					       &flags);
		/* We don't care about errors in readahead. */
		if (ret < 0)
			continue;
		BUG_ON(refs == 0);

		if (wc->stage == DROP_REFERENCE) {
			if (refs == 1)
				goto reada;

			if (wc->level == 1 &&
			    (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
				continue;
			if (!wc->update_ref ||
			    generation <= root->root_key.offset)
				continue;
			btrfs_node_key_to_cpu(eb, &key, slot);
			ret = btrfs_comp_cpu_keys(&key,
						  &wc->update_progress);
			if (ret < 0)
				continue;
		} else {
			if (wc->level == 1 &&
			    (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
				continue;
		}
reada:
		readahead_tree_block(fs_info, bytenr);
		nread++;
	}
	wc->reada_slot = slot;
}
/*
 * helper to process tree block while walking down the tree.
 *
 * when wc->stage == UPDATE_BACKREF, this function updates
 * back refs for pointers in the block.
 *
 * NOTE: return value 1 means we should stop walking down.
 */
static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct btrfs_path *path,
				   struct walk_control *wc, int lookup_info)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int level = wc->level;
	struct extent_buffer *eb = path->nodes[level];
	u64 flag = BTRFS_BLOCK_FLAG_FULL_BACKREF;
	int ret;

	if (wc->stage == UPDATE_BACKREF &&
	    btrfs_header_owner(eb) != root->root_key.objectid)
		return 1;

	/*
	 * when reference count of tree block is 1, it won't increase
	 * again. once full backref flag is set, we never clear it.
	 */
	if (lookup_info &&
	    ((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) ||
	     (wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag)))) {
		BUG_ON(!path->locks[level]);
		ret = btrfs_lookup_extent_info(trans, fs_info,
					       eb->start, level, 1,
					       &wc->refs[level],
					       &wc->flags[level]);
		BUG_ON(ret == -ENOMEM);
		if (ret)
			return ret;
		BUG_ON(wc->refs[level] == 0);
	}

	if (wc->stage == DROP_REFERENCE) {
		if (wc->refs[level] > 1)
			return 1;

		if (path->locks[level] && !wc->keep_locks) {
			btrfs_tree_unlock_rw(eb, path->locks[level]);
			path->locks[level] = 0;
		}
		return 0;
	}

	/* wc->stage == UPDATE_BACKREF */
	if (!(wc->flags[level] & flag)) {
		BUG_ON(!path->locks[level]);
		ret = btrfs_inc_ref(trans, root, eb, 1);
		BUG_ON(ret); /* -ENOMEM */
		ret = btrfs_dec_ref(trans, root, eb, 0);
		BUG_ON(ret); /* -ENOMEM */
		ret = btrfs_set_disk_extent_flags(trans, fs_info, eb->start,
						  eb->len, flag,
						  btrfs_header_level(eb), 0);
		BUG_ON(ret); /* -ENOMEM */
		wc->flags[level] |= flag;
	}

	/*
	 * the block is shared by multiple trees, so it's not good to
	 * keep the tree lock
	 */
	if (path->locks[level] && level > 0) {
		btrfs_tree_unlock_rw(eb, path->locks[level]);
		path->locks[level] = 0;
	}
	return 0;
}
/*
 * helper to process tree block pointer.
 *
 * when wc->stage == DROP_REFERENCE, this function checks
 * reference count of the block pointed to. if the block
 * is shared and we need to update back refs for the subtree
 * rooted at the block, this function changes wc->stage to
 * UPDATE_BACKREF. if the block is shared and there is no
 * need to update back refs, this function drops the reference
 * to the block.
 *
 * NOTE: return value 1 means we should stop walking down.
 */
static noinline int do_walk_down(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct walk_control *wc, int *lookup_info)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 bytenr;
	u64 generation;
	u64 parent;
	u32 blocksize;
	struct btrfs_key key;
	struct extent_buffer *next;
	int level = wc->level;
	int reada = 0;
	int ret = 0;
	bool need_account = false;

	generation = btrfs_node_ptr_generation(path->nodes[level],
					       path->slots[level]);
	/*
	 * if the lower level block was created before the snapshot
	 * was created, we know there is no need to update back refs
	 * for the subtree
	 */
	if (wc->stage == UPDATE_BACKREF &&
	    generation <= root->root_key.offset) {
		*lookup_info = 1;
		return 1;
	}

	bytenr = btrfs_node_blockptr(path->nodes[level], path->slots[level]);
	blocksize = fs_info->nodesize;

	next = find_extent_buffer(fs_info, bytenr);
	if (!next) {
		next = btrfs_find_create_tree_block(fs_info, bytenr);
		if (IS_ERR(next))
			return PTR_ERR(next);

		btrfs_set_buffer_lockdep_class(root->root_key.objectid, next,
					       level - 1);
		reada = 1;
	}
	btrfs_tree_lock(next);
	btrfs_set_lock_blocking(next);

	ret = btrfs_lookup_extent_info(trans, fs_info, bytenr, level - 1, 1,
				       &wc->refs[level - 1],
				       &wc->flags[level - 1]);
	if (ret < 0)
		goto out_unlock;

	if (unlikely(wc->refs[level - 1] == 0)) {
		btrfs_err(fs_info, "Missing references.");
		ret = -EIO;
		goto out_unlock;
	}
	*lookup_info = 0;

	if (wc->stage == DROP_REFERENCE) {
		if (wc->refs[level - 1] > 1) {
			need_account = true;
			if (level == 1 &&
			    (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
				goto skip;

			if (!wc->update_ref ||
			    generation <= root->root_key.offset)
				goto skip;

			btrfs_node_key_to_cpu(path->nodes[level], &key,
					      path->slots[level]);
			ret = btrfs_comp_cpu_keys(&key, &wc->update_progress);
			if (ret < 0)
				goto skip;

			wc->stage = UPDATE_BACKREF;
			wc->shared_level = level - 1;
		}
	} else {
		if (level == 1 &&
		    (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
			goto skip;
	}

	if (!btrfs_buffer_uptodate(next, generation, 0)) {
		btrfs_tree_unlock(next);
		free_extent_buffer(next);
		next = NULL;
		*lookup_info = 1;
	}

	if (!next) {
		if (reada && level == 1)
			reada_walk_down(trans, root, wc, path);
		next = read_tree_block(fs_info, bytenr, generation);
		if (IS_ERR(next)) {
			return PTR_ERR(next);
		} else if (!extent_buffer_uptodate(next)) {
			free_extent_buffer(next);
			return -EIO;
		}
		btrfs_tree_lock(next);
		btrfs_set_lock_blocking(next);
	}

	level--;
	ASSERT(level == btrfs_header_level(next));
	if (level != btrfs_header_level(next)) {
		btrfs_err(root->fs_info, "mismatched level");
		ret = -EIO;
		goto out_unlock;
	}
	path->nodes[level] = next;
	path->slots[level] = 0;
	path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
	wc->level = level;
	if (wc->level == 1)
		wc->reada_slot = 0;
	return 0;
skip:
	wc->refs[level - 1] = 0;
	wc->flags[level - 1] = 0;
	if (wc->stage == DROP_REFERENCE) {
		if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
			parent = path->nodes[level]->start;
		} else {
			ASSERT(root->root_key.objectid ==
			       btrfs_header_owner(path->nodes[level]));
			if (root->root_key.objectid !=
			    btrfs_header_owner(path->nodes[level])) {
				btrfs_err(root->fs_info,
					  "mismatched block owner");
				ret = -EIO;
				goto out_unlock;
			}
			parent = 0;
		}

		if (need_account) {
			ret = btrfs_qgroup_trace_subtree(trans, root, next,
							 generation, level - 1);
			if (ret) {
				btrfs_err_rl(fs_info,
					     "Error %d accounting shared subtree. Quota is out of sync, rescan required.",
					     ret);
			}
		}
		ret = btrfs_free_extent(trans, root, bytenr, blocksize,
					parent, root->root_key.objectid,
					level - 1, 0);
		if (ret)
			goto out_unlock;
	}

	*lookup_info = 1;
	ret = 1;

out_unlock:
	btrfs_tree_unlock(next);
	free_extent_buffer(next);

	return ret;
}
/*
 * helper to process tree block while walking up the tree.
 *
 * when wc->stage == DROP_REFERENCE, this function drops
 * reference count on the block.
 *
 * when wc->stage == UPDATE_BACKREF, this function changes
 * wc->stage back to DROP_REFERENCE if we changed wc->stage
 * to UPDATE_BACKREF previously while processing the block.
 *
 * NOTE: return value 1 means we should stop walking up.
 */
static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct walk_control *wc)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret;
	int level = wc->level;
	struct extent_buffer *eb = path->nodes[level];
	u64 parent = 0;

	if (wc->stage == UPDATE_BACKREF) {
		BUG_ON(wc->shared_level < level);
		if (level < wc->shared_level)
			goto out;

		ret = find_next_key(path, level + 1, &wc->update_progress);
		if (ret > 0)
			wc->update_ref = 0;

		wc->stage = DROP_REFERENCE;
		wc->shared_level = -1;
		path->slots[level] = 0;

		/*
		 * check reference count again if the block isn't locked.
		 * we should start walking down the tree again if reference
		 * count is one.
		 */
		if (!path->locks[level]) {
			BUG_ON(level == 0);
			btrfs_tree_lock(eb);
			btrfs_set_lock_blocking(eb);
			path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;

			ret = btrfs_lookup_extent_info(trans, fs_info,
						       eb->start, level, 1,
						       &wc->refs[level],
						       &wc->flags[level]);
			if (ret < 0) {
				btrfs_tree_unlock_rw(eb, path->locks[level]);
				path->locks[level] = 0;
				return ret;
			}
			BUG_ON(wc->refs[level] == 0);
			if (wc->refs[level] == 1) {
				btrfs_tree_unlock_rw(eb, path->locks[level]);
				path->locks[level] = 0;
				return 1;
			}
		}
	}

	/* wc->stage == DROP_REFERENCE */
	BUG_ON(wc->refs[level] > 1 && !path->locks[level]);

	if (wc->refs[level] == 1) {
		if (level == 0) {
			if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
				ret = btrfs_dec_ref(trans, root, eb, 1);
			else
				ret = btrfs_dec_ref(trans, root, eb, 0);
			BUG_ON(ret); /* -ENOMEM */
			ret = btrfs_qgroup_trace_leaf_items(trans, fs_info, eb);
			if (ret) {
				btrfs_err_rl(fs_info,
					     "error %d accounting leaf items. Quota is out of sync, rescan required.",
					     ret);
			}
		}
		/* make block locked assertion in clean_tree_block happy */
		if (!path->locks[level] &&
		    btrfs_header_generation(eb) == trans->transid) {
			btrfs_tree_lock(eb);
			btrfs_set_lock_blocking(eb);
			path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
		}
		clean_tree_block(fs_info, eb);
	}

	if (eb == root->node) {
		if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
			parent = eb->start;
		else
			BUG_ON(root->root_key.objectid !=
			       btrfs_header_owner(eb));
	} else {
		if (wc->flags[level + 1] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
			parent = path->nodes[level + 1]->start;
		else
			BUG_ON(root->root_key.objectid !=
			       btrfs_header_owner(path->nodes[level + 1]));
	}

	btrfs_free_tree_block(trans, root, eb, parent, wc->refs[level] == 1);
out:
	wc->refs[level] = 0;
	wc->flags[level] = 0;
	return 0;
}
static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct btrfs_path *path,
				   struct walk_control *wc)
{
	int level = wc->level;
	int lookup_info = 1;
	int ret;

	while (level >= 0) {
		ret = walk_down_proc(trans, root, path, wc, lookup_info);
		if (ret > 0)
			break;

		if (level == 0)
			break;

		if (path->slots[level] >=
		    btrfs_header_nritems(path->nodes[level]))
			break;

		ret = do_walk_down(trans, root, path, wc, &lookup_info);
		if (ret > 0) {
			path->slots[level]++;
			continue;
		} else if (ret < 0)
			return ret;
		level = wc->level;
	}
	return 0;
}
static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct walk_control *wc, int max_level)
{
	int level = wc->level;
	int ret;

	path->slots[level] = btrfs_header_nritems(path->nodes[level]);
	while (level < max_level && path->nodes[level]) {
		wc->level = level;
		if (path->slots[level] + 1 <
		    btrfs_header_nritems(path->nodes[level])) {
			path->slots[level]++;
			return 0;
		} else {
			ret = walk_up_proc(trans, root, path, wc);
			if (ret > 0)
				return 0;

			if (path->locks[level]) {
				btrfs_tree_unlock_rw(path->nodes[level],
						     path->locks[level]);
				path->locks[level] = 0;
			}
			free_extent_buffer(path->nodes[level]);
			path->nodes[level] = NULL;
			level++;
		}
	}
	return 1;
}
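
/*
 * The two helpers above are driven in a simple alternating loop, as
 * btrfs_drop_snapshot() does below, roughly:
 *
 *	while (1) {
 *		ret = walk_down_tree(trans, root, path, wc);
 *		if (ret < 0)
 *			break;
 *		ret = walk_up_tree(trans, root, path, wc, BTRFS_MAX_LEVEL);
 *		if (ret != 0)
 *			break;	(< 0 means error, > 0 means tree fully dropped)
 *	}
 *
 * walk_down_tree() descends until it hits a leaf or a block it must keep,
 * and walk_up_tree() frees finished nodes and advances to the next slot,
 * returning 1 once the root itself has been processed.
 */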
/*
 * drop a subvolume tree.
 *
 * this function traverses the tree freeing any blocks that are only
 * referenced by the tree.
 *
 * when a shared tree block is found, this function decreases its
 * reference count by one. if update_ref is true, this function
 * also makes sure backrefs for the shared block and all lower level
 * blocks are properly updated.
 *
 * If called with for_reloc == 0, may exit early with -EAGAIN
 */
int btrfs_drop_snapshot(struct btrfs_root *root,
			struct btrfs_block_rsv *block_rsv, int update_ref,
			int for_reloc)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_path *path;
	struct btrfs_trans_handle *trans;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_root_item *root_item = &root->root_item;
	struct walk_control *wc;
	struct btrfs_key key;
	int err = 0;
	int ret;
	int level;
	bool root_dropped = false;

	btrfs_debug(fs_info, "Drop subvolume %llu", root->objectid);

	path = btrfs_alloc_path();
	if (!path) {
		err = -ENOMEM;
		goto out;
	}

	wc = kzalloc(sizeof(*wc), GFP_NOFS);
	if (!wc) {
		btrfs_free_path(path);
		err = -ENOMEM;
		goto out;
	}

	trans = btrfs_start_transaction(tree_root, 0);
	if (IS_ERR(trans)) {
		err = PTR_ERR(trans);
		goto out_free;
	}

	if (block_rsv)
		trans->block_rsv = block_rsv;

	if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
		level = btrfs_header_level(root->node);
		path->nodes[level] = btrfs_lock_root_node(root);
		btrfs_set_lock_blocking(path->nodes[level]);
		path->slots[level] = 0;
		path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
		memset(&wc->update_progress, 0,
		       sizeof(wc->update_progress));
	} else {
		btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
		memcpy(&wc->update_progress, &key,
		       sizeof(wc->update_progress));

		level = root_item->drop_level;
		BUG_ON(level == 0);
		path->lowest_level = level;
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		path->lowest_level = 0;
		if (ret < 0) {
			err = ret;
			goto out_end_trans;
		}
		WARN_ON(ret > 0);

		/*
		 * unlock our path, this is safe because only this
		 * function is allowed to delete this snapshot
		 */
		btrfs_unlock_up_safe(path, 0);

		level = btrfs_header_level(root->node);
		while (1) {
			btrfs_tree_lock(path->nodes[level]);
			btrfs_set_lock_blocking(path->nodes[level]);
			path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;

			ret = btrfs_lookup_extent_info(trans, fs_info,
						path->nodes[level]->start,
						level, 1, &wc->refs[level],
						&wc->flags[level]);
			if (ret < 0) {
				err = ret;
				goto out_end_trans;
			}
			BUG_ON(wc->refs[level] == 0);

			if (level == root_item->drop_level)
				break;

			btrfs_tree_unlock(path->nodes[level]);
			path->locks[level] = 0;
			WARN_ON(wc->refs[level] != 1);
			level--;
		}
	}

	wc->level = level;
	wc->shared_level = -1;
	wc->stage = DROP_REFERENCE;
	wc->update_ref = update_ref;
	wc->keep_locks = 0;
	wc->for_reloc = for_reloc;
	wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(fs_info);

	while (1) {
		ret = walk_down_tree(trans, root, path, wc);
		if (ret < 0) {
			err = ret;
			break;
		}

		ret = walk_up_tree(trans, root, path, wc, BTRFS_MAX_LEVEL);
		if (ret < 0) {
			err = ret;
			break;
		}

		if (ret > 0) {
			BUG_ON(wc->stage != DROP_REFERENCE);
			break;
		}

		if (wc->stage == DROP_REFERENCE) {
			level = wc->level;
			btrfs_node_key(path->nodes[level],
				       &root_item->drop_progress,
				       path->slots[level]);
			root_item->drop_level = level;
		}

		BUG_ON(wc->level == 0);
		if (btrfs_should_end_transaction(trans) ||
		    (!for_reloc && btrfs_need_cleaner_sleep(fs_info))) {
			ret = btrfs_update_root(trans, tree_root,
						&root->root_key,
						root_item);
			if (ret) {
				btrfs_abort_transaction(trans, ret);
				err = ret;
				goto out_end_trans;
			}

			btrfs_end_transaction_throttle(trans);
			if (!for_reloc && btrfs_need_cleaner_sleep(fs_info)) {
				btrfs_debug(fs_info,
					    "drop snapshot early exit");
				err = -EAGAIN;
				goto out_free;
			}

			trans = btrfs_start_transaction(tree_root, 0);
			if (IS_ERR(trans)) {
				err = PTR_ERR(trans);
				goto out_free;
			}
			if (block_rsv)
				trans->block_rsv = block_rsv;
		}
	}
	btrfs_release_path(path);
	if (err)
		goto out_end_trans;

	ret = btrfs_del_root(trans, fs_info, &root->root_key);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		err = ret;
		goto out_end_trans;
	}

	if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
		ret = btrfs_find_root(tree_root, &root->root_key, path,
				      NULL, NULL);
		if (ret < 0) {
			btrfs_abort_transaction(trans, ret);
			err = ret;
			goto out_end_trans;
		} else if (ret > 0) {
			/* if we fail to delete the orphan item this time
			 * around, it'll get picked up the next time.
			 *
			 * The most common failure here is just -ENOENT.
			 */
			btrfs_del_orphan_item(trans, tree_root,
					      root->root_key.objectid);
		}
	}

	if (test_bit(BTRFS_ROOT_IN_RADIX, &root->state)) {
		btrfs_add_dropped_root(trans, root);
	} else {
		free_extent_buffer(root->node);
		free_extent_buffer(root->commit_root);
		btrfs_put_fs_root(root);
	}
	root_dropped = true;
out_end_trans:
	btrfs_end_transaction_throttle(trans);
out_free:
	kfree(wc);
	btrfs_free_path(path);
out:
	/*
	 * So if we need to stop dropping the snapshot for whatever reason we
	 * need to make sure to add it back to the dead root list so that we
	 * keep trying to do the work later.  This also cleans up roots if we
	 * don't have it in the radix (like when we recover after a power fail
	 * or unmount) so we don't leak memory.
	 */
	if (!for_reloc && !root_dropped)
		btrfs_add_dead_root(root);
	if (err && err != -EAGAIN)
		btrfs_handle_fs_error(fs_info, err, NULL);
	return err;
}

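/*
 * Worked example of the resume machinery in btrfs_drop_snapshot() above:
 * if the transaction is ended while wc->level == 1, the key of the node
 * at path->slots[1] is saved in root_item->drop_progress and
 * root_item->drop_level is set to 1 before btrfs_update_root() persists
 * the root item.  On the next invocation (possibly after a remount), the
 * non-zero drop_progress objectid selects the else-branch: we search back
 * down to that key with path->lowest_level = 1, re-take the locks and
 * reference counts for every level above it, and keep deleting from
 * exactly that point.
 */
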
/*
 * drop subtree rooted at tree block 'node'.
 *
 * NOTE: this function will unlock and release tree block 'node'.
 * Only used by relocation code.
 */
int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
			struct btrfs_root *root,
			struct extent_buffer *node,
			struct extent_buffer *parent)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_path *path;
	struct walk_control *wc;
	int level;
	int parent_level;
	int ret = 0;
	int wret;

	BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	wc = kzalloc(sizeof(*wc), GFP_NOFS);
	if (!wc) {
		btrfs_free_path(path);
		return -ENOMEM;
	}

	btrfs_assert_tree_locked(parent);
	parent_level = btrfs_header_level(parent);
	extent_buffer_get(parent);
	path->nodes[parent_level] = parent;
	path->slots[parent_level] = btrfs_header_nritems(parent);

	btrfs_assert_tree_locked(node);
	level = btrfs_header_level(node);
	path->nodes[level] = node;
	path->slots[level] = 0;
	path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;

	wc->refs[parent_level] = 1;
	wc->flags[parent_level] = BTRFS_BLOCK_FLAG_FULL_BACKREF;
	wc->level = level;
	wc->shared_level = -1;
	wc->stage = DROP_REFERENCE;
	wc->update_ref = 0;
	wc->keep_locks = 1;
	wc->for_reloc = 1;
	wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(fs_info);

	while (1) {
		wret = walk_down_tree(trans, root, path, wc);
		if (wret < 0) {
			ret = wret;
			break;
		}

		wret = walk_up_tree(trans, root, path, wc, parent_level);
		if (wret < 0)
			ret = wret;
		if (wret != 0)
			break;
	}

	kfree(wc);
	btrfs_free_path(path);
	return ret;
}

static u64 update_block_group_flags(struct btrfs_fs_info *fs_info, u64 flags)
{
	u64 num_devices;
	u64 stripped;

	/*
	 * if restripe for this chunk_type is on pick target profile and
	 * return, otherwise do the usual balance
	 */
	stripped = get_restripe_target(fs_info, flags);
	if (stripped)
		return extended_to_chunk(stripped);

	num_devices = fs_info->fs_devices->rw_devices;

	stripped = BTRFS_BLOCK_GROUP_RAID0 |
		BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6 |
		BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;

	if (num_devices == 1) {
		stripped |= BTRFS_BLOCK_GROUP_DUP;
		stripped = flags & ~stripped;

		/* turn raid0 into single device chunks */
		if (flags & BTRFS_BLOCK_GROUP_RAID0)
			return stripped;

		/* turn mirroring into duplication */
		if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
			     BTRFS_BLOCK_GROUP_RAID10))
			return stripped | BTRFS_BLOCK_GROUP_DUP;
	} else {
		/* they already had raid on here, just return */
		if (flags & stripped)
			return flags;

		stripped |= BTRFS_BLOCK_GROUP_DUP;
		stripped = flags & ~stripped;

		/* switch duplicated blocks with raid1 */
		if (flags & BTRFS_BLOCK_GROUP_DUP)
			return stripped | BTRFS_BLOCK_GROUP_RAID1;

		/* this is drive concat, leave it alone */
	}

	return flags;
}

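/*
 * Concrete conversions performed above when no restripe target is set:
 *
 *   rw_devices == 1:   RAID0        -> single
 *                      RAID1/RAID10 -> DUP
 *                      RAID5/RAID6  -> unchanged (falls through)
 *   rw_devices  > 1:   DUP          -> RAID1
 *                      RAID*        -> unchanged (already striped/mirrored)
 *                      single       -> unchanged (drive concat)
 */
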
static int inc_block_group_ro(struct btrfs_block_group_cache *cache, int force)
{
	struct btrfs_space_info *sinfo = cache->space_info;
	u64 num_bytes;
	u64 min_allocable_bytes;
	int ret = -ENOSPC;

	/*
	 * We need some metadata space and system metadata space for
	 * allocating chunks in some corner cases until we force to set
	 * it to be readonly.
	 */
	if ((sinfo->flags &
	     (BTRFS_BLOCK_GROUP_SYSTEM | BTRFS_BLOCK_GROUP_METADATA)) &&
	    !force)
		min_allocable_bytes = SZ_1M;
	else
		min_allocable_bytes = 0;

	spin_lock(&sinfo->lock);
	spin_lock(&cache->lock);

	if (cache->ro) {
		cache->ro++;
		ret = 0;
		goto out;
	}

	num_bytes = cache->key.offset - cache->reserved - cache->pinned -
		    cache->bytes_super - btrfs_block_group_used(&cache->item);

	if (btrfs_space_info_used(sinfo, true) + num_bytes +
	    min_allocable_bytes <= sinfo->total_bytes) {
		sinfo->bytes_readonly += num_bytes;
		cache->ro++;
		list_add_tail(&cache->ro_list, &sinfo->ro_bgs);
		ret = 0;
	}
out:
	spin_unlock(&cache->lock);
	spin_unlock(&sinfo->lock);
	return ret;
}

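/*
 * Worked example for the space check above: a 1GiB metadata block group
 * with 600MiB used, 100MiB reserved and nothing pinned or set aside for
 * super blocks gives num_bytes = 1024M - 100M - 0 - 0 - 600M = 324M of
 * currently-free space that flipping the group read-only would take away
 * from the allocator.  With force == 0 we then require that the bytes
 * already in use plus those 324M plus min_allocable_bytes (1M) still fit
 * in sinfo->total_bytes before cache->ro is raised.
 */
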
int btrfs_inc_block_group_ro(struct btrfs_fs_info *fs_info,
			     struct btrfs_block_group_cache *cache)
{
	struct btrfs_trans_handle *trans;
	u64 alloc_flags;
	int ret;

again:
	trans = btrfs_join_transaction(fs_info->extent_root);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	/*
	 * we're not allowed to set block groups readonly after the dirty
	 * block groups cache has started writing.  If it already started,
	 * back off and let this transaction commit
	 */
	mutex_lock(&fs_info->ro_block_group_mutex);
	if (test_bit(BTRFS_TRANS_DIRTY_BG_RUN, &trans->transaction->flags)) {
		u64 transid = trans->transid;

		mutex_unlock(&fs_info->ro_block_group_mutex);
		btrfs_end_transaction(trans);

		ret = btrfs_wait_for_commit(fs_info, transid);
		if (ret)
			return ret;
		goto again;
	}

	/*
	 * if we are changing raid levels, try to allocate a corresponding
	 * block group with the new raid level.
	 */
	alloc_flags = update_block_group_flags(fs_info, cache->flags);
	if (alloc_flags != cache->flags) {
		ret = do_chunk_alloc(trans, fs_info, alloc_flags,
				     CHUNK_ALLOC_FORCE);
		/*
		 * ENOSPC is allowed here, we may have enough space
		 * already allocated at the new raid level to
		 * carry on
		 */
		if (ret == -ENOSPC)
			ret = 0;
		if (ret < 0)
			goto out;
	}

	ret = inc_block_group_ro(cache, 0);
	if (!ret)
		goto out;
	alloc_flags = get_alloc_profile(fs_info, cache->space_info->flags);
	ret = do_chunk_alloc(trans, fs_info, alloc_flags,
			     CHUNK_ALLOC_FORCE);
	if (ret < 0)
		goto out;
	ret = inc_block_group_ro(cache, 0);
out:
	if (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM) {
		alloc_flags = update_block_group_flags(fs_info, cache->flags);
		mutex_lock(&fs_info->chunk_mutex);
		check_system_chunk(trans, fs_info, alloc_flags);
		mutex_unlock(&fs_info->chunk_mutex);
	}
	mutex_unlock(&fs_info->ro_block_group_mutex);

	btrfs_end_transaction(trans);
	return ret;
}

int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans,
			    struct btrfs_fs_info *fs_info, u64 type)
{
	u64 alloc_flags = get_alloc_profile(fs_info, type);

	return do_chunk_alloc(trans, fs_info, alloc_flags, CHUNK_ALLOC_FORCE);
}

/*
 * helper to account the unused space of all the readonly block groups in
 * the space_info.  takes mirrors into account.
 */
u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo)
{
	struct btrfs_block_group_cache *block_group;
	u64 free_bytes = 0;
	int factor;

	/* It's df, we don't care if it's racy */
	if (list_empty(&sinfo->ro_bgs))
		return 0;

	spin_lock(&sinfo->lock);
	list_for_each_entry(block_group, &sinfo->ro_bgs, ro_list) {
		spin_lock(&block_group->lock);

		if (!block_group->ro) {
			spin_unlock(&block_group->lock);
			continue;
		}

		if (block_group->flags & (BTRFS_BLOCK_GROUP_RAID1 |
					  BTRFS_BLOCK_GROUP_RAID10 |
					  BTRFS_BLOCK_GROUP_DUP))
			factor = 2;
		else
			factor = 1;

		free_bytes += (block_group->key.offset -
			       btrfs_block_group_used(&block_group->item)) *
			       factor;

		spin_unlock(&block_group->lock);
	}
	spin_unlock(&sinfo->lock);

	return free_bytes;
}

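/*
 * Example of the mirror factor above: a 1GiB RAID1 block group with
 * 300MiB used contributes (1024M - 300M) * 2 = 1448M to free_bytes,
 * since every free byte of a mirrored group occupies two bytes of raw
 * device space; an equally sized single or RAID0 group contributes
 * only 724M (factor == 1).
 */
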
void btrfs_dec_block_group_ro(struct btrfs_block_group_cache *cache)
{
	struct btrfs_space_info *sinfo = cache->space_info;
	u64 num_bytes;

	BUG_ON(!cache->ro);

	spin_lock(&sinfo->lock);
	spin_lock(&cache->lock);
	if (!--cache->ro) {
		num_bytes = cache->key.offset - cache->reserved -
			    cache->pinned - cache->bytes_super -
			    btrfs_block_group_used(&cache->item);
		sinfo->bytes_readonly -= num_bytes;
		list_del_init(&cache->ro_list);
	}
	spin_unlock(&cache->lock);
	spin_unlock(&sinfo->lock);
}

/*
 * checks to see if it's even possible to relocate this block group.
 *
 * @return - -1 if it's not a good idea to relocate this block group, 0 if
 * it's ok to go ahead and try.
 */
int btrfs_can_relocate(struct btrfs_fs_info *fs_info, u64 bytenr)
{
	struct btrfs_root *root = fs_info->extent_root;
	struct btrfs_block_group_cache *block_group;
	struct btrfs_space_info *space_info;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct btrfs_device *device;
	struct btrfs_trans_handle *trans;
	u64 min_free;
	u64 dev_min = 1;
	u64 dev_nr = 0;
	u64 target;
	int debug;
	int index;
	int full = 0;
	int ret = 0;

	debug = btrfs_test_opt(fs_info, ENOSPC_DEBUG);

	block_group = btrfs_lookup_block_group(fs_info, bytenr);

	/* odd, couldn't find the block group, leave it alone */
	if (!block_group) {
		if (debug)
			btrfs_warn(fs_info,
				   "can't find block group for bytenr %llu",
				   bytenr);
		return -1;
	}

	min_free = btrfs_block_group_used(&block_group->item);

	/* no bytes used, we're good */
	if (!min_free)
		goto out;

	space_info = block_group->space_info;
	spin_lock(&space_info->lock);

	full = space_info->full;

	/*
	 * if this is the last block group we have in this space, we can't
	 * relocate it unless we're able to allocate a new chunk below.
	 *
	 * Otherwise, we need to make sure we have room in the space to handle
	 * all of the extents from this block group.  If we can, we're good
	 */
	if ((space_info->total_bytes != block_group->key.offset) &&
	    (btrfs_space_info_used(space_info, false) + min_free <
	     space_info->total_bytes)) {
		spin_unlock(&space_info->lock);
		goto out;
	}
	spin_unlock(&space_info->lock);

	/*
	 * ok we don't have enough space, but maybe we have free space on our
	 * devices to allocate new chunks for relocation, so loop through our
	 * alloc devices and guess if we have enough space.  if this block
	 * group is going to be restriped, run checks against the target
	 * profile instead of the current one.
	 */
	ret = -1;

	/*
	 * index:
	 *      0: raid10
	 *      1: raid1
	 *      2: dup
	 *      3: raid0
	 *      4: single
	 */
	target = get_restripe_target(fs_info, block_group->flags);
	if (target) {
		index = __get_raid_index(extended_to_chunk(target));
	} else {
		/*
		 * this is just a balance, so if we were marked as full
		 * we know there is no space for a new chunk
		 */
		if (full) {
			if (debug)
				btrfs_warn(fs_info,
					   "no space to alloc new chunk for block group %llu",
					   block_group->key.objectid);
			goto out;
		}

		index = get_block_group_index(block_group);
	}

	if (index == BTRFS_RAID_RAID10) {
		dev_min = 4;
		/* Divide by 2 */
		min_free >>= 1;
	} else if (index == BTRFS_RAID_RAID1) {
		dev_min = 2;
	} else if (index == BTRFS_RAID_DUP) {
		/* Multiply by 2 */
		min_free <<= 1;
	} else if (index == BTRFS_RAID_RAID0) {
		dev_min = fs_devices->rw_devices;
		min_free = div64_u64(min_free, dev_min);
	}

	/* We need to do this so that we can look at pending chunks */
	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out;
	}

	mutex_lock(&fs_info->chunk_mutex);
	list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
		u64 dev_offset;

		/*
		 * check to make sure we can actually find a chunk with enough
		 * space to fit our block group in.
		 */
		if (device->total_bytes > device->bytes_used + min_free &&
		    !device->is_tgtdev_for_dev_replace) {
			ret = find_free_dev_extent(trans, device, min_free,
						   &dev_offset, NULL);
			if (!ret)
				dev_nr++;

			if (dev_nr >= dev_min)
				break;

			ret = -1;
		}
	}
	if (debug && ret == -1)
		btrfs_warn(fs_info,
			   "no space to allocate a new chunk for block group %llu",
			   block_group->key.objectid);
	mutex_unlock(&fs_info->chunk_mutex);
	btrfs_end_transaction(trans);
out:
	btrfs_put_block_group(block_group);
	return ret;
}

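/*
 * Example of the per-profile adjustment above: for a RAID10 block group
 * with 2GiB used we need at least dev_min = 4 writable devices, each
 * with a free extent of min_free = 2GiB / 2 = 1GiB, since RAID10 puts
 * half of the data on each mirrored stripe pair.  DUP instead doubles
 * min_free (both copies live on one device), and RAID0 divides it evenly
 * across all rw_devices.
 */
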
static int find_first_block_group(struct btrfs_fs_info *fs_info,
				  struct btrfs_path *path,
				  struct btrfs_key *key)
{
	struct btrfs_root *root = fs_info->extent_root;
	int ret = 0;
	struct btrfs_key found_key;
	struct extent_buffer *leaf;
	int slot;

	ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
	if (ret < 0)
		goto out;

	while (1) {
		slot = path->slots[0];
		leaf = path->nodes[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto out;
			break;
		}
		btrfs_item_key_to_cpu(leaf, &found_key, slot);

		if (found_key.objectid >= key->objectid &&
		    found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
			struct extent_map_tree *em_tree;
			struct extent_map *em;

			em_tree = &root->fs_info->mapping_tree.map_tree;
			read_lock(&em_tree->lock);
			em = lookup_extent_mapping(em_tree, found_key.objectid,
						   found_key.offset);
			read_unlock(&em_tree->lock);
			if (!em) {
				btrfs_err(fs_info,
			"logical %llu len %llu found bg but no related chunk",
					  found_key.objectid, found_key.offset);
				ret = -ENOENT;
			} else {
				ret = 0;
			}
			free_extent_map(em);
			goto out;
		}
		path->slots[0]++;
	}
out:
	return ret;
}

void btrfs_put_block_group_cache(struct btrfs_fs_info *info)
{
	struct btrfs_block_group_cache *block_group;
	u64 last = 0;

	while (1) {
		struct inode *inode;

		block_group = btrfs_lookup_first_block_group(info, last);
		while (block_group) {
			spin_lock(&block_group->lock);
			if (block_group->iref)
				break;
			spin_unlock(&block_group->lock);
			block_group = next_block_group(info, block_group);
		}
		if (!block_group) {
			if (last == 0)
				break;
			last = 0;
			continue;
		}

		inode = block_group->inode;
		block_group->iref = 0;
		block_group->inode = NULL;
		spin_unlock(&block_group->lock);
		ASSERT(block_group->io_ctl.inode == NULL);
		iput(inode);
		last = block_group->key.objectid + block_group->key.offset;
		btrfs_put_block_group(block_group);
	}
}

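/*
 * Note on the restart logic above: each scan starts at 'last' and releases
 * the cached free-space inode of the first block group found holding one.
 * When a scan that started past 0 falls off the end of the tree without
 * finding anything, 'last' is reset to 0 and the scan repeats from the
 * beginning, so groups skipped while block_group->lock was dropped are
 * not missed; only an empty scan starting from 0 ends the loop.
 */
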
/*
 * Must be called only after stopping all workers, since we could have block
 * group caching kthreads running, and therefore they could race with us if we
 * freed the block groups before stopping them.
 */
int btrfs_free_block_groups(struct btrfs_fs_info *info)
{
	struct btrfs_block_group_cache *block_group;
	struct btrfs_space_info *space_info;
	struct btrfs_caching_control *caching_ctl;
	struct rb_node *n;

	down_write(&info->commit_root_sem);
	while (!list_empty(&info->caching_block_groups)) {
		caching_ctl = list_entry(info->caching_block_groups.next,
					 struct btrfs_caching_control, list);
		list_del(&caching_ctl->list);
		put_caching_control(caching_ctl);
	}
	up_write(&info->commit_root_sem);

	spin_lock(&info->unused_bgs_lock);
	while (!list_empty(&info->unused_bgs)) {
		block_group = list_first_entry(&info->unused_bgs,
					       struct btrfs_block_group_cache,
					       bg_list);
		list_del_init(&block_group->bg_list);
		btrfs_put_block_group(block_group);
	}
	spin_unlock(&info->unused_bgs_lock);

	spin_lock(&info->block_group_cache_lock);
	while ((n = rb_last(&info->block_group_cache_tree)) != NULL) {
		block_group = rb_entry(n, struct btrfs_block_group_cache,
				       cache_node);
		rb_erase(&block_group->cache_node,
			 &info->block_group_cache_tree);
		RB_CLEAR_NODE(&block_group->cache_node);
		spin_unlock(&info->block_group_cache_lock);

		down_write(&block_group->space_info->groups_sem);
		list_del(&block_group->list);
		up_write(&block_group->space_info->groups_sem);

		/*
		 * We haven't cached this block group, which means we could
		 * possibly have excluded extents on this block group.
		 */
		if (block_group->cached == BTRFS_CACHE_NO ||
		    block_group->cached == BTRFS_CACHE_ERROR)
			free_excluded_extents(info, block_group);

		btrfs_remove_free_space_cache(block_group);
		ASSERT(block_group->cached != BTRFS_CACHE_STARTED);
		ASSERT(list_empty(&block_group->dirty_list));
		ASSERT(list_empty(&block_group->io_list));
		ASSERT(list_empty(&block_group->bg_list));
		ASSERT(atomic_read(&block_group->count) == 1);
		btrfs_put_block_group(block_group);

		spin_lock(&info->block_group_cache_lock);
	}
	spin_unlock(&info->block_group_cache_lock);

	/* now that all the block groups are freed, go through and
	 * free all the space_info structs.  This is only called during
	 * the final stages of unmount, and so we know nobody is
	 * using them.  We call synchronize_rcu() once before we start,
	 * just to be on the safe side.
	 */
	synchronize_rcu();

	release_global_block_rsv(info);

	while (!list_empty(&info->space_info)) {
		int i;

		space_info = list_entry(info->space_info.next,
					struct btrfs_space_info,
					list);

		/*
		 * Do not hide this behind enospc_debug, this is actually
		 * important and indicates a real bug if this happens.
		 */
		if (WARN_ON(space_info->bytes_pinned > 0 ||
			    space_info->bytes_reserved > 0 ||
			    space_info->bytes_may_use > 0))
			dump_space_info(info, space_info, 0, 0);
		list_del(&space_info->list);
		for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
			struct kobject *kobj;

			kobj = space_info->block_group_kobjs[i];
			space_info->block_group_kobjs[i] = NULL;
			if (kobj) {
				kobject_del(kobj);
				kobject_put(kobj);
			}
		}
		kobject_del(&space_info->kobj);
		kobject_put(&space_info->kobj);
	}
	return 0;
}

static void link_block_group(struct btrfs_block_group_cache *cache)
{
	struct btrfs_space_info *space_info = cache->space_info;
	int index = get_block_group_index(cache);
	bool first = false;

	down_write(&space_info->groups_sem);
	if (list_empty(&space_info->block_groups[index]))
		first = true;
	list_add_tail(&cache->list, &space_info->block_groups[index]);
	up_write(&space_info->groups_sem);

	if (first) {
		struct raid_kobject *rkobj;
		int ret;

		rkobj = kzalloc(sizeof(*rkobj), GFP_NOFS);
		if (!rkobj)
			goto out_err;
		rkobj->raid_type = index;
		kobject_init(&rkobj->kobj, &btrfs_raid_ktype);
		ret = kobject_add(&rkobj->kobj, &space_info->kobj,
				  "%s", get_raid_name(index));
		if (ret) {
			kobject_put(&rkobj->kobj);
			goto out_err;
		}
		space_info->block_group_kobjs[index] = &rkobj->kobj;
	}

	return;
out_err:
	btrfs_warn(cache->fs_info,
		   "failed to add kobject for block cache, ignoring");
}

static struct btrfs_block_group_cache *
btrfs_create_block_group_cache(struct btrfs_fs_info *fs_info,
			       u64 start, u64 size)
{
	struct btrfs_block_group_cache *cache;

	cache = kzalloc(sizeof(*cache), GFP_NOFS);
	if (!cache)
		return NULL;

	cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
					GFP_NOFS);
	if (!cache->free_space_ctl) {
		kfree(cache);
		return NULL;
	}

	cache->key.objectid = start;
	cache->key.offset = size;
	cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;

	cache->fs_info = fs_info;
	cache->full_stripe_len = btrfs_full_stripe_len(fs_info, start);
	set_free_space_tree_thresholds(cache);

	atomic_set(&cache->count, 1);
	spin_lock_init(&cache->lock);
	init_rwsem(&cache->data_rwsem);
	INIT_LIST_HEAD(&cache->list);
	INIT_LIST_HEAD(&cache->cluster_list);
	INIT_LIST_HEAD(&cache->bg_list);
	INIT_LIST_HEAD(&cache->ro_list);
	INIT_LIST_HEAD(&cache->dirty_list);
	INIT_LIST_HEAD(&cache->io_list);
	btrfs_init_free_space_ctl(cache);
	atomic_set(&cache->trimming, 0);
	mutex_init(&cache->free_space_lock);
	btrfs_init_full_stripe_locks_tree(&cache->full_stripe_locks_root);

	return cache;
}

int btrfs_read_block_groups(struct btrfs_fs_info *info)
{
	struct btrfs_path *path;
	int ret;
	struct btrfs_block_group_cache *cache;
	struct btrfs_space_info *space_info;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *leaf;
	int need_clear = 0;
	u64 cache_gen;
	u64 feature;
	int mixed;

	feature = btrfs_super_incompat_flags(info->super_copy);
	mixed = !!(feature & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS);

	key.objectid = 0;
	key.offset = 0;
	key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->reada = READA_FORWARD;

	cache_gen = btrfs_super_cache_generation(info->super_copy);
	if (btrfs_test_opt(info, SPACE_CACHE) &&
	    btrfs_super_generation(info->super_copy) != cache_gen)
		need_clear = 1;
	if (btrfs_test_opt(info, CLEAR_CACHE))
		need_clear = 1;

	while (1) {
		ret = find_first_block_group(info, path, &key);
		if (ret > 0)
			break;
		if (ret != 0)
			goto error;

		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

		cache = btrfs_create_block_group_cache(info, found_key.objectid,
						       found_key.offset);
		if (!cache) {
			ret = -ENOMEM;
			goto error;
		}

		if (need_clear) {
			/*
			 * When we mount with old space cache, we need to
			 * set BTRFS_DC_CLEAR and set dirty flag.
			 *
			 * a) Setting 'BTRFS_DC_CLEAR' makes sure that we
			 *    truncate the old free space cache inode and
			 *    setup a new one.
			 * b) Setting 'dirty flag' makes sure that we flush
			 *    the new space cache info onto disk.
			 */
			if (btrfs_test_opt(info, SPACE_CACHE))
				cache->disk_cache_state = BTRFS_DC_CLEAR;
		}

		read_extent_buffer(leaf, &cache->item,
				   btrfs_item_ptr_offset(leaf, path->slots[0]),
				   sizeof(cache->item));
		cache->flags = btrfs_block_group_flags(&cache->item);
		if (!mixed &&
		    ((cache->flags & BTRFS_BLOCK_GROUP_METADATA) &&
		      (cache->flags & BTRFS_BLOCK_GROUP_DATA))) {
			btrfs_err(info,
"bg %llu is a mixed block group but filesystem hasn't enabled mixed block groups",
				  cache->key.objectid);
			/* avoid leaking the just-created cache on this error path */
			btrfs_put_block_group(cache);
			ret = -EINVAL;
			goto error;
		}

		key.objectid = found_key.objectid + found_key.offset;
		btrfs_release_path(path);

		/*
		 * We need to exclude the super stripes now so that the space
		 * info has super bytes accounted for, otherwise we'll think
		 * we have more space than we actually do.
		 */
		ret = exclude_super_stripes(info, cache);
		if (ret) {
			/*
			 * We may have excluded something, so call this just in
			 * case.
			 */
			free_excluded_extents(info, cache);
			btrfs_put_block_group(cache);
			goto error;
		}

		/*
		 * check for two cases, either we are full, and therefore
		 * don't need to bother with the caching work since we won't
		 * find any space, or we are empty, and we can just add all
		 * the space in and be done with it.  This saves us a lot of
		 * time, particularly in the full case.
		 */
		if (found_key.offset == btrfs_block_group_used(&cache->item)) {
			cache->last_byte_to_unpin = (u64)-1;
			cache->cached = BTRFS_CACHE_FINISHED;
			free_excluded_extents(info, cache);
		} else if (btrfs_block_group_used(&cache->item) == 0) {
			cache->last_byte_to_unpin = (u64)-1;
			cache->cached = BTRFS_CACHE_FINISHED;
			add_new_free_space(cache, info,
					   found_key.objectid,
					   found_key.objectid +
					   found_key.offset);
			free_excluded_extents(info, cache);
		}

		ret = btrfs_add_block_group_cache(info, cache);
		if (ret) {
			btrfs_remove_free_space_cache(cache);
			btrfs_put_block_group(cache);
			goto error;
		}

		trace_btrfs_add_block_group(info, cache, 0);
		update_space_info(info, cache->flags, found_key.offset,
				  btrfs_block_group_used(&cache->item),
				  cache->bytes_super, &space_info);

		cache->space_info = space_info;

		link_block_group(cache);

		set_avail_alloc_bits(info, cache->flags);
		if (btrfs_chunk_readonly(info, cache->key.objectid)) {
			inc_block_group_ro(cache, 1);
		} else if (btrfs_block_group_used(&cache->item) == 0) {
			spin_lock(&info->unused_bgs_lock);
			/* Should always be true but just in case. */
			if (list_empty(&cache->bg_list)) {
				btrfs_get_block_group(cache);
				list_add_tail(&cache->bg_list,
					      &info->unused_bgs);
			}
			spin_unlock(&info->unused_bgs_lock);
		}
	}

	list_for_each_entry_rcu(space_info, &info->space_info, list) {
		if (!(get_alloc_profile(info, space_info->flags) &
		      (BTRFS_BLOCK_GROUP_RAID10 |
		       BTRFS_BLOCK_GROUP_RAID1 |
		       BTRFS_BLOCK_GROUP_RAID5 |
		       BTRFS_BLOCK_GROUP_RAID6 |
		       BTRFS_BLOCK_GROUP_DUP)))
			continue;
		/*
		 * avoid allocating from un-mirrored block group if there are
		 * mirrored block groups.
		 */
		list_for_each_entry(cache,
				&space_info->block_groups[BTRFS_RAID_RAID0],
				list)
			inc_block_group_ro(cache, 1);
		list_for_each_entry(cache,
				&space_info->block_groups[BTRFS_RAID_SINGLE],
				list)
			inc_block_group_ro(cache, 1);
	}

	init_global_block_rsv(info);
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}

void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans,
				       struct btrfs_fs_info *fs_info)
{
	struct btrfs_block_group_cache *block_group, *tmp;
	struct btrfs_root *extent_root = fs_info->extent_root;
	struct btrfs_block_group_item item;
	struct btrfs_key key;
	int ret = 0;
	bool can_flush_pending_bgs = trans->can_flush_pending_bgs;

	trans->can_flush_pending_bgs = false;
	list_for_each_entry_safe(block_group, tmp, &trans->new_bgs, bg_list) {
		if (ret)
			goto next;

		spin_lock(&block_group->lock);
		memcpy(&item, &block_group->item, sizeof(item));
		memcpy(&key, &block_group->key, sizeof(key));
		spin_unlock(&block_group->lock);

		ret = btrfs_insert_item(trans, extent_root, &key, &item,
					sizeof(item));
		if (ret)
			btrfs_abort_transaction(trans, ret);
		ret = btrfs_finish_chunk_alloc(trans, fs_info, key.objectid,
					       key.offset);
		if (ret)
			btrfs_abort_transaction(trans, ret);
		add_block_group_free_space(trans, fs_info, block_group);
		/* already aborted the transaction if it failed. */
next:
		list_del_init(&block_group->bg_list);
	}
	trans->can_flush_pending_bgs = can_flush_pending_bgs;
}

int btrfs_make_block_group(struct btrfs_trans_handle *trans,
			   struct btrfs_fs_info *fs_info, u64 bytes_used,
			   u64 type, u64 chunk_offset, u64 size)
{
	struct btrfs_block_group_cache *cache;
	int ret;

	btrfs_set_log_full_commit(fs_info, trans);

	cache = btrfs_create_block_group_cache(fs_info, chunk_offset, size);
	if (!cache)
		return -ENOMEM;

	btrfs_set_block_group_used(&cache->item, bytes_used);
	btrfs_set_block_group_chunk_objectid(&cache->item,
					     BTRFS_FIRST_CHUNK_TREE_OBJECTID);
	btrfs_set_block_group_flags(&cache->item, type);

	cache->flags = type;
	cache->last_byte_to_unpin = (u64)-1;
	cache->cached = BTRFS_CACHE_FINISHED;
	cache->needs_free_space = 1;
	ret = exclude_super_stripes(fs_info, cache);
	if (ret) {
		/*
		 * We may have excluded something, so call this just in
		 * case.
		 */
		free_excluded_extents(fs_info, cache);
		btrfs_put_block_group(cache);
		return ret;
	}

	add_new_free_space(cache, fs_info, chunk_offset, chunk_offset + size);

	free_excluded_extents(fs_info, cache);

#ifdef CONFIG_BTRFS_DEBUG
	if (btrfs_should_fragment_free_space(cache)) {
		u64 new_bytes_used = size - bytes_used;

		bytes_used += new_bytes_used >> 1;
		fragment_free_space(cache);
	}
#endif
	/*
	 * Ensure the corresponding space_info object is created and
	 * assigned to our block group. We want our bg to be added to the rbtree
	 * with its ->space_info set.
	 */
	cache->space_info = __find_space_info(fs_info, cache->flags);
	if (!cache->space_info) {
		ret = create_space_info(fs_info, cache->flags,
					&cache->space_info);
		if (ret) {
			btrfs_remove_free_space_cache(cache);
			btrfs_put_block_group(cache);
			return ret;
		}
	}

	ret = btrfs_add_block_group_cache(fs_info, cache);
	if (ret) {
		btrfs_remove_free_space_cache(cache);
		btrfs_put_block_group(cache);
		return ret;
	}

	/*
	 * Now that our block group has its ->space_info set and is inserted in
	 * the rbtree, update the space info's counters.
	 */
	trace_btrfs_add_block_group(fs_info, cache, 1);
	update_space_info(fs_info, cache->flags, size, bytes_used,
			  cache->bytes_super, &cache->space_info);
	update_global_block_rsv(fs_info);

	link_block_group(cache);

	list_add_tail(&cache->bg_list, &trans->new_bgs);

	set_avail_alloc_bits(fs_info, type);
	return 0;
}

static void clear_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
{
	u64 extra_flags = chunk_to_extended(flags) &
				BTRFS_EXTENDED_PROFILE_MASK;

	write_seqlock(&fs_info->profiles_lock);
	if (flags & BTRFS_BLOCK_GROUP_DATA)
		fs_info->avail_data_alloc_bits &= ~extra_flags;
	if (flags & BTRFS_BLOCK_GROUP_METADATA)
		fs_info->avail_metadata_alloc_bits &= ~extra_flags;
	if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
		fs_info->avail_system_alloc_bits &= ~extra_flags;
	write_sequnlock(&fs_info->profiles_lock);
}

int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
			     struct btrfs_fs_info *fs_info, u64 group_start,
			     struct extent_map *em)
{
	struct btrfs_root *root = fs_info->extent_root;
	struct btrfs_path *path;
	struct btrfs_block_group_cache *block_group;
	struct btrfs_free_cluster *cluster;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_key key;
	struct inode *inode;
	struct kobject *kobj = NULL;
	int ret;
	int index;
	int factor;
	struct btrfs_caching_control *caching_ctl = NULL;
	bool remove_em;

	block_group = btrfs_lookup_block_group(fs_info, group_start);
	BUG_ON(!block_group);
	BUG_ON(!block_group->ro);

	/*
	 * Free the reserved super bytes from this block group before
	 * removing it.
	 */
	free_excluded_extents(fs_info, block_group);
	btrfs_free_ref_tree_range(fs_info, block_group->key.objectid,
				  block_group->key.offset);

	memcpy(&key, &block_group->key, sizeof(key));
	index = get_block_group_index(block_group);
	if (block_group->flags & (BTRFS_BLOCK_GROUP_DUP |
				  BTRFS_BLOCK_GROUP_RAID1 |
				  BTRFS_BLOCK_GROUP_RAID10))
		factor = 2;
	else
		factor = 1;

	/* make sure this block group isn't part of an allocation cluster */
	cluster = &fs_info->data_alloc_cluster;
	spin_lock(&cluster->refill_lock);
	btrfs_return_cluster_to_free_space(block_group, cluster);
	spin_unlock(&cluster->refill_lock);

	/*
	 * make sure this block group isn't part of a metadata
	 * allocation cluster
	 */
	cluster = &fs_info->meta_alloc_cluster;
	spin_lock(&cluster->refill_lock);
	btrfs_return_cluster_to_free_space(block_group, cluster);
	spin_unlock(&cluster->refill_lock);

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * get the inode first so any iput calls done for the io_list
	 * aren't the final iput (no unlinks allowed now)
	 */
	inode = lookup_free_space_inode(fs_info, block_group, path);

	mutex_lock(&trans->transaction->cache_write_mutex);
	/*
	 * make sure our free space cache IO is done before removing the
	 * free space inode
	 */
	spin_lock(&trans->transaction->dirty_bgs_lock);
	if (!list_empty(&block_group->io_list)) {
		list_del_init(&block_group->io_list);

		WARN_ON(!IS_ERR(inode) && inode != block_group->io_ctl.inode);

		spin_unlock(&trans->transaction->dirty_bgs_lock);
		btrfs_wait_cache_io(trans, block_group, path);
		btrfs_put_block_group(block_group);
		spin_lock(&trans->transaction->dirty_bgs_lock);
	}

	if (!list_empty(&block_group->dirty_list)) {
		list_del_init(&block_group->dirty_list);
		btrfs_put_block_group(block_group);
	}
	spin_unlock(&trans->transaction->dirty_bgs_lock);
	mutex_unlock(&trans->transaction->cache_write_mutex);

	if (!IS_ERR(inode)) {
		ret = btrfs_orphan_add(trans, BTRFS_I(inode));
		if (ret) {
			btrfs_add_delayed_iput(inode);
			goto out;
		}
		clear_nlink(inode);
		/* One for the block groups ref */
		spin_lock(&block_group->lock);
		if (block_group->iref) {
			block_group->iref = 0;
			block_group->inode = NULL;
			spin_unlock(&block_group->lock);
			iput(inode);
		} else {
			spin_unlock(&block_group->lock);
		}
		/* One for our lookup ref */
		btrfs_add_delayed_iput(inode);
	}

	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.offset = block_group->key.objectid;
	key.type = 0;

	ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1);
	if (ret < 0)
		goto out;
	if (ret > 0)
		btrfs_release_path(path);
	if (ret == 0) {
		ret = btrfs_del_item(trans, tree_root, path);
		if (ret)
			goto out;
		btrfs_release_path(path);
	}

	spin_lock(&fs_info->block_group_cache_lock);
	rb_erase(&block_group->cache_node,
		 &fs_info->block_group_cache_tree);
	RB_CLEAR_NODE(&block_group->cache_node);

	if (fs_info->first_logical_byte == block_group->key.objectid)
		fs_info->first_logical_byte = (u64)-1;
	spin_unlock(&fs_info->block_group_cache_lock);

	down_write(&block_group->space_info->groups_sem);
	/*
	 * we must use list_del_init so people can check to see if they
	 * are still on the list after taking the semaphore
	 */
	list_del_init(&block_group->list);
	if (list_empty(&block_group->space_info->block_groups[index])) {
		kobj = block_group->space_info->block_group_kobjs[index];
		block_group->space_info->block_group_kobjs[index] = NULL;
		clear_avail_alloc_bits(fs_info, block_group->flags);
	}
	up_write(&block_group->space_info->groups_sem);
	if (kobj) {
		kobject_del(kobj);
		kobject_put(kobj);
	}

	if (block_group->has_caching_ctl)
		caching_ctl = get_caching_control(block_group);
	if (block_group->cached == BTRFS_CACHE_STARTED)
		wait_block_group_cache_done(block_group);
	if (block_group->has_caching_ctl) {
		down_write(&fs_info->commit_root_sem);
		if (!caching_ctl) {
			struct btrfs_caching_control *ctl;

			list_for_each_entry(ctl,
				    &fs_info->caching_block_groups, list)
				if (ctl->block_group == block_group) {
					caching_ctl = ctl;
					refcount_inc(&caching_ctl->count);
					break;
				}
		}
		if (caching_ctl)
			list_del_init(&caching_ctl->list);
		up_write(&fs_info->commit_root_sem);
		if (caching_ctl) {
			/* Once for the caching bgs list and once for us. */
			put_caching_control(caching_ctl);
			put_caching_control(caching_ctl);
		}
	}

	spin_lock(&trans->transaction->dirty_bgs_lock);
	if (!list_empty(&block_group->dirty_list)) {
		WARN_ON(1);
	}
	if (!list_empty(&block_group->io_list)) {
		WARN_ON(1);
	}
	spin_unlock(&trans->transaction->dirty_bgs_lock);
	btrfs_remove_free_space_cache(block_group);

	spin_lock(&block_group->space_info->lock);
	list_del_init(&block_group->ro_list);

	if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
		WARN_ON(block_group->space_info->total_bytes
			< block_group->key.offset);
		WARN_ON(block_group->space_info->bytes_readonly
			< block_group->key.offset);
		WARN_ON(block_group->space_info->disk_total
			< block_group->key.offset * factor);
	}
	block_group->space_info->total_bytes -= block_group->key.offset;
	block_group->space_info->bytes_readonly -= block_group->key.offset;
	block_group->space_info->disk_total -= block_group->key.offset * factor;

	spin_unlock(&block_group->space_info->lock);

	memcpy(&key, &block_group->key, sizeof(key));

	mutex_lock(&fs_info->chunk_mutex);
	if (!list_empty(&em->list)) {
		/* We're in the transaction->pending_chunks list. */
		free_extent_map(em);
	}
	spin_lock(&block_group->lock);
	block_group->removed = 1;
	/*
	 * At this point trimming can't start on this block group, because we
	 * removed the block group from the tree fs_info->block_group_cache_tree
	 * so no one can find it anymore and even if someone already got this
	 * block group before we removed it from the rbtree, they have already
	 * incremented block_group->trimming - if they didn't, they won't find
	 * any free space entries because we already removed them all when we
	 * called btrfs_remove_free_space_cache().
	 *
	 * And we must not remove the extent map from the fs_info->mapping_tree
	 * to prevent the same logical address range and physical device space
	 * ranges from being reused for a new block group. This is because our
	 * fs trim operation (btrfs_trim_fs() / btrfs_ioctl_fitrim()) is
	 * completely transactionless, so while it is trimming a range the
	 * currently running transaction might finish and a new one start,
	 * allowing for new block groups to be created that can reuse the same
	 * physical device locations unless we take this special care.
	 *
	 * There may also be an implicit trim operation if the file system
	 * is mounted with -odiscard. The same protections must remain
	 * in place until the extents have been discarded completely when
	 * the transaction commit has completed.
	 */
	remove_em = (atomic_read(&block_group->trimming) == 0);
	/*
	 * Make sure a trimmer task always sees the em in the pinned_chunks list
	 * if it sees block_group->removed == 1 (needs to lock block_group->lock
	 * before checking block_group->removed).
	 */
	if (!remove_em) {
		/*
		 * Our em might be in trans->transaction->pending_chunks which
		 * is protected by fs_info->chunk_mutex ([lock|unlock]_chunks),
		 * and so is the fs_info->pinned_chunks list.
		 *
		 * So at this point we must be holding the chunk_mutex to avoid
		 * any races with chunk allocation (more specifically at
		 * volumes.c:contains_pending_extent()), to ensure it always
		 * sees the em, either in the pending_chunks list or in the
		 * pinned_chunks list.
		 */
		list_move_tail(&em->list, &fs_info->pinned_chunks);
	}
	spin_unlock(&block_group->lock);

	if (remove_em) {
		struct extent_map_tree *em_tree;

		em_tree = &fs_info->mapping_tree.map_tree;
		write_lock(&em_tree->lock);
		/*
		 * The em might be in the pending_chunks list, so make sure the
		 * chunk mutex is locked, since remove_extent_mapping() will
		 * delete us from that list.
		 */
		remove_extent_mapping(em_tree, em);
		write_unlock(&em_tree->lock);
		/* once for the tree */
		free_extent_map(em);
	}

	mutex_unlock(&fs_info->chunk_mutex);

	ret = remove_block_group_free_space(trans, fs_info, block_group);
	if (ret)
		goto out;
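
	/* Once for the lookup reference, once for the block group rbtree. */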
	btrfs_put_block_group(block_group);
	btrfs_put_block_group(block_group);

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0)
		ret = -EIO;
	if (ret < 0)
		goto out;

	ret = btrfs_del_item(trans, root, path);
out:
	btrfs_free_path(path);
	return ret;
}

struct btrfs_trans_handle *
btrfs_start_trans_remove_block_group(struct btrfs_fs_info *fs_info,
				     const u64 chunk_offset)
{
	struct extent_map_tree *em_tree = &fs_info->mapping_tree.map_tree;
	struct extent_map *em;
	struct map_lookup *map;
	unsigned int num_items;

	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, chunk_offset, 1);
	read_unlock(&em_tree->lock);
	ASSERT(em && em->start == chunk_offset);

	/*
	 * We need to reserve 3 + N units from the metadata space info in order
	 * to remove a block group (done at btrfs_remove_chunk() and at
	 * btrfs_remove_block_group()), which are used for:
	 *
	 * 1 unit for adding the free space inode's orphan (located in the tree
	 * of tree roots).
	 * 1 unit for deleting the block group item (located in the extent
	 * tree).
	 * 1 unit for deleting the free space item (located in tree of tree
	 * roots).
	 * N units for deleting N device extent items corresponding to each
	 * stripe (located in the device tree).
	 *
	 * In order to remove a block group we also need to reserve units in the
	 * system space info in order to update the chunk tree (update one or
	 * more device items and remove one chunk item), but this is done at
	 * btrfs_remove_chunk() through a call to check_system_chunk().
	 */
	map = em->map_lookup;
	num_items = 3 + map->num_stripes;
	free_extent_map(em);

	return btrfs_start_transaction_fallback_global_rsv(fs_info->extent_root,
							   num_items, 1);
}

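/*
 * Example of the reservation math above: removing a RAID10 chunk striped
 * over 4 devices reserves num_items = 3 + 4 = 7 metadata units: one each
 * for the free space inode orphan, the block group item and the free
 * space item, plus one per device extent item.
 */
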
/*
 * Process the unused_bgs list and remove any that don't have any allocated
 * space inside of them.
 */
void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
{
	struct btrfs_block_group_cache *block_group;
	struct btrfs_space_info *space_info;
	struct btrfs_trans_handle *trans;
	int ret = 0;

	if (!test_bit(BTRFS_FS_OPEN, &fs_info->flags))
		return;

	spin_lock(&fs_info->unused_bgs_lock);
	while (!list_empty(&fs_info->unused_bgs)) {
		u64 start, end;
		int trimming;

		block_group = list_first_entry(&fs_info->unused_bgs,
					       struct btrfs_block_group_cache,
					       bg_list);
		list_del_init(&block_group->bg_list);

		space_info = block_group->space_info;

		if (ret || btrfs_mixed_space_info(space_info)) {
			btrfs_put_block_group(block_group);
			continue;
		}
		spin_unlock(&fs_info->unused_bgs_lock);

		mutex_lock(&fs_info->delete_unused_bgs_mutex);

		/* Don't want to race with allocators so take the groups_sem */
		down_write(&space_info->groups_sem);
		spin_lock(&block_group->lock);
		if (block_group->reserved ||
		    btrfs_block_group_used(&block_group->item) ||
		    block_group->ro ||
		    list_is_singular(&block_group->list)) {
			/*
			 * We want to bail if we made new allocations or have
			 * outstanding allocations in this block group.  We do
			 * the ro check in case balance is currently acting on
			 * this block group.
			 */
			spin_unlock(&block_group->lock);
			up_write(&space_info->groups_sem);
			goto next;
		}
		spin_unlock(&block_group->lock);

		/* We don't want to force the issue, only flip if it's ok. */
		ret = inc_block_group_ro(block_group, 0);
		up_write(&space_info->groups_sem);
		if (ret < 0) {
			ret = 0;
			goto next;
		}

		/*
		 * Want to do this before we do anything else so we can recover
		 * properly if we fail to join the transaction.
		 */
		trans = btrfs_start_trans_remove_block_group(fs_info,
						     block_group->key.objectid);
		if (IS_ERR(trans)) {
			btrfs_dec_block_group_ro(block_group);
			ret = PTR_ERR(trans);
			goto next;
		}

		/*
		 * We could have pending pinned extents for this block group,
		 * just delete them, we don't care about them anymore.
		 */
		start = block_group->key.objectid;
		end = start + block_group->key.offset - 1;
		/*
		 * Hold the unused_bg_unpin_mutex lock to avoid racing with
		 * btrfs_finish_extent_commit(). If we are at transaction N,
		 * another task might be running finish_extent_commit() for the
		 * previous transaction N - 1, and have seen a range belonging
		 * to the block group in freed_extents[] before we were able to
		 * clear the whole block group range from freed_extents[]. This
		 * means that task can lookup for the block group after we
		 * unpinned it from freed_extents[] and removed it, leading to
		 * a BUG_ON() at btrfs_unpin_extent_range().
		 */
		mutex_lock(&fs_info->unused_bg_unpin_mutex);
		ret = clear_extent_bits(&fs_info->freed_extents[0], start, end,
					EXTENT_DIRTY);
		if (ret) {
			mutex_unlock(&fs_info->unused_bg_unpin_mutex);
			btrfs_dec_block_group_ro(block_group);
			goto end_trans;
		}
		ret = clear_extent_bits(&fs_info->freed_extents[1], start, end,
					EXTENT_DIRTY);
		if (ret) {
			mutex_unlock(&fs_info->unused_bg_unpin_mutex);
			btrfs_dec_block_group_ro(block_group);
			goto end_trans;
		}
		mutex_unlock(&fs_info->unused_bg_unpin_mutex);

		/* Reset pinned so btrfs_put_block_group doesn't complain */
		spin_lock(&space_info->lock);
		spin_lock(&block_group->lock);

		space_info->bytes_pinned -= block_group->pinned;
		space_info->bytes_readonly += block_group->pinned;
		percpu_counter_add(&space_info->total_bytes_pinned,
				   -block_group->pinned);
		block_group->pinned = 0;

		spin_unlock(&block_group->lock);
		spin_unlock(&space_info->lock);

		/* DISCARD can flip during remount */
		trimming = btrfs_test_opt(fs_info, DISCARD);

		/* Implicit trim during transaction commit. */
		if (trimming)
			btrfs_get_block_group_trimming(block_group);

		/*
		 * Btrfs_remove_chunk will abort the transaction if things go
		 * horribly wrong.
		 */
		ret = btrfs_remove_chunk(trans, fs_info,
					 block_group->key.objectid);

		if (ret) {
			if (trimming)
				btrfs_put_block_group_trimming(block_group);
			goto end_trans;
		}

		/*
		 * If we're not mounted with -odiscard, we can just forget
		 * about this block group. Otherwise we'll need to wait
		 * until transaction commit to do the actual discard.
		 */
		if (trimming) {
			spin_lock(&fs_info->unused_bgs_lock);
			/*
			 * A concurrent scrub might have added us to the list
			 * fs_info->unused_bgs, so use a list_move operation
			 * to add the block group to the deleted_bgs list.
			 */
			list_move(&block_group->bg_list,
				  &trans->transaction->deleted_bgs);
			spin_unlock(&fs_info->unused_bgs_lock);
			btrfs_get_block_group(block_group);
		}
end_trans:
		btrfs_end_transaction(trans);
next:
		mutex_unlock(&fs_info->delete_unused_bgs_mutex);
		btrfs_put_block_group(block_group);
		spin_lock(&fs_info->unused_bgs_lock);
	}
	spin_unlock(&fs_info->unused_bgs_lock);
}

int btrfs_init_space_info(struct btrfs_fs_info *fs_info)
{
        struct btrfs_space_info *space_info;
        struct btrfs_super_block *disk_super;
        u64 features;
        u64 flags;
        int mixed = 0;
        int ret;

        disk_super = fs_info->super_copy;
        if (!btrfs_super_root(disk_super))
                return -EINVAL;

        features = btrfs_super_incompat_flags(disk_super);
        if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
                mixed = 1;

        flags = BTRFS_BLOCK_GROUP_SYSTEM;
        ret = create_space_info(fs_info, flags, &space_info);
        if (ret)
                goto out;

        if (mixed) {
                flags = BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA;
                ret = create_space_info(fs_info, flags, &space_info);
        } else {
                flags = BTRFS_BLOCK_GROUP_METADATA;
                ret = create_space_info(fs_info, flags, &space_info);
                if (ret)
                        goto out;

                flags = BTRFS_BLOCK_GROUP_DATA;
                ret = create_space_info(fs_info, flags, &space_info);
        }
out:
        return ret;
}
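
/*
 * Error-path helper: unpin the given range without returning it to the
 * free space cache (the final false is unpin_extent_range()'s
 * return_free_space argument).
 */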
int btrfs_error_unpin_extent_range(struct btrfs_fs_info *fs_info,
                                   u64 start, u64 end)
{
        return unpin_extent_range(fs_info, start, end, false);
}

/*
 * It used to be that old block groups would be left around forever.
 * Iterating over them would be enough to trim unused space. Since we
 * now automatically remove them, we also need to iterate over unallocated
 * space.
 *
 * We don't want a transaction for this since the discard may take a
 * substantial amount of time. We don't require that a transaction be
 * running, but we do need to take a running transaction into account
 * to ensure that we're not discarding chunks that were released in
 * the current transaction.
 *
 * Holding the chunks lock will prevent other threads from allocating
 * or releasing chunks, but it won't prevent a running transaction
 * from committing and releasing the memory that the pending chunks
 * list head uses. For that, we need to take a reference to the
 * transaction.
 */
static int btrfs_trim_free_extents(struct btrfs_device *device,
                                   u64 minlen, u64 *trimmed)
{
        u64 start = 0, len = 0;
        int ret;

        *trimmed = 0;

        /* Not writeable = nothing to do. */
        if (!device->writeable)
                return 0;

        /* No free space = nothing to do. */
        if (device->total_bytes <= device->bytes_used)
                return 0;

        ret = 0;

        while (1) {
                struct btrfs_fs_info *fs_info = device->fs_info;
                struct btrfs_transaction *trans;
                u64 bytes;

                ret = mutex_lock_interruptible(&fs_info->chunk_mutex);
                if (ret)
                        return ret;

                down_read(&fs_info->commit_root_sem);

                spin_lock(&fs_info->trans_lock);
                trans = fs_info->running_transaction;
                if (trans)
                        refcount_inc(&trans->use_count);
                spin_unlock(&fs_info->trans_lock);

                ret = find_free_dev_extent_start(trans, device, minlen, start,
                                                 &start, &len);
                if (trans)
                        btrfs_put_transaction(trans);

                if (ret) {
                        up_read(&fs_info->commit_root_sem);
                        mutex_unlock(&fs_info->chunk_mutex);
                        if (ret == -ENOSPC)
                                ret = 0;
                        break;
                }

                ret = btrfs_issue_discard(device->bdev, start, len, &bytes);
                up_read(&fs_info->commit_root_sem);
                mutex_unlock(&fs_info->chunk_mutex);

                if (ret)
                        break;

                start += len;
                *trimmed += bytes;

                if (fatal_signal_pending(current)) {
                        ret = -ERESTARTSYS;
                        break;
                }

                cond_resched();
        }

        return ret;
}
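
/*
 * FITRIM entry point for btrfs: walk every block group that overlaps the
 * requested range and trim its free space, then walk all devices and trim
 * their unallocated space via btrfs_trim_free_extents() above. On return,
 * range->len holds the total number of bytes trimmed.
 */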
int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range)
{
        struct btrfs_block_group_cache *cache = NULL;
        struct btrfs_device *device;
        struct list_head *devices;
        u64 group_trimmed;
        u64 start;
        u64 end;
        u64 trimmed = 0;
        u64 total_bytes = btrfs_super_total_bytes(fs_info->super_copy);
        int ret = 0;

        /*
         * Try to trim all of the FS space; our first block group may start
         * from an offset greater than zero.
         */
        if (range->len == total_bytes)
                cache = btrfs_lookup_first_block_group(fs_info, range->start);
        else
                cache = btrfs_lookup_block_group(fs_info, range->start);

        while (cache) {
                if (cache->key.objectid >= (range->start + range->len)) {
                        btrfs_put_block_group(cache);
                        break;
                }

                start = max(range->start, cache->key.objectid);
                end = min(range->start + range->len,
                          cache->key.objectid + cache->key.offset);

                if (end - start >= range->minlen) {
                        if (!block_group_cache_done(cache)) {
                                ret = cache_block_group(cache, 0);
                                if (ret) {
                                        btrfs_put_block_group(cache);
                                        break;
                                }
                                ret = wait_block_group_cache_done(cache);
                                if (ret) {
                                        btrfs_put_block_group(cache);
                                        break;
                                }
                        }
                        ret = btrfs_trim_block_group(cache,
                                                     &group_trimmed,
                                                     start,
                                                     end,
                                                     range->minlen);

                        trimmed += group_trimmed;
                        if (ret) {
                                btrfs_put_block_group(cache);
                                break;
                        }
                }

                cache = next_block_group(fs_info, cache);
        }

        mutex_lock(&fs_info->fs_devices->device_list_mutex);
        devices = &fs_info->fs_devices->alloc_list;
        list_for_each_entry(device, devices, dev_alloc_list) {
                ret = btrfs_trim_free_extents(device, range->minlen,
                                              &group_trimmed);
                if (ret)
                        break;

                trimmed += group_trimmed;
        }
        mutex_unlock(&fs_info->fs_devices->device_list_mutex);

        range->len = trimmed;
        return ret;
}
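
/*
 * Illustrative userspace sketch (not part of this file, bracketed in
 * "#if 0" so the listing stays compilable): btrfs_trim_fs() is reached
 * through the generic FITRIM ioctl, and the fields of struct fstrim_range
 * map directly onto the range->start/len/minlen values used above. The
 * kernel writes the trimmed byte count back into range.len. The mount
 * point "/mnt/btrfs" is just an example path.
 */
#if 0
#include <fcntl.h>
#include <linux/fs.h>           /* FITRIM, struct fstrim_range */
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
        struct fstrim_range range;
        int fd = open("/mnt/btrfs", O_RDONLY);

        if (fd < 0) {
                perror("open");
                return 1;
        }

        memset(&range, 0, sizeof(range));
        range.start = 0;
        range.len = (__u64)-1;  /* whole filesystem */
        range.minlen = 0;       /* trim free extents of any size */

        if (ioctl(fd, FITRIM, &range) < 0) {
                perror("FITRIM");
                close(fd);
                return 1;
        }

        /* On success the kernel reports how many bytes were discarded. */
        printf("trimmed %llu bytes\n", (unsigned long long)range.len);
        close(fd);
        return 0;
}
#endif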

/*
 * btrfs_{start,end}_write_no_snapshotting() are similar to
 * mnt_{want,drop}_write(): they prevent tasks from writing data into the
 * page cache through nocow right before the subvolume is snapshotted only
 * to flush it to disk after the snapshot is created, and they block
 * operations that would make an in-progress snapshot inconsistent (writes
 * followed by expanding truncates, for example). See the usage sketch
 * after btrfs_start_write_no_snapshotting() below.
 */
void btrfs_end_write_no_snapshotting(struct btrfs_root *root)
{
        percpu_counter_dec(&root->subv_writers->counter);
        /*
         * Make sure counter is updated before we wake up waiters.
         */
        smp_mb();
        if (waitqueue_active(&root->subv_writers->wait))
                wake_up(&root->subv_writers->wait);
}

int btrfs_start_write_no_snapshotting(struct btrfs_root *root)
{
        if (atomic_read(&root->will_be_snapshotted))
                return 0;

        percpu_counter_inc(&root->subv_writers->counter);
        /*
         * Make sure counter is updated before we check for snapshot creation.
         */
        smp_mb();
        if (atomic_read(&root->will_be_snapshotted)) {
                btrfs_end_write_no_snapshotting(root);
                return 0;
        }
        return 1;
}
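
/*
 * Illustrative caller pattern (a sketch only, bracketed in "#if 0"; this
 * function and its error handling are hypothetical, not a caller that
 * exists in this file): nocow writers bracket their work with the pair
 * above and fall back to the COW path when a snapshot is pending.
 */
#if 0
static int example_nocow_write(struct btrfs_root *root)
{
        if (!btrfs_start_write_no_snapshotting(root)) {
                /* Snapshot creation is in progress; take the COW path. */
                return -EAGAIN;
        }

        /* ... write via nocow while snapshot creation is excluded ... */

        btrfs_end_write_no_snapshotting(root);
        return 0;
}
#endif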

/*
 * Action callback passed to wait_on_atomic_t() below: give up the CPU and
 * return 0, which tells the wait machinery to keep waiting until
 * will_be_snapshotted drops to zero.
 */
static int wait_snapshotting_atomic_t(atomic_t *a)
{
        schedule();
        return 0;
}

void btrfs_wait_for_snapshot_creation(struct btrfs_root *root)
{
        while (true) {
                int ret;

                ret = btrfs_start_write_no_snapshotting(root);
                if (ret)
                        break;
                wait_on_atomic_t(&root->will_be_snapshotted,
                                 wait_snapshotting_atomic_t,
                                 TASK_UNINTERRUPTIBLE);
        }
}
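
/*
 * For reference, a sketch of the counterpart protocol on the
 * snapshot-creation side. This is an illustration inferred from the
 * counters used above, not the actual code in ioctl.c: raise
 * will_be_snapshotted so new nocow writers bail out, drain the writers
 * tracked in subv_writers, and finally drop and wake the atomic so
 * btrfs_wait_for_snapshot_creation() can make progress.
 */
#if 0
static void example_snapshot_side(struct btrfs_root *root)
{
        atomic_inc(&root->will_be_snapshotted);
        smp_mb__after_atomic();

        /* Wait until every in-flight nocow writer has finished. */
        wait_event(root->subv_writers->wait,
                   percpu_counter_sum(&root->subv_writers->counter) == 0);

        /* ... create the snapshot ... */

        atomic_dec(&root->will_be_snapshotted);
        wake_up_atomic_t(&root->will_be_snapshotted);
}
#endif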