ctree.c

/*
 * Copyright (C) 2007,2008 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/mm.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "locking.h"

static int split_node(struct btrfs_trans_handle *trans, struct btrfs_root
		      *root, struct btrfs_path *path, int level);
static int split_leaf(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		      const struct btrfs_key *ins_key, struct btrfs_path *path,
		      int data_size, int extend);
static int push_node_left(struct btrfs_trans_handle *trans,
			  struct btrfs_fs_info *fs_info,
			  struct extent_buffer *dst,
			  struct extent_buffer *src, int empty);
static int balance_node_right(struct btrfs_trans_handle *trans,
			      struct btrfs_fs_info *fs_info,
			      struct extent_buffer *dst_buf,
			      struct extent_buffer *src_buf);
static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
		    int level, int slot);
static int tree_mod_log_free_eb(struct btrfs_fs_info *fs_info,
				struct extent_buffer *eb);
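/* allocate a new, zeroed path from the btrfs_path slab cache */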
struct btrfs_path *btrfs_alloc_path(void)
{
	return kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS);
}

/*
 * set all locked nodes in the path to blocking locks.  This should
 * be done before scheduling
 */
noinline void btrfs_set_path_blocking(struct btrfs_path *p)
{
	int i;

	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
		if (!p->nodes[i] || !p->locks[i])
			continue;
		btrfs_set_lock_blocking_rw(p->nodes[i], p->locks[i]);
		if (p->locks[i] == BTRFS_READ_LOCK)
			p->locks[i] = BTRFS_READ_LOCK_BLOCKING;
		else if (p->locks[i] == BTRFS_WRITE_LOCK)
			p->locks[i] = BTRFS_WRITE_LOCK_BLOCKING;
	}
}
/*
 * reset all the locked nodes in the path to spinning locks.
 *
 * held is used to keep lockdep happy, when lockdep is enabled
 * we set held to a blocking lock before we go around and
 * retake all the spinlocks in the path.  You can safely use NULL
 * for held
 */
noinline void btrfs_clear_path_blocking(struct btrfs_path *p,
					struct extent_buffer *held, int held_rw)
{
	int i;

	if (held) {
		btrfs_set_lock_blocking_rw(held, held_rw);
		if (held_rw == BTRFS_WRITE_LOCK)
			held_rw = BTRFS_WRITE_LOCK_BLOCKING;
		else if (held_rw == BTRFS_READ_LOCK)
			held_rw = BTRFS_READ_LOCK_BLOCKING;
	}
	btrfs_set_path_blocking(p);

	for (i = BTRFS_MAX_LEVEL - 1; i >= 0; i--) {
		if (p->nodes[i] && p->locks[i]) {
			btrfs_clear_lock_blocking_rw(p->nodes[i], p->locks[i]);
			if (p->locks[i] == BTRFS_WRITE_LOCK_BLOCKING)
				p->locks[i] = BTRFS_WRITE_LOCK;
			else if (p->locks[i] == BTRFS_READ_LOCK_BLOCKING)
				p->locks[i] = BTRFS_READ_LOCK;
		}
	}

	if (held)
		btrfs_clear_lock_blocking_rw(held, held_rw);
}
/* this also releases the path */
void btrfs_free_path(struct btrfs_path *p)
{
	if (!p)
		return;
	btrfs_release_path(p);
	kmem_cache_free(btrfs_path_cachep, p);
}

/*
 * path release drops references on the extent buffers in the path
 * and it drops any locks held by this path
 *
 * It is safe to call this on paths that hold no locks or extent buffers.
 */
noinline void btrfs_release_path(struct btrfs_path *p)
{
	int i;

	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
		p->slots[i] = 0;
		if (!p->nodes[i])
			continue;
		if (p->locks[i]) {
			btrfs_tree_unlock_rw(p->nodes[i], p->locks[i]);
			p->locks[i] = 0;
		}
		free_extent_buffer(p->nodes[i]);
		p->nodes[i] = NULL;
	}
}
/*
 * safely gets a reference on the root node of a tree.  A lock
 * is not taken, so a concurrent writer may put a different node
 * at the root of the tree.  See btrfs_lock_root_node for the
 * looping required.
 *
 * The extent buffer returned by this has a reference taken, so
 * it won't disappear.  It may stop being the root of the tree
 * at any time because there are no locks held.
 */
struct extent_buffer *btrfs_root_node(struct btrfs_root *root)
{
	struct extent_buffer *eb;

	while (1) {
		rcu_read_lock();
		eb = rcu_dereference(root->node);

		/*
		 * RCU really hurts here, we could free up the root node because
		 * it was COWed but we may not get the new root node yet so do
		 * the inc_not_zero dance and if it doesn't work then
		 * synchronize_rcu and try again.
		 */
		if (atomic_inc_not_zero(&eb->refs)) {
			rcu_read_unlock();
			break;
		}
		rcu_read_unlock();
		synchronize_rcu();
	}
	return eb;
}

/* loop around taking references on and locking the root node of the
 * tree until you end up with a lock on the root.  A locked buffer
 * is returned, with a reference held.
 */
struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root)
{
	struct extent_buffer *eb;

	while (1) {
		eb = btrfs_root_node(root);
		btrfs_tree_lock(eb);
		if (eb == root->node)
			break;
		btrfs_tree_unlock(eb);
		free_extent_buffer(eb);
	}
	return eb;
}

/* loop around taking references on and read locking the root node of the
 * tree until you end up with a read lock on the root.  A locked buffer
 * is returned, with a reference held.
 */
static struct extent_buffer *btrfs_read_lock_root_node(struct btrfs_root *root)
{
	struct extent_buffer *eb;

	while (1) {
		eb = btrfs_root_node(root);
		btrfs_tree_read_lock(eb);
		if (eb == root->node)
			break;
		btrfs_tree_read_unlock(eb);
		free_extent_buffer(eb);
	}
	return eb;
}
/* cow-only roots (everything not a reference counted cow subvolume) just get
 * put onto a simple dirty list.  transaction.c walks this to make sure they
 * get properly updated on disk.
 */
static void add_root_to_dirty_list(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	if (test_bit(BTRFS_ROOT_DIRTY, &root->state) ||
	    !test_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state))
		return;

	spin_lock(&fs_info->trans_lock);
	if (!test_and_set_bit(BTRFS_ROOT_DIRTY, &root->state)) {
		/* Want the extent tree to be the last on the list */
		if (root->objectid == BTRFS_EXTENT_TREE_OBJECTID)
			list_move_tail(&root->dirty_list,
				       &fs_info->dirty_cowonly_roots);
		else
			list_move(&root->dirty_list,
				  &fs_info->dirty_cowonly_roots);
	}
	spin_unlock(&fs_info->trans_lock);
}
/*
 * used by snapshot creation to make a copy of a root for a tree with
 * a given objectid.  The buffer with the new root node is returned in
 * cow_ret, and this func returns zero on success or a negative error code.
 */
int btrfs_copy_root(struct btrfs_trans_handle *trans,
		      struct btrfs_root *root,
		      struct extent_buffer *buf,
		      struct extent_buffer **cow_ret, u64 new_root_objectid)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_buffer *cow;
	int ret = 0;
	int level;
	struct btrfs_disk_key disk_key;

	WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
		trans->transid != fs_info->running_transaction->transid);
	WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
		trans->transid != root->last_trans);

	level = btrfs_header_level(buf);
	if (level == 0)
		btrfs_item_key(buf, &disk_key, 0);
	else
		btrfs_node_key(buf, &disk_key, 0);

	cow = btrfs_alloc_tree_block(trans, root, 0, new_root_objectid,
			&disk_key, level, buf->start, 0);
	if (IS_ERR(cow))
		return PTR_ERR(cow);

	copy_extent_buffer_full(cow, buf);
	btrfs_set_header_bytenr(cow, cow->start);
	btrfs_set_header_generation(cow, trans->transid);
	btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
	btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
				     BTRFS_HEADER_FLAG_RELOC);
	if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
		btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
	else
		btrfs_set_header_owner(cow, new_root_objectid);

	write_extent_buffer_fsid(cow, fs_info->fsid);

	WARN_ON(btrfs_header_generation(buf) > trans->transid);
	if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
		ret = btrfs_inc_ref(trans, root, cow, 1);
	else
		ret = btrfs_inc_ref(trans, root, cow, 0);

	if (ret)
		return ret;

	btrfs_mark_buffer_dirty(cow);
	*cow_ret = cow;
	return 0;
}
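/*
 * The tree modification log records changes made to b-tree nodes (leaves are
 * not logged) so that readers holding a tree mod sequence number, obtained
 * via btrfs_get_tree_mod_seq, can later rewind a block to the state it had
 * at that sequence number.
 */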
enum mod_log_op {
	MOD_LOG_KEY_REPLACE,
	MOD_LOG_KEY_ADD,
	MOD_LOG_KEY_REMOVE,
	MOD_LOG_KEY_REMOVE_WHILE_FREEING,
	MOD_LOG_KEY_REMOVE_WHILE_MOVING,
	MOD_LOG_MOVE_KEYS,
	MOD_LOG_ROOT_REPLACE,
};

struct tree_mod_move {
	int dst_slot;
	int nr_items;
};

struct tree_mod_root {
	u64 logical;
	u8 level;
};

struct tree_mod_elem {
	struct rb_node node;
	u64 logical;
	u64 seq;
	enum mod_log_op op;

	/* this is used for MOD_LOG_KEY_* and MOD_LOG_MOVE_KEYS operations */
	int slot;

	/* this is used for MOD_LOG_KEY* and MOD_LOG_ROOT_REPLACE */
	u64 generation;

	/* those are used for op == MOD_LOG_KEY_{REPLACE,REMOVE} */
	struct btrfs_disk_key key;
	u64 blockptr;

	/* this is used for op == MOD_LOG_MOVE_KEYS */
	struct tree_mod_move move;

	/* this is used for op == MOD_LOG_ROOT_REPLACE */
	struct tree_mod_root old_root;
};

static inline void tree_mod_log_read_lock(struct btrfs_fs_info *fs_info)
{
	read_lock(&fs_info->tree_mod_log_lock);
}

static inline void tree_mod_log_read_unlock(struct btrfs_fs_info *fs_info)
{
	read_unlock(&fs_info->tree_mod_log_lock);
}

static inline void tree_mod_log_write_lock(struct btrfs_fs_info *fs_info)
{
	write_lock(&fs_info->tree_mod_log_lock);
}

static inline void tree_mod_log_write_unlock(struct btrfs_fs_info *fs_info)
{
	write_unlock(&fs_info->tree_mod_log_lock);
}
/*
 * Pull a new tree mod seq number for our operation.
 */
static inline u64 btrfs_inc_tree_mod_seq(struct btrfs_fs_info *fs_info)
{
	return atomic64_inc_return(&fs_info->tree_mod_seq);
}

/*
 * This adds a new blocker to the tree mod log's blocker list if the @elem
 * passed does not already have a sequence number set.  So when a caller expects
 * to record tree modifications, it should ensure to set elem->seq to zero
 * before calling btrfs_get_tree_mod_seq.
 * Returns a fresh, unused tree log modification sequence number, even if no new
 * blocker was added.
 */
u64 btrfs_get_tree_mod_seq(struct btrfs_fs_info *fs_info,
			   struct seq_list *elem)
{
	tree_mod_log_write_lock(fs_info);
	spin_lock(&fs_info->tree_mod_seq_lock);
	if (!elem->seq) {
		elem->seq = btrfs_inc_tree_mod_seq(fs_info);
		list_add_tail(&elem->list, &fs_info->tree_mod_seq_list);
	}
	spin_unlock(&fs_info->tree_mod_seq_lock);
	tree_mod_log_write_unlock(fs_info);

	return elem->seq;
}
void btrfs_put_tree_mod_seq(struct btrfs_fs_info *fs_info,
			    struct seq_list *elem)
{
	struct rb_root *tm_root;
	struct rb_node *node;
	struct rb_node *next;
	struct seq_list *cur_elem;
	struct tree_mod_elem *tm;
	u64 min_seq = (u64)-1;
	u64 seq_putting = elem->seq;

	if (!seq_putting)
		return;

	spin_lock(&fs_info->tree_mod_seq_lock);
	list_del(&elem->list);
	elem->seq = 0;

	list_for_each_entry(cur_elem, &fs_info->tree_mod_seq_list, list) {
		if (cur_elem->seq < min_seq) {
			if (seq_putting > cur_elem->seq) {
				/*
				 * blocker with lower sequence number exists, we
				 * cannot remove anything from the log
				 */
				spin_unlock(&fs_info->tree_mod_seq_lock);
				return;
			}
			min_seq = cur_elem->seq;
		}
	}
	spin_unlock(&fs_info->tree_mod_seq_lock);

	/*
	 * anything that's lower than the lowest existing (read: blocked)
	 * sequence number can be removed from the tree.
	 */
	tree_mod_log_write_lock(fs_info);
	tm_root = &fs_info->tree_mod_log;
	for (node = rb_first(tm_root); node; node = next) {
		next = rb_next(node);
		tm = rb_entry(node, struct tree_mod_elem, node);
		if (tm->seq > min_seq)
			continue;
		rb_erase(node, tm_root);
		kfree(tm);
	}
	tree_mod_log_write_unlock(fs_info);
}
/*
 * key order of the log:
 *       node/leaf start address -> sequence
 *
 * The 'start address' is the logical address of the *new* root node
 * for root replace operations, or the logical address of the affected
 * block for all other operations.
 *
 * Note: must be called with write lock (tree_mod_log_write_lock).
 */
static noinline int
__tree_mod_log_insert(struct btrfs_fs_info *fs_info, struct tree_mod_elem *tm)
{
	struct rb_root *tm_root;
	struct rb_node **new;
	struct rb_node *parent = NULL;
	struct tree_mod_elem *cur;

	tm->seq = btrfs_inc_tree_mod_seq(fs_info);

	tm_root = &fs_info->tree_mod_log;
	new = &tm_root->rb_node;
	while (*new) {
		cur = rb_entry(*new, struct tree_mod_elem, node);
		parent = *new;
		if (cur->logical < tm->logical)
			new = &((*new)->rb_left);
		else if (cur->logical > tm->logical)
			new = &((*new)->rb_right);
		else if (cur->seq < tm->seq)
			new = &((*new)->rb_left);
		else if (cur->seq > tm->seq)
			new = &((*new)->rb_right);
		else
			return -EEXIST;
	}

	rb_link_node(&tm->node, parent, new);
	rb_insert_color(&tm->node, tm_root);
	return 0;
}
/*
 * Determines if logging can be omitted.  Returns 1 if it can.  Otherwise, it
 * returns zero with the tree_mod_log_lock acquired.  The caller must hold
 * this until all tree mod log insertions are recorded in the rb tree and then
 * call tree_mod_log_write_unlock() to release.
 */
static inline int tree_mod_dont_log(struct btrfs_fs_info *fs_info,
				    struct extent_buffer *eb)
{
	smp_mb();
	if (list_empty(&(fs_info)->tree_mod_seq_list))
		return 1;
	if (eb && btrfs_header_level(eb) == 0)
		return 1;

	tree_mod_log_write_lock(fs_info);
	if (list_empty(&(fs_info)->tree_mod_seq_list)) {
		tree_mod_log_write_unlock(fs_info);
		return 1;
	}

	return 0;
}

/* Similar to tree_mod_dont_log, but doesn't acquire any locks. */
static inline int tree_mod_need_log(const struct btrfs_fs_info *fs_info,
				    struct extent_buffer *eb)
{
	smp_mb();
	if (list_empty(&(fs_info)->tree_mod_seq_list))
		return 0;
	if (eb && btrfs_header_level(eb) == 0)
		return 0;

	return 1;
}
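/*
 * Allocate a log element for a single-key operation on @slot of @eb.  The
 * old key and block pointer are recorded for every op except MOD_LOG_KEY_ADD,
 * since an add has no previous state to roll back to.
 */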
static struct tree_mod_elem *
alloc_tree_mod_elem(struct extent_buffer *eb, int slot,
		    enum mod_log_op op, gfp_t flags)
{
	struct tree_mod_elem *tm;

	tm = kzalloc(sizeof(*tm), flags);
	if (!tm)
		return NULL;

	tm->logical = eb->start;
	if (op != MOD_LOG_KEY_ADD) {
		btrfs_node_key(eb, &tm->key, slot);
		tm->blockptr = btrfs_node_blockptr(eb, slot);
	}
	tm->op = op;
	tm->slot = slot;
	tm->generation = btrfs_node_ptr_generation(eb, slot);
	RB_CLEAR_NODE(&tm->node);

	return tm;
}
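/*
 * Record a single key operation in the log.  This is a no-op when nobody
 * holds a tree mod sequence number.
 */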
static noinline int
tree_mod_log_insert_key(struct btrfs_fs_info *fs_info,
			struct extent_buffer *eb, int slot,
			enum mod_log_op op, gfp_t flags)
{
	struct tree_mod_elem *tm;
	int ret;

	if (!tree_mod_need_log(fs_info, eb))
		return 0;

	tm = alloc_tree_mod_elem(eb, slot, op, flags);
	if (!tm)
		return -ENOMEM;

	if (tree_mod_dont_log(fs_info, eb)) {
		kfree(tm);
		return 0;
	}

	ret = __tree_mod_log_insert(fs_info, tm);
	tree_mod_log_write_unlock(fs_info);
	if (ret)
		kfree(tm);

	return ret;
}
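/*
 * Log the move of @nr_items keys from @src_slot to @dst_slot within @eb.
 * Keys that the move overwrites (only possible when moving towards the
 * start of the buffer) are logged as removals first.
 */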
static noinline int
tree_mod_log_insert_move(struct btrfs_fs_info *fs_info,
			 struct extent_buffer *eb, int dst_slot, int src_slot,
			 int nr_items)
{
	struct tree_mod_elem *tm = NULL;
	struct tree_mod_elem **tm_list = NULL;
	int ret = 0;
	int i;
	int locked = 0;

	if (!tree_mod_need_log(fs_info, eb))
		return 0;

	tm_list = kcalloc(nr_items, sizeof(struct tree_mod_elem *), GFP_NOFS);
	if (!tm_list)
		return -ENOMEM;

	tm = kzalloc(sizeof(*tm), GFP_NOFS);
	if (!tm) {
		ret = -ENOMEM;
		goto free_tms;
	}

	tm->logical = eb->start;
	tm->slot = src_slot;
	tm->move.dst_slot = dst_slot;
	tm->move.nr_items = nr_items;
	tm->op = MOD_LOG_MOVE_KEYS;

	for (i = 0; i + dst_slot < src_slot && i < nr_items; i++) {
		tm_list[i] = alloc_tree_mod_elem(eb, i + dst_slot,
		    MOD_LOG_KEY_REMOVE_WHILE_MOVING, GFP_NOFS);
		if (!tm_list[i]) {
			ret = -ENOMEM;
			goto free_tms;
		}
	}

	if (tree_mod_dont_log(fs_info, eb))
		goto free_tms;
	locked = 1;

	/*
	 * When we overwrite something during the move, we log these removals.
	 * This can only happen when we move towards the beginning of the
	 * buffer, i.e. dst_slot < src_slot.
	 */
	for (i = 0; i + dst_slot < src_slot && i < nr_items; i++) {
		ret = __tree_mod_log_insert(fs_info, tm_list[i]);
		if (ret)
			goto free_tms;
	}

	ret = __tree_mod_log_insert(fs_info, tm);
	if (ret)
		goto free_tms;
	tree_mod_log_write_unlock(fs_info);
	kfree(tm_list);

	return 0;
free_tms:
	for (i = 0; i < nr_items; i++) {
		if (tm_list[i] && !RB_EMPTY_NODE(&tm_list[i]->node))
			rb_erase(&tm_list[i]->node, &fs_info->tree_mod_log);
		kfree(tm_list[i]);
	}
	if (locked)
		tree_mod_log_write_unlock(fs_info);
	kfree(tm_list);
	kfree(tm);

	return ret;
}
static inline int
__tree_mod_log_free_eb(struct btrfs_fs_info *fs_info,
		       struct tree_mod_elem **tm_list,
		       int nritems)
{
	int i, j;
	int ret;

	for (i = nritems - 1; i >= 0; i--) {
		ret = __tree_mod_log_insert(fs_info, tm_list[i]);
		if (ret) {
			for (j = nritems - 1; j > i; j--)
				rb_erase(&tm_list[j]->node,
					 &fs_info->tree_mod_log);
			return ret;
		}
	}

	return 0;
}
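/*
 * Log the replacement of a root node.  If @log_removal is set and the old
 * root is not a leaf, every pointer in it is logged as a removal first so
 * that the old root's contents can still be rewound to.
 */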
static noinline int
tree_mod_log_insert_root(struct btrfs_fs_info *fs_info,
			 struct extent_buffer *old_root,
			 struct extent_buffer *new_root,
			 int log_removal)
{
	struct tree_mod_elem *tm = NULL;
	struct tree_mod_elem **tm_list = NULL;
	int nritems = 0;
	int ret = 0;
	int i;

	if (!tree_mod_need_log(fs_info, NULL))
		return 0;

	if (log_removal && btrfs_header_level(old_root) > 0) {
		nritems = btrfs_header_nritems(old_root);
		tm_list = kcalloc(nritems, sizeof(struct tree_mod_elem *),
				  GFP_NOFS);
		if (!tm_list) {
			ret = -ENOMEM;
			goto free_tms;
		}
		for (i = 0; i < nritems; i++) {
			tm_list[i] = alloc_tree_mod_elem(old_root, i,
			    MOD_LOG_KEY_REMOVE_WHILE_FREEING, GFP_NOFS);
			if (!tm_list[i]) {
				ret = -ENOMEM;
				goto free_tms;
			}
		}
	}

	tm = kzalloc(sizeof(*tm), GFP_NOFS);
	if (!tm) {
		ret = -ENOMEM;
		goto free_tms;
	}

	tm->logical = new_root->start;
	tm->old_root.logical = old_root->start;
	tm->old_root.level = btrfs_header_level(old_root);
	tm->generation = btrfs_header_generation(old_root);
	tm->op = MOD_LOG_ROOT_REPLACE;

	if (tree_mod_dont_log(fs_info, NULL))
		goto free_tms;

	if (tm_list)
		ret = __tree_mod_log_free_eb(fs_info, tm_list, nritems);
	if (!ret)
		ret = __tree_mod_log_insert(fs_info, tm);

	tree_mod_log_write_unlock(fs_info);
	if (ret)
		goto free_tms;
	kfree(tm_list);

	return ret;

free_tms:
	if (tm_list) {
		for (i = 0; i < nritems; i++)
			kfree(tm_list[i]);
		kfree(tm_list);
	}
	kfree(tm);

	return ret;
}
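/*
 * Search the log for operations on the block at @start.  With @smallest set,
 * the matching element with the smallest seq >= @min_seq is returned;
 * otherwise the matching element with the highest seq is returned.
 */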
static struct tree_mod_elem *
__tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq,
		      int smallest)
{
	struct rb_root *tm_root;
	struct rb_node *node;
	struct tree_mod_elem *cur = NULL;
	struct tree_mod_elem *found = NULL;

	tree_mod_log_read_lock(fs_info);
	tm_root = &fs_info->tree_mod_log;
	node = tm_root->rb_node;
	while (node) {
		cur = rb_entry(node, struct tree_mod_elem, node);
		if (cur->logical < start) {
			node = node->rb_left;
		} else if (cur->logical > start) {
			node = node->rb_right;
		} else if (cur->seq < min_seq) {
			node = node->rb_left;
		} else if (!smallest) {
			/* we want the node with the highest seq */
			if (found)
				BUG_ON(found->seq > cur->seq);
			found = cur;
			node = node->rb_left;
		} else if (cur->seq > min_seq) {
			/* we want the node with the smallest seq */
			if (found)
				BUG_ON(found->seq < cur->seq);
			found = cur;
			node = node->rb_right;
		} else {
			found = cur;
			break;
		}
	}
	tree_mod_log_read_unlock(fs_info);

	return found;
}
/*
 * this returns the element from the log with the smallest time sequence
 * value that's in the log (the oldest log item).  any element with a time
 * sequence lower than min_seq will be ignored.
 */
static struct tree_mod_elem *
tree_mod_log_search_oldest(struct btrfs_fs_info *fs_info, u64 start,
			   u64 min_seq)
{
	return __tree_mod_log_search(fs_info, start, min_seq, 1);
}

/*
 * this returns the element from the log with the largest time sequence
 * value that's in the log (the most recent log item).  any element with
 * a time sequence lower than min_seq will be ignored.
 */
static struct tree_mod_elem *
tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq)
{
	return __tree_mod_log_search(fs_info, start, min_seq, 0);
}
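/*
 * Log the copy of @nr_items pointers from @src to @dst: each copied slot is
 * logged as a removal from @src paired with an addition to @dst.
 */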
static noinline int
tree_mod_log_eb_copy(struct btrfs_fs_info *fs_info, struct extent_buffer *dst,
		     struct extent_buffer *src, unsigned long dst_offset,
		     unsigned long src_offset, int nr_items)
{
	int ret = 0;
	struct tree_mod_elem **tm_list = NULL;
	struct tree_mod_elem **tm_list_add, **tm_list_rem;
	int i;
	int locked = 0;

	if (!tree_mod_need_log(fs_info, NULL))
		return 0;

	if (btrfs_header_level(dst) == 0 && btrfs_header_level(src) == 0)
		return 0;

	tm_list = kcalloc(nr_items * 2, sizeof(struct tree_mod_elem *),
			  GFP_NOFS);
	if (!tm_list)
		return -ENOMEM;

	tm_list_add = tm_list;
	tm_list_rem = tm_list + nr_items;
	for (i = 0; i < nr_items; i++) {
		tm_list_rem[i] = alloc_tree_mod_elem(src, i + src_offset,
		    MOD_LOG_KEY_REMOVE, GFP_NOFS);
		if (!tm_list_rem[i]) {
			ret = -ENOMEM;
			goto free_tms;
		}

		tm_list_add[i] = alloc_tree_mod_elem(dst, i + dst_offset,
		    MOD_LOG_KEY_ADD, GFP_NOFS);
		if (!tm_list_add[i]) {
			ret = -ENOMEM;
			goto free_tms;
		}
	}

	if (tree_mod_dont_log(fs_info, NULL))
		goto free_tms;
	locked = 1;

	for (i = 0; i < nr_items; i++) {
		ret = __tree_mod_log_insert(fs_info, tm_list_rem[i]);
		if (ret)
			goto free_tms;
		ret = __tree_mod_log_insert(fs_info, tm_list_add[i]);
		if (ret)
			goto free_tms;
	}

	tree_mod_log_write_unlock(fs_info);
	kfree(tm_list);

	return 0;

free_tms:
	for (i = 0; i < nr_items * 2; i++) {
		if (tm_list[i] && !RB_EMPTY_NODE(&tm_list[i]->node))
			rb_erase(&tm_list[i]->node, &fs_info->tree_mod_log);
		kfree(tm_list[i]);
	}
	if (locked)
		tree_mod_log_write_unlock(fs_info);
	kfree(tm_list);

	return ret;
}
static inline void
tree_mod_log_eb_move(struct btrfs_fs_info *fs_info, struct extent_buffer *dst,
		     int dst_offset, int src_offset, int nr_items)
{
	int ret;
	ret = tree_mod_log_insert_move(fs_info, dst, dst_offset, src_offset,
				       nr_items);
	BUG_ON(ret < 0);
}

static noinline void
tree_mod_log_set_node_key(struct btrfs_fs_info *fs_info,
			  struct extent_buffer *eb, int slot, int atomic)
{
	int ret;

	ret = tree_mod_log_insert_key(fs_info, eb, slot,
					MOD_LOG_KEY_REPLACE,
					atomic ? GFP_ATOMIC : GFP_NOFS);
	BUG_ON(ret < 0);
}
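/*
 * Log the freeing of node @eb: every pointer it holds is logged as a removal
 * so the buffer's contents can still be rewound to.  Leaves are not logged.
 */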
static noinline int
tree_mod_log_free_eb(struct btrfs_fs_info *fs_info, struct extent_buffer *eb)
{
	struct tree_mod_elem **tm_list = NULL;
	int nritems = 0;
	int i;
	int ret = 0;

	if (btrfs_header_level(eb) == 0)
		return 0;

	if (!tree_mod_need_log(fs_info, NULL))
		return 0;

	nritems = btrfs_header_nritems(eb);
	tm_list = kcalloc(nritems, sizeof(struct tree_mod_elem *), GFP_NOFS);
	if (!tm_list)
		return -ENOMEM;

	for (i = 0; i < nritems; i++) {
		tm_list[i] = alloc_tree_mod_elem(eb, i,
		    MOD_LOG_KEY_REMOVE_WHILE_FREEING, GFP_NOFS);
		if (!tm_list[i]) {
			ret = -ENOMEM;
			goto free_tms;
		}
	}

	if (tree_mod_dont_log(fs_info, eb))
		goto free_tms;

	ret = __tree_mod_log_free_eb(fs_info, tm_list, nritems);
	tree_mod_log_write_unlock(fs_info);
	if (ret)
		goto free_tms;
	kfree(tm_list);

	return 0;

free_tms:
	for (i = 0; i < nritems; i++)
		kfree(tm_list[i]);
	kfree(tm_list);

	return ret;
}
static noinline void
tree_mod_log_set_root_pointer(struct btrfs_root *root,
			      struct extent_buffer *new_root_node,
			      int log_removal)
{
	int ret;
	ret = tree_mod_log_insert_root(root->fs_info, root->node,
				       new_root_node, log_removal);
	BUG_ON(ret < 0);
}

/*
 * check if the tree block can be shared by multiple trees
 */
int btrfs_block_can_be_shared(struct btrfs_root *root,
			      struct extent_buffer *buf)
{
	/*
	 * Tree blocks not in reference counted trees and tree roots
	 * are never shared.  If a block was allocated after the last
	 * snapshot and the block was not allocated by tree relocation,
	 * we know the block is not shared.
	 */
	if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
	    buf != root->node && buf != root->commit_root &&
	    (btrfs_header_generation(buf) <=
	     btrfs_root_last_snapshot(&root->root_item) ||
	     btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)))
		return 1;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
	    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
		return 1;
#endif
	return 0;
}
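/*
 * Adjust extent backrefs when @buf is COWed into @cow, following the rules
 * described below.  *last_ref is set when the old buffer drops its final
 * reference and can be freed.
 */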
static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct extent_buffer *buf,
				       struct extent_buffer *cow,
				       int *last_ref)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 refs;
	u64 owner;
	u64 flags;
	u64 new_flags = 0;
	int ret;

	/*
	 * Backrefs update rules:
	 *
	 * Always use full backrefs for extent pointers in tree block
	 * allocated by tree relocation.
	 *
	 * If a shared tree block is no longer referenced by its owner
	 * tree (btrfs_header_owner(buf) == root->root_key.objectid),
	 * use full backrefs for extent pointers in tree block.
	 *
	 * If a tree block is being relocated
	 * (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID),
	 * use full backrefs for extent pointers in tree block.
	 * The reason for this is that some operations (such as drop tree)
	 * are only allowed for blocks that use full backrefs.
	 */

	if (btrfs_block_can_be_shared(root, buf)) {
		ret = btrfs_lookup_extent_info(trans, fs_info, buf->start,
					       btrfs_header_level(buf), 1,
					       &refs, &flags);
		if (ret)
			return ret;
		if (refs == 0) {
			ret = -EROFS;
			btrfs_handle_fs_error(fs_info, ret, NULL);
			return ret;
		}
	} else {
		refs = 1;
		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
		    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
			flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
		else
			flags = 0;
	}

	owner = btrfs_header_owner(buf);
	BUG_ON(owner == BTRFS_TREE_RELOC_OBJECTID &&
	       !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));

	if (refs > 1) {
		if ((owner == root->root_key.objectid ||
		     root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) &&
		    !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) {
			ret = btrfs_inc_ref(trans, root, buf, 1);
			BUG_ON(ret); /* -ENOMEM */

			if (root->root_key.objectid ==
			    BTRFS_TREE_RELOC_OBJECTID) {
				ret = btrfs_dec_ref(trans, root, buf, 0);
				BUG_ON(ret); /* -ENOMEM */
				ret = btrfs_inc_ref(trans, root, cow, 1);
				BUG_ON(ret); /* -ENOMEM */
			}
			new_flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
		} else {
			if (root->root_key.objectid ==
			    BTRFS_TREE_RELOC_OBJECTID)
				ret = btrfs_inc_ref(trans, root, cow, 1);
			else
				ret = btrfs_inc_ref(trans, root, cow, 0);
			BUG_ON(ret); /* -ENOMEM */
		}
		if (new_flags != 0) {
			int level = btrfs_header_level(buf);

			ret = btrfs_set_disk_extent_flags(trans, fs_info,
							  buf->start,
							  buf->len,
							  new_flags, level, 0);
			if (ret)
				return ret;
		}
	} else {
		if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
			if (root->root_key.objectid ==
			    BTRFS_TREE_RELOC_OBJECTID)
				ret = btrfs_inc_ref(trans, root, cow, 1);
			else
				ret = btrfs_inc_ref(trans, root, cow, 0);
			BUG_ON(ret); /* -ENOMEM */
			ret = btrfs_dec_ref(trans, root, buf, 1);
			BUG_ON(ret); /* -ENOMEM */
		}
		clean_tree_block(fs_info, buf);
		*last_ref = 1;
	}
	return 0;
}
/*
 * does the dirty work in cow of a single block.  The parent block (if
 * supplied) is updated to point to the new cow copy.  The new buffer is marked
 * dirty and returned locked.  If you modify the block it needs to be marked
 * dirty again.
 *
 * search_start -- an allocation hint for the new block
 *
 * empty_size -- a hint that you plan on doing more cow.  This is the size in
 * bytes the allocator should try to find free next to the block it returns.
 * This is just a hint and may be ignored by the allocator.
 */
static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct extent_buffer *buf,
			     struct extent_buffer *parent, int parent_slot,
			     struct extent_buffer **cow_ret,
			     u64 search_start, u64 empty_size)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_disk_key disk_key;
	struct extent_buffer *cow;
	int level, ret;
	int last_ref = 0;
	int unlock_orig = 0;
	u64 parent_start = 0;

	if (*cow_ret == buf)
		unlock_orig = 1;

	btrfs_assert_tree_locked(buf);

	WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
		trans->transid != fs_info->running_transaction->transid);
	WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
		trans->transid != root->last_trans);

	level = btrfs_header_level(buf);

	if (level == 0)
		btrfs_item_key(buf, &disk_key, 0);
	else
		btrfs_node_key(buf, &disk_key, 0);

	if ((root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) && parent)
		parent_start = parent->start;

	cow = btrfs_alloc_tree_block(trans, root, parent_start,
			root->root_key.objectid, &disk_key, level,
			search_start, empty_size);
	if (IS_ERR(cow))
		return PTR_ERR(cow);

	/* cow is set to blocking by btrfs_init_new_buffer */

	copy_extent_buffer_full(cow, buf);
	btrfs_set_header_bytenr(cow, cow->start);
	btrfs_set_header_generation(cow, trans->transid);
	btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
	btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
				     BTRFS_HEADER_FLAG_RELOC);
	if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
		btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
	else
		btrfs_set_header_owner(cow, root->root_key.objectid);

	write_extent_buffer_fsid(cow, fs_info->fsid);

	ret = update_ref_for_cow(trans, root, buf, cow, &last_ref);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		return ret;
	}

	if (test_bit(BTRFS_ROOT_REF_COWS, &root->state)) {
		ret = btrfs_reloc_cow_block(trans, root, buf, cow);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			return ret;
		}
	}

	if (buf == root->node) {
		WARN_ON(parent && parent != buf);
		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
		    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
			parent_start = buf->start;

		extent_buffer_get(cow);
		tree_mod_log_set_root_pointer(root, cow, 1);
		rcu_assign_pointer(root->node, cow);

		btrfs_free_tree_block(trans, root, buf, parent_start,
				      last_ref);
		free_extent_buffer(buf);
		add_root_to_dirty_list(root);
	} else {
		WARN_ON(trans->transid != btrfs_header_generation(parent));
		tree_mod_log_insert_key(fs_info, parent, parent_slot,
					MOD_LOG_KEY_REPLACE, GFP_NOFS);
		btrfs_set_node_blockptr(parent, parent_slot,
					cow->start);
		btrfs_set_node_ptr_generation(parent, parent_slot,
					      trans->transid);
		btrfs_mark_buffer_dirty(parent);
		if (last_ref) {
			ret = tree_mod_log_free_eb(fs_info, buf);
			if (ret) {
				btrfs_abort_transaction(trans, ret);
				return ret;
			}
		}
		btrfs_free_tree_block(trans, root, buf, parent_start,
				      last_ref);
	}
	if (unlock_orig)
		btrfs_tree_unlock(buf);
	free_extent_buffer_stale(buf);
	btrfs_mark_buffer_dirty(cow);
	*cow_ret = cow;
	return 0;
}
/*
 * returns the logical address of the oldest predecessor of the given root.
 * entries older than time_seq are ignored.
 */
static struct tree_mod_elem *
__tree_mod_log_oldest_root(struct btrfs_fs_info *fs_info,
			   struct extent_buffer *eb_root, u64 time_seq)
{
	struct tree_mod_elem *tm;
	struct tree_mod_elem *found = NULL;
	u64 root_logical = eb_root->start;
	int looped = 0;

	if (!time_seq)
		return NULL;

	/*
	 * the very last operation that's logged for a root is the
	 * replacement operation (if it is replaced at all). this has
	 * the logical address of the *new* root, making it the very
	 * first operation that's logged for this root.
	 */
	while (1) {
		tm = tree_mod_log_search_oldest(fs_info, root_logical,
						time_seq);
		if (!looped && !tm)
			return NULL;
		/*
		 * if there are no tree operations for the oldest root, we
		 * simply return it. this should only happen if that (old)
		 * root is at level 0.
		 */
		if (!tm)
			break;

		/*
		 * if there's an operation that's not a root replacement, we
		 * found the oldest version of our root. normally, we'll find a
		 * MOD_LOG_KEY_REMOVE_WHILE_FREEING operation here.
		 */
		if (tm->op != MOD_LOG_ROOT_REPLACE)
			break;

		found = tm;
		root_logical = tm->old_root.logical;
		looped = 1;
	}

	/* if there's no old root to return, return what we found instead */
	if (!found)
		found = tm;

	return found;
}
/*
 * tm is a pointer to the first operation to rewind within eb. then, all
 * previous operations will be rewound (until we reach something older than
 * time_seq).
 */
static void
__tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct extent_buffer *eb,
		      u64 time_seq, struct tree_mod_elem *first_tm)
{
	u32 n;
	struct rb_node *next;
	struct tree_mod_elem *tm = first_tm;
	unsigned long o_dst;
	unsigned long o_src;
	unsigned long p_size = sizeof(struct btrfs_key_ptr);

	n = btrfs_header_nritems(eb);
	tree_mod_log_read_lock(fs_info);
	while (tm && tm->seq >= time_seq) {
		/*
		 * all the operations are recorded with the operator used for
		 * the modification. as we're going backwards, we do the
		 * opposite of each operation here.
		 */
		switch (tm->op) {
		case MOD_LOG_KEY_REMOVE_WHILE_FREEING:
			BUG_ON(tm->slot < n);
			/* Fallthrough */
		case MOD_LOG_KEY_REMOVE_WHILE_MOVING:
		case MOD_LOG_KEY_REMOVE:
			btrfs_set_node_key(eb, &tm->key, tm->slot);
			btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr);
			btrfs_set_node_ptr_generation(eb, tm->slot,
						      tm->generation);
			n++;
			break;
		case MOD_LOG_KEY_REPLACE:
			BUG_ON(tm->slot >= n);
			btrfs_set_node_key(eb, &tm->key, tm->slot);
			btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr);
			btrfs_set_node_ptr_generation(eb, tm->slot,
						      tm->generation);
			break;
		case MOD_LOG_KEY_ADD:
			/* if a move operation is needed it's in the log */
			n--;
			break;
		case MOD_LOG_MOVE_KEYS:
			o_dst = btrfs_node_key_ptr_offset(tm->slot);
			o_src = btrfs_node_key_ptr_offset(tm->move.dst_slot);
			memmove_extent_buffer(eb, o_dst, o_src,
					      tm->move.nr_items * p_size);
			break;
		case MOD_LOG_ROOT_REPLACE:
			/*
			 * this operation is special. for roots, this must be
			 * handled explicitly before rewinding.
			 * for non-roots, this operation may exist if the node
			 * was a root: root A -> child B; then A gets empty and
			 * B is promoted to the new root. in the mod log, we'll
			 * have a root-replace operation for B, a tree block
			 * that is not a root. we simply ignore that operation.
			 */
			break;
		}
		next = rb_next(&tm->node);
		if (!next)
			break;
		tm = rb_entry(next, struct tree_mod_elem, node);
		if (tm->logical != first_tm->logical)
			break;
	}
	tree_mod_log_read_unlock(fs_info);
	btrfs_set_header_nritems(eb, n);
}
/*
 * Called with eb read locked. If the buffer cannot be rewound, the same buffer
 * is returned. If rewind operations happen, a fresh buffer is returned. The
 * returned buffer is always read-locked. If the returned buffer is not the
 * input buffer, the lock on the input buffer is released and the input buffer
 * is freed (its refcount is decremented).
 */
static struct extent_buffer *
tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
		    struct extent_buffer *eb, u64 time_seq)
{
	struct extent_buffer *eb_rewin;
	struct tree_mod_elem *tm;

	if (!time_seq)
		return eb;

	if (btrfs_header_level(eb) == 0)
		return eb;

	tm = tree_mod_log_search(fs_info, eb->start, time_seq);
	if (!tm)
		return eb;

	btrfs_set_path_blocking(path);
	btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);

	if (tm->op == MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
		BUG_ON(tm->slot != 0);
		eb_rewin = alloc_dummy_extent_buffer(fs_info, eb->start);
		if (!eb_rewin) {
			btrfs_tree_read_unlock_blocking(eb);
			free_extent_buffer(eb);
			return NULL;
		}
		btrfs_set_header_bytenr(eb_rewin, eb->start);
		btrfs_set_header_backref_rev(eb_rewin,
					     btrfs_header_backref_rev(eb));
		btrfs_set_header_owner(eb_rewin, btrfs_header_owner(eb));
		btrfs_set_header_level(eb_rewin, btrfs_header_level(eb));
	} else {
		eb_rewin = btrfs_clone_extent_buffer(eb);
		if (!eb_rewin) {
			btrfs_tree_read_unlock_blocking(eb);
			free_extent_buffer(eb);
			return NULL;
		}
	}

	btrfs_clear_path_blocking(path, NULL, BTRFS_READ_LOCK);
	btrfs_tree_read_unlock_blocking(eb);
	free_extent_buffer(eb);

	extent_buffer_get(eb_rewin);
	btrfs_tree_read_lock(eb_rewin);
	__tree_mod_log_rewind(fs_info, eb_rewin, time_seq, tm);
	WARN_ON(btrfs_header_nritems(eb_rewin) >
		BTRFS_NODEPTRS_PER_BLOCK(fs_info));

	return eb_rewin;
}
/*
 * get_old_root() rewinds the state of @root's root node to the given @time_seq
 * value. If there are no changes, the current root->root_node is returned. If
 * anything changed in between, there's a fresh buffer allocated on which the
 * rewind operations are done. In any case, the returned buffer is read locked.
 * Returns NULL on error (with no locks held).
 */
static inline struct extent_buffer *
get_old_root(struct btrfs_root *root, u64 time_seq)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct tree_mod_elem *tm;
	struct extent_buffer *eb = NULL;
	struct extent_buffer *eb_root;
	struct extent_buffer *old;
	struct tree_mod_root *old_root = NULL;
	u64 old_generation = 0;
	u64 logical;

	eb_root = btrfs_read_lock_root_node(root);
	tm = __tree_mod_log_oldest_root(fs_info, eb_root, time_seq);
	if (!tm)
		return eb_root;

	if (tm->op == MOD_LOG_ROOT_REPLACE) {
		old_root = &tm->old_root;
		old_generation = tm->generation;
		logical = old_root->logical;
	} else {
		logical = eb_root->start;
	}

	tm = tree_mod_log_search(fs_info, logical, time_seq);
	if (old_root && tm && tm->op != MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
		btrfs_tree_read_unlock(eb_root);
		free_extent_buffer(eb_root);
		old = read_tree_block(fs_info, logical, 0);
		if (WARN_ON(IS_ERR(old) || !extent_buffer_uptodate(old))) {
			if (!IS_ERR(old))
				free_extent_buffer(old);
			btrfs_warn(fs_info,
				   "failed to read tree block %llu from get_old_root",
				   logical);
		} else {
			eb = btrfs_clone_extent_buffer(old);
			free_extent_buffer(old);
		}
	} else if (old_root) {
		btrfs_tree_read_unlock(eb_root);
		free_extent_buffer(eb_root);
		eb = alloc_dummy_extent_buffer(fs_info, logical);
	} else {
		btrfs_set_lock_blocking_rw(eb_root, BTRFS_READ_LOCK);
		eb = btrfs_clone_extent_buffer(eb_root);
		btrfs_tree_read_unlock_blocking(eb_root);
		free_extent_buffer(eb_root);
	}

	if (!eb)
		return NULL;
	extent_buffer_get(eb);
	btrfs_tree_read_lock(eb);
	if (old_root) {
		btrfs_set_header_bytenr(eb, eb->start);
		btrfs_set_header_backref_rev(eb, BTRFS_MIXED_BACKREF_REV);
		btrfs_set_header_owner(eb, btrfs_header_owner(eb_root));
		btrfs_set_header_level(eb, old_root->level);
		btrfs_set_header_generation(eb, old_generation);
	}
	if (tm)
		__tree_mod_log_rewind(fs_info, eb, time_seq, tm);
	else
		WARN_ON(btrfs_header_level(eb) != 0);
	WARN_ON(btrfs_header_nritems(eb) > BTRFS_NODEPTRS_PER_BLOCK(fs_info));

	return eb;
}
int btrfs_old_root_level(struct btrfs_root *root, u64 time_seq)
{
	struct tree_mod_elem *tm;
	int level;
	struct extent_buffer *eb_root = btrfs_root_node(root);

	tm = __tree_mod_log_oldest_root(root->fs_info, eb_root, time_seq);
	if (tm && tm->op == MOD_LOG_ROOT_REPLACE) {
		level = tm->old_root.level;
	} else {
		level = btrfs_header_level(eb_root);
	}
	free_extent_buffer(eb_root);

	return level;
}
static inline int should_cow_block(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct extent_buffer *buf)
{
	if (btrfs_is_testing(root->fs_info))
		return 0;

	/* ensure we can see the force_cow */
	smp_rmb();

	/*
	 * We do not need to cow a block if
	 * 1) this block is not created or changed in this transaction;
	 * 2) this block does not belong to TREE_RELOC tree;
	 * 3) the root is not forced COW.
	 *
	 * What is forced COW:
	 *    when we create a snapshot during committing the transaction,
	 *    after we've finished copying the src root, we must COW the shared
	 *    block to ensure the metadata consistency.
	 */
	if (btrfs_header_generation(buf) == trans->transid &&
	    !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN) &&
	    !(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID &&
	      btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)) &&
	    !test_bit(BTRFS_ROOT_FORCE_COW, &root->state))
		return 0;
	return 1;
}
/*
 * cows a single block, see __btrfs_cow_block for the real work.
 * This version of it has extra checks so that a block isn't COWed more than
 * once per transaction, as long as it hasn't been written yet
 */
noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
		    struct btrfs_root *root, struct extent_buffer *buf,
		    struct extent_buffer *parent, int parent_slot,
		    struct extent_buffer **cow_ret)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 search_start;
	int ret;

	if (trans->transaction != fs_info->running_transaction)
		WARN(1, KERN_CRIT "trans %llu running %llu\n",
		       trans->transid,
		       fs_info->running_transaction->transid);

	if (trans->transid != fs_info->generation)
		WARN(1, KERN_CRIT "trans %llu running %llu\n",
		       trans->transid, fs_info->generation);

	if (!should_cow_block(trans, root, buf)) {
		trans->dirty = true;
		*cow_ret = buf;
		return 0;
	}

	search_start = buf->start & ~((u64)SZ_1G - 1);

	if (parent)
		btrfs_set_lock_blocking(parent);
	btrfs_set_lock_blocking(buf);

	ret = __btrfs_cow_block(trans, root, buf, parent,
				 parent_slot, cow_ret, search_start, 0);

	trace_btrfs_cow_block(root, buf, *cow_ret);

	return ret;
}
/*
 * helper function for defrag to decide if two blocks pointed to by a
 * node are actually close by
 */
static int close_blocks(u64 blocknr, u64 other, u32 blocksize)
{
	if (blocknr < other && other - (blocknr + blocksize) < 32768)
		return 1;
	if (blocknr > other && blocknr - (other + blocksize) < 32768)
		return 1;
	return 0;
}
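
/*
 * An illustrative sketch (not from the original source): how
 * close_blocks() behaves for a 16K node size.  Two blocks count as
 * close when the gap between them, excluding the block itself, is
 * under 32K.  The helper name and values are hypothetical.
 */
static inline void close_blocks_example(void)
{
	u32 blocksize = 16384;

	/* gap of 16K between the blocks -> close */
	WARN_ON(close_blocks(0, 32768, blocksize) != 1);
	/* gap of 48K between the blocks -> not close */
	WARN_ON(close_blocks(0, 65536, blocksize) != 0);
}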
/*
 * compare two keys in a memcmp fashion
 */
static int comp_keys(const struct btrfs_disk_key *disk,
		     const struct btrfs_key *k2)
{
	struct btrfs_key k1;

	btrfs_disk_key_to_cpu(&k1, disk);

	return btrfs_comp_cpu_keys(&k1, k2);
}
/*
 * same as comp_keys only with two btrfs_key's
 */
int btrfs_comp_cpu_keys(const struct btrfs_key *k1, const struct btrfs_key *k2)
{
	if (k1->objectid > k2->objectid)
		return 1;
	if (k1->objectid < k2->objectid)
		return -1;
	if (k1->type > k2->type)
		return 1;
	if (k1->type < k2->type)
		return -1;
	if (k1->offset > k2->offset)
		return 1;
	if (k1->offset < k2->offset)
		return -1;
	return 0;
}
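
/*
 * An illustrative sketch (hypothetical helper, not from the original
 * source): keys order by objectid, then type, then offset, so all
 * items of one inode cluster together in a leaf.
 */
static inline void comp_cpu_keys_example(void)
{
	struct btrfs_key a = { .objectid = 256, .type = BTRFS_INODE_ITEM_KEY,
			       .offset = 0 };
	struct btrfs_key b = { .objectid = 256, .type = BTRFS_EXTENT_DATA_KEY,
			       .offset = 0 };

	/* INODE_ITEM (1) sorts before EXTENT_DATA (108) for the same inode */
	WARN_ON(btrfs_comp_cpu_keys(&a, &b) != -1);
}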
/*
 * this is used by the defrag code to go through all the
 * leaves pointed to by a node and reallocate them so that
 * disk order is close to key order
 */
int btrfs_realloc_node(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct extent_buffer *parent,
		       int start_slot, u64 *last_ret,
		       struct btrfs_key *progress)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_buffer *cur;
	u64 blocknr;
	u64 gen;
	u64 search_start = *last_ret;
	u64 last_block = 0;
	u64 other;
	u32 parent_nritems;
	int end_slot;
	int i;
	int err = 0;
	int parent_level;
	int uptodate;
	u32 blocksize;
	int progress_passed = 0;
	struct btrfs_disk_key disk_key;

	parent_level = btrfs_header_level(parent);

	WARN_ON(trans->transaction != fs_info->running_transaction);
	WARN_ON(trans->transid != fs_info->generation);

	parent_nritems = btrfs_header_nritems(parent);
	blocksize = fs_info->nodesize;
	end_slot = parent_nritems - 1;

	if (parent_nritems <= 1)
		return 0;

	btrfs_set_lock_blocking(parent);

	for (i = start_slot; i <= end_slot; i++) {
		int close = 1;

		btrfs_node_key(parent, &disk_key, i);
		if (!progress_passed && comp_keys(&disk_key, progress) < 0)
			continue;

		progress_passed = 1;
		blocknr = btrfs_node_blockptr(parent, i);
		gen = btrfs_node_ptr_generation(parent, i);
		if (last_block == 0)
			last_block = blocknr;

		if (i > 0) {
			other = btrfs_node_blockptr(parent, i - 1);
			close = close_blocks(blocknr, other, blocksize);
		}
		if (!close && i < end_slot) {
			other = btrfs_node_blockptr(parent, i + 1);
			close = close_blocks(blocknr, other, blocksize);
		}
		if (close) {
			last_block = blocknr;
			continue;
		}

		cur = find_extent_buffer(fs_info, blocknr);
		if (cur)
			uptodate = btrfs_buffer_uptodate(cur, gen, 0);
		else
			uptodate = 0;
		if (!cur || !uptodate) {
			if (!cur) {
				cur = read_tree_block(fs_info, blocknr, gen);
				if (IS_ERR(cur)) {
					return PTR_ERR(cur);
				} else if (!extent_buffer_uptodate(cur)) {
					free_extent_buffer(cur);
					return -EIO;
				}
			} else if (!uptodate) {
				err = btrfs_read_buffer(cur, gen);
				if (err) {
					free_extent_buffer(cur);
					return err;
				}
			}
		}
		if (search_start == 0)
			search_start = last_block;

		btrfs_tree_lock(cur);
		btrfs_set_lock_blocking(cur);
		err = __btrfs_cow_block(trans, root, cur, parent, i,
					&cur, search_start,
					min(16 * blocksize,
					    (end_slot - i) * blocksize));
		if (err) {
			btrfs_tree_unlock(cur);
			free_extent_buffer(cur);
			break;
		}
		search_start = cur->start;
		last_block = cur->start;
		*last_ret = search_start;
		btrfs_tree_unlock(cur);
		free_extent_buffer(cur);
	}
	return err;
}
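
/*
 * An illustrative sketch (hypothetical caller, not from the original
 * source): defrag hands btrfs_realloc_node() a level-1 node and asks it
 * to rewrite the leaves near each other, carrying the allocation hint
 * forward in last_ret.  A zeroed progress key means no slot is skipped.
 */
static inline int realloc_node_example(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct extent_buffer *node)
{
	u64 last_ret = 0;
	struct btrfs_key progress = {};

	return btrfs_realloc_node(trans, root, node, 0, &last_ret,
				  &progress);
}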
/*
 * search for key in the extent_buffer.  The items start at offset p,
 * and they are item_size apart.  There are 'max' items in p.
 *
 * the slot in the array is returned via slot, and it points to
 * the place where you would insert key if it is not found in
 * the array.
 *
 * slot may point to max if the key is bigger than all of the keys
 */
static noinline int generic_bin_search(struct extent_buffer *eb,
				       unsigned long p, int item_size,
				       const struct btrfs_key *key,
				       int max, int *slot)
{
	int low = 0;
	int high = max;
	int mid;
	int ret;
	struct btrfs_disk_key *tmp = NULL;
	struct btrfs_disk_key unaligned;
	unsigned long offset;
	char *kaddr = NULL;
	unsigned long map_start = 0;
	unsigned long map_len = 0;
	int err;

	if (low > high) {
		btrfs_err(eb->fs_info,
		 "%s: low (%d) > high (%d) eb %llu owner %llu level %d",
			  __func__, low, high, eb->start,
			  btrfs_header_owner(eb), btrfs_header_level(eb));
		return -EINVAL;
	}

	while (low < high) {
		mid = (low + high) / 2;
		offset = p + mid * item_size;

		if (!kaddr || offset < map_start ||
		    (offset + sizeof(struct btrfs_disk_key)) >
		    map_start + map_len) {

			err = map_private_extent_buffer(eb, offset,
						sizeof(struct btrfs_disk_key),
						&kaddr, &map_start, &map_len);

			if (!err) {
				tmp = (struct btrfs_disk_key *)(kaddr + offset -
							map_start);
			} else if (err == 1) {
				read_extent_buffer(eb, &unaligned,
						   offset, sizeof(unaligned));
				tmp = &unaligned;
			} else {
				return err;
			}

		} else {
			tmp = (struct btrfs_disk_key *)(kaddr + offset -
							map_start);
		}
		ret = comp_keys(tmp, key);

		if (ret < 0)
			low = mid + 1;
		else if (ret > 0)
			high = mid;
		else {
			*slot = mid;
			return 0;
		}
	}
	*slot = low;
	return 1;
}
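
/*
 * An illustrative sketch of the search contract (hypothetical caller,
 * assuming a node-level eb): on an exact match the return value is 0
 * and *slot is the matching index; on a miss the return value is 1 and
 * *slot is the insertion point, possibly equal to the item count.
 */
static inline void bin_search_contract_example(struct extent_buffer *eb,
					       const struct btrfs_key *key)
{
	int slot;
	int ret;

	ret = generic_bin_search(eb, offsetof(struct btrfs_node, ptrs),
				 sizeof(struct btrfs_key_ptr), key,
				 btrfs_header_nritems(eb), &slot);
	if (ret == 0)
		pr_debug("key found at slot %d\n", slot);
	else if (ret == 1)
		pr_debug("key absent, insert position %d\n", slot);
}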
/*
 * simple bin_search frontend that does the right thing for
 * leaves vs nodes
 */
static int bin_search(struct extent_buffer *eb, const struct btrfs_key *key,
		      int level, int *slot)
{
	if (level == 0)
		return generic_bin_search(eb,
					  offsetof(struct btrfs_leaf, items),
					  sizeof(struct btrfs_item),
					  key, btrfs_header_nritems(eb),
					  slot);
	else
		return generic_bin_search(eb,
					  offsetof(struct btrfs_node, ptrs),
					  sizeof(struct btrfs_key_ptr),
					  key, btrfs_header_nritems(eb),
					  slot);
}

int btrfs_bin_search(struct extent_buffer *eb, const struct btrfs_key *key,
		     int level, int *slot)
{
	return bin_search(eb, key, level, slot);
}
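
/*
 * An illustrative sketch (hypothetical wrapper, not from the original
 * source): passing level 0 makes btrfs_bin_search() use the leaf item
 * layout, and a zero return means the exact key exists in this buffer.
 */
static inline bool leaf_has_key_example(struct extent_buffer *leaf,
					const struct btrfs_key *key)
{
	int slot;

	return btrfs_bin_search(leaf, key, 0, &slot) == 0;
}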
static void root_add_used(struct btrfs_root *root, u32 size)
{
	spin_lock(&root->accounting_lock);
	btrfs_set_root_used(&root->root_item,
			    btrfs_root_used(&root->root_item) + size);
	spin_unlock(&root->accounting_lock);
}

static void root_sub_used(struct btrfs_root *root, u32 size)
{
	spin_lock(&root->accounting_lock);
	btrfs_set_root_used(&root->root_item,
			    btrfs_root_used(&root->root_item) - size);
	spin_unlock(&root->accounting_lock);
}
/* given a node and slot number, this reads the blocks it points to.  The
 * extent buffer is returned with a reference taken (but unlocked).
 */
static noinline struct extent_buffer *
read_node_slot(struct btrfs_fs_info *fs_info, struct extent_buffer *parent,
	       int slot)
{
	int level = btrfs_header_level(parent);
	struct extent_buffer *eb;

	if (slot < 0 || slot >= btrfs_header_nritems(parent))
		return ERR_PTR(-ENOENT);

	BUG_ON(level == 0);

	eb = read_tree_block(fs_info, btrfs_node_blockptr(parent, slot),
			     btrfs_node_ptr_generation(parent, slot));
	if (!IS_ERR(eb) && !extent_buffer_uptodate(eb)) {
		free_extent_buffer(eb);
		eb = ERR_PTR(-EIO);
	}

	return eb;
}
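
/*
 * An illustrative sketch of the calling convention (hypothetical helper,
 * not from the original source): the result is either a referenced
 * extent buffer or an ERR_PTR, never NULL, so callers must test with
 * IS_ERR() and drop the reference when done.
 */
static inline int read_node_slot_example(struct btrfs_fs_info *fs_info,
					 struct extent_buffer *parent,
					 int slot)
{
	struct extent_buffer *eb;

	eb = read_node_slot(fs_info, parent, slot);
	if (IS_ERR(eb))
		return PTR_ERR(eb);	/* -ENOENT or -EIO */
	/* ... use eb here ... */
	free_extent_buffer(eb);		/* drop the reference taken for us */
	return 0;
}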
/*
 * node level balancing, used to make sure nodes are in proper order for
 * item deletion.  We balance from the top down, so we have to make sure
 * that a deletion won't leave a node completely empty later on.
 */
static noinline int balance_level(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root,
			 struct btrfs_path *path, int level)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_buffer *right = NULL;
	struct extent_buffer *mid;
	struct extent_buffer *left = NULL;
	struct extent_buffer *parent = NULL;
	int ret = 0;
	int wret;
	int pslot;
	int orig_slot = path->slots[level];
	u64 orig_ptr;

	if (level == 0)
		return 0;

	mid = path->nodes[level];

	WARN_ON(path->locks[level] != BTRFS_WRITE_LOCK &&
		path->locks[level] != BTRFS_WRITE_LOCK_BLOCKING);
	WARN_ON(btrfs_header_generation(mid) != trans->transid);

	orig_ptr = btrfs_node_blockptr(mid, orig_slot);

	if (level < BTRFS_MAX_LEVEL - 1) {
		parent = path->nodes[level + 1];
		pslot = path->slots[level + 1];
	}

	/*
	 * deal with the case where there is only one pointer in the root
	 * by promoting the node below to a root
	 */
	if (!parent) {
		struct extent_buffer *child;

		if (btrfs_header_nritems(mid) != 1)
			return 0;

		/* promote the child to a root */
		child = read_node_slot(fs_info, mid, 0);
		if (IS_ERR(child)) {
			ret = PTR_ERR(child);
			btrfs_handle_fs_error(fs_info, ret, NULL);
			goto enospc;
		}

		btrfs_tree_lock(child);
		btrfs_set_lock_blocking(child);
		ret = btrfs_cow_block(trans, root, child, mid, 0, &child);
		if (ret) {
			btrfs_tree_unlock(child);
			free_extent_buffer(child);
			goto enospc;
		}

		tree_mod_log_set_root_pointer(root, child, 1);
		rcu_assign_pointer(root->node, child);

		add_root_to_dirty_list(root);
		btrfs_tree_unlock(child);

		path->locks[level] = 0;
		path->nodes[level] = NULL;
		clean_tree_block(fs_info, mid);
		btrfs_tree_unlock(mid);
		/* once for the path */
		free_extent_buffer(mid);

		root_sub_used(root, mid->len);
		btrfs_free_tree_block(trans, root, mid, 0, 1);
		/* once for the root ptr */
		free_extent_buffer_stale(mid);
		return 0;
	}
	if (btrfs_header_nritems(mid) >
	    BTRFS_NODEPTRS_PER_BLOCK(fs_info) / 4)
		return 0;

	left = read_node_slot(fs_info, parent, pslot - 1);
	if (IS_ERR(left))
		left = NULL;

	if (left) {
		btrfs_tree_lock(left);
		btrfs_set_lock_blocking(left);
		wret = btrfs_cow_block(trans, root, left,
				       parent, pslot - 1, &left);
		if (wret) {
			ret = wret;
			goto enospc;
		}
	}

	right = read_node_slot(fs_info, parent, pslot + 1);
	if (IS_ERR(right))
		right = NULL;

	if (right) {
		btrfs_tree_lock(right);
		btrfs_set_lock_blocking(right);
		wret = btrfs_cow_block(trans, root, right,
				       parent, pslot + 1, &right);
		if (wret) {
			ret = wret;
			goto enospc;
		}
	}

	/* first, try to make some room in the middle buffer */
	if (left) {
		orig_slot += btrfs_header_nritems(left);
		wret = push_node_left(trans, fs_info, left, mid, 1);
		if (wret < 0)
			ret = wret;
	}

	/*
	 * then try to empty the right most buffer into the middle
	 */
	if (right) {
		wret = push_node_left(trans, fs_info, mid, right, 1);
		if (wret < 0 && wret != -ENOSPC)
			ret = wret;
		if (btrfs_header_nritems(right) == 0) {
			clean_tree_block(fs_info, right);
			btrfs_tree_unlock(right);
			del_ptr(root, path, level + 1, pslot + 1);
			root_sub_used(root, right->len);
			btrfs_free_tree_block(trans, root, right, 0, 1);
			free_extent_buffer_stale(right);
			right = NULL;
		} else {
			struct btrfs_disk_key right_key;
			btrfs_node_key(right, &right_key, 0);
			tree_mod_log_set_node_key(fs_info, parent,
						  pslot + 1, 0);
			btrfs_set_node_key(parent, &right_key, pslot + 1);
			btrfs_mark_buffer_dirty(parent);
		}
	}
	if (btrfs_header_nritems(mid) == 1) {
		/*
		 * we're not allowed to leave a node with one item in the
		 * tree during a delete.  A deletion from lower in the tree
		 * could try to delete the only pointer in this node.
		 * So, pull some keys from the left.
		 * There has to be a left pointer at this point because
		 * otherwise we would have pulled some pointers from the
		 * right
		 */
		if (!left) {
			ret = -EROFS;
			btrfs_handle_fs_error(fs_info, ret, NULL);
			goto enospc;
		}
		wret = balance_node_right(trans, fs_info, mid, left);
		if (wret < 0) {
			ret = wret;
			goto enospc;
		}
		if (wret == 1) {
			wret = push_node_left(trans, fs_info, left, mid, 1);
			if (wret < 0)
				ret = wret;
		}
		BUG_ON(wret == 1);
	}
	if (btrfs_header_nritems(mid) == 0) {
		clean_tree_block(fs_info, mid);
		btrfs_tree_unlock(mid);
		del_ptr(root, path, level + 1, pslot);
		root_sub_used(root, mid->len);
		btrfs_free_tree_block(trans, root, mid, 0, 1);
		free_extent_buffer_stale(mid);
		mid = NULL;
	} else {
		/* update the parent key to reflect our changes */
		struct btrfs_disk_key mid_key;
		btrfs_node_key(mid, &mid_key, 0);
		tree_mod_log_set_node_key(fs_info, parent, pslot, 0);
		btrfs_set_node_key(parent, &mid_key, pslot);
		btrfs_mark_buffer_dirty(parent);
	}

	/* update the path */
	if (left) {
		if (btrfs_header_nritems(left) > orig_slot) {
			extent_buffer_get(left);
			/* left was locked after cow */
			path->nodes[level] = left;
			path->slots[level + 1] -= 1;
			path->slots[level] = orig_slot;
			if (mid) {
				btrfs_tree_unlock(mid);
				free_extent_buffer(mid);
			}
		} else {
			orig_slot -= btrfs_header_nritems(left);
			path->slots[level] = orig_slot;
		}
	}
	/* double check we haven't messed things up */
	if (orig_ptr !=
	    btrfs_node_blockptr(path->nodes[level], path->slots[level]))
		BUG();
enospc:
	if (right) {
		btrfs_tree_unlock(right);
		free_extent_buffer(right);
	}
	if (left) {
		if (path->nodes[level] != left)
			btrfs_tree_unlock(left);
		free_extent_buffer(left);
	}
	return ret;
}
/* Node balancing for insertion.  Here we only split or push nodes around
 * when they are completely full.  This is also done top down, so we
 * have to be pessimistic.
 */
static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path, int level)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_buffer *right = NULL;
	struct extent_buffer *mid;
	struct extent_buffer *left = NULL;
	struct extent_buffer *parent = NULL;
	int ret = 0;
	int wret;
	int pslot;
	int orig_slot = path->slots[level];

	if (level == 0)
		return 1;

	mid = path->nodes[level];
	WARN_ON(btrfs_header_generation(mid) != trans->transid);

	if (level < BTRFS_MAX_LEVEL - 1) {
		parent = path->nodes[level + 1];
		pslot = path->slots[level + 1];
	}

	if (!parent)
		return 1;

	left = read_node_slot(fs_info, parent, pslot - 1);
	if (IS_ERR(left))
		left = NULL;

	/* first, try to make some room in the middle buffer */
	if (left) {
		u32 left_nr;

		btrfs_tree_lock(left);
		btrfs_set_lock_blocking(left);

		left_nr = btrfs_header_nritems(left);
		if (left_nr >= BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 1) {
			wret = 1;
		} else {
			ret = btrfs_cow_block(trans, root, left, parent,
					      pslot - 1, &left);
			if (ret)
				wret = 1;
			else {
				wret = push_node_left(trans, fs_info,
						      left, mid, 0);
			}
		}
		if (wret < 0)
			ret = wret;
		if (wret == 0) {
			struct btrfs_disk_key disk_key;
			orig_slot += left_nr;
			btrfs_node_key(mid, &disk_key, 0);
			tree_mod_log_set_node_key(fs_info, parent, pslot, 0);
			btrfs_set_node_key(parent, &disk_key, pslot);
			btrfs_mark_buffer_dirty(parent);
			if (btrfs_header_nritems(left) > orig_slot) {
				path->nodes[level] = left;
				path->slots[level + 1] -= 1;
				path->slots[level] = orig_slot;
				btrfs_tree_unlock(mid);
				free_extent_buffer(mid);
			} else {
				orig_slot -=
					btrfs_header_nritems(left);
				path->slots[level] = orig_slot;
				btrfs_tree_unlock(left);
				free_extent_buffer(left);
			}
			return 0;
		}
		btrfs_tree_unlock(left);
		free_extent_buffer(left);
	}
	right = read_node_slot(fs_info, parent, pslot + 1);
	if (IS_ERR(right))
		right = NULL;

	/*
	 * then try to empty the right most buffer into the middle
	 */
	if (right) {
		u32 right_nr;

		btrfs_tree_lock(right);
		btrfs_set_lock_blocking(right);

		right_nr = btrfs_header_nritems(right);
		if (right_nr >= BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 1) {
			wret = 1;
		} else {
			ret = btrfs_cow_block(trans, root, right,
					      parent, pslot + 1,
					      &right);
			if (ret)
				wret = 1;
			else {
				wret = balance_node_right(trans, fs_info,
							  right, mid);
			}
		}
		if (wret < 0)
			ret = wret;
		if (wret == 0) {
			struct btrfs_disk_key disk_key;

			btrfs_node_key(right, &disk_key, 0);
			tree_mod_log_set_node_key(fs_info, parent,
						  pslot + 1, 0);
			btrfs_set_node_key(parent, &disk_key, pslot + 1);
			btrfs_mark_buffer_dirty(parent);

			if (btrfs_header_nritems(mid) <= orig_slot) {
				path->nodes[level] = right;
				path->slots[level + 1] += 1;
				path->slots[level] = orig_slot -
					btrfs_header_nritems(mid);
				btrfs_tree_unlock(mid);
				free_extent_buffer(mid);
			} else {
				btrfs_tree_unlock(right);
				free_extent_buffer(right);
			}
			return 0;
		}
		btrfs_tree_unlock(right);
		free_extent_buffer(right);
	}
	return 1;
}
/*
 * readahead one full node of leaves, finding things that are close
 * to the block in 'slot', and triggering ra on them.
 */
static void reada_for_search(struct btrfs_fs_info *fs_info,
			     struct btrfs_path *path,
			     int level, int slot, u64 objectid)
{
	struct extent_buffer *node;
	struct btrfs_disk_key disk_key;
	u32 nritems;
	u64 search;
	u64 target;
	u64 nread = 0;
	struct extent_buffer *eb;
	u32 nr;
	u32 blocksize;
	u32 nscan = 0;

	if (level != 1)
		return;

	if (!path->nodes[level])
		return;

	node = path->nodes[level];

	search = btrfs_node_blockptr(node, slot);
	blocksize = fs_info->nodesize;
	eb = find_extent_buffer(fs_info, search);
	if (eb) {
		free_extent_buffer(eb);
		return;
	}

	target = search;

	nritems = btrfs_header_nritems(node);
	nr = slot;

	while (1) {
		if (path->reada == READA_BACK) {
			if (nr == 0)
				break;
			nr--;
		} else if (path->reada == READA_FORWARD) {
			nr++;
			if (nr >= nritems)
				break;
		}
		if (path->reada == READA_BACK && objectid) {
			btrfs_node_key(node, &disk_key, nr);
			if (btrfs_disk_key_objectid(&disk_key) != objectid)
				break;
		}
		search = btrfs_node_blockptr(node, nr);
		if ((search <= target && target - search <= 65536) ||
		    (search > target && search - target <= 65536)) {
			readahead_tree_block(fs_info, search);
			nread += blocksize;
		}
		nscan++;
		if ((nread > 65536 || nscan > 32))
			break;
	}
}
static noinline void reada_for_balance(struct btrfs_fs_info *fs_info,
				       struct btrfs_path *path, int level)
{
	int slot;
	int nritems;
	struct extent_buffer *parent;
	struct extent_buffer *eb;
	u64 gen;
	u64 block1 = 0;
	u64 block2 = 0;

	parent = path->nodes[level + 1];
	if (!parent)
		return;

	nritems = btrfs_header_nritems(parent);
	slot = path->slots[level + 1];

	if (slot > 0) {
		block1 = btrfs_node_blockptr(parent, slot - 1);
		gen = btrfs_node_ptr_generation(parent, slot - 1);
		eb = find_extent_buffer(fs_info, block1);
		/*
		 * if we get -EAGAIN from btrfs_buffer_uptodate, we
		 * don't want to return -EAGAIN here.  That will loop
		 * forever
		 */
		if (eb && btrfs_buffer_uptodate(eb, gen, 1) != 0)
			block1 = 0;
		free_extent_buffer(eb);
	}
	if (slot + 1 < nritems) {
		block2 = btrfs_node_blockptr(parent, slot + 1);
		gen = btrfs_node_ptr_generation(parent, slot + 1);
		eb = find_extent_buffer(fs_info, block2);
		if (eb && btrfs_buffer_uptodate(eb, gen, 1) != 0)
			block2 = 0;
		free_extent_buffer(eb);
	}

	if (block1)
		readahead_tree_block(fs_info, block1);
	if (block2)
		readahead_tree_block(fs_info, block2);
}
/*
 * when we walk down the tree, it is usually safe to unlock the higher layers
 * in the tree.  The exceptions are when our path goes through slot 0, because
 * operations on the tree might require changing key pointers higher up in the
 * tree.
 *
 * callers might also have set path->keep_locks, which tells this code to keep
 * the lock if the path points to the last slot in the block.  This is part of
 * walking through the tree, and selecting the next slot in the higher block.
 *
 * lowest_unlock sets the lowest level in the tree we're allowed to unlock.  so
 * if lowest_unlock is 1, level 0 won't be unlocked
 */
static noinline void unlock_up(struct btrfs_path *path, int level,
			       int lowest_unlock, int min_write_lock_level,
			       int *write_lock_level)
{
	int i;
	int skip_level = level;
	int no_skips = 0;
	struct extent_buffer *t;

	for (i = level; i < BTRFS_MAX_LEVEL; i++) {
		if (!path->nodes[i])
			break;
		if (!path->locks[i])
			break;
		if (!no_skips && path->slots[i] == 0) {
			skip_level = i + 1;
			continue;
		}
		if (!no_skips && path->keep_locks) {
			u32 nritems;
			t = path->nodes[i];
			nritems = btrfs_header_nritems(t);
			if (nritems < 1 || path->slots[i] >= nritems - 1) {
				skip_level = i + 1;
				continue;
			}
		}
		if (skip_level < i && i >= lowest_unlock)
			no_skips = 1;

		t = path->nodes[i];
		if (i >= lowest_unlock && i > skip_level && path->locks[i]) {
			btrfs_tree_unlock_rw(t, path->locks[i]);
			path->locks[i] = 0;
			if (write_lock_level &&
			    i > min_write_lock_level &&
			    i <= *write_lock_level) {
				*write_lock_level = i - 1;
			}
		}
	}
}
/*
 * This releases any locks held in the path starting at level and
 * going all the way up to the root.
 *
 * btrfs_search_slot will keep the lock held on higher nodes in a few
 * corner cases, such as COW of the block at slot zero in the node.  This
 * ignores those rules, and it should only be called when there are no
 * more updates to be done higher up in the tree.
 */
noinline void btrfs_unlock_up_safe(struct btrfs_path *path, int level)
{
	int i;

	if (path->keep_locks)
		return;

	for (i = level; i < BTRFS_MAX_LEVEL; i++) {
		if (!path->nodes[i])
			continue;
		if (!path->locks[i])
			continue;
		btrfs_tree_unlock_rw(path->nodes[i], path->locks[i]);
		path->locks[i] = 0;
	}
}
/*
 * helper function for btrfs_search_slot.  The goal is to find a block
 * in cache without setting the path to blocking.  If we find the block
 * we return zero and the path is unchanged.
 *
 * If we can't find the block, we set the path blocking and do some
 * reada.  -EAGAIN is returned and the search must be repeated.
 */
static int
read_block_for_search(struct btrfs_root *root, struct btrfs_path *p,
		      struct extent_buffer **eb_ret, int level, int slot,
		      const struct btrfs_key *key)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 blocknr;
	u64 gen;
	struct extent_buffer *b = *eb_ret;
	struct extent_buffer *tmp;
	int ret;

	blocknr = btrfs_node_blockptr(b, slot);
	gen = btrfs_node_ptr_generation(b, slot);

	tmp = find_extent_buffer(fs_info, blocknr);
	if (tmp) {
		/* first we do an atomic uptodate check */
		if (btrfs_buffer_uptodate(tmp, gen, 1) > 0) {
			*eb_ret = tmp;
			return 0;
		}

		/* the pages were up to date, but we failed
		 * the generation number check.  Do a full
		 * read for the generation number that is correct.
		 * We must do this without dropping locks so
		 * we can trust our generation number
		 */
		btrfs_set_path_blocking(p);

		/* now we're allowed to do a blocking uptodate check */
		ret = btrfs_read_buffer(tmp, gen);
		if (!ret) {
			*eb_ret = tmp;
			return 0;
		}
		free_extent_buffer(tmp);
		btrfs_release_path(p);
		return -EIO;
	}

	/*
	 * reduce lock contention at high levels
	 * of the btree by dropping locks before
	 * we read.  Don't release the lock on the current
	 * level because we need to walk this node to figure
	 * out which blocks to read.
	 */
	btrfs_unlock_up_safe(p, level + 1);
	btrfs_set_path_blocking(p);

	free_extent_buffer(tmp);
	if (p->reada != READA_NONE)
		reada_for_search(fs_info, p, level, slot, key->objectid);

	btrfs_release_path(p);

	ret = -EAGAIN;
	tmp = read_tree_block(fs_info, blocknr, 0);
	if (!IS_ERR(tmp)) {
		/*
		 * If the read above didn't mark this buffer up to date,
		 * it will never end up being up to date.  Set ret to EIO now
		 * and give up so that our caller doesn't loop forever
		 * on our EAGAINs.
		 */
		if (!btrfs_buffer_uptodate(tmp, 0, 0))
			ret = -EIO;
		free_extent_buffer(tmp);
	} else {
		ret = PTR_ERR(tmp);
	}
	return ret;
}
/*
 * helper function for btrfs_search_slot.  This does all of the checks
 * for node-level blocks and does any balancing required based on
 * the ins_len.
 *
 * If no extra work was required, zero is returned.  If we had to
 * drop the path, -EAGAIN is returned and btrfs_search_slot must
 * start over
 */
static int
setup_nodes_for_search(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct btrfs_path *p,
		       struct extent_buffer *b, int level, int ins_len,
		       int *write_lock_level)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret;

	if ((p->search_for_split || ins_len > 0) && btrfs_header_nritems(b) >=
	    BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 3) {
		int sret;

		if (*write_lock_level < level + 1) {
			*write_lock_level = level + 1;
			btrfs_release_path(p);
			goto again;
		}

		btrfs_set_path_blocking(p);
		reada_for_balance(fs_info, p, level);
		sret = split_node(trans, root, p, level);
		btrfs_clear_path_blocking(p, NULL, 0);

		BUG_ON(sret > 0);
		if (sret) {
			ret = sret;
			goto done;
		}
		b = p->nodes[level];
	} else if (ins_len < 0 && btrfs_header_nritems(b) <
		   BTRFS_NODEPTRS_PER_BLOCK(fs_info) / 2) {
		int sret;

		if (*write_lock_level < level + 1) {
			*write_lock_level = level + 1;
			btrfs_release_path(p);
			goto again;
		}

		btrfs_set_path_blocking(p);
		reada_for_balance(fs_info, p, level);
		sret = balance_level(trans, root, p, level);
		btrfs_clear_path_blocking(p, NULL, 0);

		if (sret) {
			ret = sret;
			goto done;
		}
		b = p->nodes[level];
		if (!b) {
			btrfs_release_path(p);
			goto again;
		}
		BUG_ON(btrfs_header_nritems(b) == 1);
	}
	return 0;

again:
	ret = -EAGAIN;
done:
	return ret;
}
static void key_search_validate(struct extent_buffer *b,
				const struct btrfs_key *key,
				int level)
{
#ifdef CONFIG_BTRFS_ASSERT
	struct btrfs_disk_key disk_key;

	btrfs_cpu_key_to_disk(&disk_key, key);

	if (level == 0)
		ASSERT(!memcmp_extent_buffer(b, &disk_key,
		    offsetof(struct btrfs_leaf, items[0].key),
		    sizeof(disk_key)));
	else
		ASSERT(!memcmp_extent_buffer(b, &disk_key,
		    offsetof(struct btrfs_node, ptrs[0].key),
		    sizeof(disk_key)));
#endif
}

static int key_search(struct extent_buffer *b, const struct btrfs_key *key,
		      int level, int *prev_cmp, int *slot)
{
	if (*prev_cmp != 0) {
		*prev_cmp = bin_search(b, key, level, slot);
		return *prev_cmp;
	}

	key_search_validate(b, key, level);
	*slot = 0;

	return 0;
}
int btrfs_find_item(struct btrfs_root *fs_root, struct btrfs_path *path,
		u64 iobjectid, u64 ioff, u8 key_type,
		struct btrfs_key *found_key)
{
	int ret;
	struct btrfs_key key;
	struct extent_buffer *eb;

	ASSERT(path);
	ASSERT(found_key);

	key.type = key_type;
	key.objectid = iobjectid;
	key.offset = ioff;

	ret = btrfs_search_slot(NULL, fs_root, &key, path, 0, 0);
	if (ret < 0)
		return ret;

	eb = path->nodes[0];
	if (ret && path->slots[0] >= btrfs_header_nritems(eb)) {
		ret = btrfs_next_leaf(fs_root, path);
		if (ret)
			return ret;
		eb = path->nodes[0];
	}

	btrfs_item_key_to_cpu(eb, found_key, path->slots[0]);
	if (found_key->type != key.type ||
			found_key->objectid != key.objectid)
		return 1;

	return 0;
}
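
/*
 * An illustrative sketch (hypothetical caller, hypothetical values):
 * btrfs_find_item() returns 0 when any item with the requested
 * objectid and type exists at or after the given offset, with
 * found_key holding the item's actual key; 1 means no such item.
 */
static inline int find_item_example(struct btrfs_root *tree_root,
				    struct btrfs_path *path)
{
	struct btrfs_key found_key;
	int ret;

	ret = btrfs_find_item(tree_root, path, 257, 0,
			      BTRFS_ROOT_BACKREF_KEY, &found_key);
	btrfs_release_path(path);
	return ret;	/* 0 found, 1 not found, < 0 error */
}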
/*
 * look for key in the tree.  path is filled in with nodes along the way
 * if key is found, we return zero and you can find the item in the leaf
 * level of the path (level 0)
 *
 * If the key isn't found, the path points to the slot where it should
 * be inserted, and 1 is returned.  If there are other errors during the
 * search a negative error number is returned.
 *
 * if ins_len > 0, nodes and leaves will be split as we walk down the
 * tree.  if ins_len < 0, nodes will be merged as we walk down the tree (if
 * possible)
 */
int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		      const struct btrfs_key *key, struct btrfs_path *p,
		      int ins_len, int cow)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_buffer *b;
	int slot;
	int ret;
	int err;
	int level;
	int lowest_unlock = 1;
	int root_lock;
	/* everything at write_lock_level or lower must be write locked */
	int write_lock_level = 0;
	u8 lowest_level = 0;
	int min_write_lock_level;
	int prev_cmp;

	lowest_level = p->lowest_level;
	WARN_ON(lowest_level && ins_len > 0);
	WARN_ON(p->nodes[0] != NULL);
	BUG_ON(!cow && ins_len);

	if (ins_len < 0) {
		lowest_unlock = 2;

		/* when we are removing items, we might have to go up to level
		 * two as we update tree pointers.  Make sure we keep write
		 * locks for those levels as well
		 */
		write_lock_level = 2;
	} else if (ins_len > 0) {
		/*
		 * for inserting items, make sure we have a write lock on
		 * level 1 so we can update keys
		 */
		write_lock_level = 1;
	}

	if (!cow)
		write_lock_level = -1;

	if (cow && (p->keep_locks || p->lowest_level))
		write_lock_level = BTRFS_MAX_LEVEL;

	min_write_lock_level = write_lock_level;

again:
	prev_cmp = -1;
	/*
	 * we try very hard to do read locks on the root
	 */
	root_lock = BTRFS_READ_LOCK;
	level = 0;
	if (p->search_commit_root) {
		/*
		 * the commit roots are read only
		 * so we always do read locks
		 */
		if (p->need_commit_sem)
			down_read(&fs_info->commit_root_sem);
		b = root->commit_root;
		extent_buffer_get(b);
		level = btrfs_header_level(b);
		if (p->need_commit_sem)
			up_read(&fs_info->commit_root_sem);
		if (!p->skip_locking)
			btrfs_tree_read_lock(b);
	} else {
		if (p->skip_locking) {
			b = btrfs_root_node(root);
			level = btrfs_header_level(b);
		} else {
			/* we don't know the level of the root node
			 * until we actually have it read locked
			 */
			b = btrfs_read_lock_root_node(root);
			level = btrfs_header_level(b);
			if (level <= write_lock_level) {
				/* whoops, must trade for write lock */
				btrfs_tree_read_unlock(b);
				free_extent_buffer(b);
				b = btrfs_lock_root_node(root);
				root_lock = BTRFS_WRITE_LOCK;

				/* the level might have changed, check again */
				level = btrfs_header_level(b);
			}
		}
	}
	p->nodes[level] = b;
	if (!p->skip_locking)
		p->locks[level] = root_lock;

	while (b) {
		level = btrfs_header_level(b);

		/*
		 * setup the path here so we can release it under lock
		 * contention with the cow code
		 */
		if (cow) {
			/*
			 * if we don't really need to cow this block
			 * then we don't want to set the path blocking,
			 * so we test it here
			 */
			if (!should_cow_block(trans, root, b)) {
				trans->dirty = true;
				goto cow_done;
			}

			/*
			 * must have write locks on this node and the
			 * parent
			 */
			if (level > write_lock_level ||
			    (level + 1 > write_lock_level &&
			     level + 1 < BTRFS_MAX_LEVEL &&
			     p->nodes[level + 1])) {
				write_lock_level = level + 1;
				btrfs_release_path(p);
				goto again;
			}

			btrfs_set_path_blocking(p);
			err = btrfs_cow_block(trans, root, b,
					      p->nodes[level + 1],
					      p->slots[level + 1], &b);
			if (err) {
				ret = err;
				goto done;
			}
		}
cow_done:
		p->nodes[level] = b;
		btrfs_clear_path_blocking(p, NULL, 0);

		/*
		 * we have a lock on b and as long as we aren't changing
		 * the tree, there is no way for the items in b to change.
		 * It is safe to drop the lock on our parent before we
		 * go through the expensive btree search on b.
		 *
		 * If we're inserting or deleting (ins_len != 0), then we might
		 * be changing slot zero, which may require changing the parent.
		 * So, we can't drop the lock until after we know which slot
		 * we're operating on.
		 */
		if (!ins_len && !p->keep_locks) {
			int u = level + 1;

			if (u < BTRFS_MAX_LEVEL && p->locks[u]) {
				btrfs_tree_unlock_rw(p->nodes[u], p->locks[u]);
				p->locks[u] = 0;
			}
		}

		ret = key_search(b, key, level, &prev_cmp, &slot);
		if (ret < 0)
			goto done;

		if (level != 0) {
			int dec = 0;
			if (ret && slot > 0) {
				dec = 1;
				slot -= 1;
			}
			p->slots[level] = slot;
			err = setup_nodes_for_search(trans, root, p, b, level,
						     ins_len, &write_lock_level);
			if (err == -EAGAIN)
				goto again;
			if (err) {
				ret = err;
				goto done;
			}
			b = p->nodes[level];
			slot = p->slots[level];

			/*
			 * slot 0 is special, if we change the key
			 * we have to update the parent pointer
			 * which means we must have a write lock
			 * on the parent
			 */
			if (slot == 0 && ins_len &&
			    write_lock_level < level + 1) {
				write_lock_level = level + 1;
				btrfs_release_path(p);
				goto again;
			}

			unlock_up(p, level, lowest_unlock,
				  min_write_lock_level, &write_lock_level);

			if (level == lowest_level) {
				if (dec)
					p->slots[level]++;
				goto done;
			}

			err = read_block_for_search(root, p, &b, level,
						    slot, key);
			if (err == -EAGAIN)
				goto again;
			if (err) {
				ret = err;
				goto done;
			}

			if (!p->skip_locking) {
				level = btrfs_header_level(b);
				if (level <= write_lock_level) {
					err = btrfs_try_tree_write_lock(b);
					if (!err) {
						btrfs_set_path_blocking(p);
						btrfs_tree_lock(b);
						btrfs_clear_path_blocking(p, b,
								  BTRFS_WRITE_LOCK);
					}
					p->locks[level] = BTRFS_WRITE_LOCK;
				} else {
					err = btrfs_tree_read_lock_atomic(b);
					if (!err) {
						btrfs_set_path_blocking(p);
						btrfs_tree_read_lock(b);
						btrfs_clear_path_blocking(p, b,
								  BTRFS_READ_LOCK);
					}
					p->locks[level] = BTRFS_READ_LOCK;
				}
				p->nodes[level] = b;
			}
		} else {
			p->slots[level] = slot;
			if (ins_len > 0 &&
			    btrfs_leaf_free_space(fs_info, b) < ins_len) {
				if (write_lock_level < 1) {
					write_lock_level = 1;
					btrfs_release_path(p);
					goto again;
				}

				btrfs_set_path_blocking(p);
				err = split_leaf(trans, root, key,
						 p, ins_len, ret == 0);
				btrfs_clear_path_blocking(p, NULL, 0);

				BUG_ON(err > 0);
				if (err) {
					ret = err;
					goto done;
				}
			}
			if (!p->search_for_split)
				unlock_up(p, level, lowest_unlock,
					  min_write_lock_level, &write_lock_level);
			goto done;
		}
	}
	ret = 1;
done:
	/*
	 * we don't really know what they plan on doing with the path
	 * from here on, so for now just mark it as blocking
	 */
	if (!p->leave_spinning)
		btrfs_set_path_blocking(p);
	if (ret < 0 && !p->skip_release_on_error)
		btrfs_release_path(p);
	return ret;
}
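
/*
 * An illustrative sketch (hypothetical caller, not from the original
 * source): a plain read-only lookup with btrfs_search_slot().  trans
 * is NULL and ins_len/cow are both 0, so nothing is COWed and a return
 * of 1 just means the key is not present.
 */
static inline int search_slot_example(struct btrfs_root *root, u64 ino)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = ino;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret == 0) {
		/* exact match: item at path->nodes[0], path->slots[0] */
	}
	btrfs_free_path(path);
	return ret;
}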
/*
 * Like btrfs_search_slot, this looks for a key in the given tree. It uses the
 * current state of the tree together with the operations recorded in the tree
 * modification log to search for the key in a previous version of this tree, as
 * denoted by the time_seq parameter.
 *
 * Naturally, there is no support for insert, delete or cow operations.
 *
 * The resulting path and return value will be set up as if we called
 * btrfs_search_slot at that point in time with ins_len and cow both set to 0.
 */
int btrfs_search_old_slot(struct btrfs_root *root, const struct btrfs_key *key,
			  struct btrfs_path *p, u64 time_seq)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_buffer *b;
	int slot;
	int ret;
	int err;
	int level;
	int lowest_unlock = 1;
	u8 lowest_level = 0;
	int prev_cmp = -1;

	lowest_level = p->lowest_level;
	WARN_ON(p->nodes[0] != NULL);

	if (p->search_commit_root) {
		BUG_ON(time_seq);
		return btrfs_search_slot(NULL, root, key, p, 0, 0);
	}

again:
	b = get_old_root(root, time_seq);
	level = btrfs_header_level(b);
	p->locks[level] = BTRFS_READ_LOCK;

	while (b) {
		level = btrfs_header_level(b);
		p->nodes[level] = b;
		btrfs_clear_path_blocking(p, NULL, 0);

		/*
		 * we have a lock on b and as long as we aren't changing
		 * the tree, there is no way for the items in b to change.
		 * It is safe to drop the lock on our parent before we
		 * go through the expensive btree search on b.
		 */
		btrfs_unlock_up_safe(p, level + 1);

		/*
		 * Since we can unwind ebs we want to do a real search every
		 * time.
		 */
		prev_cmp = -1;
		ret = key_search(b, key, level, &prev_cmp, &slot);

		if (level != 0) {
			int dec = 0;
			if (ret && slot > 0) {
				dec = 1;
				slot -= 1;
			}
			p->slots[level] = slot;
			unlock_up(p, level, lowest_unlock, 0, NULL);

			if (level == lowest_level) {
				if (dec)
					p->slots[level]++;
				goto done;
			}

			err = read_block_for_search(root, p, &b, level,
						    slot, key);
			if (err == -EAGAIN)
				goto again;
			if (err) {
				ret = err;
				goto done;
			}

			level = btrfs_header_level(b);
			err = btrfs_tree_read_lock_atomic(b);
			if (!err) {
				btrfs_set_path_blocking(p);
				btrfs_tree_read_lock(b);
				btrfs_clear_path_blocking(p, b,
							  BTRFS_READ_LOCK);
			}
			b = tree_mod_log_rewind(fs_info, p, b, time_seq);
			if (!b) {
				ret = -ENOMEM;
				goto done;
			}
			p->locks[level] = BTRFS_READ_LOCK;
			p->nodes[level] = b;
		} else {
			p->slots[level] = slot;
			unlock_up(p, level, lowest_unlock, 0, NULL);
			goto done;
		}
	}
	ret = 1;
done:
	if (!p->leave_spinning)
		btrfs_set_path_blocking(p);
	if (ret < 0)
		btrfs_release_path(p);

	return ret;
}
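
/*
 * Illustrative sketch (not part of the original file): pin a tree mod log
 * sequence number and replay a search against that older version of the
 * tree.  This assumes the btrfs_get_tree_mod_seq()/btrfs_put_tree_mod_seq()
 * pair and struct seq_list as used elsewhere in the tree; names prefixed
 * example_ are hypothetical.
 */
static int example_old_lookup(struct btrfs_root *root,
			      const struct btrfs_key *key)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct seq_list elem = SEQ_LIST_INIT(elem);
	struct btrfs_path *path;
	u64 time_seq;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* block tree mod log flushing past this point while we search */
	time_seq = btrfs_get_tree_mod_seq(fs_info, &elem);
	ret = btrfs_search_old_slot(root, key, path, time_seq);
	btrfs_put_tree_mod_seq(fs_info, &elem);
	btrfs_free_path(path);
	return ret;
}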
/*
 * helper to use instead of search slot if no exact match is needed but
 * instead the next or previous item should be returned.
 * When find_higher is true, the next higher item is returned, the next lower
 * otherwise.
 * When return_any and find_higher are both true, and no higher item is found,
 * return the next lower instead.
 * When return_any is true and find_higher is false, and no lower item is found,
 * return the next higher instead.
 * It returns 0 if any item is found, 1 if none is found (tree empty), and
 * < 0 on error
 */
int btrfs_search_slot_for_read(struct btrfs_root *root,
			       const struct btrfs_key *key,
			       struct btrfs_path *p, int find_higher,
			       int return_any)
{
	int ret;
	struct extent_buffer *leaf;

again:
	ret = btrfs_search_slot(NULL, root, key, p, 0, 0);
	if (ret <= 0)
		return ret;
	/*
	 * a return value of 1 means the path is at the position where the
	 * item should be inserted. Normally this is the next bigger item,
	 * but in case the previous item is the last in a leaf, path points
	 * to the first free slot in the previous leaf, i.e. at an invalid
	 * item.
	 */
	leaf = p->nodes[0];

	if (find_higher) {
		if (p->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, p);
			if (ret <= 0)
				return ret;
			if (!return_any)
				return 1;
			/*
			 * no higher item found, return the next
			 * lower instead
			 */
			return_any = 0;
			find_higher = 0;
			btrfs_release_path(p);
			goto again;
		}
	} else {
		if (p->slots[0] == 0) {
			ret = btrfs_prev_leaf(root, p);
			if (ret < 0)
				return ret;
			if (!ret) {
				leaf = p->nodes[0];
				if (p->slots[0] == btrfs_header_nritems(leaf))
					p->slots[0]--;
				return 0;
			}
			if (!return_any)
				return 1;
			/*
			 * no lower item found, return the next
			 * higher instead
			 */
			return_any = 0;
			find_higher = 1;
			btrfs_release_path(p);
			goto again;
		} else {
			--p->slots[0];
		}
	}
	return 0;
}
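
/*
 * Illustrative sketch (not part of the original file): find the first item
 * at or after 'key'.  find_higher == 1 asks for the next higher item on a
 * miss, return_any == 1 falls back to the next lower one if nothing higher
 * exists.  example_find_nearest is hypothetical.
 */
static int example_find_nearest(struct btrfs_root *root,
				struct btrfs_key *key)
{
	struct btrfs_path *path;
	struct btrfs_key found;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot_for_read(root, key, path, 1, 1);
	if (ret == 0)
		btrfs_item_key_to_cpu(path->nodes[0], &found,
				      path->slots[0]);
	btrfs_free_path(path);
	return ret;	/* 0: item found, 1: tree empty, < 0: error */
}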
/*
 * adjust the pointers going up the tree, starting at level
 * making sure the right key of each node points to 'key'.
 * This is used after shifting pointers to the left, so it stops
 * fixing up pointers when a given leaf/node is not in slot 0 of the
 * higher levels
 *
 */
static void fixup_low_keys(struct btrfs_fs_info *fs_info,
			   struct btrfs_path *path,
			   struct btrfs_disk_key *key, int level)
{
	int i;
	struct extent_buffer *t;

	for (i = level; i < BTRFS_MAX_LEVEL; i++) {
		int tslot = path->slots[i];
		if (!path->nodes[i])
			break;
		t = path->nodes[i];
		tree_mod_log_set_node_key(fs_info, t, tslot, 1);
		btrfs_set_node_key(t, key, tslot);
		btrfs_mark_buffer_dirty(path->nodes[i]);
		if (tslot != 0)
			break;
	}
}
/*
 * update item key.
 *
 * This function isn't completely safe. It's the caller's responsibility
 * that the new key won't break the order
 */
void btrfs_set_item_key_safe(struct btrfs_fs_info *fs_info,
			     struct btrfs_path *path,
			     const struct btrfs_key *new_key)
{
	struct btrfs_disk_key disk_key;
	struct extent_buffer *eb;
	int slot;

	eb = path->nodes[0];
	slot = path->slots[0];
	if (slot > 0) {
		btrfs_item_key(eb, &disk_key, slot - 1);
		BUG_ON(comp_keys(&disk_key, new_key) >= 0);
	}
	if (slot < btrfs_header_nritems(eb) - 1) {
		btrfs_item_key(eb, &disk_key, slot + 1);
		BUG_ON(comp_keys(&disk_key, new_key) <= 0);
	}

	btrfs_cpu_key_to_disk(&disk_key, new_key);
	btrfs_set_item_key(eb, &disk_key, slot);
	btrfs_mark_buffer_dirty(eb);
	if (slot == 0)
		fixup_low_keys(fs_info, path, &disk_key, 1);
}
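
/*
 * Illustrative sketch (not part of the original file): rewrite the key
 * offset of the item the path currently points at.  The caller must hold
 * the leaf locked via a prior search and must know the new key keeps the
 * leaf sorted, otherwise the BUG_ON()s above fire.  example_bump_key_offset
 * is hypothetical.
 */
static void example_bump_key_offset(struct btrfs_fs_info *fs_info,
				    struct btrfs_path *path, u64 new_offset)
{
	struct btrfs_key new_key;

	btrfs_item_key_to_cpu(path->nodes[0], &new_key, path->slots[0]);
	new_key.offset = new_offset;
	btrfs_set_item_key_safe(fs_info, path, &new_key);
}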
/*
 * try to push data from one node into the next node left in the
 * tree.
 *
 * returns 0 if some ptrs were pushed left, < 0 if there was some horrible
 * error, and > 0 if there was no room in the left hand block.
 */
static int push_node_left(struct btrfs_trans_handle *trans,
			  struct btrfs_fs_info *fs_info,
			  struct extent_buffer *dst,
			  struct extent_buffer *src, int empty)
{
	int push_items = 0;
	int src_nritems;
	int dst_nritems;
	int ret = 0;

	src_nritems = btrfs_header_nritems(src);
	dst_nritems = btrfs_header_nritems(dst);
	push_items = BTRFS_NODEPTRS_PER_BLOCK(fs_info) - dst_nritems;
	WARN_ON(btrfs_header_generation(src) != trans->transid);
	WARN_ON(btrfs_header_generation(dst) != trans->transid);

	if (!empty && src_nritems <= 8)
		return 1;

	if (push_items <= 0)
		return 1;

	if (empty) {
		push_items = min(src_nritems, push_items);
		if (push_items < src_nritems) {
			/* leave at least 8 pointers in the node if
			 * we aren't going to empty it
			 */
			if (src_nritems - push_items < 8) {
				if (push_items <= 8)
					return 1;
				push_items -= 8;
			}
		}
	} else
		push_items = min(src_nritems - 8, push_items);

	ret = tree_mod_log_eb_copy(fs_info, dst, src, dst_nritems, 0,
				   push_items);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		return ret;
	}
	copy_extent_buffer(dst, src,
			   btrfs_node_key_ptr_offset(dst_nritems),
			   btrfs_node_key_ptr_offset(0),
			   push_items * sizeof(struct btrfs_key_ptr));

	if (push_items < src_nritems) {
		/*
		 * don't call tree_mod_log_eb_move here, key removal was already
		 * fully logged by tree_mod_log_eb_copy above.
		 */
		memmove_extent_buffer(src, btrfs_node_key_ptr_offset(0),
				      btrfs_node_key_ptr_offset(push_items),
				      (src_nritems - push_items) *
				      sizeof(struct btrfs_key_ptr));
	}
	btrfs_set_header_nritems(src, src_nritems - push_items);
	btrfs_set_header_nritems(dst, dst_nritems + push_items);
	btrfs_mark_buffer_dirty(src);
	btrfs_mark_buffer_dirty(dst);

	return ret;
}
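
/*
 * Worked example for push_node_left() (illustrative numbers only,
 * assuming a node capacity of, say, 493 pointers with 16K nodes): with
 * dst_nritems = 490 and src_nritems = 100, push_items starts at
 * 493 - 490 = 3.  In the non-empty case push_items becomes
 * min(100 - 8, 3) = 3, so three key/ptr pairs move to dst and src keeps
 * 97, honoring the "leave at least 8 pointers" rule above.
 */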
/*
 * try to push data from one node into the next node right in the
 * tree.
 *
 * returns 0 if some ptrs were pushed, < 0 if there was some horrible
 * error, and > 0 if there was no room in the right hand block.
 *
 * this will only push up to 1/2 the contents of the left node over
 */
static int balance_node_right(struct btrfs_trans_handle *trans,
			      struct btrfs_fs_info *fs_info,
			      struct extent_buffer *dst,
			      struct extent_buffer *src)
{
	int push_items = 0;
	int max_push;
	int src_nritems;
	int dst_nritems;
	int ret = 0;

	WARN_ON(btrfs_header_generation(src) != trans->transid);
	WARN_ON(btrfs_header_generation(dst) != trans->transid);

	src_nritems = btrfs_header_nritems(src);
	dst_nritems = btrfs_header_nritems(dst);
	push_items = BTRFS_NODEPTRS_PER_BLOCK(fs_info) - dst_nritems;
	if (push_items <= 0)
		return 1;

	if (src_nritems < 4)
		return 1;

	max_push = src_nritems / 2 + 1;
	/* don't try to empty the node */
	if (max_push >= src_nritems)
		return 1;

	if (max_push < push_items)
		push_items = max_push;

	tree_mod_log_eb_move(fs_info, dst, push_items, 0, dst_nritems);
	memmove_extent_buffer(dst, btrfs_node_key_ptr_offset(push_items),
			      btrfs_node_key_ptr_offset(0),
			      (dst_nritems) *
			      sizeof(struct btrfs_key_ptr));

	ret = tree_mod_log_eb_copy(fs_info, dst, src, 0,
				   src_nritems - push_items, push_items);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		return ret;
	}
	copy_extent_buffer(dst, src,
			   btrfs_node_key_ptr_offset(0),
			   btrfs_node_key_ptr_offset(src_nritems - push_items),
			   push_items * sizeof(struct btrfs_key_ptr));

	btrfs_set_header_nritems(src, src_nritems - push_items);
	btrfs_set_header_nritems(dst, dst_nritems + push_items);

	btrfs_mark_buffer_dirty(src);
	btrfs_mark_buffer_dirty(dst);

	return ret;
}
/*
 * helper function to insert a new root level in the tree.
 * A new node is allocated, and a single item is inserted to
 * point to the existing root
 *
 * returns zero on success or < 0 on failure.
 */
static noinline int insert_new_root(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    struct btrfs_path *path, int level)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 lower_gen;
	struct extent_buffer *lower;
	struct extent_buffer *c;
	struct extent_buffer *old;
	struct btrfs_disk_key lower_key;

	BUG_ON(path->nodes[level]);
	BUG_ON(path->nodes[level-1] != root->node);

	lower = path->nodes[level-1];
	if (level == 1)
		btrfs_item_key(lower, &lower_key, 0);
	else
		btrfs_node_key(lower, &lower_key, 0);

	c = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
				   &lower_key, level, root->node->start, 0);
	if (IS_ERR(c))
		return PTR_ERR(c);

	root_add_used(root, fs_info->nodesize);

	memzero_extent_buffer(c, 0, sizeof(struct btrfs_header));
	btrfs_set_header_nritems(c, 1);
	btrfs_set_header_level(c, level);
	btrfs_set_header_bytenr(c, c->start);
	btrfs_set_header_generation(c, trans->transid);
	btrfs_set_header_backref_rev(c, BTRFS_MIXED_BACKREF_REV);
	btrfs_set_header_owner(c, root->root_key.objectid);

	write_extent_buffer_fsid(c, fs_info->fsid);
	write_extent_buffer_chunk_tree_uuid(c, fs_info->chunk_tree_uuid);

	btrfs_set_node_key(c, &lower_key, 0);
	btrfs_set_node_blockptr(c, 0, lower->start);
	lower_gen = btrfs_header_generation(lower);
	WARN_ON(lower_gen != trans->transid);

	btrfs_set_node_ptr_generation(c, 0, lower_gen);

	btrfs_mark_buffer_dirty(c);

	old = root->node;
	tree_mod_log_set_root_pointer(root, c, 0);
	rcu_assign_pointer(root->node, c);

	/* the super has an extra ref to root->node */
	free_extent_buffer(old);

	add_root_to_dirty_list(root);
	extent_buffer_get(c);
	path->nodes[level] = c;
	path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
	path->slots[level] = 0;
	return 0;
}
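
/*
 * Sketch of what insert_new_root() does (illustrative diagram, derived
 * from the code above): the existing root at level N becomes the single
 * child of a freshly allocated node at level N + 1, which then becomes
 * root->node:
 *
 *     before:  [ old root, level N ]
 *
 *     after:   [ new root, level N+1 ]   (nritems == 1)
 *                        |
 *              [ old root, level N ]
 */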
/*
 * worker function to insert a single pointer in a node.
 * the node should have enough room for the pointer already
 *
 * slot and level indicate where you want the key to go, and
 * blocknr is the block the key points to.
 */
static void insert_ptr(struct btrfs_trans_handle *trans,
		       struct btrfs_fs_info *fs_info, struct btrfs_path *path,
		       struct btrfs_disk_key *key, u64 bytenr,
		       int slot, int level)
{
	struct extent_buffer *lower;
	int nritems;
	int ret;

	BUG_ON(!path->nodes[level]);
	btrfs_assert_tree_locked(path->nodes[level]);
	lower = path->nodes[level];
	nritems = btrfs_header_nritems(lower);
	BUG_ON(slot > nritems);
	BUG_ON(nritems == BTRFS_NODEPTRS_PER_BLOCK(fs_info));
	if (slot != nritems) {
		if (level)
			tree_mod_log_eb_move(fs_info, lower, slot + 1,
					     slot, nritems - slot);
		memmove_extent_buffer(lower,
			      btrfs_node_key_ptr_offset(slot + 1),
			      btrfs_node_key_ptr_offset(slot),
			      (nritems - slot) * sizeof(struct btrfs_key_ptr));
	}
	if (level) {
		ret = tree_mod_log_insert_key(fs_info, lower, slot,
					      MOD_LOG_KEY_ADD, GFP_NOFS);
		BUG_ON(ret < 0);
	}
	btrfs_set_node_key(lower, key, slot);
	btrfs_set_node_blockptr(lower, slot, bytenr);
	WARN_ON(trans->transid == 0);
	btrfs_set_node_ptr_generation(lower, slot, trans->transid);
	btrfs_set_header_nritems(lower, nritems + 1);
	btrfs_mark_buffer_dirty(lower);
}
/*
 * split the node at the specified level in path in two.
 * The path is corrected to point to the appropriate node after the split
 *
 * Before splitting this tries to make some room in the node by pushing
 * left and right, if either one works, it returns right away.
 *
 * returns 0 on success and < 0 on failure
 */
static noinline int split_node(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       struct btrfs_path *path, int level)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_buffer *c;
	struct extent_buffer *split;
	struct btrfs_disk_key disk_key;
	int mid;
	int ret;
	u32 c_nritems;

	c = path->nodes[level];
	WARN_ON(btrfs_header_generation(c) != trans->transid);
	if (c == root->node) {
		/*
		 * trying to split the root, let's make a new one
		 *
		 * tree mod log: We don't log removal of the old root in
		 * insert_new_root, because that root buffer will be kept as a
		 * normal node. We are going to log removal of half of the
		 * elements below with tree_mod_log_eb_copy. We're holding a
		 * tree lock on the buffer, which is why we cannot race with
		 * other tree_mod_log users.
		 */
		ret = insert_new_root(trans, root, path, level + 1);
		if (ret)
			return ret;
	} else {
		ret = push_nodes_for_insert(trans, root, path, level);
		c = path->nodes[level];
		if (!ret && btrfs_header_nritems(c) <
		    BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 3)
			return 0;
		if (ret < 0)
			return ret;
	}

	c_nritems = btrfs_header_nritems(c);
	mid = (c_nritems + 1) / 2;
	btrfs_node_key(c, &disk_key, mid);

	split = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
				       &disk_key, level, c->start, 0);
	if (IS_ERR(split))
		return PTR_ERR(split);

	root_add_used(root, fs_info->nodesize);

	memzero_extent_buffer(split, 0, sizeof(struct btrfs_header));
	btrfs_set_header_level(split, btrfs_header_level(c));
	btrfs_set_header_bytenr(split, split->start);
	btrfs_set_header_generation(split, trans->transid);
	btrfs_set_header_backref_rev(split, BTRFS_MIXED_BACKREF_REV);
	btrfs_set_header_owner(split, root->root_key.objectid);
	write_extent_buffer_fsid(split, fs_info->fsid);
	write_extent_buffer_chunk_tree_uuid(split, fs_info->chunk_tree_uuid);

	ret = tree_mod_log_eb_copy(fs_info, split, c, 0, mid, c_nritems - mid);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		return ret;
	}
	copy_extent_buffer(split, c,
			   btrfs_node_key_ptr_offset(0),
			   btrfs_node_key_ptr_offset(mid),
			   (c_nritems - mid) * sizeof(struct btrfs_key_ptr));
	btrfs_set_header_nritems(split, c_nritems - mid);
	btrfs_set_header_nritems(c, mid);
	ret = 0;

	btrfs_mark_buffer_dirty(c);
	btrfs_mark_buffer_dirty(split);

	insert_ptr(trans, fs_info, path, &disk_key, split->start,
		   path->slots[level + 1] + 1, level + 1);

	if (path->slots[level] >= mid) {
		path->slots[level] -= mid;
		btrfs_tree_unlock(c);
		free_extent_buffer(c);
		path->nodes[level] = split;
		path->slots[level + 1] += 1;
	} else {
		btrfs_tree_unlock(split);
		free_extent_buffer(split);
	}
	return ret;
}
/*
 * how many bytes are required to store the items in a leaf. start
 * and nr indicate which items in the leaf to check. This totals up the
 * space used both by the item structs and the item data
 */
static int leaf_space_used(struct extent_buffer *l, int start, int nr)
{
	struct btrfs_item *start_item;
	struct btrfs_item *end_item;
	struct btrfs_map_token token;
	int data_len;
	int nritems = btrfs_header_nritems(l);
	int end = min(nritems, start + nr) - 1;

	if (!nr)
		return 0;
	btrfs_init_map_token(&token);
	start_item = btrfs_item_nr(start);
	end_item = btrfs_item_nr(end);
	data_len = btrfs_token_item_offset(l, start_item, &token) +
		btrfs_token_item_size(l, start_item, &token);
	data_len = data_len - btrfs_token_item_offset(l, end_item, &token);
	data_len += sizeof(struct btrfs_item) * nr;
	WARN_ON(data_len < 0);
	return data_len;
}

/*
 * The space between the end of the leaf items and
 * the start of the leaf data. IOW, how much room
 * the leaf has left for both items and data
 */
noinline int btrfs_leaf_free_space(struct btrfs_fs_info *fs_info,
				   struct extent_buffer *leaf)
{
	int nritems = btrfs_header_nritems(leaf);
	int ret;

	ret = BTRFS_LEAF_DATA_SIZE(fs_info) - leaf_space_used(leaf, 0, nritems);
	if (ret < 0) {
		btrfs_crit(fs_info,
			   "leaf free space ret %d, leaf data size %lu, used %d nritems %d",
			   ret,
			   (unsigned long) BTRFS_LEAF_DATA_SIZE(fs_info),
			   leaf_space_used(leaf, 0, nritems), nritems);
	}
	return ret;
}
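
/*
 * Worked example (illustrative numbers): BTRFS_LEAF_DATA_SIZE() is the
 * leaf size minus sizeof(struct btrfs_header).  If a leaf holds 10 items
 * whose data payloads total 1000 bytes, leaf_space_used() is
 * 1000 + 10 * sizeof(struct btrfs_item), and the free space is
 * BTRFS_LEAF_DATA_SIZE() minus that figure.  A negative result means
 * the leaf is corrupt, which triggers the btrfs_crit() above.
 */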
/*
 * min slot controls the lowest index we're willing to push to the
 * right. We'll push up to and including min_slot, but no lower
 */
static noinline int __push_leaf_right(struct btrfs_fs_info *fs_info,
				      struct btrfs_path *path,
				      int data_size, int empty,
				      struct extent_buffer *right,
				      int free_space, u32 left_nritems,
				      u32 min_slot)
{
	struct extent_buffer *left = path->nodes[0];
	struct extent_buffer *upper = path->nodes[1];
	struct btrfs_map_token token;
	struct btrfs_disk_key disk_key;
	int slot;
	u32 i;
	int push_space = 0;
	int push_items = 0;
	struct btrfs_item *item;
	u32 nr;
	u32 right_nritems;
	u32 data_end;
	u32 this_item_size;

	btrfs_init_map_token(&token);

	if (empty)
		nr = 0;
	else
		nr = max_t(u32, 1, min_slot);

	if (path->slots[0] >= left_nritems)
		push_space += data_size;

	slot = path->slots[1];
	i = left_nritems - 1;
	while (i >= nr) {
		item = btrfs_item_nr(i);

		if (!empty && push_items > 0) {
			if (path->slots[0] > i)
				break;
			if (path->slots[0] == i) {
				int space = btrfs_leaf_free_space(fs_info, left);
				if (space + push_space * 2 > free_space)
					break;
			}
		}

		if (path->slots[0] == i)
			push_space += data_size;

		this_item_size = btrfs_item_size(left, item);
		if (this_item_size + sizeof(*item) + push_space > free_space)
			break;

		push_items++;
		push_space += this_item_size + sizeof(*item);
		if (i == 0)
			break;
		i--;
	}

	if (push_items == 0)
		goto out_unlock;

	WARN_ON(!empty && push_items == left_nritems);

	/* push left to right */
	right_nritems = btrfs_header_nritems(right);

	push_space = btrfs_item_end_nr(left, left_nritems - push_items);
	push_space -= leaf_data_end(fs_info, left);

	/* make room in the right data area */
	data_end = leaf_data_end(fs_info, right);
	memmove_extent_buffer(right,
			      BTRFS_LEAF_DATA_OFFSET + data_end - push_space,
			      BTRFS_LEAF_DATA_OFFSET + data_end,
			      BTRFS_LEAF_DATA_SIZE(fs_info) - data_end);

	/* copy from the left data area */
	copy_extent_buffer(right, left, BTRFS_LEAF_DATA_OFFSET +
		     BTRFS_LEAF_DATA_SIZE(fs_info) - push_space,
		     BTRFS_LEAF_DATA_OFFSET + leaf_data_end(fs_info, left),
		     push_space);

	memmove_extent_buffer(right, btrfs_item_nr_offset(push_items),
			      btrfs_item_nr_offset(0),
			      right_nritems * sizeof(struct btrfs_item));

	/* copy the items from left to right */
	copy_extent_buffer(right, left, btrfs_item_nr_offset(0),
		   btrfs_item_nr_offset(left_nritems - push_items),
		   push_items * sizeof(struct btrfs_item));

	/* update the item pointers */
	right_nritems += push_items;
	btrfs_set_header_nritems(right, right_nritems);
	push_space = BTRFS_LEAF_DATA_SIZE(fs_info);
	for (i = 0; i < right_nritems; i++) {
		item = btrfs_item_nr(i);
		push_space -= btrfs_token_item_size(right, item, &token);
		btrfs_set_token_item_offset(right, item, push_space, &token);
	}

	left_nritems -= push_items;
	btrfs_set_header_nritems(left, left_nritems);

	if (left_nritems)
		btrfs_mark_buffer_dirty(left);
	else
		clean_tree_block(fs_info, left);

	btrfs_mark_buffer_dirty(right);

	btrfs_item_key(right, &disk_key, 0);
	btrfs_set_node_key(upper, &disk_key, slot + 1);
	btrfs_mark_buffer_dirty(upper);

	/* then fixup the leaf pointer in the path */
	if (path->slots[0] >= left_nritems) {
		path->slots[0] -= left_nritems;
		if (btrfs_header_nritems(path->nodes[0]) == 0)
			clean_tree_block(fs_info, path->nodes[0]);
		btrfs_tree_unlock(path->nodes[0]);
		free_extent_buffer(path->nodes[0]);
		path->nodes[0] = right;
		path->slots[1] += 1;
	} else {
		btrfs_tree_unlock(right);
		free_extent_buffer(right);
	}
	return 0;

out_unlock:
	btrfs_tree_unlock(right);
	free_extent_buffer(right);
	return 1;
}
/*
 * push some data in the path leaf to the right, trying to free up at
 * least data_size bytes. returns zero if the push worked, nonzero otherwise
 *
 * returns 1 if the push failed because the other node didn't have enough
 * room, 0 if everything worked out and < 0 if there were major errors.
 *
 * this will push starting from min_slot to the end of the leaf. It won't
 * push any slot lower than min_slot
 */
static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root
			   *root, struct btrfs_path *path,
			   int min_data_size, int data_size,
			   int empty, u32 min_slot)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_buffer *left = path->nodes[0];
	struct extent_buffer *right;
	struct extent_buffer *upper;
	int slot;
	int free_space;
	u32 left_nritems;
	int ret;

	if (!path->nodes[1])
		return 1;

	slot = path->slots[1];
	upper = path->nodes[1];
	if (slot >= btrfs_header_nritems(upper) - 1)
		return 1;

	btrfs_assert_tree_locked(path->nodes[1]);

	right = read_node_slot(fs_info, upper, slot + 1);
	/*
	 * slot + 1 is not valid or we fail to read the right node,
	 * no big deal, just return.
	 */
	if (IS_ERR(right))
		return 1;

	btrfs_tree_lock(right);
	btrfs_set_lock_blocking(right);

	free_space = btrfs_leaf_free_space(fs_info, right);
	if (free_space < data_size)
		goto out_unlock;

	/* cow and double check */
	ret = btrfs_cow_block(trans, root, right, upper,
			      slot + 1, &right);
	if (ret)
		goto out_unlock;

	free_space = btrfs_leaf_free_space(fs_info, right);
	if (free_space < data_size)
		goto out_unlock;

	left_nritems = btrfs_header_nritems(left);
	if (left_nritems == 0)
		goto out_unlock;

	if (path->slots[0] == left_nritems && !empty) {
		/* Key greater than all keys in the leaf, right neighbor has
		 * enough room for it and we're not emptying our leaf to delete
		 * it, therefore use right neighbor to insert the new item and
		 * no need to touch/dirty our left leaf. */
		btrfs_tree_unlock(left);
		free_extent_buffer(left);
		path->nodes[0] = right;
		path->slots[0] = 0;
		path->slots[1]++;
		return 0;
	}

	return __push_leaf_right(fs_info, path, min_data_size, empty,
				 right, free_space, left_nritems, min_slot);
out_unlock:
	btrfs_tree_unlock(right);
	free_extent_buffer(right);
	return 1;
}
/*
 * push some data in the path leaf to the left, trying to free up at
 * least data_size bytes. returns zero if the push worked, nonzero otherwise
 *
 * max_slot can put a limit on how far into the leaf we'll push items. The
 * item at 'max_slot' won't be touched. Use (u32)-1 to make us do all the
 * items
 */
static noinline int __push_leaf_left(struct btrfs_fs_info *fs_info,
				     struct btrfs_path *path, int data_size,
				     int empty, struct extent_buffer *left,
				     int free_space, u32 right_nritems,
				     u32 max_slot)
{
	struct btrfs_disk_key disk_key;
	struct extent_buffer *right = path->nodes[0];
	int i;
	int push_space = 0;
	int push_items = 0;
	struct btrfs_item *item;
	u32 old_left_nritems;
	u32 nr;
	int ret = 0;
	u32 this_item_size;
	u32 old_left_item_size;
	struct btrfs_map_token token;

	btrfs_init_map_token(&token);

	if (empty)
		nr = min(right_nritems, max_slot);
	else
		nr = min(right_nritems - 1, max_slot);

	for (i = 0; i < nr; i++) {
		item = btrfs_item_nr(i);

		if (!empty && push_items > 0) {
			if (path->slots[0] < i)
				break;
			if (path->slots[0] == i) {
				int space = btrfs_leaf_free_space(fs_info, right);
				if (space + push_space * 2 > free_space)
					break;
			}
		}

		if (path->slots[0] == i)
			push_space += data_size;

		this_item_size = btrfs_item_size(right, item);
		if (this_item_size + sizeof(*item) + push_space > free_space)
			break;

		push_items++;
		push_space += this_item_size + sizeof(*item);
	}

	if (push_items == 0) {
		ret = 1;
		goto out;
	}
	WARN_ON(!empty && push_items == btrfs_header_nritems(right));

	/* push data from right to left */
	copy_extent_buffer(left, right,
			   btrfs_item_nr_offset(btrfs_header_nritems(left)),
			   btrfs_item_nr_offset(0),
			   push_items * sizeof(struct btrfs_item));

	push_space = BTRFS_LEAF_DATA_SIZE(fs_info) -
		     btrfs_item_offset_nr(right, push_items - 1);

	copy_extent_buffer(left, right, BTRFS_LEAF_DATA_OFFSET +
		     leaf_data_end(fs_info, left) - push_space,
		     BTRFS_LEAF_DATA_OFFSET +
		     btrfs_item_offset_nr(right, push_items - 1),
		     push_space);
	old_left_nritems = btrfs_header_nritems(left);
	BUG_ON(old_left_nritems <= 0);

	old_left_item_size = btrfs_item_offset_nr(left, old_left_nritems - 1);
	for (i = old_left_nritems; i < old_left_nritems + push_items; i++) {
		u32 ioff;

		item = btrfs_item_nr(i);

		ioff = btrfs_token_item_offset(left, item, &token);
		btrfs_set_token_item_offset(left, item,
		      ioff - (BTRFS_LEAF_DATA_SIZE(fs_info) - old_left_item_size),
		      &token);
	}
	btrfs_set_header_nritems(left, old_left_nritems + push_items);

	/* fixup right node */
	if (push_items > right_nritems)
		WARN(1, KERN_CRIT "push items %d nr %u\n", push_items,
		     right_nritems);

	if (push_items < right_nritems) {
		push_space = btrfs_item_offset_nr(right, push_items - 1) -
						  leaf_data_end(fs_info, right);
		memmove_extent_buffer(right, BTRFS_LEAF_DATA_OFFSET +
				      BTRFS_LEAF_DATA_SIZE(fs_info) - push_space,
				      BTRFS_LEAF_DATA_OFFSET +
				      leaf_data_end(fs_info, right), push_space);

		memmove_extent_buffer(right, btrfs_item_nr_offset(0),
			      btrfs_item_nr_offset(push_items),
			      (btrfs_header_nritems(right) - push_items) *
			      sizeof(struct btrfs_item));
	}
	right_nritems -= push_items;
	btrfs_set_header_nritems(right, right_nritems);
	push_space = BTRFS_LEAF_DATA_SIZE(fs_info);
	for (i = 0; i < right_nritems; i++) {
		item = btrfs_item_nr(i);

		push_space = push_space - btrfs_token_item_size(right,
								item, &token);
		btrfs_set_token_item_offset(right, item, push_space, &token);
	}

	btrfs_mark_buffer_dirty(left);
	if (right_nritems)
		btrfs_mark_buffer_dirty(right);
	else
		clean_tree_block(fs_info, right);

	btrfs_item_key(right, &disk_key, 0);
	fixup_low_keys(fs_info, path, &disk_key, 1);

	/* then fixup the leaf pointer in the path */
	if (path->slots[0] < push_items) {
		path->slots[0] += old_left_nritems;
		btrfs_tree_unlock(path->nodes[0]);
		free_extent_buffer(path->nodes[0]);
		path->nodes[0] = left;
		path->slots[1] -= 1;
	} else {
		btrfs_tree_unlock(left);
		free_extent_buffer(left);
		path->slots[0] -= push_items;
	}
	BUG_ON(path->slots[0] < 0);
	return ret;
out:
	btrfs_tree_unlock(left);
	free_extent_buffer(left);
	return ret;
}
/*
 * push some data in the path leaf to the left, trying to free up at
 * least data_size bytes. returns zero if the push worked, nonzero otherwise
 *
 * max_slot can put a limit on how far into the leaf we'll push items. The
 * item at 'max_slot' won't be touched. Use (u32)-1 to make us push all the
 * items
 */
static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root
			  *root, struct btrfs_path *path, int min_data_size,
			  int data_size, int empty, u32 max_slot)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_buffer *right = path->nodes[0];
	struct extent_buffer *left;
	int slot;
	int free_space;
	u32 right_nritems;
	int ret = 0;

	slot = path->slots[1];
	if (slot == 0)
		return 1;
	if (!path->nodes[1])
		return 1;

	right_nritems = btrfs_header_nritems(right);
	if (right_nritems == 0)
		return 1;

	btrfs_assert_tree_locked(path->nodes[1]);

	left = read_node_slot(fs_info, path->nodes[1], slot - 1);
	/*
	 * slot - 1 is not valid or we fail to read the left node,
	 * no big deal, just return.
	 */
	if (IS_ERR(left))
		return 1;

	btrfs_tree_lock(left);
	btrfs_set_lock_blocking(left);

	free_space = btrfs_leaf_free_space(fs_info, left);
	if (free_space < data_size) {
		ret = 1;
		goto out;
	}

	/* cow and double check */
	ret = btrfs_cow_block(trans, root, left,
			      path->nodes[1], slot - 1, &left);
	if (ret) {
		/* we hit -ENOSPC, but it isn't fatal here */
		if (ret == -ENOSPC)
			ret = 1;
		goto out;
	}

	free_space = btrfs_leaf_free_space(fs_info, left);
	if (free_space < data_size) {
		ret = 1;
		goto out;
	}

	return __push_leaf_left(fs_info, path, min_data_size,
				empty, left, free_space, right_nritems,
				max_slot);
out:
	btrfs_tree_unlock(left);
	free_extent_buffer(left);
	return ret;
}
/*
 * split the path's leaf in two, making sure there is at least data_size
 * available for the resulting leaf level of the path.
 */
static noinline void copy_for_split(struct btrfs_trans_handle *trans,
				    struct btrfs_fs_info *fs_info,
				    struct btrfs_path *path,
				    struct extent_buffer *l,
				    struct extent_buffer *right,
				    int slot, int mid, int nritems)
{
	int data_copy_size;
	int rt_data_off;
	int i;
	struct btrfs_disk_key disk_key;
	struct btrfs_map_token token;

	btrfs_init_map_token(&token);

	nritems = nritems - mid;
	btrfs_set_header_nritems(right, nritems);
	data_copy_size = btrfs_item_end_nr(l, mid) - leaf_data_end(fs_info, l);

	copy_extent_buffer(right, l, btrfs_item_nr_offset(0),
			   btrfs_item_nr_offset(mid),
			   nritems * sizeof(struct btrfs_item));

	copy_extent_buffer(right, l,
		     BTRFS_LEAF_DATA_OFFSET + BTRFS_LEAF_DATA_SIZE(fs_info) -
		     data_copy_size, BTRFS_LEAF_DATA_OFFSET +
		     leaf_data_end(fs_info, l), data_copy_size);

	rt_data_off = BTRFS_LEAF_DATA_SIZE(fs_info) - btrfs_item_end_nr(l, mid);

	for (i = 0; i < nritems; i++) {
		struct btrfs_item *item = btrfs_item_nr(i);
		u32 ioff;

		ioff = btrfs_token_item_offset(right, item, &token);
		btrfs_set_token_item_offset(right, item,
					    ioff + rt_data_off, &token);
	}

	btrfs_set_header_nritems(l, mid);
	btrfs_item_key(right, &disk_key, 0);
	insert_ptr(trans, fs_info, path, &disk_key, right->start,
		   path->slots[1] + 1, 1);

	btrfs_mark_buffer_dirty(right);
	btrfs_mark_buffer_dirty(l);
	BUG_ON(path->slots[0] != slot);

	if (mid <= slot) {
		btrfs_tree_unlock(path->nodes[0]);
		free_extent_buffer(path->nodes[0]);
		path->nodes[0] = right;
		path->slots[0] -= mid;
		path->slots[1] += 1;
	} else {
		btrfs_tree_unlock(right);
		free_extent_buffer(right);
	}

	BUG_ON(path->slots[0] < 0);
}
/*
 * double splits happen when we need to insert a big item in the middle
 * of a leaf. A double split can leave us with 3 mostly empty leaves:
 * leaf: [ slots 0 - N] [ our target ] [ N + 1 - total in leaf ]
 *          A                 B                 C
 *
 * We avoid this by trying to push the items on either side of our target
 * into the adjacent leaves. If all goes well we can avoid the double split
 * completely.
 */
static noinline int push_for_double_split(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path,
					  int data_size)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret;
	int progress = 0;
	int slot;
	u32 nritems;
	int space_needed = data_size;

	slot = path->slots[0];
	if (slot < btrfs_header_nritems(path->nodes[0]))
		space_needed -= btrfs_leaf_free_space(fs_info, path->nodes[0]);

	/*
	 * try to push all the items after our slot into the
	 * right leaf
	 */
	ret = push_leaf_right(trans, root, path, 1, space_needed, 0, slot);
	if (ret < 0)
		return ret;

	if (ret == 0)
		progress++;

	nritems = btrfs_header_nritems(path->nodes[0]);
	/*
	 * our goal is to get our slot at the start or end of a leaf. If
	 * we've done so we're done
	 */
	if (path->slots[0] == 0 || path->slots[0] == nritems)
		return 0;

	if (btrfs_leaf_free_space(fs_info, path->nodes[0]) >= data_size)
		return 0;

	/* try to push all the items before our slot into the next leaf */
	slot = path->slots[0];
	space_needed = data_size;
	if (slot > 0)
		space_needed -= btrfs_leaf_free_space(fs_info, path->nodes[0]);
	ret = push_leaf_left(trans, root, path, 1, space_needed, 0, slot);
	if (ret < 0)
		return ret;

	if (ret == 0)
		progress++;

	if (progress)
		return 0;
	return 1;
}
/*
 * split the path's leaf in two, making sure there is at least data_size
 * available for the resulting leaf level of the path.
 *
 * returns 0 if all went well and < 0 on failure.
 */
static noinline int split_leaf(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       const struct btrfs_key *ins_key,
			       struct btrfs_path *path, int data_size,
			       int extend)
{
	struct btrfs_disk_key disk_key;
	struct extent_buffer *l;
	u32 nritems;
	int mid;
	int slot;
	struct extent_buffer *right;
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret = 0;
	int wret;
	int split;
	int num_doubles = 0;
	int tried_avoid_double = 0;

	l = path->nodes[0];
	slot = path->slots[0];
	if (extend && data_size + btrfs_item_size_nr(l, slot) +
	    sizeof(struct btrfs_item) > BTRFS_LEAF_DATA_SIZE(fs_info))
		return -EOVERFLOW;

	/* first try to make some room by pushing left and right */
	if (data_size && path->nodes[1]) {
		int space_needed = data_size;

		if (slot < btrfs_header_nritems(l))
			space_needed -= btrfs_leaf_free_space(fs_info, l);

		wret = push_leaf_right(trans, root, path, space_needed,
				       space_needed, 0, 0);
		if (wret < 0)
			return wret;
		if (wret) {
			space_needed = data_size;
			if (slot > 0)
				space_needed -= btrfs_leaf_free_space(fs_info,
								      l);
			wret = push_leaf_left(trans, root, path, space_needed,
					      space_needed, 0, (u32)-1);
			if (wret < 0)
				return wret;
		}
		l = path->nodes[0];

		/* did the pushes work? */
		if (btrfs_leaf_free_space(fs_info, l) >= data_size)
			return 0;
	}

	if (!path->nodes[1]) {
		ret = insert_new_root(trans, root, path, 1);
		if (ret)
			return ret;
	}
again:
	split = 1;
	l = path->nodes[0];
	slot = path->slots[0];
	nritems = btrfs_header_nritems(l);
	mid = (nritems + 1) / 2;

	if (mid <= slot) {
		if (nritems == 1 ||
		    leaf_space_used(l, mid, nritems - mid) + data_size >
			BTRFS_LEAF_DATA_SIZE(fs_info)) {
			if (slot >= nritems) {
				split = 0;
			} else {
				mid = slot;
				if (mid != nritems &&
				    leaf_space_used(l, mid, nritems - mid) +
				    data_size > BTRFS_LEAF_DATA_SIZE(fs_info)) {
					if (data_size && !tried_avoid_double)
						goto push_for_double;
					split = 2;
				}
			}
		}
	} else {
		if (leaf_space_used(l, 0, mid) + data_size >
			BTRFS_LEAF_DATA_SIZE(fs_info)) {
			if (!extend && data_size && slot == 0) {
				split = 0;
			} else if ((extend || !data_size) && slot == 0) {
				mid = 1;
			} else {
				mid = slot;
				if (mid != nritems &&
				    leaf_space_used(l, mid, nritems - mid) +
				    data_size > BTRFS_LEAF_DATA_SIZE(fs_info)) {
					if (data_size && !tried_avoid_double)
						goto push_for_double;
					split = 2;
				}
			}
		}
	}

	if (split == 0)
		btrfs_cpu_key_to_disk(&disk_key, ins_key);
	else
		btrfs_item_key(l, &disk_key, mid);

	right = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
				       &disk_key, 0, l->start, 0);
	if (IS_ERR(right))
		return PTR_ERR(right);

	root_add_used(root, fs_info->nodesize);

	memzero_extent_buffer(right, 0, sizeof(struct btrfs_header));
	btrfs_set_header_bytenr(right, right->start);
	btrfs_set_header_generation(right, trans->transid);
	btrfs_set_header_backref_rev(right, BTRFS_MIXED_BACKREF_REV);
	btrfs_set_header_owner(right, root->root_key.objectid);
	btrfs_set_header_level(right, 0);
	write_extent_buffer_fsid(right, fs_info->fsid);
	write_extent_buffer_chunk_tree_uuid(right, fs_info->chunk_tree_uuid);

	if (split == 0) {
		if (mid <= slot) {
			btrfs_set_header_nritems(right, 0);
			insert_ptr(trans, fs_info, path, &disk_key,
				   right->start, path->slots[1] + 1, 1);
			btrfs_tree_unlock(path->nodes[0]);
			free_extent_buffer(path->nodes[0]);
			path->nodes[0] = right;
			path->slots[0] = 0;
			path->slots[1] += 1;
		} else {
			btrfs_set_header_nritems(right, 0);
			insert_ptr(trans, fs_info, path, &disk_key,
				   right->start, path->slots[1], 1);
			btrfs_tree_unlock(path->nodes[0]);
			free_extent_buffer(path->nodes[0]);
			path->nodes[0] = right;
			path->slots[0] = 0;
			if (path->slots[1] == 0)
				fixup_low_keys(fs_info, path, &disk_key, 1);
		}
		/*
		 * We create a new leaf 'right' for the required ins_len and
		 * we'll do btrfs_mark_buffer_dirty() on this leaf after copying
		 * the content of ins_len to 'right'.
		 */
		return ret;
	}

	copy_for_split(trans, fs_info, path, l, right, slot, mid, nritems);

	if (split == 2) {
		BUG_ON(num_doubles != 0);
		num_doubles++;
		goto again;
	}

	return 0;

push_for_double:
	push_for_double_split(trans, root, path, data_size);
	tried_avoid_double = 1;
	if (btrfs_leaf_free_space(fs_info, path->nodes[0]) >= data_size)
		return 0;
	goto again;
}
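
/*
 * Summary of the 'split' values computed above (derived from the code,
 * for reference): split == 1 is the normal case, roughly half the items
 * move to the new leaf; split == 0 means the new leaf starts empty and
 * only receives the item about to be inserted at ins_key; split == 2
 * means even the half containing 'slot' is too full, so after the first
 * split we loop once more ('again') and split that half a second time.
 */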
static noinline int setup_leaf_for_split(struct btrfs_trans_handle *trans,
					 struct btrfs_root *root,
					 struct btrfs_path *path, int ins_len)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_key key;
	struct extent_buffer *leaf;
	struct btrfs_file_extent_item *fi;
	u64 extent_len = 0;
	u32 item_size;
	int ret;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

	BUG_ON(key.type != BTRFS_EXTENT_DATA_KEY &&
	       key.type != BTRFS_EXTENT_CSUM_KEY);

	if (btrfs_leaf_free_space(fs_info, leaf) >= ins_len)
		return 0;

	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
	if (key.type == BTRFS_EXTENT_DATA_KEY) {
		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		extent_len = btrfs_file_extent_num_bytes(leaf, fi);
	}
	btrfs_release_path(path);

	path->keep_locks = 1;
	path->search_for_split = 1;
	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	path->search_for_split = 0;
	if (ret > 0)
		ret = -EAGAIN;
	if (ret < 0)
		goto err;

	ret = -EAGAIN;
	leaf = path->nodes[0];
	/* if our item isn't there, return now */
	if (item_size != btrfs_item_size_nr(leaf, path->slots[0]))
		goto err;

	/* the leaf has changed, it now has room. return now */
	if (btrfs_leaf_free_space(fs_info, path->nodes[0]) >= ins_len)
		goto err;

	if (key.type == BTRFS_EXTENT_DATA_KEY) {
		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		if (extent_len != btrfs_file_extent_num_bytes(leaf, fi))
			goto err;
	}

	btrfs_set_path_blocking(path);
	ret = split_leaf(trans, root, &key, path, ins_len, 1);
	if (ret)
		goto err;

	path->keep_locks = 0;
	btrfs_unlock_up_safe(path, 1);
	return 0;
err:
	path->keep_locks = 0;
	return ret;
}
static noinline int split_item(struct btrfs_fs_info *fs_info,
			       struct btrfs_path *path,
			       const struct btrfs_key *new_key,
			       unsigned long split_offset)
{
	struct extent_buffer *leaf;
	struct btrfs_item *item;
	struct btrfs_item *new_item;
	int slot;
	char *buf;
	u32 nritems;
	u32 item_size;
	u32 orig_offset;
	struct btrfs_disk_key disk_key;

	leaf = path->nodes[0];
	BUG_ON(btrfs_leaf_free_space(fs_info, leaf) < sizeof(struct btrfs_item));

	btrfs_set_path_blocking(path);

	item = btrfs_item_nr(path->slots[0]);
	orig_offset = btrfs_item_offset(leaf, item);
	item_size = btrfs_item_size(leaf, item);

	buf = kmalloc(item_size, GFP_NOFS);
	if (!buf)
		return -ENOMEM;

	read_extent_buffer(leaf, buf, btrfs_item_ptr_offset(leaf,
			   path->slots[0]), item_size);

	slot = path->slots[0] + 1;
	nritems = btrfs_header_nritems(leaf);
	if (slot != nritems) {
		/* shift the items */
		memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + 1),
				btrfs_item_nr_offset(slot),
				(nritems - slot) * sizeof(struct btrfs_item));
	}

	btrfs_cpu_key_to_disk(&disk_key, new_key);
	btrfs_set_item_key(leaf, &disk_key, slot);

	new_item = btrfs_item_nr(slot);

	btrfs_set_item_offset(leaf, new_item, orig_offset);
	btrfs_set_item_size(leaf, new_item, item_size - split_offset);

	btrfs_set_item_offset(leaf, item,
			      orig_offset + item_size - split_offset);
	btrfs_set_item_size(leaf, item, split_offset);

	btrfs_set_header_nritems(leaf, nritems + 1);

	/* write the data for the start of the original item */
	write_extent_buffer(leaf, buf,
			    btrfs_item_ptr_offset(leaf, path->slots[0]),
			    split_offset);

	/* write the data for the new item */
	write_extent_buffer(leaf, buf + split_offset,
			    btrfs_item_ptr_offset(leaf, slot),
			    item_size - split_offset);
	btrfs_mark_buffer_dirty(leaf);

	BUG_ON(btrfs_leaf_free_space(fs_info, leaf) < 0);
	kfree(buf);
	return 0;
}
/*
 * This function splits a single item into two items,
 * giving 'new_key' to the new item and splitting the
 * old one at split_offset (from the start of the item).
 *
 * The path may be released by this operation. After
 * the split, the path is pointing to the old item. The
 * new item is going to be in the same node as the old one.
 *
 * Note, the item being split must be small enough to live alone on
 * a tree block with room for one extra struct btrfs_item
 *
 * This allows us to split the item in place, keeping a lock on the
 * leaf the entire time.
 */
int btrfs_split_item(struct btrfs_trans_handle *trans,
		     struct btrfs_root *root,
		     struct btrfs_path *path,
		     const struct btrfs_key *new_key,
		     unsigned long split_offset)
{
	int ret;
	ret = setup_leaf_for_split(trans, root, path,
				   sizeof(struct btrfs_item));
	if (ret)
		return ret;

	ret = split_item(root->fs_info, path, new_key, split_offset);
	return ret;
}
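
/*
 * Illustrative sketch (not part of the original file): split the item the
 * path points at so the bytes from 'split_offset' onward move into a new
 * item keyed at 'new_offset'.  Per setup_leaf_for_split() this only works
 * for EXTENT_DATA and EXTENT_CSUM keys.  example_split_csum_item is
 * hypothetical.
 */
static int example_split_csum_item(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct btrfs_path *path,
				   u64 new_offset,
				   unsigned long split_offset)
{
	struct btrfs_key new_key;

	btrfs_item_key_to_cpu(path->nodes[0], &new_key, path->slots[0]);
	new_key.offset = new_offset;
	return btrfs_split_item(trans, root, path, &new_key, split_offset);
}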
/*
 * This function duplicates an item, giving 'new_key' to the new item.
 * It guarantees both items live in the same tree leaf and the new item
 * is contiguous with the original item.
 *
 * This allows us to split a file extent in place, keeping a lock on the
 * leaf the entire time.
 */
int btrfs_duplicate_item(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root,
			 struct btrfs_path *path,
			 const struct btrfs_key *new_key)
{
	struct extent_buffer *leaf;
	int ret;
	u32 item_size;

	leaf = path->nodes[0];
	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
	ret = setup_leaf_for_split(trans, root, path,
				   item_size + sizeof(struct btrfs_item));
	if (ret)
		return ret;

	path->slots[0]++;
	setup_items_for_insert(root, path, new_key, &item_size,
			       item_size, item_size +
			       sizeof(struct btrfs_item), 1);
	leaf = path->nodes[0];
	memcpy_extent_buffer(leaf,
			     btrfs_item_ptr_offset(leaf, path->slots[0]),
			     btrfs_item_ptr_offset(leaf, path->slots[0] - 1),
			     item_size);
	return 0;
}
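
/*
 * Illustrative sketch (not part of the original file): clone the file
 * extent item at the current path position under a new file offset, the
 * way in-place file extent splitting uses this helper.
 * example_dup_file_extent is hypothetical.
 */
static int example_dup_file_extent(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct btrfs_path *path,
				   u64 new_file_offset)
{
	struct btrfs_key new_key;

	btrfs_item_key_to_cpu(path->nodes[0], &new_key, path->slots[0]);
	new_key.offset = new_file_offset;
	return btrfs_duplicate_item(trans, root, path, &new_key);
}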
/*
 * make the item pointed to by the path smaller. new_size indicates
 * how small to make it, and from_end tells us if we just chop bytes
 * off the end of the item or if we shift the item to chop bytes off
 * the front.
 */
void btrfs_truncate_item(struct btrfs_fs_info *fs_info,
			 struct btrfs_path *path, u32 new_size, int from_end)
{
	int slot;
	struct extent_buffer *leaf;
	struct btrfs_item *item;
	u32 nritems;
	unsigned int data_end;
	unsigned int old_data_start;
	unsigned int old_size;
	unsigned int size_diff;
	int i;
	struct btrfs_map_token token;

	btrfs_init_map_token(&token);

	leaf = path->nodes[0];
	slot = path->slots[0];

	old_size = btrfs_item_size_nr(leaf, slot);
	if (old_size == new_size)
		return;

	nritems = btrfs_header_nritems(leaf);
	data_end = leaf_data_end(fs_info, leaf);

	old_data_start = btrfs_item_offset_nr(leaf, slot);

	size_diff = old_size - new_size;

	BUG_ON(slot < 0);
	BUG_ON(slot >= nritems);

	/*
	 * item0..itemN ... dataN.offset..dataN.size .. data0.size
	 */
	/* first correct the data pointers */
	for (i = slot; i < nritems; i++) {
		u32 ioff;
		item = btrfs_item_nr(i);

		ioff = btrfs_token_item_offset(leaf, item, &token);
		btrfs_set_token_item_offset(leaf, item,
					    ioff + size_diff, &token);
	}

	/* shift the data */
	if (from_end) {
		memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET +
			      data_end + size_diff, BTRFS_LEAF_DATA_OFFSET +
			      data_end, old_data_start + new_size - data_end);
	} else {
		struct btrfs_disk_key disk_key;
		u64 offset;

		btrfs_item_key(leaf, &disk_key, slot);

		if (btrfs_disk_key_type(&disk_key) == BTRFS_EXTENT_DATA_KEY) {
			unsigned long ptr;
			struct btrfs_file_extent_item *fi;

			fi = btrfs_item_ptr(leaf, slot,
					    struct btrfs_file_extent_item);
			fi = (struct btrfs_file_extent_item *)(
			     (unsigned long)fi - size_diff);

			if (btrfs_file_extent_type(leaf, fi) ==
			    BTRFS_FILE_EXTENT_INLINE) {
				ptr = btrfs_item_ptr_offset(leaf, slot);
				memmove_extent_buffer(leaf, ptr,
				      (unsigned long)fi,
				      BTRFS_FILE_EXTENT_INLINE_DATA_START);
			}
		}

		memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET +
			      data_end + size_diff, BTRFS_LEAF_DATA_OFFSET +
			      data_end, old_data_start - data_end);

		offset = btrfs_disk_key_offset(&disk_key);
		btrfs_set_disk_key_offset(&disk_key, offset + size_diff);
		btrfs_set_item_key(leaf, &disk_key, slot);
		if (slot == 0)
			fixup_low_keys(fs_info, path, &disk_key, 1);
	}

	item = btrfs_item_nr(slot);
	btrfs_set_item_size(leaf, item, new_size);
	btrfs_mark_buffer_dirty(leaf);

	if (btrfs_leaf_free_space(fs_info, leaf) < 0) {
		btrfs_print_leaf(leaf);
		BUG();
	}
}
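
/*
 * Illustrative sketch (not part of the original file): chop the item the
 * path points at down to 'new_size' bytes, dropping bytes from the end.
 * Passing from_end == 0 instead drops bytes from the front and shifts
 * the key offset forward by the difference, as the code above shows.
 * example_truncate_item_tail is hypothetical.
 */
static void example_truncate_item_tail(struct btrfs_fs_info *fs_info,
				       struct btrfs_path *path, u32 new_size)
{
	struct extent_buffer *leaf = path->nodes[0];

	/* no-op when the item is already small enough */
	if (btrfs_item_size_nr(leaf, path->slots[0]) <= new_size)
		return;
	btrfs_truncate_item(fs_info, path, new_size, 1);
}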
/*
 * make the item pointed to by the path bigger, data_size is the added size.
 */
void btrfs_extend_item(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
		       u32 data_size)
{
	int slot;
	struct extent_buffer *leaf;
	struct btrfs_item *item;
	u32 nritems;
	unsigned int data_end;
	unsigned int old_data;
	unsigned int old_size;
	int i;
	struct btrfs_map_token token;

	btrfs_init_map_token(&token);

	leaf = path->nodes[0];

	nritems = btrfs_header_nritems(leaf);
	data_end = leaf_data_end(fs_info, leaf);

	if (btrfs_leaf_free_space(fs_info, leaf) < data_size) {
		btrfs_print_leaf(leaf);
		BUG();
	}
	slot = path->slots[0];
	old_data = btrfs_item_end_nr(leaf, slot);

	BUG_ON(slot < 0);
	if (slot >= nritems) {
		btrfs_print_leaf(leaf);
		btrfs_crit(fs_info, "slot %d too large, nritems %d",
			   slot, nritems);
		BUG_ON(1);
	}

	/*
	 * item0..itemN ... dataN.offset..dataN.size .. data0.size
	 */
	/* first correct the data pointers */
	for (i = slot; i < nritems; i++) {
		u32 ioff;
		item = btrfs_item_nr(i);

		ioff = btrfs_token_item_offset(leaf, item, &token);
		btrfs_set_token_item_offset(leaf, item,
					    ioff - data_size, &token);
	}

	/* shift the data */
	memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET +
		      data_end - data_size, BTRFS_LEAF_DATA_OFFSET +
		      data_end, old_data - data_end);

	data_end = old_data;
	old_size = btrfs_item_size_nr(leaf, slot);
	item = btrfs_item_nr(slot);
	btrfs_set_item_size(leaf, item, old_size + data_size);
	btrfs_mark_buffer_dirty(leaf);

	if (btrfs_leaf_free_space(fs_info, leaf) < 0) {
		btrfs_print_leaf(leaf);
		BUG();
	}
}
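
/*
 * Illustrative sketch (not part of the original file): grow the item at
 * the current path position by 'extra' bytes and zero the new tail.  The
 * caller must have searched with ins_len large enough that the leaf still
 * has 'extra' bytes free, or the BUG() above fires.  Note the item's data
 * pointer moves during the extend, so it is re-read afterwards.
 * example_extend_and_zero is hypothetical.
 */
static void example_extend_and_zero(struct btrfs_fs_info *fs_info,
				    struct btrfs_path *path, u32 extra)
{
	struct extent_buffer *leaf = path->nodes[0];
	unsigned long ptr;
	u32 old_size;

	old_size = btrfs_item_size_nr(leaf, path->slots[0]);
	btrfs_extend_item(fs_info, path, extra);
	/* the new bytes sit at the end of the (relocated) item data */
	ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
	memzero_extent_buffer(leaf, ptr + old_size, extra);
}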
/*
 * this is a helper for btrfs_insert_empty_items, the main goal here is
 * to save stack depth by doing the bulk of the work in a function
 * that doesn't call btrfs_search_slot
 */
void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *path,
			    const struct btrfs_key *cpu_key, u32 *data_size,
			    u32 total_data, u32 total_size, int nr)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_item *item;
	int i;
	u32 nritems;
	unsigned int data_end;
	struct btrfs_disk_key disk_key;
	struct extent_buffer *leaf;
	int slot;
	struct btrfs_map_token token;

	if (path->slots[0] == 0) {
		btrfs_cpu_key_to_disk(&disk_key, cpu_key);
		fixup_low_keys(fs_info, path, &disk_key, 1);
	}
	btrfs_unlock_up_safe(path, 1);

	btrfs_init_map_token(&token);

	leaf = path->nodes[0];
	slot = path->slots[0];

	nritems = btrfs_header_nritems(leaf);
	data_end = leaf_data_end(fs_info, leaf);

	if (btrfs_leaf_free_space(fs_info, leaf) < total_size) {
		btrfs_print_leaf(leaf);
		btrfs_crit(fs_info, "not enough freespace need %u have %d",
			   total_size, btrfs_leaf_free_space(fs_info, leaf));
		BUG();
	}

	if (slot != nritems) {
		unsigned int old_data = btrfs_item_end_nr(leaf, slot);

		if (old_data < data_end) {
			btrfs_print_leaf(leaf);
			btrfs_crit(fs_info, "slot %d old_data %d data_end %d",
				   slot, old_data, data_end);
			BUG_ON(1);
		}
		/*
		 * item0..itemN ... dataN.offset..dataN.size .. data0.size
		 */
		/* first correct the data pointers */
		for (i = slot; i < nritems; i++) {
			u32 ioff;

			item = btrfs_item_nr(i);
			ioff = btrfs_token_item_offset(leaf, item, &token);
			btrfs_set_token_item_offset(leaf, item,
						    ioff - total_data, &token);
		}
		/* shift the items */
		memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + nr),
			      btrfs_item_nr_offset(slot),
			      (nritems - slot) * sizeof(struct btrfs_item));

		/* shift the data */
		memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET +
			      data_end - total_data, BTRFS_LEAF_DATA_OFFSET +
			      data_end, old_data - data_end);
		data_end = old_data;
	}

	/* setup the item for the new data */
	for (i = 0; i < nr; i++) {
		btrfs_cpu_key_to_disk(&disk_key, cpu_key + i);
		btrfs_set_item_key(leaf, &disk_key, slot + i);
		item = btrfs_item_nr(slot + i);
		btrfs_set_token_item_offset(leaf, item,
					    data_end - data_size[i], &token);
		data_end -= data_size[i];
		btrfs_set_token_item_size(leaf, item, data_size[i], &token);
	}

	btrfs_set_header_nritems(leaf, nritems + nr);
	btrfs_mark_buffer_dirty(leaf);

	if (btrfs_leaf_free_space(fs_info, leaf) < 0) {
		btrfs_print_leaf(leaf);
		BUG();
	}
}
/*
 * Given a key and some data, insert items into the tree.
 * This does all the path init required, making room in the tree if needed.
 */
int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct btrfs_path *path,
			     const struct btrfs_key *cpu_key, u32 *data_size,
			     int nr)
{
	int ret = 0;
	int slot;
	int i;
	u32 total_size = 0;
	u32 total_data = 0;

	for (i = 0; i < nr; i++)
		total_data += data_size[i];

	total_size = total_data + (nr * sizeof(struct btrfs_item));
	ret = btrfs_search_slot(trans, root, cpu_key, path, total_size, 1);
	if (ret == 0)
		return -EEXIST;
	if (ret < 0)
		return ret;

	slot = path->slots[0];
	BUG_ON(slot < 0);

	setup_items_for_insert(root, path, cpu_key, data_size,
			       total_data, total_size, nr);
	return 0;
}

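/*
 * Illustrative sketch, not part of the original file: reserving two
 * adjacent empty items in a single leaf operation. The key values and
 * payload sizes are made up; a real caller would fill the reserved
 * space through btrfs_item_ptr() before releasing the path.
 */
static int __maybe_unused example_insert_two(struct btrfs_trans_handle *trans,
					     struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct btrfs_key keys[2];
	u32 sizes[2] = { 16, 32 };	/* hypothetical payload sizes */
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	keys[0].objectid = 256;		/* hypothetical objectid */
	keys[0].type = BTRFS_XATTR_ITEM_KEY;
	keys[0].offset = 0;
	keys[1] = keys[0];
	keys[1].offset = 1;		/* the key array must be sorted */

	ret = btrfs_insert_empty_items(trans, root, path, keys, sizes, 2);
	/* on success the items sit at path->slots[0] and path->slots[0] + 1 */
	btrfs_free_path(path);
	return ret;
}
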
/*
 * Given a key and some data, insert an item into the tree.
 * This does all the path init required, making room in the tree if needed.
 */
int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		      const struct btrfs_key *cpu_key, void *data,
		      u32 data_size)
{
	int ret = 0;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	unsigned long ptr;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	ret = btrfs_insert_empty_item(trans, root, path, cpu_key, data_size);
	if (!ret) {
		leaf = path->nodes[0];
		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
		write_extent_buffer(leaf, data, ptr, data_size);
		btrfs_mark_buffer_dirty(leaf);
	}
	btrfs_free_path(path);
	return ret;
}

/*
 * delete the pointer from a given node.
 *
 * the tree should have been previously balanced so the deletion does not
 * empty a node.
 */
static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
		    int level, int slot)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_buffer *parent = path->nodes[level];
	u32 nritems;
	int ret;

	nritems = btrfs_header_nritems(parent);
	if (slot != nritems - 1) {
		if (level)
			tree_mod_log_eb_move(fs_info, parent, slot,
					     slot + 1, nritems - slot - 1);
		memmove_extent_buffer(parent,
			      btrfs_node_key_ptr_offset(slot),
			      btrfs_node_key_ptr_offset(slot + 1),
			      sizeof(struct btrfs_key_ptr) *
			      (nritems - slot - 1));
	} else if (level) {
		ret = tree_mod_log_insert_key(fs_info, parent, slot,
					      MOD_LOG_KEY_REMOVE, GFP_NOFS);
		BUG_ON(ret < 0);
	}

	nritems--;
	btrfs_set_header_nritems(parent, nritems);
	if (nritems == 0 && parent == root->node) {
		BUG_ON(btrfs_header_level(root->node) != 1);
		/* just turn the root into a leaf and break */
		btrfs_set_header_level(root->node, 0);
	} else if (slot == 0) {
		struct btrfs_disk_key disk_key;

		btrfs_node_key(parent, &disk_key, 0);
		fixup_low_keys(fs_info, path, &disk_key, level + 1);
	}
	btrfs_mark_buffer_dirty(parent);
}

/*
 * a helper function to delete the leaf pointed to by path->slots[1] and
 * path->nodes[1].
 *
 * This deletes the pointer in path->nodes[1] and frees the leaf
 * block extent.
 *
 * The path must have already been set up for deleting the leaf, including
 * all the proper balancing. path->nodes[1] must be locked.
 */
static noinline void btrfs_del_leaf(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    struct btrfs_path *path,
				    struct extent_buffer *leaf)
{
	WARN_ON(btrfs_header_generation(leaf) != trans->transid);
	del_ptr(root, path, 1, path->slots[1]);

	/*
	 * btrfs_free_extent is expensive, we want to make sure we
	 * aren't holding any locks when we call it
	 */
	btrfs_unlock_up_safe(path, 0);

	root_sub_used(root, leaf->len);

	extent_buffer_get(leaf);
	btrfs_free_tree_block(trans, root, leaf, 0, 1);
	free_extent_buffer_stale(leaf);
}

/*
 * delete the item at the leaf level in path. If that empties
 * the leaf, remove it from the tree
 */
int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		    struct btrfs_path *path, int slot, int nr)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_buffer *leaf;
	struct btrfs_item *item;
	u32 last_off;
	u32 dsize = 0;
	int ret = 0;
	int wret;
	int i;
	u32 nritems;
	struct btrfs_map_token token;

	btrfs_init_map_token(&token);

	leaf = path->nodes[0];
	last_off = btrfs_item_offset_nr(leaf, slot + nr - 1);

	for (i = 0; i < nr; i++)
		dsize += btrfs_item_size_nr(leaf, slot + i);

	nritems = btrfs_header_nritems(leaf);

	if (slot + nr != nritems) {
		int data_end = leaf_data_end(fs_info, leaf);

		memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET +
			      data_end + dsize,
			      BTRFS_LEAF_DATA_OFFSET + data_end,
			      last_off - data_end);

		for (i = slot + nr; i < nritems; i++) {
			u32 ioff;

			item = btrfs_item_nr(i);
			ioff = btrfs_token_item_offset(leaf, item, &token);
			btrfs_set_token_item_offset(leaf, item,
						    ioff + dsize, &token);
		}

		memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot),
			      btrfs_item_nr_offset(slot + nr),
			      sizeof(struct btrfs_item) *
			      (nritems - slot - nr));
	}
	btrfs_set_header_nritems(leaf, nritems - nr);
	nritems -= nr;

	/* delete the leaf if we've emptied it */
	if (nritems == 0) {
		if (leaf == root->node) {
			btrfs_set_header_level(leaf, 0);
		} else {
			btrfs_set_path_blocking(path);
			clean_tree_block(fs_info, leaf);
			btrfs_del_leaf(trans, root, path, leaf);
		}
	} else {
		int used = leaf_space_used(leaf, 0, nritems);

		if (slot == 0) {
			struct btrfs_disk_key disk_key;

			btrfs_item_key(leaf, &disk_key, 0);
			fixup_low_keys(fs_info, path, &disk_key, 1);
		}

		/* delete the leaf if it is mostly empty */
		if (used < BTRFS_LEAF_DATA_SIZE(fs_info) / 3) {
			/* push_leaf_left fixes the path.
			 * make sure the path still points to our leaf
			 * for possible call to del_ptr below
			 */
			slot = path->slots[1];
			extent_buffer_get(leaf);

			btrfs_set_path_blocking(path);
			wret = push_leaf_left(trans, root, path, 1, 1,
					      1, (u32)-1);
			if (wret < 0 && wret != -ENOSPC)
				ret = wret;

			if (path->nodes[0] == leaf &&
			    btrfs_header_nritems(leaf)) {
				wret = push_leaf_right(trans, root, path, 1,
						       1, 1, 0);
				if (wret < 0 && wret != -ENOSPC)
					ret = wret;
			}

			if (btrfs_header_nritems(leaf) == 0) {
				path->slots[1] = slot;
				btrfs_del_leaf(trans, root, path, leaf);
				free_extent_buffer(leaf);
				ret = 0;
			} else {
				/* if we're still in the path, make sure
				 * we're dirty. Otherwise, one of the
				 * push_leaf functions must have already
				 * dirtied this buffer
				 */
				if (path->nodes[0] == leaf)
					btrfs_mark_buffer_dirty(leaf);
				free_extent_buffer(leaf);
			}
		} else {
			btrfs_mark_buffer_dirty(leaf);
		}
	}
	return ret;
}

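/*
 * Illustrative sketch, not part of the original file: deleting a single
 * item by key, the common pairing of btrfs_search_slot() (ins_len = -1
 * and cow = 1, so balancing for deletion happens during the search)
 * with btrfs_del_items(). The -ENOENT mapping is a choice made for the
 * example.
 */
static int __maybe_unused example_delete_one(struct btrfs_trans_handle *trans,
					     struct btrfs_root *root,
					     const struct btrfs_key *key)
{
	struct btrfs_path *path;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(trans, root, key, path, -1, 1);
	if (ret == 0)
		ret = btrfs_del_items(trans, root, path, path->slots[0], 1);
	else if (ret > 0)
		ret = -ENOENT;	/* the exact key does not exist */

	btrfs_free_path(path);
	return ret;
}
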
/*
 * search the tree again to find a leaf with lesser keys
 * returns 0 if it found something or 1 if there are no lesser leaves.
 * returns < 0 on io errors.
 *
 * This may release the path, and so you may lose any locks held at the
 * time you call it.
 */
int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path)
{
	struct btrfs_key key;
	struct btrfs_disk_key found_key;
	int ret;

	btrfs_item_key_to_cpu(path->nodes[0], &key, 0);

	if (key.offset > 0) {
		key.offset--;
	} else if (key.type > 0) {
		key.type--;
		key.offset = (u64)-1;
	} else if (key.objectid > 0) {
		key.objectid--;
		key.type = (u8)-1;
		key.offset = (u64)-1;
	} else {
		return 1;
	}

	btrfs_release_path(path);
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		return ret;
	btrfs_item_key(path->nodes[0], &found_key, 0);
	ret = comp_keys(&found_key, &key);
	/*
	 * We might have had an item with the previous key in the tree right
	 * before we released our path. And after we released our path, that
	 * item might have been pushed to the first slot (0) of the leaf we
	 * were holding due to a tree balance. Alternatively, an item with the
	 * previous key can exist as the only element of a leaf (big fat item).
	 * Therefore account for these 2 cases, so that our callers (like
	 * btrfs_previous_item) don't miss an existing item with a key matching
	 * the previous key we computed above.
	 */
	if (ret <= 0)
		return 0;
	return 1;
}

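/*
 * Illustrative sketch, not part of the original file: the predecessor
 * key computation used above, restated as a standalone helper to make
 * the borrow cascade explicit. For example, the key (objectid 5,
 * type 0, offset 0) steps to (objectid 4, type 0xff, offset (u64)-1).
 */
static int __maybe_unused example_prev_key(struct btrfs_key *key)
{
	if (key->offset > 0) {
		key->offset--;
	} else if (key->type > 0) {
		key->type--;
		key->offset = (u64)-1;
	} else if (key->objectid > 0) {
		key->objectid--;
		key->type = (u8)-1;
		key->offset = (u64)-1;
	} else {
		return 1;	/* already the smallest possible key */
	}
	return 0;
}
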
/*
 * A helper function to walk down the tree starting at min_key, and looking
 * for nodes or leaves that have a minimum transaction id.
 * This is used by the btree defrag code, and tree logging
 *
 * This does not cow, but it does stuff the starting key it finds back
 * into min_key, so you can call btrfs_search_slot with cow=1 on the
 * key and get a writable path.
 *
 * This does lock as it descends; path->keep_locks is forced to 1 for the
 * duration of the search and restored before returning.
 *
 * This honors path->lowest_level to prevent descent past a given level
 * of the tree.
 *
 * min_trans indicates the oldest transaction that you are interested
 * in walking through. Any nodes or leaves older than min_trans are
 * skipped over (without reading them).
 *
 * returns zero if something useful was found, < 0 on error and 1 if there
 * was nothing in the tree that matched the search criteria.
 */
int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
			 struct btrfs_path *path,
			 u64 min_trans)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_buffer *cur;
	struct btrfs_key found_key;
	int slot;
	int sret;
	u32 nritems;
	int level;
	int ret = 1;
	int keep_locks = path->keep_locks;

	path->keep_locks = 1;
again:
	cur = btrfs_read_lock_root_node(root);
	level = btrfs_header_level(cur);
	WARN_ON(path->nodes[level]);
	path->nodes[level] = cur;
	path->locks[level] = BTRFS_READ_LOCK;

	if (btrfs_header_generation(cur) < min_trans) {
		ret = 1;
		goto out;
	}
	while (1) {
		nritems = btrfs_header_nritems(cur);
		level = btrfs_header_level(cur);
		sret = bin_search(cur, min_key, level, &slot);

		/* at the lowest level, we're done, set up the path and exit */
		if (level == path->lowest_level) {
			if (slot >= nritems)
				goto find_next_key;
			ret = 0;
			path->slots[level] = slot;
			btrfs_item_key_to_cpu(cur, &found_key, slot);
			goto out;
		}
		if (sret && slot > 0)
			slot--;
		/*
		 * check this node pointer against the min_trans parameter.
		 * If it is too old, skip to the next one.
		 */
		while (slot < nritems) {
			u64 gen;

			gen = btrfs_node_ptr_generation(cur, slot);
			if (gen < min_trans) {
				slot++;
				continue;
			}
			break;
		}
find_next_key:
		/*
		 * we didn't find a candidate key in this node, walk forward
		 * and find another one
		 */
		if (slot >= nritems) {
			path->slots[level] = slot;
			btrfs_set_path_blocking(path);
			sret = btrfs_find_next_key(root, path, min_key, level,
						   min_trans);
			if (sret == 0) {
				btrfs_release_path(path);
				goto again;
			} else {
				goto out;
			}
		}
		/* save our key for returning back */
		btrfs_node_key_to_cpu(cur, &found_key, slot);
		path->slots[level] = slot;
		if (level == path->lowest_level) {
			ret = 0;
			goto out;
		}
		btrfs_set_path_blocking(path);
		cur = read_node_slot(fs_info, cur, slot);
		if (IS_ERR(cur)) {
			ret = PTR_ERR(cur);
			goto out;
		}

		btrfs_tree_read_lock(cur);

		path->locks[level - 1] = BTRFS_READ_LOCK;
		path->nodes[level - 1] = cur;
		unlock_up(path, level, 1, 0, NULL);
		btrfs_clear_path_blocking(path, NULL, 0);
	}
out:
	path->keep_locks = keep_locks;
	if (ret == 0) {
		btrfs_unlock_up_safe(path, path->lowest_level + 1);
		btrfs_set_path_blocking(path);
		memcpy(min_key, &found_key, sizeof(found_key));
	}
	return ret;
}

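/*
 * Illustrative sketch, not part of the original file: driving
 * btrfs_search_forward() the way the defrag code does, visiting only
 * blocks newer than min_trans and stepping the key forward after each
 * hit. The processing step is left hypothetical.
 */
static int __maybe_unused example_scan_newer(struct btrfs_root *root,
					     u64 min_trans)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	int ret = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = 0;
	key.type = 0;
	key.offset = 0;

	while (1) {
		ret = btrfs_search_forward(root, &key, path, min_trans);
		if (ret) {
			if (ret > 0)	/* nothing left to visit */
				ret = 0;
			break;
		}
		/* key now holds the found key; process the item here */
		btrfs_release_path(path);

		/* advance past the key we just visited */
		if (key.offset < (u64)-1) {
			key.offset++;
		} else if (key.type < (u8)-1) {
			key.offset = 0;
			key.type++;
		} else if (key.objectid < (u64)-1) {
			key.offset = 0;
			key.type = 0;
			key.objectid++;
		} else {
			break;
		}
	}
	btrfs_free_path(path);
	return ret;
}
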
static int tree_move_down(struct btrfs_fs_info *fs_info,
			  struct btrfs_path *path,
			  int *level)
{
	struct extent_buffer *eb;

	BUG_ON(*level == 0);
	eb = read_node_slot(fs_info, path->nodes[*level], path->slots[*level]);
	if (IS_ERR(eb))
		return PTR_ERR(eb);

	path->nodes[*level - 1] = eb;
	path->slots[*level - 1] = 0;
	(*level)--;
	return 0;
}

static int tree_move_next_or_upnext(struct btrfs_path *path,
				    int *level, int root_level)
{
	int ret = 0;
	int nritems;

	nritems = btrfs_header_nritems(path->nodes[*level]);

	path->slots[*level]++;

	while (path->slots[*level] >= nritems) {
		if (*level == root_level)
			return -1;

		/* move upnext */
		path->slots[*level] = 0;
		free_extent_buffer(path->nodes[*level]);
		path->nodes[*level] = NULL;
		(*level)++;
		path->slots[*level]++;

		nritems = btrfs_header_nritems(path->nodes[*level]);
		ret = 1;
	}
	return ret;
}

/*
 * Returns 1 if it had to move up and next. 0 is returned if it moved only next
 * or down.
 */
static int tree_advance(struct btrfs_fs_info *fs_info,
			struct btrfs_path *path,
			int *level, int root_level,
			int allow_down,
			struct btrfs_key *key)
{
	int ret;

	if (*level == 0 || !allow_down) {
		ret = tree_move_next_or_upnext(path, level, root_level);
	} else {
		ret = tree_move_down(fs_info, path, level);
	}
	if (ret >= 0) {
		if (*level == 0)
			btrfs_item_key_to_cpu(path->nodes[*level], key,
					      path->slots[*level]);
		else
			btrfs_node_key_to_cpu(path->nodes[*level], key,
					      path->slots[*level]);
	}
	return ret;
}

static int tree_compare_item(struct btrfs_path *left_path,
			     struct btrfs_path *right_path,
			     char *tmp_buf)
{
	int cmp;
	int len1, len2;
	unsigned long off1, off2;

	len1 = btrfs_item_size_nr(left_path->nodes[0], left_path->slots[0]);
	len2 = btrfs_item_size_nr(right_path->nodes[0], right_path->slots[0]);
	if (len1 != len2)
		return 1;

	off1 = btrfs_item_ptr_offset(left_path->nodes[0], left_path->slots[0]);
	off2 = btrfs_item_ptr_offset(right_path->nodes[0],
				     right_path->slots[0]);

	read_extent_buffer(left_path->nodes[0], tmp_buf, off1, len1);

	cmp = memcmp_extent_buffer(right_path->nodes[0], tmp_buf, off2, len1);
	if (cmp)
		return 1;
	return 0;
}

#define ADVANCE 1
#define ADVANCE_ONLY_NEXT -1

/*
 * This function compares two trees and calls the provided callback for
 * every changed/new/deleted item it finds.
 * If shared tree blocks are encountered, whole subtrees are skipped, making
 * the compare pretty fast on snapshotted subvolumes.
 *
 * This currently works on commit roots only. As commit roots are read only,
 * we don't do any locking. The commit roots are protected with transactions.
 * Transactions are ended and rejoined when a commit is tried in between.
 *
 * This function checks for modifications done to the trees while comparing.
 * If it detects a change, it aborts immediately.
 */
int btrfs_compare_trees(struct btrfs_root *left_root,
			struct btrfs_root *right_root,
			btrfs_changed_cb_t changed_cb, void *ctx)
{
	struct btrfs_fs_info *fs_info = left_root->fs_info;
	int ret;
	int cmp;
	struct btrfs_path *left_path = NULL;
	struct btrfs_path *right_path = NULL;
	struct btrfs_key left_key;
	struct btrfs_key right_key;
	char *tmp_buf = NULL;
	int left_root_level;
	int right_root_level;
	int left_level;
	int right_level;
	int left_end_reached;
	int right_end_reached;
	int advance_left;
	int advance_right;
	u64 left_blockptr;
	u64 right_blockptr;
	u64 left_gen;
	u64 right_gen;

	left_path = btrfs_alloc_path();
	if (!left_path) {
		ret = -ENOMEM;
		goto out;
	}
	right_path = btrfs_alloc_path();
	if (!right_path) {
		ret = -ENOMEM;
		goto out;
	}

	tmp_buf = kvmalloc(fs_info->nodesize, GFP_KERNEL);
	if (!tmp_buf) {
		ret = -ENOMEM;
		goto out;
	}

	left_path->search_commit_root = 1;
	left_path->skip_locking = 1;
	right_path->search_commit_root = 1;
	right_path->skip_locking = 1;

	/*
	 * Strategy: Go to the first items of both trees. Then do
	 *
	 * If both trees are at level 0
	 *   Compare keys of current items
	 *     If left < right treat left item as new, advance left tree
	 *       and repeat
	 *     If left > right treat right item as deleted, advance right tree
	 *       and repeat
	 *     If left == right do deep compare of items, treat as changed if
	 *       needed, advance both trees and repeat
	 * If both trees are at the same level but not at level 0
	 *   Compare keys of current nodes/leaves
	 *     If left < right advance left tree and repeat
	 *     If left > right advance right tree and repeat
	 *     If left == right compare blockptrs of the next nodes/leaves
	 *       If they match advance both trees but stay at the same level
	 *         and repeat
	 *       If they don't match advance both trees while allowing to go
	 *         deeper and repeat
	 * If tree levels are different
	 *   Advance the tree that needs it and repeat
	 *
	 * Advancing a tree means:
	 *   If we are at level 0, try to go to the next slot. If that's not
	 *   possible, go one level up and repeat. Stop when we find a level
	 *   where we could go to the next slot. We may at this point be on a
	 *   node or a leaf.
	 *
	 *   If we are not at level 0 and not on shared tree blocks, go one
	 *   level deeper.
	 *
	 *   If we are not at level 0 and on shared tree blocks, go one slot to
	 *   the right if possible or go up and right.
	 */

	down_read(&fs_info->commit_root_sem);
	left_level = btrfs_header_level(left_root->commit_root);
	left_root_level = left_level;
	left_path->nodes[left_level] = left_root->commit_root;
	extent_buffer_get(left_path->nodes[left_level]);

	right_level = btrfs_header_level(right_root->commit_root);
	right_root_level = right_level;
	right_path->nodes[right_level] = right_root->commit_root;
	extent_buffer_get(right_path->nodes[right_level]);
	up_read(&fs_info->commit_root_sem);

	if (left_level == 0)
		btrfs_item_key_to_cpu(left_path->nodes[left_level],
				&left_key, left_path->slots[left_level]);
	else
		btrfs_node_key_to_cpu(left_path->nodes[left_level],
				&left_key, left_path->slots[left_level]);
	if (right_level == 0)
		btrfs_item_key_to_cpu(right_path->nodes[right_level],
				&right_key, right_path->slots[right_level]);
	else
		btrfs_node_key_to_cpu(right_path->nodes[right_level],
				&right_key, right_path->slots[right_level]);

	left_end_reached = right_end_reached = 0;
	advance_left = advance_right = 0;

	while (1) {
		if (advance_left && !left_end_reached) {
			ret = tree_advance(fs_info, left_path, &left_level,
					left_root_level,
					advance_left != ADVANCE_ONLY_NEXT,
					&left_key);
			if (ret == -1)
				left_end_reached = ADVANCE;
			else if (ret < 0)
				goto out;
			advance_left = 0;
		}
		if (advance_right && !right_end_reached) {
			ret = tree_advance(fs_info, right_path, &right_level,
					right_root_level,
					advance_right != ADVANCE_ONLY_NEXT,
					&right_key);
			if (ret == -1)
				right_end_reached = ADVANCE;
			else if (ret < 0)
				goto out;
			advance_right = 0;
		}

		if (left_end_reached && right_end_reached) {
			ret = 0;
			goto out;
		} else if (left_end_reached) {
			if (right_level == 0) {
				ret = changed_cb(left_path, right_path,
						&right_key,
						BTRFS_COMPARE_TREE_DELETED,
						ctx);
				if (ret < 0)
					goto out;
			}
			advance_right = ADVANCE;
			continue;
		} else if (right_end_reached) {
			if (left_level == 0) {
				ret = changed_cb(left_path, right_path,
						&left_key,
						BTRFS_COMPARE_TREE_NEW,
						ctx);
				if (ret < 0)
					goto out;
			}
			advance_left = ADVANCE;
			continue;
		}

		if (left_level == 0 && right_level == 0) {
			cmp = btrfs_comp_cpu_keys(&left_key, &right_key);
			if (cmp < 0) {
				ret = changed_cb(left_path, right_path,
						&left_key,
						BTRFS_COMPARE_TREE_NEW,
						ctx);
				if (ret < 0)
					goto out;
				advance_left = ADVANCE;
			} else if (cmp > 0) {
				ret = changed_cb(left_path, right_path,
						&right_key,
						BTRFS_COMPARE_TREE_DELETED,
						ctx);
				if (ret < 0)
					goto out;
				advance_right = ADVANCE;
			} else {
				enum btrfs_compare_tree_result result;

				WARN_ON(!extent_buffer_uptodate(left_path->nodes[0]));
				ret = tree_compare_item(left_path, right_path,
							tmp_buf);
				if (ret)
					result = BTRFS_COMPARE_TREE_CHANGED;
				else
					result = BTRFS_COMPARE_TREE_SAME;
				ret = changed_cb(left_path, right_path,
						 &left_key, result, ctx);
				if (ret < 0)
					goto out;
				advance_left = ADVANCE;
				advance_right = ADVANCE;
			}
		} else if (left_level == right_level) {
			cmp = btrfs_comp_cpu_keys(&left_key, &right_key);
			if (cmp < 0) {
				advance_left = ADVANCE;
			} else if (cmp > 0) {
				advance_right = ADVANCE;
			} else {
				left_blockptr = btrfs_node_blockptr(
						left_path->nodes[left_level],
						left_path->slots[left_level]);
				right_blockptr = btrfs_node_blockptr(
						right_path->nodes[right_level],
						right_path->slots[right_level]);
				left_gen = btrfs_node_ptr_generation(
						left_path->nodes[left_level],
						left_path->slots[left_level]);
				right_gen = btrfs_node_ptr_generation(
						right_path->nodes[right_level],
						right_path->slots[right_level]);
				if (left_blockptr == right_blockptr &&
				    left_gen == right_gen) {
					/*
					 * As we're on a shared block, don't
					 * allow to go deeper.
					 */
					advance_left = ADVANCE_ONLY_NEXT;
					advance_right = ADVANCE_ONLY_NEXT;
				} else {
					advance_left = ADVANCE;
					advance_right = ADVANCE;
				}
			}
		} else if (left_level < right_level) {
			advance_right = ADVANCE;
		} else {
			advance_left = ADVANCE;
		}
	}

out:
	btrfs_free_path(left_path);
	btrfs_free_path(right_path);
	kvfree(tmp_buf);
	return ret;
}

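/*
 * Illustrative sketch, not part of the original file: a minimal
 * changed_cb implementation that only counts results, where a send
 * operation would instead emit stream commands. The context struct is
 * hypothetical; a negative return value aborts the compare.
 */
struct example_compare_stats {
	u64 new_items;
	u64 deleted_items;
	u64 changed_items;
};

static int __maybe_unused example_changed_cb(struct btrfs_path *left_path,
					     struct btrfs_path *right_path,
					     struct btrfs_key *key,
					     enum btrfs_compare_tree_result result,
					     void *ctx)
{
	struct example_compare_stats *stats = ctx;

	switch (result) {
	case BTRFS_COMPARE_TREE_NEW:
		stats->new_items++;
		break;
	case BTRFS_COMPARE_TREE_DELETED:
		stats->deleted_items++;
		break;
	case BTRFS_COMPARE_TREE_CHANGED:
		stats->changed_items++;
		break;
	case BTRFS_COMPARE_TREE_SAME:
		break;
	}
	return 0;
}
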
/*
 * this is similar to btrfs_next_leaf, but does not try to preserve
 * and fix up the path. It looks for and returns the next key in the
 * tree based on the current path and the min_trans parameters.
 *
 * 0 is returned if another key is found, < 0 if there are any errors
 * and 1 is returned if there are no higher keys in the tree
 *
 * path->keep_locks should be set to 1 on the search made before
 * calling this function.
 */
int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path,
			struct btrfs_key *key, int level, u64 min_trans)
{
	int slot;
	struct extent_buffer *c;

	WARN_ON(!path->keep_locks);
	while (level < BTRFS_MAX_LEVEL) {
		if (!path->nodes[level])
			return 1;

		slot = path->slots[level] + 1;
		c = path->nodes[level];
next:
		if (slot >= btrfs_header_nritems(c)) {
			int ret;
			int orig_lowest;
			struct btrfs_key cur_key;

			if (level + 1 >= BTRFS_MAX_LEVEL ||
			    !path->nodes[level + 1])
				return 1;

			if (path->locks[level + 1]) {
				level++;
				continue;
			}

			slot = btrfs_header_nritems(c) - 1;
			if (level == 0)
				btrfs_item_key_to_cpu(c, &cur_key, slot);
			else
				btrfs_node_key_to_cpu(c, &cur_key, slot);

			orig_lowest = path->lowest_level;
			btrfs_release_path(path);
			path->lowest_level = level;
			ret = btrfs_search_slot(NULL, root, &cur_key, path,
						0, 0);
			path->lowest_level = orig_lowest;
			if (ret < 0)
				return ret;

			c = path->nodes[level];
			slot = path->slots[level];
			if (ret == 0)
				slot++;
			goto next;
		}

		if (level == 0)
			btrfs_item_key_to_cpu(c, key, slot);
		else {
			u64 gen = btrfs_node_ptr_generation(c, slot);

			if (gen < min_trans) {
				slot++;
				goto next;
			}
			btrfs_node_key_to_cpu(c, key, slot);
		}
		return 0;
	}
	return 1;
}

/*
 * search the tree again to find a leaf with greater keys
 * returns 0 if it found something or 1 if there are no greater leaves.
 * returns < 0 on io errors.
 */
int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path)
{
	return btrfs_next_old_leaf(root, path, 0);
}

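/*
 * Illustrative sketch, not part of the original file: the standard
 * pattern for iterating every item of a tree, advancing with
 * btrfs_next_leaf() whenever the current leaf is exhausted. What to do
 * with each item is left hypothetical.
 */
static int __maybe_unused example_iterate_all(struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *leaf;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = 0;
	key.type = 0;
	key.offset = 0;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	while (1) {
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret)	/* 1: no more leaves, < 0: error */
				break;
			continue;
		}
		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		/* process the item with this key here */
		path->slots[0]++;
	}
	if (ret > 0)
		ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}
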
int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path,
			u64 time_seq)
{
	int slot;
	int level;
	struct extent_buffer *c;
	struct extent_buffer *next;
	struct btrfs_key key;
	u32 nritems;
	int ret;
	int old_spinning = path->leave_spinning;
	int next_rw_lock = 0;

	nritems = btrfs_header_nritems(path->nodes[0]);
	if (nritems == 0)
		return 1;

	btrfs_item_key_to_cpu(path->nodes[0], &key, nritems - 1);
again:
	level = 1;
	next = NULL;
	next_rw_lock = 0;
	btrfs_release_path(path);

	path->keep_locks = 1;
	path->leave_spinning = 1;

	if (time_seq)
		ret = btrfs_search_old_slot(root, &key, path, time_seq);
	else
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	path->keep_locks = 0;

	if (ret < 0)
		return ret;

	nritems = btrfs_header_nritems(path->nodes[0]);
	/*
	 * by releasing the path above we dropped all our locks. A balance
	 * could have added more items next to the key that used to be
	 * at the very end of the block. So, check again here and
	 * advance the path if there are now more items available.
	 */
	if (nritems > 0 && path->slots[0] < nritems - 1) {
		if (ret == 0)
			path->slots[0]++;
		ret = 0;
		goto done;
	}
	/*
	 * So the above check misses one case:
	 * - after releasing the path above, someone has removed the item that
	 *   used to be at the very end of the block, and balance between leaves
	 *   gets another one with bigger key.offset to replace it.
	 *
	 * This one should be returned as well, or we can get leaf corruption
	 * later (esp. in __btrfs_drop_extents()).
	 *
	 * And a bit more explanation about this check:
	 * with ret > 0, the key isn't found, the path points to the slot
	 * where it should be inserted, so the path->slots[0] item must be the
	 * bigger one.
	 */
	if (nritems > 0 && ret > 0 && path->slots[0] == nritems - 1) {
		ret = 0;
		goto done;
	}

	while (level < BTRFS_MAX_LEVEL) {
		if (!path->nodes[level]) {
			ret = 1;
			goto done;
		}

		slot = path->slots[level] + 1;
		c = path->nodes[level];
		if (slot >= btrfs_header_nritems(c)) {
			level++;
			if (level == BTRFS_MAX_LEVEL) {
				ret = 1;
				goto done;
			}
			continue;
		}

		if (next) {
			btrfs_tree_unlock_rw(next, next_rw_lock);
			free_extent_buffer(next);
		}

		next = c;
		next_rw_lock = path->locks[level];
		ret = read_block_for_search(root, path, &next, level,
					    slot, &key);
		if (ret == -EAGAIN)
			goto again;

		if (ret < 0) {
			btrfs_release_path(path);
			goto done;
		}

		if (!path->skip_locking) {
			ret = btrfs_try_tree_read_lock(next);
			if (!ret && time_seq) {
				/*
				 * If we don't get the lock, we may be racing
				 * with push_leaf_left, holding that lock while
				 * itself waiting for the leaf we've currently
				 * locked. To solve this situation, we give up
				 * on our lock and cycle.
				 */
				free_extent_buffer(next);
				btrfs_release_path(path);
				cond_resched();
				goto again;
			}
			if (!ret) {
				btrfs_set_path_blocking(path);
				btrfs_tree_read_lock(next);
				btrfs_clear_path_blocking(path, next,
							  BTRFS_READ_LOCK);
			}
			next_rw_lock = BTRFS_READ_LOCK;
		}
		break;
	}
	path->slots[level] = slot;
	while (1) {
		level--;
		c = path->nodes[level];
		if (path->locks[level])
			btrfs_tree_unlock_rw(c, path->locks[level]);

		free_extent_buffer(c);
		path->nodes[level] = next;
		path->slots[level] = 0;
		if (!path->skip_locking)
			path->locks[level] = next_rw_lock;
		if (!level)
			break;

		ret = read_block_for_search(root, path, &next, level,
					    0, &key);
		if (ret == -EAGAIN)
			goto again;

		if (ret < 0) {
			btrfs_release_path(path);
			goto done;
		}

		if (!path->skip_locking) {
			ret = btrfs_try_tree_read_lock(next);
			if (!ret) {
				btrfs_set_path_blocking(path);
				btrfs_tree_read_lock(next);
				btrfs_clear_path_blocking(path, next,
							  BTRFS_READ_LOCK);
			}
			next_rw_lock = BTRFS_READ_LOCK;
		}
	}
	ret = 0;
done:
	unlock_up(path, 0, 1, 0, NULL);
	path->leave_spinning = old_spinning;
	if (!old_spinning)
		btrfs_set_path_blocking(path);

	return ret;
}

/*
 * this uses btrfs_prev_leaf to walk backwards in the tree, and keeps
 * searching until it gets past min_objectid or finds an item of 'type'
 *
 * returns 0 if something is found, 1 if nothing was found and < 0 on error
 */
int btrfs_previous_item(struct btrfs_root *root,
			struct btrfs_path *path, u64 min_objectid,
			int type)
{
	struct btrfs_key found_key;
	struct extent_buffer *leaf;
	u32 nritems;
	int ret;

	while (1) {
		if (path->slots[0] == 0) {
			btrfs_set_path_blocking(path);
			ret = btrfs_prev_leaf(root, path);
			if (ret != 0)
				return ret;
		} else {
			path->slots[0]--;
		}
		leaf = path->nodes[0];
		nritems = btrfs_header_nritems(leaf);
		if (nritems == 0)
			return 1;
		if (path->slots[0] == nritems)
			path->slots[0]--;

		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		if (found_key.objectid < min_objectid)
			break;
		if (found_key.type == type)
			return 0;
		if (found_key.objectid == min_objectid &&
		    found_key.type < type)
			break;
	}
	return 1;
}

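/*
 * Illustrative sketch, not part of the original file: positioning on the
 * last item of a given type for an objectid by searching past any
 * possible offset and stepping back with btrfs_previous_item(). The
 * key values here are made up.
 */
static int __maybe_unused example_find_last_of_type(struct btrfs_root *root,
						    u64 objectid, int type)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = objectid;
	key.type = type;
	key.offset = (u64)-1;	/* past any existing offset */

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret > 0)	/* not found, path points after the last candidate */
		ret = btrfs_previous_item(root, path, objectid, type);
	if (ret == 0) {
		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
		/* path->slots[0] now points at the matching item */
	}
	btrfs_free_path(path);
	return ret;
}
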
/*
 * search in extent tree to find a previous Metadata/Data extent item with
 * min objectid.
 *
 * returns 0 if something is found, 1 if nothing was found and < 0 on error
 */
int btrfs_previous_extent_item(struct btrfs_root *root,
			struct btrfs_path *path, u64 min_objectid)
{
	struct btrfs_key found_key;
	struct extent_buffer *leaf;
	u32 nritems;
	int ret;

	while (1) {
		if (path->slots[0] == 0) {
			btrfs_set_path_blocking(path);
			ret = btrfs_prev_leaf(root, path);
			if (ret != 0)
				return ret;
		} else {
			path->slots[0]--;
		}
		leaf = path->nodes[0];
		nritems = btrfs_header_nritems(leaf);
		if (nritems == 0)
			return 1;
		if (path->slots[0] == nritems)
			path->slots[0]--;

		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		if (found_key.objectid < min_objectid)
			break;
		if (found_key.type == BTRFS_EXTENT_ITEM_KEY ||
		    found_key.type == BTRFS_METADATA_ITEM_KEY)
			return 0;
		if (found_key.objectid == min_objectid &&
		    found_key.type < BTRFS_EXTENT_ITEM_KEY)
			break;
	}
	return 1;
}