ctree.c 152 KB

  1. /*
  2. * Copyright (C) 2007,2008 Oracle. All rights reserved.
  3. *
  4. * This program is free software; you can redistribute it and/or
  5. * modify it under the terms of the GNU General Public
  6. * License v2 as published by the Free Software Foundation.
  7. *
  8. * This program is distributed in the hope that it will be useful,
  9. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  10. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  11. * General Public License for more details.
  12. *
  13. * You should have received a copy of the GNU General Public
  14. * License along with this program; if not, write to the
  15. * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
  16. * Boston, MA 021110-1307, USA.
  17. */
  18. #include <linux/sched.h>
  19. #include <linux/slab.h>
  20. #include <linux/rbtree.h>
  21. #include "ctree.h"
  22. #include "disk-io.h"
  23. #include "transaction.h"
  24. #include "print-tree.h"
  25. #include "locking.h"
  26. static int split_node(struct btrfs_trans_handle *trans, struct btrfs_root
  27. *root, struct btrfs_path *path, int level);
  28. static int split_leaf(struct btrfs_trans_handle *trans, struct btrfs_root
  29. *root, struct btrfs_key *ins_key,
  30. struct btrfs_path *path, int data_size, int extend);
  31. static int push_node_left(struct btrfs_trans_handle *trans,
  32. struct btrfs_root *root, struct extent_buffer *dst,
  33. struct extent_buffer *src, int empty);
  34. static int balance_node_right(struct btrfs_trans_handle *trans,
  35. struct btrfs_root *root,
  36. struct extent_buffer *dst_buf,
  37. struct extent_buffer *src_buf);
  38. static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
  39. int level, int slot);
  40. static int tree_mod_log_free_eb(struct btrfs_fs_info *fs_info,
  41. struct extent_buffer *eb);
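/* allocate a zeroed btrfs_path from the btrfs_path_cachep slab cache; release it again with btrfs_free_path() */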
  42. struct btrfs_path *btrfs_alloc_path(void)
  43. {
  44. struct btrfs_path *path;
  45. path = kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS);
  46. return path;
  47. }
  48. /*
  49. * set all locked nodes in the path to blocking locks. This should
  50. * be done before scheduling
  51. */
  52. noinline void btrfs_set_path_blocking(struct btrfs_path *p)
  53. {
  54. int i;
  55. for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
  56. if (!p->nodes[i] || !p->locks[i])
  57. continue;
  58. btrfs_set_lock_blocking_rw(p->nodes[i], p->locks[i]);
  59. if (p->locks[i] == BTRFS_READ_LOCK)
  60. p->locks[i] = BTRFS_READ_LOCK_BLOCKING;
  61. else if (p->locks[i] == BTRFS_WRITE_LOCK)
  62. p->locks[i] = BTRFS_WRITE_LOCK_BLOCKING;
  63. }
  64. }
  65. /*
  66. * reset all the locked nodes in the path to spinning locks.
  67. *
  68. * held is used to keep lockdep happy; when lockdep is enabled
  69. * we set held to a blocking lock before we go around and
  70. * retake all the spinlocks in the path. You can safely use NULL
  71. * for held
  72. */
  73. noinline void btrfs_clear_path_blocking(struct btrfs_path *p,
  74. struct extent_buffer *held, int held_rw)
  75. {
  76. int i;
  77. if (held) {
  78. btrfs_set_lock_blocking_rw(held, held_rw);
  79. if (held_rw == BTRFS_WRITE_LOCK)
  80. held_rw = BTRFS_WRITE_LOCK_BLOCKING;
  81. else if (held_rw == BTRFS_READ_LOCK)
  82. held_rw = BTRFS_READ_LOCK_BLOCKING;
  83. }
  84. btrfs_set_path_blocking(p);
  85. for (i = BTRFS_MAX_LEVEL - 1; i >= 0; i--) {
  86. if (p->nodes[i] && p->locks[i]) {
  87. btrfs_clear_lock_blocking_rw(p->nodes[i], p->locks[i]);
  88. if (p->locks[i] == BTRFS_WRITE_LOCK_BLOCKING)
  89. p->locks[i] = BTRFS_WRITE_LOCK;
  90. else if (p->locks[i] == BTRFS_READ_LOCK_BLOCKING)
  91. p->locks[i] = BTRFS_READ_LOCK;
  92. }
  93. }
  94. if (held)
  95. btrfs_clear_lock_blocking_rw(held, held_rw);
  96. }
  97. /* this also releases the path */
  98. void btrfs_free_path(struct btrfs_path *p)
  99. {
  100. if (!p)
  101. return;
  102. btrfs_release_path(p);
  103. kmem_cache_free(btrfs_path_cachep, p);
  104. }
  105. /*
  106. * path release drops references on the extent buffers in the path
  107. * and it drops any locks held by this path
  108. *
  109. * It is safe to call this on paths for which no locks or extent buffers are held.
  110. */
  111. noinline void btrfs_release_path(struct btrfs_path *p)
  112. {
  113. int i;
  114. for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
  115. p->slots[i] = 0;
  116. if (!p->nodes[i])
  117. continue;
  118. if (p->locks[i]) {
  119. btrfs_tree_unlock_rw(p->nodes[i], p->locks[i]);
  120. p->locks[i] = 0;
  121. }
  122. free_extent_buffer(p->nodes[i]);
  123. p->nodes[i] = NULL;
  124. }
  125. }
  126. /*
  127. * safely gets a reference on the root node of a tree. A lock
  128. * is not taken, so a concurrent writer may put a different node
  129. * at the root of the tree. See btrfs_lock_root_node for the
  130. * looping required.
  131. *
  132. * The extent buffer returned by this has a reference taken, so
  133. * it won't disappear. It may stop being the root of the tree
  134. * at any time because there are no locks held.
  135. */
  136. struct extent_buffer *btrfs_root_node(struct btrfs_root *root)
  137. {
  138. struct extent_buffer *eb;
  139. while (1) {
  140. rcu_read_lock();
  141. eb = rcu_dereference(root->node);
  142. /*
  143. * RCU really hurts here, we could free up the root node because
  144. * it was cow'ed but we may not get the new root node yet so do
  145. * the inc_not_zero dance and if it doesn't work then
  146. * synchronize_rcu and try again.
  147. */
  148. if (atomic_inc_not_zero(&eb->refs)) {
  149. rcu_read_unlock();
  150. break;
  151. }
  152. rcu_read_unlock();
  153. synchronize_rcu();
  154. }
  155. return eb;
  156. }
  157. /* loop around taking references on and locking the root node of the
  158. * tree until you end up with a lock on the root. A locked buffer
  159. * is returned, with a reference held.
  160. */
  161. struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root)
  162. {
  163. struct extent_buffer *eb;
  164. while (1) {
  165. eb = btrfs_root_node(root);
  166. btrfs_tree_lock(eb);
  167. if (eb == root->node)
  168. break;
  169. btrfs_tree_unlock(eb);
  170. free_extent_buffer(eb);
  171. }
  172. return eb;
  173. }
  174. /* loop around taking references on and read locking the root node of the
  175. * tree until you end up with a read lock on the root. A locked buffer
  176. * is returned, with a reference held.
  177. */
  178. static struct extent_buffer *btrfs_read_lock_root_node(struct btrfs_root *root)
  179. {
  180. struct extent_buffer *eb;
  181. while (1) {
  182. eb = btrfs_root_node(root);
  183. btrfs_tree_read_lock(eb);
  184. if (eb == root->node)
  185. break;
  186. btrfs_tree_read_unlock(eb);
  187. free_extent_buffer(eb);
  188. }
  189. return eb;
  190. }
  191. /* cowonly roots (everything not a reference counted cow subvolume) just get
  192. * put onto a simple dirty list. transaction.c walks this to make sure they
  193. * get properly updated on disk.
  194. */
  195. static void add_root_to_dirty_list(struct btrfs_root *root)
  196. {
  197. if (test_bit(BTRFS_ROOT_DIRTY, &root->state) ||
  198. !test_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state))
  199. return;
  200. spin_lock(&root->fs_info->trans_lock);
  201. if (!test_and_set_bit(BTRFS_ROOT_DIRTY, &root->state)) {
  202. /* Want the extent tree to be the last on the list */
  203. if (root->objectid == BTRFS_EXTENT_TREE_OBJECTID)
  204. list_move_tail(&root->dirty_list,
  205. &root->fs_info->dirty_cowonly_roots);
  206. else
  207. list_move(&root->dirty_list,
  208. &root->fs_info->dirty_cowonly_roots);
  209. }
  210. spin_unlock(&root->fs_info->trans_lock);
  211. }
  212. /*
  213. * used by snapshot creation to make a copy of a root for a tree with
  214. * a given objectid. The buffer with the new root node is returned in
  215. * cow_ret, and this func returns zero on success or a negative error code.
  216. */
  217. int btrfs_copy_root(struct btrfs_trans_handle *trans,
  218. struct btrfs_root *root,
  219. struct extent_buffer *buf,
  220. struct extent_buffer **cow_ret, u64 new_root_objectid)
  221. {
  222. struct extent_buffer *cow;
  223. int ret = 0;
  224. int level;
  225. struct btrfs_disk_key disk_key;
  226. WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
  227. trans->transid != root->fs_info->running_transaction->transid);
  228. WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
  229. trans->transid != root->last_trans);
  230. level = btrfs_header_level(buf);
  231. if (level == 0)
  232. btrfs_item_key(buf, &disk_key, 0);
  233. else
  234. btrfs_node_key(buf, &disk_key, 0);
  235. cow = btrfs_alloc_tree_block(trans, root, 0, new_root_objectid,
  236. &disk_key, level, buf->start, 0);
  237. if (IS_ERR(cow))
  238. return PTR_ERR(cow);
  239. copy_extent_buffer(cow, buf, 0, 0, cow->len);
  240. btrfs_set_header_bytenr(cow, cow->start);
  241. btrfs_set_header_generation(cow, trans->transid);
  242. btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
  243. btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
  244. BTRFS_HEADER_FLAG_RELOC);
  245. if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
  246. btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
  247. else
  248. btrfs_set_header_owner(cow, new_root_objectid);
  249. write_extent_buffer(cow, root->fs_info->fsid, btrfs_header_fsid(),
  250. BTRFS_FSID_SIZE);
  251. WARN_ON(btrfs_header_generation(buf) > trans->transid);
  252. if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
  253. ret = btrfs_inc_ref(trans, root, cow, 1);
  254. else
  255. ret = btrfs_inc_ref(trans, root, cow, 0);
  256. if (ret)
  257. return ret;
  258. btrfs_mark_buffer_dirty(cow);
  259. *cow_ret = cow;
  260. return 0;
  261. }
  262. enum mod_log_op {
  263. MOD_LOG_KEY_REPLACE,
  264. MOD_LOG_KEY_ADD,
  265. MOD_LOG_KEY_REMOVE,
  266. MOD_LOG_KEY_REMOVE_WHILE_FREEING,
  267. MOD_LOG_KEY_REMOVE_WHILE_MOVING,
  268. MOD_LOG_MOVE_KEYS,
  269. MOD_LOG_ROOT_REPLACE,
  270. };
  271. struct tree_mod_move {
  272. int dst_slot;
  273. int nr_items;
  274. };
  275. struct tree_mod_root {
  276. u64 logical;
  277. u8 level;
  278. };
  279. struct tree_mod_elem {
  280. struct rb_node node;
  281. u64 index; /* shifted logical */
  282. u64 seq;
  283. enum mod_log_op op;
  284. /* this is used for MOD_LOG_KEY_* and MOD_LOG_MOVE_KEYS operations */
  285. int slot;
  286. /* this is used for MOD_LOG_KEY* and MOD_LOG_ROOT_REPLACE */
  287. u64 generation;
  288. /* those are used for op == MOD_LOG_KEY_{REPLACE,REMOVE} */
  289. struct btrfs_disk_key key;
  290. u64 blockptr;
  291. /* this is used for op == MOD_LOG_MOVE_KEYS */
  292. struct tree_mod_move move;
  293. /* this is used for op == MOD_LOG_ROOT_REPLACE */
  294. struct tree_mod_root old_root;
  295. };
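/* thin wrappers around fs_info->tree_mod_log_lock, the rwlock that protects the tree mod log rb tree */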
  296. static inline void tree_mod_log_read_lock(struct btrfs_fs_info *fs_info)
  297. {
  298. read_lock(&fs_info->tree_mod_log_lock);
  299. }
  300. static inline void tree_mod_log_read_unlock(struct btrfs_fs_info *fs_info)
  301. {
  302. read_unlock(&fs_info->tree_mod_log_lock);
  303. }
  304. static inline void tree_mod_log_write_lock(struct btrfs_fs_info *fs_info)
  305. {
  306. write_lock(&fs_info->tree_mod_log_lock);
  307. }
  308. static inline void tree_mod_log_write_unlock(struct btrfs_fs_info *fs_info)
  309. {
  310. write_unlock(&fs_info->tree_mod_log_lock);
  311. }
  312. /*
  313. * Pull a new tree mod seq number for our operation.
  314. */
  315. static inline u64 btrfs_inc_tree_mod_seq(struct btrfs_fs_info *fs_info)
  316. {
  317. return atomic64_inc_return(&fs_info->tree_mod_seq);
  318. }
  319. /*
  320. * This adds a new blocker to the tree mod log's blocker list if the @elem
  321. * passed does not already have a sequence number set. So when a caller expects
  322. * to record tree modifications, it should ensure that elem->seq is zero
  323. * before calling btrfs_get_tree_mod_seq.
  324. * Returns a fresh, unused tree log modification sequence number, even if no new
  325. * blocker was added.
  326. */
  327. u64 btrfs_get_tree_mod_seq(struct btrfs_fs_info *fs_info,
  328. struct seq_list *elem)
  329. {
  330. tree_mod_log_write_lock(fs_info);
  331. spin_lock(&fs_info->tree_mod_seq_lock);
  332. if (!elem->seq) {
  333. elem->seq = btrfs_inc_tree_mod_seq(fs_info);
  334. list_add_tail(&elem->list, &fs_info->tree_mod_seq_list);
  335. }
  336. spin_unlock(&fs_info->tree_mod_seq_lock);
  337. tree_mod_log_write_unlock(fs_info);
  338. return elem->seq;
  339. }
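/*
 * Drop a blocker previously registered with btrfs_get_tree_mod_seq and prune
 * tree mod log entries that no remaining blocker can still need.
 */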
  340. void btrfs_put_tree_mod_seq(struct btrfs_fs_info *fs_info,
  341. struct seq_list *elem)
  342. {
  343. struct rb_root *tm_root;
  344. struct rb_node *node;
  345. struct rb_node *next;
  346. struct seq_list *cur_elem;
  347. struct tree_mod_elem *tm;
  348. u64 min_seq = (u64)-1;
  349. u64 seq_putting = elem->seq;
  350. if (!seq_putting)
  351. return;
  352. spin_lock(&fs_info->tree_mod_seq_lock);
  353. list_del(&elem->list);
  354. elem->seq = 0;
  355. list_for_each_entry(cur_elem, &fs_info->tree_mod_seq_list, list) {
  356. if (cur_elem->seq < min_seq) {
  357. if (seq_putting > cur_elem->seq) {
  358. /*
  359. * blocker with lower sequence number exists, we
  360. * cannot remove anything from the log
  361. */
  362. spin_unlock(&fs_info->tree_mod_seq_lock);
  363. return;
  364. }
  365. min_seq = cur_elem->seq;
  366. }
  367. }
  368. spin_unlock(&fs_info->tree_mod_seq_lock);
  369. /*
  370. * anything that's lower than the lowest existing (read: blocked)
  371. * sequence number can be removed from the tree.
  372. */
  373. tree_mod_log_write_lock(fs_info);
  374. tm_root = &fs_info->tree_mod_log;
  375. for (node = rb_first(tm_root); node; node = next) {
  376. next = rb_next(node);
  377. tm = container_of(node, struct tree_mod_elem, node);
  378. if (tm->seq > min_seq)
  379. continue;
  380. rb_erase(node, tm_root);
  381. kfree(tm);
  382. }
  383. tree_mod_log_write_unlock(fs_info);
  384. }
  385. /*
  386. * key order of the log:
  387. * index -> sequence
  388. *
  389. * the index is the shifted logical of the *new* root node for root replace
  390. * operations, or the shifted logical of the affected block for all other
  391. * operations.
  392. *
  393. * Note: must be called with write lock (tree_mod_log_write_lock).
  394. */
  395. static noinline int
  396. __tree_mod_log_insert(struct btrfs_fs_info *fs_info, struct tree_mod_elem *tm)
  397. {
  398. struct rb_root *tm_root;
  399. struct rb_node **new;
  400. struct rb_node *parent = NULL;
  401. struct tree_mod_elem *cur;
  402. BUG_ON(!tm);
  403. tm->seq = btrfs_inc_tree_mod_seq(fs_info);
  404. tm_root = &fs_info->tree_mod_log;
  405. new = &tm_root->rb_node;
  406. while (*new) {
  407. cur = container_of(*new, struct tree_mod_elem, node);
  408. parent = *new;
  409. if (cur->index < tm->index)
  410. new = &((*new)->rb_left);
  411. else if (cur->index > tm->index)
  412. new = &((*new)->rb_right);
  413. else if (cur->seq < tm->seq)
  414. new = &((*new)->rb_left);
  415. else if (cur->seq > tm->seq)
  416. new = &((*new)->rb_right);
  417. else
  418. return -EEXIST;
  419. }
  420. rb_link_node(&tm->node, parent, new);
  421. rb_insert_color(&tm->node, tm_root);
  422. return 0;
  423. }
  424. /*
  425. * Determines if logging can be omitted. Returns 1 if it can. Otherwise, it
  426. * returns zero with the tree_mod_log_lock acquired. The caller must hold
  427. * this until all tree mod log insertions are recorded in the rb tree and then
  428. * call tree_mod_log_write_unlock() to release.
  429. */
  430. static inline int tree_mod_dont_log(struct btrfs_fs_info *fs_info,
  431. struct extent_buffer *eb) {
  432. smp_mb();
  433. if (list_empty(&(fs_info)->tree_mod_seq_list))
  434. return 1;
  435. if (eb && btrfs_header_level(eb) == 0)
  436. return 1;
  437. tree_mod_log_write_lock(fs_info);
  438. if (list_empty(&(fs_info)->tree_mod_seq_list)) {
  439. tree_mod_log_write_unlock(fs_info);
  440. return 1;
  441. }
  442. return 0;
  443. }
  444. /* Similar to tree_mod_dont_log, but doesn't acquire any locks. */
  445. static inline int tree_mod_need_log(const struct btrfs_fs_info *fs_info,
  446. struct extent_buffer *eb)
  447. {
  448. smp_mb();
  449. if (list_empty(&(fs_info)->tree_mod_seq_list))
  450. return 0;
  451. if (eb && btrfs_header_level(eb) == 0)
  452. return 0;
  453. return 1;
  454. }
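/*
 * allocate a tree_mod_elem describing @op at @slot of @eb; the slot's key and
 * block pointer are captured for every op except MOD_LOG_KEY_ADD
 */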
  455. static struct tree_mod_elem *
  456. alloc_tree_mod_elem(struct extent_buffer *eb, int slot,
  457. enum mod_log_op op, gfp_t flags)
  458. {
  459. struct tree_mod_elem *tm;
  460. tm = kzalloc(sizeof(*tm), flags);
  461. if (!tm)
  462. return NULL;
  463. tm->index = eb->start >> PAGE_CACHE_SHIFT;
  464. if (op != MOD_LOG_KEY_ADD) {
  465. btrfs_node_key(eb, &tm->key, slot);
  466. tm->blockptr = btrfs_node_blockptr(eb, slot);
  467. }
  468. tm->op = op;
  469. tm->slot = slot;
  470. tm->generation = btrfs_node_ptr_generation(eb, slot);
  471. RB_CLEAR_NODE(&tm->node);
  472. return tm;
  473. }
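/* log a single key add/remove/replace on @eb at @slot, unless no tree mod log users are active */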
  474. static noinline int
  475. tree_mod_log_insert_key(struct btrfs_fs_info *fs_info,
  476. struct extent_buffer *eb, int slot,
  477. enum mod_log_op op, gfp_t flags)
  478. {
  479. struct tree_mod_elem *tm;
  480. int ret;
  481. if (!tree_mod_need_log(fs_info, eb))
  482. return 0;
  483. tm = alloc_tree_mod_elem(eb, slot, op, flags);
  484. if (!tm)
  485. return -ENOMEM;
  486. if (tree_mod_dont_log(fs_info, eb)) {
  487. kfree(tm);
  488. return 0;
  489. }
  490. ret = __tree_mod_log_insert(fs_info, tm);
  491. tree_mod_log_write_unlock(fs_info);
  492. if (ret)
  493. kfree(tm);
  494. return ret;
  495. }
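/*
 * log a move of @nr_items pointers within @eb from @src_slot to @dst_slot;
 * slots overwritten by the move are logged as removals first
 */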
  496. static noinline int
  497. tree_mod_log_insert_move(struct btrfs_fs_info *fs_info,
  498. struct extent_buffer *eb, int dst_slot, int src_slot,
  499. int nr_items, gfp_t flags)
  500. {
  501. struct tree_mod_elem *tm = NULL;
  502. struct tree_mod_elem **tm_list = NULL;
  503. int ret = 0;
  504. int i;
  505. int locked = 0;
  506. if (!tree_mod_need_log(fs_info, eb))
  507. return 0;
  508. tm_list = kcalloc(nr_items, sizeof(struct tree_mod_elem *), flags);
  509. if (!tm_list)
  510. return -ENOMEM;
  511. tm = kzalloc(sizeof(*tm), flags);
  512. if (!tm) {
  513. ret = -ENOMEM;
  514. goto free_tms;
  515. }
  516. tm->index = eb->start >> PAGE_CACHE_SHIFT;
  517. tm->slot = src_slot;
  518. tm->move.dst_slot = dst_slot;
  519. tm->move.nr_items = nr_items;
  520. tm->op = MOD_LOG_MOVE_KEYS;
  521. for (i = 0; i + dst_slot < src_slot && i < nr_items; i++) {
  522. tm_list[i] = alloc_tree_mod_elem(eb, i + dst_slot,
  523. MOD_LOG_KEY_REMOVE_WHILE_MOVING, flags);
  524. if (!tm_list[i]) {
  525. ret = -ENOMEM;
  526. goto free_tms;
  527. }
  528. }
  529. if (tree_mod_dont_log(fs_info, eb))
  530. goto free_tms;
  531. locked = 1;
  532. /*
  533. * When we overwrite something during the move, we log these removals.
  534. * This can only happen when we move towards the beginning of the
  535. * buffer, i.e. dst_slot < src_slot.
  536. */
  537. for (i = 0; i + dst_slot < src_slot && i < nr_items; i++) {
  538. ret = __tree_mod_log_insert(fs_info, tm_list[i]);
  539. if (ret)
  540. goto free_tms;
  541. }
  542. ret = __tree_mod_log_insert(fs_info, tm);
  543. if (ret)
  544. goto free_tms;
  545. tree_mod_log_write_unlock(fs_info);
  546. kfree(tm_list);
  547. return 0;
  548. free_tms:
  549. for (i = 0; i < nr_items; i++) {
  550. if (tm_list[i] && !RB_EMPTY_NODE(&tm_list[i]->node))
  551. rb_erase(&tm_list[i]->node, &fs_info->tree_mod_log);
  552. kfree(tm_list[i]);
  553. }
  554. if (locked)
  555. tree_mod_log_write_unlock(fs_info);
  556. kfree(tm_list);
  557. kfree(tm);
  558. return ret;
  559. }
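/* insert a prepared list of removal records; on failure, unlink the entries already added to the rb tree */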
  560. static inline int
  561. __tree_mod_log_free_eb(struct btrfs_fs_info *fs_info,
  562. struct tree_mod_elem **tm_list,
  563. int nritems)
  564. {
  565. int i, j;
  566. int ret;
  567. for (i = nritems - 1; i >= 0; i--) {
  568. ret = __tree_mod_log_insert(fs_info, tm_list[i]);
  569. if (ret) {
  570. for (j = nritems - 1; j > i; j--)
  571. rb_erase(&tm_list[j]->node,
  572. &fs_info->tree_mod_log);
  573. return ret;
  574. }
  575. }
  576. return 0;
  577. }
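/*
 * log the replacement of the tree root; with @log_removal set, the pointers in
 * the old (non-leaf) root are logged as removed as well
 */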
  578. static noinline int
  579. tree_mod_log_insert_root(struct btrfs_fs_info *fs_info,
  580. struct extent_buffer *old_root,
  581. struct extent_buffer *new_root, gfp_t flags,
  582. int log_removal)
  583. {
  584. struct tree_mod_elem *tm = NULL;
  585. struct tree_mod_elem **tm_list = NULL;
  586. int nritems = 0;
  587. int ret = 0;
  588. int i;
  589. if (!tree_mod_need_log(fs_info, NULL))
  590. return 0;
  591. if (log_removal && btrfs_header_level(old_root) > 0) {
  592. nritems = btrfs_header_nritems(old_root);
  593. tm_list = kcalloc(nritems, sizeof(struct tree_mod_elem *),
  594. flags);
  595. if (!tm_list) {
  596. ret = -ENOMEM;
  597. goto free_tms;
  598. }
  599. for (i = 0; i < nritems; i++) {
  600. tm_list[i] = alloc_tree_mod_elem(old_root, i,
  601. MOD_LOG_KEY_REMOVE_WHILE_FREEING, flags);
  602. if (!tm_list[i]) {
  603. ret = -ENOMEM;
  604. goto free_tms;
  605. }
  606. }
  607. }
  608. tm = kzalloc(sizeof(*tm), flags);
  609. if (!tm) {
  610. ret = -ENOMEM;
  611. goto free_tms;
  612. }
  613. tm->index = new_root->start >> PAGE_CACHE_SHIFT;
  614. tm->old_root.logical = old_root->start;
  615. tm->old_root.level = btrfs_header_level(old_root);
  616. tm->generation = btrfs_header_generation(old_root);
  617. tm->op = MOD_LOG_ROOT_REPLACE;
  618. if (tree_mod_dont_log(fs_info, NULL))
  619. goto free_tms;
  620. if (tm_list)
  621. ret = __tree_mod_log_free_eb(fs_info, tm_list, nritems);
  622. if (!ret)
  623. ret = __tree_mod_log_insert(fs_info, tm);
  624. tree_mod_log_write_unlock(fs_info);
  625. if (ret)
  626. goto free_tms;
  627. kfree(tm_list);
  628. return ret;
  629. free_tms:
  630. if (tm_list) {
  631. for (i = 0; i < nritems; i++)
  632. kfree(tm_list[i]);
  633. kfree(tm_list);
  634. }
  635. kfree(tm);
  636. return ret;
  637. }
  638. static struct tree_mod_elem *
  639. __tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq,
  640. int smallest)
  641. {
  642. struct rb_root *tm_root;
  643. struct rb_node *node;
  644. struct tree_mod_elem *cur = NULL;
  645. struct tree_mod_elem *found = NULL;
  646. u64 index = start >> PAGE_CACHE_SHIFT;
  647. tree_mod_log_read_lock(fs_info);
  648. tm_root = &fs_info->tree_mod_log;
  649. node = tm_root->rb_node;
  650. while (node) {
  651. cur = container_of(node, struct tree_mod_elem, node);
  652. if (cur->index < index) {
  653. node = node->rb_left;
  654. } else if (cur->index > index) {
  655. node = node->rb_right;
  656. } else if (cur->seq < min_seq) {
  657. node = node->rb_left;
  658. } else if (!smallest) {
  659. /* we want the node with the highest seq */
  660. if (found)
  661. BUG_ON(found->seq > cur->seq);
  662. found = cur;
  663. node = node->rb_left;
  664. } else if (cur->seq > min_seq) {
  665. /* we want the node with the smallest seq */
  666. if (found)
  667. BUG_ON(found->seq < cur->seq);
  668. found = cur;
  669. node = node->rb_right;
  670. } else {
  671. found = cur;
  672. break;
  673. }
  674. }
  675. tree_mod_log_read_unlock(fs_info);
  676. return found;
  677. }
  678. /*
  679. * this returns the element from the log with the smallest time sequence
  680. * value that's in the log (the oldest log item). any element with a time
  681. * sequence lower than min_seq will be ignored.
  682. */
  683. static struct tree_mod_elem *
  684. tree_mod_log_search_oldest(struct btrfs_fs_info *fs_info, u64 start,
  685. u64 min_seq)
  686. {
  687. return __tree_mod_log_search(fs_info, start, min_seq, 1);
  688. }
  689. /*
  690. * this returns the element from the log with the largest time sequence
  691. * value that's in the log (the most recent log item). any element with
  692. * a time sequence lower than min_seq will be ignored.
  693. */
  694. static struct tree_mod_elem *
  695. tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq)
  696. {
  697. return __tree_mod_log_search(fs_info, start, min_seq, 0);
  698. }
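/* log a copy of @nr_items pointers from @src to @dst as paired remove/add records */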
  699. static noinline int
  700. tree_mod_log_eb_copy(struct btrfs_fs_info *fs_info, struct extent_buffer *dst,
  701. struct extent_buffer *src, unsigned long dst_offset,
  702. unsigned long src_offset, int nr_items)
  703. {
  704. int ret = 0;
  705. struct tree_mod_elem **tm_list = NULL;
  706. struct tree_mod_elem **tm_list_add, **tm_list_rem;
  707. int i;
  708. int locked = 0;
  709. if (!tree_mod_need_log(fs_info, NULL))
  710. return 0;
  711. if (btrfs_header_level(dst) == 0 && btrfs_header_level(src) == 0)
  712. return 0;
  713. tm_list = kcalloc(nr_items * 2, sizeof(struct tree_mod_elem *),
  714. GFP_NOFS);
  715. if (!tm_list)
  716. return -ENOMEM;
  717. tm_list_add = tm_list;
  718. tm_list_rem = tm_list + nr_items;
  719. for (i = 0; i < nr_items; i++) {
  720. tm_list_rem[i] = alloc_tree_mod_elem(src, i + src_offset,
  721. MOD_LOG_KEY_REMOVE, GFP_NOFS);
  722. if (!tm_list_rem[i]) {
  723. ret = -ENOMEM;
  724. goto free_tms;
  725. }
  726. tm_list_add[i] = alloc_tree_mod_elem(dst, i + dst_offset,
  727. MOD_LOG_KEY_ADD, GFP_NOFS);
  728. if (!tm_list_add[i]) {
  729. ret = -ENOMEM;
  730. goto free_tms;
  731. }
  732. }
  733. if (tree_mod_dont_log(fs_info, NULL))
  734. goto free_tms;
  735. locked = 1;
  736. for (i = 0; i < nr_items; i++) {
  737. ret = __tree_mod_log_insert(fs_info, tm_list_rem[i]);
  738. if (ret)
  739. goto free_tms;
  740. ret = __tree_mod_log_insert(fs_info, tm_list_add[i]);
  741. if (ret)
  742. goto free_tms;
  743. }
  744. tree_mod_log_write_unlock(fs_info);
  745. kfree(tm_list);
  746. return 0;
  747. free_tms:
  748. for (i = 0; i < nr_items * 2; i++) {
  749. if (tm_list[i] && !RB_EMPTY_NODE(&tm_list[i]->node))
  750. rb_erase(&tm_list[i]->node, &fs_info->tree_mod_log);
  751. kfree(tm_list[i]);
  752. }
  753. if (locked)
  754. tree_mod_log_write_unlock(fs_info);
  755. kfree(tm_list);
  756. return ret;
  757. }
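/* convenience wrapper around tree_mod_log_insert_move(); a logging failure here is treated as fatal */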
  758. static inline void
  759. tree_mod_log_eb_move(struct btrfs_fs_info *fs_info, struct extent_buffer *dst,
  760. int dst_offset, int src_offset, int nr_items)
  761. {
  762. int ret;
  763. ret = tree_mod_log_insert_move(fs_info, dst, dst_offset, src_offset,
  764. nr_items, GFP_NOFS);
  765. BUG_ON(ret < 0);
  766. }
  767. static noinline void
  768. tree_mod_log_set_node_key(struct btrfs_fs_info *fs_info,
  769. struct extent_buffer *eb, int slot, int atomic)
  770. {
  771. int ret;
  772. ret = tree_mod_log_insert_key(fs_info, eb, slot,
  773. MOD_LOG_KEY_REPLACE,
  774. atomic ? GFP_ATOMIC : GFP_NOFS);
  775. BUG_ON(ret < 0);
  776. }
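/* an extent buffer is about to be freed: log the removal of all of its node pointers (leaves are not logged) */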
  777. static noinline int
  778. tree_mod_log_free_eb(struct btrfs_fs_info *fs_info, struct extent_buffer *eb)
  779. {
  780. struct tree_mod_elem **tm_list = NULL;
  781. int nritems = 0;
  782. int i;
  783. int ret = 0;
  784. if (btrfs_header_level(eb) == 0)
  785. return 0;
  786. if (!tree_mod_need_log(fs_info, NULL))
  787. return 0;
  788. nritems = btrfs_header_nritems(eb);
  789. tm_list = kcalloc(nritems, sizeof(struct tree_mod_elem *), GFP_NOFS);
  790. if (!tm_list)
  791. return -ENOMEM;
  792. for (i = 0; i < nritems; i++) {
  793. tm_list[i] = alloc_tree_mod_elem(eb, i,
  794. MOD_LOG_KEY_REMOVE_WHILE_FREEING, GFP_NOFS);
  795. if (!tm_list[i]) {
  796. ret = -ENOMEM;
  797. goto free_tms;
  798. }
  799. }
  800. if (tree_mod_dont_log(fs_info, eb))
  801. goto free_tms;
  802. ret = __tree_mod_log_free_eb(fs_info, tm_list, nritems);
  803. tree_mod_log_write_unlock(fs_info);
  804. if (ret)
  805. goto free_tms;
  806. kfree(tm_list);
  807. return 0;
  808. free_tms:
  809. for (i = 0; i < nritems; i++)
  810. kfree(tm_list[i]);
  811. kfree(tm_list);
  812. return ret;
  813. }
  814. static noinline void
  815. tree_mod_log_set_root_pointer(struct btrfs_root *root,
  816. struct extent_buffer *new_root_node,
  817. int log_removal)
  818. {
  819. int ret;
  820. ret = tree_mod_log_insert_root(root->fs_info, root->node,
  821. new_root_node, GFP_NOFS, log_removal);
  822. BUG_ON(ret < 0);
  823. }
  824. /*
  825. * check if the tree block can be shared by multiple trees
  826. */
  827. int btrfs_block_can_be_shared(struct btrfs_root *root,
  828. struct extent_buffer *buf)
  829. {
  830. /*
  831. * Tree blocks not in reference counted trees and tree roots
  832. * are never shared. If a block was allocated after the last
  833. * snapshot and the block was not allocated by tree relocation,
  834. * we know the block is not shared.
  835. */
  836. if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
  837. buf != root->node && buf != root->commit_root &&
  838. (btrfs_header_generation(buf) <=
  839. btrfs_root_last_snapshot(&root->root_item) ||
  840. btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)))
  841. return 1;
  842. #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
  843. if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
  844. btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
  845. return 1;
  846. #endif
  847. return 0;
  848. }
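/*
 * update extent backrefs after @buf has been copied to @cow; *last_ref is set
 * when @buf is no longer referenced and can be freed
 */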
  849. static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
  850. struct btrfs_root *root,
  851. struct extent_buffer *buf,
  852. struct extent_buffer *cow,
  853. int *last_ref)
  854. {
  855. u64 refs;
  856. u64 owner;
  857. u64 flags;
  858. u64 new_flags = 0;
  859. int ret;
  860. /*
  861. * Backrefs update rules:
  862. *
  863. * Always use full backrefs for extent pointers in tree block
  864. * allocated by tree relocation.
  865. *
  866. * If a shared tree block is no longer referenced by its owner
  867. * tree (btrfs_header_owner(buf) == root->root_key.objectid),
  868. * use full backrefs for extent pointers in tree block.
  869. *
  870. * If a tree block is being relocated
  871. * (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID),
  872. * use full backrefs for extent pointers in tree block.
  873. * The reason for this is that some operations (such as drop tree)
  874. * are only allowed for blocks that use full backrefs.
  875. */
  876. if (btrfs_block_can_be_shared(root, buf)) {
  877. ret = btrfs_lookup_extent_info(trans, root, buf->start,
  878. btrfs_header_level(buf), 1,
  879. &refs, &flags);
  880. if (ret)
  881. return ret;
  882. if (refs == 0) {
  883. ret = -EROFS;
  884. btrfs_std_error(root->fs_info, ret, NULL);
  885. return ret;
  886. }
  887. } else {
  888. refs = 1;
  889. if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
  890. btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
  891. flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
  892. else
  893. flags = 0;
  894. }
  895. owner = btrfs_header_owner(buf);
  896. BUG_ON(owner == BTRFS_TREE_RELOC_OBJECTID &&
  897. !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
  898. if (refs > 1) {
  899. if ((owner == root->root_key.objectid ||
  900. root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) &&
  901. !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) {
  902. ret = btrfs_inc_ref(trans, root, buf, 1);
  903. BUG_ON(ret); /* -ENOMEM */
  904. if (root->root_key.objectid ==
  905. BTRFS_TREE_RELOC_OBJECTID) {
  906. ret = btrfs_dec_ref(trans, root, buf, 0);
  907. BUG_ON(ret); /* -ENOMEM */
  908. ret = btrfs_inc_ref(trans, root, cow, 1);
  909. BUG_ON(ret); /* -ENOMEM */
  910. }
  911. new_flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
  912. } else {
  913. if (root->root_key.objectid ==
  914. BTRFS_TREE_RELOC_OBJECTID)
  915. ret = btrfs_inc_ref(trans, root, cow, 1);
  916. else
  917. ret = btrfs_inc_ref(trans, root, cow, 0);
  918. BUG_ON(ret); /* -ENOMEM */
  919. }
  920. if (new_flags != 0) {
  921. int level = btrfs_header_level(buf);
  922. ret = btrfs_set_disk_extent_flags(trans, root,
  923. buf->start,
  924. buf->len,
  925. new_flags, level, 0);
  926. if (ret)
  927. return ret;
  928. }
  929. } else {
  930. if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
  931. if (root->root_key.objectid ==
  932. BTRFS_TREE_RELOC_OBJECTID)
  933. ret = btrfs_inc_ref(trans, root, cow, 1);
  934. else
  935. ret = btrfs_inc_ref(trans, root, cow, 0);
  936. BUG_ON(ret); /* -ENOMEM */
  937. ret = btrfs_dec_ref(trans, root, buf, 1);
  938. BUG_ON(ret); /* -ENOMEM */
  939. }
  940. clean_tree_block(trans, root->fs_info, buf);
  941. *last_ref = 1;
  942. }
  943. return 0;
  944. }
  945. /*
  946. * does the dirty work in cow of a single block. The parent block (if
  947. * supplied) is updated to point to the new cow copy. The new buffer is marked
  948. * dirty and returned locked. If you modify the block it needs to be marked
  949. * dirty again.
  950. *
  951. * search_start -- an allocation hint for the new block
  952. *
  953. * empty_size -- a hint that you plan on doing more cow. This is the size in
  954. * bytes the allocator should try to find free next to the block it returns.
  955. * This is just a hint and may be ignored by the allocator.
  956. */
  957. static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
  958. struct btrfs_root *root,
  959. struct extent_buffer *buf,
  960. struct extent_buffer *parent, int parent_slot,
  961. struct extent_buffer **cow_ret,
  962. u64 search_start, u64 empty_size)
  963. {
  964. struct btrfs_disk_key disk_key;
  965. struct extent_buffer *cow;
  966. int level, ret;
  967. int last_ref = 0;
  968. int unlock_orig = 0;
  969. u64 parent_start;
  970. if (*cow_ret == buf)
  971. unlock_orig = 1;
  972. btrfs_assert_tree_locked(buf);
  973. WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
  974. trans->transid != root->fs_info->running_transaction->transid);
  975. WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
  976. trans->transid != root->last_trans);
  977. level = btrfs_header_level(buf);
  978. if (level == 0)
  979. btrfs_item_key(buf, &disk_key, 0);
  980. else
  981. btrfs_node_key(buf, &disk_key, 0);
  982. if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
  983. if (parent)
  984. parent_start = parent->start;
  985. else
  986. parent_start = 0;
  987. } else
  988. parent_start = 0;
  989. cow = btrfs_alloc_tree_block(trans, root, parent_start,
  990. root->root_key.objectid, &disk_key, level,
  991. search_start, empty_size);
  992. if (IS_ERR(cow))
  993. return PTR_ERR(cow);
  994. /* cow is set to blocking by btrfs_init_new_buffer */
  995. copy_extent_buffer(cow, buf, 0, 0, cow->len);
  996. btrfs_set_header_bytenr(cow, cow->start);
  997. btrfs_set_header_generation(cow, trans->transid);
  998. btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
  999. btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
  1000. BTRFS_HEADER_FLAG_RELOC);
  1001. if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
  1002. btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
  1003. else
  1004. btrfs_set_header_owner(cow, root->root_key.objectid);
  1005. write_extent_buffer(cow, root->fs_info->fsid, btrfs_header_fsid(),
  1006. BTRFS_FSID_SIZE);
  1007. ret = update_ref_for_cow(trans, root, buf, cow, &last_ref);
  1008. if (ret) {
  1009. btrfs_abort_transaction(trans, root, ret);
  1010. return ret;
  1011. }
  1012. if (test_bit(BTRFS_ROOT_REF_COWS, &root->state)) {
  1013. ret = btrfs_reloc_cow_block(trans, root, buf, cow);
  1014. if (ret) {
  1015. btrfs_abort_transaction(trans, root, ret);
  1016. return ret;
  1017. }
  1018. }
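/*
 * Two cases from here: the block we copied was the root itself, so the
 * root pointer is swapped to the new copy, or it was an ordinary block,
 * so the parent's pointer at parent_slot is redirected.  Either way the
 * old block is freed, subject to last_ref.
 */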
  1019. if (buf == root->node) {
  1020. WARN_ON(parent && parent != buf);
  1021. if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
  1022. btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
  1023. parent_start = buf->start;
  1024. else
  1025. parent_start = 0;
  1026. extent_buffer_get(cow);
  1027. tree_mod_log_set_root_pointer(root, cow, 1);
  1028. rcu_assign_pointer(root->node, cow);
  1029. btrfs_free_tree_block(trans, root, buf, parent_start,
  1030. last_ref);
  1031. free_extent_buffer(buf);
  1032. add_root_to_dirty_list(root);
  1033. } else {
  1034. if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
  1035. parent_start = parent->start;
  1036. else
  1037. parent_start = 0;
  1038. WARN_ON(trans->transid != btrfs_header_generation(parent));
  1039. tree_mod_log_insert_key(root->fs_info, parent, parent_slot,
  1040. MOD_LOG_KEY_REPLACE, GFP_NOFS);
  1041. btrfs_set_node_blockptr(parent, parent_slot,
  1042. cow->start);
  1043. btrfs_set_node_ptr_generation(parent, parent_slot,
  1044. trans->transid);
  1045. btrfs_mark_buffer_dirty(parent);
  1046. if (last_ref) {
  1047. ret = tree_mod_log_free_eb(root->fs_info, buf);
  1048. if (ret) {
  1049. btrfs_abort_transaction(trans, root, ret);
  1050. return ret;
  1051. }
  1052. }
  1053. btrfs_free_tree_block(trans, root, buf, parent_start,
  1054. last_ref);
  1055. }
  1056. if (unlock_orig)
  1057. btrfs_tree_unlock(buf);
  1058. free_extent_buffer_stale(buf);
  1059. btrfs_mark_buffer_dirty(cow);
  1060. *cow_ret = cow;
  1061. return 0;
  1062. }
  1063. /*
  1064. * returns the logical address of the oldest predecessor of the given root.
  1065. * entries older than time_seq are ignored.
  1066. */
  1067. static struct tree_mod_elem *
  1068. __tree_mod_log_oldest_root(struct btrfs_fs_info *fs_info,
  1069. struct extent_buffer *eb_root, u64 time_seq)
  1070. {
  1071. struct tree_mod_elem *tm;
  1072. struct tree_mod_elem *found = NULL;
  1073. u64 root_logical = eb_root->start;
  1074. int looped = 0;
  1075. if (!time_seq)
  1076. return NULL;
  1077. /*
  1078. * the very last operation that's logged for a root is the replacement
  1079. * operation (if it is replaced at all). this has the index of the *new*
  1080. * root, making it the very first operation that's logged for this root.
  1081. */
  1082. while (1) {
  1083. tm = tree_mod_log_search_oldest(fs_info, root_logical,
  1084. time_seq);
  1085. if (!looped && !tm)
  1086. return NULL;
  1087. /*
1088. * if there are no tree operations for the oldest root, we simply
  1089. * return it. this should only happen if that (old) root is at
  1090. * level 0.
  1091. */
  1092. if (!tm)
  1093. break;
  1094. /*
  1095. * if there's an operation that's not a root replacement, we
  1096. * found the oldest version of our root. normally, we'll find a
  1097. * MOD_LOG_KEY_REMOVE_WHILE_FREEING operation here.
  1098. */
  1099. if (tm->op != MOD_LOG_ROOT_REPLACE)
  1100. break;
  1101. found = tm;
  1102. root_logical = tm->old_root.logical;
  1103. looped = 1;
  1104. }
  1105. /* if there's no old root to return, return what we found instead */
  1106. if (!found)
  1107. found = tm;
  1108. return found;
  1109. }
  1110. /*
1111. * tm is a pointer to the first operation to rewind within eb. Then, all
1112. * previous operations will be rewound (until we reach something older than
  1113. * time_seq).
  1114. */
  1115. static void
  1116. __tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct extent_buffer *eb,
  1117. u64 time_seq, struct tree_mod_elem *first_tm)
  1118. {
  1119. u32 n;
  1120. struct rb_node *next;
  1121. struct tree_mod_elem *tm = first_tm;
  1122. unsigned long o_dst;
  1123. unsigned long o_src;
  1124. unsigned long p_size = sizeof(struct btrfs_key_ptr);
  1125. n = btrfs_header_nritems(eb);
  1126. tree_mod_log_read_lock(fs_info);
  1127. while (tm && tm->seq >= time_seq) {
  1128. /*
  1129. * all the operations are recorded with the operator used for
  1130. * the modification. as we're going backwards, we do the
  1131. * opposite of each operation here.
  1132. */
  1133. switch (tm->op) {
  1134. case MOD_LOG_KEY_REMOVE_WHILE_FREEING:
  1135. BUG_ON(tm->slot < n);
  1136. /* Fallthrough */
  1137. case MOD_LOG_KEY_REMOVE_WHILE_MOVING:
  1138. case MOD_LOG_KEY_REMOVE:
  1139. btrfs_set_node_key(eb, &tm->key, tm->slot);
  1140. btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr);
  1141. btrfs_set_node_ptr_generation(eb, tm->slot,
  1142. tm->generation);
  1143. n++;
  1144. break;
  1145. case MOD_LOG_KEY_REPLACE:
  1146. BUG_ON(tm->slot >= n);
  1147. btrfs_set_node_key(eb, &tm->key, tm->slot);
  1148. btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr);
  1149. btrfs_set_node_ptr_generation(eb, tm->slot,
  1150. tm->generation);
  1151. break;
  1152. case MOD_LOG_KEY_ADD:
  1153. /* if a move operation is needed it's in the log */
  1154. n--;
  1155. break;
  1156. case MOD_LOG_MOVE_KEYS:
  1157. o_dst = btrfs_node_key_ptr_offset(tm->slot);
  1158. o_src = btrfs_node_key_ptr_offset(tm->move.dst_slot);
  1159. memmove_extent_buffer(eb, o_dst, o_src,
  1160. tm->move.nr_items * p_size);
  1161. break;
  1162. case MOD_LOG_ROOT_REPLACE:
  1163. /*
  1164. * this operation is special. for roots, this must be
  1165. * handled explicitly before rewinding.
  1166. * for non-roots, this operation may exist if the node
1167. * was a root: root A -> child B; then A becomes empty and
1168. * B is promoted to the new root. in the mod log, we'll
1169. * have a root-replace operation for B, a tree block
1170. * that is not a root. we simply ignore that operation.
  1171. */
  1172. break;
  1173. }
  1174. next = rb_next(&tm->node);
  1175. if (!next)
  1176. break;
  1177. tm = container_of(next, struct tree_mod_elem, node);
  1178. if (tm->index != first_tm->index)
  1179. break;
  1180. }
  1181. tree_mod_log_read_unlock(fs_info);
  1182. btrfs_set_header_nritems(eb, n);
  1183. }
  1184. /*
1185. * Called with eb read locked. If the buffer cannot be rewound, the same buffer
  1186. * is returned. If rewind operations happen, a fresh buffer is returned. The
  1187. * returned buffer is always read-locked. If the returned buffer is not the
  1188. * input buffer, the lock on the input buffer is released and the input buffer
  1189. * is freed (its refcount is decremented).
  1190. */
  1191. static struct extent_buffer *
  1192. tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
  1193. struct extent_buffer *eb, u64 time_seq)
  1194. {
  1195. struct extent_buffer *eb_rewin;
  1196. struct tree_mod_elem *tm;
  1197. if (!time_seq)
  1198. return eb;
  1199. if (btrfs_header_level(eb) == 0)
  1200. return eb;
  1201. tm = tree_mod_log_search(fs_info, eb->start, time_seq);
  1202. if (!tm)
  1203. return eb;
  1204. btrfs_set_path_blocking(path);
  1205. btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
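/*
 * If the block was freed after time_seq, its old contents were logged
 * as MOD_LOG_KEY_REMOVE_WHILE_FREEING entries while it was torn down,
 * so rebuild it from scratch in a dummy buffer.  Otherwise clone the
 * live buffer and rewind the copy in place.
 */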
  1206. if (tm->op == MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
  1207. BUG_ON(tm->slot != 0);
  1208. eb_rewin = alloc_dummy_extent_buffer(fs_info, eb->start);
  1209. if (!eb_rewin) {
  1210. btrfs_tree_read_unlock_blocking(eb);
  1211. free_extent_buffer(eb);
  1212. return NULL;
  1213. }
  1214. btrfs_set_header_bytenr(eb_rewin, eb->start);
  1215. btrfs_set_header_backref_rev(eb_rewin,
  1216. btrfs_header_backref_rev(eb));
  1217. btrfs_set_header_owner(eb_rewin, btrfs_header_owner(eb));
  1218. btrfs_set_header_level(eb_rewin, btrfs_header_level(eb));
  1219. } else {
  1220. eb_rewin = btrfs_clone_extent_buffer(eb);
  1221. if (!eb_rewin) {
  1222. btrfs_tree_read_unlock_blocking(eb);
  1223. free_extent_buffer(eb);
  1224. return NULL;
  1225. }
  1226. }
  1227. btrfs_clear_path_blocking(path, NULL, BTRFS_READ_LOCK);
  1228. btrfs_tree_read_unlock_blocking(eb);
  1229. free_extent_buffer(eb);
  1230. extent_buffer_get(eb_rewin);
  1231. btrfs_tree_read_lock(eb_rewin);
  1232. __tree_mod_log_rewind(fs_info, eb_rewin, time_seq, tm);
  1233. WARN_ON(btrfs_header_nritems(eb_rewin) >
  1234. BTRFS_NODEPTRS_PER_BLOCK(fs_info->tree_root));
  1235. return eb_rewin;
  1236. }
  1237. /*
  1238. * get_old_root() rewinds the state of @root's root node to the given @time_seq
  1239. * value. If there are no changes, the current root->root_node is returned. If
  1240. * anything changed in between, there's a fresh buffer allocated on which the
  1241. * rewind operations are done. In any case, the returned buffer is read locked.
  1242. * Returns NULL on error (with no locks held).
  1243. */
  1244. static inline struct extent_buffer *
  1245. get_old_root(struct btrfs_root *root, u64 time_seq)
  1246. {
  1247. struct tree_mod_elem *tm;
  1248. struct extent_buffer *eb = NULL;
  1249. struct extent_buffer *eb_root;
  1250. struct extent_buffer *old;
  1251. struct tree_mod_root *old_root = NULL;
  1252. u64 old_generation = 0;
  1253. u64 logical;
  1254. eb_root = btrfs_read_lock_root_node(root);
  1255. tm = __tree_mod_log_oldest_root(root->fs_info, eb_root, time_seq);
  1256. if (!tm)
  1257. return eb_root;
  1258. if (tm->op == MOD_LOG_ROOT_REPLACE) {
  1259. old_root = &tm->old_root;
  1260. old_generation = tm->generation;
  1261. logical = old_root->logical;
  1262. } else {
  1263. logical = eb_root->start;
  1264. }
  1265. tm = tree_mod_log_search(root->fs_info, logical, time_seq);
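/*
 * Three cases: the root was replaced and the old block can still be
 * read from disk (clone it and rewind), the root was replaced but the
 * old block has to be rebuilt purely from the log (dummy buffer), or
 * the root was never replaced (clone the current root node).
 */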
  1266. if (old_root && tm && tm->op != MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
  1267. btrfs_tree_read_unlock(eb_root);
  1268. free_extent_buffer(eb_root);
  1269. old = read_tree_block(root, logical, 0);
  1270. if (WARN_ON(IS_ERR(old) || !extent_buffer_uptodate(old))) {
  1271. if (!IS_ERR(old))
  1272. free_extent_buffer(old);
  1273. btrfs_warn(root->fs_info,
  1274. "failed to read tree block %llu from get_old_root", logical);
  1275. } else {
  1276. eb = btrfs_clone_extent_buffer(old);
  1277. free_extent_buffer(old);
  1278. }
  1279. } else if (old_root) {
  1280. btrfs_tree_read_unlock(eb_root);
  1281. free_extent_buffer(eb_root);
  1282. eb = alloc_dummy_extent_buffer(root->fs_info, logical);
  1283. } else {
  1284. btrfs_set_lock_blocking_rw(eb_root, BTRFS_READ_LOCK);
  1285. eb = btrfs_clone_extent_buffer(eb_root);
  1286. btrfs_tree_read_unlock_blocking(eb_root);
  1287. free_extent_buffer(eb_root);
  1288. }
  1289. if (!eb)
  1290. return NULL;
  1291. extent_buffer_get(eb);
  1292. btrfs_tree_read_lock(eb);
  1293. if (old_root) {
  1294. btrfs_set_header_bytenr(eb, eb->start);
  1295. btrfs_set_header_backref_rev(eb, BTRFS_MIXED_BACKREF_REV);
  1296. btrfs_set_header_owner(eb, btrfs_header_owner(eb_root));
  1297. btrfs_set_header_level(eb, old_root->level);
  1298. btrfs_set_header_generation(eb, old_generation);
  1299. }
  1300. if (tm)
  1301. __tree_mod_log_rewind(root->fs_info, eb, time_seq, tm);
  1302. else
  1303. WARN_ON(btrfs_header_level(eb) != 0);
  1304. WARN_ON(btrfs_header_nritems(eb) > BTRFS_NODEPTRS_PER_BLOCK(root));
  1305. return eb;
  1306. }
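/*
 * Return the level @root's root node had at @time_seq.  If the root was
 * replaced since then, the level recorded in the mod log is used,
 * otherwise the current root's level is returned.
 */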
  1307. int btrfs_old_root_level(struct btrfs_root *root, u64 time_seq)
  1308. {
  1309. struct tree_mod_elem *tm;
  1310. int level;
  1311. struct extent_buffer *eb_root = btrfs_root_node(root);
  1312. tm = __tree_mod_log_oldest_root(root->fs_info, eb_root, time_seq);
  1313. if (tm && tm->op == MOD_LOG_ROOT_REPLACE) {
  1314. level = tm->old_root.level;
  1315. } else {
  1316. level = btrfs_header_level(eb_root);
  1317. }
  1318. free_extent_buffer(eb_root);
  1319. return level;
  1320. }
  1321. static inline int should_cow_block(struct btrfs_trans_handle *trans,
  1322. struct btrfs_root *root,
  1323. struct extent_buffer *buf)
  1324. {
  1325. if (btrfs_test_is_dummy_root(root))
  1326. return 0;
  1327. /* ensure we can see the force_cow */
  1328. smp_rmb();
  1329. /*
  1330. * We do not need to cow a block if
  1331. * 1) this block is not created or changed in this transaction;
  1332. * 2) this block does not belong to TREE_RELOC tree;
  1333. * 3) the root is not forced COW.
  1334. *
  1335. * What is forced COW:
1336. * when we create a snapshot while committing the transaction,
1337. * after we've finished copying the src root, we must COW the shared
1338. * block to ensure metadata consistency.
  1339. */
  1340. if (btrfs_header_generation(buf) == trans->transid &&
  1341. !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN) &&
  1342. !(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID &&
  1343. btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)) &&
  1344. !test_bit(BTRFS_ROOT_FORCE_COW, &root->state))
  1345. return 0;
  1346. return 1;
  1347. }
  1348. /*
  1349. * cows a single block, see __btrfs_cow_block for the real work.
  1350. * This version of it has extra checks so that a block isn't cow'd more than
  1351. * once per transaction, as long as it hasn't been written yet
  1352. */
  1353. noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
  1354. struct btrfs_root *root, struct extent_buffer *buf,
  1355. struct extent_buffer *parent, int parent_slot,
  1356. struct extent_buffer **cow_ret)
  1357. {
  1358. u64 search_start;
  1359. int ret;
  1360. if (trans->transaction != root->fs_info->running_transaction)
  1361. WARN(1, KERN_CRIT "trans %llu running %llu\n",
  1362. trans->transid,
  1363. root->fs_info->running_transaction->transid);
  1364. if (trans->transid != root->fs_info->generation)
  1365. WARN(1, KERN_CRIT "trans %llu running %llu\n",
  1366. trans->transid, root->fs_info->generation);
  1367. if (!should_cow_block(trans, root, buf)) {
  1368. *cow_ret = buf;
  1369. return 0;
  1370. }
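/*
 * Hint the allocator at the 1G-aligned chunk containing the original
 * block so the new copy tends to land close to the old location.
 */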
  1371. search_start = buf->start & ~((u64)SZ_1G - 1);
  1372. if (parent)
  1373. btrfs_set_lock_blocking(parent);
  1374. btrfs_set_lock_blocking(buf);
  1375. ret = __btrfs_cow_block(trans, root, buf, parent,
  1376. parent_slot, cow_ret, search_start, 0);
  1377. trace_btrfs_cow_block(root, buf, *cow_ret);
  1378. return ret;
  1379. }
  1380. /*
  1381. * helper function for defrag to decide if two blocks pointed to by a
  1382. * node are actually close by
  1383. */
  1384. static int close_blocks(u64 blocknr, u64 other, u32 blocksize)
  1385. {
  1386. if (blocknr < other && other - (blocknr + blocksize) < 32768)
  1387. return 1;
  1388. if (blocknr > other && blocknr - (other + blocksize) < 32768)
  1389. return 1;
  1390. return 0;
  1391. }
  1392. /*
  1393. * compare two keys in a memcmp fashion
  1394. */
  1395. static int comp_keys(struct btrfs_disk_key *disk, struct btrfs_key *k2)
  1396. {
  1397. struct btrfs_key k1;
  1398. btrfs_disk_key_to_cpu(&k1, disk);
  1399. return btrfs_comp_cpu_keys(&k1, k2);
  1400. }
  1401. /*
  1402. * same as comp_keys only with two btrfs_key's
  1403. */
  1404. int btrfs_comp_cpu_keys(struct btrfs_key *k1, struct btrfs_key *k2)
  1405. {
  1406. if (k1->objectid > k2->objectid)
  1407. return 1;
  1408. if (k1->objectid < k2->objectid)
  1409. return -1;
  1410. if (k1->type > k2->type)
  1411. return 1;
  1412. if (k1->type < k2->type)
  1413. return -1;
  1414. if (k1->offset > k2->offset)
  1415. return 1;
  1416. if (k1->offset < k2->offset)
  1417. return -1;
  1418. return 0;
  1419. }
  1420. /*
  1421. * this is used by the defrag code to go through all the
  1422. * leaves pointed to by a node and reallocate them so that
  1423. * disk order is close to key order
  1424. */
  1425. int btrfs_realloc_node(struct btrfs_trans_handle *trans,
  1426. struct btrfs_root *root, struct extent_buffer *parent,
  1427. int start_slot, u64 *last_ret,
  1428. struct btrfs_key *progress)
  1429. {
  1430. struct extent_buffer *cur;
  1431. u64 blocknr;
  1432. u64 gen;
  1433. u64 search_start = *last_ret;
  1434. u64 last_block = 0;
  1435. u64 other;
  1436. u32 parent_nritems;
  1437. int end_slot;
  1438. int i;
  1439. int err = 0;
  1440. int parent_level;
  1441. int uptodate;
  1442. u32 blocksize;
  1443. int progress_passed = 0;
  1444. struct btrfs_disk_key disk_key;
  1445. parent_level = btrfs_header_level(parent);
  1446. WARN_ON(trans->transaction != root->fs_info->running_transaction);
  1447. WARN_ON(trans->transid != root->fs_info->generation);
  1448. parent_nritems = btrfs_header_nritems(parent);
  1449. blocksize = root->nodesize;
  1450. end_slot = parent_nritems - 1;
  1451. if (parent_nritems <= 1)
  1452. return 0;
  1453. btrfs_set_lock_blocking(parent);
  1454. for (i = start_slot; i <= end_slot; i++) {
  1455. int close = 1;
  1456. btrfs_node_key(parent, &disk_key, i);
  1457. if (!progress_passed && comp_keys(&disk_key, progress) < 0)
  1458. continue;
  1459. progress_passed = 1;
  1460. blocknr = btrfs_node_blockptr(parent, i);
  1461. gen = btrfs_node_ptr_generation(parent, i);
  1462. if (last_block == 0)
  1463. last_block = blocknr;
  1464. if (i > 0) {
  1465. other = btrfs_node_blockptr(parent, i - 1);
  1466. close = close_blocks(blocknr, other, blocksize);
  1467. }
  1468. if (!close && i < end_slot) {
  1469. other = btrfs_node_blockptr(parent, i + 1);
  1470. close = close_blocks(blocknr, other, blocksize);
  1471. }
  1472. if (close) {
  1473. last_block = blocknr;
  1474. continue;
  1475. }
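/*
 * This block is not close to either of its neighbours on disk, so COW
 * it to a new location near search_start to improve read locality.
 */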
  1476. cur = btrfs_find_tree_block(root->fs_info, blocknr);
  1477. if (cur)
  1478. uptodate = btrfs_buffer_uptodate(cur, gen, 0);
  1479. else
  1480. uptodate = 0;
  1481. if (!cur || !uptodate) {
  1482. if (!cur) {
  1483. cur = read_tree_block(root, blocknr, gen);
  1484. if (IS_ERR(cur)) {
  1485. return PTR_ERR(cur);
  1486. } else if (!extent_buffer_uptodate(cur)) {
  1487. free_extent_buffer(cur);
  1488. return -EIO;
  1489. }
  1490. } else if (!uptodate) {
  1491. err = btrfs_read_buffer(cur, gen);
  1492. if (err) {
  1493. free_extent_buffer(cur);
  1494. return err;
  1495. }
  1496. }
  1497. }
  1498. if (search_start == 0)
  1499. search_start = last_block;
  1500. btrfs_tree_lock(cur);
  1501. btrfs_set_lock_blocking(cur);
  1502. err = __btrfs_cow_block(trans, root, cur, parent, i,
  1503. &cur, search_start,
  1504. min(16 * blocksize,
  1505. (end_slot - i) * blocksize));
  1506. if (err) {
  1507. btrfs_tree_unlock(cur);
  1508. free_extent_buffer(cur);
  1509. break;
  1510. }
  1511. search_start = cur->start;
  1512. last_block = cur->start;
  1513. *last_ret = search_start;
  1514. btrfs_tree_unlock(cur);
  1515. free_extent_buffer(cur);
  1516. }
  1517. return err;
  1518. }
  1519. /*
  1520. * The leaf data grows from end-to-front in the node.
  1521. * this returns the address of the start of the last item,
  1522. * which is the stop of the leaf data stack
  1523. */
  1524. static inline unsigned int leaf_data_end(struct btrfs_root *root,
  1525. struct extent_buffer *leaf)
  1526. {
  1527. u32 nr = btrfs_header_nritems(leaf);
  1528. if (nr == 0)
  1529. return BTRFS_LEAF_DATA_SIZE(root);
  1530. return btrfs_item_offset_nr(leaf, nr - 1);
  1531. }
  1532. /*
  1533. * search for key in the extent_buffer. The items start at offset p,
  1534. * and they are item_size apart. There are 'max' items in p.
  1535. *
  1536. * the slot in the array is returned via slot, and it points to
  1537. * the place where you would insert key if it is not found in
  1538. * the array.
  1539. *
  1540. * slot may point to max if the key is bigger than all of the keys
  1541. */
  1542. static noinline int generic_bin_search(struct extent_buffer *eb,
  1543. unsigned long p,
  1544. int item_size, struct btrfs_key *key,
  1545. int max, int *slot)
  1546. {
  1547. int low = 0;
  1548. int high = max;
  1549. int mid;
  1550. int ret;
  1551. struct btrfs_disk_key *tmp = NULL;
  1552. struct btrfs_disk_key unaligned;
  1553. unsigned long offset;
  1554. char *kaddr = NULL;
  1555. unsigned long map_start = 0;
  1556. unsigned long map_len = 0;
  1557. int err;
  1558. while (low < high) {
  1559. mid = (low + high) / 2;
  1560. offset = p + mid * item_size;
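/*
 * The key may straddle a page boundary inside the extent buffer; map
 * it directly when it fits in the currently mapped range, otherwise
 * copy it into the on-stack 'unaligned' key.
 */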
  1561. if (!kaddr || offset < map_start ||
  1562. (offset + sizeof(struct btrfs_disk_key)) >
  1563. map_start + map_len) {
  1564. err = map_private_extent_buffer(eb, offset,
  1565. sizeof(struct btrfs_disk_key),
  1566. &kaddr, &map_start, &map_len);
  1567. if (!err) {
  1568. tmp = (struct btrfs_disk_key *)(kaddr + offset -
  1569. map_start);
  1570. } else {
  1571. read_extent_buffer(eb, &unaligned,
  1572. offset, sizeof(unaligned));
  1573. tmp = &unaligned;
  1574. }
  1575. } else {
  1576. tmp = (struct btrfs_disk_key *)(kaddr + offset -
  1577. map_start);
  1578. }
  1579. ret = comp_keys(tmp, key);
  1580. if (ret < 0)
  1581. low = mid + 1;
  1582. else if (ret > 0)
  1583. high = mid;
  1584. else {
  1585. *slot = mid;
  1586. return 0;
  1587. }
  1588. }
  1589. *slot = low;
  1590. return 1;
  1591. }
  1592. /*
  1593. * simple bin_search frontend that does the right thing for
  1594. * leaves vs nodes
  1595. */
  1596. static int bin_search(struct extent_buffer *eb, struct btrfs_key *key,
  1597. int level, int *slot)
  1598. {
  1599. if (level == 0)
  1600. return generic_bin_search(eb,
  1601. offsetof(struct btrfs_leaf, items),
  1602. sizeof(struct btrfs_item),
  1603. key, btrfs_header_nritems(eb),
  1604. slot);
  1605. else
  1606. return generic_bin_search(eb,
  1607. offsetof(struct btrfs_node, ptrs),
  1608. sizeof(struct btrfs_key_ptr),
  1609. key, btrfs_header_nritems(eb),
  1610. slot);
  1611. }
  1612. int btrfs_bin_search(struct extent_buffer *eb, struct btrfs_key *key,
  1613. int level, int *slot)
  1614. {
  1615. return bin_search(eb, key, level, slot);
  1616. }
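/* adjust the bytes-used accounting in the root item, under accounting_lock */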
  1617. static void root_add_used(struct btrfs_root *root, u32 size)
  1618. {
  1619. spin_lock(&root->accounting_lock);
  1620. btrfs_set_root_used(&root->root_item,
  1621. btrfs_root_used(&root->root_item) + size);
  1622. spin_unlock(&root->accounting_lock);
  1623. }
  1624. static void root_sub_used(struct btrfs_root *root, u32 size)
  1625. {
  1626. spin_lock(&root->accounting_lock);
  1627. btrfs_set_root_used(&root->root_item,
  1628. btrfs_root_used(&root->root_item) - size);
  1629. spin_unlock(&root->accounting_lock);
  1630. }
1631. /* given a node and slot number, this reads the block it points to. The
  1632. * extent buffer is returned with a reference taken (but unlocked).
  1633. * NULL is returned on error.
  1634. */
  1635. static noinline struct extent_buffer *read_node_slot(struct btrfs_root *root,
  1636. struct extent_buffer *parent, int slot)
  1637. {
  1638. int level = btrfs_header_level(parent);
  1639. struct extent_buffer *eb;
  1640. if (slot < 0)
  1641. return NULL;
  1642. if (slot >= btrfs_header_nritems(parent))
  1643. return NULL;
  1644. BUG_ON(level == 0);
  1645. eb = read_tree_block(root, btrfs_node_blockptr(parent, slot),
  1646. btrfs_node_ptr_generation(parent, slot));
  1647. if (IS_ERR(eb) || !extent_buffer_uptodate(eb)) {
  1648. if (!IS_ERR(eb))
  1649. free_extent_buffer(eb);
  1650. eb = NULL;
  1651. }
  1652. return eb;
  1653. }
  1654. /*
  1655. * node level balancing, used to make sure nodes are in proper order for
  1656. * item deletion. We balance from the top down, so we have to make sure
1657. * that a deletion won't leave a node completely empty later on.
  1658. */
  1659. static noinline int balance_level(struct btrfs_trans_handle *trans,
  1660. struct btrfs_root *root,
  1661. struct btrfs_path *path, int level)
  1662. {
  1663. struct extent_buffer *right = NULL;
  1664. struct extent_buffer *mid;
  1665. struct extent_buffer *left = NULL;
  1666. struct extent_buffer *parent = NULL;
  1667. int ret = 0;
  1668. int wret;
  1669. int pslot;
  1670. int orig_slot = path->slots[level];
  1671. u64 orig_ptr;
  1672. if (level == 0)
  1673. return 0;
  1674. mid = path->nodes[level];
  1675. WARN_ON(path->locks[level] != BTRFS_WRITE_LOCK &&
  1676. path->locks[level] != BTRFS_WRITE_LOCK_BLOCKING);
  1677. WARN_ON(btrfs_header_generation(mid) != trans->transid);
  1678. orig_ptr = btrfs_node_blockptr(mid, orig_slot);
  1679. if (level < BTRFS_MAX_LEVEL - 1) {
  1680. parent = path->nodes[level + 1];
  1681. pslot = path->slots[level + 1];
  1682. }
  1683. /*
  1684. * deal with the case where there is only one pointer in the root
  1685. * by promoting the node below to a root
  1686. */
  1687. if (!parent) {
  1688. struct extent_buffer *child;
  1689. if (btrfs_header_nritems(mid) != 1)
  1690. return 0;
  1691. /* promote the child to a root */
  1692. child = read_node_slot(root, mid, 0);
  1693. if (!child) {
  1694. ret = -EROFS;
  1695. btrfs_std_error(root->fs_info, ret, NULL);
  1696. goto enospc;
  1697. }
  1698. btrfs_tree_lock(child);
  1699. btrfs_set_lock_blocking(child);
  1700. ret = btrfs_cow_block(trans, root, child, mid, 0, &child);
  1701. if (ret) {
  1702. btrfs_tree_unlock(child);
  1703. free_extent_buffer(child);
  1704. goto enospc;
  1705. }
  1706. tree_mod_log_set_root_pointer(root, child, 1);
  1707. rcu_assign_pointer(root->node, child);
  1708. add_root_to_dirty_list(root);
  1709. btrfs_tree_unlock(child);
  1710. path->locks[level] = 0;
  1711. path->nodes[level] = NULL;
  1712. clean_tree_block(trans, root->fs_info, mid);
  1713. btrfs_tree_unlock(mid);
  1714. /* once for the path */
  1715. free_extent_buffer(mid);
  1716. root_sub_used(root, mid->len);
  1717. btrfs_free_tree_block(trans, root, mid, 0, 1);
  1718. /* once for the root ptr */
  1719. free_extent_buffer_stale(mid);
  1720. return 0;
  1721. }
  1722. if (btrfs_header_nritems(mid) >
  1723. BTRFS_NODEPTRS_PER_BLOCK(root) / 4)
  1724. return 0;
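/*
 * mid holds at most a quarter of the possible pointers; COW the
 * adjacent siblings and try to rebalance by pushing items between them.
 */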
  1725. left = read_node_slot(root, parent, pslot - 1);
  1726. if (left) {
  1727. btrfs_tree_lock(left);
  1728. btrfs_set_lock_blocking(left);
  1729. wret = btrfs_cow_block(trans, root, left,
  1730. parent, pslot - 1, &left);
  1731. if (wret) {
  1732. ret = wret;
  1733. goto enospc;
  1734. }
  1735. }
  1736. right = read_node_slot(root, parent, pslot + 1);
  1737. if (right) {
  1738. btrfs_tree_lock(right);
  1739. btrfs_set_lock_blocking(right);
  1740. wret = btrfs_cow_block(trans, root, right,
  1741. parent, pslot + 1, &right);
  1742. if (wret) {
  1743. ret = wret;
  1744. goto enospc;
  1745. }
  1746. }
  1747. /* first, try to make some room in the middle buffer */
  1748. if (left) {
  1749. orig_slot += btrfs_header_nritems(left);
  1750. wret = push_node_left(trans, root, left, mid, 1);
  1751. if (wret < 0)
  1752. ret = wret;
  1753. }
  1754. /*
  1755. * then try to empty the right most buffer into the middle
  1756. */
  1757. if (right) {
  1758. wret = push_node_left(trans, root, mid, right, 1);
  1759. if (wret < 0 && wret != -ENOSPC)
  1760. ret = wret;
  1761. if (btrfs_header_nritems(right) == 0) {
  1762. clean_tree_block(trans, root->fs_info, right);
  1763. btrfs_tree_unlock(right);
  1764. del_ptr(root, path, level + 1, pslot + 1);
  1765. root_sub_used(root, right->len);
  1766. btrfs_free_tree_block(trans, root, right, 0, 1);
  1767. free_extent_buffer_stale(right);
  1768. right = NULL;
  1769. } else {
  1770. struct btrfs_disk_key right_key;
  1771. btrfs_node_key(right, &right_key, 0);
  1772. tree_mod_log_set_node_key(root->fs_info, parent,
  1773. pslot + 1, 0);
  1774. btrfs_set_node_key(parent, &right_key, pslot + 1);
  1775. btrfs_mark_buffer_dirty(parent);
  1776. }
  1777. }
  1778. if (btrfs_header_nritems(mid) == 1) {
  1779. /*
  1780. * we're not allowed to leave a node with one item in the
  1781. * tree during a delete. A deletion from lower in the tree
  1782. * could try to delete the only pointer in this node.
  1783. * So, pull some keys from the left.
  1784. * There has to be a left pointer at this point because
  1785. * otherwise we would have pulled some pointers from the
  1786. * right
  1787. */
  1788. if (!left) {
  1789. ret = -EROFS;
  1790. btrfs_std_error(root->fs_info, ret, NULL);
  1791. goto enospc;
  1792. }
  1793. wret = balance_node_right(trans, root, mid, left);
  1794. if (wret < 0) {
  1795. ret = wret;
  1796. goto enospc;
  1797. }
  1798. if (wret == 1) {
  1799. wret = push_node_left(trans, root, left, mid, 1);
  1800. if (wret < 0)
  1801. ret = wret;
  1802. }
  1803. BUG_ON(wret == 1);
  1804. }
  1805. if (btrfs_header_nritems(mid) == 0) {
  1806. clean_tree_block(trans, root->fs_info, mid);
  1807. btrfs_tree_unlock(mid);
  1808. del_ptr(root, path, level + 1, pslot);
  1809. root_sub_used(root, mid->len);
  1810. btrfs_free_tree_block(trans, root, mid, 0, 1);
  1811. free_extent_buffer_stale(mid);
  1812. mid = NULL;
  1813. } else {
  1814. /* update the parent key to reflect our changes */
  1815. struct btrfs_disk_key mid_key;
  1816. btrfs_node_key(mid, &mid_key, 0);
  1817. tree_mod_log_set_node_key(root->fs_info, parent,
  1818. pslot, 0);
  1819. btrfs_set_node_key(parent, &mid_key, pslot);
  1820. btrfs_mark_buffer_dirty(parent);
  1821. }
  1822. /* update the path */
  1823. if (left) {
  1824. if (btrfs_header_nritems(left) > orig_slot) {
  1825. extent_buffer_get(left);
  1826. /* left was locked after cow */
  1827. path->nodes[level] = left;
  1828. path->slots[level + 1] -= 1;
  1829. path->slots[level] = orig_slot;
  1830. if (mid) {
  1831. btrfs_tree_unlock(mid);
  1832. free_extent_buffer(mid);
  1833. }
  1834. } else {
  1835. orig_slot -= btrfs_header_nritems(left);
  1836. path->slots[level] = orig_slot;
  1837. }
  1838. }
  1839. /* double check we haven't messed things up */
  1840. if (orig_ptr !=
  1841. btrfs_node_blockptr(path->nodes[level], path->slots[level]))
  1842. BUG();
  1843. enospc:
  1844. if (right) {
  1845. btrfs_tree_unlock(right);
  1846. free_extent_buffer(right);
  1847. }
  1848. if (left) {
  1849. if (path->nodes[level] != left)
  1850. btrfs_tree_unlock(left);
  1851. free_extent_buffer(left);
  1852. }
  1853. return ret;
  1854. }
  1855. /* Node balancing for insertion. Here we only split or push nodes around
  1856. * when they are completely full. This is also done top down, so we
  1857. * have to be pessimistic.
  1858. */
  1859. static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
  1860. struct btrfs_root *root,
  1861. struct btrfs_path *path, int level)
  1862. {
  1863. struct extent_buffer *right = NULL;
  1864. struct extent_buffer *mid;
  1865. struct extent_buffer *left = NULL;
  1866. struct extent_buffer *parent = NULL;
  1867. int ret = 0;
  1868. int wret;
  1869. int pslot;
  1870. int orig_slot = path->slots[level];
  1871. if (level == 0)
  1872. return 1;
  1873. mid = path->nodes[level];
  1874. WARN_ON(btrfs_header_generation(mid) != trans->transid);
  1875. if (level < BTRFS_MAX_LEVEL - 1) {
  1876. parent = path->nodes[level + 1];
  1877. pslot = path->slots[level + 1];
  1878. }
  1879. if (!parent)
  1880. return 1;
  1881. left = read_node_slot(root, parent, pslot - 1);
  1882. /* first, try to make some room in the middle buffer */
  1883. if (left) {
  1884. u32 left_nr;
  1885. btrfs_tree_lock(left);
  1886. btrfs_set_lock_blocking(left);
  1887. left_nr = btrfs_header_nritems(left);
  1888. if (left_nr >= BTRFS_NODEPTRS_PER_BLOCK(root) - 1) {
  1889. wret = 1;
  1890. } else {
  1891. ret = btrfs_cow_block(trans, root, left, parent,
  1892. pslot - 1, &left);
  1893. if (ret)
  1894. wret = 1;
  1895. else {
  1896. wret = push_node_left(trans, root,
  1897. left, mid, 0);
  1898. }
  1899. }
  1900. if (wret < 0)
  1901. ret = wret;
  1902. if (wret == 0) {
  1903. struct btrfs_disk_key disk_key;
  1904. orig_slot += left_nr;
  1905. btrfs_node_key(mid, &disk_key, 0);
  1906. tree_mod_log_set_node_key(root->fs_info, parent,
  1907. pslot, 0);
  1908. btrfs_set_node_key(parent, &disk_key, pslot);
  1909. btrfs_mark_buffer_dirty(parent);
  1910. if (btrfs_header_nritems(left) > orig_slot) {
  1911. path->nodes[level] = left;
  1912. path->slots[level + 1] -= 1;
  1913. path->slots[level] = orig_slot;
  1914. btrfs_tree_unlock(mid);
  1915. free_extent_buffer(mid);
  1916. } else {
  1917. orig_slot -=
  1918. btrfs_header_nritems(left);
  1919. path->slots[level] = orig_slot;
  1920. btrfs_tree_unlock(left);
  1921. free_extent_buffer(left);
  1922. }
  1923. return 0;
  1924. }
  1925. btrfs_tree_unlock(left);
  1926. free_extent_buffer(left);
  1927. }
  1928. right = read_node_slot(root, parent, pslot + 1);
  1929. /*
  1930. * then try to empty the right most buffer into the middle
  1931. */
  1932. if (right) {
  1933. u32 right_nr;
  1934. btrfs_tree_lock(right);
  1935. btrfs_set_lock_blocking(right);
  1936. right_nr = btrfs_header_nritems(right);
  1937. if (right_nr >= BTRFS_NODEPTRS_PER_BLOCK(root) - 1) {
  1938. wret = 1;
  1939. } else {
  1940. ret = btrfs_cow_block(trans, root, right,
  1941. parent, pslot + 1,
  1942. &right);
  1943. if (ret)
  1944. wret = 1;
  1945. else {
  1946. wret = balance_node_right(trans, root,
  1947. right, mid);
  1948. }
  1949. }
  1950. if (wret < 0)
  1951. ret = wret;
  1952. if (wret == 0) {
  1953. struct btrfs_disk_key disk_key;
  1954. btrfs_node_key(right, &disk_key, 0);
  1955. tree_mod_log_set_node_key(root->fs_info, parent,
  1956. pslot + 1, 0);
  1957. btrfs_set_node_key(parent, &disk_key, pslot + 1);
  1958. btrfs_mark_buffer_dirty(parent);
  1959. if (btrfs_header_nritems(mid) <= orig_slot) {
  1960. path->nodes[level] = right;
  1961. path->slots[level + 1] += 1;
  1962. path->slots[level] = orig_slot -
  1963. btrfs_header_nritems(mid);
  1964. btrfs_tree_unlock(mid);
  1965. free_extent_buffer(mid);
  1966. } else {
  1967. btrfs_tree_unlock(right);
  1968. free_extent_buffer(right);
  1969. }
  1970. return 0;
  1971. }
  1972. btrfs_tree_unlock(right);
  1973. free_extent_buffer(right);
  1974. }
  1975. return 1;
  1976. }
  1977. /*
  1978. * readahead one full node of leaves, finding things that are close
  1979. * to the block in 'slot', and triggering ra on them.
  1980. */
  1981. static void reada_for_search(struct btrfs_root *root,
  1982. struct btrfs_path *path,
  1983. int level, int slot, u64 objectid)
  1984. {
  1985. struct extent_buffer *node;
  1986. struct btrfs_disk_key disk_key;
  1987. u32 nritems;
  1988. u64 search;
  1989. u64 target;
  1990. u64 nread = 0;
  1991. u64 gen;
  1992. struct extent_buffer *eb;
  1993. u32 nr;
  1994. u32 blocksize;
  1995. u32 nscan = 0;
  1996. if (level != 1)
  1997. return;
  1998. if (!path->nodes[level])
  1999. return;
  2000. node = path->nodes[level];
  2001. search = btrfs_node_blockptr(node, slot);
  2002. blocksize = root->nodesize;
  2003. eb = btrfs_find_tree_block(root->fs_info, search);
  2004. if (eb) {
  2005. free_extent_buffer(eb);
  2006. return;
  2007. }
  2008. target = search;
  2009. nritems = btrfs_header_nritems(node);
  2010. nr = slot;
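/*
 * Walk the slots next to 'slot' (direction depends on path->reada) and
 * read ahead blocks whose bytenr lies within 64k of the target; stop
 * once 64k has been read ahead or 32 slots have been scanned.
 */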
  2011. while (1) {
  2012. if (path->reada == READA_BACK) {
  2013. if (nr == 0)
  2014. break;
  2015. nr--;
  2016. } else if (path->reada == READA_FORWARD) {
  2017. nr++;
  2018. if (nr >= nritems)
  2019. break;
  2020. }
  2021. if (path->reada == READA_BACK && objectid) {
  2022. btrfs_node_key(node, &disk_key, nr);
  2023. if (btrfs_disk_key_objectid(&disk_key) != objectid)
  2024. break;
  2025. }
  2026. search = btrfs_node_blockptr(node, nr);
  2027. if ((search <= target && target - search <= 65536) ||
  2028. (search > target && search - target <= 65536)) {
  2029. gen = btrfs_node_ptr_generation(node, nr);
  2030. readahead_tree_block(root, search);
  2031. nread += blocksize;
  2032. }
  2033. nscan++;
2034. if (nread > 65536 || nscan > 32)
  2035. break;
  2036. }
  2037. }
  2038. static noinline void reada_for_balance(struct btrfs_root *root,
  2039. struct btrfs_path *path, int level)
  2040. {
  2041. int slot;
  2042. int nritems;
  2043. struct extent_buffer *parent;
  2044. struct extent_buffer *eb;
  2045. u64 gen;
  2046. u64 block1 = 0;
  2047. u64 block2 = 0;
  2048. parent = path->nodes[level + 1];
  2049. if (!parent)
  2050. return;
  2051. nritems = btrfs_header_nritems(parent);
  2052. slot = path->slots[level + 1];
  2053. if (slot > 0) {
  2054. block1 = btrfs_node_blockptr(parent, slot - 1);
  2055. gen = btrfs_node_ptr_generation(parent, slot - 1);
  2056. eb = btrfs_find_tree_block(root->fs_info, block1);
  2057. /*
2058. * if we get -EAGAIN from btrfs_buffer_uptodate, we
2059. * don't want to return -EAGAIN here. That would loop
2060. * forever.
  2061. */
  2062. if (eb && btrfs_buffer_uptodate(eb, gen, 1) != 0)
  2063. block1 = 0;
  2064. free_extent_buffer(eb);
  2065. }
  2066. if (slot + 1 < nritems) {
  2067. block2 = btrfs_node_blockptr(parent, slot + 1);
  2068. gen = btrfs_node_ptr_generation(parent, slot + 1);
  2069. eb = btrfs_find_tree_block(root->fs_info, block2);
  2070. if (eb && btrfs_buffer_uptodate(eb, gen, 1) != 0)
  2071. block2 = 0;
  2072. free_extent_buffer(eb);
  2073. }
  2074. if (block1)
  2075. readahead_tree_block(root, block1);
  2076. if (block2)
  2077. readahead_tree_block(root, block2);
  2078. }
  2079. /*
  2080. * when we walk down the tree, it is usually safe to unlock the higher layers
  2081. * in the tree. The exceptions are when our path goes through slot 0, because
  2082. * operations on the tree might require changing key pointers higher up in the
  2083. * tree.
  2084. *
  2085. * callers might also have set path->keep_locks, which tells this code to keep
  2086. * the lock if the path points to the last slot in the block. This is part of
  2087. * walking through the tree, and selecting the next slot in the higher block.
  2088. *
  2089. * lowest_unlock sets the lowest level in the tree we're allowed to unlock. so
  2090. * if lowest_unlock is 1, level 0 won't be unlocked
  2091. */
  2092. static noinline void unlock_up(struct btrfs_path *path, int level,
  2093. int lowest_unlock, int min_write_lock_level,
  2094. int *write_lock_level)
  2095. {
  2096. int i;
  2097. int skip_level = level;
  2098. int no_skips = 0;
  2099. struct extent_buffer *t;
  2100. for (i = level; i < BTRFS_MAX_LEVEL; i++) {
  2101. if (!path->nodes[i])
  2102. break;
  2103. if (!path->locks[i])
  2104. break;
  2105. if (!no_skips && path->slots[i] == 0) {
  2106. skip_level = i + 1;
  2107. continue;
  2108. }
  2109. if (!no_skips && path->keep_locks) {
  2110. u32 nritems;
  2111. t = path->nodes[i];
  2112. nritems = btrfs_header_nritems(t);
  2113. if (nritems < 1 || path->slots[i] >= nritems - 1) {
  2114. skip_level = i + 1;
  2115. continue;
  2116. }
  2117. }
  2118. if (skip_level < i && i >= lowest_unlock)
  2119. no_skips = 1;
  2120. t = path->nodes[i];
  2121. if (i >= lowest_unlock && i > skip_level && path->locks[i]) {
  2122. btrfs_tree_unlock_rw(t, path->locks[i]);
  2123. path->locks[i] = 0;
  2124. if (write_lock_level &&
  2125. i > min_write_lock_level &&
  2126. i <= *write_lock_level) {
  2127. *write_lock_level = i - 1;
  2128. }
  2129. }
  2130. }
  2131. }
  2132. /*
  2133. * This releases any locks held in the path starting at level and
  2134. * going all the way up to the root.
  2135. *
  2136. * btrfs_search_slot will keep the lock held on higher nodes in a few
  2137. * corner cases, such as COW of the block at slot zero in the node. This
  2138. * ignores those rules, and it should only be called when there are no
  2139. * more updates to be done higher up in the tree.
  2140. */
  2141. noinline void btrfs_unlock_up_safe(struct btrfs_path *path, int level)
  2142. {
  2143. int i;
  2144. if (path->keep_locks)
  2145. return;
  2146. for (i = level; i < BTRFS_MAX_LEVEL; i++) {
  2147. if (!path->nodes[i])
  2148. continue;
  2149. if (!path->locks[i])
  2150. continue;
  2151. btrfs_tree_unlock_rw(path->nodes[i], path->locks[i]);
  2152. path->locks[i] = 0;
  2153. }
  2154. }
  2155. /*
  2156. * helper function for btrfs_search_slot. The goal is to find a block
  2157. * in cache without setting the path to blocking. If we find the block
  2158. * we return zero and the path is unchanged.
  2159. *
  2160. * If we can't find the block, we set the path blocking and do some
  2161. * reada. -EAGAIN is returned and the search must be repeated.
  2162. */
  2163. static int
  2164. read_block_for_search(struct btrfs_trans_handle *trans,
  2165. struct btrfs_root *root, struct btrfs_path *p,
  2166. struct extent_buffer **eb_ret, int level, int slot,
  2167. struct btrfs_key *key, u64 time_seq)
  2168. {
  2169. u64 blocknr;
  2170. u64 gen;
  2171. struct extent_buffer *b = *eb_ret;
  2172. struct extent_buffer *tmp;
  2173. int ret;
  2174. blocknr = btrfs_node_blockptr(b, slot);
  2175. gen = btrfs_node_ptr_generation(b, slot);
  2176. tmp = btrfs_find_tree_block(root->fs_info, blocknr);
  2177. if (tmp) {
  2178. /* first we do an atomic uptodate check */
  2179. if (btrfs_buffer_uptodate(tmp, gen, 1) > 0) {
  2180. *eb_ret = tmp;
  2181. return 0;
  2182. }
  2183. /* the pages were up to date, but we failed
  2184. * the generation number check. Do a full
  2185. * read for the generation number that is correct.
  2186. * We must do this without dropping locks so
  2187. * we can trust our generation number
  2188. */
  2189. btrfs_set_path_blocking(p);
  2190. /* now we're allowed to do a blocking uptodate check */
  2191. ret = btrfs_read_buffer(tmp, gen);
  2192. if (!ret) {
  2193. *eb_ret = tmp;
  2194. return 0;
  2195. }
  2196. free_extent_buffer(tmp);
  2197. btrfs_release_path(p);
  2198. return -EIO;
  2199. }
  2200. /*
  2201. * reduce lock contention at high levels
  2202. * of the btree by dropping locks before
  2203. * we read. Don't release the lock on the current
  2204. * level because we need to walk this node to figure
  2205. * out which blocks to read.
  2206. */
  2207. btrfs_unlock_up_safe(p, level + 1);
  2208. btrfs_set_path_blocking(p);
  2209. free_extent_buffer(tmp);
  2210. if (p->reada != READA_NONE)
  2211. reada_for_search(root, p, level, slot, key->objectid);
  2212. btrfs_release_path(p);
  2213. ret = -EAGAIN;
  2214. tmp = read_tree_block(root, blocknr, 0);
  2215. if (!IS_ERR(tmp)) {
  2216. /*
  2217. * If the read above didn't mark this buffer up to date,
  2218. * it will never end up being up to date. Set ret to EIO now
  2219. * and give up so that our caller doesn't loop forever
  2220. * on our EAGAINs.
  2221. */
  2222. if (!btrfs_buffer_uptodate(tmp, 0, 0))
  2223. ret = -EIO;
  2224. free_extent_buffer(tmp);
  2225. }
  2226. return ret;
  2227. }
  2228. /*
  2229. * helper function for btrfs_search_slot. This does all of the checks
  2230. * for node-level blocks and does any balancing required based on
  2231. * the ins_len.
  2232. *
  2233. * If no extra work was required, zero is returned. If we had to
  2234. * drop the path, -EAGAIN is returned and btrfs_search_slot must
  2235. * start over
  2236. */
  2237. static int
  2238. setup_nodes_for_search(struct btrfs_trans_handle *trans,
  2239. struct btrfs_root *root, struct btrfs_path *p,
  2240. struct extent_buffer *b, int level, int ins_len,
  2241. int *write_lock_level)
  2242. {
  2243. int ret;
  2244. if ((p->search_for_split || ins_len > 0) && btrfs_header_nritems(b) >=
  2245. BTRFS_NODEPTRS_PER_BLOCK(root) - 3) {
  2246. int sret;
  2247. if (*write_lock_level < level + 1) {
  2248. *write_lock_level = level + 1;
  2249. btrfs_release_path(p);
  2250. goto again;
  2251. }
  2252. btrfs_set_path_blocking(p);
  2253. reada_for_balance(root, p, level);
  2254. sret = split_node(trans, root, p, level);
  2255. btrfs_clear_path_blocking(p, NULL, 0);
  2256. BUG_ON(sret > 0);
  2257. if (sret) {
  2258. ret = sret;
  2259. goto done;
  2260. }
  2261. b = p->nodes[level];
  2262. } else if (ins_len < 0 && btrfs_header_nritems(b) <
  2263. BTRFS_NODEPTRS_PER_BLOCK(root) / 2) {
  2264. int sret;
  2265. if (*write_lock_level < level + 1) {
  2266. *write_lock_level = level + 1;
  2267. btrfs_release_path(p);
  2268. goto again;
  2269. }
  2270. btrfs_set_path_blocking(p);
  2271. reada_for_balance(root, p, level);
  2272. sret = balance_level(trans, root, p, level);
  2273. btrfs_clear_path_blocking(p, NULL, 0);
  2274. if (sret) {
  2275. ret = sret;
  2276. goto done;
  2277. }
  2278. b = p->nodes[level];
  2279. if (!b) {
  2280. btrfs_release_path(p);
  2281. goto again;
  2282. }
  2283. BUG_ON(btrfs_header_nritems(b) == 1);
  2284. }
  2285. return 0;
  2286. again:
  2287. ret = -EAGAIN;
  2288. done:
  2289. return ret;
  2290. }
  2291. static void key_search_validate(struct extent_buffer *b,
  2292. struct btrfs_key *key,
  2293. int level)
  2294. {
  2295. #ifdef CONFIG_BTRFS_ASSERT
  2296. struct btrfs_disk_key disk_key;
  2297. btrfs_cpu_key_to_disk(&disk_key, key);
  2298. if (level == 0)
  2299. ASSERT(!memcmp_extent_buffer(b, &disk_key,
  2300. offsetof(struct btrfs_leaf, items[0].key),
  2301. sizeof(disk_key)));
  2302. else
  2303. ASSERT(!memcmp_extent_buffer(b, &disk_key,
  2304. offsetof(struct btrfs_node, ptrs[0].key),
  2305. sizeof(disk_key)));
  2306. #endif
  2307. }
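/*
 * Once a level has compared equal (*prev_cmp == 0), the key we are
 * searching for is the first key of every node further down, so the
 * binary search can be skipped and slot 0 used directly.
 */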
  2308. static int key_search(struct extent_buffer *b, struct btrfs_key *key,
  2309. int level, int *prev_cmp, int *slot)
  2310. {
  2311. if (*prev_cmp != 0) {
  2312. *prev_cmp = bin_search(b, key, level, slot);
  2313. return *prev_cmp;
  2314. }
  2315. key_search_validate(b, key, level);
  2316. *slot = 0;
  2317. return 0;
  2318. }
  2319. int btrfs_find_item(struct btrfs_root *fs_root, struct btrfs_path *path,
  2320. u64 iobjectid, u64 ioff, u8 key_type,
  2321. struct btrfs_key *found_key)
  2322. {
  2323. int ret;
  2324. struct btrfs_key key;
  2325. struct extent_buffer *eb;
  2326. ASSERT(path);
  2327. ASSERT(found_key);
  2328. key.type = key_type;
  2329. key.objectid = iobjectid;
  2330. key.offset = ioff;
  2331. ret = btrfs_search_slot(NULL, fs_root, &key, path, 0, 0);
  2332. if (ret < 0)
  2333. return ret;
  2334. eb = path->nodes[0];
  2335. if (ret && path->slots[0] >= btrfs_header_nritems(eb)) {
  2336. ret = btrfs_next_leaf(fs_root, path);
  2337. if (ret)
  2338. return ret;
  2339. eb = path->nodes[0];
  2340. }
  2341. btrfs_item_key_to_cpu(eb, found_key, path->slots[0]);
  2342. if (found_key->type != key.type ||
  2343. found_key->objectid != key.objectid)
  2344. return 1;
  2345. return 0;
  2346. }
  2347. /*
2348. * look for key in the tree. path is filled in with nodes along the way.
  2349. * if key is found, we return zero and you can find the item in the leaf
  2350. * level of the path (level 0)
  2351. *
  2352. * If the key isn't found, the path points to the slot where it should
  2353. * be inserted, and 1 is returned. If there are other errors during the
  2354. * search a negative error number is returned.
  2355. *
  2356. * if ins_len > 0, nodes and leaves will be split as we walk down the
  2357. * tree. if ins_len < 0, nodes will be merged as we walk down the tree (if
  2358. * possible)
  2359. */
  2360. int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root
  2361. *root, struct btrfs_key *key, struct btrfs_path *p, int
  2362. ins_len, int cow)
  2363. {
  2364. struct extent_buffer *b;
  2365. int slot;
  2366. int ret;
  2367. int err;
  2368. int level;
  2369. int lowest_unlock = 1;
  2370. int root_lock;
  2371. /* everything at write_lock_level or lower must be write locked */
  2372. int write_lock_level = 0;
  2373. u8 lowest_level = 0;
  2374. int min_write_lock_level;
  2375. int prev_cmp;
  2376. lowest_level = p->lowest_level;
  2377. WARN_ON(lowest_level && ins_len > 0);
  2378. WARN_ON(p->nodes[0] != NULL);
  2379. BUG_ON(!cow && ins_len);
  2380. if (ins_len < 0) {
  2381. lowest_unlock = 2;
  2382. /* when we are removing items, we might have to go up to level
2383. * two as we update tree pointers. Make sure we keep write
2384. * locks on those levels as well
  2385. */
  2386. write_lock_level = 2;
  2387. } else if (ins_len > 0) {
  2388. /*
  2389. * for inserting items, make sure we have a write lock on
  2390. * level 1 so we can update keys
  2391. */
  2392. write_lock_level = 1;
  2393. }
  2394. if (!cow)
  2395. write_lock_level = -1;
  2396. if (cow && (p->keep_locks || p->lowest_level))
  2397. write_lock_level = BTRFS_MAX_LEVEL;
  2398. min_write_lock_level = write_lock_level;
  2399. again:
  2400. prev_cmp = -1;
  2401. /*
  2402. * we try very hard to do read locks on the root
  2403. */
  2404. root_lock = BTRFS_READ_LOCK;
  2405. level = 0;
  2406. if (p->search_commit_root) {
  2407. /*
  2408. * the commit roots are read only
  2409. * so we always do read locks
  2410. */
  2411. if (p->need_commit_sem)
  2412. down_read(&root->fs_info->commit_root_sem);
  2413. b = root->commit_root;
  2414. extent_buffer_get(b);
  2415. level = btrfs_header_level(b);
  2416. if (p->need_commit_sem)
  2417. up_read(&root->fs_info->commit_root_sem);
  2418. if (!p->skip_locking)
  2419. btrfs_tree_read_lock(b);
  2420. } else {
  2421. if (p->skip_locking) {
  2422. b = btrfs_root_node(root);
  2423. level = btrfs_header_level(b);
  2424. } else {
  2425. /* we don't know the level of the root node
  2426. * until we actually have it read locked
  2427. */
  2428. b = btrfs_read_lock_root_node(root);
  2429. level = btrfs_header_level(b);
  2430. if (level <= write_lock_level) {
  2431. /* whoops, must trade for write lock */
  2432. btrfs_tree_read_unlock(b);
  2433. free_extent_buffer(b);
  2434. b = btrfs_lock_root_node(root);
  2435. root_lock = BTRFS_WRITE_LOCK;
  2436. /* the level might have changed, check again */
  2437. level = btrfs_header_level(b);
  2438. }
  2439. }
  2440. }
  2441. p->nodes[level] = b;
  2442. if (!p->skip_locking)
  2443. p->locks[level] = root_lock;
  2444. while (b) {
  2445. level = btrfs_header_level(b);
  2446. /*
  2447. * setup the path here so we can release it under lock
  2448. * contention with the cow code
  2449. */
  2450. if (cow) {
  2451. /*
  2452. * if we don't really need to cow this block
  2453. * then we don't want to set the path blocking,
  2454. * so we test it here
  2455. */
  2456. if (!should_cow_block(trans, root, b))
  2457. goto cow_done;
  2458. /*
  2459. * must have write locks on this node and the
  2460. * parent
  2461. */
  2462. if (level > write_lock_level ||
  2463. (level + 1 > write_lock_level &&
  2464. level + 1 < BTRFS_MAX_LEVEL &&
  2465. p->nodes[level + 1])) {
  2466. write_lock_level = level + 1;
  2467. btrfs_release_path(p);
  2468. goto again;
  2469. }
  2470. btrfs_set_path_blocking(p);
  2471. err = btrfs_cow_block(trans, root, b,
  2472. p->nodes[level + 1],
  2473. p->slots[level + 1], &b);
  2474. if (err) {
  2475. ret = err;
  2476. goto done;
  2477. }
  2478. }
  2479. cow_done:
  2480. p->nodes[level] = b;
  2481. btrfs_clear_path_blocking(p, NULL, 0);
  2482. /*
  2483. * we have a lock on b and as long as we aren't changing
2484. * the tree, there is no way for the items in b to change.
  2485. * It is safe to drop the lock on our parent before we
  2486. * go through the expensive btree search on b.
  2487. *
  2488. * If we're inserting or deleting (ins_len != 0), then we might
  2489. * be changing slot zero, which may require changing the parent.
  2490. * So, we can't drop the lock until after we know which slot
  2491. * we're operating on.
  2492. */
  2493. if (!ins_len && !p->keep_locks) {
  2494. int u = level + 1;
  2495. if (u < BTRFS_MAX_LEVEL && p->locks[u]) {
  2496. btrfs_tree_unlock_rw(p->nodes[u], p->locks[u]);
  2497. p->locks[u] = 0;
  2498. }
  2499. }
  2500. ret = key_search(b, key, level, &prev_cmp, &slot);
  2501. if (level != 0) {
  2502. int dec = 0;
  2503. if (ret && slot > 0) {
  2504. dec = 1;
  2505. slot -= 1;
  2506. }
  2507. p->slots[level] = slot;
  2508. err = setup_nodes_for_search(trans, root, p, b, level,
  2509. ins_len, &write_lock_level);
  2510. if (err == -EAGAIN)
  2511. goto again;
  2512. if (err) {
  2513. ret = err;
  2514. goto done;
  2515. }
  2516. b = p->nodes[level];
  2517. slot = p->slots[level];
  2518. /*
  2519. * slot 0 is special, if we change the key
  2520. * we have to update the parent pointer
  2521. * which means we must have a write lock
  2522. * on the parent
  2523. */
  2524. if (slot == 0 && ins_len &&
  2525. write_lock_level < level + 1) {
  2526. write_lock_level = level + 1;
  2527. btrfs_release_path(p);
  2528. goto again;
  2529. }
  2530. unlock_up(p, level, lowest_unlock,
  2531. min_write_lock_level, &write_lock_level);
  2532. if (level == lowest_level) {
  2533. if (dec)
  2534. p->slots[level]++;
  2535. goto done;
  2536. }
  2537. err = read_block_for_search(trans, root, p,
  2538. &b, level, slot, key, 0);
  2539. if (err == -EAGAIN)
  2540. goto again;
  2541. if (err) {
  2542. ret = err;
  2543. goto done;
  2544. }
  2545. if (!p->skip_locking) {
  2546. level = btrfs_header_level(b);
  2547. if (level <= write_lock_level) {
  2548. err = btrfs_try_tree_write_lock(b);
  2549. if (!err) {
  2550. btrfs_set_path_blocking(p);
  2551. btrfs_tree_lock(b);
  2552. btrfs_clear_path_blocking(p, b,
  2553. BTRFS_WRITE_LOCK);
  2554. }
  2555. p->locks[level] = BTRFS_WRITE_LOCK;
  2556. } else {
  2557. err = btrfs_tree_read_lock_atomic(b);
  2558. if (!err) {
  2559. btrfs_set_path_blocking(p);
  2560. btrfs_tree_read_lock(b);
  2561. btrfs_clear_path_blocking(p, b,
  2562. BTRFS_READ_LOCK);
  2563. }
  2564. p->locks[level] = BTRFS_READ_LOCK;
  2565. }
  2566. p->nodes[level] = b;
  2567. }
  2568. } else {
  2569. p->slots[level] = slot;
  2570. if (ins_len > 0 &&
  2571. btrfs_leaf_free_space(root, b) < ins_len) {
  2572. if (write_lock_level < 1) {
  2573. write_lock_level = 1;
  2574. btrfs_release_path(p);
  2575. goto again;
  2576. }
  2577. btrfs_set_path_blocking(p);
  2578. err = split_leaf(trans, root, key,
  2579. p, ins_len, ret == 0);
  2580. btrfs_clear_path_blocking(p, NULL, 0);
  2581. BUG_ON(err > 0);
  2582. if (err) {
  2583. ret = err;
  2584. goto done;
  2585. }
  2586. }
  2587. if (!p->search_for_split)
  2588. unlock_up(p, level, lowest_unlock,
  2589. min_write_lock_level, &write_lock_level);
  2590. goto done;
  2591. }
  2592. }
  2593. ret = 1;
  2594. done:
  2595. /*
  2596. * we don't really know what they plan on doing with the path
  2597. * from here on, so for now just mark it as blocking
  2598. */
  2599. if (!p->leave_spinning)
  2600. btrfs_set_path_blocking(p);
  2601. if (ret < 0 && !p->skip_release_on_error)
  2602. btrfs_release_path(p);
  2603. return ret;
  2604. }
  2605. /*
  2606. * Like btrfs_search_slot, this looks for a key in the given tree. It uses the
  2607. * current state of the tree together with the operations recorded in the tree
  2608. * modification log to search for the key in a previous version of this tree, as
  2609. * denoted by the time_seq parameter.
  2610. *
  2611. * Naturally, there is no support for insert, delete or cow operations.
  2612. *
  2613. * The resulting path and return value will be set up as if we called
  2614. * btrfs_search_slot at that point in time with ins_len and cow both set to 0.
  2615. */
  2616. int btrfs_search_old_slot(struct btrfs_root *root, struct btrfs_key *key,
  2617. struct btrfs_path *p, u64 time_seq)
  2618. {
  2619. struct extent_buffer *b;
  2620. int slot;
  2621. int ret;
  2622. int err;
  2623. int level;
  2624. int lowest_unlock = 1;
  2625. u8 lowest_level = 0;
  2626. int prev_cmp = -1;
  2627. lowest_level = p->lowest_level;
  2628. WARN_ON(p->nodes[0] != NULL);
  2629. if (p->search_commit_root) {
  2630. BUG_ON(time_seq);
  2631. return btrfs_search_slot(NULL, root, key, p, 0, 0);
  2632. }
  2633. again:
  2634. b = get_old_root(root, time_seq);
  2635. level = btrfs_header_level(b);
  2636. p->locks[level] = BTRFS_READ_LOCK;
  2637. while (b) {
  2638. level = btrfs_header_level(b);
  2639. p->nodes[level] = b;
  2640. btrfs_clear_path_blocking(p, NULL, 0);
		/*
		 * we have a lock on b and as long as we aren't changing
		 * the tree, there is no way for the items in b to change.
		 * It is safe to drop the lock on our parent before we
		 * go through the expensive btree search on b.
		 */
		btrfs_unlock_up_safe(p, level + 1);

		/*
		 * Since we can unwind ebs we want to do a real search every
		 * time.
		 */
		prev_cmp = -1;
  2653. ret = key_search(b, key, level, &prev_cmp, &slot);
  2654. if (level != 0) {
  2655. int dec = 0;
  2656. if (ret && slot > 0) {
  2657. dec = 1;
  2658. slot -= 1;
  2659. }
  2660. p->slots[level] = slot;
  2661. unlock_up(p, level, lowest_unlock, 0, NULL);
  2662. if (level == lowest_level) {
  2663. if (dec)
  2664. p->slots[level]++;
  2665. goto done;
  2666. }
  2667. err = read_block_for_search(NULL, root, p, &b, level,
  2668. slot, key, time_seq);
  2669. if (err == -EAGAIN)
  2670. goto again;
  2671. if (err) {
  2672. ret = err;
  2673. goto done;
  2674. }
  2675. level = btrfs_header_level(b);
  2676. err = btrfs_tree_read_lock_atomic(b);
  2677. if (!err) {
  2678. btrfs_set_path_blocking(p);
  2679. btrfs_tree_read_lock(b);
  2680. btrfs_clear_path_blocking(p, b,
  2681. BTRFS_READ_LOCK);
  2682. }
  2683. b = tree_mod_log_rewind(root->fs_info, p, b, time_seq);
  2684. if (!b) {
  2685. ret = -ENOMEM;
  2686. goto done;
  2687. }
  2688. p->locks[level] = BTRFS_READ_LOCK;
  2689. p->nodes[level] = b;
  2690. } else {
  2691. p->slots[level] = slot;
  2692. unlock_up(p, level, lowest_unlock, 0, NULL);
  2693. goto done;
  2694. }
  2695. }
  2696. ret = 1;
  2697. done:
  2698. if (!p->leave_spinning)
  2699. btrfs_set_path_blocking(p);
  2700. if (ret < 0)
  2701. btrfs_release_path(p);
  2702. return ret;
  2703. }
/*
 * Helper to use instead of btrfs_search_slot when no exact match is needed
 * and the next or previous item should be returned instead.
 * When find_higher is true, the next higher item is returned, otherwise the
 * next lower.
 * When return_any and find_higher are both true, and no higher item is found,
 * return the next lower instead.
 * When return_any is true and find_higher is false, and no lower item is
 * found, return the next higher instead.
 * It returns 0 if any item is found, 1 if none is found (tree empty), and
 * < 0 on error.
 */
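/*
 * A typical read-side use (sketch): position the path at the first item with
 * a key >= the search key, falling back to the closest lower item when the
 * tree holds nothing higher:
 *
 *	ret = btrfs_search_slot_for_read(root, &key, path, 1, 1);
 *	if (ret == 0)
 *		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
 *				      path->slots[0]);
 */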
  2716. int btrfs_search_slot_for_read(struct btrfs_root *root,
  2717. struct btrfs_key *key, struct btrfs_path *p,
  2718. int find_higher, int return_any)
  2719. {
  2720. int ret;
  2721. struct extent_buffer *leaf;
  2722. again:
  2723. ret = btrfs_search_slot(NULL, root, key, p, 0, 0);
  2724. if (ret <= 0)
  2725. return ret;
  2726. /*
  2727. * a return value of 1 means the path is at the position where the
  2728. * item should be inserted. Normally this is the next bigger item,
  2729. * but in case the previous item is the last in a leaf, path points
  2730. * to the first free slot in the previous leaf, i.e. at an invalid
  2731. * item.
  2732. */
  2733. leaf = p->nodes[0];
  2734. if (find_higher) {
  2735. if (p->slots[0] >= btrfs_header_nritems(leaf)) {
  2736. ret = btrfs_next_leaf(root, p);
  2737. if (ret <= 0)
  2738. return ret;
  2739. if (!return_any)
  2740. return 1;
  2741. /*
  2742. * no higher item found, return the next
  2743. * lower instead
  2744. */
  2745. return_any = 0;
  2746. find_higher = 0;
  2747. btrfs_release_path(p);
  2748. goto again;
  2749. }
  2750. } else {
  2751. if (p->slots[0] == 0) {
  2752. ret = btrfs_prev_leaf(root, p);
  2753. if (ret < 0)
  2754. return ret;
  2755. if (!ret) {
  2756. leaf = p->nodes[0];
  2757. if (p->slots[0] == btrfs_header_nritems(leaf))
  2758. p->slots[0]--;
  2759. return 0;
  2760. }
  2761. if (!return_any)
  2762. return 1;
  2763. /*
  2764. * no lower item found, return the next
  2765. * higher instead
  2766. */
  2767. return_any = 0;
  2768. find_higher = 1;
  2769. btrfs_release_path(p);
  2770. goto again;
  2771. } else {
  2772. --p->slots[0];
  2773. }
  2774. }
  2775. return 0;
  2776. }
/*
 * Adjust the pointers going up the tree, starting at level, making sure the
 * right key of each node points to 'key'.
 * This is used after shifting pointers to the left, so it stops fixing up
 * pointers when a given leaf/node is not in slot 0 of the higher levels.
 */
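/*
 * Example: after the first item of a leaf is removed or rewritten, the new
 * lowest key must be copied into the parent slot that points at the leaf.
 * If that slot is slot 0, the parent's own lowest key changed too, so the
 * walk continues upward; it stops at the first ancestor where our subtree
 * does not sit in slot 0.
 */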
  2785. static void fixup_low_keys(struct btrfs_fs_info *fs_info,
  2786. struct btrfs_path *path,
  2787. struct btrfs_disk_key *key, int level)
  2788. {
  2789. int i;
  2790. struct extent_buffer *t;
  2791. for (i = level; i < BTRFS_MAX_LEVEL; i++) {
  2792. int tslot = path->slots[i];
  2793. if (!path->nodes[i])
  2794. break;
  2795. t = path->nodes[i];
  2796. tree_mod_log_set_node_key(fs_info, t, tslot, 1);
  2797. btrfs_set_node_key(t, key, tslot);
  2798. btrfs_mark_buffer_dirty(path->nodes[i]);
  2799. if (tslot != 0)
  2800. break;
  2801. }
  2802. }
/*
 * Update item key.
 *
 * This function isn't completely safe. It is the caller's responsibility to
 * ensure that the new key does not break the key order in the leaf.
 */
  2809. void btrfs_set_item_key_safe(struct btrfs_fs_info *fs_info,
  2810. struct btrfs_path *path,
  2811. struct btrfs_key *new_key)
  2812. {
  2813. struct btrfs_disk_key disk_key;
  2814. struct extent_buffer *eb;
  2815. int slot;
  2816. eb = path->nodes[0];
  2817. slot = path->slots[0];
  2818. if (slot > 0) {
  2819. btrfs_item_key(eb, &disk_key, slot - 1);
  2820. BUG_ON(comp_keys(&disk_key, new_key) >= 0);
  2821. }
  2822. if (slot < btrfs_header_nritems(eb) - 1) {
  2823. btrfs_item_key(eb, &disk_key, slot + 1);
  2824. BUG_ON(comp_keys(&disk_key, new_key) <= 0);
  2825. }
  2826. btrfs_cpu_key_to_disk(&disk_key, new_key);
  2827. btrfs_set_item_key(eb, &disk_key, slot);
  2828. btrfs_mark_buffer_dirty(eb);
  2829. if (slot == 0)
  2830. fixup_low_keys(fs_info, path, &disk_key, 1);
  2831. }
  2832. /*
  2833. * try to push data from one node into the next node left in the
  2834. * tree.
  2835. *
  2836. * returns 0 if some ptrs were pushed left, < 0 if there was some horrible
  2837. * error, and > 0 if there was no room in the left hand block.
  2838. */
  2839. static int push_node_left(struct btrfs_trans_handle *trans,
  2840. struct btrfs_root *root, struct extent_buffer *dst,
  2841. struct extent_buffer *src, int empty)
  2842. {
  2843. int push_items = 0;
  2844. int src_nritems;
  2845. int dst_nritems;
  2846. int ret = 0;
  2847. src_nritems = btrfs_header_nritems(src);
  2848. dst_nritems = btrfs_header_nritems(dst);
  2849. push_items = BTRFS_NODEPTRS_PER_BLOCK(root) - dst_nritems;
  2850. WARN_ON(btrfs_header_generation(src) != trans->transid);
  2851. WARN_ON(btrfs_header_generation(dst) != trans->transid);
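	/*
	 * Unless the caller asked us to empty src completely, never drain it
	 * below 8 pointers; pushing out of a nearly empty node isn't worth
	 * the churn.
	 */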
  2852. if (!empty && src_nritems <= 8)
  2853. return 1;
  2854. if (push_items <= 0)
  2855. return 1;
  2856. if (empty) {
  2857. push_items = min(src_nritems, push_items);
  2858. if (push_items < src_nritems) {
  2859. /* leave at least 8 pointers in the node if
  2860. * we aren't going to empty it
  2861. */
  2862. if (src_nritems - push_items < 8) {
  2863. if (push_items <= 8)
  2864. return 1;
  2865. push_items -= 8;
  2866. }
  2867. }
  2868. } else
  2869. push_items = min(src_nritems - 8, push_items);
  2870. ret = tree_mod_log_eb_copy(root->fs_info, dst, src, dst_nritems, 0,
  2871. push_items);
  2872. if (ret) {
  2873. btrfs_abort_transaction(trans, root, ret);
  2874. return ret;
  2875. }
  2876. copy_extent_buffer(dst, src,
  2877. btrfs_node_key_ptr_offset(dst_nritems),
  2878. btrfs_node_key_ptr_offset(0),
  2879. push_items * sizeof(struct btrfs_key_ptr));
  2880. if (push_items < src_nritems) {
  2881. /*
  2882. * don't call tree_mod_log_eb_move here, key removal was already
  2883. * fully logged by tree_mod_log_eb_copy above.
  2884. */
  2885. memmove_extent_buffer(src, btrfs_node_key_ptr_offset(0),
  2886. btrfs_node_key_ptr_offset(push_items),
  2887. (src_nritems - push_items) *
  2888. sizeof(struct btrfs_key_ptr));
  2889. }
  2890. btrfs_set_header_nritems(src, src_nritems - push_items);
  2891. btrfs_set_header_nritems(dst, dst_nritems + push_items);
  2892. btrfs_mark_buffer_dirty(src);
  2893. btrfs_mark_buffer_dirty(dst);
  2894. return ret;
  2895. }
  2896. /*
  2897. * try to push data from one node into the next node right in the
  2898. * tree.
  2899. *
  2900. * returns 0 if some ptrs were pushed, < 0 if there was some horrible
  2901. * error, and > 0 if there was no room in the right hand block.
  2902. *
  2903. * this will only push up to 1/2 the contents of the left node over
  2904. */
  2905. static int balance_node_right(struct btrfs_trans_handle *trans,
  2906. struct btrfs_root *root,
  2907. struct extent_buffer *dst,
  2908. struct extent_buffer *src)
  2909. {
  2910. int push_items = 0;
  2911. int max_push;
  2912. int src_nritems;
  2913. int dst_nritems;
  2914. int ret = 0;
  2915. WARN_ON(btrfs_header_generation(src) != trans->transid);
  2916. WARN_ON(btrfs_header_generation(dst) != trans->transid);
  2917. src_nritems = btrfs_header_nritems(src);
  2918. dst_nritems = btrfs_header_nritems(dst);
  2919. push_items = BTRFS_NODEPTRS_PER_BLOCK(root) - dst_nritems;
  2920. if (push_items <= 0)
  2921. return 1;
  2922. if (src_nritems < 4)
  2923. return 1;
  2924. max_push = src_nritems / 2 + 1;
  2925. /* don't try to empty the node */
  2926. if (max_push >= src_nritems)
  2927. return 1;
  2928. if (max_push < push_items)
  2929. push_items = max_push;
  2930. tree_mod_log_eb_move(root->fs_info, dst, push_items, 0, dst_nritems);
  2931. memmove_extent_buffer(dst, btrfs_node_key_ptr_offset(push_items),
  2932. btrfs_node_key_ptr_offset(0),
  2933. (dst_nritems) *
  2934. sizeof(struct btrfs_key_ptr));
  2935. ret = tree_mod_log_eb_copy(root->fs_info, dst, src, 0,
  2936. src_nritems - push_items, push_items);
  2937. if (ret) {
  2938. btrfs_abort_transaction(trans, root, ret);
  2939. return ret;
  2940. }
  2941. copy_extent_buffer(dst, src,
  2942. btrfs_node_key_ptr_offset(0),
  2943. btrfs_node_key_ptr_offset(src_nritems - push_items),
  2944. push_items * sizeof(struct btrfs_key_ptr));
  2945. btrfs_set_header_nritems(src, src_nritems - push_items);
  2946. btrfs_set_header_nritems(dst, dst_nritems + push_items);
  2947. btrfs_mark_buffer_dirty(src);
  2948. btrfs_mark_buffer_dirty(dst);
  2949. return ret;
  2950. }
  2951. /*
  2952. * helper function to insert a new root level in the tree.
  2953. * A new node is allocated, and a single item is inserted to
  2954. * point to the existing root
  2955. *
  2956. * returns zero on success or < 0 on failure.
  2957. */
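/*
 * The new root gets a single key/pointer pair: the first key of the current
 * root (item key if the old root is a leaf, node key otherwise), pointing
 * back at the old root. root->node is then swapped to the new block and the
 * path gains one more level.
 */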
  2958. static noinline int insert_new_root(struct btrfs_trans_handle *trans,
  2959. struct btrfs_root *root,
  2960. struct btrfs_path *path, int level)
  2961. {
  2962. u64 lower_gen;
  2963. struct extent_buffer *lower;
  2964. struct extent_buffer *c;
  2965. struct extent_buffer *old;
  2966. struct btrfs_disk_key lower_key;
  2967. BUG_ON(path->nodes[level]);
  2968. BUG_ON(path->nodes[level-1] != root->node);
  2969. lower = path->nodes[level-1];
  2970. if (level == 1)
  2971. btrfs_item_key(lower, &lower_key, 0);
  2972. else
  2973. btrfs_node_key(lower, &lower_key, 0);
  2974. c = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
  2975. &lower_key, level, root->node->start, 0);
  2976. if (IS_ERR(c))
  2977. return PTR_ERR(c);
  2978. root_add_used(root, root->nodesize);
  2979. memset_extent_buffer(c, 0, 0, sizeof(struct btrfs_header));
  2980. btrfs_set_header_nritems(c, 1);
  2981. btrfs_set_header_level(c, level);
  2982. btrfs_set_header_bytenr(c, c->start);
  2983. btrfs_set_header_generation(c, trans->transid);
  2984. btrfs_set_header_backref_rev(c, BTRFS_MIXED_BACKREF_REV);
  2985. btrfs_set_header_owner(c, root->root_key.objectid);
  2986. write_extent_buffer(c, root->fs_info->fsid, btrfs_header_fsid(),
  2987. BTRFS_FSID_SIZE);
  2988. write_extent_buffer(c, root->fs_info->chunk_tree_uuid,
  2989. btrfs_header_chunk_tree_uuid(c), BTRFS_UUID_SIZE);
  2990. btrfs_set_node_key(c, &lower_key, 0);
  2991. btrfs_set_node_blockptr(c, 0, lower->start);
  2992. lower_gen = btrfs_header_generation(lower);
  2993. WARN_ON(lower_gen != trans->transid);
  2994. btrfs_set_node_ptr_generation(c, 0, lower_gen);
  2995. btrfs_mark_buffer_dirty(c);
  2996. old = root->node;
  2997. tree_mod_log_set_root_pointer(root, c, 0);
  2998. rcu_assign_pointer(root->node, c);
  2999. /* the super has an extra ref to root->node */
  3000. free_extent_buffer(old);
  3001. add_root_to_dirty_list(root);
  3002. extent_buffer_get(c);
  3003. path->nodes[level] = c;
  3004. path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
  3005. path->slots[level] = 0;
  3006. return 0;
  3007. }
  3008. /*
  3009. * worker function to insert a single pointer in a node.
  3010. * the node should have enough room for the pointer already
  3011. *
  3012. * slot and level indicate where you want the key to go, and
  3013. * blocknr is the block the key points to.
  3014. */
  3015. static void insert_ptr(struct btrfs_trans_handle *trans,
  3016. struct btrfs_root *root, struct btrfs_path *path,
  3017. struct btrfs_disk_key *key, u64 bytenr,
  3018. int slot, int level)
  3019. {
  3020. struct extent_buffer *lower;
  3021. int nritems;
  3022. int ret;
  3023. BUG_ON(!path->nodes[level]);
  3024. btrfs_assert_tree_locked(path->nodes[level]);
  3025. lower = path->nodes[level];
  3026. nritems = btrfs_header_nritems(lower);
  3027. BUG_ON(slot > nritems);
  3028. BUG_ON(nritems == BTRFS_NODEPTRS_PER_BLOCK(root));
  3029. if (slot != nritems) {
  3030. if (level)
  3031. tree_mod_log_eb_move(root->fs_info, lower, slot + 1,
  3032. slot, nritems - slot);
  3033. memmove_extent_buffer(lower,
  3034. btrfs_node_key_ptr_offset(slot + 1),
  3035. btrfs_node_key_ptr_offset(slot),
  3036. (nritems - slot) * sizeof(struct btrfs_key_ptr));
  3037. }
  3038. if (level) {
  3039. ret = tree_mod_log_insert_key(root->fs_info, lower, slot,
  3040. MOD_LOG_KEY_ADD, GFP_NOFS);
  3041. BUG_ON(ret < 0);
  3042. }
  3043. btrfs_set_node_key(lower, key, slot);
  3044. btrfs_set_node_blockptr(lower, slot, bytenr);
  3045. WARN_ON(trans->transid == 0);
  3046. btrfs_set_node_ptr_generation(lower, slot, trans->transid);
  3047. btrfs_set_header_nritems(lower, nritems + 1);
  3048. btrfs_mark_buffer_dirty(lower);
  3049. }
  3050. /*
  3051. * split the node at the specified level in path in two.
  3052. * The path is corrected to point to the appropriate node after the split
  3053. *
  3054. * Before splitting this tries to make some room in the node by pushing
  3055. * left and right, if either one works, it returns right away.
  3056. *
  3057. * returns 0 on success and < 0 on failure
  3058. */
  3059. static noinline int split_node(struct btrfs_trans_handle *trans,
  3060. struct btrfs_root *root,
  3061. struct btrfs_path *path, int level)
  3062. {
  3063. struct extent_buffer *c;
  3064. struct extent_buffer *split;
  3065. struct btrfs_disk_key disk_key;
  3066. int mid;
  3067. int ret;
  3068. u32 c_nritems;
  3069. c = path->nodes[level];
  3070. WARN_ON(btrfs_header_generation(c) != trans->transid);
  3071. if (c == root->node) {
		/*
		 * trying to split the root, let's make a new one
		 *
		 * tree mod log: we don't log the removal of the old root in
		 * insert_new_root, because that root buffer will be kept as a
		 * normal node. We are going to log the removal of half of the
		 * elements below with tree_mod_log_eb_copy. We're holding a
		 * tree lock on the buffer, which is why we cannot race with
		 * other tree_mod_log users.
		 */
  3082. ret = insert_new_root(trans, root, path, level + 1);
  3083. if (ret)
  3084. return ret;
  3085. } else {
  3086. ret = push_nodes_for_insert(trans, root, path, level);
  3087. c = path->nodes[level];
  3088. if (!ret && btrfs_header_nritems(c) <
  3089. BTRFS_NODEPTRS_PER_BLOCK(root) - 3)
  3090. return 0;
  3091. if (ret < 0)
  3092. return ret;
  3093. }
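	/*
	 * Split point: items [0, mid) stay in c, items [mid, c_nritems) move
	 * to the new node; mid is rounded up so the original node keeps the
	 * larger half when the count is odd.
	 */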
  3094. c_nritems = btrfs_header_nritems(c);
  3095. mid = (c_nritems + 1) / 2;
  3096. btrfs_node_key(c, &disk_key, mid);
  3097. split = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
  3098. &disk_key, level, c->start, 0);
  3099. if (IS_ERR(split))
  3100. return PTR_ERR(split);
  3101. root_add_used(root, root->nodesize);
  3102. memset_extent_buffer(split, 0, 0, sizeof(struct btrfs_header));
  3103. btrfs_set_header_level(split, btrfs_header_level(c));
  3104. btrfs_set_header_bytenr(split, split->start);
  3105. btrfs_set_header_generation(split, trans->transid);
  3106. btrfs_set_header_backref_rev(split, BTRFS_MIXED_BACKREF_REV);
  3107. btrfs_set_header_owner(split, root->root_key.objectid);
  3108. write_extent_buffer(split, root->fs_info->fsid,
  3109. btrfs_header_fsid(), BTRFS_FSID_SIZE);
  3110. write_extent_buffer(split, root->fs_info->chunk_tree_uuid,
  3111. btrfs_header_chunk_tree_uuid(split),
  3112. BTRFS_UUID_SIZE);
  3113. ret = tree_mod_log_eb_copy(root->fs_info, split, c, 0,
  3114. mid, c_nritems - mid);
  3115. if (ret) {
  3116. btrfs_abort_transaction(trans, root, ret);
  3117. return ret;
  3118. }
  3119. copy_extent_buffer(split, c,
  3120. btrfs_node_key_ptr_offset(0),
  3121. btrfs_node_key_ptr_offset(mid),
  3122. (c_nritems - mid) * sizeof(struct btrfs_key_ptr));
  3123. btrfs_set_header_nritems(split, c_nritems - mid);
  3124. btrfs_set_header_nritems(c, mid);
  3125. ret = 0;
  3126. btrfs_mark_buffer_dirty(c);
  3127. btrfs_mark_buffer_dirty(split);
  3128. insert_ptr(trans, root, path, &disk_key, split->start,
  3129. path->slots[level + 1] + 1, level + 1);
  3130. if (path->slots[level] >= mid) {
  3131. path->slots[level] -= mid;
  3132. btrfs_tree_unlock(c);
  3133. free_extent_buffer(c);
  3134. path->nodes[level] = split;
  3135. path->slots[level + 1] += 1;
  3136. } else {
  3137. btrfs_tree_unlock(split);
  3138. free_extent_buffer(split);
  3139. }
  3140. return ret;
  3141. }
  3142. /*
  3143. * how many bytes are required to store the items in a leaf. start
  3144. * and nr indicate which items in the leaf to check. This totals up the
  3145. * space used both by the item structs and the item data
  3146. */
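/*
 * Item headers grow from the front of the leaf while item data grows back
 * from the end, so the bytes used by items [start, start + nr) are
 *
 *	data_len = item_end(start) - item_offset(end)
 *		 + nr * sizeof(struct btrfs_item)
 *
 * where item_end(i) = item_offset(i) + item_size(i) and end is the last
 * item in the range.
 */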
  3147. static int leaf_space_used(struct extent_buffer *l, int start, int nr)
  3148. {
  3149. struct btrfs_item *start_item;
  3150. struct btrfs_item *end_item;
  3151. struct btrfs_map_token token;
  3152. int data_len;
  3153. int nritems = btrfs_header_nritems(l);
  3154. int end = min(nritems, start + nr) - 1;
  3155. if (!nr)
  3156. return 0;
  3157. btrfs_init_map_token(&token);
  3158. start_item = btrfs_item_nr(start);
  3159. end_item = btrfs_item_nr(end);
  3160. data_len = btrfs_token_item_offset(l, start_item, &token) +
  3161. btrfs_token_item_size(l, start_item, &token);
  3162. data_len = data_len - btrfs_token_item_offset(l, end_item, &token);
  3163. data_len += sizeof(struct btrfs_item) * nr;
  3164. WARN_ON(data_len < 0);
  3165. return data_len;
  3166. }
  3167. /*
  3168. * The space between the end of the leaf items and
  3169. * the start of the leaf data. IOW, how much room
  3170. * the leaf has left for both items and data
  3171. */
  3172. noinline int btrfs_leaf_free_space(struct btrfs_root *root,
  3173. struct extent_buffer *leaf)
  3174. {
  3175. int nritems = btrfs_header_nritems(leaf);
  3176. int ret;
  3177. ret = BTRFS_LEAF_DATA_SIZE(root) - leaf_space_used(leaf, 0, nritems);
  3178. if (ret < 0) {
  3179. btrfs_crit(root->fs_info,
  3180. "leaf free space ret %d, leaf data size %lu, used %d nritems %d",
  3181. ret, (unsigned long) BTRFS_LEAF_DATA_SIZE(root),
  3182. leaf_space_used(leaf, 0, nritems), nritems);
  3183. }
  3184. return ret;
  3185. }
  3186. /*
  3187. * min slot controls the lowest index we're willing to push to the
  3188. * right. We'll push up to and including min_slot, but no lower
  3189. */
  3190. static noinline int __push_leaf_right(struct btrfs_trans_handle *trans,
  3191. struct btrfs_root *root,
  3192. struct btrfs_path *path,
  3193. int data_size, int empty,
  3194. struct extent_buffer *right,
  3195. int free_space, u32 left_nritems,
  3196. u32 min_slot)
  3197. {
  3198. struct extent_buffer *left = path->nodes[0];
  3199. struct extent_buffer *upper = path->nodes[1];
  3200. struct btrfs_map_token token;
  3201. struct btrfs_disk_key disk_key;
  3202. int slot;
  3203. u32 i;
  3204. int push_space = 0;
  3205. int push_items = 0;
  3206. struct btrfs_item *item;
  3207. u32 nr;
  3208. u32 right_nritems;
  3209. u32 data_end;
  3210. u32 this_item_size;
  3211. btrfs_init_map_token(&token);
  3212. if (empty)
  3213. nr = 0;
  3214. else
  3215. nr = max_t(u32, 1, min_slot);
  3216. if (path->slots[0] >= left_nritems)
  3217. push_space += data_size;
  3218. slot = path->slots[1];
  3219. i = left_nritems - 1;
  3220. while (i >= nr) {
  3221. item = btrfs_item_nr(i);
  3222. if (!empty && push_items > 0) {
  3223. if (path->slots[0] > i)
  3224. break;
  3225. if (path->slots[0] == i) {
  3226. int space = btrfs_leaf_free_space(root, left);
  3227. if (space + push_space * 2 > free_space)
  3228. break;
  3229. }
  3230. }
  3231. if (path->slots[0] == i)
  3232. push_space += data_size;
  3233. this_item_size = btrfs_item_size(left, item);
  3234. if (this_item_size + sizeof(*item) + push_space > free_space)
  3235. break;
  3236. push_items++;
  3237. push_space += this_item_size + sizeof(*item);
  3238. if (i == 0)
  3239. break;
  3240. i--;
  3241. }
  3242. if (push_items == 0)
  3243. goto out_unlock;
  3244. WARN_ON(!empty && push_items == left_nritems);
  3245. /* push left to right */
  3246. right_nritems = btrfs_header_nritems(right);
  3247. push_space = btrfs_item_end_nr(left, left_nritems - push_items);
  3248. push_space -= leaf_data_end(root, left);
  3249. /* make room in the right data area */
  3250. data_end = leaf_data_end(root, right);
  3251. memmove_extent_buffer(right,
  3252. btrfs_leaf_data(right) + data_end - push_space,
  3253. btrfs_leaf_data(right) + data_end,
  3254. BTRFS_LEAF_DATA_SIZE(root) - data_end);
  3255. /* copy from the left data area */
  3256. copy_extent_buffer(right, left, btrfs_leaf_data(right) +
  3257. BTRFS_LEAF_DATA_SIZE(root) - push_space,
  3258. btrfs_leaf_data(left) + leaf_data_end(root, left),
  3259. push_space);
  3260. memmove_extent_buffer(right, btrfs_item_nr_offset(push_items),
  3261. btrfs_item_nr_offset(0),
  3262. right_nritems * sizeof(struct btrfs_item));
  3263. /* copy the items from left to right */
  3264. copy_extent_buffer(right, left, btrfs_item_nr_offset(0),
  3265. btrfs_item_nr_offset(left_nritems - push_items),
  3266. push_items * sizeof(struct btrfs_item));
  3267. /* update the item pointers */
  3268. right_nritems += push_items;
  3269. btrfs_set_header_nritems(right, right_nritems);
  3270. push_space = BTRFS_LEAF_DATA_SIZE(root);
  3271. for (i = 0; i < right_nritems; i++) {
  3272. item = btrfs_item_nr(i);
  3273. push_space -= btrfs_token_item_size(right, item, &token);
  3274. btrfs_set_token_item_offset(right, item, push_space, &token);
  3275. }
  3276. left_nritems -= push_items;
  3277. btrfs_set_header_nritems(left, left_nritems);
  3278. if (left_nritems)
  3279. btrfs_mark_buffer_dirty(left);
  3280. else
  3281. clean_tree_block(trans, root->fs_info, left);
  3282. btrfs_mark_buffer_dirty(right);
  3283. btrfs_item_key(right, &disk_key, 0);
  3284. btrfs_set_node_key(upper, &disk_key, slot + 1);
  3285. btrfs_mark_buffer_dirty(upper);
  3286. /* then fixup the leaf pointer in the path */
  3287. if (path->slots[0] >= left_nritems) {
  3288. path->slots[0] -= left_nritems;
  3289. if (btrfs_header_nritems(path->nodes[0]) == 0)
  3290. clean_tree_block(trans, root->fs_info, path->nodes[0]);
  3291. btrfs_tree_unlock(path->nodes[0]);
  3292. free_extent_buffer(path->nodes[0]);
  3293. path->nodes[0] = right;
  3294. path->slots[1] += 1;
  3295. } else {
  3296. btrfs_tree_unlock(right);
  3297. free_extent_buffer(right);
  3298. }
  3299. return 0;
  3300. out_unlock:
  3301. btrfs_tree_unlock(right);
  3302. free_extent_buffer(right);
  3303. return 1;
  3304. }
  3305. /*
  3306. * push some data in the path leaf to the right, trying to free up at
  3307. * least data_size bytes. returns zero if the push worked, nonzero otherwise
  3308. *
  3309. * returns 1 if the push failed because the other node didn't have enough
  3310. * room, 0 if everything worked out and < 0 if there were major errors.
  3311. *
  3312. * this will push starting from min_slot to the end of the leaf. It won't
  3313. * push any slot lower than min_slot
  3314. */
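/*
 * Only the existing right sibling under the same parent (path->nodes[1]) is
 * considered; if, even after COWing it, the sibling still can't take
 * data_size bytes, we return 1 and let the caller split the leaf instead.
 */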
  3315. static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root
  3316. *root, struct btrfs_path *path,
  3317. int min_data_size, int data_size,
  3318. int empty, u32 min_slot)
  3319. {
  3320. struct extent_buffer *left = path->nodes[0];
  3321. struct extent_buffer *right;
  3322. struct extent_buffer *upper;
  3323. int slot;
  3324. int free_space;
  3325. u32 left_nritems;
  3326. int ret;
  3327. if (!path->nodes[1])
  3328. return 1;
  3329. slot = path->slots[1];
  3330. upper = path->nodes[1];
  3331. if (slot >= btrfs_header_nritems(upper) - 1)
  3332. return 1;
  3333. btrfs_assert_tree_locked(path->nodes[1]);
  3334. right = read_node_slot(root, upper, slot + 1);
  3335. if (right == NULL)
  3336. return 1;
  3337. btrfs_tree_lock(right);
  3338. btrfs_set_lock_blocking(right);
  3339. free_space = btrfs_leaf_free_space(root, right);
  3340. if (free_space < data_size)
  3341. goto out_unlock;
  3342. /* cow and double check */
  3343. ret = btrfs_cow_block(trans, root, right, upper,
  3344. slot + 1, &right);
  3345. if (ret)
  3346. goto out_unlock;
  3347. free_space = btrfs_leaf_free_space(root, right);
  3348. if (free_space < data_size)
  3349. goto out_unlock;
  3350. left_nritems = btrfs_header_nritems(left);
  3351. if (left_nritems == 0)
  3352. goto out_unlock;
  3353. if (path->slots[0] == left_nritems && !empty) {
		/*
		 * Key greater than all keys in the leaf, right neighbor has
		 * enough room for it and we're not emptying our leaf to delete
		 * it, therefore use right neighbor to insert the new item and
		 * no need to touch/dirty our left leaf.
		 */
  3358. btrfs_tree_unlock(left);
  3359. free_extent_buffer(left);
  3360. path->nodes[0] = right;
  3361. path->slots[0] = 0;
  3362. path->slots[1]++;
  3363. return 0;
  3364. }
  3365. return __push_leaf_right(trans, root, path, min_data_size, empty,
  3366. right, free_space, left_nritems, min_slot);
  3367. out_unlock:
  3368. btrfs_tree_unlock(right);
  3369. free_extent_buffer(right);
  3370. return 1;
  3371. }
  3372. /*
  3373. * push some data in the path leaf to the left, trying to free up at
  3374. * least data_size bytes. returns zero if the push worked, nonzero otherwise
  3375. *
  3376. * max_slot can put a limit on how far into the leaf we'll push items. The
 * item at 'max_slot' won't be touched. Use (u32)-1 to make us push all the
  3378. * items
  3379. */
  3380. static noinline int __push_leaf_left(struct btrfs_trans_handle *trans,
  3381. struct btrfs_root *root,
  3382. struct btrfs_path *path, int data_size,
  3383. int empty, struct extent_buffer *left,
  3384. int free_space, u32 right_nritems,
  3385. u32 max_slot)
  3386. {
  3387. struct btrfs_disk_key disk_key;
  3388. struct extent_buffer *right = path->nodes[0];
  3389. int i;
  3390. int push_space = 0;
  3391. int push_items = 0;
  3392. struct btrfs_item *item;
  3393. u32 old_left_nritems;
  3394. u32 nr;
  3395. int ret = 0;
  3396. u32 this_item_size;
  3397. u32 old_left_item_size;
  3398. struct btrfs_map_token token;
  3399. btrfs_init_map_token(&token);
  3400. if (empty)
  3401. nr = min(right_nritems, max_slot);
  3402. else
  3403. nr = min(right_nritems - 1, max_slot);
  3404. for (i = 0; i < nr; i++) {
  3405. item = btrfs_item_nr(i);
  3406. if (!empty && push_items > 0) {
  3407. if (path->slots[0] < i)
  3408. break;
  3409. if (path->slots[0] == i) {
  3410. int space = btrfs_leaf_free_space(root, right);
  3411. if (space + push_space * 2 > free_space)
  3412. break;
  3413. }
  3414. }
  3415. if (path->slots[0] == i)
  3416. push_space += data_size;
  3417. this_item_size = btrfs_item_size(right, item);
  3418. if (this_item_size + sizeof(*item) + push_space > free_space)
  3419. break;
  3420. push_items++;
  3421. push_space += this_item_size + sizeof(*item);
  3422. }
  3423. if (push_items == 0) {
  3424. ret = 1;
  3425. goto out;
  3426. }
  3427. WARN_ON(!empty && push_items == btrfs_header_nritems(right));
  3428. /* push data from right to left */
  3429. copy_extent_buffer(left, right,
  3430. btrfs_item_nr_offset(btrfs_header_nritems(left)),
  3431. btrfs_item_nr_offset(0),
  3432. push_items * sizeof(struct btrfs_item));
  3433. push_space = BTRFS_LEAF_DATA_SIZE(root) -
  3434. btrfs_item_offset_nr(right, push_items - 1);
  3435. copy_extent_buffer(left, right, btrfs_leaf_data(left) +
  3436. leaf_data_end(root, left) - push_space,
  3437. btrfs_leaf_data(right) +
  3438. btrfs_item_offset_nr(right, push_items - 1),
  3439. push_space);
  3440. old_left_nritems = btrfs_header_nritems(left);
  3441. BUG_ON(old_left_nritems <= 0);
  3442. old_left_item_size = btrfs_item_offset_nr(left, old_left_nritems - 1);
  3443. for (i = old_left_nritems; i < old_left_nritems + push_items; i++) {
  3444. u32 ioff;
  3445. item = btrfs_item_nr(i);
  3446. ioff = btrfs_token_item_offset(left, item, &token);
  3447. btrfs_set_token_item_offset(left, item,
  3448. ioff - (BTRFS_LEAF_DATA_SIZE(root) - old_left_item_size),
  3449. &token);
  3450. }
  3451. btrfs_set_header_nritems(left, old_left_nritems + push_items);
  3452. /* fixup right node */
  3453. if (push_items > right_nritems)
  3454. WARN(1, KERN_CRIT "push items %d nr %u\n", push_items,
  3455. right_nritems);
  3456. if (push_items < right_nritems) {
  3457. push_space = btrfs_item_offset_nr(right, push_items - 1) -
  3458. leaf_data_end(root, right);
  3459. memmove_extent_buffer(right, btrfs_leaf_data(right) +
  3460. BTRFS_LEAF_DATA_SIZE(root) - push_space,
  3461. btrfs_leaf_data(right) +
  3462. leaf_data_end(root, right), push_space);
  3463. memmove_extent_buffer(right, btrfs_item_nr_offset(0),
  3464. btrfs_item_nr_offset(push_items),
  3465. (btrfs_header_nritems(right) - push_items) *
  3466. sizeof(struct btrfs_item));
  3467. }
  3468. right_nritems -= push_items;
  3469. btrfs_set_header_nritems(right, right_nritems);
  3470. push_space = BTRFS_LEAF_DATA_SIZE(root);
  3471. for (i = 0; i < right_nritems; i++) {
  3472. item = btrfs_item_nr(i);
  3473. push_space = push_space - btrfs_token_item_size(right,
  3474. item, &token);
  3475. btrfs_set_token_item_offset(right, item, push_space, &token);
  3476. }
  3477. btrfs_mark_buffer_dirty(left);
  3478. if (right_nritems)
  3479. btrfs_mark_buffer_dirty(right);
  3480. else
  3481. clean_tree_block(trans, root->fs_info, right);
  3482. btrfs_item_key(right, &disk_key, 0);
  3483. fixup_low_keys(root->fs_info, path, &disk_key, 1);
  3484. /* then fixup the leaf pointer in the path */
  3485. if (path->slots[0] < push_items) {
  3486. path->slots[0] += old_left_nritems;
  3487. btrfs_tree_unlock(path->nodes[0]);
  3488. free_extent_buffer(path->nodes[0]);
  3489. path->nodes[0] = left;
  3490. path->slots[1] -= 1;
  3491. } else {
  3492. btrfs_tree_unlock(left);
  3493. free_extent_buffer(left);
  3494. path->slots[0] -= push_items;
  3495. }
  3496. BUG_ON(path->slots[0] < 0);
  3497. return ret;
  3498. out:
  3499. btrfs_tree_unlock(left);
  3500. free_extent_buffer(left);
  3501. return ret;
  3502. }
  3503. /*
  3504. * push some data in the path leaf to the left, trying to free up at
  3505. * least data_size bytes. returns zero if the push worked, nonzero otherwise
  3506. *
  3507. * max_slot can put a limit on how far into the leaf we'll push items. The
  3508. * item at 'max_slot' won't be touched. Use (u32)-1 to make us push all the
  3509. * items
  3510. */
  3511. static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root
  3512. *root, struct btrfs_path *path, int min_data_size,
  3513. int data_size, int empty, u32 max_slot)
  3514. {
  3515. struct extent_buffer *right = path->nodes[0];
  3516. struct extent_buffer *left;
  3517. int slot;
  3518. int free_space;
  3519. u32 right_nritems;
  3520. int ret = 0;
  3521. slot = path->slots[1];
  3522. if (slot == 0)
  3523. return 1;
  3524. if (!path->nodes[1])
  3525. return 1;
  3526. right_nritems = btrfs_header_nritems(right);
  3527. if (right_nritems == 0)
  3528. return 1;
  3529. btrfs_assert_tree_locked(path->nodes[1]);
  3530. left = read_node_slot(root, path->nodes[1], slot - 1);
  3531. if (left == NULL)
  3532. return 1;
  3533. btrfs_tree_lock(left);
  3534. btrfs_set_lock_blocking(left);
  3535. free_space = btrfs_leaf_free_space(root, left);
  3536. if (free_space < data_size) {
  3537. ret = 1;
  3538. goto out;
  3539. }
  3540. /* cow and double check */
  3541. ret = btrfs_cow_block(trans, root, left,
  3542. path->nodes[1], slot - 1, &left);
  3543. if (ret) {
  3544. /* we hit -ENOSPC, but it isn't fatal here */
  3545. if (ret == -ENOSPC)
  3546. ret = 1;
  3547. goto out;
  3548. }
  3549. free_space = btrfs_leaf_free_space(root, left);
  3550. if (free_space < data_size) {
  3551. ret = 1;
  3552. goto out;
  3553. }
  3554. return __push_leaf_left(trans, root, path, min_data_size,
  3555. empty, left, free_space, right_nritems,
  3556. max_slot);
  3557. out:
  3558. btrfs_tree_unlock(left);
  3559. free_extent_buffer(left);
  3560. return ret;
  3561. }
  3562. /*
  3563. * split the path's leaf in two, making sure there is at least data_size
  3564. * available for the resulting leaf level of the path.
  3565. */
  3566. static noinline void copy_for_split(struct btrfs_trans_handle *trans,
  3567. struct btrfs_root *root,
  3568. struct btrfs_path *path,
  3569. struct extent_buffer *l,
  3570. struct extent_buffer *right,
  3571. int slot, int mid, int nritems)
  3572. {
  3573. int data_copy_size;
  3574. int rt_data_off;
  3575. int i;
  3576. struct btrfs_disk_key disk_key;
  3577. struct btrfs_map_token token;
  3578. btrfs_init_map_token(&token);
  3579. nritems = nritems - mid;
  3580. btrfs_set_header_nritems(right, nritems);
  3581. data_copy_size = btrfs_item_end_nr(l, mid) - leaf_data_end(root, l);
  3582. copy_extent_buffer(right, l, btrfs_item_nr_offset(0),
  3583. btrfs_item_nr_offset(mid),
  3584. nritems * sizeof(struct btrfs_item));
  3585. copy_extent_buffer(right, l,
  3586. btrfs_leaf_data(right) + BTRFS_LEAF_DATA_SIZE(root) -
  3587. data_copy_size, btrfs_leaf_data(l) +
  3588. leaf_data_end(root, l), data_copy_size);
  3589. rt_data_off = BTRFS_LEAF_DATA_SIZE(root) -
  3590. btrfs_item_end_nr(l, mid);
  3591. for (i = 0; i < nritems; i++) {
  3592. struct btrfs_item *item = btrfs_item_nr(i);
  3593. u32 ioff;
  3594. ioff = btrfs_token_item_offset(right, item, &token);
  3595. btrfs_set_token_item_offset(right, item,
  3596. ioff + rt_data_off, &token);
  3597. }
  3598. btrfs_set_header_nritems(l, mid);
  3599. btrfs_item_key(right, &disk_key, 0);
  3600. insert_ptr(trans, root, path, &disk_key, right->start,
  3601. path->slots[1] + 1, 1);
  3602. btrfs_mark_buffer_dirty(right);
  3603. btrfs_mark_buffer_dirty(l);
  3604. BUG_ON(path->slots[0] != slot);
  3605. if (mid <= slot) {
  3606. btrfs_tree_unlock(path->nodes[0]);
  3607. free_extent_buffer(path->nodes[0]);
  3608. path->nodes[0] = right;
  3609. path->slots[0] -= mid;
  3610. path->slots[1] += 1;
  3611. } else {
  3612. btrfs_tree_unlock(right);
  3613. free_extent_buffer(right);
  3614. }
  3615. BUG_ON(path->slots[0] < 0);
  3616. }
  3617. /*
  3618. * double splits happen when we need to insert a big item in the middle
  3619. * of a leaf. A double split can leave us with 3 mostly empty leaves:
 * leaf: [ slots 0 - N] [ our target ] [ N + 1 - total in leaf ]
 *          A                 B                 C
  3622. *
  3623. * We avoid this by trying to push the items on either side of our target
  3624. * into the adjacent leaves. If all goes well we can avoid the double split
  3625. * completely.
  3626. */
  3627. static noinline int push_for_double_split(struct btrfs_trans_handle *trans,
  3628. struct btrfs_root *root,
  3629. struct btrfs_path *path,
  3630. int data_size)
  3631. {
  3632. int ret;
  3633. int progress = 0;
  3634. int slot;
  3635. u32 nritems;
  3636. int space_needed = data_size;
  3637. slot = path->slots[0];
  3638. if (slot < btrfs_header_nritems(path->nodes[0]))
  3639. space_needed -= btrfs_leaf_free_space(root, path->nodes[0]);
  3640. /*
  3641. * try to push all the items after our slot into the
  3642. * right leaf
  3643. */
  3644. ret = push_leaf_right(trans, root, path, 1, space_needed, 0, slot);
  3645. if (ret < 0)
  3646. return ret;
  3647. if (ret == 0)
  3648. progress++;
  3649. nritems = btrfs_header_nritems(path->nodes[0]);
  3650. /*
  3651. * our goal is to get our slot at the start or end of a leaf. If
  3652. * we've done so we're done
  3653. */
  3654. if (path->slots[0] == 0 || path->slots[0] == nritems)
  3655. return 0;
  3656. if (btrfs_leaf_free_space(root, path->nodes[0]) >= data_size)
  3657. return 0;
  3658. /* try to push all the items before our slot into the next leaf */
  3659. slot = path->slots[0];
  3660. ret = push_leaf_left(trans, root, path, 1, space_needed, 0, slot);
  3661. if (ret < 0)
  3662. return ret;
  3663. if (ret == 0)
  3664. progress++;
  3665. if (progress)
  3666. return 0;
  3667. return 1;
  3668. }
  3669. /*
  3670. * split the path's leaf in two, making sure there is at least data_size
  3671. * available for the resulting leaf level of the path.
  3672. *
  3673. * returns 0 if all went well and < 0 on failure.
  3674. */
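/*
 * Strategy: first try to push items into the left/right siblings so no new
 * leaf is needed at all. Otherwise pick a split point; split == 0 means the
 * key to insert falls at one end of the leaf, so instead of moving items we
 * hand back a brand-new empty leaf for it, and split == 2 means one split is
 * not enough and we go around again (at most once, see num_doubles).
 */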
  3675. static noinline int split_leaf(struct btrfs_trans_handle *trans,
  3676. struct btrfs_root *root,
  3677. struct btrfs_key *ins_key,
  3678. struct btrfs_path *path, int data_size,
  3679. int extend)
  3680. {
  3681. struct btrfs_disk_key disk_key;
  3682. struct extent_buffer *l;
  3683. u32 nritems;
  3684. int mid;
  3685. int slot;
  3686. struct extent_buffer *right;
  3687. struct btrfs_fs_info *fs_info = root->fs_info;
  3688. int ret = 0;
  3689. int wret;
  3690. int split;
  3691. int num_doubles = 0;
  3692. int tried_avoid_double = 0;
  3693. l = path->nodes[0];
  3694. slot = path->slots[0];
  3695. if (extend && data_size + btrfs_item_size_nr(l, slot) +
  3696. sizeof(struct btrfs_item) > BTRFS_LEAF_DATA_SIZE(root))
  3697. return -EOVERFLOW;
  3698. /* first try to make some room by pushing left and right */
  3699. if (data_size && path->nodes[1]) {
  3700. int space_needed = data_size;
  3701. if (slot < btrfs_header_nritems(l))
  3702. space_needed -= btrfs_leaf_free_space(root, l);
  3703. wret = push_leaf_right(trans, root, path, space_needed,
  3704. space_needed, 0, 0);
  3705. if (wret < 0)
  3706. return wret;
  3707. if (wret) {
  3708. wret = push_leaf_left(trans, root, path, space_needed,
  3709. space_needed, 0, (u32)-1);
  3710. if (wret < 0)
  3711. return wret;
  3712. }
  3713. l = path->nodes[0];
  3714. /* did the pushes work? */
  3715. if (btrfs_leaf_free_space(root, l) >= data_size)
  3716. return 0;
  3717. }
  3718. if (!path->nodes[1]) {
  3719. ret = insert_new_root(trans, root, path, 1);
  3720. if (ret)
  3721. return ret;
  3722. }
  3723. again:
  3724. split = 1;
  3725. l = path->nodes[0];
  3726. slot = path->slots[0];
  3727. nritems = btrfs_header_nritems(l);
  3728. mid = (nritems + 1) / 2;
  3729. if (mid <= slot) {
  3730. if (nritems == 1 ||
  3731. leaf_space_used(l, mid, nritems - mid) + data_size >
  3732. BTRFS_LEAF_DATA_SIZE(root)) {
  3733. if (slot >= nritems) {
  3734. split = 0;
  3735. } else {
  3736. mid = slot;
  3737. if (mid != nritems &&
  3738. leaf_space_used(l, mid, nritems - mid) +
  3739. data_size > BTRFS_LEAF_DATA_SIZE(root)) {
  3740. if (data_size && !tried_avoid_double)
  3741. goto push_for_double;
  3742. split = 2;
  3743. }
  3744. }
  3745. }
  3746. } else {
  3747. if (leaf_space_used(l, 0, mid) + data_size >
  3748. BTRFS_LEAF_DATA_SIZE(root)) {
  3749. if (!extend && data_size && slot == 0) {
  3750. split = 0;
  3751. } else if ((extend || !data_size) && slot == 0) {
  3752. mid = 1;
  3753. } else {
  3754. mid = slot;
  3755. if (mid != nritems &&
  3756. leaf_space_used(l, mid, nritems - mid) +
  3757. data_size > BTRFS_LEAF_DATA_SIZE(root)) {
  3758. if (data_size && !tried_avoid_double)
  3759. goto push_for_double;
  3760. split = 2;
  3761. }
  3762. }
  3763. }
  3764. }
  3765. if (split == 0)
  3766. btrfs_cpu_key_to_disk(&disk_key, ins_key);
  3767. else
  3768. btrfs_item_key(l, &disk_key, mid);
  3769. right = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
  3770. &disk_key, 0, l->start, 0);
  3771. if (IS_ERR(right))
  3772. return PTR_ERR(right);
  3773. root_add_used(root, root->nodesize);
  3774. memset_extent_buffer(right, 0, 0, sizeof(struct btrfs_header));
  3775. btrfs_set_header_bytenr(right, right->start);
  3776. btrfs_set_header_generation(right, trans->transid);
  3777. btrfs_set_header_backref_rev(right, BTRFS_MIXED_BACKREF_REV);
  3778. btrfs_set_header_owner(right, root->root_key.objectid);
  3779. btrfs_set_header_level(right, 0);
  3780. write_extent_buffer(right, fs_info->fsid,
  3781. btrfs_header_fsid(), BTRFS_FSID_SIZE);
  3782. write_extent_buffer(right, fs_info->chunk_tree_uuid,
  3783. btrfs_header_chunk_tree_uuid(right),
  3784. BTRFS_UUID_SIZE);
  3785. if (split == 0) {
  3786. if (mid <= slot) {
  3787. btrfs_set_header_nritems(right, 0);
  3788. insert_ptr(trans, root, path, &disk_key, right->start,
  3789. path->slots[1] + 1, 1);
  3790. btrfs_tree_unlock(path->nodes[0]);
  3791. free_extent_buffer(path->nodes[0]);
  3792. path->nodes[0] = right;
  3793. path->slots[0] = 0;
  3794. path->slots[1] += 1;
  3795. } else {
  3796. btrfs_set_header_nritems(right, 0);
  3797. insert_ptr(trans, root, path, &disk_key, right->start,
  3798. path->slots[1], 1);
  3799. btrfs_tree_unlock(path->nodes[0]);
  3800. free_extent_buffer(path->nodes[0]);
  3801. path->nodes[0] = right;
  3802. path->slots[0] = 0;
  3803. if (path->slots[1] == 0)
  3804. fixup_low_keys(fs_info, path, &disk_key, 1);
  3805. }
  3806. btrfs_mark_buffer_dirty(right);
  3807. return ret;
  3808. }
  3809. copy_for_split(trans, root, path, l, right, slot, mid, nritems);
  3810. if (split == 2) {
  3811. BUG_ON(num_doubles != 0);
  3812. num_doubles++;
  3813. goto again;
  3814. }
  3815. return 0;
  3816. push_for_double:
  3817. push_for_double_split(trans, root, path, data_size);
  3818. tried_avoid_double = 1;
  3819. if (btrfs_leaf_free_space(root, path->nodes[0]) >= data_size)
  3820. return 0;
  3821. goto again;
  3822. }
  3823. static noinline int setup_leaf_for_split(struct btrfs_trans_handle *trans,
  3824. struct btrfs_root *root,
  3825. struct btrfs_path *path, int ins_len)
  3826. {
  3827. struct btrfs_key key;
  3828. struct extent_buffer *leaf;
  3829. struct btrfs_file_extent_item *fi;
  3830. u64 extent_len = 0;
  3831. u32 item_size;
  3832. int ret;
  3833. leaf = path->nodes[0];
  3834. btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
  3835. BUG_ON(key.type != BTRFS_EXTENT_DATA_KEY &&
  3836. key.type != BTRFS_EXTENT_CSUM_KEY);
  3837. if (btrfs_leaf_free_space(root, leaf) >= ins_len)
  3838. return 0;
  3839. item_size = btrfs_item_size_nr(leaf, path->slots[0]);
  3840. if (key.type == BTRFS_EXTENT_DATA_KEY) {
  3841. fi = btrfs_item_ptr(leaf, path->slots[0],
  3842. struct btrfs_file_extent_item);
  3843. extent_len = btrfs_file_extent_num_bytes(leaf, fi);
  3844. }
  3845. btrfs_release_path(path);
  3846. path->keep_locks = 1;
  3847. path->search_for_split = 1;
  3848. ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
  3849. path->search_for_split = 0;
  3850. if (ret > 0)
  3851. ret = -EAGAIN;
  3852. if (ret < 0)
  3853. goto err;
  3854. ret = -EAGAIN;
  3855. leaf = path->nodes[0];
  3856. /* if our item isn't there, return now */
  3857. if (item_size != btrfs_item_size_nr(leaf, path->slots[0]))
  3858. goto err;
  3859. /* the leaf has changed, it now has room. return now */
  3860. if (btrfs_leaf_free_space(root, path->nodes[0]) >= ins_len)
  3861. goto err;
  3862. if (key.type == BTRFS_EXTENT_DATA_KEY) {
  3863. fi = btrfs_item_ptr(leaf, path->slots[0],
  3864. struct btrfs_file_extent_item);
  3865. if (extent_len != btrfs_file_extent_num_bytes(leaf, fi))
  3866. goto err;
  3867. }
  3868. btrfs_set_path_blocking(path);
  3869. ret = split_leaf(trans, root, &key, path, ins_len, 1);
  3870. if (ret)
  3871. goto err;
  3872. path->keep_locks = 0;
  3873. btrfs_unlock_up_safe(path, 1);
  3874. return 0;
  3875. err:
  3876. path->keep_locks = 0;
  3877. return ret;
  3878. }
  3879. static noinline int split_item(struct btrfs_trans_handle *trans,
  3880. struct btrfs_root *root,
  3881. struct btrfs_path *path,
  3882. struct btrfs_key *new_key,
  3883. unsigned long split_offset)
  3884. {
  3885. struct extent_buffer *leaf;
  3886. struct btrfs_item *item;
  3887. struct btrfs_item *new_item;
  3888. int slot;
  3889. char *buf;
  3890. u32 nritems;
  3891. u32 item_size;
  3892. u32 orig_offset;
  3893. struct btrfs_disk_key disk_key;
  3894. leaf = path->nodes[0];
  3895. BUG_ON(btrfs_leaf_free_space(root, leaf) < sizeof(struct btrfs_item));
  3896. btrfs_set_path_blocking(path);
  3897. item = btrfs_item_nr(path->slots[0]);
  3898. orig_offset = btrfs_item_offset(leaf, item);
  3899. item_size = btrfs_item_size(leaf, item);
  3900. buf = kmalloc(item_size, GFP_NOFS);
  3901. if (!buf)
  3902. return -ENOMEM;
  3903. read_extent_buffer(leaf, buf, btrfs_item_ptr_offset(leaf,
  3904. path->slots[0]), item_size);
  3905. slot = path->slots[0] + 1;
  3906. nritems = btrfs_header_nritems(leaf);
  3907. if (slot != nritems) {
  3908. /* shift the items */
  3909. memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + 1),
  3910. btrfs_item_nr_offset(slot),
  3911. (nritems - slot) * sizeof(struct btrfs_item));
  3912. }
  3913. btrfs_cpu_key_to_disk(&disk_key, new_key);
  3914. btrfs_set_item_key(leaf, &disk_key, slot);
  3915. new_item = btrfs_item_nr(slot);
  3916. btrfs_set_item_offset(leaf, new_item, orig_offset);
  3917. btrfs_set_item_size(leaf, new_item, item_size - split_offset);
  3918. btrfs_set_item_offset(leaf, item,
  3919. orig_offset + item_size - split_offset);
  3920. btrfs_set_item_size(leaf, item, split_offset);
  3921. btrfs_set_header_nritems(leaf, nritems + 1);
  3922. /* write the data for the start of the original item */
  3923. write_extent_buffer(leaf, buf,
  3924. btrfs_item_ptr_offset(leaf, path->slots[0]),
  3925. split_offset);
  3926. /* write the data for the new item */
  3927. write_extent_buffer(leaf, buf + split_offset,
  3928. btrfs_item_ptr_offset(leaf, slot),
  3929. item_size - split_offset);
  3930. btrfs_mark_buffer_dirty(leaf);
  3931. BUG_ON(btrfs_leaf_free_space(root, leaf) < 0);
  3932. kfree(buf);
  3933. return 0;
  3934. }
  3935. /*
  3936. * This function splits a single item into two items,
  3937. * giving 'new_key' to the new item and splitting the
  3938. * old one at split_offset (from the start of the item).
  3939. *
  3940. * The path may be released by this operation. After
  3941. * the split, the path is pointing to the old item. The
  3942. * new item is going to be in the same node as the old one.
  3943. *
 * Note, the item being split must be small enough to live alone on
 * a tree block with room for one extra struct btrfs_item
  3946. *
  3947. * This allows us to split the item in place, keeping a lock on the
  3948. * leaf the entire time.
  3949. */
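/*
 * Layout after the split, for an item of item_size bytes split at
 * split_offset:
 *
 *	old item (original key): bytes [0, split_offset)
 *	new item (new_key):      bytes [split_offset, item_size)
 *
 * Both items stay in the same leaf; only the item headers and data are
 * shuffled around.
 */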
  3950. int btrfs_split_item(struct btrfs_trans_handle *trans,
  3951. struct btrfs_root *root,
  3952. struct btrfs_path *path,
  3953. struct btrfs_key *new_key,
  3954. unsigned long split_offset)
  3955. {
  3956. int ret;
  3957. ret = setup_leaf_for_split(trans, root, path,
  3958. sizeof(struct btrfs_item));
  3959. if (ret)
  3960. return ret;
  3961. ret = split_item(trans, root, path, new_key, split_offset);
  3962. return ret;
  3963. }
  3964. /*
 * This function duplicates an item, giving 'new_key' to the new item.
 * It guarantees both items live in the same tree leaf and the new item
 * is contiguous with the original item.
 *
 * This allows us to split a file extent in place, keeping a lock on the
  3970. * leaf the entire time.
  3971. */
  3972. int btrfs_duplicate_item(struct btrfs_trans_handle *trans,
  3973. struct btrfs_root *root,
  3974. struct btrfs_path *path,
  3975. struct btrfs_key *new_key)
  3976. {
  3977. struct extent_buffer *leaf;
  3978. int ret;
  3979. u32 item_size;
  3980. leaf = path->nodes[0];
  3981. item_size = btrfs_item_size_nr(leaf, path->slots[0]);
  3982. ret = setup_leaf_for_split(trans, root, path,
  3983. item_size + sizeof(struct btrfs_item));
  3984. if (ret)
  3985. return ret;
  3986. path->slots[0]++;
  3987. setup_items_for_insert(root, path, new_key, &item_size,
  3988. item_size, item_size +
  3989. sizeof(struct btrfs_item), 1);
  3990. leaf = path->nodes[0];
  3991. memcpy_extent_buffer(leaf,
  3992. btrfs_item_ptr_offset(leaf, path->slots[0]),
  3993. btrfs_item_ptr_offset(leaf, path->slots[0] - 1),
  3994. item_size);
  3995. return 0;
  3996. }
  3997. /*
  3998. * make the item pointed to by the path smaller. new_size indicates
  3999. * how small to make it, and from_end tells us if we just chop bytes
  4000. * off the end of the item or if we shift the item to chop bytes off
  4001. * the front.
  4002. */
  4003. void btrfs_truncate_item(struct btrfs_root *root, struct btrfs_path *path,
  4004. u32 new_size, int from_end)
  4005. {
  4006. int slot;
  4007. struct extent_buffer *leaf;
  4008. struct btrfs_item *item;
  4009. u32 nritems;
  4010. unsigned int data_end;
  4011. unsigned int old_data_start;
  4012. unsigned int old_size;
  4013. unsigned int size_diff;
  4014. int i;
  4015. struct btrfs_map_token token;
  4016. btrfs_init_map_token(&token);
  4017. leaf = path->nodes[0];
  4018. slot = path->slots[0];
  4019. old_size = btrfs_item_size_nr(leaf, slot);
  4020. if (old_size == new_size)
  4021. return;
  4022. nritems = btrfs_header_nritems(leaf);
  4023. data_end = leaf_data_end(root, leaf);
  4024. old_data_start = btrfs_item_offset_nr(leaf, slot);
  4025. size_diff = old_size - new_size;
  4026. BUG_ON(slot < 0);
  4027. BUG_ON(slot >= nritems);
  4028. /*
  4029. * item0..itemN ... dataN.offset..dataN.size .. data0.size
  4030. */
  4031. /* first correct the data pointers */
  4032. for (i = slot; i < nritems; i++) {
  4033. u32 ioff;
  4034. item = btrfs_item_nr(i);
  4035. ioff = btrfs_token_item_offset(leaf, item, &token);
  4036. btrfs_set_token_item_offset(leaf, item,
  4037. ioff + size_diff, &token);
  4038. }
  4039. /* shift the data */
  4040. if (from_end) {
  4041. memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
  4042. data_end + size_diff, btrfs_leaf_data(leaf) +
  4043. data_end, old_data_start + new_size - data_end);
  4044. } else {
  4045. struct btrfs_disk_key disk_key;
  4046. u64 offset;
  4047. btrfs_item_key(leaf, &disk_key, slot);
  4048. if (btrfs_disk_key_type(&disk_key) == BTRFS_EXTENT_DATA_KEY) {
  4049. unsigned long ptr;
  4050. struct btrfs_file_extent_item *fi;
  4051. fi = btrfs_item_ptr(leaf, slot,
  4052. struct btrfs_file_extent_item);
  4053. fi = (struct btrfs_file_extent_item *)(
  4054. (unsigned long)fi - size_diff);
  4055. if (btrfs_file_extent_type(leaf, fi) ==
  4056. BTRFS_FILE_EXTENT_INLINE) {
  4057. ptr = btrfs_item_ptr_offset(leaf, slot);
  4058. memmove_extent_buffer(leaf, ptr,
  4059. (unsigned long)fi,
  4060. BTRFS_FILE_EXTENT_INLINE_DATA_START);
  4061. }
  4062. }
  4063. memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
  4064. data_end + size_diff, btrfs_leaf_data(leaf) +
  4065. data_end, old_data_start - data_end);
  4066. offset = btrfs_disk_key_offset(&disk_key);
  4067. btrfs_set_disk_key_offset(&disk_key, offset + size_diff);
  4068. btrfs_set_item_key(leaf, &disk_key, slot);
  4069. if (slot == 0)
  4070. fixup_low_keys(root->fs_info, path, &disk_key, 1);
  4071. }
  4072. item = btrfs_item_nr(slot);
  4073. btrfs_set_item_size(leaf, item, new_size);
  4074. btrfs_mark_buffer_dirty(leaf);
  4075. if (btrfs_leaf_free_space(root, leaf) < 0) {
  4076. btrfs_print_leaf(root, leaf);
  4077. BUG();
  4078. }
  4079. }
  4080. /*
  4081. * make the item pointed to by the path bigger, data_size is the added size.
  4082. */
  4083. void btrfs_extend_item(struct btrfs_root *root, struct btrfs_path *path,
  4084. u32 data_size)
  4085. {
  4086. int slot;
  4087. struct extent_buffer *leaf;
  4088. struct btrfs_item *item;
  4089. u32 nritems;
  4090. unsigned int data_end;
  4091. unsigned int old_data;
  4092. unsigned int old_size;
  4093. int i;
  4094. struct btrfs_map_token token;
  4095. btrfs_init_map_token(&token);
  4096. leaf = path->nodes[0];
  4097. nritems = btrfs_header_nritems(leaf);
  4098. data_end = leaf_data_end(root, leaf);
  4099. if (btrfs_leaf_free_space(root, leaf) < data_size) {
  4100. btrfs_print_leaf(root, leaf);
  4101. BUG();
  4102. }
  4103. slot = path->slots[0];
  4104. old_data = btrfs_item_end_nr(leaf, slot);
  4105. BUG_ON(slot < 0);
  4106. if (slot >= nritems) {
  4107. btrfs_print_leaf(root, leaf);
  4108. btrfs_crit(root->fs_info, "slot %d too large, nritems %d",
  4109. slot, nritems);
  4110. BUG_ON(1);
  4111. }
  4112. /*
  4113. * item0..itemN ... dataN.offset..dataN.size .. data0.size
  4114. */
  4115. /* first correct the data pointers */
  4116. for (i = slot; i < nritems; i++) {
  4117. u32 ioff;
  4118. item = btrfs_item_nr(i);
  4119. ioff = btrfs_token_item_offset(leaf, item, &token);
  4120. btrfs_set_token_item_offset(leaf, item,
  4121. ioff - data_size, &token);
  4122. }
  4123. /* shift the data */
  4124. memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
  4125. data_end - data_size, btrfs_leaf_data(leaf) +
  4126. data_end, old_data - data_end);
  4127. data_end = old_data;
  4128. old_size = btrfs_item_size_nr(leaf, slot);
  4129. item = btrfs_item_nr(slot);
  4130. btrfs_set_item_size(leaf, item, old_size + data_size);
  4131. btrfs_mark_buffer_dirty(leaf);
  4132. if (btrfs_leaf_free_space(root, leaf) < 0) {
  4133. btrfs_print_leaf(root, leaf);
  4134. BUG();
  4135. }
  4136. }
  4137. /*
  4138. * this is a helper for btrfs_insert_empty_items, the main goal here is
  4139. * to save stack depth by doing the bulk of the work in a function
  4140. * that doesn't call btrfs_search_slot
  4141. */
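/*
 * cpu_key and data_size are arrays of nr entries; total_data is the sum of
 * data_size[] and total_size is total_data plus nr item headers. The path
 * must already point at the slot where the first new item goes (usually set
 * up by btrfs_search_slot returning 1).
 */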
  4142. void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *path,
  4143. struct btrfs_key *cpu_key, u32 *data_size,
  4144. u32 total_data, u32 total_size, int nr)
  4145. {
  4146. struct btrfs_item *item;
  4147. int i;
  4148. u32 nritems;
  4149. unsigned int data_end;
  4150. struct btrfs_disk_key disk_key;
  4151. struct extent_buffer *leaf;
  4152. int slot;
  4153. struct btrfs_map_token token;
  4154. if (path->slots[0] == 0) {
  4155. btrfs_cpu_key_to_disk(&disk_key, cpu_key);
  4156. fixup_low_keys(root->fs_info, path, &disk_key, 1);
  4157. }
  4158. btrfs_unlock_up_safe(path, 1);
  4159. btrfs_init_map_token(&token);
  4160. leaf = path->nodes[0];
  4161. slot = path->slots[0];
  4162. nritems = btrfs_header_nritems(leaf);
  4163. data_end = leaf_data_end(root, leaf);
  4164. if (btrfs_leaf_free_space(root, leaf) < total_size) {
  4165. btrfs_print_leaf(root, leaf);
  4166. btrfs_crit(root->fs_info, "not enough freespace need %u have %d",
  4167. total_size, btrfs_leaf_free_space(root, leaf));
  4168. BUG();
  4169. }
  4170. if (slot != nritems) {
  4171. unsigned int old_data = btrfs_item_end_nr(leaf, slot);
  4172. if (old_data < data_end) {
  4173. btrfs_print_leaf(root, leaf);
  4174. btrfs_crit(root->fs_info, "slot %d old_data %d data_end %d",
  4175. slot, old_data, data_end);
  4176. BUG_ON(1);
  4177. }
  4178. /*
  4179. * item0..itemN ... dataN.offset..dataN.size .. data0.size
  4180. */
  4181. /* first correct the data pointers */
  4182. for (i = slot; i < nritems; i++) {
  4183. u32 ioff;
			item = btrfs_item_nr(i);
  4185. ioff = btrfs_token_item_offset(leaf, item, &token);
  4186. btrfs_set_token_item_offset(leaf, item,
  4187. ioff - total_data, &token);
  4188. }
  4189. /* shift the items */
  4190. memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + nr),
  4191. btrfs_item_nr_offset(slot),
  4192. (nritems - slot) * sizeof(struct btrfs_item));
  4193. /* shift the data */
  4194. memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
  4195. data_end - total_data, btrfs_leaf_data(leaf) +
  4196. data_end, old_data - data_end);
  4197. data_end = old_data;
  4198. }
  4199. /* setup the item for the new data */
  4200. for (i = 0; i < nr; i++) {
  4201. btrfs_cpu_key_to_disk(&disk_key, cpu_key + i);
  4202. btrfs_set_item_key(leaf, &disk_key, slot + i);
  4203. item = btrfs_item_nr(slot + i);
  4204. btrfs_set_token_item_offset(leaf, item,
  4205. data_end - data_size[i], &token);
  4206. data_end -= data_size[i];
  4207. btrfs_set_token_item_size(leaf, item, data_size[i], &token);
  4208. }
  4209. btrfs_set_header_nritems(leaf, nritems + nr);
  4210. btrfs_mark_buffer_dirty(leaf);
  4211. if (btrfs_leaf_free_space(root, leaf) < 0) {
  4212. btrfs_print_leaf(root, leaf);
  4213. BUG();
  4214. }
  4215. }
  4216. /*
  4217. * Given a key and some data, insert items into the tree.
  4218. * This does all the path init required, making room in the tree if needed.
  4219. */
  4220. int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
  4221. struct btrfs_root *root,
  4222. struct btrfs_path *path,
  4223. struct btrfs_key *cpu_key, u32 *data_size,
  4224. int nr)
  4225. {
  4226. int ret = 0;
  4227. int slot;
  4228. int i;
  4229. u32 total_size = 0;
  4230. u32 total_data = 0;
  4231. for (i = 0; i < nr; i++)
  4232. total_data += data_size[i];
  4233. total_size = total_data + (nr * sizeof(struct btrfs_item));
  4234. ret = btrfs_search_slot(trans, root, cpu_key, path, total_size, 1);
  4235. if (ret == 0)
  4236. return -EEXIST;
  4237. if (ret < 0)
  4238. return ret;
  4239. slot = path->slots[0];
  4240. BUG_ON(slot < 0);
  4241. setup_items_for_insert(root, path, cpu_key, data_size,
  4242. total_data, total_size, nr);
  4243. return 0;
  4244. }
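
/*
 * Example sketch: one way a caller might use btrfs_insert_empty_items() to
 * reserve room for two adjacent items and then fill in their payloads.
 * The "example_*" names, the item type parameter and the u64 payloads are
 * hypothetical; it also assumes no items with these keys exist yet.
 */
static int example_insert_two_items(struct btrfs_trans_handle *example_trans,
				    struct btrfs_root *example_root,
				    u64 objectid, u8 type)
{
	struct btrfs_path *path;
	struct btrfs_key keys[2];
	u32 sizes[2] = { sizeof(u64), sizeof(u64) };
	u64 vals[2] = { 1, 2 };
	unsigned long ptr;
	int i;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* two keys that sort consecutively in the tree */
	for (i = 0; i < 2; i++) {
		keys[i].objectid = objectid;
		keys[i].type = type;
		keys[i].offset = i;
	}

	ret = btrfs_insert_empty_items(example_trans, example_root, path,
				       keys, sizes, 2);
	if (ret)
		goto out;

	/* path->slots[0] now points at the first of the new items */
	for (i = 0; i < 2; i++) {
		ptr = btrfs_item_ptr_offset(path->nodes[0],
					    path->slots[0] + i);
		write_extent_buffer(path->nodes[0], &vals[i], ptr,
				    sizeof(u64));
	}
	btrfs_mark_buffer_dirty(path->nodes[0]);
out:
	btrfs_free_path(path);
	return ret;
}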
/*
 * Given a key and some data, insert an item into the tree.
 * This does all the path init required, making room in the tree if needed.
 */
int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root
		      *root, struct btrfs_key *cpu_key, void *data, u32
		      data_size)
{
	int ret = 0;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	unsigned long ptr;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	ret = btrfs_insert_empty_item(trans, root, path, cpu_key, data_size);
	if (!ret) {
		leaf = path->nodes[0];
		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
		write_extent_buffer(leaf, data, ptr, data_size);
		btrfs_mark_buffer_dirty(leaf);
	}
	btrfs_free_path(path);
	return ret;
}
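
/*
 * Example sketch: inserting a small fixed-size blob with btrfs_insert_item(),
 * which allocates and frees its own path internally.  The key fields and the
 * blob are hypothetical; real callers insert properly formatted item
 * structures.
 */
static int example_insert_blob(struct btrfs_trans_handle *example_trans,
			       struct btrfs_root *example_root,
			       u64 objectid, u8 type, u64 offset,
			       void *blob, u32 blob_len)
{
	struct btrfs_key key;

	key.objectid = objectid;
	key.type = type;
	key.offset = offset;

	return btrfs_insert_item(example_trans, example_root, &key, blob,
				 blob_len);
}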
/*
 * delete the pointer from a given node.
 *
 * the tree should have been previously balanced so the deletion does not
 * empty a node.
 */
static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
		    int level, int slot)
{
	struct extent_buffer *parent = path->nodes[level];
	u32 nritems;
	int ret;

	nritems = btrfs_header_nritems(parent);
	if (slot != nritems - 1) {
		if (level)
			tree_mod_log_eb_move(root->fs_info, parent, slot,
					     slot + 1, nritems - slot - 1);
		memmove_extent_buffer(parent,
				      btrfs_node_key_ptr_offset(slot),
				      btrfs_node_key_ptr_offset(slot + 1),
				      sizeof(struct btrfs_key_ptr) *
				      (nritems - slot - 1));
	} else if (level) {
		ret = tree_mod_log_insert_key(root->fs_info, parent, slot,
					      MOD_LOG_KEY_REMOVE, GFP_NOFS);
		BUG_ON(ret < 0);
	}

	nritems--;
	btrfs_set_header_nritems(parent, nritems);
	if (nritems == 0 && parent == root->node) {
		BUG_ON(btrfs_header_level(root->node) != 1);
		/* just turn the root into a leaf and break */
		btrfs_set_header_level(root->node, 0);
	} else if (slot == 0) {
		struct btrfs_disk_key disk_key;

		btrfs_node_key(parent, &disk_key, 0);
		fixup_low_keys(root->fs_info, path, &disk_key, level + 1);
	}
	btrfs_mark_buffer_dirty(parent);
}

/*
 * a helper function to delete the leaf pointed to by path->slots[1] and
 * path->nodes[1].
 *
 * This deletes the pointer in path->nodes[1] and frees the leaf
 * block extent.  zero is returned if it all worked out, < 0 otherwise.
 *
 * The path must have already been setup for deleting the leaf, including
 * all the proper balancing.  path->nodes[1] must be locked.
 */
static noinline void btrfs_del_leaf(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    struct btrfs_path *path,
				    struct extent_buffer *leaf)
{
	WARN_ON(btrfs_header_generation(leaf) != trans->transid);
	del_ptr(root, path, 1, path->slots[1]);

	/*
	 * btrfs_free_extent is expensive, we want to make sure we
	 * aren't holding any locks when we call it
	 */
	btrfs_unlock_up_safe(path, 0);

	root_sub_used(root, leaf->len);

	extent_buffer_get(leaf);
	btrfs_free_tree_block(trans, root, leaf, 0, 1);
	free_extent_buffer_stale(leaf);
}

/*
 * delete the item at the leaf level in path.  If that empties
 * the leaf, remove it from the tree
 */
int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		    struct btrfs_path *path, int slot, int nr)
{
	struct extent_buffer *leaf;
	struct btrfs_item *item;
	u32 last_off;
	u32 dsize = 0;
	int ret = 0;
	int wret;
	int i;
	u32 nritems;
	struct btrfs_map_token token;

	btrfs_init_map_token(&token);

	leaf = path->nodes[0];
	last_off = btrfs_item_offset_nr(leaf, slot + nr - 1);

	for (i = 0; i < nr; i++)
		dsize += btrfs_item_size_nr(leaf, slot + i);

	nritems = btrfs_header_nritems(leaf);

	if (slot + nr != nritems) {
		int data_end = leaf_data_end(root, leaf);

		memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
				      data_end + dsize,
				      btrfs_leaf_data(leaf) + data_end,
				      last_off - data_end);

		for (i = slot + nr; i < nritems; i++) {
			u32 ioff;

			item = btrfs_item_nr(i);
			ioff = btrfs_token_item_offset(leaf, item, &token);
			btrfs_set_token_item_offset(leaf, item,
						    ioff + dsize, &token);
		}

		memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot),
				      btrfs_item_nr_offset(slot + nr),
				      sizeof(struct btrfs_item) *
				      (nritems - slot - nr));
	}
	btrfs_set_header_nritems(leaf, nritems - nr);
	nritems -= nr;

	/* delete the leaf if we've emptied it */
	if (nritems == 0) {
		if (leaf == root->node) {
			btrfs_set_header_level(leaf, 0);
		} else {
			btrfs_set_path_blocking(path);
			clean_tree_block(trans, root->fs_info, leaf);
			btrfs_del_leaf(trans, root, path, leaf);
		}
	} else {
		int used = leaf_space_used(leaf, 0, nritems);
		if (slot == 0) {
			struct btrfs_disk_key disk_key;

			btrfs_item_key(leaf, &disk_key, 0);
			fixup_low_keys(root->fs_info, path, &disk_key, 1);
		}

		/* delete the leaf if it is mostly empty */
		if (used < BTRFS_LEAF_DATA_SIZE(root) / 3) {
			/* push_leaf_left fixes the path.
			 * make sure the path still points to our leaf
			 * for possible call to del_ptr below
			 */
			slot = path->slots[1];
			extent_buffer_get(leaf);

			btrfs_set_path_blocking(path);
			wret = push_leaf_left(trans, root, path, 1, 1,
					      1, (u32)-1);
			if (wret < 0 && wret != -ENOSPC)
				ret = wret;

			if (path->nodes[0] == leaf &&
			    btrfs_header_nritems(leaf)) {
				wret = push_leaf_right(trans, root, path, 1,
						       1, 1, 0);
				if (wret < 0 && wret != -ENOSPC)
					ret = wret;
			}

			if (btrfs_header_nritems(leaf) == 0) {
				path->slots[1] = slot;
				btrfs_del_leaf(trans, root, path, leaf);
				free_extent_buffer(leaf);
				ret = 0;
			} else {
				/* if we're still in the path, make sure
				 * we're dirty.  Otherwise, one of the
				 * push_leaf functions must have already
				 * dirtied this buffer
				 */
				if (path->nodes[0] == leaf)
					btrfs_mark_buffer_dirty(leaf);
				free_extent_buffer(leaf);
			}
		} else {
			btrfs_mark_buffer_dirty(leaf);
		}
	}
	return ret;
}
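
/*
 * Example sketch: locating a single item with btrfs_search_slot() and
 * removing it with btrfs_del_items().  The key passed in is assumed to
 * exist; nr = 1 deletes just that item.  The "example_*" names are
 * hypothetical.
 */
static int example_delete_one_item(struct btrfs_trans_handle *example_trans,
				   struct btrfs_root *example_root,
				   struct btrfs_key *key)
{
	struct btrfs_path *path;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* ins_len = -1 signals a deletion, cow = 1 so the leaf is writable */
	ret = btrfs_search_slot(example_trans, example_root, key, path, -1, 1);
	if (ret > 0)
		ret = -ENOENT;
	if (ret == 0)
		ret = btrfs_del_items(example_trans, example_root, path,
				      path->slots[0], 1);

	btrfs_free_path(path);
	return ret;
}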
/*
 * search the tree again to find a leaf with lesser keys
 * returns 0 if it found something or 1 if there are no lesser leaves.
 * returns < 0 on io errors.
 *
 * This may release the path, and so you may lose any locks held at the
 * time you call it.
 */
int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path)
{
	struct btrfs_key key;
	struct btrfs_disk_key found_key;
	int ret;

	btrfs_item_key_to_cpu(path->nodes[0], &key, 0);

	if (key.offset > 0) {
		key.offset--;
	} else if (key.type > 0) {
		key.type--;
		key.offset = (u64)-1;
	} else if (key.objectid > 0) {
		key.objectid--;
		key.type = (u8)-1;
		key.offset = (u64)-1;
	} else {
		return 1;
	}

	btrfs_release_path(path);
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		return ret;
	btrfs_item_key(path->nodes[0], &found_key, 0);
	ret = comp_keys(&found_key, &key);
	/*
	 * We might have had an item with the previous key in the tree right
	 * before we released our path. And after we released our path, that
	 * item might have been pushed to the first slot (0) of the leaf we
	 * were holding due to a tree balance. Alternatively, an item with the
	 * previous key can exist as the only element of a leaf (big fat item).
	 * Therefore account for these 2 cases, so that our callers (like
	 * btrfs_previous_item) don't miss an existing item with a key matching
	 * the previous key we computed above.
	 */
	if (ret <= 0)
		return 0;
	return 1;
}
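
/*
 * Example sketch: stepping backwards by one item, dropping to the previous
 * leaf with btrfs_prev_leaf() when the current slot is already 0.  This
 * mirrors the loop body of btrfs_previous_item() further below; the
 * "example_*" names are hypothetical.
 */
static int example_step_back_one(struct btrfs_root *example_root,
				 struct btrfs_path *path)
{
	u32 nritems;
	int ret;

	if (path->slots[0] == 0) {
		ret = btrfs_prev_leaf(example_root, path);
		if (ret)
			return ret;	/* 1: no lesser leaf, < 0: error */
		nritems = btrfs_header_nritems(path->nodes[0]);
		if (nritems == 0)
			return 1;
		/* the search may leave the slot one past the last item */
		if (path->slots[0] == nritems)
			path->slots[0]--;
	} else {
		path->slots[0]--;
	}
	return 0;
}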
/*
 * A helper function to walk down the tree starting at min_key, and looking
 * for nodes or leaves that have a minimum transaction id.
 * This is used by the btree defrag code, and tree logging
 *
 * This does not cow, but it does stuff the starting key it finds back
 * into min_key, so you can call btrfs_search_slot with cow=1 on the
 * key and get a writable path.
 *
 * This does lock as it descends, and path->keep_locks should be set
 * to 1 by the caller.
 *
 * This honors path->lowest_level to prevent descent past a given level
 * of the tree.
 *
 * min_trans indicates the oldest transaction that you are interested
 * in walking through.  Any nodes or leaves older than min_trans are
 * skipped over (without reading them).
 *
 * returns zero if something useful was found, < 0 on error and 1 if there
 * was nothing in the tree that matched the search criteria.
 */
int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
			 struct btrfs_path *path,
			 u64 min_trans)
{
	struct extent_buffer *cur;
	struct btrfs_key found_key;
	int slot;
	int sret;
	u32 nritems;
	int level;
	int ret = 1;
	int keep_locks = path->keep_locks;

	path->keep_locks = 1;
again:
	cur = btrfs_read_lock_root_node(root);
	level = btrfs_header_level(cur);
	WARN_ON(path->nodes[level]);
	path->nodes[level] = cur;
	path->locks[level] = BTRFS_READ_LOCK;

	if (btrfs_header_generation(cur) < min_trans) {
		ret = 1;
		goto out;
	}
	while (1) {
		nritems = btrfs_header_nritems(cur);
		level = btrfs_header_level(cur);
		sret = bin_search(cur, min_key, level, &slot);

		/* at the lowest level, we're done, setup the path and exit */
		if (level == path->lowest_level) {
			if (slot >= nritems)
				goto find_next_key;
			ret = 0;
			path->slots[level] = slot;
			btrfs_item_key_to_cpu(cur, &found_key, slot);
			goto out;
		}
		if (sret && slot > 0)
			slot--;
		/*
		 * check this node pointer against the min_trans parameters.
		 * If it is too old, skip to the next one.
		 */
		while (slot < nritems) {
			u64 gen;

			gen = btrfs_node_ptr_generation(cur, slot);
			if (gen < min_trans) {
				slot++;
				continue;
			}
			break;
		}
find_next_key:
		/*
		 * we didn't find a candidate key in this node, walk forward
		 * and find another one
		 */
		if (slot >= nritems) {
			path->slots[level] = slot;
			btrfs_set_path_blocking(path);
			sret = btrfs_find_next_key(root, path, min_key, level,
						   min_trans);
			if (sret == 0) {
				btrfs_release_path(path);
				goto again;
			} else {
				goto out;
			}
		}
		/* save our key for returning back */
		btrfs_node_key_to_cpu(cur, &found_key, slot);
		path->slots[level] = slot;
		if (level == path->lowest_level) {
			ret = 0;
			goto out;
		}
		btrfs_set_path_blocking(path);
		cur = read_node_slot(root, cur, slot);
		BUG_ON(!cur); /* -ENOMEM */

		btrfs_tree_read_lock(cur);

		path->locks[level - 1] = BTRFS_READ_LOCK;
		path->nodes[level - 1] = cur;
		unlock_up(path, level, 1, 0, NULL);
		btrfs_clear_path_blocking(path, NULL, 0);
	}
out:
	path->keep_locks = keep_locks;
	if (ret == 0) {
		btrfs_unlock_up_safe(path, path->lowest_level + 1);
		btrfs_set_path_blocking(path);
		memcpy(min_key, &found_key, sizeof(found_key));
	}
	return ret;
}
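
/*
 * Example sketch: using btrfs_search_forward() to visit every leaf item in
 * blocks written in or after transaction min_trans, in the spirit of the
 * defrag and tree-log walkers.  The per-item work is left as a stub and the
 * "example_*" names are hypothetical.
 */
static int example_walk_newer_items(struct btrfs_root *example_root,
				    u64 min_trans)
{
	struct btrfs_path *path;
	struct btrfs_key min_key;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	min_key.objectid = 0;
	min_key.type = 0;
	min_key.offset = 0;

	while (1) {
		ret = btrfs_search_forward(example_root, &min_key, path,
					   min_trans);
		if (ret) {
			if (ret > 0)
				ret = 0;	/* nothing newer left */
			break;
		}

		/* min_key now holds the key of the item that was found */

		btrfs_release_path(path);

		/* advance past the item we just saw */
		if (min_key.offset < (u64)-1) {
			min_key.offset++;
		} else if (min_key.type < (u8)-1) {
			min_key.type++;
			min_key.offset = 0;
		} else if (min_key.objectid < (u64)-1) {
			min_key.objectid++;
			min_key.type = 0;
			min_key.offset = 0;
		} else {
			break;
		}
	}
	btrfs_free_path(path);
	return ret;
}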
static void tree_move_down(struct btrfs_root *root,
			   struct btrfs_path *path,
			   int *level, int root_level)
{
	BUG_ON(*level == 0);
	path->nodes[*level - 1] = read_node_slot(root, path->nodes[*level],
					path->slots[*level]);
	path->slots[*level - 1] = 0;
	(*level)--;
}

static int tree_move_next_or_upnext(struct btrfs_root *root,
				    struct btrfs_path *path,
				    int *level, int root_level)
{
	int ret = 0;
	int nritems;
	nritems = btrfs_header_nritems(path->nodes[*level]);

	path->slots[*level]++;

	while (path->slots[*level] >= nritems) {
		if (*level == root_level)
			return -1;

		/* move upnext */
		path->slots[*level] = 0;
		free_extent_buffer(path->nodes[*level]);
		path->nodes[*level] = NULL;
		(*level)++;
		path->slots[*level]++;

		nritems = btrfs_header_nritems(path->nodes[*level]);
		ret = 1;
	}
	return ret;
}

/*
 * Returns 1 if it had to move up and next. 0 is returned if it moved only next
 * or down.
 */
static int tree_advance(struct btrfs_root *root,
			struct btrfs_path *path,
			int *level, int root_level,
			int allow_down,
			struct btrfs_key *key)
{
	int ret;

	if (*level == 0 || !allow_down) {
		ret = tree_move_next_or_upnext(root, path, level, root_level);
	} else {
		tree_move_down(root, path, level, root_level);
		ret = 0;
	}
	if (ret >= 0) {
		if (*level == 0)
			btrfs_item_key_to_cpu(path->nodes[*level], key,
					path->slots[*level]);
		else
			btrfs_node_key_to_cpu(path->nodes[*level], key,
					path->slots[*level]);
	}
	return ret;
}

static int tree_compare_item(struct btrfs_root *left_root,
			     struct btrfs_path *left_path,
			     struct btrfs_path *right_path,
			     char *tmp_buf)
{
	int cmp;
	int len1, len2;
	unsigned long off1, off2;

	len1 = btrfs_item_size_nr(left_path->nodes[0], left_path->slots[0]);
	len2 = btrfs_item_size_nr(right_path->nodes[0], right_path->slots[0]);
	if (len1 != len2)
		return 1;

	off1 = btrfs_item_ptr_offset(left_path->nodes[0], left_path->slots[0]);
	off2 = btrfs_item_ptr_offset(right_path->nodes[0],
				     right_path->slots[0]);

	read_extent_buffer(left_path->nodes[0], tmp_buf, off1, len1);

	cmp = memcmp_extent_buffer(right_path->nodes[0], tmp_buf, off2, len1);
	if (cmp)
		return 1;
	return 0;
}

#define ADVANCE 1
#define ADVANCE_ONLY_NEXT -1

/*
 * This function compares two trees and calls the provided callback for
 * every changed/new/deleted item it finds.
 * If shared tree blocks are encountered, whole subtrees are skipped, making
 * the compare pretty fast on snapshotted subvolumes.
 *
 * This currently works on commit roots only. As commit roots are read only,
 * we don't do any locking. The commit roots are protected with transactions.
 * Transactions are ended and rejoined when a commit is tried in between.
 *
 * This function checks for modifications done to the trees while comparing.
 * If it detects a change, it aborts immediately.
 */
int btrfs_compare_trees(struct btrfs_root *left_root,
			struct btrfs_root *right_root,
			btrfs_changed_cb_t changed_cb, void *ctx)
{
	int ret;
	int cmp;
	struct btrfs_path *left_path = NULL;
	struct btrfs_path *right_path = NULL;
	struct btrfs_key left_key;
	struct btrfs_key right_key;
	char *tmp_buf = NULL;
	int left_root_level;
	int right_root_level;
	int left_level;
	int right_level;
	int left_end_reached;
	int right_end_reached;
	int advance_left;
	int advance_right;
	u64 left_blockptr;
	u64 right_blockptr;
	u64 left_gen;
	u64 right_gen;

	left_path = btrfs_alloc_path();
	if (!left_path) {
		ret = -ENOMEM;
		goto out;
	}
	right_path = btrfs_alloc_path();
	if (!right_path) {
		ret = -ENOMEM;
		goto out;
	}

	tmp_buf = kmalloc(left_root->nodesize, GFP_NOFS);
	if (!tmp_buf) {
		ret = -ENOMEM;
		goto out;
	}

	left_path->search_commit_root = 1;
	left_path->skip_locking = 1;
	right_path->search_commit_root = 1;
	right_path->skip_locking = 1;

	/*
	 * Strategy: Go to the first items of both trees. Then do
	 *
	 * If both trees are at level 0
	 *   Compare keys of current items
	 *     If left < right treat left item as new, advance left tree
	 *       and repeat
	 *     If left > right treat right item as deleted, advance right tree
	 *       and repeat
	 *     If left == right do deep compare of items, treat as changed if
	 *       needed, advance both trees and repeat
	 * If both trees are at the same level but not at level 0
	 *   Compare keys of current nodes/leaves
	 *     If left < right advance left tree and repeat
	 *     If left > right advance right tree and repeat
	 *     If left == right compare blockptrs of the next nodes/leaves
	 *       If they match advance both trees but stay at the same level
	 *         and repeat
	 *       If they don't match advance both trees while allowing to go
	 *         deeper and repeat
	 * If tree levels are different
	 *   Advance the tree that needs it and repeat
	 *
	 * Advancing a tree means:
	 *   If we are at level 0, try to go to the next slot. If that's not
	 *   possible, go one level up and repeat. Stop when we found a level
	 *   where we could go to the next slot. We may at this point be on a
	 *   node or a leaf.
	 *
	 *   If we are not at level 0 and not on shared tree blocks, go one
	 *   level deeper.
	 *
	 *   If we are not at level 0 and on shared tree blocks, go one slot to
	 *   the right if possible or go up and right.
	 */

	down_read(&left_root->fs_info->commit_root_sem);
	left_level = btrfs_header_level(left_root->commit_root);
	left_root_level = left_level;
	left_path->nodes[left_level] = left_root->commit_root;
	extent_buffer_get(left_path->nodes[left_level]);

	right_level = btrfs_header_level(right_root->commit_root);
	right_root_level = right_level;
	right_path->nodes[right_level] = right_root->commit_root;
	extent_buffer_get(right_path->nodes[right_level]);
	up_read(&left_root->fs_info->commit_root_sem);

	if (left_level == 0)
		btrfs_item_key_to_cpu(left_path->nodes[left_level],
				&left_key, left_path->slots[left_level]);
	else
		btrfs_node_key_to_cpu(left_path->nodes[left_level],
				&left_key, left_path->slots[left_level]);
	if (right_level == 0)
		btrfs_item_key_to_cpu(right_path->nodes[right_level],
				&right_key, right_path->slots[right_level]);
	else
		btrfs_node_key_to_cpu(right_path->nodes[right_level],
				&right_key, right_path->slots[right_level]);

	left_end_reached = right_end_reached = 0;
	advance_left = advance_right = 0;

	while (1) {
		if (advance_left && !left_end_reached) {
			ret = tree_advance(left_root, left_path, &left_level,
					left_root_level,
					advance_left != ADVANCE_ONLY_NEXT,
					&left_key);
			if (ret < 0)
				left_end_reached = ADVANCE;
			advance_left = 0;
		}
		if (advance_right && !right_end_reached) {
			ret = tree_advance(right_root, right_path, &right_level,
					right_root_level,
					advance_right != ADVANCE_ONLY_NEXT,
					&right_key);
			if (ret < 0)
				right_end_reached = ADVANCE;
			advance_right = 0;
		}

		if (left_end_reached && right_end_reached) {
			ret = 0;
			goto out;
		} else if (left_end_reached) {
			if (right_level == 0) {
				ret = changed_cb(left_root, right_root,
						left_path, right_path,
						&right_key,
						BTRFS_COMPARE_TREE_DELETED,
						ctx);
				if (ret < 0)
					goto out;
			}
			advance_right = ADVANCE;
			continue;
		} else if (right_end_reached) {
			if (left_level == 0) {
				ret = changed_cb(left_root, right_root,
						left_path, right_path,
						&left_key,
						BTRFS_COMPARE_TREE_NEW,
						ctx);
				if (ret < 0)
					goto out;
			}
			advance_left = ADVANCE;
			continue;
		}

		if (left_level == 0 && right_level == 0) {
			cmp = btrfs_comp_cpu_keys(&left_key, &right_key);
			if (cmp < 0) {
				ret = changed_cb(left_root, right_root,
						left_path, right_path,
						&left_key,
						BTRFS_COMPARE_TREE_NEW,
						ctx);
				if (ret < 0)
					goto out;
				advance_left = ADVANCE;
			} else if (cmp > 0) {
				ret = changed_cb(left_root, right_root,
						left_path, right_path,
						&right_key,
						BTRFS_COMPARE_TREE_DELETED,
						ctx);
				if (ret < 0)
					goto out;
				advance_right = ADVANCE;
			} else {
				enum btrfs_compare_tree_result result;

				WARN_ON(!extent_buffer_uptodate(left_path->nodes[0]));
				ret = tree_compare_item(left_root, left_path,
						right_path, tmp_buf);
				if (ret)
					result = BTRFS_COMPARE_TREE_CHANGED;
				else
					result = BTRFS_COMPARE_TREE_SAME;
				ret = changed_cb(left_root, right_root,
						 left_path, right_path,
						 &left_key, result, ctx);
				if (ret < 0)
					goto out;
				advance_left = ADVANCE;
				advance_right = ADVANCE;
			}
		} else if (left_level == right_level) {
			cmp = btrfs_comp_cpu_keys(&left_key, &right_key);
			if (cmp < 0) {
				advance_left = ADVANCE;
			} else if (cmp > 0) {
				advance_right = ADVANCE;
			} else {
				left_blockptr = btrfs_node_blockptr(
						left_path->nodes[left_level],
						left_path->slots[left_level]);
				right_blockptr = btrfs_node_blockptr(
						right_path->nodes[right_level],
						right_path->slots[right_level]);
				left_gen = btrfs_node_ptr_generation(
						left_path->nodes[left_level],
						left_path->slots[left_level]);
				right_gen = btrfs_node_ptr_generation(
						right_path->nodes[right_level],
						right_path->slots[right_level]);
				if (left_blockptr == right_blockptr &&
				    left_gen == right_gen) {
					/*
					 * As we're on a shared block, don't
					 * allow to go deeper.
					 */
					advance_left = ADVANCE_ONLY_NEXT;
					advance_right = ADVANCE_ONLY_NEXT;
				} else {
					advance_left = ADVANCE;
					advance_right = ADVANCE;
				}
			}
		} else if (left_level < right_level) {
			advance_right = ADVANCE;
		} else {
			advance_left = ADVANCE;
		}
	}

out:
	btrfs_free_path(left_path);
	btrfs_free_path(right_path);
	kfree(tmp_buf);
	return ret;
}
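
/*
 * Example sketch: a minimal btrfs_changed_cb_t callback plus a call to
 * btrfs_compare_trees(), in the style of the send code.  The callback only
 * counts differing items; the roots are assumed to be two subvolume roots
 * with valid commit roots, and the "example_*" names are hypothetical.
 */
static int example_changed_cb(struct btrfs_root *left_root,
			      struct btrfs_root *right_root,
			      struct btrfs_path *left_path,
			      struct btrfs_path *right_path,
			      struct btrfs_key *key,
			      enum btrfs_compare_tree_result result,
			      void *ctx)
{
	u64 *example_counter = ctx;

	if (result != BTRFS_COMPARE_TREE_SAME)
		(*example_counter)++;
	return 0;
}

static int example_count_changes(struct btrfs_root *left_root,
				 struct btrfs_root *right_root,
				 u64 *example_counter)
{
	*example_counter = 0;
	return btrfs_compare_trees(left_root, right_root,
				   example_changed_cb, example_counter);
}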
/*
 * this is similar to btrfs_next_leaf, but does not try to preserve
 * and fixup the path.  It looks for and returns the next key in the
 * tree based on the current path and the min_trans parameters.
 *
 * 0 is returned if another key is found, < 0 if there are any errors
 * and 1 is returned if there are no higher keys in the tree
 *
 * path->keep_locks should be set to 1 on the search made before
 * calling this function.
 */
int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path,
			struct btrfs_key *key, int level, u64 min_trans)
{
	int slot;
	struct extent_buffer *c;

	WARN_ON(!path->keep_locks);
	while (level < BTRFS_MAX_LEVEL) {
		if (!path->nodes[level])
			return 1;

		slot = path->slots[level] + 1;
		c = path->nodes[level];
next:
		if (slot >= btrfs_header_nritems(c)) {
			int ret;
			int orig_lowest;
			struct btrfs_key cur_key;
			if (level + 1 >= BTRFS_MAX_LEVEL ||
			    !path->nodes[level + 1])
				return 1;

			if (path->locks[level + 1]) {
				level++;
				continue;
			}

			slot = btrfs_header_nritems(c) - 1;
			if (level == 0)
				btrfs_item_key_to_cpu(c, &cur_key, slot);
			else
				btrfs_node_key_to_cpu(c, &cur_key, slot);

			orig_lowest = path->lowest_level;
			btrfs_release_path(path);
			path->lowest_level = level;
			ret = btrfs_search_slot(NULL, root, &cur_key, path,
						0, 0);
			path->lowest_level = orig_lowest;
			if (ret < 0)
				return ret;

			c = path->nodes[level];
			slot = path->slots[level];
			if (ret == 0)
				slot++;
			goto next;
		}

		if (level == 0)
			btrfs_item_key_to_cpu(c, key, slot);
		else {
			u64 gen = btrfs_node_ptr_generation(c, slot);

			if (gen < min_trans) {
				slot++;
				goto next;
			}
			btrfs_node_key_to_cpu(c, key, slot);
		}
		return 0;
	}
	return 1;
}
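
/*
 * Example sketch: peeking at the key that follows the current position with
 * btrfs_find_next_key().  As the comment above notes, the preceding search
 * is assumed to have been made with path->keep_locks set so the parent
 * nodes are still locked.  The "example_*" names are hypothetical.
 */
static int example_peek_next_key(struct btrfs_root *example_root,
				 struct btrfs_key *key,
				 struct btrfs_key *next_key)
{
	struct btrfs_path *path;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->keep_locks = 1;
	ret = btrfs_search_slot(NULL, example_root, key, path, 0, 0);
	if (ret < 0)
		goto out;

	/* level 0, min_trans 0: next leaf-level key, no generation filter */
	ret = btrfs_find_next_key(example_root, path, next_key, 0, 0);
out:
	btrfs_free_path(path);
	return ret;
}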
/*
 * search the tree again to find a leaf with greater keys
 * returns 0 if it found something or 1 if there are no greater leaves.
 * returns < 0 on io errors.
 */
int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path)
{
	return btrfs_next_old_leaf(root, path, 0);
}
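
/*
 * Example sketch: the common pattern for scanning every item of a tree from
 * a starting key, moving to the next leaf with btrfs_next_leaf() when the
 * current one is exhausted.  The per-item work is left as a stub and the
 * "example_*" names are hypothetical.
 */
static int example_scan_items(struct btrfs_root *example_root,
			      struct btrfs_key *start_key)
{
	struct btrfs_path *path;
	struct btrfs_key found_key;
	struct extent_buffer *leaf;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(NULL, example_root, start_key, path, 0, 0);
	if (ret < 0)
		goto out;

	while (1) {
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(example_root, path);
			if (ret) {
				if (ret > 0)
					ret = 0;	/* no more leaves */
				goto out;
			}
			continue;
		}
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

		/* ... examine the item for found_key here ... */

		path->slots[0]++;
	}
out:
	btrfs_free_path(path);
	return ret;
}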
int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path,
			u64 time_seq)
{
	int slot;
	int level;
	struct extent_buffer *c;
	struct extent_buffer *next;
	struct btrfs_key key;
	u32 nritems;
	int ret;
	int old_spinning = path->leave_spinning;
	int next_rw_lock = 0;

	nritems = btrfs_header_nritems(path->nodes[0]);
	if (nritems == 0)
		return 1;

	btrfs_item_key_to_cpu(path->nodes[0], &key, nritems - 1);
again:
	level = 1;
	next = NULL;
	next_rw_lock = 0;
	btrfs_release_path(path);

	path->keep_locks = 1;
	path->leave_spinning = 1;

	if (time_seq)
		ret = btrfs_search_old_slot(root, &key, path, time_seq);
	else
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	path->keep_locks = 0;

	if (ret < 0)
		return ret;

	nritems = btrfs_header_nritems(path->nodes[0]);
	/*
	 * by releasing the path above we dropped all our locks.  A balance
	 * could have added more items next to the key that used to be
	 * at the very end of the block.  So, check again here and
	 * advance the path if there are now more items available.
	 */
	if (nritems > 0 && path->slots[0] < nritems - 1) {
		if (ret == 0)
			path->slots[0]++;
		ret = 0;
		goto done;
	}
	/*
	 * So the above check misses one case:
	 * - after releasing the path above, someone has removed the item that
	 *   used to be at the very end of the block, and balance between leaves
	 *   gets another one with bigger key.offset to replace it.
	 *
	 * This one should be returned as well, or we can get leaf corruption
	 * later (esp. in __btrfs_drop_extents()).
	 *
	 * And a bit more explanation about this check,
	 * with ret > 0, the key isn't found, the path points to the slot
	 * where it should be inserted, so the path->slots[0] item must be the
	 * bigger one.
	 */
	if (nritems > 0 && ret > 0 && path->slots[0] == nritems - 1) {
		ret = 0;
		goto done;
	}

	while (level < BTRFS_MAX_LEVEL) {
		if (!path->nodes[level]) {
			ret = 1;
			goto done;
		}

		slot = path->slots[level] + 1;
		c = path->nodes[level];
		if (slot >= btrfs_header_nritems(c)) {
			level++;
			if (level == BTRFS_MAX_LEVEL) {
				ret = 1;
				goto done;
			}
			continue;
		}

		if (next) {
			btrfs_tree_unlock_rw(next, next_rw_lock);
			free_extent_buffer(next);
		}

		next = c;
		next_rw_lock = path->locks[level];
		ret = read_block_for_search(NULL, root, path, &next, level,
					    slot, &key, 0);
		if (ret == -EAGAIN)
			goto again;

		if (ret < 0) {
			btrfs_release_path(path);
			goto done;
		}

		if (!path->skip_locking) {
			ret = btrfs_try_tree_read_lock(next);
			if (!ret && time_seq) {
				/*
				 * If we don't get the lock, we may be racing
				 * with push_leaf_left, holding that lock while
				 * itself waiting for the leaf we've currently
				 * locked. To solve this situation, we give up
				 * on our lock and cycle.
				 */
				free_extent_buffer(next);
				btrfs_release_path(path);
				cond_resched();
				goto again;
			}
			if (!ret) {
				btrfs_set_path_blocking(path);
				btrfs_tree_read_lock(next);
				btrfs_clear_path_blocking(path, next,
							  BTRFS_READ_LOCK);
			}
			next_rw_lock = BTRFS_READ_LOCK;
		}
		break;
	}
	path->slots[level] = slot;
	while (1) {
		level--;
		c = path->nodes[level];
		if (path->locks[level])
			btrfs_tree_unlock_rw(c, path->locks[level]);

		free_extent_buffer(c);
		path->nodes[level] = next;
		path->slots[level] = 0;
		if (!path->skip_locking)
			path->locks[level] = next_rw_lock;
		if (!level)
			break;

		ret = read_block_for_search(NULL, root, path, &next, level,
					    0, &key, 0);
		if (ret == -EAGAIN)
			goto again;

		if (ret < 0) {
			btrfs_release_path(path);
			goto done;
		}

		if (!path->skip_locking) {
			ret = btrfs_try_tree_read_lock(next);
			if (!ret) {
				btrfs_set_path_blocking(path);
				btrfs_tree_read_lock(next);
				btrfs_clear_path_blocking(path, next,
							  BTRFS_READ_LOCK);
			}
			next_rw_lock = BTRFS_READ_LOCK;
		}
	}
	ret = 0;
done:
	unlock_up(path, 0, 1, 0, NULL);
	path->leave_spinning = old_spinning;
	if (!old_spinning)
		btrfs_set_path_blocking(path);

	return ret;
}
/*
 * this uses btrfs_prev_leaf to walk backwards in the tree, and keeps
 * searching until it gets past min_objectid or finds an item of 'type'
 *
 * returns 0 if something is found, 1 if nothing was found and < 0 on error
 */
int btrfs_previous_item(struct btrfs_root *root,
			struct btrfs_path *path, u64 min_objectid,
			int type)
{
	struct btrfs_key found_key;
	struct extent_buffer *leaf;
	u32 nritems;
	int ret;

	while (1) {
		if (path->slots[0] == 0) {
			btrfs_set_path_blocking(path);
			ret = btrfs_prev_leaf(root, path);
			if (ret != 0)
				return ret;
		} else {
			path->slots[0]--;
		}
		leaf = path->nodes[0];
		nritems = btrfs_header_nritems(leaf);
		if (nritems == 0)
			return 1;
		if (path->slots[0] == nritems)
			path->slots[0]--;

		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		if (found_key.objectid < min_objectid)
			break;
		if (found_key.type == type)
			return 0;
		if (found_key.objectid == min_objectid &&
		    found_key.type < type)
			break;
	}
	return 1;
}
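
/*
 * Example sketch: the usual way btrfs_previous_item() is used - search just
 * past the area of interest, then step back to the last item of a given type
 * for the object.  Callers normally re-check the key at path->slots[0]
 * afterwards.  The "example_*" names are hypothetical.
 */
static int example_find_last_item(struct btrfs_root *example_root,
				  struct btrfs_path *path,
				  u64 objectid, int type)
{
	struct btrfs_key key;
	int ret;

	key.objectid = objectid;
	key.type = (u8)-1;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, example_root, &key, path, 0, 0);
	if (ret < 0)
		return ret;

	/* walk backwards until we hit an item of "type" or pass objectid */
	return btrfs_previous_item(example_root, path, objectid, type);
}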
/*
 * search in extent tree to find a previous Metadata/Data extent item with
 * min objectid.
 *
 * returns 0 if something is found, 1 if nothing was found and < 0 on error
 */
int btrfs_previous_extent_item(struct btrfs_root *root,
			struct btrfs_path *path, u64 min_objectid)
{
	struct btrfs_key found_key;
	struct extent_buffer *leaf;
	u32 nritems;
	int ret;

	while (1) {
		if (path->slots[0] == 0) {
			btrfs_set_path_blocking(path);
			ret = btrfs_prev_leaf(root, path);
			if (ret != 0)
				return ret;
		} else {
			path->slots[0]--;
		}
		leaf = path->nodes[0];
		nritems = btrfs_header_nritems(leaf);
		if (nritems == 0)
			return 1;
		if (path->slots[0] == nritems)
			path->slots[0]--;

		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		if (found_key.objectid < min_objectid)
			break;
		if (found_key.type == BTRFS_EXTENT_ITEM_KEY ||
		    found_key.type == BTRFS_METADATA_ITEM_KEY)
			return 0;
		if (found_key.objectid == min_objectid &&
		    found_key.type < BTRFS_EXTENT_ITEM_KEY)
			break;
	}
	return 1;
}
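
/*
 * Example sketch: stepping back to the closest Metadata/Data extent item at
 * or before a given bytenr, roughly the pattern scrub uses with
 * btrfs_previous_extent_item().  "extent_root" is assumed to be the extent
 * tree root, and callers would still check whether the item found actually
 * covers bytenr.
 */
static int example_find_extent_item(struct btrfs_root *extent_root,
				    struct btrfs_path *path, u64 bytenr)
{
	struct btrfs_key key;
	int ret;

	/* METADATA_ITEM_KEY sorts after EXTENT_ITEM_KEY, so this lands past both */
	key.objectid = bytenr;
	key.type = BTRFS_METADATA_ITEM_KEY;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
	if (ret < 0)
		return ret;

	/* we searched past bytenr, so step back to a real extent item */
	return btrfs_previous_extent_item(extent_root, path, 0);
}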