relocation.c

/*
 * Copyright (C) 2009 Oracle. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "volumes.h"
#include "locking.h"
#include "btrfs_inode.h"
#include "async-thread.h"
#include "free-space-cache.h"
#include "inode-map.h"

/*
 * backref_node, mapping_node and tree_block start with this
 */
struct tree_entry {
	struct rb_node rb_node;
	u64 bytenr;
};

/*
 * present a tree block in the backref cache
 */
struct backref_node {
	struct rb_node rb_node;
	u64 bytenr;

	u64 new_bytenr;
	/* objectid of tree block owner, can be not uptodate */
	u64 owner;
	/* link to pending, changed or detached list */
	struct list_head list;
	/* list of upper level blocks reference this block */
	struct list_head upper;
	/* list of child blocks in the cache */
	struct list_head lower;
	/* NULL if this node is not tree root */
	struct btrfs_root *root;
	/* extent buffer got by COW the block */
	struct extent_buffer *eb;
	/* level of tree block */
	unsigned int level:8;
	/* is the block in non-reference counted tree */
	unsigned int cowonly:1;
	/* 1 if no child node in the cache */
	unsigned int lowest:1;
	/* is the extent buffer locked */
	unsigned int locked:1;
	/* has the block been processed */
	unsigned int processed:1;
	/* have backrefs of this block been checked */
	unsigned int checked:1;
	/*
	 * 1 if corresponding block has been cowed but some upper
	 * level block pointers may not point to the new location
	 */
	unsigned int pending:1;
	/*
	 * 1 if the backref node isn't connected to any other
	 * backref node.
	 */
	unsigned int detached:1;
};

/*
 * present a block pointer in the backref cache
 */
struct backref_edge {
	struct list_head list[2];
	struct backref_node *node[2];
};

#define LOWER	0
#define UPPER	1
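
/*
 * Convention for backref_edge indexing: node[LOWER] is the child tree
 * block and node[UPPER] the parent that references it; list[LOWER] is
 * linked into the child's ->upper list, list[UPPER] into the parent's
 * ->lower list.
 */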

#define RELOCATION_RESERVED_NODES	256

struct backref_cache {
	/* red black tree of all backref nodes in the cache */
	struct rb_root rb_root;
	/* for passing backref nodes to btrfs_reloc_cow_block */
	struct backref_node *path[BTRFS_MAX_LEVEL];
	/*
	 * list of blocks that have been cowed but some block
	 * pointers in upper level blocks may not reflect the
	 * new location
	 */
	struct list_head pending[BTRFS_MAX_LEVEL];
	/* list of backref nodes with no child node */
	struct list_head leaves;
	/* list of blocks that have been cowed in current transaction */
	struct list_head changed;
	/* list of detached backref node. */
	struct list_head detached;

	u64 last_trans;

	int nr_nodes;
	int nr_edges;
};

/*
 * map address of tree root to tree
 */
struct mapping_node {
	struct rb_node rb_node;
	u64 bytenr;
	void *data;
};

struct mapping_tree {
	struct rb_root rb_root;
	spinlock_t lock;
};

/*
 * present a tree block to process
 */
struct tree_block {
	struct rb_node rb_node;
	u64 bytenr;
	struct btrfs_key key;
	unsigned int level:8;
	unsigned int key_ready:1;
};

#define MAX_EXTENTS 128

struct file_extent_cluster {
	u64 start;
	u64 end;
	u64 boundary[MAX_EXTENTS];
	unsigned int nr;
};

struct reloc_control {
	/* block group to relocate */
	struct btrfs_block_group_cache *block_group;
	/* extent tree */
	struct btrfs_root *extent_root;
	/* inode for moving data */
	struct inode *data_inode;

	struct btrfs_block_rsv *block_rsv;

	struct backref_cache backref_cache;

	struct file_extent_cluster cluster;
	/* tree blocks have been processed */
	struct extent_io_tree processed_blocks;
	/* map start of tree root to corresponding reloc tree */
	struct mapping_tree reloc_root_tree;
	/* list of reloc trees */
	struct list_head reloc_roots;
	/* size of metadata reservation for merging reloc trees */
	u64 merging_rsv_size;
	/* size of relocated tree nodes */
	u64 nodes_relocated;
	/* reserved size for block group relocation */
	u64 reserved_bytes;

	u64 search_start;
	u64 extents_found;

	unsigned int stage:8;
	unsigned int create_reloc_tree:1;
	unsigned int merge_reloc_tree:1;
	unsigned int found_file_extent:1;
};

/* stages of data relocation */
#define MOVE_DATA_EXTENTS	0
#define UPDATE_DATA_PTRS	1

static void remove_backref_node(struct backref_cache *cache,
				struct backref_node *node);
static void __mark_block_processed(struct reloc_control *rc,
				   struct backref_node *node);

static void mapping_tree_init(struct mapping_tree *tree)
{
	tree->rb_root = RB_ROOT;
	spin_lock_init(&tree->lock);
}

static void backref_cache_init(struct backref_cache *cache)
{
	int i;
	cache->rb_root = RB_ROOT;
	for (i = 0; i < BTRFS_MAX_LEVEL; i++)
		INIT_LIST_HEAD(&cache->pending[i]);
	INIT_LIST_HEAD(&cache->changed);
	INIT_LIST_HEAD(&cache->detached);
	INIT_LIST_HEAD(&cache->leaves);
}

static void backref_cache_cleanup(struct backref_cache *cache)
{
	struct backref_node *node;
	int i;

	while (!list_empty(&cache->detached)) {
		node = list_entry(cache->detached.next,
				  struct backref_node, list);
		remove_backref_node(cache, node);
	}

	while (!list_empty(&cache->leaves)) {
		node = list_entry(cache->leaves.next,
				  struct backref_node, lower);
		remove_backref_node(cache, node);
	}

	cache->last_trans = 0;

	for (i = 0; i < BTRFS_MAX_LEVEL; i++)
		BUG_ON(!list_empty(&cache->pending[i]));
	BUG_ON(!list_empty(&cache->changed));
	BUG_ON(!list_empty(&cache->detached));
	BUG_ON(!RB_EMPTY_ROOT(&cache->rb_root));
	BUG_ON(cache->nr_nodes);
	BUG_ON(cache->nr_edges);
}

static struct backref_node *alloc_backref_node(struct backref_cache *cache)
{
	struct backref_node *node;

	node = kzalloc(sizeof(*node), GFP_NOFS);
	if (node) {
		INIT_LIST_HEAD(&node->list);
		INIT_LIST_HEAD(&node->upper);
		INIT_LIST_HEAD(&node->lower);
		RB_CLEAR_NODE(&node->rb_node);
		cache->nr_nodes++;
	}
	return node;
}

static void free_backref_node(struct backref_cache *cache,
			      struct backref_node *node)
{
	if (node) {
		cache->nr_nodes--;
		kfree(node);
	}
}

static struct backref_edge *alloc_backref_edge(struct backref_cache *cache)
{
	struct backref_edge *edge;

	edge = kzalloc(sizeof(*edge), GFP_NOFS);
	if (edge)
		cache->nr_edges++;
	return edge;
}

static void free_backref_edge(struct backref_cache *cache,
			      struct backref_edge *edge)
{
	if (edge) {
		cache->nr_edges--;
		kfree(edge);
	}
}

static struct rb_node *tree_insert(struct rb_root *root, u64 bytenr,
				   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct tree_entry *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct tree_entry, rb_node);

		if (bytenr < entry->bytenr)
			p = &(*p)->rb_left;
		else if (bytenr > entry->bytenr)
			p = &(*p)->rb_right;
		else
			return parent;
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}

static struct rb_node *tree_search(struct rb_root *root, u64 bytenr)
{
	struct rb_node *n = root->rb_node;
	struct tree_entry *entry;

	while (n) {
		entry = rb_entry(n, struct tree_entry, rb_node);

		if (bytenr < entry->bytenr)
			n = n->rb_left;
		else if (bytenr > entry->bytenr)
			n = n->rb_right;
		else
			return n;
	}
	return NULL;
}
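
/*
 * tree_insert() and tree_search() work on any structure whose first
 * members mirror struct tree_entry (an rb_node followed by the bytenr
 * key), so backref_node, mapping_node and tree_block can all share them.
 * A typical lookup in this file looks like:
 *
 *	rb_node = tree_search(&cache->rb_root, bytenr);
 *	if (rb_node)
 *		node = rb_entry(rb_node, struct backref_node, rb_node);
 */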

static void backref_tree_panic(struct rb_node *rb_node, int errno, u64 bytenr)
{
	struct btrfs_fs_info *fs_info = NULL;
	struct backref_node *bnode = rb_entry(rb_node, struct backref_node,
					      rb_node);
	if (bnode->root)
		fs_info = bnode->root->fs_info;
	btrfs_panic(fs_info, errno, "Inconsistency in backref cache "
		    "found at offset %llu", bytenr);
}

/*
 * walk up backref nodes until reach node presents tree root
 */
static struct backref_node *walk_up_backref(struct backref_node *node,
					    struct backref_edge *edges[],
					    int *index)
{
	struct backref_edge *edge;
	int idx = *index;

	while (!list_empty(&node->upper)) {
		edge = list_entry(node->upper.next,
				  struct backref_edge, list[LOWER]);
		edges[idx++] = edge;
		node = edge->node[UPPER];
	}
	BUG_ON(node->detached);
	*index = idx;
	return node;
}

/*
 * walk down backref nodes to find start of next reference path
 */
static struct backref_node *walk_down_backref(struct backref_edge *edges[],
					      int *index)
{
	struct backref_edge *edge;
	struct backref_node *lower;
	int idx = *index;

	while (idx > 0) {
		edge = edges[idx - 1];
		lower = edge->node[LOWER];
		if (list_is_last(&edge->list[LOWER], &lower->upper)) {
			idx--;
			continue;
		}
		edge = list_entry(edge->list[LOWER].next,
				  struct backref_edge, list[LOWER]);
		edges[idx - 1] = edge;
		*index = idx;
		return edge->node[UPPER];
	}
	*index = 0;
	return NULL;
}
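
/*
 * walk_up_backref() and walk_down_backref() implement an iterative
 * depth-first walk over reference paths: edges[] is used as an explicit
 * stack, walk_up_backref() pushes the first edge at each level until a
 * tree root is reached, and walk_down_backref() backtracks to the next
 * unvisited sibling edge.
 */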

static void unlock_node_buffer(struct backref_node *node)
{
	if (node->locked) {
		btrfs_tree_unlock(node->eb);
		node->locked = 0;
	}
}

static void drop_node_buffer(struct backref_node *node)
{
	if (node->eb) {
		unlock_node_buffer(node);
		free_extent_buffer(node->eb);
		node->eb = NULL;
	}
}

static void drop_backref_node(struct backref_cache *tree,
			      struct backref_node *node)
{
	BUG_ON(!list_empty(&node->upper));

	drop_node_buffer(node);
	list_del(&node->list);
	list_del(&node->lower);
	if (!RB_EMPTY_NODE(&node->rb_node))
		rb_erase(&node->rb_node, &tree->rb_root);
	free_backref_node(tree, node);
}

/*
 * remove a backref node from the backref cache
 */
static void remove_backref_node(struct backref_cache *cache,
				struct backref_node *node)
{
	struct backref_node *upper;
	struct backref_edge *edge;

	if (!node)
		return;

	BUG_ON(!node->lowest && !node->detached);
	while (!list_empty(&node->upper)) {
		edge = list_entry(node->upper.next, struct backref_edge,
				  list[LOWER]);
		upper = edge->node[UPPER];
		list_del(&edge->list[LOWER]);
		list_del(&edge->list[UPPER]);
		free_backref_edge(cache, edge);

		if (RB_EMPTY_NODE(&upper->rb_node)) {
			BUG_ON(!list_empty(&node->upper));
			drop_backref_node(cache, node);
			node = upper;
			node->lowest = 1;
			continue;
		}
		/*
		 * add the node to leaf node list if no other
		 * child block cached.
		 */
		if (list_empty(&upper->lower)) {
			list_add_tail(&upper->lower, &cache->leaves);
			upper->lowest = 1;
		}
	}

	drop_backref_node(cache, node);
}

static void update_backref_node(struct backref_cache *cache,
				struct backref_node *node, u64 bytenr)
{
	struct rb_node *rb_node;
	rb_erase(&node->rb_node, &cache->rb_root);
	node->bytenr = bytenr;
	rb_node = tree_insert(&cache->rb_root, node->bytenr, &node->rb_node);
	if (rb_node)
		backref_tree_panic(rb_node, -EEXIST, bytenr);
}

/*
 * update backref cache after a transaction commit
 */
static int update_backref_cache(struct btrfs_trans_handle *trans,
				struct backref_cache *cache)
{
	struct backref_node *node;
	int level = 0;

	if (cache->last_trans == 0) {
		cache->last_trans = trans->transid;
		return 0;
	}

	if (cache->last_trans == trans->transid)
		return 0;

	/*
	 * detached nodes are used to avoid unnecessary backref
	 * lookup. transaction commit changes the extent tree.
	 * so the detached nodes are no longer useful.
	 */
	while (!list_empty(&cache->detached)) {
		node = list_entry(cache->detached.next,
				  struct backref_node, list);
		remove_backref_node(cache, node);
	}

	while (!list_empty(&cache->changed)) {
		node = list_entry(cache->changed.next,
				  struct backref_node, list);
		list_del_init(&node->list);
		BUG_ON(node->pending);
		update_backref_node(cache, node, node->new_bytenr);
	}

	/*
	 * some nodes can be left in the pending list if there were
	 * errors during processing the pending nodes.
	 */
	for (level = 0; level < BTRFS_MAX_LEVEL; level++) {
		list_for_each_entry(node, &cache->pending[level], list) {
			BUG_ON(!node->pending);
			if (node->bytenr == node->new_bytenr)
				continue;
			update_backref_node(cache, node, node->new_bytenr);
		}
	}

	cache->last_trans = 0;
	return 1;
}
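
/*
 * update_backref_cache() returns 1 once the cache has been remapped for a
 * new transaction (detached nodes dropped, changed and pending nodes
 * rekeyed to their new bytenr), which lets callers restart searches that
 * were based on the previous commit roots.
 */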

static int should_ignore_root(struct btrfs_root *root)
{
	struct btrfs_root *reloc_root;

	if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state))
		return 0;

	reloc_root = root->reloc_root;
	if (!reloc_root)
		return 0;

	if (btrfs_root_last_snapshot(&reloc_root->root_item) ==
	    root->fs_info->running_transaction->transid - 1)
		return 0;
	/*
	 * if there is reloc tree and it was created in previous
	 * transaction backref lookup can find the reloc tree,
	 * so backref node for the fs tree root is useless for
	 * relocation.
	 */
	return 1;
}

/*
 * find reloc tree by address of tree root
 */
static struct btrfs_root *find_reloc_root(struct reloc_control *rc,
					  u64 bytenr)
{
	struct rb_node *rb_node;
	struct mapping_node *node;
	struct btrfs_root *root = NULL;

	spin_lock(&rc->reloc_root_tree.lock);
	rb_node = tree_search(&rc->reloc_root_tree.rb_root, bytenr);
	if (rb_node) {
		node = rb_entry(rb_node, struct mapping_node, rb_node);
		root = (struct btrfs_root *)node->data;
	}
	spin_unlock(&rc->reloc_root_tree.lock);
	return root;
}

static int is_cowonly_root(u64 root_objectid)
{
	if (root_objectid == BTRFS_ROOT_TREE_OBJECTID ||
	    root_objectid == BTRFS_EXTENT_TREE_OBJECTID ||
	    root_objectid == BTRFS_CHUNK_TREE_OBJECTID ||
	    root_objectid == BTRFS_DEV_TREE_OBJECTID ||
	    root_objectid == BTRFS_TREE_LOG_OBJECTID ||
	    root_objectid == BTRFS_CSUM_TREE_OBJECTID ||
	    root_objectid == BTRFS_UUID_TREE_OBJECTID ||
	    root_objectid == BTRFS_QUOTA_TREE_OBJECTID)
		return 1;
	return 0;
}

static struct btrfs_root *read_fs_root(struct btrfs_fs_info *fs_info,
					u64 root_objectid)
{
	struct btrfs_key key;

	key.objectid = root_objectid;
	key.type = BTRFS_ROOT_ITEM_KEY;
	if (is_cowonly_root(root_objectid))
		key.offset = 0;
	else
		key.offset = (u64)-1;

	return btrfs_get_fs_root(fs_info, &key, false);
}

#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
static noinline_for_stack
struct btrfs_root *find_tree_root(struct reloc_control *rc,
				  struct extent_buffer *leaf,
				  struct btrfs_extent_ref_v0 *ref0)
{
	struct btrfs_root *root;
	u64 root_objectid = btrfs_ref_root_v0(leaf, ref0);
	u64 generation = btrfs_ref_generation_v0(leaf, ref0);

	BUG_ON(root_objectid == BTRFS_TREE_RELOC_OBJECTID);

	root = read_fs_root(rc->extent_root->fs_info, root_objectid);
	BUG_ON(IS_ERR(root));

	if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
	    generation != btrfs_root_generation(&root->root_item))
		return NULL;

	return root;
}
#endif

static noinline_for_stack
int find_inline_backref(struct extent_buffer *leaf, int slot,
			unsigned long *ptr, unsigned long *end)
{
	struct btrfs_key key;
	struct btrfs_extent_item *ei;
	struct btrfs_tree_block_info *bi;
	u32 item_size;

	btrfs_item_key_to_cpu(leaf, &key, slot);

	item_size = btrfs_item_size_nr(leaf, slot);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (item_size < sizeof(*ei)) {
		WARN_ON(item_size != sizeof(struct btrfs_extent_item_v0));
		return 1;
	}
#endif
	ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item);
	WARN_ON(!(btrfs_extent_flags(leaf, ei) &
		  BTRFS_EXTENT_FLAG_TREE_BLOCK));

	if (key.type == BTRFS_EXTENT_ITEM_KEY &&
	    item_size <= sizeof(*ei) + sizeof(*bi)) {
		WARN_ON(item_size < sizeof(*ei) + sizeof(*bi));
		return 1;
	}
	if (key.type == BTRFS_METADATA_ITEM_KEY &&
	    item_size <= sizeof(*ei)) {
		WARN_ON(item_size < sizeof(*ei));
		return 1;
	}

	if (key.type == BTRFS_EXTENT_ITEM_KEY) {
		bi = (struct btrfs_tree_block_info *)(ei + 1);
		*ptr = (unsigned long)(bi + 1);
	} else {
		*ptr = (unsigned long)(ei + 1);
	}
	*end = (unsigned long)ei + item_size;
	return 0;
}
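
/*
 * find_inline_backref() sets *ptr and *end to delimit the inline
 * reference area of an extent item: for a BTRFS_EXTENT_ITEM_KEY item the
 * inline refs follow the btrfs_tree_block_info, while for a
 * BTRFS_METADATA_ITEM_KEY item (skinny metadata, level stored in the key
 * offset) they start right after the extent item itself.
 */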

/*
 * build backref tree for a given tree block. root of the backref tree
 * corresponds to the tree block, leaves of the backref tree correspond
 * to roots of b-trees that reference the tree block.
 *
 * the basic idea of this function is to check backrefs of a given block
 * to find upper level blocks that reference the block, and then check
 * backrefs of these upper level blocks recursively. the recursion stops
 * when a tree root is reached or backrefs for the block are cached.
 *
 * NOTE: if we find backrefs for a block are cached, we know backrefs
 * for all upper level blocks that directly/indirectly reference the
 * block are also cached.
 */
static noinline_for_stack
struct backref_node *build_backref_tree(struct reloc_control *rc,
					struct btrfs_key *node_key,
					int level, u64 bytenr)
{
	struct backref_cache *cache = &rc->backref_cache;
	struct btrfs_path *path1;
	struct btrfs_path *path2;
	struct extent_buffer *eb;
	struct btrfs_root *root;
	struct backref_node *cur;
	struct backref_node *upper;
	struct backref_node *lower;
	struct backref_node *node = NULL;
	struct backref_node *exist = NULL;
	struct backref_edge *edge;
	struct rb_node *rb_node;
	struct btrfs_key key;
	unsigned long end;
	unsigned long ptr;
	LIST_HEAD(list);
	LIST_HEAD(useless);
	int cowonly;
	int ret;
	int err = 0;
	bool need_check = true;

	path1 = btrfs_alloc_path();
	path2 = btrfs_alloc_path();
	if (!path1 || !path2) {
		err = -ENOMEM;
		goto out;
	}
	path1->reada = 1;
	path2->reada = 2;

	node = alloc_backref_node(cache);
	if (!node) {
		err = -ENOMEM;
		goto out;
	}

	node->bytenr = bytenr;
	node->level = level;
	node->lowest = 1;
	cur = node;
again:
	end = 0;
	ptr = 0;
	key.objectid = cur->bytenr;
	key.type = BTRFS_METADATA_ITEM_KEY;
	key.offset = (u64)-1;

	path1->search_commit_root = 1;
	path1->skip_locking = 1;
	ret = btrfs_search_slot(NULL, rc->extent_root, &key, path1,
				0, 0);
	if (ret < 0) {
		err = ret;
		goto out;
	}
	BUG_ON(!ret || !path1->slots[0]);

	path1->slots[0]--;

	WARN_ON(cur->checked);
	if (!list_empty(&cur->upper)) {
		/*
		 * the backref was added previously when processing
		 * backref of type BTRFS_TREE_BLOCK_REF_KEY
		 */
		BUG_ON(!list_is_singular(&cur->upper));
		edge = list_entry(cur->upper.next, struct backref_edge,
				  list[LOWER]);
		BUG_ON(!list_empty(&edge->list[UPPER]));
		exist = edge->node[UPPER];
		/*
		 * add the upper level block to pending list if we need
		 * to check its backrefs
		 */
		if (!exist->checked)
			list_add_tail(&edge->list[UPPER], &list);
	} else {
		exist = NULL;
	}

	while (1) {
		cond_resched();
		eb = path1->nodes[0];

		if (ptr >= end) {
			if (path1->slots[0] >= btrfs_header_nritems(eb)) {
				ret = btrfs_next_leaf(rc->extent_root, path1);
				if (ret < 0) {
					err = ret;
					goto out;
				}
				if (ret > 0)
					break;
				eb = path1->nodes[0];
			}

			btrfs_item_key_to_cpu(eb, &key, path1->slots[0]);
			if (key.objectid != cur->bytenr) {
				WARN_ON(exist);
				break;
			}

			if (key.type == BTRFS_EXTENT_ITEM_KEY ||
			    key.type == BTRFS_METADATA_ITEM_KEY) {
				ret = find_inline_backref(eb, path1->slots[0],
							  &ptr, &end);
				if (ret)
					goto next;
			}
		}

		if (ptr < end) {
			/* update key for inline back ref */
			struct btrfs_extent_inline_ref *iref;
			iref = (struct btrfs_extent_inline_ref *)ptr;
			key.type = btrfs_extent_inline_ref_type(eb, iref);
			key.offset = btrfs_extent_inline_ref_offset(eb, iref);
			WARN_ON(key.type != BTRFS_TREE_BLOCK_REF_KEY &&
				key.type != BTRFS_SHARED_BLOCK_REF_KEY);
		}

		if (exist &&
		    ((key.type == BTRFS_TREE_BLOCK_REF_KEY &&
		      exist->owner == key.offset) ||
		     (key.type == BTRFS_SHARED_BLOCK_REF_KEY &&
		      exist->bytenr == key.offset))) {
			exist = NULL;
			goto next;
		}

#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
		if (key.type == BTRFS_SHARED_BLOCK_REF_KEY ||
		    key.type == BTRFS_EXTENT_REF_V0_KEY) {
			if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
				struct btrfs_extent_ref_v0 *ref0;
				ref0 = btrfs_item_ptr(eb, path1->slots[0],
						struct btrfs_extent_ref_v0);
				if (key.objectid == key.offset) {
					root = find_tree_root(rc, eb, ref0);
					if (root && !should_ignore_root(root))
						cur->root = root;
					else
						list_add(&cur->list, &useless);
					break;
				}
				if (is_cowonly_root(btrfs_ref_root_v0(eb,
								      ref0)))
					cur->cowonly = 1;
			}
#else
		BUG_ON(key.type == BTRFS_EXTENT_REF_V0_KEY);
		if (key.type == BTRFS_SHARED_BLOCK_REF_KEY) {
#endif
			if (key.objectid == key.offset) {
				/*
				 * only root blocks of reloc trees use
				 * backref of this type.
				 */
				root = find_reloc_root(rc, cur->bytenr);
				BUG_ON(!root);
				cur->root = root;
				break;
			}

			edge = alloc_backref_edge(cache);
			if (!edge) {
				err = -ENOMEM;
				goto out;
			}
			rb_node = tree_search(&cache->rb_root, key.offset);
			if (!rb_node) {
				upper = alloc_backref_node(cache);
				if (!upper) {
					free_backref_edge(cache, edge);
					err = -ENOMEM;
					goto out;
				}
				upper->bytenr = key.offset;
				upper->level = cur->level + 1;
				/*
				 * backrefs for the upper level block aren't
				 * cached, add the block to pending list
				 */
				list_add_tail(&edge->list[UPPER], &list);
			} else {
				upper = rb_entry(rb_node, struct backref_node,
						 rb_node);
				BUG_ON(!upper->checked);
				INIT_LIST_HEAD(&edge->list[UPPER]);
			}
			list_add_tail(&edge->list[LOWER], &cur->upper);
			edge->node[LOWER] = cur;
			edge->node[UPPER] = upper;

			goto next;
		} else if (key.type != BTRFS_TREE_BLOCK_REF_KEY) {
			goto next;
		}

		/* key.type == BTRFS_TREE_BLOCK_REF_KEY */
		root = read_fs_root(rc->extent_root->fs_info, key.offset);
		if (IS_ERR(root)) {
			err = PTR_ERR(root);
			goto out;
		}

		if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state))
			cur->cowonly = 1;

		if (btrfs_root_level(&root->root_item) == cur->level) {
			/* tree root */
			BUG_ON(btrfs_root_bytenr(&root->root_item) !=
			       cur->bytenr);
			if (should_ignore_root(root))
				list_add(&cur->list, &useless);
			else
				cur->root = root;
			break;
		}

		level = cur->level + 1;

		/*
		 * searching the tree to find upper level blocks
		 * that reference the block.
		 */
		path2->search_commit_root = 1;
		path2->skip_locking = 1;
		path2->lowest_level = level;
		ret = btrfs_search_slot(NULL, root, node_key, path2, 0, 0);
		path2->lowest_level = 0;
		if (ret < 0) {
			err = ret;
			goto out;
		}
		if (ret > 0 && path2->slots[level] > 0)
			path2->slots[level]--;

		eb = path2->nodes[level];
		WARN_ON(btrfs_node_blockptr(eb, path2->slots[level]) !=
			cur->bytenr);

		lower = cur;
		need_check = true;
		for (; level < BTRFS_MAX_LEVEL; level++) {
			if (!path2->nodes[level]) {
				BUG_ON(btrfs_root_bytenr(&root->root_item) !=
				       lower->bytenr);
				if (should_ignore_root(root))
					list_add(&lower->list, &useless);
				else
					lower->root = root;
				break;
			}

			edge = alloc_backref_edge(cache);
			if (!edge) {
				err = -ENOMEM;
				goto out;
			}

			eb = path2->nodes[level];
			rb_node = tree_search(&cache->rb_root, eb->start);
			if (!rb_node) {
				upper = alloc_backref_node(cache);
				if (!upper) {
					free_backref_edge(cache, edge);
					err = -ENOMEM;
					goto out;
				}
				upper->bytenr = eb->start;
				upper->owner = btrfs_header_owner(eb);
				upper->level = lower->level + 1;
				if (!test_bit(BTRFS_ROOT_REF_COWS,
					      &root->state))
					upper->cowonly = 1;

				/*
				 * if we know the block isn't shared
				 * we can avoid checking its backrefs.
				 */
				if (btrfs_block_can_be_shared(root, eb))
					upper->checked = 0;
				else
					upper->checked = 1;

				/*
				 * add the block to pending list if we
				 * need to check its backrefs, we only do this
				 * once while walking up a tree as we will
				 * catch anything else later on.
				 */
				if (!upper->checked && need_check) {
					need_check = false;
					list_add_tail(&edge->list[UPPER],
						      &list);
				} else
					INIT_LIST_HEAD(&edge->list[UPPER]);
			} else {
				upper = rb_entry(rb_node, struct backref_node,
						 rb_node);
				BUG_ON(!upper->checked);
				INIT_LIST_HEAD(&edge->list[UPPER]);
				if (!upper->owner)
					upper->owner = btrfs_header_owner(eb);
			}
			list_add_tail(&edge->list[LOWER], &lower->upper);
			edge->node[LOWER] = lower;
			edge->node[UPPER] = upper;

			if (rb_node)
				break;
			lower = upper;
			upper = NULL;
		}
		btrfs_release_path(path2);
next:
		if (ptr < end) {
			ptr += btrfs_extent_inline_ref_size(key.type);
			if (ptr >= end) {
				WARN_ON(ptr > end);
				ptr = 0;
				end = 0;
			}
		}
		if (ptr >= end)
			path1->slots[0]++;
	}
	btrfs_release_path(path1);

	cur->checked = 1;
	WARN_ON(exist);

	/* the pending list isn't empty, take the first block to process */
	if (!list_empty(&list)) {
		edge = list_entry(list.next, struct backref_edge, list[UPPER]);
		list_del_init(&edge->list[UPPER]);
		cur = edge->node[UPPER];
		goto again;
	}

	/*
	 * everything goes well, connect backref nodes and insert backref nodes
	 * into the cache.
	 */
	BUG_ON(!node->checked);
	cowonly = node->cowonly;
	if (!cowonly) {
		rb_node = tree_insert(&cache->rb_root, node->bytenr,
				      &node->rb_node);
		if (rb_node)
			backref_tree_panic(rb_node, -EEXIST, node->bytenr);
		list_add_tail(&node->lower, &cache->leaves);
	}

	list_for_each_entry(edge, &node->upper, list[LOWER])
		list_add_tail(&edge->list[UPPER], &list);

	while (!list_empty(&list)) {
		edge = list_entry(list.next, struct backref_edge, list[UPPER]);
		list_del_init(&edge->list[UPPER]);
		upper = edge->node[UPPER];
		if (upper->detached) {
			list_del(&edge->list[LOWER]);
			lower = edge->node[LOWER];
			free_backref_edge(cache, edge);
			if (list_empty(&lower->upper))
				list_add(&lower->list, &useless);
			continue;
		}

		if (!RB_EMPTY_NODE(&upper->rb_node)) {
			if (upper->lowest) {
				list_del_init(&upper->lower);
				upper->lowest = 0;
			}

			list_add_tail(&edge->list[UPPER], &upper->lower);
			continue;
		}

		BUG_ON(!upper->checked);
		BUG_ON(cowonly != upper->cowonly);
		if (!cowonly) {
			rb_node = tree_insert(&cache->rb_root, upper->bytenr,
					      &upper->rb_node);
			if (rb_node)
				backref_tree_panic(rb_node, -EEXIST,
						   upper->bytenr);
		}

		list_add_tail(&edge->list[UPPER], &upper->lower);

		list_for_each_entry(edge, &upper->upper, list[LOWER])
			list_add_tail(&edge->list[UPPER], &list);
	}
	/*
	 * process useless backref nodes. backref nodes for tree leaves
	 * are deleted from the cache. backref nodes for upper level
	 * tree blocks are left in the cache to avoid unnecessary backref
	 * lookup.
	 */
	while (!list_empty(&useless)) {
		upper = list_entry(useless.next, struct backref_node, list);
		list_del_init(&upper->list);
		BUG_ON(!list_empty(&upper->upper));
		if (upper == node)
			node = NULL;
		if (upper->lowest) {
			list_del_init(&upper->lower);
			upper->lowest = 0;
		}
		while (!list_empty(&upper->lower)) {
			edge = list_entry(upper->lower.next,
					  struct backref_edge, list[UPPER]);
			list_del(&edge->list[UPPER]);
			list_del(&edge->list[LOWER]);
			lower = edge->node[LOWER];
			free_backref_edge(cache, edge);

			if (list_empty(&lower->upper))
				list_add(&lower->list, &useless);
		}
		__mark_block_processed(rc, upper);
		if (upper->level > 0) {
			list_add(&upper->list, &cache->detached);
			upper->detached = 1;
		} else {
			rb_erase(&upper->rb_node, &cache->rb_root);
			free_backref_node(cache, upper);
		}
	}
out:
	btrfs_free_path(path1);
	btrfs_free_path(path2);
	if (err) {
		while (!list_empty(&useless)) {
			lower = list_entry(useless.next,
					   struct backref_node, upper);
			list_del_init(&lower->upper);
		}
		upper = node;
		INIT_LIST_HEAD(&list);
		while (upper) {
			if (RB_EMPTY_NODE(&upper->rb_node)) {
				list_splice_tail(&upper->upper, &list);
				free_backref_node(cache, upper);
			}

			if (list_empty(&list))
				break;

			edge = list_entry(list.next, struct backref_edge,
					  list[LOWER]);
			list_del(&edge->list[LOWER]);
			upper = edge->node[UPPER];
			free_backref_edge(cache, edge);
		}
		return ERR_PTR(err);
	}

	BUG_ON(node && node->detached);
	return node;
}
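
/*
 * build_backref_tree() above proceeds in three phases: it first iterates
 * the backrefs of the block being relocated, allocating upper nodes and
 * edges and queueing any upper block whose own backrefs still need
 * checking; once that worklist is empty it links the finished nodes and
 * edges into the cache rb-tree; finally, nodes found to be useless (only
 * reachable through roots that should_ignore_root() filters out) are
 * detached from the cache or freed.
 */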

/*
 * helper to add backref node for the newly created snapshot.
 * the backref node is created by cloning backref node that
 * corresponds to root of source tree
 */
static int clone_backref_node(struct btrfs_trans_handle *trans,
			      struct reloc_control *rc,
			      struct btrfs_root *src,
			      struct btrfs_root *dest)
{
	struct btrfs_root *reloc_root = src->reloc_root;
	struct backref_cache *cache = &rc->backref_cache;
	struct backref_node *node = NULL;
	struct backref_node *new_node;
	struct backref_edge *edge;
	struct backref_edge *new_edge;
	struct rb_node *rb_node;

	if (cache->last_trans > 0)
		update_backref_cache(trans, cache);

	rb_node = tree_search(&cache->rb_root, src->commit_root->start);
	if (rb_node) {
		node = rb_entry(rb_node, struct backref_node, rb_node);
		if (node->detached)
			node = NULL;
		else
			BUG_ON(node->new_bytenr != reloc_root->node->start);
	}

	if (!node) {
		rb_node = tree_search(&cache->rb_root,
				      reloc_root->commit_root->start);
		if (rb_node) {
			node = rb_entry(rb_node, struct backref_node,
					rb_node);
			BUG_ON(node->detached);
		}
	}

	if (!node)
		return 0;

	new_node = alloc_backref_node(cache);
	if (!new_node)
		return -ENOMEM;

	new_node->bytenr = dest->node->start;
	new_node->level = node->level;
	new_node->lowest = node->lowest;
	new_node->checked = 1;
	new_node->root = dest;

	if (!node->lowest) {
		list_for_each_entry(edge, &node->lower, list[UPPER]) {
			new_edge = alloc_backref_edge(cache);
			if (!new_edge)
				goto fail;

			new_edge->node[UPPER] = new_node;
			new_edge->node[LOWER] = edge->node[LOWER];
			list_add_tail(&new_edge->list[UPPER],
				      &new_node->lower);
		}
	} else {
		list_add_tail(&new_node->lower, &cache->leaves);
	}

	rb_node = tree_insert(&cache->rb_root, new_node->bytenr,
			      &new_node->rb_node);
	if (rb_node)
		backref_tree_panic(rb_node, -EEXIST, new_node->bytenr);

	if (!new_node->lowest) {
		list_for_each_entry(new_edge, &new_node->lower, list[UPPER]) {
			list_add_tail(&new_edge->list[LOWER],
				      &new_edge->node[LOWER]->upper);
		}
	}
	return 0;
fail:
	while (!list_empty(&new_node->lower)) {
		new_edge = list_entry(new_node->lower.next,
				      struct backref_edge, list[UPPER]);
		list_del(&new_edge->list[UPPER]);
		free_backref_edge(cache, new_edge);
	}
	free_backref_node(cache, new_node);
	return -ENOMEM;
}
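
/*
 * clone_backref_node() is used after a snapshot is created while
 * relocation is in progress: the new snapshot shares tree blocks with the
 * source tree, so the cached backref node for the source root is
 * duplicated for the destination root to keep the backref cache
 * consistent.
 */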

/*
 * helper to add 'address of tree root -> reloc tree' mapping
 */
static int __must_check __add_reloc_root(struct btrfs_root *root)
{
	struct rb_node *rb_node;
	struct mapping_node *node;
	struct reloc_control *rc = root->fs_info->reloc_ctl;

	node = kmalloc(sizeof(*node), GFP_NOFS);
	if (!node)
		return -ENOMEM;

	node->bytenr = root->node->start;
	node->data = root;

	spin_lock(&rc->reloc_root_tree.lock);
	rb_node = tree_insert(&rc->reloc_root_tree.rb_root,
			      node->bytenr, &node->rb_node);
	spin_unlock(&rc->reloc_root_tree.lock);
	if (rb_node) {
		btrfs_panic(root->fs_info, -EEXIST, "Duplicate root found "
			    "for start=%llu while inserting into relocation "
			    "tree", node->bytenr);
		kfree(node);
		return -EEXIST;
	}

	list_add_tail(&root->root_list, &rc->reloc_roots);
	return 0;
}

/*
 * helper to delete the 'address of tree root -> reloc tree'
 * mapping
 */
static void __del_reloc_root(struct btrfs_root *root)
{
	struct rb_node *rb_node;
	struct mapping_node *node = NULL;
	struct reloc_control *rc = root->fs_info->reloc_ctl;

	spin_lock(&rc->reloc_root_tree.lock);
	rb_node = tree_search(&rc->reloc_root_tree.rb_root,
			      root->node->start);
	if (rb_node) {
		node = rb_entry(rb_node, struct mapping_node, rb_node);
		rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root);
	}
	spin_unlock(&rc->reloc_root_tree.lock);

	if (!node)
		return;
	BUG_ON((struct btrfs_root *)node->data != root);

	spin_lock(&root->fs_info->trans_lock);
	list_del_init(&root->root_list);
	spin_unlock(&root->fs_info->trans_lock);
	kfree(node);
}

/*
 * helper to update the 'address of tree root -> reloc tree'
 * mapping
 */
static int __update_reloc_root(struct btrfs_root *root, u64 new_bytenr)
{
	struct rb_node *rb_node;
	struct mapping_node *node = NULL;
	struct reloc_control *rc = root->fs_info->reloc_ctl;

	spin_lock(&rc->reloc_root_tree.lock);
	rb_node = tree_search(&rc->reloc_root_tree.rb_root,
			      root->node->start);
	if (rb_node) {
		node = rb_entry(rb_node, struct mapping_node, rb_node);
		rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root);
	}
	spin_unlock(&rc->reloc_root_tree.lock);

	if (!node)
		return 0;
	BUG_ON((struct btrfs_root *)node->data != root);

	spin_lock(&rc->reloc_root_tree.lock);
	node->bytenr = new_bytenr;
	rb_node = tree_insert(&rc->reloc_root_tree.rb_root,
			      node->bytenr, &node->rb_node);
	spin_unlock(&rc->reloc_root_tree.lock);
	if (rb_node)
		backref_tree_panic(rb_node, -EEXIST, node->bytenr);
	return 0;
}
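
/*
 * The three helpers above keep rc->reloc_root_tree (the "root node bytenr
 * -> reloc tree" mapping) consistent: __add_reloc_root() registers a new
 * reloc tree, __del_reloc_root() drops the mapping, and
 * __update_reloc_root() rekeys the mapping_node when the root block of a
 * reloc tree is COWed to a new bytenr.
 */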
  1182. static struct btrfs_root *create_reloc_root(struct btrfs_trans_handle *trans,
  1183. struct btrfs_root *root, u64 objectid)
  1184. {
  1185. struct btrfs_root *reloc_root;
  1186. struct extent_buffer *eb;
  1187. struct btrfs_root_item *root_item;
  1188. struct btrfs_key root_key;
  1189. u64 last_snap = 0;
  1190. int ret;
  1191. root_item = kmalloc(sizeof(*root_item), GFP_NOFS);
  1192. BUG_ON(!root_item);
  1193. root_key.objectid = BTRFS_TREE_RELOC_OBJECTID;
  1194. root_key.type = BTRFS_ROOT_ITEM_KEY;
  1195. root_key.offset = objectid;
  1196. if (root->root_key.objectid == objectid) {
  1197. /* called by btrfs_init_reloc_root */
  1198. ret = btrfs_copy_root(trans, root, root->commit_root, &eb,
  1199. BTRFS_TREE_RELOC_OBJECTID);
  1200. BUG_ON(ret);
  1201. last_snap = btrfs_root_last_snapshot(&root->root_item);
  1202. btrfs_set_root_last_snapshot(&root->root_item,
  1203. trans->transid - 1);
  1204. } else {
/*
 * called by btrfs_reloc_post_snapshot_hook.
 * the source tree is a reloc tree; all tree blocks
 * modified after it was created have the RELOC flag
 * set in their headers, so it's OK not to update
 * 'last_snapshot'.
 */
  1212. ret = btrfs_copy_root(trans, root, root->node, &eb,
  1213. BTRFS_TREE_RELOC_OBJECTID);
  1214. BUG_ON(ret);
  1215. }
  1216. memcpy(root_item, &root->root_item, sizeof(*root_item));
  1217. btrfs_set_root_bytenr(root_item, eb->start);
  1218. btrfs_set_root_level(root_item, btrfs_header_level(eb));
  1219. btrfs_set_root_generation(root_item, trans->transid);
  1220. if (root->root_key.objectid == objectid) {
  1221. btrfs_set_root_refs(root_item, 0);
  1222. memset(&root_item->drop_progress, 0,
  1223. sizeof(struct btrfs_disk_key));
  1224. root_item->drop_level = 0;
  1225. /*
  1226. * abuse rtransid, it is safe because it is impossible to
  1227. * receive data into a relocation tree.
  1228. */
  1229. btrfs_set_root_rtransid(root_item, last_snap);
  1230. btrfs_set_root_otransid(root_item, trans->transid);
  1231. }
  1232. btrfs_tree_unlock(eb);
  1233. free_extent_buffer(eb);
  1234. ret = btrfs_insert_root(trans, root->fs_info->tree_root,
  1235. &root_key, root_item);
  1236. BUG_ON(ret);
  1237. kfree(root_item);
  1238. reloc_root = btrfs_read_fs_root(root->fs_info->tree_root, &root_key);
  1239. BUG_ON(IS_ERR(reloc_root));
  1240. reloc_root->last_trans = trans->transid;
  1241. return reloc_root;
  1242. }
/*
 * create a reloc tree for a given fs tree. a reloc tree is just a
 * snapshot of the fs tree with a special root objectid.
 */
  1247. int btrfs_init_reloc_root(struct btrfs_trans_handle *trans,
  1248. struct btrfs_root *root)
  1249. {
  1250. struct btrfs_root *reloc_root;
  1251. struct reloc_control *rc = root->fs_info->reloc_ctl;
  1252. struct btrfs_block_rsv *rsv;
  1253. int clear_rsv = 0;
  1254. int ret;
  1255. if (root->reloc_root) {
  1256. reloc_root = root->reloc_root;
  1257. reloc_root->last_trans = trans->transid;
  1258. return 0;
  1259. }
  1260. if (!rc || !rc->create_reloc_tree ||
  1261. root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
  1262. return 0;
  1263. if (!trans->reloc_reserved) {
  1264. rsv = trans->block_rsv;
  1265. trans->block_rsv = rc->block_rsv;
  1266. clear_rsv = 1;
  1267. }
  1268. reloc_root = create_reloc_root(trans, root, root->root_key.objectid);
  1269. if (clear_rsv)
  1270. trans->block_rsv = rsv;
  1271. ret = __add_reloc_root(reloc_root);
  1272. BUG_ON(ret < 0);
  1273. root->reloc_root = reloc_root;
  1274. return 0;
  1275. }
  1276. /*
  1277. * update root item of reloc tree
  1278. */
  1279. int btrfs_update_reloc_root(struct btrfs_trans_handle *trans,
  1280. struct btrfs_root *root)
  1281. {
  1282. struct btrfs_root *reloc_root;
  1283. struct btrfs_root_item *root_item;
  1284. int ret;
  1285. if (!root->reloc_root)
  1286. goto out;
  1287. reloc_root = root->reloc_root;
  1288. root_item = &reloc_root->root_item;
  1289. if (root->fs_info->reloc_ctl->merge_reloc_tree &&
  1290. btrfs_root_refs(root_item) == 0) {
  1291. root->reloc_root = NULL;
  1292. __del_reloc_root(reloc_root);
  1293. }
  1294. if (reloc_root->commit_root != reloc_root->node) {
  1295. btrfs_set_root_node(root_item, reloc_root->node);
  1296. free_extent_buffer(reloc_root->commit_root);
  1297. reloc_root->commit_root = btrfs_root_node(reloc_root);
  1298. }
  1299. ret = btrfs_update_root(trans, root->fs_info->tree_root,
  1300. &reloc_root->root_key, root_item);
  1301. BUG_ON(ret);
  1302. out:
  1303. return 0;
  1304. }
  1305. /*
  1306. * helper to find first cached inode with inode number >= objectid
  1307. * in a subvolume
  1308. */
  1309. static struct inode *find_next_inode(struct btrfs_root *root, u64 objectid)
  1310. {
  1311. struct rb_node *node;
  1312. struct rb_node *prev;
  1313. struct btrfs_inode *entry;
  1314. struct inode *inode;
  1315. spin_lock(&root->inode_lock);
  1316. again:
  1317. node = root->inode_tree.rb_node;
  1318. prev = NULL;
  1319. while (node) {
  1320. prev = node;
  1321. entry = rb_entry(node, struct btrfs_inode, rb_node);
  1322. if (objectid < btrfs_ino(&entry->vfs_inode))
  1323. node = node->rb_left;
  1324. else if (objectid > btrfs_ino(&entry->vfs_inode))
  1325. node = node->rb_right;
  1326. else
  1327. break;
  1328. }
  1329. if (!node) {
  1330. while (prev) {
  1331. entry = rb_entry(prev, struct btrfs_inode, rb_node);
  1332. if (objectid <= btrfs_ino(&entry->vfs_inode)) {
  1333. node = prev;
  1334. break;
  1335. }
  1336. prev = rb_next(prev);
  1337. }
  1338. }
  1339. while (node) {
  1340. entry = rb_entry(node, struct btrfs_inode, rb_node);
  1341. inode = igrab(&entry->vfs_inode);
  1342. if (inode) {
  1343. spin_unlock(&root->inode_lock);
  1344. return inode;
  1345. }
  1346. objectid = btrfs_ino(&entry->vfs_inode) + 1;
  1347. if (cond_resched_lock(&root->inode_lock))
  1348. goto again;
  1349. node = rb_next(node);
  1350. }
  1351. spin_unlock(&root->inode_lock);
  1352. return NULL;
  1353. }
static int in_block_group(u64 bytenr,
			  struct btrfs_block_group_cache *block_group)
{
	if (bytenr >= block_group->key.objectid &&
	    bytenr < block_group->key.objectid + block_group->key.offset)
		return 1;
	return 0;
}
  1362. /*
  1363. * get new location of data
  1364. */
  1365. static int get_new_location(struct inode *reloc_inode, u64 *new_bytenr,
  1366. u64 bytenr, u64 num_bytes)
  1367. {
  1368. struct btrfs_root *root = BTRFS_I(reloc_inode)->root;
  1369. struct btrfs_path *path;
  1370. struct btrfs_file_extent_item *fi;
  1371. struct extent_buffer *leaf;
  1372. int ret;
  1373. path = btrfs_alloc_path();
  1374. if (!path)
  1375. return -ENOMEM;
  1376. bytenr -= BTRFS_I(reloc_inode)->index_cnt;
  1377. ret = btrfs_lookup_file_extent(NULL, root, path, btrfs_ino(reloc_inode),
  1378. bytenr, 0);
  1379. if (ret < 0)
  1380. goto out;
  1381. if (ret > 0) {
  1382. ret = -ENOENT;
  1383. goto out;
  1384. }
  1385. leaf = path->nodes[0];
  1386. fi = btrfs_item_ptr(leaf, path->slots[0],
  1387. struct btrfs_file_extent_item);
  1388. BUG_ON(btrfs_file_extent_offset(leaf, fi) ||
  1389. btrfs_file_extent_compression(leaf, fi) ||
  1390. btrfs_file_extent_encryption(leaf, fi) ||
  1391. btrfs_file_extent_other_encoding(leaf, fi));
  1392. if (num_bytes != btrfs_file_extent_disk_num_bytes(leaf, fi)) {
  1393. ret = -EINVAL;
  1394. goto out;
  1395. }
  1396. *new_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
  1397. ret = 0;
  1398. out:
  1399. btrfs_free_path(path);
  1400. return ret;
  1401. }
  1402. /*
  1403. * update file extent items in the tree leaf to point to
  1404. * the new locations.
  1405. */
  1406. static noinline_for_stack
  1407. int replace_file_extents(struct btrfs_trans_handle *trans,
  1408. struct reloc_control *rc,
  1409. struct btrfs_root *root,
  1410. struct extent_buffer *leaf)
  1411. {
  1412. struct btrfs_key key;
  1413. struct btrfs_file_extent_item *fi;
  1414. struct inode *inode = NULL;
  1415. u64 parent;
  1416. u64 bytenr;
  1417. u64 new_bytenr = 0;
  1418. u64 num_bytes;
  1419. u64 end;
  1420. u32 nritems;
  1421. u32 i;
  1422. int ret = 0;
  1423. int first = 1;
  1424. int dirty = 0;
  1425. if (rc->stage != UPDATE_DATA_PTRS)
  1426. return 0;
  1427. /* reloc trees always use full backref */
  1428. if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
  1429. parent = leaf->start;
  1430. else
  1431. parent = 0;
  1432. nritems = btrfs_header_nritems(leaf);
  1433. for (i = 0; i < nritems; i++) {
  1434. cond_resched();
  1435. btrfs_item_key_to_cpu(leaf, &key, i);
  1436. if (key.type != BTRFS_EXTENT_DATA_KEY)
  1437. continue;
  1438. fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
  1439. if (btrfs_file_extent_type(leaf, fi) ==
  1440. BTRFS_FILE_EXTENT_INLINE)
  1441. continue;
  1442. bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
  1443. num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
  1444. if (bytenr == 0)
  1445. continue;
  1446. if (!in_block_group(bytenr, rc->block_group))
  1447. continue;
/*
 * if we are modifying a block in the fs tree, wait for readpage
 * to complete and drop the extent cache
 */
  1452. if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
  1453. if (first) {
  1454. inode = find_next_inode(root, key.objectid);
  1455. first = 0;
  1456. } else if (inode && btrfs_ino(inode) < key.objectid) {
  1457. btrfs_add_delayed_iput(inode);
  1458. inode = find_next_inode(root, key.objectid);
  1459. }
  1460. if (inode && btrfs_ino(inode) == key.objectid) {
  1461. end = key.offset +
  1462. btrfs_file_extent_num_bytes(leaf, fi);
  1463. WARN_ON(!IS_ALIGNED(key.offset,
  1464. root->sectorsize));
  1465. WARN_ON(!IS_ALIGNED(end, root->sectorsize));
  1466. end--;
  1467. ret = try_lock_extent(&BTRFS_I(inode)->io_tree,
  1468. key.offset, end);
  1469. if (!ret)
  1470. continue;
  1471. btrfs_drop_extent_cache(inode, key.offset, end,
  1472. 1);
  1473. unlock_extent(&BTRFS_I(inode)->io_tree,
  1474. key.offset, end);
  1475. }
  1476. }
  1477. ret = get_new_location(rc->data_inode, &new_bytenr,
  1478. bytenr, num_bytes);
  1479. if (ret) {
  1480. /*
  1481. * Don't have to abort since we've not changed anything
  1482. * in the file extent yet.
  1483. */
  1484. break;
  1485. }
  1486. btrfs_set_file_extent_disk_bytenr(leaf, fi, new_bytenr);
  1487. dirty = 1;
  1488. key.offset -= btrfs_file_extent_offset(leaf, fi);
  1489. ret = btrfs_inc_extent_ref(trans, root, new_bytenr,
  1490. num_bytes, parent,
  1491. btrfs_header_owner(leaf),
  1492. key.objectid, key.offset, 1);
  1493. if (ret) {
  1494. btrfs_abort_transaction(trans, root, ret);
  1495. break;
  1496. }
  1497. ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
  1498. parent, btrfs_header_owner(leaf),
  1499. key.objectid, key.offset, 1);
  1500. if (ret) {
  1501. btrfs_abort_transaction(trans, root, ret);
  1502. break;
  1503. }
  1504. }
  1505. if (dirty)
  1506. btrfs_mark_buffer_dirty(leaf);
  1507. if (inode)
  1508. btrfs_add_delayed_iput(inode);
  1509. return ret;
  1510. }
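/*
 * compare the key of the pointer at 'slot' in 'eb' with the key at
 * the current slot of path->nodes[level]
 */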
  1511. static noinline_for_stack
  1512. int memcmp_node_keys(struct extent_buffer *eb, int slot,
  1513. struct btrfs_path *path, int level)
  1514. {
  1515. struct btrfs_disk_key key1;
  1516. struct btrfs_disk_key key2;
  1517. btrfs_node_key(eb, &key1, slot);
  1518. btrfs_node_key(path->nodes[level], &key2, path->slots[level]);
  1519. return memcmp(&key1, &key2, sizeof(key1));
  1520. }
/*
 * try to replace tree blocks in the fs tree with the new blocks
 * in the reloc tree. tree blocks that haven't been modified since
 * the reloc tree was created can be replaced.
 *
 * if a block was replaced, level of the block + 1 is returned.
 * if no block got replaced, 0 is returned. if there are other
 * errors, a negative error number is returned.
 */
  1530. static noinline_for_stack
  1531. int replace_path(struct btrfs_trans_handle *trans,
  1532. struct btrfs_root *dest, struct btrfs_root *src,
  1533. struct btrfs_path *path, struct btrfs_key *next_key,
  1534. int lowest_level, int max_level)
  1535. {
  1536. struct extent_buffer *eb;
  1537. struct extent_buffer *parent;
  1538. struct btrfs_key key;
  1539. u64 old_bytenr;
  1540. u64 new_bytenr;
  1541. u64 old_ptr_gen;
  1542. u64 new_ptr_gen;
  1543. u64 last_snapshot;
  1544. u32 blocksize;
  1545. int cow = 0;
  1546. int level;
  1547. int ret;
  1548. int slot;
  1549. BUG_ON(src->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
  1550. BUG_ON(dest->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID);
  1551. last_snapshot = btrfs_root_last_snapshot(&src->root_item);
  1552. again:
  1553. slot = path->slots[lowest_level];
  1554. btrfs_node_key_to_cpu(path->nodes[lowest_level], &key, slot);
  1555. eb = btrfs_lock_root_node(dest);
  1556. btrfs_set_lock_blocking(eb);
  1557. level = btrfs_header_level(eb);
  1558. if (level < lowest_level) {
  1559. btrfs_tree_unlock(eb);
  1560. free_extent_buffer(eb);
  1561. return 0;
  1562. }
  1563. if (cow) {
  1564. ret = btrfs_cow_block(trans, dest, eb, NULL, 0, &eb);
  1565. BUG_ON(ret);
  1566. }
  1567. btrfs_set_lock_blocking(eb);
  1568. if (next_key) {
  1569. next_key->objectid = (u64)-1;
  1570. next_key->type = (u8)-1;
  1571. next_key->offset = (u64)-1;
  1572. }
  1573. parent = eb;
  1574. while (1) {
  1575. level = btrfs_header_level(parent);
  1576. BUG_ON(level < lowest_level);
  1577. ret = btrfs_bin_search(parent, &key, level, &slot);
  1578. if (ret && slot > 0)
  1579. slot--;
  1580. if (next_key && slot + 1 < btrfs_header_nritems(parent))
  1581. btrfs_node_key_to_cpu(parent, next_key, slot + 1);
  1582. old_bytenr = btrfs_node_blockptr(parent, slot);
  1583. blocksize = btrfs_level_size(dest, level - 1);
  1584. old_ptr_gen = btrfs_node_ptr_generation(parent, slot);
  1585. if (level <= max_level) {
  1586. eb = path->nodes[level];
  1587. new_bytenr = btrfs_node_blockptr(eb,
  1588. path->slots[level]);
  1589. new_ptr_gen = btrfs_node_ptr_generation(eb,
  1590. path->slots[level]);
  1591. } else {
  1592. new_bytenr = 0;
  1593. new_ptr_gen = 0;
  1594. }
  1595. if (WARN_ON(new_bytenr > 0 && new_bytenr == old_bytenr)) {
  1596. ret = level;
  1597. break;
  1598. }
  1599. if (new_bytenr == 0 || old_ptr_gen > last_snapshot ||
  1600. memcmp_node_keys(parent, slot, path, level)) {
  1601. if (level <= lowest_level) {
  1602. ret = 0;
  1603. break;
  1604. }
  1605. eb = read_tree_block(dest, old_bytenr, blocksize,
  1606. old_ptr_gen);
  1607. if (!eb || !extent_buffer_uptodate(eb)) {
  1608. ret = (!eb) ? -ENOMEM : -EIO;
  1609. free_extent_buffer(eb);
  1610. break;
  1611. }
  1612. btrfs_tree_lock(eb);
  1613. if (cow) {
  1614. ret = btrfs_cow_block(trans, dest, eb, parent,
  1615. slot, &eb);
  1616. BUG_ON(ret);
  1617. }
  1618. btrfs_set_lock_blocking(eb);
  1619. btrfs_tree_unlock(parent);
  1620. free_extent_buffer(parent);
  1621. parent = eb;
  1622. continue;
  1623. }
  1624. if (!cow) {
  1625. btrfs_tree_unlock(parent);
  1626. free_extent_buffer(parent);
  1627. cow = 1;
  1628. goto again;
  1629. }
  1630. btrfs_node_key_to_cpu(path->nodes[level], &key,
  1631. path->slots[level]);
  1632. btrfs_release_path(path);
  1633. path->lowest_level = level;
  1634. ret = btrfs_search_slot(trans, src, &key, path, 0, 1);
  1635. path->lowest_level = 0;
  1636. BUG_ON(ret);
  1637. /*
  1638. * swap blocks in fs tree and reloc tree.
  1639. */
  1640. btrfs_set_node_blockptr(parent, slot, new_bytenr);
  1641. btrfs_set_node_ptr_generation(parent, slot, new_ptr_gen);
  1642. btrfs_mark_buffer_dirty(parent);
  1643. btrfs_set_node_blockptr(path->nodes[level],
  1644. path->slots[level], old_bytenr);
  1645. btrfs_set_node_ptr_generation(path->nodes[level],
  1646. path->slots[level], old_ptr_gen);
  1647. btrfs_mark_buffer_dirty(path->nodes[level]);
  1648. ret = btrfs_inc_extent_ref(trans, src, old_bytenr, blocksize,
  1649. path->nodes[level]->start,
  1650. src->root_key.objectid, level - 1, 0,
  1651. 1);
  1652. BUG_ON(ret);
  1653. ret = btrfs_inc_extent_ref(trans, dest, new_bytenr, blocksize,
  1654. 0, dest->root_key.objectid, level - 1,
  1655. 0, 1);
  1656. BUG_ON(ret);
  1657. ret = btrfs_free_extent(trans, src, new_bytenr, blocksize,
  1658. path->nodes[level]->start,
  1659. src->root_key.objectid, level - 1, 0,
  1660. 1);
  1661. BUG_ON(ret);
  1662. ret = btrfs_free_extent(trans, dest, old_bytenr, blocksize,
  1663. 0, dest->root_key.objectid, level - 1,
  1664. 0, 1);
  1665. BUG_ON(ret);
  1666. btrfs_unlock_up_safe(path, 0);
  1667. ret = level;
  1668. break;
  1669. }
  1670. btrfs_tree_unlock(parent);
  1671. free_extent_buffer(parent);
  1672. return ret;
  1673. }
  1674. /*
  1675. * helper to find next relocated block in reloc tree
  1676. */
  1677. static noinline_for_stack
  1678. int walk_up_reloc_tree(struct btrfs_root *root, struct btrfs_path *path,
  1679. int *level)
  1680. {
  1681. struct extent_buffer *eb;
  1682. int i;
  1683. u64 last_snapshot;
  1684. u32 nritems;
  1685. last_snapshot = btrfs_root_last_snapshot(&root->root_item);
  1686. for (i = 0; i < *level; i++) {
  1687. free_extent_buffer(path->nodes[i]);
  1688. path->nodes[i] = NULL;
  1689. }
  1690. for (i = *level; i < BTRFS_MAX_LEVEL && path->nodes[i]; i++) {
  1691. eb = path->nodes[i];
  1692. nritems = btrfs_header_nritems(eb);
  1693. while (path->slots[i] + 1 < nritems) {
  1694. path->slots[i]++;
  1695. if (btrfs_node_ptr_generation(eb, path->slots[i]) <=
  1696. last_snapshot)
  1697. continue;
  1698. *level = i;
  1699. return 0;
  1700. }
  1701. free_extent_buffer(path->nodes[i]);
  1702. path->nodes[i] = NULL;
  1703. }
  1704. return 1;
  1705. }
  1706. /*
  1707. * walk down reloc tree to find relocated block of lowest level
  1708. */
  1709. static noinline_for_stack
  1710. int walk_down_reloc_tree(struct btrfs_root *root, struct btrfs_path *path,
  1711. int *level)
  1712. {
  1713. struct extent_buffer *eb = NULL;
  1714. int i;
  1715. u64 bytenr;
  1716. u64 ptr_gen = 0;
  1717. u64 last_snapshot;
  1718. u32 blocksize;
  1719. u32 nritems;
  1720. last_snapshot = btrfs_root_last_snapshot(&root->root_item);
  1721. for (i = *level; i > 0; i--) {
  1722. eb = path->nodes[i];
  1723. nritems = btrfs_header_nritems(eb);
  1724. while (path->slots[i] < nritems) {
  1725. ptr_gen = btrfs_node_ptr_generation(eb, path->slots[i]);
  1726. if (ptr_gen > last_snapshot)
  1727. break;
  1728. path->slots[i]++;
  1729. }
  1730. if (path->slots[i] >= nritems) {
  1731. if (i == *level)
  1732. break;
  1733. *level = i + 1;
  1734. return 0;
  1735. }
  1736. if (i == 1) {
  1737. *level = i;
  1738. return 0;
  1739. }
  1740. bytenr = btrfs_node_blockptr(eb, path->slots[i]);
  1741. blocksize = btrfs_level_size(root, i - 1);
  1742. eb = read_tree_block(root, bytenr, blocksize, ptr_gen);
  1743. if (!eb || !extent_buffer_uptodate(eb)) {
  1744. free_extent_buffer(eb);
  1745. return -EIO;
  1746. }
  1747. BUG_ON(btrfs_header_level(eb) != i - 1);
  1748. path->nodes[i - 1] = eb;
  1749. path->slots[i - 1] = 0;
  1750. }
  1751. return 1;
  1752. }
  1753. /*
  1754. * invalidate extent cache for file extents whose key in range of
  1755. * [min_key, max_key)
  1756. */
  1757. static int invalidate_extent_cache(struct btrfs_root *root,
  1758. struct btrfs_key *min_key,
  1759. struct btrfs_key *max_key)
  1760. {
  1761. struct inode *inode = NULL;
  1762. u64 objectid;
  1763. u64 start, end;
  1764. u64 ino;
  1765. objectid = min_key->objectid;
  1766. while (1) {
  1767. cond_resched();
  1768. iput(inode);
  1769. if (objectid > max_key->objectid)
  1770. break;
  1771. inode = find_next_inode(root, objectid);
  1772. if (!inode)
  1773. break;
  1774. ino = btrfs_ino(inode);
  1775. if (ino > max_key->objectid) {
  1776. iput(inode);
  1777. break;
  1778. }
  1779. objectid = ino + 1;
  1780. if (!S_ISREG(inode->i_mode))
  1781. continue;
  1782. if (unlikely(min_key->objectid == ino)) {
  1783. if (min_key->type > BTRFS_EXTENT_DATA_KEY)
  1784. continue;
  1785. if (min_key->type < BTRFS_EXTENT_DATA_KEY)
  1786. start = 0;
  1787. else {
  1788. start = min_key->offset;
  1789. WARN_ON(!IS_ALIGNED(start, root->sectorsize));
  1790. }
  1791. } else {
  1792. start = 0;
  1793. }
  1794. if (unlikely(max_key->objectid == ino)) {
  1795. if (max_key->type < BTRFS_EXTENT_DATA_KEY)
  1796. continue;
  1797. if (max_key->type > BTRFS_EXTENT_DATA_KEY) {
  1798. end = (u64)-1;
  1799. } else {
  1800. if (max_key->offset == 0)
  1801. continue;
  1802. end = max_key->offset;
  1803. WARN_ON(!IS_ALIGNED(end, root->sectorsize));
  1804. end--;
  1805. }
  1806. } else {
  1807. end = (u64)-1;
  1808. }
  1809. /* the lock_extent waits for readpage to complete */
  1810. lock_extent(&BTRFS_I(inode)->io_tree, start, end);
  1811. btrfs_drop_extent_cache(inode, start, end, 1);
  1812. unlock_extent(&BTRFS_I(inode)->io_tree, start, end);
  1813. }
  1814. return 0;
  1815. }
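/*
 * find the key of the next node pointer at or above 'level' in the
 * path. returns 0 and fills in 'key' on success, 1 if there is no
 * next key.
 */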
  1816. static int find_next_key(struct btrfs_path *path, int level,
  1817. struct btrfs_key *key)
  1818. {
  1819. while (level < BTRFS_MAX_LEVEL) {
  1820. if (!path->nodes[level])
  1821. break;
  1822. if (path->slots[level] + 1 <
  1823. btrfs_header_nritems(path->nodes[level])) {
  1824. btrfs_node_key_to_cpu(path->nodes[level], key,
  1825. path->slots[level] + 1);
  1826. return 0;
  1827. }
  1828. level++;
  1829. }
  1830. return 1;
  1831. }
/*
 * merge the relocated tree blocks in the reloc tree with the
 * corresponding fs tree.
 */
  1836. static noinline_for_stack int merge_reloc_root(struct reloc_control *rc,
  1837. struct btrfs_root *root)
  1838. {
  1839. LIST_HEAD(inode_list);
  1840. struct btrfs_key key;
  1841. struct btrfs_key next_key;
  1842. struct btrfs_trans_handle *trans = NULL;
  1843. struct btrfs_root *reloc_root;
  1844. struct btrfs_root_item *root_item;
  1845. struct btrfs_path *path;
  1846. struct extent_buffer *leaf;
  1847. int level;
  1848. int max_level;
  1849. int replaced = 0;
  1850. int ret;
  1851. int err = 0;
  1852. u32 min_reserved;
  1853. path = btrfs_alloc_path();
  1854. if (!path)
  1855. return -ENOMEM;
  1856. path->reada = 1;
  1857. reloc_root = root->reloc_root;
  1858. root_item = &reloc_root->root_item;
  1859. if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
  1860. level = btrfs_root_level(root_item);
  1861. extent_buffer_get(reloc_root->node);
  1862. path->nodes[level] = reloc_root->node;
  1863. path->slots[level] = 0;
  1864. } else {
  1865. btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
  1866. level = root_item->drop_level;
  1867. BUG_ON(level == 0);
  1868. path->lowest_level = level;
  1869. ret = btrfs_search_slot(NULL, reloc_root, &key, path, 0, 0);
  1870. path->lowest_level = 0;
  1871. if (ret < 0) {
  1872. btrfs_free_path(path);
  1873. return ret;
  1874. }
  1875. btrfs_node_key_to_cpu(path->nodes[level], &next_key,
  1876. path->slots[level]);
  1877. WARN_ON(memcmp(&key, &next_key, sizeof(key)));
  1878. btrfs_unlock_up_safe(path, 0);
  1879. }
  1880. min_reserved = root->nodesize * (BTRFS_MAX_LEVEL - 1) * 2;
  1881. memset(&next_key, 0, sizeof(next_key));
  1882. while (1) {
  1883. ret = btrfs_block_rsv_refill(root, rc->block_rsv, min_reserved,
  1884. BTRFS_RESERVE_FLUSH_ALL);
  1885. if (ret) {
  1886. err = ret;
  1887. goto out;
  1888. }
  1889. trans = btrfs_start_transaction(root, 0);
  1890. if (IS_ERR(trans)) {
  1891. err = PTR_ERR(trans);
  1892. trans = NULL;
  1893. goto out;
  1894. }
  1895. trans->block_rsv = rc->block_rsv;
  1896. replaced = 0;
  1897. max_level = level;
  1898. ret = walk_down_reloc_tree(reloc_root, path, &level);
  1899. if (ret < 0) {
  1900. err = ret;
  1901. goto out;
  1902. }
  1903. if (ret > 0)
  1904. break;
  1905. if (!find_next_key(path, level, &key) &&
  1906. btrfs_comp_cpu_keys(&next_key, &key) >= 0) {
  1907. ret = 0;
  1908. } else {
  1909. ret = replace_path(trans, root, reloc_root, path,
  1910. &next_key, level, max_level);
  1911. }
  1912. if (ret < 0) {
  1913. err = ret;
  1914. goto out;
  1915. }
  1916. if (ret > 0) {
  1917. level = ret;
  1918. btrfs_node_key_to_cpu(path->nodes[level], &key,
  1919. path->slots[level]);
  1920. replaced = 1;
  1921. }
  1922. ret = walk_up_reloc_tree(reloc_root, path, &level);
  1923. if (ret > 0)
  1924. break;
  1925. BUG_ON(level == 0);
  1926. /*
  1927. * save the merging progress in the drop_progress.
  1928. * this is OK since root refs == 1 in this case.
  1929. */
  1930. btrfs_node_key(path->nodes[level], &root_item->drop_progress,
  1931. path->slots[level]);
  1932. root_item->drop_level = level;
  1933. btrfs_end_transaction_throttle(trans, root);
  1934. trans = NULL;
  1935. btrfs_btree_balance_dirty(root);
  1936. if (replaced && rc->stage == UPDATE_DATA_PTRS)
  1937. invalidate_extent_cache(root, &key, &next_key);
  1938. }
/*
 * handle the case where only one block in the fs tree needs to be
 * relocated and that block is the tree root.
 */
  1943. leaf = btrfs_lock_root_node(root);
  1944. ret = btrfs_cow_block(trans, root, leaf, NULL, 0, &leaf);
  1945. btrfs_tree_unlock(leaf);
  1946. free_extent_buffer(leaf);
  1947. if (ret < 0)
  1948. err = ret;
  1949. out:
  1950. btrfs_free_path(path);
  1951. if (err == 0) {
  1952. memset(&root_item->drop_progress, 0,
  1953. sizeof(root_item->drop_progress));
  1954. root_item->drop_level = 0;
  1955. btrfs_set_root_refs(root_item, 0);
  1956. btrfs_update_reloc_root(trans, root);
  1957. }
  1958. if (trans)
  1959. btrfs_end_transaction_throttle(trans, root);
  1960. btrfs_btree_balance_dirty(root);
  1961. if (replaced && rc->stage == UPDATE_DATA_PTRS)
  1962. invalidate_extent_cache(root, &key, &next_key);
  1963. return err;
  1964. }
  1965. static noinline_for_stack
  1966. int prepare_to_merge(struct reloc_control *rc, int err)
  1967. {
  1968. struct btrfs_root *root = rc->extent_root;
  1969. struct btrfs_root *reloc_root;
  1970. struct btrfs_trans_handle *trans;
  1971. LIST_HEAD(reloc_roots);
  1972. u64 num_bytes = 0;
  1973. int ret;
  1974. mutex_lock(&root->fs_info->reloc_mutex);
  1975. rc->merging_rsv_size += root->nodesize * (BTRFS_MAX_LEVEL - 1) * 2;
  1976. rc->merging_rsv_size += rc->nodes_relocated * 2;
  1977. mutex_unlock(&root->fs_info->reloc_mutex);
  1978. again:
  1979. if (!err) {
  1980. num_bytes = rc->merging_rsv_size;
  1981. ret = btrfs_block_rsv_add(root, rc->block_rsv, num_bytes,
  1982. BTRFS_RESERVE_FLUSH_ALL);
  1983. if (ret)
  1984. err = ret;
  1985. }
  1986. trans = btrfs_join_transaction(rc->extent_root);
  1987. if (IS_ERR(trans)) {
  1988. if (!err)
  1989. btrfs_block_rsv_release(rc->extent_root,
  1990. rc->block_rsv, num_bytes);
  1991. return PTR_ERR(trans);
  1992. }
  1993. if (!err) {
  1994. if (num_bytes != rc->merging_rsv_size) {
  1995. btrfs_end_transaction(trans, rc->extent_root);
  1996. btrfs_block_rsv_release(rc->extent_root,
  1997. rc->block_rsv, num_bytes);
  1998. goto again;
  1999. }
  2000. }
  2001. rc->merge_reloc_tree = 1;
  2002. while (!list_empty(&rc->reloc_roots)) {
  2003. reloc_root = list_entry(rc->reloc_roots.next,
  2004. struct btrfs_root, root_list);
  2005. list_del_init(&reloc_root->root_list);
  2006. root = read_fs_root(reloc_root->fs_info,
  2007. reloc_root->root_key.offset);
  2008. BUG_ON(IS_ERR(root));
  2009. BUG_ON(root->reloc_root != reloc_root);
/*
 * set reference count to 1, so btrfs_recover_relocation
 * knows it should resume merging
 */
  2014. if (!err)
  2015. btrfs_set_root_refs(&reloc_root->root_item, 1);
  2016. btrfs_update_reloc_root(trans, root);
  2017. list_add(&reloc_root->root_list, &reloc_roots);
  2018. }
  2019. list_splice(&reloc_roots, &rc->reloc_roots);
  2020. if (!err)
  2021. btrfs_commit_transaction(trans, rc->extent_root);
  2022. else
  2023. btrfs_end_transaction(trans, rc->extent_root);
  2024. return err;
  2025. }
  2026. static noinline_for_stack
  2027. void free_reloc_roots(struct list_head *list)
  2028. {
  2029. struct btrfs_root *reloc_root;
  2030. while (!list_empty(list)) {
  2031. reloc_root = list_entry(list->next, struct btrfs_root,
  2032. root_list);
  2033. __del_reloc_root(reloc_root);
  2034. }
  2035. }
  2036. static noinline_for_stack
  2037. int merge_reloc_roots(struct reloc_control *rc)
  2038. {
  2039. struct btrfs_root *root;
  2040. struct btrfs_root *reloc_root;
  2041. u64 last_snap;
  2042. u64 otransid;
  2043. u64 objectid;
  2044. LIST_HEAD(reloc_roots);
  2045. int found = 0;
  2046. int ret = 0;
  2047. again:
  2048. root = rc->extent_root;
/*
 * this serializes us with btrfs_record_root_in_transaction;
 * we have to make sure nobody is in the middle of
 * adding their roots to the list while we are
 * doing this splice
 */
  2055. mutex_lock(&root->fs_info->reloc_mutex);
  2056. list_splice_init(&rc->reloc_roots, &reloc_roots);
  2057. mutex_unlock(&root->fs_info->reloc_mutex);
  2058. while (!list_empty(&reloc_roots)) {
  2059. found = 1;
  2060. reloc_root = list_entry(reloc_roots.next,
  2061. struct btrfs_root, root_list);
  2062. if (btrfs_root_refs(&reloc_root->root_item) > 0) {
  2063. root = read_fs_root(reloc_root->fs_info,
  2064. reloc_root->root_key.offset);
  2065. BUG_ON(IS_ERR(root));
  2066. BUG_ON(root->reloc_root != reloc_root);
  2067. ret = merge_reloc_root(rc, root);
  2068. if (ret) {
  2069. if (list_empty(&reloc_root->root_list))
  2070. list_add_tail(&reloc_root->root_list,
  2071. &reloc_roots);
  2072. goto out;
  2073. }
  2074. } else {
  2075. list_del_init(&reloc_root->root_list);
  2076. }
/*
 * we keep the old last snapshot transid in rtransid when we
 * created the relocation tree.
 */
  2081. last_snap = btrfs_root_rtransid(&reloc_root->root_item);
  2082. otransid = btrfs_root_otransid(&reloc_root->root_item);
  2083. objectid = reloc_root->root_key.offset;
  2084. ret = btrfs_drop_snapshot(reloc_root, rc->block_rsv, 0, 1);
  2085. if (ret < 0) {
  2086. if (list_empty(&reloc_root->root_list))
  2087. list_add_tail(&reloc_root->root_list,
  2088. &reloc_roots);
  2089. goto out;
  2090. }
  2091. }
  2092. if (found) {
  2093. found = 0;
  2094. goto again;
  2095. }
  2096. out:
  2097. if (ret) {
  2098. btrfs_std_error(root->fs_info, ret);
  2099. if (!list_empty(&reloc_roots))
  2100. free_reloc_roots(&reloc_roots);
  2101. /* new reloc root may be added */
  2102. mutex_lock(&root->fs_info->reloc_mutex);
  2103. list_splice_init(&rc->reloc_roots, &reloc_roots);
  2104. mutex_unlock(&root->fs_info->reloc_mutex);
  2105. if (!list_empty(&reloc_roots))
  2106. free_reloc_roots(&reloc_roots);
  2107. }
  2108. BUG_ON(!RB_EMPTY_ROOT(&rc->reloc_root_tree.rb_root));
  2109. return ret;
  2110. }
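/* free all tree_block entries of the given rb tree */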
  2111. static void free_block_list(struct rb_root *blocks)
  2112. {
  2113. struct tree_block *block;
  2114. struct rb_node *rb_node;
  2115. while ((rb_node = rb_first(blocks))) {
  2116. block = rb_entry(rb_node, struct tree_block, rb_node);
  2117. rb_erase(rb_node, blocks);
  2118. kfree(block);
  2119. }
  2120. }
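/*
 * make sure the fs tree that owns the given reloc tree has been
 * recorded in the current transaction
 */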
  2121. static int record_reloc_root_in_trans(struct btrfs_trans_handle *trans,
  2122. struct btrfs_root *reloc_root)
  2123. {
  2124. struct btrfs_root *root;
  2125. if (reloc_root->last_trans == trans->transid)
  2126. return 0;
  2127. root = read_fs_root(reloc_root->fs_info, reloc_root->root_key.offset);
  2128. BUG_ON(IS_ERR(root));
  2129. BUG_ON(root->reloc_root != reloc_root);
  2130. return btrfs_record_root_in_trans(trans, root);
  2131. }
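/*
 * walk up the backref cache from 'node' and select the reloc root
 * that should be used when updating pointers in the upper level
 * block. also sets up the backref node path used by
 * btrfs_reloc_cow_block.
 */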
  2132. static noinline_for_stack
  2133. struct btrfs_root *select_reloc_root(struct btrfs_trans_handle *trans,
  2134. struct reloc_control *rc,
  2135. struct backref_node *node,
  2136. struct backref_edge *edges[])
  2137. {
  2138. struct backref_node *next;
  2139. struct btrfs_root *root;
  2140. int index = 0;
  2141. next = node;
  2142. while (1) {
  2143. cond_resched();
  2144. next = walk_up_backref(next, edges, &index);
  2145. root = next->root;
  2146. BUG_ON(!root);
  2147. BUG_ON(!test_bit(BTRFS_ROOT_REF_COWS, &root->state));
  2148. if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
  2149. record_reloc_root_in_trans(trans, root);
  2150. break;
  2151. }
  2152. btrfs_record_root_in_trans(trans, root);
  2153. root = root->reloc_root;
  2154. if (next->new_bytenr != root->node->start) {
  2155. BUG_ON(next->new_bytenr);
  2156. BUG_ON(!list_empty(&next->list));
  2157. next->new_bytenr = root->node->start;
  2158. next->root = root;
  2159. list_add_tail(&next->list,
  2160. &rc->backref_cache.changed);
  2161. __mark_block_processed(rc, next);
  2162. break;
  2163. }
  2164. WARN_ON(1);
  2165. root = NULL;
  2166. next = walk_down_backref(edges, &index);
  2167. if (!next || next->level <= node->level)
  2168. break;
  2169. }
  2170. if (!root)
  2171. return NULL;
  2172. next = node;
  2173. /* setup backref node path for btrfs_reloc_cow_block */
  2174. while (1) {
  2175. rc->backref_cache.path[next->level] = next;
  2176. if (--index < 0)
  2177. break;
  2178. next = edges[index]->node[UPPER];
  2179. }
  2180. return root;
  2181. }
/*
 * select a tree root for relocation. return NULL if the block
 * is reference counted. we should use do_relocation() in this
 * case. return a tree root pointer if the block isn't reference
 * counted. return -ENOENT if the block is the root of a reloc tree.
 */
  2188. static noinline_for_stack
  2189. struct btrfs_root *select_one_root(struct btrfs_trans_handle *trans,
  2190. struct backref_node *node)
  2191. {
  2192. struct backref_node *next;
  2193. struct btrfs_root *root;
  2194. struct btrfs_root *fs_root = NULL;
  2195. struct backref_edge *edges[BTRFS_MAX_LEVEL - 1];
  2196. int index = 0;
  2197. next = node;
  2198. while (1) {
  2199. cond_resched();
  2200. next = walk_up_backref(next, edges, &index);
  2201. root = next->root;
  2202. BUG_ON(!root);
/* no other choice for non-reference counted trees */
  2204. if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state))
  2205. return root;
  2206. if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID)
  2207. fs_root = root;
  2208. if (next != node)
  2209. return NULL;
  2210. next = walk_down_backref(edges, &index);
  2211. if (!next || next->level <= node->level)
  2212. break;
  2213. }
  2214. if (!fs_root)
  2215. return ERR_PTR(-ENOENT);
  2216. return fs_root;
  2217. }
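/*
 * calculate how many bytes of metadata may be needed to relocate
 * 'node' and the not yet processed blocks above it
 */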
  2218. static noinline_for_stack
  2219. u64 calcu_metadata_size(struct reloc_control *rc,
  2220. struct backref_node *node, int reserve)
  2221. {
  2222. struct backref_node *next = node;
  2223. struct backref_edge *edge;
  2224. struct backref_edge *edges[BTRFS_MAX_LEVEL - 1];
  2225. u64 num_bytes = 0;
  2226. int index = 0;
  2227. BUG_ON(reserve && node->processed);
  2228. while (next) {
  2229. cond_resched();
  2230. while (1) {
  2231. if (next->processed && (reserve || next != node))
  2232. break;
  2233. num_bytes += btrfs_level_size(rc->extent_root,
  2234. next->level);
  2235. if (list_empty(&next->upper))
  2236. break;
  2237. edge = list_entry(next->upper.next,
  2238. struct backref_edge, list[LOWER]);
  2239. edges[index++] = edge;
  2240. next = edge->node[UPPER];
  2241. }
  2242. next = walk_down_backref(edges, &index);
  2243. }
  2244. return num_bytes;
  2245. }
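/*
 * reserve metadata space for relocating the subtree rooted at
 * 'node'. on -EAGAIN the size of the block reservation is enlarged
 * so there is enough space when the caller retries.
 */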
  2246. static int reserve_metadata_space(struct btrfs_trans_handle *trans,
  2247. struct reloc_control *rc,
  2248. struct backref_node *node)
  2249. {
  2250. struct btrfs_root *root = rc->extent_root;
  2251. u64 num_bytes;
  2252. int ret;
  2253. u64 tmp;
  2254. num_bytes = calcu_metadata_size(rc, node, 1) * 2;
  2255. trans->block_rsv = rc->block_rsv;
  2256. rc->reserved_bytes += num_bytes;
  2257. ret = btrfs_block_rsv_refill(root, rc->block_rsv, num_bytes,
  2258. BTRFS_RESERVE_FLUSH_ALL);
  2259. if (ret) {
  2260. if (ret == -EAGAIN) {
  2261. tmp = rc->extent_root->nodesize *
  2262. RELOCATION_RESERVED_NODES;
  2263. while (tmp <= rc->reserved_bytes)
  2264. tmp <<= 1;
/*
 * only one thread can access block_rsv at this point,
 * so we don't need to hold a lock to protect block_rsv.
 * we expand the reservation size here to allow enough
 * space for relocation and we will return earlier in
 * the enospc case.
 */
  2272. rc->block_rsv->size = tmp + rc->extent_root->nodesize *
  2273. RELOCATION_RESERVED_NODES;
  2274. }
  2275. return ret;
  2276. }
  2277. return 0;
  2278. }
  2279. /*
  2280. * relocate a block tree, and then update pointers in upper level
  2281. * blocks that reference the block to point to the new location.
  2282. *
  2283. * if called by link_to_upper, the block has already been relocated.
  2284. * in that case this function just updates pointers.
  2285. */
  2286. static int do_relocation(struct btrfs_trans_handle *trans,
  2287. struct reloc_control *rc,
  2288. struct backref_node *node,
  2289. struct btrfs_key *key,
  2290. struct btrfs_path *path, int lowest)
  2291. {
  2292. struct backref_node *upper;
  2293. struct backref_edge *edge;
  2294. struct backref_edge *edges[BTRFS_MAX_LEVEL - 1];
  2295. struct btrfs_root *root;
  2296. struct extent_buffer *eb;
  2297. u32 blocksize;
  2298. u64 bytenr;
  2299. u64 generation;
  2300. int slot;
  2301. int ret;
  2302. int err = 0;
  2303. BUG_ON(lowest && node->eb);
  2304. path->lowest_level = node->level + 1;
  2305. rc->backref_cache.path[node->level] = node;
  2306. list_for_each_entry(edge, &node->upper, list[LOWER]) {
  2307. cond_resched();
  2308. upper = edge->node[UPPER];
  2309. root = select_reloc_root(trans, rc, upper, edges);
  2310. BUG_ON(!root);
  2311. if (upper->eb && !upper->locked) {
  2312. if (!lowest) {
  2313. ret = btrfs_bin_search(upper->eb, key,
  2314. upper->level, &slot);
  2315. BUG_ON(ret);
  2316. bytenr = btrfs_node_blockptr(upper->eb, slot);
  2317. if (node->eb->start == bytenr)
  2318. goto next;
  2319. }
  2320. drop_node_buffer(upper);
  2321. }
  2322. if (!upper->eb) {
  2323. ret = btrfs_search_slot(trans, root, key, path, 0, 1);
  2324. if (ret < 0) {
  2325. err = ret;
  2326. break;
  2327. }
  2328. BUG_ON(ret > 0);
  2329. if (!upper->eb) {
  2330. upper->eb = path->nodes[upper->level];
  2331. path->nodes[upper->level] = NULL;
  2332. } else {
  2333. BUG_ON(upper->eb != path->nodes[upper->level]);
  2334. }
  2335. upper->locked = 1;
  2336. path->locks[upper->level] = 0;
  2337. slot = path->slots[upper->level];
  2338. btrfs_release_path(path);
  2339. } else {
  2340. ret = btrfs_bin_search(upper->eb, key, upper->level,
  2341. &slot);
  2342. BUG_ON(ret);
  2343. }
  2344. bytenr = btrfs_node_blockptr(upper->eb, slot);
  2345. if (lowest) {
  2346. BUG_ON(bytenr != node->bytenr);
  2347. } else {
  2348. if (node->eb->start == bytenr)
  2349. goto next;
  2350. }
  2351. blocksize = btrfs_level_size(root, node->level);
  2352. generation = btrfs_node_ptr_generation(upper->eb, slot);
  2353. eb = read_tree_block(root, bytenr, blocksize, generation);
  2354. if (!eb || !extent_buffer_uptodate(eb)) {
  2355. free_extent_buffer(eb);
  2356. err = -EIO;
  2357. goto next;
  2358. }
  2359. btrfs_tree_lock(eb);
  2360. btrfs_set_lock_blocking(eb);
  2361. if (!node->eb) {
  2362. ret = btrfs_cow_block(trans, root, eb, upper->eb,
  2363. slot, &eb);
  2364. btrfs_tree_unlock(eb);
  2365. free_extent_buffer(eb);
  2366. if (ret < 0) {
  2367. err = ret;
  2368. goto next;
  2369. }
  2370. BUG_ON(node->eb != eb);
  2371. } else {
  2372. btrfs_set_node_blockptr(upper->eb, slot,
  2373. node->eb->start);
  2374. btrfs_set_node_ptr_generation(upper->eb, slot,
  2375. trans->transid);
  2376. btrfs_mark_buffer_dirty(upper->eb);
  2377. ret = btrfs_inc_extent_ref(trans, root,
  2378. node->eb->start, blocksize,
  2379. upper->eb->start,
  2380. btrfs_header_owner(upper->eb),
  2381. node->level, 0, 1);
  2382. BUG_ON(ret);
  2383. ret = btrfs_drop_subtree(trans, root, eb, upper->eb);
  2384. BUG_ON(ret);
  2385. }
  2386. next:
  2387. if (!upper->pending)
  2388. drop_node_buffer(upper);
  2389. else
  2390. unlock_node_buffer(upper);
  2391. if (err)
  2392. break;
  2393. }
  2394. if (!err && node->pending) {
  2395. drop_node_buffer(node);
  2396. list_move_tail(&node->list, &rc->backref_cache.changed);
  2397. node->pending = 0;
  2398. }
  2399. path->lowest_level = 0;
  2400. BUG_ON(err == -ENOSPC);
  2401. return err;
  2402. }
  2403. static int link_to_upper(struct btrfs_trans_handle *trans,
  2404. struct reloc_control *rc,
  2405. struct backref_node *node,
  2406. struct btrfs_path *path)
  2407. {
  2408. struct btrfs_key key;
  2409. btrfs_node_key_to_cpu(node->eb, &key, 0);
  2410. return do_relocation(trans, rc, node, &key, path, 0);
  2411. }
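/*
 * update pointers in the upper level blocks for all pending backref
 * nodes once the blocks they track have been relocated
 */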
  2412. static int finish_pending_nodes(struct btrfs_trans_handle *trans,
  2413. struct reloc_control *rc,
  2414. struct btrfs_path *path, int err)
  2415. {
  2416. LIST_HEAD(list);
  2417. struct backref_cache *cache = &rc->backref_cache;
  2418. struct backref_node *node;
  2419. int level;
  2420. int ret;
  2421. for (level = 0; level < BTRFS_MAX_LEVEL; level++) {
  2422. while (!list_empty(&cache->pending[level])) {
  2423. node = list_entry(cache->pending[level].next,
  2424. struct backref_node, list);
  2425. list_move_tail(&node->list, &list);
  2426. BUG_ON(!node->pending);
  2427. if (!err) {
  2428. ret = link_to_upper(trans, rc, node, path);
  2429. if (ret < 0)
  2430. err = ret;
  2431. }
  2432. }
  2433. list_splice_init(&list, &cache->pending[level]);
  2434. }
  2435. return err;
  2436. }
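/* mark the byte range of a tree block as processed */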
  2437. static void mark_block_processed(struct reloc_control *rc,
  2438. u64 bytenr, u32 blocksize)
  2439. {
  2440. set_extent_bits(&rc->processed_blocks, bytenr, bytenr + blocksize - 1,
  2441. EXTENT_DIRTY, GFP_NOFS);
  2442. }
  2443. static void __mark_block_processed(struct reloc_control *rc,
  2444. struct backref_node *node)
  2445. {
  2446. u32 blocksize;
  2447. if (node->level == 0 ||
  2448. in_block_group(node->bytenr, rc->block_group)) {
  2449. blocksize = btrfs_level_size(rc->extent_root, node->level);
  2450. mark_block_processed(rc, node->bytenr, blocksize);
  2451. }
  2452. node->processed = 1;
  2453. }
/*
 * mark a block and all blocks that directly/indirectly reference
 * it as processed.
 */
  2458. static void update_processed_blocks(struct reloc_control *rc,
  2459. struct backref_node *node)
  2460. {
  2461. struct backref_node *next = node;
  2462. struct backref_edge *edge;
  2463. struct backref_edge *edges[BTRFS_MAX_LEVEL - 1];
  2464. int index = 0;
  2465. while (next) {
  2466. cond_resched();
  2467. while (1) {
  2468. if (next->processed)
  2469. break;
  2470. __mark_block_processed(rc, next);
  2471. if (list_empty(&next->upper))
  2472. break;
  2473. edge = list_entry(next->upper.next,
  2474. struct backref_edge, list[LOWER]);
  2475. edges[index++] = edge;
  2476. next = edge->node[UPPER];
  2477. }
  2478. next = walk_down_backref(edges, &index);
  2479. }
  2480. }
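/* check whether a tree block has already been processed */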
  2481. static int tree_block_processed(u64 bytenr, u32 blocksize,
  2482. struct reloc_control *rc)
  2483. {
  2484. if (test_range_bit(&rc->processed_blocks, bytenr,
  2485. bytenr + blocksize - 1, EXTENT_DIRTY, 1, NULL))
  2486. return 1;
  2487. return 0;
  2488. }
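/* read a tree block and save its first key in block->key */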
  2489. static int get_tree_block_key(struct reloc_control *rc,
  2490. struct tree_block *block)
  2491. {
  2492. struct extent_buffer *eb;
  2493. BUG_ON(block->key_ready);
  2494. eb = read_tree_block(rc->extent_root, block->bytenr,
  2495. block->key.objectid, block->key.offset);
  2496. if (!eb || !extent_buffer_uptodate(eb)) {
  2497. free_extent_buffer(eb);
  2498. return -EIO;
  2499. }
  2500. WARN_ON(btrfs_header_level(eb) != block->level);
  2501. if (block->level == 0)
  2502. btrfs_item_key_to_cpu(eb, &block->key, 0);
  2503. else
  2504. btrfs_node_key_to_cpu(eb, &block->key, 0);
  2505. free_extent_buffer(eb);
  2506. block->key_ready = 1;
  2507. return 0;
  2508. }
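/* start readahead for a tree block we are about to process */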
  2509. static int reada_tree_block(struct reloc_control *rc,
  2510. struct tree_block *block)
  2511. {
  2512. BUG_ON(block->key_ready);
  2513. if (block->key.type == BTRFS_METADATA_ITEM_KEY)
  2514. readahead_tree_block(rc->extent_root, block->bytenr,
  2515. block->key.objectid,
  2516. rc->extent_root->leafsize);
  2517. else
  2518. readahead_tree_block(rc->extent_root, block->bytenr,
  2519. block->key.objectid, block->key.offset);
  2520. return 0;
  2521. }
  2522. /*
  2523. * helper function to relocate a tree block
  2524. */
  2525. static int relocate_tree_block(struct btrfs_trans_handle *trans,
  2526. struct reloc_control *rc,
  2527. struct backref_node *node,
  2528. struct btrfs_key *key,
  2529. struct btrfs_path *path)
  2530. {
  2531. struct btrfs_root *root;
  2532. int ret = 0;
  2533. if (!node)
  2534. return 0;
  2535. BUG_ON(node->processed);
  2536. root = select_one_root(trans, node);
  2537. if (root == ERR_PTR(-ENOENT)) {
  2538. update_processed_blocks(rc, node);
  2539. goto out;
  2540. }
  2541. if (!root || test_bit(BTRFS_ROOT_REF_COWS, &root->state)) {
  2542. ret = reserve_metadata_space(trans, rc, node);
  2543. if (ret)
  2544. goto out;
  2545. }
  2546. if (root) {
  2547. if (test_bit(BTRFS_ROOT_REF_COWS, &root->state)) {
  2548. BUG_ON(node->new_bytenr);
  2549. BUG_ON(!list_empty(&node->list));
  2550. btrfs_record_root_in_trans(trans, root);
  2551. root = root->reloc_root;
  2552. node->new_bytenr = root->node->start;
  2553. node->root = root;
  2554. list_add_tail(&node->list, &rc->backref_cache.changed);
  2555. } else {
  2556. path->lowest_level = node->level;
  2557. ret = btrfs_search_slot(trans, root, key, path, 0, 1);
  2558. btrfs_release_path(path);
  2559. if (ret > 0)
  2560. ret = 0;
  2561. }
  2562. if (!ret)
  2563. update_processed_blocks(rc, node);
  2564. } else {
  2565. ret = do_relocation(trans, rc, node, key, path, 1);
  2566. }
  2567. out:
  2568. if (ret || node->level == 0 || node->cowonly)
  2569. remove_backref_node(&rc->backref_cache, node);
  2570. return ret;
  2571. }
  2572. /*
  2573. * relocate a list of blocks
  2574. */
  2575. static noinline_for_stack
  2576. int relocate_tree_blocks(struct btrfs_trans_handle *trans,
  2577. struct reloc_control *rc, struct rb_root *blocks)
  2578. {
  2579. struct backref_node *node;
  2580. struct btrfs_path *path;
  2581. struct tree_block *block;
  2582. struct rb_node *rb_node;
  2583. int ret;
  2584. int err = 0;
  2585. path = btrfs_alloc_path();
  2586. if (!path) {
  2587. err = -ENOMEM;
  2588. goto out_free_blocks;
  2589. }
  2590. rb_node = rb_first(blocks);
  2591. while (rb_node) {
  2592. block = rb_entry(rb_node, struct tree_block, rb_node);
  2593. if (!block->key_ready)
  2594. reada_tree_block(rc, block);
  2595. rb_node = rb_next(rb_node);
  2596. }
  2597. rb_node = rb_first(blocks);
  2598. while (rb_node) {
  2599. block = rb_entry(rb_node, struct tree_block, rb_node);
  2600. if (!block->key_ready) {
  2601. err = get_tree_block_key(rc, block);
  2602. if (err)
  2603. goto out_free_path;
  2604. }
  2605. rb_node = rb_next(rb_node);
  2606. }
  2607. rb_node = rb_first(blocks);
  2608. while (rb_node) {
  2609. block = rb_entry(rb_node, struct tree_block, rb_node);
  2610. node = build_backref_tree(rc, &block->key,
  2611. block->level, block->bytenr);
  2612. if (IS_ERR(node)) {
  2613. err = PTR_ERR(node);
  2614. goto out;
  2615. }
  2616. ret = relocate_tree_block(trans, rc, node, &block->key,
  2617. path);
  2618. if (ret < 0) {
  2619. if (ret != -EAGAIN || rb_node == rb_first(blocks))
  2620. err = ret;
  2621. goto out;
  2622. }
  2623. rb_node = rb_next(rb_node);
  2624. }
  2625. out:
  2626. err = finish_pending_nodes(trans, rc, path, err);
  2627. out_free_path:
  2628. btrfs_free_path(path);
  2629. out_free_blocks:
  2630. free_block_list(blocks);
  2631. return err;
  2632. }
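/*
 * preallocate file extents in the data relocation inode that cover
 * the extents of the cluster being relocated
 */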
  2633. static noinline_for_stack
  2634. int prealloc_file_extent_cluster(struct inode *inode,
  2635. struct file_extent_cluster *cluster)
  2636. {
  2637. u64 alloc_hint = 0;
  2638. u64 start;
  2639. u64 end;
  2640. u64 offset = BTRFS_I(inode)->index_cnt;
  2641. u64 num_bytes;
  2642. int nr = 0;
  2643. int ret = 0;
  2644. BUG_ON(cluster->start != cluster->boundary[0]);
  2645. mutex_lock(&inode->i_mutex);
  2646. ret = btrfs_check_data_free_space(inode, cluster->end +
  2647. 1 - cluster->start);
  2648. if (ret)
  2649. goto out;
  2650. while (nr < cluster->nr) {
  2651. start = cluster->boundary[nr] - offset;
  2652. if (nr + 1 < cluster->nr)
  2653. end = cluster->boundary[nr + 1] - 1 - offset;
  2654. else
  2655. end = cluster->end - offset;
  2656. lock_extent(&BTRFS_I(inode)->io_tree, start, end);
  2657. num_bytes = end + 1 - start;
  2658. ret = btrfs_prealloc_file_range(inode, 0, start,
  2659. num_bytes, num_bytes,
  2660. end + 1, &alloc_hint);
  2661. unlock_extent(&BTRFS_I(inode)->io_tree, start, end);
  2662. if (ret)
  2663. break;
  2664. nr++;
  2665. }
  2666. btrfs_free_reserved_data_space(inode, cluster->end +
  2667. 1 - cluster->start);
  2668. out:
  2669. mutex_unlock(&inode->i_mutex);
  2670. return ret;
  2671. }
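/*
 * insert a pinned extent map for the given file range of the data
 * relocation inode so that reads fetch the data from its original
 * location at 'block_start'
 */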
  2672. static noinline_for_stack
  2673. int setup_extent_mapping(struct inode *inode, u64 start, u64 end,
  2674. u64 block_start)
  2675. {
  2676. struct btrfs_root *root = BTRFS_I(inode)->root;
  2677. struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
  2678. struct extent_map *em;
  2679. int ret = 0;
  2680. em = alloc_extent_map();
  2681. if (!em)
  2682. return -ENOMEM;
  2683. em->start = start;
  2684. em->len = end + 1 - start;
  2685. em->block_len = em->len;
  2686. em->block_start = block_start;
  2687. em->bdev = root->fs_info->fs_devices->latest_bdev;
  2688. set_bit(EXTENT_FLAG_PINNED, &em->flags);
  2689. lock_extent(&BTRFS_I(inode)->io_tree, start, end);
  2690. while (1) {
  2691. write_lock(&em_tree->lock);
  2692. ret = add_extent_mapping(em_tree, em, 0);
  2693. write_unlock(&em_tree->lock);
  2694. if (ret != -EEXIST) {
  2695. free_extent_map(em);
  2696. break;
  2697. }
  2698. btrfs_drop_extent_cache(inode, start, end, 0);
  2699. }
  2700. unlock_extent(&BTRFS_I(inode)->io_tree, start, end);
  2701. return ret;
  2702. }
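/*
 * copy the data of a file extent cluster into the data relocation
 * inode by reading each page and marking it dirty, so writeback
 * stores it at the preallocated location
 */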
  2703. static int relocate_file_extent_cluster(struct inode *inode,
  2704. struct file_extent_cluster *cluster)
  2705. {
  2706. u64 page_start;
  2707. u64 page_end;
  2708. u64 offset = BTRFS_I(inode)->index_cnt;
  2709. unsigned long index;
  2710. unsigned long last_index;
  2711. struct page *page;
  2712. struct file_ra_state *ra;
  2713. gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
  2714. int nr = 0;
  2715. int ret = 0;
  2716. if (!cluster->nr)
  2717. return 0;
  2718. ra = kzalloc(sizeof(*ra), GFP_NOFS);
  2719. if (!ra)
  2720. return -ENOMEM;
  2721. ret = prealloc_file_extent_cluster(inode, cluster);
  2722. if (ret)
  2723. goto out;
  2724. file_ra_state_init(ra, inode->i_mapping);
  2725. ret = setup_extent_mapping(inode, cluster->start - offset,
  2726. cluster->end - offset, cluster->start);
  2727. if (ret)
  2728. goto out;
  2729. index = (cluster->start - offset) >> PAGE_CACHE_SHIFT;
  2730. last_index = (cluster->end - offset) >> PAGE_CACHE_SHIFT;
  2731. while (index <= last_index) {
  2732. ret = btrfs_delalloc_reserve_metadata(inode, PAGE_CACHE_SIZE);
  2733. if (ret)
  2734. goto out;
  2735. page = find_lock_page(inode->i_mapping, index);
  2736. if (!page) {
  2737. page_cache_sync_readahead(inode->i_mapping,
  2738. ra, NULL, index,
  2739. last_index + 1 - index);
  2740. page = find_or_create_page(inode->i_mapping, index,
  2741. mask);
  2742. if (!page) {
  2743. btrfs_delalloc_release_metadata(inode,
  2744. PAGE_CACHE_SIZE);
  2745. ret = -ENOMEM;
  2746. goto out;
  2747. }
  2748. }
  2749. if (PageReadahead(page)) {
  2750. page_cache_async_readahead(inode->i_mapping,
  2751. ra, NULL, page, index,
  2752. last_index + 1 - index);
  2753. }
  2754. if (!PageUptodate(page)) {
  2755. btrfs_readpage(NULL, page);
  2756. lock_page(page);
  2757. if (!PageUptodate(page)) {
  2758. unlock_page(page);
  2759. page_cache_release(page);
  2760. btrfs_delalloc_release_metadata(inode,
  2761. PAGE_CACHE_SIZE);
  2762. ret = -EIO;
  2763. goto out;
  2764. }
  2765. }
  2766. page_start = page_offset(page);
  2767. page_end = page_start + PAGE_CACHE_SIZE - 1;
  2768. lock_extent(&BTRFS_I(inode)->io_tree, page_start, page_end);
  2769. set_page_extent_mapped(page);
  2770. if (nr < cluster->nr &&
  2771. page_start + offset == cluster->boundary[nr]) {
  2772. set_extent_bits(&BTRFS_I(inode)->io_tree,
  2773. page_start, page_end,
  2774. EXTENT_BOUNDARY, GFP_NOFS);
  2775. nr++;
  2776. }
  2777. btrfs_set_extent_delalloc(inode, page_start, page_end, NULL);
  2778. set_page_dirty(page);
  2779. unlock_extent(&BTRFS_I(inode)->io_tree,
  2780. page_start, page_end);
  2781. unlock_page(page);
  2782. page_cache_release(page);
  2783. index++;
  2784. balance_dirty_pages_ratelimited(inode->i_mapping);
  2785. btrfs_throttle(BTRFS_I(inode)->root);
  2786. }
  2787. WARN_ON(nr != cluster->nr);
  2788. out:
  2789. kfree(ra);
  2790. return ret;
  2791. }
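/*
 * add a data extent to the current cluster. the cluster is written
 * out whenever it becomes full or the new extent is not contiguous
 * with it.
 */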
  2792. static noinline_for_stack
  2793. int relocate_data_extent(struct inode *inode, struct btrfs_key *extent_key,
  2794. struct file_extent_cluster *cluster)
  2795. {
  2796. int ret;
  2797. if (cluster->nr > 0 && extent_key->objectid != cluster->end + 1) {
  2798. ret = relocate_file_extent_cluster(inode, cluster);
  2799. if (ret)
  2800. return ret;
  2801. cluster->nr = 0;
  2802. }
  2803. if (!cluster->nr)
  2804. cluster->start = extent_key->objectid;
  2805. else
  2806. BUG_ON(cluster->nr >= MAX_EXTENTS);
  2807. cluster->end = extent_key->objectid + extent_key->offset - 1;
  2808. cluster->boundary[cluster->nr] = extent_key->objectid;
  2809. cluster->nr++;
  2810. if (cluster->nr >= MAX_EXTENTS) {
  2811. ret = relocate_file_extent_cluster(inode, cluster);
  2812. if (ret)
  2813. return ret;
  2814. cluster->nr = 0;
  2815. }
  2816. return 0;
  2817. }
  2818. #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
  2819. static int get_ref_objectid_v0(struct reloc_control *rc,
  2820. struct btrfs_path *path,
  2821. struct btrfs_key *extent_key,
  2822. u64 *ref_objectid, int *path_change)
  2823. {
  2824. struct btrfs_key key;
  2825. struct extent_buffer *leaf;
  2826. struct btrfs_extent_ref_v0 *ref0;
  2827. int ret;
  2828. int slot;
  2829. leaf = path->nodes[0];
  2830. slot = path->slots[0];
  2831. while (1) {
  2832. if (slot >= btrfs_header_nritems(leaf)) {
  2833. ret = btrfs_next_leaf(rc->extent_root, path);
  2834. if (ret < 0)
  2835. return ret;
  2836. BUG_ON(ret > 0);
  2837. leaf = path->nodes[0];
  2838. slot = path->slots[0];
  2839. if (path_change)
  2840. *path_change = 1;
  2841. }
  2842. btrfs_item_key_to_cpu(leaf, &key, slot);
  2843. if (key.objectid != extent_key->objectid)
  2844. return -ENOENT;
  2845. if (key.type != BTRFS_EXTENT_REF_V0_KEY) {
  2846. slot++;
  2847. continue;
  2848. }
  2849. ref0 = btrfs_item_ptr(leaf, slot,
  2850. struct btrfs_extent_ref_v0);
  2851. *ref_objectid = btrfs_ref_objectid_v0(leaf, ref0);
  2852. break;
  2853. }
  2854. return 0;
  2855. }
  2856. #endif
  2857. /*
  2858. * helper to add a tree block to the list.
  2859. * the major work is getting the generation and level of the block
  2860. */
  2861. static int add_tree_block(struct reloc_control *rc,
  2862. struct btrfs_key *extent_key,
  2863. struct btrfs_path *path,
  2864. struct rb_root *blocks)
  2865. {
  2866. struct extent_buffer *eb;
  2867. struct btrfs_extent_item *ei;
  2868. struct btrfs_tree_block_info *bi;
  2869. struct tree_block *block;
  2870. struct rb_node *rb_node;
  2871. u32 item_size;
  2872. int level = -1;
  2873. u64 generation;
  2874. eb = path->nodes[0];
  2875. item_size = btrfs_item_size_nr(eb, path->slots[0]);
  2876. if (extent_key->type == BTRFS_METADATA_ITEM_KEY ||
  2877. item_size >= sizeof(*ei) + sizeof(*bi)) {
  2878. ei = btrfs_item_ptr(eb, path->slots[0],
  2879. struct btrfs_extent_item);
  2880. if (extent_key->type == BTRFS_EXTENT_ITEM_KEY) {
  2881. bi = (struct btrfs_tree_block_info *)(ei + 1);
  2882. level = btrfs_tree_block_level(eb, bi);
  2883. } else {
  2884. level = (int)extent_key->offset;
  2885. }
  2886. generation = btrfs_extent_generation(eb, ei);
  2887. } else {
  2888. #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
  2889. u64 ref_owner;
  2890. int ret;
  2891. BUG_ON(item_size != sizeof(struct btrfs_extent_item_v0));
  2892. ret = get_ref_objectid_v0(rc, path, extent_key,
  2893. &ref_owner, NULL);
  2894. if (ret < 0)
  2895. return ret;
  2896. BUG_ON(ref_owner >= BTRFS_MAX_LEVEL);
  2897. level = (int)ref_owner;
  2898. /* FIXME: get real generation */
  2899. generation = 0;
  2900. #else
  2901. BUG();
  2902. #endif
  2903. }
  2904. btrfs_release_path(path);
  2905. BUG_ON(level == -1);
  2906. block = kmalloc(sizeof(*block), GFP_NOFS);
  2907. if (!block)
  2908. return -ENOMEM;
  2909. block->bytenr = extent_key->objectid;
  2910. block->key.objectid = rc->extent_root->leafsize;
  2911. block->key.offset = generation;
  2912. block->level = level;
  2913. block->key_ready = 0;
  2914. rb_node = tree_insert(blocks, block->bytenr, &block->rb_node);
  2915. if (rb_node)
  2916. backref_tree_panic(rb_node, -EEXIST, block->bytenr);
  2917. return 0;
  2918. }
  2919. /*
  2920. * helper to add tree blocks for backref of type BTRFS_SHARED_DATA_REF_KEY
  2921. */
  2922. static int __add_tree_block(struct reloc_control *rc,
  2923. u64 bytenr, u32 blocksize,
  2924. struct rb_root *blocks)
  2925. {
  2926. struct btrfs_path *path;
  2927. struct btrfs_key key;
  2928. int ret;
  2929. bool skinny = btrfs_fs_incompat(rc->extent_root->fs_info,
  2930. SKINNY_METADATA);
  2931. if (tree_block_processed(bytenr, blocksize, rc))
  2932. return 0;
  2933. if (tree_search(blocks, bytenr))
  2934. return 0;
  2935. path = btrfs_alloc_path();
  2936. if (!path)
  2937. return -ENOMEM;
  2938. again:
  2939. key.objectid = bytenr;
  2940. if (skinny) {
  2941. key.type = BTRFS_METADATA_ITEM_KEY;
  2942. key.offset = (u64)-1;
  2943. } else {
  2944. key.type = BTRFS_EXTENT_ITEM_KEY;
  2945. key.offset = blocksize;
  2946. }
  2947. path->search_commit_root = 1;
  2948. path->skip_locking = 1;
  2949. ret = btrfs_search_slot(NULL, rc->extent_root, &key, path, 0, 0);
  2950. if (ret < 0)
  2951. goto out;
  2952. if (ret > 0 && skinny) {
  2953. if (path->slots[0]) {
  2954. path->slots[0]--;
  2955. btrfs_item_key_to_cpu(path->nodes[0], &key,
  2956. path->slots[0]);
  2957. if (key.objectid == bytenr &&
  2958. (key.type == BTRFS_METADATA_ITEM_KEY ||
  2959. (key.type == BTRFS_EXTENT_ITEM_KEY &&
  2960. key.offset == blocksize)))
  2961. ret = 0;
  2962. }
  2963. if (ret) {
  2964. skinny = false;
  2965. btrfs_release_path(path);
  2966. goto again;
  2967. }
  2968. }
  2969. BUG_ON(ret);
  2970. ret = add_tree_block(rc, &key, path, blocks);
  2971. out:
  2972. btrfs_free_path(path);
  2973. return ret;
  2974. }
  2975. /*
2976. * helper to check if the block uses full backrefs for the pointers in it
  2977. */
  2978. static int block_use_full_backref(struct reloc_control *rc,
  2979. struct extent_buffer *eb)
  2980. {
  2981. u64 flags;
  2982. int ret;
  2983. if (btrfs_header_flag(eb, BTRFS_HEADER_FLAG_RELOC) ||
  2984. btrfs_header_backref_rev(eb) < BTRFS_MIXED_BACKREF_REV)
  2985. return 1;
  2986. ret = btrfs_lookup_extent_info(NULL, rc->extent_root,
  2987. eb->start, btrfs_header_level(eb), 1,
  2988. NULL, &flags);
  2989. BUG_ON(ret);
  2990. if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)
  2991. ret = 1;
  2992. else
  2993. ret = 0;
  2994. return ret;
  2995. }
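/*
* helper to truncate the free space cache inode of a block group.
* if @inode is NULL it is looked up by @ino in the tree root first
*/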
  2996. static int delete_block_group_cache(struct btrfs_fs_info *fs_info,
  2997. struct inode *inode, u64 ino)
  2998. {
  2999. struct btrfs_key key;
  3000. struct btrfs_root *root = fs_info->tree_root;
  3001. struct btrfs_trans_handle *trans;
  3002. int ret = 0;
  3003. if (inode)
  3004. goto truncate;
  3005. key.objectid = ino;
  3006. key.type = BTRFS_INODE_ITEM_KEY;
  3007. key.offset = 0;
  3008. inode = btrfs_iget(fs_info->sb, &key, root, NULL);
  3009. if (IS_ERR(inode) || is_bad_inode(inode)) {
  3010. if (!IS_ERR(inode))
  3011. iput(inode);
  3012. return -ENOENT;
  3013. }
  3014. truncate:
  3015. ret = btrfs_check_trunc_cache_free_space(root,
  3016. &fs_info->global_block_rsv);
  3017. if (ret)
  3018. goto out;
  3019. trans = btrfs_join_transaction(root);
  3020. if (IS_ERR(trans)) {
  3021. ret = PTR_ERR(trans);
  3022. goto out;
  3023. }
  3024. ret = btrfs_truncate_free_space_cache(root, trans, inode);
  3025. btrfs_end_transaction(trans, root);
  3026. btrfs_btree_balance_dirty(root);
  3027. out:
  3028. iput(inode);
  3029. return ret;
  3030. }
  3031. /*
  3032. * helper to add tree blocks for backref of type BTRFS_EXTENT_DATA_REF_KEY
3033. * this function scans the fs tree to find the tree blocks that reference the data extent
  3034. */
  3035. static int find_data_references(struct reloc_control *rc,
  3036. struct btrfs_key *extent_key,
  3037. struct extent_buffer *leaf,
  3038. struct btrfs_extent_data_ref *ref,
  3039. struct rb_root *blocks)
  3040. {
  3041. struct btrfs_path *path;
  3042. struct tree_block *block;
  3043. struct btrfs_root *root;
  3044. struct btrfs_file_extent_item *fi;
  3045. struct rb_node *rb_node;
  3046. struct btrfs_key key;
  3047. u64 ref_root;
  3048. u64 ref_objectid;
  3049. u64 ref_offset;
  3050. u32 ref_count;
  3051. u32 nritems;
  3052. int err = 0;
  3053. int added = 0;
  3054. int counted;
  3055. int ret;
  3056. ref_root = btrfs_extent_data_ref_root(leaf, ref);
  3057. ref_objectid = btrfs_extent_data_ref_objectid(leaf, ref);
  3058. ref_offset = btrfs_extent_data_ref_offset(leaf, ref);
  3059. ref_count = btrfs_extent_data_ref_count(leaf, ref);
  3060. /*
3061. * This is an extent belonging to the free space cache, let's just delete
  3062. * it and redo the search.
  3063. */
  3064. if (ref_root == BTRFS_ROOT_TREE_OBJECTID) {
  3065. ret = delete_block_group_cache(rc->extent_root->fs_info,
  3066. NULL, ref_objectid);
  3067. if (ret != -ENOENT)
  3068. return ret;
  3069. ret = 0;
  3070. }
  3071. path = btrfs_alloc_path();
  3072. if (!path)
  3073. return -ENOMEM;
  3074. path->reada = 1;
  3075. root = read_fs_root(rc->extent_root->fs_info, ref_root);
  3076. if (IS_ERR(root)) {
  3077. err = PTR_ERR(root);
  3078. goto out;
  3079. }
  3080. key.objectid = ref_objectid;
  3081. key.type = BTRFS_EXTENT_DATA_KEY;
  3082. if (ref_offset > ((u64)-1 << 32))
  3083. key.offset = 0;
  3084. else
  3085. key.offset = ref_offset;
  3086. path->search_commit_root = 1;
  3087. path->skip_locking = 1;
  3088. ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
  3089. if (ret < 0) {
  3090. err = ret;
  3091. goto out;
  3092. }
  3093. leaf = path->nodes[0];
  3094. nritems = btrfs_header_nritems(leaf);
  3095. /*
  3096. * the references in tree blocks that use full backrefs
3097. * are not counted here
  3098. */
  3099. if (block_use_full_backref(rc, leaf))
  3100. counted = 0;
  3101. else
  3102. counted = 1;
  3103. rb_node = tree_search(blocks, leaf->start);
  3104. if (rb_node) {
  3105. if (counted)
  3106. added = 1;
  3107. else
  3108. path->slots[0] = nritems;
  3109. }
  3110. while (ref_count > 0) {
  3111. while (path->slots[0] >= nritems) {
  3112. ret = btrfs_next_leaf(root, path);
  3113. if (ret < 0) {
  3114. err = ret;
  3115. goto out;
  3116. }
  3117. if (WARN_ON(ret > 0))
  3118. goto out;
  3119. leaf = path->nodes[0];
  3120. nritems = btrfs_header_nritems(leaf);
  3121. added = 0;
  3122. if (block_use_full_backref(rc, leaf))
  3123. counted = 0;
  3124. else
  3125. counted = 1;
  3126. rb_node = tree_search(blocks, leaf->start);
  3127. if (rb_node) {
  3128. if (counted)
  3129. added = 1;
  3130. else
  3131. path->slots[0] = nritems;
  3132. }
  3133. }
  3134. btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
  3135. if (WARN_ON(key.objectid != ref_objectid ||
  3136. key.type != BTRFS_EXTENT_DATA_KEY))
  3137. break;
  3138. fi = btrfs_item_ptr(leaf, path->slots[0],
  3139. struct btrfs_file_extent_item);
  3140. if (btrfs_file_extent_type(leaf, fi) ==
  3141. BTRFS_FILE_EXTENT_INLINE)
  3142. goto next;
  3143. if (btrfs_file_extent_disk_bytenr(leaf, fi) !=
  3144. extent_key->objectid)
  3145. goto next;
  3146. key.offset -= btrfs_file_extent_offset(leaf, fi);
  3147. if (key.offset != ref_offset)
  3148. goto next;
  3149. if (counted)
  3150. ref_count--;
  3151. if (added)
  3152. goto next;
  3153. if (!tree_block_processed(leaf->start, leaf->len, rc)) {
  3154. block = kmalloc(sizeof(*block), GFP_NOFS);
  3155. if (!block) {
  3156. err = -ENOMEM;
  3157. break;
  3158. }
  3159. block->bytenr = leaf->start;
  3160. btrfs_item_key_to_cpu(leaf, &block->key, 0);
  3161. block->level = 0;
  3162. block->key_ready = 1;
  3163. rb_node = tree_insert(blocks, block->bytenr,
  3164. &block->rb_node);
  3165. if (rb_node)
  3166. backref_tree_panic(rb_node, -EEXIST,
  3167. block->bytenr);
  3168. }
  3169. if (counted)
  3170. added = 1;
  3171. else
  3172. path->slots[0] = nritems;
  3173. next:
  3174. path->slots[0]++;
  3175. }
  3176. out:
  3177. btrfs_free_path(path);
  3178. return err;
  3179. }
  3180. /*
  3181. * helper to find all tree blocks that reference a given data extent
  3182. */
  3183. static noinline_for_stack
  3184. int add_data_references(struct reloc_control *rc,
  3185. struct btrfs_key *extent_key,
  3186. struct btrfs_path *path,
  3187. struct rb_root *blocks)
  3188. {
  3189. struct btrfs_key key;
  3190. struct extent_buffer *eb;
  3191. struct btrfs_extent_data_ref *dref;
  3192. struct btrfs_extent_inline_ref *iref;
  3193. unsigned long ptr;
  3194. unsigned long end;
  3195. u32 blocksize = btrfs_level_size(rc->extent_root, 0);
  3196. int ret = 0;
  3197. int err = 0;
  3198. eb = path->nodes[0];
  3199. ptr = btrfs_item_ptr_offset(eb, path->slots[0]);
  3200. end = ptr + btrfs_item_size_nr(eb, path->slots[0]);
  3201. #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
  3202. if (ptr + sizeof(struct btrfs_extent_item_v0) == end)
  3203. ptr = end;
  3204. else
  3205. #endif
  3206. ptr += sizeof(struct btrfs_extent_item);
  3207. while (ptr < end) {
  3208. iref = (struct btrfs_extent_inline_ref *)ptr;
  3209. key.type = btrfs_extent_inline_ref_type(eb, iref);
  3210. if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
  3211. key.offset = btrfs_extent_inline_ref_offset(eb, iref);
  3212. ret = __add_tree_block(rc, key.offset, blocksize,
  3213. blocks);
  3214. } else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
  3215. dref = (struct btrfs_extent_data_ref *)(&iref->offset);
  3216. ret = find_data_references(rc, extent_key,
  3217. eb, dref, blocks);
  3218. } else {
  3219. BUG();
  3220. }
  3221. if (ret) {
  3222. err = ret;
  3223. goto out;
  3224. }
  3225. ptr += btrfs_extent_inline_ref_size(key.type);
  3226. }
  3227. WARN_ON(ptr > end);
  3228. while (1) {
  3229. cond_resched();
  3230. eb = path->nodes[0];
  3231. if (path->slots[0] >= btrfs_header_nritems(eb)) {
  3232. ret = btrfs_next_leaf(rc->extent_root, path);
  3233. if (ret < 0) {
  3234. err = ret;
  3235. break;
  3236. }
  3237. if (ret > 0)
  3238. break;
  3239. eb = path->nodes[0];
  3240. }
  3241. btrfs_item_key_to_cpu(eb, &key, path->slots[0]);
  3242. if (key.objectid != extent_key->objectid)
  3243. break;
  3244. #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
  3245. if (key.type == BTRFS_SHARED_DATA_REF_KEY ||
  3246. key.type == BTRFS_EXTENT_REF_V0_KEY) {
  3247. #else
  3248. BUG_ON(key.type == BTRFS_EXTENT_REF_V0_KEY);
  3249. if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
  3250. #endif
  3251. ret = __add_tree_block(rc, key.offset, blocksize,
  3252. blocks);
  3253. } else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
  3254. dref = btrfs_item_ptr(eb, path->slots[0],
  3255. struct btrfs_extent_data_ref);
  3256. ret = find_data_references(rc, extent_key,
  3257. eb, dref, blocks);
  3258. } else {
  3259. ret = 0;
  3260. }
  3261. if (ret) {
  3262. err = ret;
  3263. break;
  3264. }
  3265. path->slots[0]++;
  3266. }
  3267. out:
  3268. btrfs_release_path(path);
  3269. if (err)
  3270. free_block_list(blocks);
  3271. return err;
  3272. }
  3273. /*
  3274. * helper to find next unprocessed extent
  3275. */
  3276. static noinline_for_stack
  3277. int find_next_extent(struct btrfs_trans_handle *trans,
  3278. struct reloc_control *rc, struct btrfs_path *path,
  3279. struct btrfs_key *extent_key)
  3280. {
  3281. struct btrfs_key key;
  3282. struct extent_buffer *leaf;
  3283. u64 start, end, last;
  3284. int ret;
  3285. last = rc->block_group->key.objectid + rc->block_group->key.offset;
  3286. while (1) {
  3287. cond_resched();
  3288. if (rc->search_start >= last) {
  3289. ret = 1;
  3290. break;
  3291. }
  3292. key.objectid = rc->search_start;
  3293. key.type = BTRFS_EXTENT_ITEM_KEY;
  3294. key.offset = 0;
  3295. path->search_commit_root = 1;
  3296. path->skip_locking = 1;
  3297. ret = btrfs_search_slot(NULL, rc->extent_root, &key, path,
  3298. 0, 0);
  3299. if (ret < 0)
  3300. break;
  3301. next:
  3302. leaf = path->nodes[0];
  3303. if (path->slots[0] >= btrfs_header_nritems(leaf)) {
  3304. ret = btrfs_next_leaf(rc->extent_root, path);
  3305. if (ret != 0)
  3306. break;
  3307. leaf = path->nodes[0];
  3308. }
  3309. btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
  3310. if (key.objectid >= last) {
  3311. ret = 1;
  3312. break;
  3313. }
  3314. if (key.type != BTRFS_EXTENT_ITEM_KEY &&
  3315. key.type != BTRFS_METADATA_ITEM_KEY) {
  3316. path->slots[0]++;
  3317. goto next;
  3318. }
  3319. if (key.type == BTRFS_EXTENT_ITEM_KEY &&
  3320. key.objectid + key.offset <= rc->search_start) {
  3321. path->slots[0]++;
  3322. goto next;
  3323. }
  3324. if (key.type == BTRFS_METADATA_ITEM_KEY &&
  3325. key.objectid + rc->extent_root->leafsize <=
  3326. rc->search_start) {
  3327. path->slots[0]++;
  3328. goto next;
  3329. }
  3330. ret = find_first_extent_bit(&rc->processed_blocks,
  3331. key.objectid, &start, &end,
  3332. EXTENT_DIRTY, NULL);
  3333. if (ret == 0 && start <= key.objectid) {
  3334. btrfs_release_path(path);
  3335. rc->search_start = end + 1;
  3336. } else {
  3337. if (key.type == BTRFS_EXTENT_ITEM_KEY)
  3338. rc->search_start = key.objectid + key.offset;
  3339. else
  3340. rc->search_start = key.objectid +
  3341. rc->extent_root->leafsize;
  3342. memcpy(extent_key, &key, sizeof(key));
  3343. return 0;
  3344. }
  3345. }
  3346. btrfs_release_path(path);
  3347. return ret;
  3348. }
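/* publish the active reloc control in fs_info under the reloc mutex */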
  3349. static void set_reloc_control(struct reloc_control *rc)
  3350. {
  3351. struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
  3352. mutex_lock(&fs_info->reloc_mutex);
  3353. fs_info->reloc_ctl = rc;
  3354. mutex_unlock(&fs_info->reloc_mutex);
  3355. }
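/* clear the active reloc control under the reloc mutex */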
  3356. static void unset_reloc_control(struct reloc_control *rc)
  3357. {
  3358. struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
  3359. mutex_lock(&fs_info->reloc_mutex);
  3360. fs_info->reloc_ctl = NULL;
  3361. mutex_unlock(&fs_info->reloc_mutex);
  3362. }
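/*
* sanity check the extent flags.  returns 1 for an invalid combination:
* DATA and TREE_BLOCK both set, neither set, or FULL_BACKREF on a data
* extent.  returns 0 if the flags look sane
*/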
  3363. static int check_extent_flags(u64 flags)
  3364. {
  3365. if ((flags & BTRFS_EXTENT_FLAG_DATA) &&
  3366. (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK))
  3367. return 1;
  3368. if (!(flags & BTRFS_EXTENT_FLAG_DATA) &&
  3369. !(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK))
  3370. return 1;
  3371. if ((flags & BTRFS_EXTENT_FLAG_DATA) &&
  3372. (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
  3373. return 1;
  3374. return 0;
  3375. }
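/*
* set up a relocation pass: allocate the temporary block reservation,
* reset the per-pass counters, publish the reloc control and commit a
* transaction before the extent scan starts
*/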
  3376. static noinline_for_stack
  3377. int prepare_to_relocate(struct reloc_control *rc)
  3378. {
  3379. struct btrfs_trans_handle *trans;
  3380. rc->block_rsv = btrfs_alloc_block_rsv(rc->extent_root,
  3381. BTRFS_BLOCK_RSV_TEMP);
  3382. if (!rc->block_rsv)
  3383. return -ENOMEM;
  3384. memset(&rc->cluster, 0, sizeof(rc->cluster));
  3385. rc->search_start = rc->block_group->key.objectid;
  3386. rc->extents_found = 0;
  3387. rc->nodes_relocated = 0;
  3388. rc->merging_rsv_size = 0;
  3389. rc->reserved_bytes = 0;
  3390. rc->block_rsv->size = rc->extent_root->nodesize *
  3391. RELOCATION_RESERVED_NODES;
  3392. rc->create_reloc_tree = 1;
  3393. set_reloc_control(rc);
  3394. trans = btrfs_join_transaction(rc->extent_root);
  3395. if (IS_ERR(trans)) {
  3396. unset_reloc_control(rc);
  3397. /*
3398. * the extent tree is not a ref_cow tree and has no reloc_root to
3399. * clean up, and callers are responsible for freeing the block
3400. * rsv allocated above.
  3401. */
  3402. return PTR_ERR(trans);
  3403. }
  3404. btrfs_commit_transaction(trans, rc->extent_root);
  3405. return 0;
  3406. }
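/*
* the main relocation loop: walk the extents of the block group, collect
* the tree blocks that reference them, relocate those blocks and hand
* data extents over to the file extent cluster code.  when the scan is
* done the reloc trees are merged back into the fs trees
*/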
  3407. static noinline_for_stack int relocate_block_group(struct reloc_control *rc)
  3408. {
  3409. struct rb_root blocks = RB_ROOT;
  3410. struct btrfs_key key;
  3411. struct btrfs_trans_handle *trans = NULL;
  3412. struct btrfs_path *path;
  3413. struct btrfs_extent_item *ei;
  3414. u64 flags;
  3415. u32 item_size;
  3416. int ret;
  3417. int err = 0;
  3418. int progress = 0;
  3419. path = btrfs_alloc_path();
  3420. if (!path)
  3421. return -ENOMEM;
  3422. path->reada = 1;
  3423. ret = prepare_to_relocate(rc);
  3424. if (ret) {
  3425. err = ret;
  3426. goto out_free;
  3427. }
  3428. while (1) {
  3429. rc->reserved_bytes = 0;
  3430. ret = btrfs_block_rsv_refill(rc->extent_root,
  3431. rc->block_rsv, rc->block_rsv->size,
  3432. BTRFS_RESERVE_FLUSH_ALL);
  3433. if (ret) {
  3434. err = ret;
  3435. break;
  3436. }
  3437. progress++;
  3438. trans = btrfs_start_transaction(rc->extent_root, 0);
  3439. if (IS_ERR(trans)) {
  3440. err = PTR_ERR(trans);
  3441. trans = NULL;
  3442. break;
  3443. }
  3444. restart:
  3445. if (update_backref_cache(trans, &rc->backref_cache)) {
  3446. btrfs_end_transaction(trans, rc->extent_root);
  3447. continue;
  3448. }
  3449. ret = find_next_extent(trans, rc, path, &key);
  3450. if (ret < 0)
  3451. err = ret;
  3452. if (ret != 0)
  3453. break;
  3454. rc->extents_found++;
  3455. ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
  3456. struct btrfs_extent_item);
  3457. item_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]);
  3458. if (item_size >= sizeof(*ei)) {
  3459. flags = btrfs_extent_flags(path->nodes[0], ei);
  3460. ret = check_extent_flags(flags);
  3461. BUG_ON(ret);
  3462. } else {
  3463. #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
  3464. u64 ref_owner;
  3465. int path_change = 0;
  3466. BUG_ON(item_size !=
  3467. sizeof(struct btrfs_extent_item_v0));
  3468. ret = get_ref_objectid_v0(rc, path, &key, &ref_owner,
  3469. &path_change);
  3470. if (ref_owner < BTRFS_FIRST_FREE_OBJECTID)
  3471. flags = BTRFS_EXTENT_FLAG_TREE_BLOCK;
  3472. else
  3473. flags = BTRFS_EXTENT_FLAG_DATA;
  3474. if (path_change) {
  3475. btrfs_release_path(path);
  3476. path->search_commit_root = 1;
  3477. path->skip_locking = 1;
  3478. ret = btrfs_search_slot(NULL, rc->extent_root,
  3479. &key, path, 0, 0);
  3480. if (ret < 0) {
  3481. err = ret;
  3482. break;
  3483. }
  3484. BUG_ON(ret > 0);
  3485. }
  3486. #else
  3487. BUG();
  3488. #endif
  3489. }
  3490. if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
  3491. ret = add_tree_block(rc, &key, path, &blocks);
  3492. } else if (rc->stage == UPDATE_DATA_PTRS &&
  3493. (flags & BTRFS_EXTENT_FLAG_DATA)) {
  3494. ret = add_data_references(rc, &key, path, &blocks);
  3495. } else {
  3496. btrfs_release_path(path);
  3497. ret = 0;
  3498. }
  3499. if (ret < 0) {
  3500. err = ret;
  3501. break;
  3502. }
  3503. if (!RB_EMPTY_ROOT(&blocks)) {
  3504. ret = relocate_tree_blocks(trans, rc, &blocks);
  3505. if (ret < 0) {
  3506. /*
3507. * if we fail to relocate tree blocks, force the backref cache
3508. * to be updated when the transaction commits.
  3509. */
  3510. rc->backref_cache.last_trans = trans->transid - 1;
  3511. if (ret != -EAGAIN) {
  3512. err = ret;
  3513. break;
  3514. }
  3515. rc->extents_found--;
  3516. rc->search_start = key.objectid;
  3517. }
  3518. }
  3519. btrfs_end_transaction_throttle(trans, rc->extent_root);
  3520. btrfs_btree_balance_dirty(rc->extent_root);
  3521. trans = NULL;
  3522. if (rc->stage == MOVE_DATA_EXTENTS &&
  3523. (flags & BTRFS_EXTENT_FLAG_DATA)) {
  3524. rc->found_file_extent = 1;
  3525. ret = relocate_data_extent(rc->data_inode,
  3526. &key, &rc->cluster);
  3527. if (ret < 0) {
  3528. err = ret;
  3529. break;
  3530. }
  3531. }
  3532. }
  3533. if (trans && progress && err == -ENOSPC) {
  3534. ret = btrfs_force_chunk_alloc(trans, rc->extent_root,
  3535. rc->block_group->flags);
  3536. if (ret == 0) {
  3537. err = 0;
  3538. progress = 0;
  3539. goto restart;
  3540. }
  3541. }
  3542. btrfs_release_path(path);
  3543. clear_extent_bits(&rc->processed_blocks, 0, (u64)-1, EXTENT_DIRTY,
  3544. GFP_NOFS);
  3545. if (trans) {
  3546. btrfs_end_transaction_throttle(trans, rc->extent_root);
  3547. btrfs_btree_balance_dirty(rc->extent_root);
  3548. }
  3549. if (!err) {
  3550. ret = relocate_file_extent_cluster(rc->data_inode,
  3551. &rc->cluster);
  3552. if (ret < 0)
  3553. err = ret;
  3554. }
  3555. rc->create_reloc_tree = 0;
  3556. set_reloc_control(rc);
  3557. backref_cache_cleanup(&rc->backref_cache);
  3558. btrfs_block_rsv_release(rc->extent_root, rc->block_rsv, (u64)-1);
  3559. err = prepare_to_merge(rc, err);
  3560. merge_reloc_roots(rc);
  3561. rc->merge_reloc_tree = 0;
  3562. unset_reloc_control(rc);
  3563. btrfs_block_rsv_release(rc->extent_root, rc->block_rsv, (u64)-1);
  3564. /* get rid of pinned extents */
  3565. trans = btrfs_join_transaction(rc->extent_root);
  3566. if (IS_ERR(trans))
  3567. err = PTR_ERR(trans);
  3568. else
  3569. btrfs_commit_transaction(trans, rc->extent_root);
  3570. out_free:
  3571. btrfs_free_block_rsv(rc->extent_root, rc->block_rsv);
  3572. btrfs_free_path(path);
  3573. return err;
  3574. }
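/*
* insert a minimal inode item for the data relocation inode: a zero-size
* regular file (mode 0600) with the NOCOMPRESS and PREALLOC flags set
* and a link count of zero
*/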
  3575. static int __insert_orphan_inode(struct btrfs_trans_handle *trans,
  3576. struct btrfs_root *root, u64 objectid)
  3577. {
  3578. struct btrfs_path *path;
  3579. struct btrfs_inode_item *item;
  3580. struct extent_buffer *leaf;
  3581. int ret;
  3582. path = btrfs_alloc_path();
  3583. if (!path)
  3584. return -ENOMEM;
  3585. ret = btrfs_insert_empty_inode(trans, root, path, objectid);
  3586. if (ret)
  3587. goto out;
  3588. leaf = path->nodes[0];
  3589. item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_inode_item);
  3590. memset_extent_buffer(leaf, 0, (unsigned long)item, sizeof(*item));
  3591. btrfs_set_inode_generation(leaf, item, 1);
  3592. btrfs_set_inode_size(leaf, item, 0);
  3593. btrfs_set_inode_mode(leaf, item, S_IFREG | 0600);
  3594. btrfs_set_inode_flags(leaf, item, BTRFS_INODE_NOCOMPRESS |
  3595. BTRFS_INODE_PREALLOC);
  3596. btrfs_mark_buffer_dirty(leaf);
  3597. btrfs_release_path(path);
  3598. out:
  3599. btrfs_free_path(path);
  3600. return ret;
  3601. }
  3602. /*
3603. * helper to create an inode for data relocation.
3604. * the inode lives in the data relocation tree and its link count is 0
  3605. */
  3606. static noinline_for_stack
  3607. struct inode *create_reloc_inode(struct btrfs_fs_info *fs_info,
  3608. struct btrfs_block_group_cache *group)
  3609. {
  3610. struct inode *inode = NULL;
  3611. struct btrfs_trans_handle *trans;
  3612. struct btrfs_root *root;
  3613. struct btrfs_key key;
  3614. u64 objectid = BTRFS_FIRST_FREE_OBJECTID;
  3615. int err = 0;
  3616. root = read_fs_root(fs_info, BTRFS_DATA_RELOC_TREE_OBJECTID);
  3617. if (IS_ERR(root))
  3618. return ERR_CAST(root);
  3619. trans = btrfs_start_transaction(root, 6);
  3620. if (IS_ERR(trans))
  3621. return ERR_CAST(trans);
  3622. err = btrfs_find_free_objectid(root, &objectid);
  3623. if (err)
  3624. goto out;
  3625. err = __insert_orphan_inode(trans, root, objectid);
  3626. BUG_ON(err);
  3627. key.objectid = objectid;
  3628. key.type = BTRFS_INODE_ITEM_KEY;
  3629. key.offset = 0;
  3630. inode = btrfs_iget(root->fs_info->sb, &key, root, NULL);
  3631. BUG_ON(IS_ERR(inode) || is_bad_inode(inode));
  3632. BTRFS_I(inode)->index_cnt = group->key.objectid;
  3633. err = btrfs_orphan_add(trans, inode);
  3634. out:
  3635. btrfs_end_transaction(trans, root);
  3636. btrfs_btree_balance_dirty(root);
  3637. if (err) {
  3638. if (inode)
  3639. iput(inode);
  3640. inode = ERR_PTR(err);
  3641. }
  3642. return inode;
  3643. }
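/* allocate and initialize a reloc control structure */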
  3644. static struct reloc_control *alloc_reloc_control(struct btrfs_fs_info *fs_info)
  3645. {
  3646. struct reloc_control *rc;
  3647. rc = kzalloc(sizeof(*rc), GFP_NOFS);
  3648. if (!rc)
  3649. return NULL;
  3650. INIT_LIST_HEAD(&rc->reloc_roots);
  3651. backref_cache_init(&rc->backref_cache);
  3652. mapping_tree_init(&rc->reloc_root_tree);
  3653. extent_io_tree_init(&rc->processed_blocks,
  3654. fs_info->btree_inode->i_mapping);
  3655. return rc;
  3656. }
  3657. /*
  3658. * function to relocate all extents in a block group.
  3659. */
  3660. int btrfs_relocate_block_group(struct btrfs_root *extent_root, u64 group_start)
  3661. {
  3662. struct btrfs_fs_info *fs_info = extent_root->fs_info;
  3663. struct reloc_control *rc;
  3664. struct inode *inode;
  3665. struct btrfs_path *path;
  3666. int ret;
  3667. int rw = 0;
  3668. int err = 0;
  3669. rc = alloc_reloc_control(fs_info);
  3670. if (!rc)
  3671. return -ENOMEM;
  3672. rc->extent_root = extent_root;
  3673. rc->block_group = btrfs_lookup_block_group(fs_info, group_start);
  3674. BUG_ON(!rc->block_group);
  3675. if (!rc->block_group->ro) {
  3676. ret = btrfs_set_block_group_ro(extent_root, rc->block_group);
  3677. if (ret) {
  3678. err = ret;
  3679. goto out;
  3680. }
  3681. rw = 1;
  3682. }
  3683. path = btrfs_alloc_path();
  3684. if (!path) {
  3685. err = -ENOMEM;
  3686. goto out;
  3687. }
  3688. inode = lookup_free_space_inode(fs_info->tree_root, rc->block_group,
  3689. path);
  3690. btrfs_free_path(path);
  3691. if (!IS_ERR(inode))
  3692. ret = delete_block_group_cache(fs_info, inode, 0);
  3693. else
  3694. ret = PTR_ERR(inode);
  3695. if (ret && ret != -ENOENT) {
  3696. err = ret;
  3697. goto out;
  3698. }
  3699. rc->data_inode = create_reloc_inode(fs_info, rc->block_group);
  3700. if (IS_ERR(rc->data_inode)) {
  3701. err = PTR_ERR(rc->data_inode);
  3702. rc->data_inode = NULL;
  3703. goto out;
  3704. }
  3705. btrfs_info(extent_root->fs_info, "relocating block group %llu flags %llu",
  3706. rc->block_group->key.objectid, rc->block_group->flags);
  3707. ret = btrfs_start_delalloc_roots(fs_info, 0, -1);
  3708. if (ret < 0) {
  3709. err = ret;
  3710. goto out;
  3711. }
  3712. btrfs_wait_ordered_roots(fs_info, -1);
  3713. while (1) {
  3714. mutex_lock(&fs_info->cleaner_mutex);
  3715. ret = relocate_block_group(rc);
  3716. mutex_unlock(&fs_info->cleaner_mutex);
  3717. if (ret < 0) {
  3718. err = ret;
  3719. goto out;
  3720. }
  3721. if (rc->extents_found == 0)
  3722. break;
  3723. btrfs_info(extent_root->fs_info, "found %llu extents",
  3724. rc->extents_found);
  3725. if (rc->stage == MOVE_DATA_EXTENTS && rc->found_file_extent) {
  3726. ret = btrfs_wait_ordered_range(rc->data_inode, 0,
  3727. (u64)-1);
  3728. if (ret) {
  3729. err = ret;
  3730. goto out;
  3731. }
  3732. invalidate_mapping_pages(rc->data_inode->i_mapping,
  3733. 0, -1);
  3734. rc->stage = UPDATE_DATA_PTRS;
  3735. }
  3736. }
  3737. WARN_ON(rc->block_group->pinned > 0);
  3738. WARN_ON(rc->block_group->reserved > 0);
  3739. WARN_ON(btrfs_block_group_used(&rc->block_group->item) > 0);
  3740. out:
  3741. if (err && rw)
  3742. btrfs_set_block_group_rw(extent_root, rc->block_group);
  3743. iput(rc->data_inode);
  3744. btrfs_put_block_group(rc->block_group);
  3745. kfree(rc);
  3746. return err;
  3747. }
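/*
* mark a reloc root whose fs root no longer exists as garbage: clear its
* drop progress and set its root refs to zero in the root item
*/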
  3748. static noinline_for_stack int mark_garbage_root(struct btrfs_root *root)
  3749. {
  3750. struct btrfs_trans_handle *trans;
  3751. int ret, err;
  3752. trans = btrfs_start_transaction(root->fs_info->tree_root, 0);
  3753. if (IS_ERR(trans))
  3754. return PTR_ERR(trans);
  3755. memset(&root->root_item.drop_progress, 0,
  3756. sizeof(root->root_item.drop_progress));
  3757. root->root_item.drop_level = 0;
  3758. btrfs_set_root_refs(&root->root_item, 0);
  3759. ret = btrfs_update_root(trans, root->fs_info->tree_root,
  3760. &root->root_key, &root->root_item);
  3761. err = btrfs_end_transaction(trans, root->fs_info->tree_root);
  3762. if (err)
  3763. return err;
  3764. return ret;
  3765. }
  3766. /*
3767. * recover relocation interrupted by a system crash.
3768. *
3769. * this function resumes merging reloc trees with the corresponding fs trees.
3770. * this is important for preserving the sharing of tree blocks
  3771. */
  3772. int btrfs_recover_relocation(struct btrfs_root *root)
  3773. {
  3774. LIST_HEAD(reloc_roots);
  3775. struct btrfs_key key;
  3776. struct btrfs_root *fs_root;
  3777. struct btrfs_root *reloc_root;
  3778. struct btrfs_path *path;
  3779. struct extent_buffer *leaf;
  3780. struct reloc_control *rc = NULL;
  3781. struct btrfs_trans_handle *trans;
  3782. int ret;
  3783. int err = 0;
  3784. path = btrfs_alloc_path();
  3785. if (!path)
  3786. return -ENOMEM;
  3787. path->reada = -1;
  3788. key.objectid = BTRFS_TREE_RELOC_OBJECTID;
  3789. key.type = BTRFS_ROOT_ITEM_KEY;
  3790. key.offset = (u64)-1;
  3791. while (1) {
  3792. ret = btrfs_search_slot(NULL, root->fs_info->tree_root, &key,
  3793. path, 0, 0);
  3794. if (ret < 0) {
  3795. err = ret;
  3796. goto out;
  3797. }
  3798. if (ret > 0) {
  3799. if (path->slots[0] == 0)
  3800. break;
  3801. path->slots[0]--;
  3802. }
  3803. leaf = path->nodes[0];
  3804. btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
  3805. btrfs_release_path(path);
  3806. if (key.objectid != BTRFS_TREE_RELOC_OBJECTID ||
  3807. key.type != BTRFS_ROOT_ITEM_KEY)
  3808. break;
  3809. reloc_root = btrfs_read_fs_root(root, &key);
  3810. if (IS_ERR(reloc_root)) {
  3811. err = PTR_ERR(reloc_root);
  3812. goto out;
  3813. }
  3814. list_add(&reloc_root->root_list, &reloc_roots);
  3815. if (btrfs_root_refs(&reloc_root->root_item) > 0) {
  3816. fs_root = read_fs_root(root->fs_info,
  3817. reloc_root->root_key.offset);
  3818. if (IS_ERR(fs_root)) {
  3819. ret = PTR_ERR(fs_root);
  3820. if (ret != -ENOENT) {
  3821. err = ret;
  3822. goto out;
  3823. }
  3824. ret = mark_garbage_root(reloc_root);
  3825. if (ret < 0) {
  3826. err = ret;
  3827. goto out;
  3828. }
  3829. }
  3830. }
  3831. if (key.offset == 0)
  3832. break;
  3833. key.offset--;
  3834. }
  3835. btrfs_release_path(path);
  3836. if (list_empty(&reloc_roots))
  3837. goto out;
  3838. rc = alloc_reloc_control(root->fs_info);
  3839. if (!rc) {
  3840. err = -ENOMEM;
  3841. goto out;
  3842. }
  3843. rc->extent_root = root->fs_info->extent_root;
  3844. set_reloc_control(rc);
  3845. trans = btrfs_join_transaction(rc->extent_root);
  3846. if (IS_ERR(trans)) {
  3847. unset_reloc_control(rc);
  3848. err = PTR_ERR(trans);
  3849. goto out_free;
  3850. }
  3851. rc->merge_reloc_tree = 1;
  3852. while (!list_empty(&reloc_roots)) {
  3853. reloc_root = list_entry(reloc_roots.next,
  3854. struct btrfs_root, root_list);
  3855. list_del(&reloc_root->root_list);
  3856. if (btrfs_root_refs(&reloc_root->root_item) == 0) {
  3857. list_add_tail(&reloc_root->root_list,
  3858. &rc->reloc_roots);
  3859. continue;
  3860. }
  3861. fs_root = read_fs_root(root->fs_info,
  3862. reloc_root->root_key.offset);
  3863. if (IS_ERR(fs_root)) {
  3864. err = PTR_ERR(fs_root);
  3865. goto out_free;
  3866. }
  3867. err = __add_reloc_root(reloc_root);
  3868. BUG_ON(err < 0); /* -ENOMEM or logic error */
  3869. fs_root->reloc_root = reloc_root;
  3870. }
  3871. err = btrfs_commit_transaction(trans, rc->extent_root);
  3872. if (err)
  3873. goto out_free;
  3874. merge_reloc_roots(rc);
  3875. unset_reloc_control(rc);
  3876. trans = btrfs_join_transaction(rc->extent_root);
  3877. if (IS_ERR(trans))
  3878. err = PTR_ERR(trans);
  3879. else
  3880. err = btrfs_commit_transaction(trans, rc->extent_root);
  3881. out_free:
  3882. kfree(rc);
  3883. out:
  3884. if (!list_empty(&reloc_roots))
  3885. free_reloc_roots(&reloc_roots);
  3886. btrfs_free_path(path);
  3887. if (err == 0) {
3888. /* clean up the orphan inode in the data relocation tree */
  3889. fs_root = read_fs_root(root->fs_info,
  3890. BTRFS_DATA_RELOC_TREE_OBJECTID);
  3891. if (IS_ERR(fs_root))
  3892. err = PTR_ERR(fs_root);
  3893. else
  3894. err = btrfs_orphan_cleanup(fs_root);
  3895. }
  3896. return err;
  3897. }
  3898. /*
  3899. * helper to add ordered checksum for data relocation.
  3900. *
3901. * cloning the checksums properly handles nodatasum extents.
3902. * it also saves the CPU time of re-calculating the checksums.
  3903. */
  3904. int btrfs_reloc_clone_csums(struct inode *inode, u64 file_pos, u64 len)
  3905. {
  3906. struct btrfs_ordered_sum *sums;
  3907. struct btrfs_ordered_extent *ordered;
  3908. struct btrfs_root *root = BTRFS_I(inode)->root;
  3909. int ret;
  3910. u64 disk_bytenr;
  3911. u64 new_bytenr;
  3912. LIST_HEAD(list);
  3913. ordered = btrfs_lookup_ordered_extent(inode, file_pos);
  3914. BUG_ON(ordered->file_offset != file_pos || ordered->len != len);
  3915. disk_bytenr = file_pos + BTRFS_I(inode)->index_cnt;
  3916. ret = btrfs_lookup_csums_range(root->fs_info->csum_root, disk_bytenr,
  3917. disk_bytenr + len - 1, &list, 0);
  3918. if (ret)
  3919. goto out;
  3920. while (!list_empty(&list)) {
  3921. sums = list_entry(list.next, struct btrfs_ordered_sum, list);
  3922. list_del_init(&sums->list);
  3923. /*
  3924. * We need to offset the new_bytenr based on where the csum is.
  3925. * We need to do this because we will read in entire prealloc
  3926. * extents but we may have written to say the middle of the
  3927. * prealloc extent, so we need to make sure the csum goes with
  3928. * the right disk offset.
  3929. *
  3930. * We can do this because the data reloc inode refers strictly
  3931. * to the on disk bytes, so we don't have to worry about
  3932. * disk_len vs real len like with real inodes since it's all
  3933. * disk length.
  3934. */
  3935. new_bytenr = ordered->start + (sums->bytenr - disk_bytenr);
  3936. sums->bytenr = new_bytenr;
  3937. btrfs_add_ordered_sum(inode, ordered, sums);
  3938. }
  3939. out:
  3940. btrfs_put_ordered_extent(ordered);
  3941. return ret;
  3942. }
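/*
* called when a tree block is COWed while relocation is active.  updates
* the matching backref cache node to point at the new copy and, for
* leaves COWed for the first time during the UPDATE_DATA_PTRS stage,
* rewrites the file extent pointers in the new block
*/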
  3943. int btrfs_reloc_cow_block(struct btrfs_trans_handle *trans,
  3944. struct btrfs_root *root, struct extent_buffer *buf,
  3945. struct extent_buffer *cow)
  3946. {
  3947. struct reloc_control *rc;
  3948. struct backref_node *node;
  3949. int first_cow = 0;
  3950. int level;
  3951. int ret = 0;
  3952. rc = root->fs_info->reloc_ctl;
  3953. if (!rc)
  3954. return 0;
  3955. BUG_ON(rc->stage == UPDATE_DATA_PTRS &&
  3956. root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID);
  3957. if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
  3958. if (buf == root->node)
  3959. __update_reloc_root(root, cow->start);
  3960. }
  3961. level = btrfs_header_level(buf);
  3962. if (btrfs_header_generation(buf) <=
  3963. btrfs_root_last_snapshot(&root->root_item))
  3964. first_cow = 1;
  3965. if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID &&
  3966. rc->create_reloc_tree) {
  3967. WARN_ON(!first_cow && level == 0);
  3968. node = rc->backref_cache.path[level];
  3969. BUG_ON(node->bytenr != buf->start &&
  3970. node->new_bytenr != buf->start);
  3971. drop_node_buffer(node);
  3972. extent_buffer_get(cow);
  3973. node->eb = cow;
  3974. node->new_bytenr = cow->start;
  3975. if (!node->pending) {
  3976. list_move_tail(&node->list,
  3977. &rc->backref_cache.pending[level]);
  3978. node->pending = 1;
  3979. }
  3980. if (first_cow)
  3981. __mark_block_processed(rc, node);
  3982. if (first_cow && level > 0)
  3983. rc->nodes_relocated += buf->len;
  3984. }
  3985. if (level == 0 && first_cow && rc->stage == UPDATE_DATA_PTRS)
  3986. ret = replace_file_extents(trans, rc, root, cow);
  3987. return ret;
  3988. }
  3989. /*
3990. * called before creating a snapshot. it calculates the metadata reservation
3991. * required for relocating tree blocks in the snapshot
  3992. */
  3993. void btrfs_reloc_pre_snapshot(struct btrfs_trans_handle *trans,
  3994. struct btrfs_pending_snapshot *pending,
  3995. u64 *bytes_to_reserve)
  3996. {
  3997. struct btrfs_root *root;
  3998. struct reloc_control *rc;
  3999. root = pending->root;
  4000. if (!root->reloc_root)
  4001. return;
  4002. rc = root->fs_info->reloc_ctl;
  4003. if (!rc->merge_reloc_tree)
  4004. return;
  4005. root = root->reloc_root;
  4006. BUG_ON(btrfs_root_refs(&root->root_item) == 0);
  4007. /*
  4008. * relocation is in the stage of merging trees. the space
  4009. * used by merging a reloc tree is twice the size of
  4010. * relocated tree nodes in the worst case. half for cowing
  4011. * the reloc tree, half for cowing the fs tree. the space
  4012. * used by cowing the reloc tree will be freed after the
4013. * tree is dropped. if we create a snapshot, cowing the fs
4014. * tree may use more space than it frees. so we need to
4015. * reserve extra space.
  4016. */
  4017. *bytes_to_reserve += rc->nodes_relocated;
  4018. }
  4019. /*
4020. * called after the snapshot is created. migrate the block reservation
4021. * and create a reloc root for the newly created snapshot
  4022. */
  4023. int btrfs_reloc_post_snapshot(struct btrfs_trans_handle *trans,
  4024. struct btrfs_pending_snapshot *pending)
  4025. {
  4026. struct btrfs_root *root = pending->root;
  4027. struct btrfs_root *reloc_root;
  4028. struct btrfs_root *new_root;
  4029. struct reloc_control *rc;
  4030. int ret;
  4031. if (!root->reloc_root)
  4032. return 0;
  4033. rc = root->fs_info->reloc_ctl;
  4034. rc->merging_rsv_size += rc->nodes_relocated;
  4035. if (rc->merge_reloc_tree) {
  4036. ret = btrfs_block_rsv_migrate(&pending->block_rsv,
  4037. rc->block_rsv,
  4038. rc->nodes_relocated);
  4039. if (ret)
  4040. return ret;
  4041. }
  4042. new_root = pending->snap;
  4043. reloc_root = create_reloc_root(trans, root->reloc_root,
  4044. new_root->root_key.objectid);
  4045. if (IS_ERR(reloc_root))
  4046. return PTR_ERR(reloc_root);
  4047. ret = __add_reloc_root(reloc_root);
  4048. BUG_ON(ret < 0);
  4049. new_root->reloc_root = reloc_root;
  4050. if (rc->create_reloc_tree)
  4051. ret = clone_backref_node(trans, rc, root, reloc_root);
  4052. return ret;
  4053. }