relocation.c

/*
 * Copyright (C) 2009 Oracle. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "volumes.h"
#include "locking.h"
#include "btrfs_inode.h"
#include "async-thread.h"
#include "free-space-cache.h"
#include "inode-map.h"
/*
 * backref_node, mapping_node and tree_block start with this
 */
struct tree_entry {
	struct rb_node rb_node;
	u64 bytenr;
};
/*
 * represents a tree block in the backref cache
 */
struct backref_node {
	struct rb_node rb_node;
	u64 bytenr;
	u64 new_bytenr;
	/* objectid of tree block owner, may not be up to date */
	u64 owner;
	/* link to pending, changed or detached list */
	struct list_head list;
	/* list of upper level blocks that reference this block */
	struct list_head upper;
	/* list of child blocks in the cache */
	struct list_head lower;
	/* NULL if this node is not a tree root */
	struct btrfs_root *root;
	/* extent buffer got by COWing the block */
	struct extent_buffer *eb;
	/* level of tree block */
	unsigned int level:8;
	/* is the block in a non-reference-counted tree */
	unsigned int cowonly:1;
	/* 1 if no child node is in the cache */
	unsigned int lowest:1;
	/* is the extent buffer locked */
	unsigned int locked:1;
	/* has the block been processed */
	unsigned int processed:1;
	/* have backrefs of this block been checked */
	unsigned int checked:1;
	/*
	 * 1 if corresponding block has been COWed but some upper
	 * level block pointers may not point to the new location
	 */
	unsigned int pending:1;
	/*
	 * 1 if the backref node isn't connected to any other
	 * backref node.
	 */
	unsigned int detached:1;
};
/*
 * represents a block pointer in the backref cache
 */
struct backref_edge {
	struct list_head list[2];
	struct backref_node *node[2];
};
#define LOWER	0
#define UPPER	1
#define RELOCATION_RESERVED_NODES	256
struct backref_cache {
	/* red black tree of all backref nodes in the cache */
	struct rb_root rb_root;
	/* for passing backref nodes to btrfs_reloc_cow_block */
	struct backref_node *path[BTRFS_MAX_LEVEL];
	/*
	 * list of blocks that have been COWed but some block
	 * pointers in upper level blocks may not reflect the
	 * new location
	 */
	struct list_head pending[BTRFS_MAX_LEVEL];
	/* list of backref nodes with no child node */
	struct list_head leaves;
	/* list of blocks that have been COWed in current transaction */
	struct list_head changed;
	/* list of detached backref nodes */
	struct list_head detached;
	u64 last_trans;
	int nr_nodes;
	int nr_edges;
};
/*
 * map address of tree root to tree
 */
struct mapping_node {
	struct rb_node rb_node;
	u64 bytenr;
	void *data;
};
struct mapping_tree {
	struct rb_root rb_root;
	spinlock_t lock;
};
/*
 * represents a tree block to process
 */
struct tree_block {
	struct rb_node rb_node;
	u64 bytenr;
	struct btrfs_key key;
	unsigned int level:8;
	unsigned int key_ready:1;
};
#define MAX_EXTENTS 128
struct file_extent_cluster {
	u64 start;
	u64 end;
	u64 boundary[MAX_EXTENTS];
	unsigned int nr;
};
struct reloc_control {
	/* block group to relocate */
	struct btrfs_block_group_cache *block_group;
	/* extent tree */
	struct btrfs_root *extent_root;
	/* inode for moving data */
	struct inode *data_inode;
	struct btrfs_block_rsv *block_rsv;
	struct backref_cache backref_cache;
	struct file_extent_cluster cluster;
	/* tree blocks that have been processed */
	struct extent_io_tree processed_blocks;
	/* map start of tree root to corresponding reloc tree */
	struct mapping_tree reloc_root_tree;
	/* list of reloc trees */
	struct list_head reloc_roots;
	/* size of metadata reservation for merging reloc trees */
	u64 merging_rsv_size;
	/* size of relocated tree nodes */
	u64 nodes_relocated;
	/* reserved size for block group relocation */
	u64 reserved_bytes;
	u64 search_start;
	u64 extents_found;
	unsigned int stage:8;
	unsigned int create_reloc_tree:1;
	unsigned int merge_reloc_tree:1;
	unsigned int found_file_extent:1;
};
/* stages of data relocation */
#define MOVE_DATA_EXTENTS	0
#define UPDATE_DATA_PTRS	1
static void remove_backref_node(struct backref_cache *cache,
				struct backref_node *node);
static void __mark_block_processed(struct reloc_control *rc,
				   struct backref_node *node);
static void mapping_tree_init(struct mapping_tree *tree)
{
	tree->rb_root = RB_ROOT;
	spin_lock_init(&tree->lock);
}
static void backref_cache_init(struct backref_cache *cache)
{
	int i;
	cache->rb_root = RB_ROOT;
	for (i = 0; i < BTRFS_MAX_LEVEL; i++)
		INIT_LIST_HEAD(&cache->pending[i]);
	INIT_LIST_HEAD(&cache->changed);
	INIT_LIST_HEAD(&cache->detached);
	INIT_LIST_HEAD(&cache->leaves);
}
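/*
 * drop all nodes/edges from the cache; the cache must end up completely
 * empty, which the BUG_ON()s below verify
 */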
static void backref_cache_cleanup(struct backref_cache *cache)
{
	struct backref_node *node;
	int i;
	while (!list_empty(&cache->detached)) {
		node = list_entry(cache->detached.next,
				  struct backref_node, list);
		remove_backref_node(cache, node);
	}
	while (!list_empty(&cache->leaves)) {
		node = list_entry(cache->leaves.next,
				  struct backref_node, lower);
		remove_backref_node(cache, node);
	}
	cache->last_trans = 0;
	for (i = 0; i < BTRFS_MAX_LEVEL; i++)
		BUG_ON(!list_empty(&cache->pending[i]));
	BUG_ON(!list_empty(&cache->changed));
	BUG_ON(!list_empty(&cache->detached));
	BUG_ON(!RB_EMPTY_ROOT(&cache->rb_root));
	BUG_ON(cache->nr_nodes);
	BUG_ON(cache->nr_edges);
}
static struct backref_node *alloc_backref_node(struct backref_cache *cache)
{
	struct backref_node *node;
	node = kzalloc(sizeof(*node), GFP_NOFS);
	if (node) {
		INIT_LIST_HEAD(&node->list);
		INIT_LIST_HEAD(&node->upper);
		INIT_LIST_HEAD(&node->lower);
		RB_CLEAR_NODE(&node->rb_node);
		cache->nr_nodes++;
	}
	return node;
}
static void free_backref_node(struct backref_cache *cache,
			      struct backref_node *node)
{
	if (node) {
		cache->nr_nodes--;
		kfree(node);
	}
}
static struct backref_edge *alloc_backref_edge(struct backref_cache *cache)
{
	struct backref_edge *edge;
	edge = kzalloc(sizeof(*edge), GFP_NOFS);
	if (edge)
		cache->nr_edges++;
	return edge;
}
static void free_backref_edge(struct backref_cache *cache,
			      struct backref_edge *edge)
{
	if (edge) {
		cache->nr_edges--;
		kfree(edge);
	}
}
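/*
 * insert @node into the rb-tree @root keyed by @bytenr; returns the
 * conflicting rb_node if an entry with the same bytenr already exists,
 * NULL on success
 */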
static struct rb_node *tree_insert(struct rb_root *root, u64 bytenr,
				   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct tree_entry *entry;
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct tree_entry, rb_node);
		if (bytenr < entry->bytenr)
			p = &(*p)->rb_left;
		else if (bytenr > entry->bytenr)
			p = &(*p)->rb_right;
		else
			return parent;
	}
	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}
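/*
 * find the tree_entry with the given bytenr; returns its rb_node or NULL
 * if no such entry is cached
 */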
static struct rb_node *tree_search(struct rb_root *root, u64 bytenr)
{
	struct rb_node *n = root->rb_node;
	struct tree_entry *entry;
	while (n) {
		entry = rb_entry(n, struct tree_entry, rb_node);
		if (bytenr < entry->bytenr)
			n = n->rb_left;
		else if (bytenr > entry->bytenr)
			n = n->rb_right;
		else
			return n;
	}
	return NULL;
}
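/*
 * report a fatal inconsistency in the backref cache via btrfs_panic()
 */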
static void backref_tree_panic(struct rb_node *rb_node, int errno, u64 bytenr)
{
	struct btrfs_fs_info *fs_info = NULL;
	struct backref_node *bnode = rb_entry(rb_node, struct backref_node,
					      rb_node);
	if (bnode->root)
		fs_info = bnode->root->fs_info;
	btrfs_panic(fs_info, errno, "Inconsistency in backref cache "
		    "found at offset %llu", bytenr);
}
/*
 * walk up backref nodes until we reach the node that represents the
 * tree root
 */
static struct backref_node *walk_up_backref(struct backref_node *node,
					    struct backref_edge *edges[],
					    int *index)
{
	struct backref_edge *edge;
	int idx = *index;
	while (!list_empty(&node->upper)) {
		edge = list_entry(node->upper.next,
				  struct backref_edge, list[LOWER]);
		edges[idx++] = edge;
		node = edge->node[UPPER];
	}
	BUG_ON(node->detached);
	*index = idx;
	return node;
}
/*
 * walk down backref nodes to find start of next reference path
 */
static struct backref_node *walk_down_backref(struct backref_edge *edges[],
					      int *index)
{
	struct backref_edge *edge;
	struct backref_node *lower;
	int idx = *index;
	while (idx > 0) {
		edge = edges[idx - 1];
		lower = edge->node[LOWER];
		if (list_is_last(&edge->list[LOWER], &lower->upper)) {
			idx--;
			continue;
		}
		edge = list_entry(edge->list[LOWER].next,
				  struct backref_edge, list[LOWER]);
		edges[idx - 1] = edge;
		*index = idx;
		return edge->node[UPPER];
	}
	*index = 0;
	return NULL;
}
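/*
 * helpers to unlock and release the extent buffer cached in a backref node
 */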
static void unlock_node_buffer(struct backref_node *node)
{
	if (node->locked) {
		btrfs_tree_unlock(node->eb);
		node->locked = 0;
	}
}
static void drop_node_buffer(struct backref_node *node)
{
	if (node->eb) {
		unlock_node_buffer(node);
		free_extent_buffer(node->eb);
		node->eb = NULL;
	}
}
static void drop_backref_node(struct backref_cache *tree,
			      struct backref_node *node)
{
	BUG_ON(!list_empty(&node->upper));
	drop_node_buffer(node);
	list_del(&node->list);
	list_del(&node->lower);
	if (!RB_EMPTY_NODE(&node->rb_node))
		rb_erase(&node->rb_node, &tree->rb_root);
	free_backref_node(tree, node);
}
/*
 * remove a backref node from the backref cache
 */
static void remove_backref_node(struct backref_cache *cache,
				struct backref_node *node)
{
	struct backref_node *upper;
	struct backref_edge *edge;
	if (!node)
		return;
	BUG_ON(!node->lowest && !node->detached);
	while (!list_empty(&node->upper)) {
		edge = list_entry(node->upper.next, struct backref_edge,
				  list[LOWER]);
		upper = edge->node[UPPER];
		list_del(&edge->list[LOWER]);
		list_del(&edge->list[UPPER]);
		free_backref_edge(cache, edge);
		if (RB_EMPTY_NODE(&upper->rb_node)) {
			BUG_ON(!list_empty(&node->upper));
			drop_backref_node(cache, node);
			node = upper;
			node->lowest = 1;
			continue;
		}
		/*
		 * add the node to the leaf node list if no other
		 * child block is cached.
		 */
		if (list_empty(&upper->lower)) {
			list_add_tail(&upper->lower, &cache->leaves);
			upper->lowest = 1;
		}
	}
	drop_backref_node(cache, node);
}
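/*
 * re-key a cached backref node after the tree block it describes has been
 * relocated to a new bytenr
 */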
static void update_backref_node(struct backref_cache *cache,
				struct backref_node *node, u64 bytenr)
{
	struct rb_node *rb_node;
	rb_erase(&node->rb_node, &cache->rb_root);
	node->bytenr = bytenr;
	rb_node = tree_insert(&cache->rb_root, node->bytenr, &node->rb_node);
	if (rb_node)
		backref_tree_panic(rb_node, -EEXIST, bytenr);
}
/*
 * update backref cache after a transaction commit
 */
static int update_backref_cache(struct btrfs_trans_handle *trans,
				struct backref_cache *cache)
{
	struct backref_node *node;
	int level = 0;
	if (cache->last_trans == 0) {
		cache->last_trans = trans->transid;
		return 0;
	}
	if (cache->last_trans == trans->transid)
		return 0;
	/*
	 * detached nodes are used to avoid unnecessary backref
	 * lookups. a transaction commit changes the extent tree,
	 * so the detached nodes are no longer useful.
	 */
	while (!list_empty(&cache->detached)) {
		node = list_entry(cache->detached.next,
				  struct backref_node, list);
		remove_backref_node(cache, node);
	}
	while (!list_empty(&cache->changed)) {
		node = list_entry(cache->changed.next,
				  struct backref_node, list);
		list_del_init(&node->list);
		BUG_ON(node->pending);
		update_backref_node(cache, node, node->new_bytenr);
	}
	/*
	 * some nodes can be left in the pending list if there were
	 * errors during processing the pending nodes.
	 */
	for (level = 0; level < BTRFS_MAX_LEVEL; level++) {
		list_for_each_entry(node, &cache->pending[level], list) {
			BUG_ON(!node->pending);
			if (node->bytenr == node->new_bytenr)
				continue;
			update_backref_node(cache, node, node->new_bytenr);
		}
	}
	cache->last_trans = 0;
	return 1;
}
static int should_ignore_root(struct btrfs_root *root)
{
	struct btrfs_root *reloc_root;
	if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state))
		return 0;
	reloc_root = root->reloc_root;
	if (!reloc_root)
		return 0;
	if (btrfs_root_last_snapshot(&reloc_root->root_item) ==
	    root->fs_info->running_transaction->transid - 1)
		return 0;
	/*
	 * if there is a reloc tree and it was created in a previous
	 * transaction, the backref lookup can find the reloc tree,
	 * so the backref node for the fs tree root is useless for
	 * relocation.
	 */
	return 1;
}
/*
 * find reloc tree by address of tree root
 */
static struct btrfs_root *find_reloc_root(struct reloc_control *rc,
					  u64 bytenr)
{
	struct rb_node *rb_node;
	struct mapping_node *node;
	struct btrfs_root *root = NULL;
	spin_lock(&rc->reloc_root_tree.lock);
	rb_node = tree_search(&rc->reloc_root_tree.rb_root, bytenr);
	if (rb_node) {
		node = rb_entry(rb_node, struct mapping_node, rb_node);
		root = (struct btrfs_root *)node->data;
	}
	spin_unlock(&rc->reloc_root_tree.lock);
	return root;
}
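/*
 * return 1 if the root objectid belongs to one of the COW-only
 * (non-reference-counted) metadata trees, 0 otherwise
 */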
static int is_cowonly_root(u64 root_objectid)
{
	if (root_objectid == BTRFS_ROOT_TREE_OBJECTID ||
	    root_objectid == BTRFS_EXTENT_TREE_OBJECTID ||
	    root_objectid == BTRFS_CHUNK_TREE_OBJECTID ||
	    root_objectid == BTRFS_DEV_TREE_OBJECTID ||
	    root_objectid == BTRFS_TREE_LOG_OBJECTID ||
	    root_objectid == BTRFS_CSUM_TREE_OBJECTID ||
	    root_objectid == BTRFS_UUID_TREE_OBJECTID ||
	    root_objectid == BTRFS_QUOTA_TREE_OBJECTID)
		return 1;
	return 0;
}
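/*
 * look up a root by objectid; COW-only roots are keyed with offset 0,
 * reference-counted (subvolume) roots with offset (u64)-1
 */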
static struct btrfs_root *read_fs_root(struct btrfs_fs_info *fs_info,
				       u64 root_objectid)
{
	struct btrfs_key key;
	key.objectid = root_objectid;
	key.type = BTRFS_ROOT_ITEM_KEY;
	if (is_cowonly_root(root_objectid))
		key.offset = 0;
	else
		key.offset = (u64)-1;
	return btrfs_get_fs_root(fs_info, &key, false);
}
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
static noinline_for_stack
struct btrfs_root *find_tree_root(struct reloc_control *rc,
				  struct extent_buffer *leaf,
				  struct btrfs_extent_ref_v0 *ref0)
{
	struct btrfs_root *root;
	u64 root_objectid = btrfs_ref_root_v0(leaf, ref0);
	u64 generation = btrfs_ref_generation_v0(leaf, ref0);
	BUG_ON(root_objectid == BTRFS_TREE_RELOC_OBJECTID);
	root = read_fs_root(rc->extent_root->fs_info, root_objectid);
	BUG_ON(IS_ERR(root));
	if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
	    generation != btrfs_root_generation(&root->root_item))
		return NULL;
	return root;
}
#endif
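/*
 * locate the inline backref area of a tree block's extent item; returns 0
 * and sets *ptr/*end to the first inline ref and the end of the item, or 1
 * if the item carries no inline backrefs
 */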
static noinline_for_stack
int find_inline_backref(struct extent_buffer *leaf, int slot,
			unsigned long *ptr, unsigned long *end)
{
	struct btrfs_key key;
	struct btrfs_extent_item *ei;
	struct btrfs_tree_block_info *bi;
	u32 item_size;
	btrfs_item_key_to_cpu(leaf, &key, slot);
	item_size = btrfs_item_size_nr(leaf, slot);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (item_size < sizeof(*ei)) {
		WARN_ON(item_size != sizeof(struct btrfs_extent_item_v0));
		return 1;
	}
#endif
	ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item);
	WARN_ON(!(btrfs_extent_flags(leaf, ei) &
		  BTRFS_EXTENT_FLAG_TREE_BLOCK));
	if (key.type == BTRFS_EXTENT_ITEM_KEY &&
	    item_size <= sizeof(*ei) + sizeof(*bi)) {
		WARN_ON(item_size < sizeof(*ei) + sizeof(*bi));
		return 1;
	}
	if (key.type == BTRFS_METADATA_ITEM_KEY &&
	    item_size <= sizeof(*ei)) {
		WARN_ON(item_size < sizeof(*ei));
		return 1;
	}
	if (key.type == BTRFS_EXTENT_ITEM_KEY) {
		bi = (struct btrfs_tree_block_info *)(ei + 1);
		*ptr = (unsigned long)(bi + 1);
	} else {
		*ptr = (unsigned long)(ei + 1);
	}
	*end = (unsigned long)ei + item_size;
	return 0;
}
/*
 * build backref tree for a given tree block. root of the backref tree
 * corresponds to the tree block, leaves of the backref tree correspond
 * to roots of b-trees that reference the tree block.
 *
 * the basic idea of this function is to check backrefs of a given block
 * to find upper level blocks that reference the block, and then check
 * backrefs of these upper level blocks recursively. the recursion stops
 * when a tree root is reached or backrefs for the block are cached.
 *
 * NOTE: if we find backrefs for a block are cached, we know backrefs
 * for all upper level blocks that directly/indirectly reference the
 * block are also cached.
 */
static noinline_for_stack
struct backref_node *build_backref_tree(struct reloc_control *rc,
					struct btrfs_key *node_key,
					int level, u64 bytenr)
{
	struct backref_cache *cache = &rc->backref_cache;
	struct btrfs_path *path1;
	struct btrfs_path *path2;
	struct extent_buffer *eb;
	struct btrfs_root *root;
	struct backref_node *cur;
	struct backref_node *upper;
	struct backref_node *lower;
	struct backref_node *node = NULL;
	struct backref_node *exist = NULL;
	struct backref_edge *edge;
	struct rb_node *rb_node;
	struct btrfs_key key;
	unsigned long end;
	unsigned long ptr;
	LIST_HEAD(list);
	LIST_HEAD(useless);
	int cowonly;
	int ret;
	int err = 0;
	bool need_check = true;
	path1 = btrfs_alloc_path();
	path2 = btrfs_alloc_path();
	if (!path1 || !path2) {
		err = -ENOMEM;
		goto out;
	}
	path1->reada = 1;
	path2->reada = 2;
	node = alloc_backref_node(cache);
	if (!node) {
		err = -ENOMEM;
		goto out;
	}
	node->bytenr = bytenr;
	node->level = level;
	node->lowest = 1;
	cur = node;
again:
	end = 0;
	ptr = 0;
	key.objectid = cur->bytenr;
	key.type = BTRFS_METADATA_ITEM_KEY;
	key.offset = (u64)-1;
	path1->search_commit_root = 1;
	path1->skip_locking = 1;
	ret = btrfs_search_slot(NULL, rc->extent_root, &key, path1,
				0, 0);
	if (ret < 0) {
		err = ret;
		goto out;
	}
	ASSERT(ret);
	ASSERT(path1->slots[0]);
	path1->slots[0]--;
	WARN_ON(cur->checked);
	if (!list_empty(&cur->upper)) {
		/*
		 * the backref was added previously when processing
		 * backref of type BTRFS_TREE_BLOCK_REF_KEY
		 */
		ASSERT(list_is_singular(&cur->upper));
		edge = list_entry(cur->upper.next, struct backref_edge,
				  list[LOWER]);
		ASSERT(list_empty(&edge->list[UPPER]));
		exist = edge->node[UPPER];
		/*
		 * add the upper level block to the pending list if we
		 * need to check its backrefs
		 */
		if (!exist->checked)
			list_add_tail(&edge->list[UPPER], &list);
	} else {
		exist = NULL;
	}
	while (1) {
		cond_resched();
		eb = path1->nodes[0];
		if (ptr >= end) {
			if (path1->slots[0] >= btrfs_header_nritems(eb)) {
				ret = btrfs_next_leaf(rc->extent_root, path1);
				if (ret < 0) {
					err = ret;
					goto out;
				}
				if (ret > 0)
					break;
				eb = path1->nodes[0];
			}
			btrfs_item_key_to_cpu(eb, &key, path1->slots[0]);
			if (key.objectid != cur->bytenr) {
				WARN_ON(exist);
				break;
			}
			if (key.type == BTRFS_EXTENT_ITEM_KEY ||
			    key.type == BTRFS_METADATA_ITEM_KEY) {
				ret = find_inline_backref(eb, path1->slots[0],
							  &ptr, &end);
				if (ret)
					goto next;
			}
		}
		if (ptr < end) {
			/* update key for inline backref */
			struct btrfs_extent_inline_ref *iref;
			iref = (struct btrfs_extent_inline_ref *)ptr;
			key.type = btrfs_extent_inline_ref_type(eb, iref);
			key.offset = btrfs_extent_inline_ref_offset(eb, iref);
			WARN_ON(key.type != BTRFS_TREE_BLOCK_REF_KEY &&
				key.type != BTRFS_SHARED_BLOCK_REF_KEY);
		}
		if (exist &&
		    ((key.type == BTRFS_TREE_BLOCK_REF_KEY &&
		      exist->owner == key.offset) ||
		     (key.type == BTRFS_SHARED_BLOCK_REF_KEY &&
		      exist->bytenr == key.offset))) {
			exist = NULL;
			goto next;
		}
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
		if (key.type == BTRFS_SHARED_BLOCK_REF_KEY ||
		    key.type == BTRFS_EXTENT_REF_V0_KEY) {
			if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
				struct btrfs_extent_ref_v0 *ref0;
				ref0 = btrfs_item_ptr(eb, path1->slots[0],
						struct btrfs_extent_ref_v0);
				if (key.objectid == key.offset) {
					root = find_tree_root(rc, eb, ref0);
					if (root && !should_ignore_root(root))
						cur->root = root;
					else
						list_add(&cur->list, &useless);
					break;
				}
				if (is_cowonly_root(btrfs_ref_root_v0(eb,
								      ref0)))
					cur->cowonly = 1;
			}
#else
		ASSERT(key.type != BTRFS_EXTENT_REF_V0_KEY);
		if (key.type == BTRFS_SHARED_BLOCK_REF_KEY) {
#endif
			if (key.objectid == key.offset) {
				/*
				 * only root blocks of reloc trees use
				 * backref of this type.
				 */
				root = find_reloc_root(rc, cur->bytenr);
				ASSERT(root);
				cur->root = root;
				break;
			}
			edge = alloc_backref_edge(cache);
			if (!edge) {
				err = -ENOMEM;
				goto out;
			}
			rb_node = tree_search(&cache->rb_root, key.offset);
			if (!rb_node) {
				upper = alloc_backref_node(cache);
				if (!upper) {
					free_backref_edge(cache, edge);
					err = -ENOMEM;
					goto out;
				}
				upper->bytenr = key.offset;
				upper->level = cur->level + 1;
				/*
				 * backrefs for the upper level block aren't
				 * cached, add the block to the pending list
				 */
				list_add_tail(&edge->list[UPPER], &list);
			} else {
				upper = rb_entry(rb_node, struct backref_node,
						 rb_node);
				ASSERT(upper->checked);
				INIT_LIST_HEAD(&edge->list[UPPER]);
			}
			list_add_tail(&edge->list[LOWER], &cur->upper);
			edge->node[LOWER] = cur;
			edge->node[UPPER] = upper;
			goto next;
		} else if (key.type != BTRFS_TREE_BLOCK_REF_KEY) {
			goto next;
		}
		/* key.type == BTRFS_TREE_BLOCK_REF_KEY */
		root = read_fs_root(rc->extent_root->fs_info, key.offset);
		if (IS_ERR(root)) {
			err = PTR_ERR(root);
			goto out;
		}
		if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state))
			cur->cowonly = 1;
		if (btrfs_root_level(&root->root_item) == cur->level) {
			/* tree root */
			ASSERT(btrfs_root_bytenr(&root->root_item) ==
			       cur->bytenr);
			if (should_ignore_root(root))
				list_add(&cur->list, &useless);
			else
				cur->root = root;
			break;
		}
		level = cur->level + 1;
		/*
		 * search the tree to find upper level blocks that
		 * reference the block.
		 */
		path2->search_commit_root = 1;
		path2->skip_locking = 1;
		path2->lowest_level = level;
		ret = btrfs_search_slot(NULL, root, node_key, path2, 0, 0);
		path2->lowest_level = 0;
		if (ret < 0) {
			err = ret;
			goto out;
		}
		if (ret > 0 && path2->slots[level] > 0)
			path2->slots[level]--;
		eb = path2->nodes[level];
		WARN_ON(btrfs_node_blockptr(eb, path2->slots[level]) !=
			cur->bytenr);
		lower = cur;
		need_check = true;
		for (; level < BTRFS_MAX_LEVEL; level++) {
			if (!path2->nodes[level]) {
				ASSERT(btrfs_root_bytenr(&root->root_item) ==
				       lower->bytenr);
				if (should_ignore_root(root))
					list_add(&lower->list, &useless);
				else
					lower->root = root;
				break;
			}
			edge = alloc_backref_edge(cache);
			if (!edge) {
				err = -ENOMEM;
				goto out;
			}
			eb = path2->nodes[level];
			rb_node = tree_search(&cache->rb_root, eb->start);
			if (!rb_node) {
				upper = alloc_backref_node(cache);
				if (!upper) {
					free_backref_edge(cache, edge);
					err = -ENOMEM;
					goto out;
				}
				upper->bytenr = eb->start;
				upper->owner = btrfs_header_owner(eb);
				upper->level = lower->level + 1;
				if (!test_bit(BTRFS_ROOT_REF_COWS,
					      &root->state))
					upper->cowonly = 1;
				/*
				 * if we know the block isn't shared
				 * we can avoid checking its backrefs.
				 */
				if (btrfs_block_can_be_shared(root, eb))
					upper->checked = 0;
				else
					upper->checked = 1;
				/*
				 * add the block to the pending list if we
				 * need to check its backrefs, we only do this
				 * once while walking up a tree as we will
				 * catch anything else later on.
				 */
				if (!upper->checked && need_check) {
					need_check = false;
					list_add_tail(&edge->list[UPPER],
						      &list);
				} else {
					if (upper->checked)
						need_check = true;
					INIT_LIST_HEAD(&edge->list[UPPER]);
				}
			} else {
				upper = rb_entry(rb_node, struct backref_node,
						 rb_node);
				ASSERT(upper->checked);
				INIT_LIST_HEAD(&edge->list[UPPER]);
				if (!upper->owner)
					upper->owner = btrfs_header_owner(eb);
			}
			list_add_tail(&edge->list[LOWER], &lower->upper);
			edge->node[LOWER] = lower;
			edge->node[UPPER] = upper;
			if (rb_node)
				break;
			lower = upper;
			upper = NULL;
		}
		btrfs_release_path(path2);
next:
		if (ptr < end) {
			ptr += btrfs_extent_inline_ref_size(key.type);
			if (ptr >= end) {
				WARN_ON(ptr > end);
				ptr = 0;
				end = 0;
			}
		}
		if (ptr >= end)
			path1->slots[0]++;
	}
	btrfs_release_path(path1);
	cur->checked = 1;
	WARN_ON(exist);
	/* the pending list isn't empty, take the first block to process */
	if (!list_empty(&list)) {
		edge = list_entry(list.next, struct backref_edge, list[UPPER]);
		list_del_init(&edge->list[UPPER]);
		cur = edge->node[UPPER];
		goto again;
	}
	/*
	 * everything goes well, connect backref nodes and insert backref nodes
	 * into the cache.
	 */
	ASSERT(node->checked);
	cowonly = node->cowonly;
	if (!cowonly) {
		rb_node = tree_insert(&cache->rb_root, node->bytenr,
				      &node->rb_node);
		if (rb_node)
			backref_tree_panic(rb_node, -EEXIST, node->bytenr);
		list_add_tail(&node->lower, &cache->leaves);
	}
	list_for_each_entry(edge, &node->upper, list[LOWER])
		list_add_tail(&edge->list[UPPER], &list);
	while (!list_empty(&list)) {
		edge = list_entry(list.next, struct backref_edge, list[UPPER]);
		list_del_init(&edge->list[UPPER]);
		upper = edge->node[UPPER];
		if (upper->detached) {
			list_del(&edge->list[LOWER]);
			lower = edge->node[LOWER];
			free_backref_edge(cache, edge);
			if (list_empty(&lower->upper))
				list_add(&lower->list, &useless);
			continue;
		}
		if (!RB_EMPTY_NODE(&upper->rb_node)) {
			if (upper->lowest) {
				list_del_init(&upper->lower);
				upper->lowest = 0;
			}
			list_add_tail(&edge->list[UPPER], &upper->lower);
			continue;
		}
		if (!upper->checked) {
			/*
			 * Still want to blow up for developers since this is a
			 * logic bug.
			 */
			ASSERT(0);
			err = -EINVAL;
			goto out;
		}
		if (cowonly != upper->cowonly) {
			ASSERT(0);
			err = -EINVAL;
			goto out;
		}
		if (!cowonly) {
			rb_node = tree_insert(&cache->rb_root, upper->bytenr,
					      &upper->rb_node);
			if (rb_node)
				backref_tree_panic(rb_node, -EEXIST,
						   upper->bytenr);
		}
		list_add_tail(&edge->list[UPPER], &upper->lower);
		list_for_each_entry(edge, &upper->upper, list[LOWER])
			list_add_tail(&edge->list[UPPER], &list);
	}
	/*
	 * process useless backref nodes. backref nodes for tree leaves
	 * are deleted from the cache. backref nodes for upper level
	 * tree blocks are left in the cache to avoid unnecessary backref
	 * lookup.
	 */
	while (!list_empty(&useless)) {
		upper = list_entry(useless.next, struct backref_node, list);
		list_del_init(&upper->list);
		ASSERT(list_empty(&upper->upper));
		if (upper == node)
			node = NULL;
		if (upper->lowest) {
			list_del_init(&upper->lower);
			upper->lowest = 0;
		}
		while (!list_empty(&upper->lower)) {
			edge = list_entry(upper->lower.next,
					  struct backref_edge, list[UPPER]);
			list_del(&edge->list[UPPER]);
			list_del(&edge->list[LOWER]);
			lower = edge->node[LOWER];
			free_backref_edge(cache, edge);
			if (list_empty(&lower->upper))
				list_add(&lower->list, &useless);
		}
		__mark_block_processed(rc, upper);
		if (upper->level > 0) {
			list_add(&upper->list, &cache->detached);
			upper->detached = 1;
		} else {
			rb_erase(&upper->rb_node, &cache->rb_root);
			free_backref_node(cache, upper);
		}
	}
out:
	btrfs_free_path(path1);
	btrfs_free_path(path2);
	if (err) {
		while (!list_empty(&useless)) {
			lower = list_entry(useless.next,
					   struct backref_node, list);
			list_del_init(&lower->list);
		}
		while (!list_empty(&list)) {
			edge = list_first_entry(&list, struct backref_edge,
						list[UPPER]);
			list_del(&edge->list[UPPER]);
			list_del(&edge->list[LOWER]);
			lower = edge->node[LOWER];
			upper = edge->node[UPPER];
			free_backref_edge(cache, edge);
			/*
			 * Lower is no longer linked to any upper backref nodes
			 * and isn't in the cache, we can free it ourselves.
			 */
			if (list_empty(&lower->upper) &&
			    RB_EMPTY_NODE(&lower->rb_node))
				list_add(&lower->list, &useless);
			if (!RB_EMPTY_NODE(&upper->rb_node))
				continue;
			/* Add this guy's upper edges to the list to process */
			list_for_each_entry(edge, &upper->upper, list[LOWER])
				list_add_tail(&edge->list[UPPER], &list);
			if (list_empty(&upper->upper))
				list_add(&upper->list, &useless);
		}
		while (!list_empty(&useless)) {
			lower = list_entry(useless.next,
					   struct backref_node, list);
			list_del_init(&lower->list);
			free_backref_node(cache, lower);
		}
		return ERR_PTR(err);
	}
	ASSERT(!node || !node->detached);
	return node;
}
/*
 * helper to add a backref node for the newly created snapshot.
 * the backref node is created by cloning the backref node that
 * corresponds to the root of the source tree
 */
static int clone_backref_node(struct btrfs_trans_handle *trans,
			      struct reloc_control *rc,
			      struct btrfs_root *src,
			      struct btrfs_root *dest)
{
	struct btrfs_root *reloc_root = src->reloc_root;
	struct backref_cache *cache = &rc->backref_cache;
	struct backref_node *node = NULL;
	struct backref_node *new_node;
	struct backref_edge *edge;
	struct backref_edge *new_edge;
	struct rb_node *rb_node;
	if (cache->last_trans > 0)
		update_backref_cache(trans, cache);
	rb_node = tree_search(&cache->rb_root, src->commit_root->start);
	if (rb_node) {
		node = rb_entry(rb_node, struct backref_node, rb_node);
		if (node->detached)
			node = NULL;
		else
			BUG_ON(node->new_bytenr != reloc_root->node->start);
	}
	if (!node) {
		rb_node = tree_search(&cache->rb_root,
				      reloc_root->commit_root->start);
		if (rb_node) {
			node = rb_entry(rb_node, struct backref_node,
					rb_node);
			BUG_ON(node->detached);
		}
	}
	if (!node)
		return 0;
	new_node = alloc_backref_node(cache);
	if (!new_node)
		return -ENOMEM;
	new_node->bytenr = dest->node->start;
	new_node->level = node->level;
	new_node->lowest = node->lowest;
	new_node->checked = 1;
	new_node->root = dest;
	if (!node->lowest) {
		list_for_each_entry(edge, &node->lower, list[UPPER]) {
			new_edge = alloc_backref_edge(cache);
			if (!new_edge)
				goto fail;
			new_edge->node[UPPER] = new_node;
			new_edge->node[LOWER] = edge->node[LOWER];
			list_add_tail(&new_edge->list[UPPER],
				      &new_node->lower);
		}
	} else {
		list_add_tail(&new_node->lower, &cache->leaves);
	}
	rb_node = tree_insert(&cache->rb_root, new_node->bytenr,
			      &new_node->rb_node);
	if (rb_node)
		backref_tree_panic(rb_node, -EEXIST, new_node->bytenr);
	if (!new_node->lowest) {
		list_for_each_entry(new_edge, &new_node->lower, list[UPPER]) {
			list_add_tail(&new_edge->list[LOWER],
				      &new_edge->node[LOWER]->upper);
		}
	}
	return 0;
fail:
	while (!list_empty(&new_node->lower)) {
		new_edge = list_entry(new_node->lower.next,
				      struct backref_edge, list[UPPER]);
		list_del(&new_edge->list[UPPER]);
		free_backref_edge(cache, new_edge);
	}
	free_backref_node(cache, new_node);
	return -ENOMEM;
}
/*
 * helper to add 'address of tree root -> reloc tree' mapping
 */
static int __must_check __add_reloc_root(struct btrfs_root *root)
{
	struct rb_node *rb_node;
	struct mapping_node *node;
	struct reloc_control *rc = root->fs_info->reloc_ctl;
	node = kmalloc(sizeof(*node), GFP_NOFS);
	if (!node)
		return -ENOMEM;
	node->bytenr = root->node->start;
	node->data = root;
	spin_lock(&rc->reloc_root_tree.lock);
	rb_node = tree_insert(&rc->reloc_root_tree.rb_root,
			      node->bytenr, &node->rb_node);
	spin_unlock(&rc->reloc_root_tree.lock);
	if (rb_node) {
		btrfs_panic(root->fs_info, -EEXIST, "Duplicate root found "
			    "for start=%llu while inserting into relocation "
			    "tree", node->bytenr);
		kfree(node);
		return -EEXIST;
	}
	list_add_tail(&root->root_list, &rc->reloc_roots);
	return 0;
}
/*
 * helper to delete the 'address of tree root -> reloc tree'
 * mapping
 */
static void __del_reloc_root(struct btrfs_root *root)
{
	struct rb_node *rb_node;
	struct mapping_node *node = NULL;
	struct reloc_control *rc = root->fs_info->reloc_ctl;
	spin_lock(&rc->reloc_root_tree.lock);
	rb_node = tree_search(&rc->reloc_root_tree.rb_root,
			      root->node->start);
	if (rb_node) {
		node = rb_entry(rb_node, struct mapping_node, rb_node);
		rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root);
	}
	spin_unlock(&rc->reloc_root_tree.lock);
	if (!node)
		return;
	BUG_ON((struct btrfs_root *)node->data != root);
	spin_lock(&root->fs_info->trans_lock);
	list_del_init(&root->root_list);
	spin_unlock(&root->fs_info->trans_lock);
	kfree(node);
}
/*
 * helper to update the 'address of tree root -> reloc tree'
 * mapping
 */
static int __update_reloc_root(struct btrfs_root *root, u64 new_bytenr)
{
	struct rb_node *rb_node;
	struct mapping_node *node = NULL;
	struct reloc_control *rc = root->fs_info->reloc_ctl;
	spin_lock(&rc->reloc_root_tree.lock);
	rb_node = tree_search(&rc->reloc_root_tree.rb_root,
			      root->node->start);
	if (rb_node) {
		node = rb_entry(rb_node, struct mapping_node, rb_node);
		rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root);
	}
	spin_unlock(&rc->reloc_root_tree.lock);
	if (!node)
		return 0;
	BUG_ON((struct btrfs_root *)node->data != root);
	spin_lock(&rc->reloc_root_tree.lock);
	node->bytenr = new_bytenr;
	rb_node = tree_insert(&rc->reloc_root_tree.rb_root,
			      node->bytenr, &node->rb_node);
	spin_unlock(&rc->reloc_root_tree.lock);
	if (rb_node)
		backref_tree_panic(rb_node, -EEXIST, node->bytenr);
	return 0;
}
  1212. static struct btrfs_root *create_reloc_root(struct btrfs_trans_handle *trans,
  1213. struct btrfs_root *root, u64 objectid)
  1214. {
  1215. struct btrfs_root *reloc_root;
  1216. struct extent_buffer *eb;
  1217. struct btrfs_root_item *root_item;
  1218. struct btrfs_key root_key;
  1219. u64 last_snap = 0;
  1220. int ret;
  1221. root_item = kmalloc(sizeof(*root_item), GFP_NOFS);
  1222. BUG_ON(!root_item);
  1223. root_key.objectid = BTRFS_TREE_RELOC_OBJECTID;
  1224. root_key.type = BTRFS_ROOT_ITEM_KEY;
  1225. root_key.offset = objectid;
  1226. if (root->root_key.objectid == objectid) {
  1227. /* called by btrfs_init_reloc_root */
  1228. ret = btrfs_copy_root(trans, root, root->commit_root, &eb,
  1229. BTRFS_TREE_RELOC_OBJECTID);
  1230. BUG_ON(ret);
  1231. last_snap = btrfs_root_last_snapshot(&root->root_item);
  1232. btrfs_set_root_last_snapshot(&root->root_item,
  1233. trans->transid - 1);
  1234. } else {
  1235. /*
  1236. * called by btrfs_reloc_post_snapshot_hook.
1237. * the source tree is a reloc tree, so all tree blocks
1238. * modified after it was created have the RELOC flag
1239. * set in their headers. it's OK not to update
  1240. * the 'last_snapshot'.
  1241. */
  1242. ret = btrfs_copy_root(trans, root, root->node, &eb,
  1243. BTRFS_TREE_RELOC_OBJECTID);
  1244. BUG_ON(ret);
  1245. }
  1246. memcpy(root_item, &root->root_item, sizeof(*root_item));
  1247. btrfs_set_root_bytenr(root_item, eb->start);
  1248. btrfs_set_root_level(root_item, btrfs_header_level(eb));
  1249. btrfs_set_root_generation(root_item, trans->transid);
  1250. if (root->root_key.objectid == objectid) {
  1251. btrfs_set_root_refs(root_item, 0);
  1252. memset(&root_item->drop_progress, 0,
  1253. sizeof(struct btrfs_disk_key));
  1254. root_item->drop_level = 0;
  1255. /*
  1256. * abuse rtransid, it is safe because it is impossible to
  1257. * receive data into a relocation tree.
  1258. */
  1259. btrfs_set_root_rtransid(root_item, last_snap);
  1260. btrfs_set_root_otransid(root_item, trans->transid);
  1261. }
  1262. btrfs_tree_unlock(eb);
  1263. free_extent_buffer(eb);
  1264. ret = btrfs_insert_root(trans, root->fs_info->tree_root,
  1265. &root_key, root_item);
  1266. BUG_ON(ret);
  1267. kfree(root_item);
  1268. reloc_root = btrfs_read_fs_root(root->fs_info->tree_root, &root_key);
  1269. BUG_ON(IS_ERR(reloc_root));
  1270. reloc_root->last_trans = trans->transid;
  1271. return reloc_root;
  1272. }
  1273. /*
1274. * create a reloc tree for a given fs tree. a reloc tree is just a
1275. * snapshot of the fs tree with a special root objectid.
  1276. */
  1277. int btrfs_init_reloc_root(struct btrfs_trans_handle *trans,
  1278. struct btrfs_root *root)
  1279. {
  1280. struct btrfs_root *reloc_root;
  1281. struct reloc_control *rc = root->fs_info->reloc_ctl;
  1282. struct btrfs_block_rsv *rsv;
  1283. int clear_rsv = 0;
  1284. int ret;
  1285. if (root->reloc_root) {
  1286. reloc_root = root->reloc_root;
  1287. reloc_root->last_trans = trans->transid;
  1288. return 0;
  1289. }
  1290. if (!rc || !rc->create_reloc_tree ||
  1291. root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
  1292. return 0;
  1293. if (!trans->reloc_reserved) {
  1294. rsv = trans->block_rsv;
  1295. trans->block_rsv = rc->block_rsv;
  1296. clear_rsv = 1;
  1297. }
  1298. reloc_root = create_reloc_root(trans, root, root->root_key.objectid);
  1299. if (clear_rsv)
  1300. trans->block_rsv = rsv;
  1301. ret = __add_reloc_root(reloc_root);
  1302. BUG_ON(ret < 0);
  1303. root->reloc_root = reloc_root;
  1304. return 0;
  1305. }
  1306. /*
  1307. * update root item of reloc tree
  1308. */
  1309. int btrfs_update_reloc_root(struct btrfs_trans_handle *trans,
  1310. struct btrfs_root *root)
  1311. {
  1312. struct btrfs_root *reloc_root;
  1313. struct btrfs_root_item *root_item;
  1314. int ret;
  1315. if (!root->reloc_root)
  1316. goto out;
  1317. reloc_root = root->reloc_root;
  1318. root_item = &reloc_root->root_item;
  1319. if (root->fs_info->reloc_ctl->merge_reloc_tree &&
  1320. btrfs_root_refs(root_item) == 0) {
  1321. root->reloc_root = NULL;
  1322. __del_reloc_root(reloc_root);
  1323. }
  1324. if (reloc_root->commit_root != reloc_root->node) {
  1325. btrfs_set_root_node(root_item, reloc_root->node);
  1326. free_extent_buffer(reloc_root->commit_root);
  1327. reloc_root->commit_root = btrfs_root_node(reloc_root);
  1328. }
  1329. ret = btrfs_update_root(trans, root->fs_info->tree_root,
  1330. &reloc_root->root_key, root_item);
  1331. BUG_ON(ret);
  1332. out:
  1333. return 0;
  1334. }
  1335. /*
  1336. * helper to find first cached inode with inode number >= objectid
  1337. * in a subvolume
  1338. */
  1339. static struct inode *find_next_inode(struct btrfs_root *root, u64 objectid)
  1340. {
  1341. struct rb_node *node;
  1342. struct rb_node *prev;
  1343. struct btrfs_inode *entry;
  1344. struct inode *inode;
  1345. spin_lock(&root->inode_lock);
  1346. again:
  1347. node = root->inode_tree.rb_node;
  1348. prev = NULL;
  1349. while (node) {
  1350. prev = node;
  1351. entry = rb_entry(node, struct btrfs_inode, rb_node);
  1352. if (objectid < btrfs_ino(&entry->vfs_inode))
  1353. node = node->rb_left;
  1354. else if (objectid > btrfs_ino(&entry->vfs_inode))
  1355. node = node->rb_right;
  1356. else
  1357. break;
  1358. }
  1359. if (!node) {
  1360. while (prev) {
  1361. entry = rb_entry(prev, struct btrfs_inode, rb_node);
  1362. if (objectid <= btrfs_ino(&entry->vfs_inode)) {
  1363. node = prev;
  1364. break;
  1365. }
  1366. prev = rb_next(prev);
  1367. }
  1368. }
  1369. while (node) {
  1370. entry = rb_entry(node, struct btrfs_inode, rb_node);
  1371. inode = igrab(&entry->vfs_inode);
  1372. if (inode) {
  1373. spin_unlock(&root->inode_lock);
  1374. return inode;
  1375. }
  1376. objectid = btrfs_ino(&entry->vfs_inode) + 1;
  1377. if (cond_resched_lock(&root->inode_lock))
  1378. goto again;
  1379. node = rb_next(node);
  1380. }
  1381. spin_unlock(&root->inode_lock);
  1382. return NULL;
  1383. }
  1384. static int in_block_group(u64 bytenr,
  1385. struct btrfs_block_group_cache *block_group)
  1386. {
  1387. if (bytenr >= block_group->key.objectid &&
  1388. bytenr < block_group->key.objectid + block_group->key.offset)
  1389. return 1;
  1390. return 0;
  1391. }
  1392. /*
  1393. * get new location of data
  1394. */
  1395. static int get_new_location(struct inode *reloc_inode, u64 *new_bytenr,
  1396. u64 bytenr, u64 num_bytes)
  1397. {
  1398. struct btrfs_root *root = BTRFS_I(reloc_inode)->root;
  1399. struct btrfs_path *path;
  1400. struct btrfs_file_extent_item *fi;
  1401. struct extent_buffer *leaf;
  1402. int ret;
  1403. path = btrfs_alloc_path();
  1404. if (!path)
  1405. return -ENOMEM;
  1406. bytenr -= BTRFS_I(reloc_inode)->index_cnt;
  1407. ret = btrfs_lookup_file_extent(NULL, root, path, btrfs_ino(reloc_inode),
  1408. bytenr, 0);
  1409. if (ret < 0)
  1410. goto out;
  1411. if (ret > 0) {
  1412. ret = -ENOENT;
  1413. goto out;
  1414. }
  1415. leaf = path->nodes[0];
  1416. fi = btrfs_item_ptr(leaf, path->slots[0],
  1417. struct btrfs_file_extent_item);
  1418. BUG_ON(btrfs_file_extent_offset(leaf, fi) ||
  1419. btrfs_file_extent_compression(leaf, fi) ||
  1420. btrfs_file_extent_encryption(leaf, fi) ||
  1421. btrfs_file_extent_other_encoding(leaf, fi));
  1422. if (num_bytes != btrfs_file_extent_disk_num_bytes(leaf, fi)) {
  1423. ret = -EINVAL;
  1424. goto out;
  1425. }
  1426. *new_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
  1427. ret = 0;
  1428. out:
  1429. btrfs_free_path(path);
  1430. return ret;
  1431. }
  1432. /*
  1433. * update file extent items in the tree leaf to point to
  1434. * the new locations.
  1435. */
  1436. static noinline_for_stack
  1437. int replace_file_extents(struct btrfs_trans_handle *trans,
  1438. struct reloc_control *rc,
  1439. struct btrfs_root *root,
  1440. struct extent_buffer *leaf)
  1441. {
  1442. struct btrfs_key key;
  1443. struct btrfs_file_extent_item *fi;
  1444. struct inode *inode = NULL;
  1445. u64 parent;
  1446. u64 bytenr;
  1447. u64 new_bytenr = 0;
  1448. u64 num_bytes;
  1449. u64 end;
  1450. u32 nritems;
  1451. u32 i;
  1452. int ret = 0;
  1453. int first = 1;
  1454. int dirty = 0;
  1455. if (rc->stage != UPDATE_DATA_PTRS)
  1456. return 0;
  1457. /* reloc trees always use full backref */
  1458. if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
  1459. parent = leaf->start;
  1460. else
  1461. parent = 0;
  1462. nritems = btrfs_header_nritems(leaf);
  1463. for (i = 0; i < nritems; i++) {
  1464. cond_resched();
  1465. btrfs_item_key_to_cpu(leaf, &key, i);
  1466. if (key.type != BTRFS_EXTENT_DATA_KEY)
  1467. continue;
  1468. fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
  1469. if (btrfs_file_extent_type(leaf, fi) ==
  1470. BTRFS_FILE_EXTENT_INLINE)
  1471. continue;
  1472. bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
  1473. num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
  1474. if (bytenr == 0)
  1475. continue;
  1476. if (!in_block_group(bytenr, rc->block_group))
  1477. continue;
  1478. /*
1479. * if we are modifying a block in the fs tree, wait for readpage
  1480. * to complete and drop the extent cache
  1481. */
  1482. if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
  1483. if (first) {
  1484. inode = find_next_inode(root, key.objectid);
  1485. first = 0;
  1486. } else if (inode && btrfs_ino(inode) < key.objectid) {
  1487. btrfs_add_delayed_iput(inode);
  1488. inode = find_next_inode(root, key.objectid);
  1489. }
  1490. if (inode && btrfs_ino(inode) == key.objectid) {
  1491. end = key.offset +
  1492. btrfs_file_extent_num_bytes(leaf, fi);
  1493. WARN_ON(!IS_ALIGNED(key.offset,
  1494. root->sectorsize));
  1495. WARN_ON(!IS_ALIGNED(end, root->sectorsize));
  1496. end--;
  1497. ret = try_lock_extent(&BTRFS_I(inode)->io_tree,
  1498. key.offset, end);
  1499. if (!ret)
  1500. continue;
  1501. btrfs_drop_extent_cache(inode, key.offset, end,
  1502. 1);
  1503. unlock_extent(&BTRFS_I(inode)->io_tree,
  1504. key.offset, end);
  1505. }
  1506. }
  1507. ret = get_new_location(rc->data_inode, &new_bytenr,
  1508. bytenr, num_bytes);
  1509. if (ret) {
  1510. /*
  1511. * Don't have to abort since we've not changed anything
  1512. * in the file extent yet.
  1513. */
  1514. break;
  1515. }
  1516. btrfs_set_file_extent_disk_bytenr(leaf, fi, new_bytenr);
  1517. dirty = 1;
  1518. key.offset -= btrfs_file_extent_offset(leaf, fi);
  1519. ret = btrfs_inc_extent_ref(trans, root, new_bytenr,
  1520. num_bytes, parent,
  1521. btrfs_header_owner(leaf),
  1522. key.objectid, key.offset, 1);
  1523. if (ret) {
  1524. btrfs_abort_transaction(trans, root, ret);
  1525. break;
  1526. }
  1527. ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
  1528. parent, btrfs_header_owner(leaf),
  1529. key.objectid, key.offset, 1);
  1530. if (ret) {
  1531. btrfs_abort_transaction(trans, root, ret);
  1532. break;
  1533. }
  1534. }
  1535. if (dirty)
  1536. btrfs_mark_buffer_dirty(leaf);
  1537. if (inode)
  1538. btrfs_add_delayed_iput(inode);
  1539. return ret;
  1540. }
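/*
 * compare the node key at @slot of @eb with the key at the current slot
 * of path->nodes[level]; returns 0 when the two keys are identical
 */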
  1541. static noinline_for_stack
  1542. int memcmp_node_keys(struct extent_buffer *eb, int slot,
  1543. struct btrfs_path *path, int level)
  1544. {
  1545. struct btrfs_disk_key key1;
  1546. struct btrfs_disk_key key2;
  1547. btrfs_node_key(eb, &key1, slot);
  1548. btrfs_node_key(path->nodes[level], &key2, path->slots[level]);
  1549. return memcmp(&key1, &key2, sizeof(key1));
  1550. }
  1551. /*
  1552. * try to replace tree blocks in fs tree with the new blocks
1553. * in reloc tree. tree blocks that haven't been modified since the
1554. * reloc tree was created can be replaced.
  1555. *
  1556. * if a block was replaced, level of the block + 1 is returned.
  1557. * if no block got replaced, 0 is returned. if there are other
  1558. * errors, a negative error number is returned.
  1559. */
  1560. static noinline_for_stack
  1561. int replace_path(struct btrfs_trans_handle *trans,
  1562. struct btrfs_root *dest, struct btrfs_root *src,
  1563. struct btrfs_path *path, struct btrfs_key *next_key,
  1564. int lowest_level, int max_level)
  1565. {
  1566. struct extent_buffer *eb;
  1567. struct extent_buffer *parent;
  1568. struct btrfs_key key;
  1569. u64 old_bytenr;
  1570. u64 new_bytenr;
  1571. u64 old_ptr_gen;
  1572. u64 new_ptr_gen;
  1573. u64 last_snapshot;
  1574. u32 blocksize;
  1575. int cow = 0;
  1576. int level;
  1577. int ret;
  1578. int slot;
  1579. BUG_ON(src->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
  1580. BUG_ON(dest->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID);
  1581. last_snapshot = btrfs_root_last_snapshot(&src->root_item);
  1582. again:
  1583. slot = path->slots[lowest_level];
  1584. btrfs_node_key_to_cpu(path->nodes[lowest_level], &key, slot);
  1585. eb = btrfs_lock_root_node(dest);
  1586. btrfs_set_lock_blocking(eb);
  1587. level = btrfs_header_level(eb);
  1588. if (level < lowest_level) {
  1589. btrfs_tree_unlock(eb);
  1590. free_extent_buffer(eb);
  1591. return 0;
  1592. }
  1593. if (cow) {
  1594. ret = btrfs_cow_block(trans, dest, eb, NULL, 0, &eb);
  1595. BUG_ON(ret);
  1596. }
  1597. btrfs_set_lock_blocking(eb);
  1598. if (next_key) {
  1599. next_key->objectid = (u64)-1;
  1600. next_key->type = (u8)-1;
  1601. next_key->offset = (u64)-1;
  1602. }
  1603. parent = eb;
  1604. while (1) {
  1605. level = btrfs_header_level(parent);
  1606. BUG_ON(level < lowest_level);
  1607. ret = btrfs_bin_search(parent, &key, level, &slot);
  1608. if (ret && slot > 0)
  1609. slot--;
  1610. if (next_key && slot + 1 < btrfs_header_nritems(parent))
  1611. btrfs_node_key_to_cpu(parent, next_key, slot + 1);
  1612. old_bytenr = btrfs_node_blockptr(parent, slot);
  1613. blocksize = dest->nodesize;
  1614. old_ptr_gen = btrfs_node_ptr_generation(parent, slot);
  1615. if (level <= max_level) {
  1616. eb = path->nodes[level];
  1617. new_bytenr = btrfs_node_blockptr(eb,
  1618. path->slots[level]);
  1619. new_ptr_gen = btrfs_node_ptr_generation(eb,
  1620. path->slots[level]);
  1621. } else {
  1622. new_bytenr = 0;
  1623. new_ptr_gen = 0;
  1624. }
  1625. if (WARN_ON(new_bytenr > 0 && new_bytenr == old_bytenr)) {
  1626. ret = level;
  1627. break;
  1628. }
  1629. if (new_bytenr == 0 || old_ptr_gen > last_snapshot ||
  1630. memcmp_node_keys(parent, slot, path, level)) {
  1631. if (level <= lowest_level) {
  1632. ret = 0;
  1633. break;
  1634. }
  1635. eb = read_tree_block(dest, old_bytenr, old_ptr_gen);
  1636. if (!eb || !extent_buffer_uptodate(eb)) {
  1637. ret = (!eb) ? -ENOMEM : -EIO;
  1638. free_extent_buffer(eb);
  1639. break;
  1640. }
  1641. btrfs_tree_lock(eb);
  1642. if (cow) {
  1643. ret = btrfs_cow_block(trans, dest, eb, parent,
  1644. slot, &eb);
  1645. BUG_ON(ret);
  1646. }
  1647. btrfs_set_lock_blocking(eb);
  1648. btrfs_tree_unlock(parent);
  1649. free_extent_buffer(parent);
  1650. parent = eb;
  1651. continue;
  1652. }
  1653. if (!cow) {
  1654. btrfs_tree_unlock(parent);
  1655. free_extent_buffer(parent);
  1656. cow = 1;
  1657. goto again;
  1658. }
  1659. btrfs_node_key_to_cpu(path->nodes[level], &key,
  1660. path->slots[level]);
  1661. btrfs_release_path(path);
  1662. path->lowest_level = level;
  1663. ret = btrfs_search_slot(trans, src, &key, path, 0, 1);
  1664. path->lowest_level = 0;
  1665. BUG_ON(ret);
  1666. /*
  1667. * swap blocks in fs tree and reloc tree.
  1668. */
  1669. btrfs_set_node_blockptr(parent, slot, new_bytenr);
  1670. btrfs_set_node_ptr_generation(parent, slot, new_ptr_gen);
  1671. btrfs_mark_buffer_dirty(parent);
  1672. btrfs_set_node_blockptr(path->nodes[level],
  1673. path->slots[level], old_bytenr);
  1674. btrfs_set_node_ptr_generation(path->nodes[level],
  1675. path->slots[level], old_ptr_gen);
  1676. btrfs_mark_buffer_dirty(path->nodes[level]);
  1677. ret = btrfs_inc_extent_ref(trans, src, old_bytenr, blocksize,
  1678. path->nodes[level]->start,
  1679. src->root_key.objectid, level - 1, 0,
  1680. 1);
  1681. BUG_ON(ret);
  1682. ret = btrfs_inc_extent_ref(trans, dest, new_bytenr, blocksize,
  1683. 0, dest->root_key.objectid, level - 1,
  1684. 0, 1);
  1685. BUG_ON(ret);
  1686. ret = btrfs_free_extent(trans, src, new_bytenr, blocksize,
  1687. path->nodes[level]->start,
  1688. src->root_key.objectid, level - 1, 0,
  1689. 1);
  1690. BUG_ON(ret);
  1691. ret = btrfs_free_extent(trans, dest, old_bytenr, blocksize,
  1692. 0, dest->root_key.objectid, level - 1,
  1693. 0, 1);
  1694. BUG_ON(ret);
  1695. btrfs_unlock_up_safe(path, 0);
  1696. ret = level;
  1697. break;
  1698. }
  1699. btrfs_tree_unlock(parent);
  1700. free_extent_buffer(parent);
  1701. return ret;
  1702. }
  1703. /*
  1704. * helper to find next relocated block in reloc tree
  1705. */
  1706. static noinline_for_stack
  1707. int walk_up_reloc_tree(struct btrfs_root *root, struct btrfs_path *path,
  1708. int *level)
  1709. {
  1710. struct extent_buffer *eb;
  1711. int i;
  1712. u64 last_snapshot;
  1713. u32 nritems;
  1714. last_snapshot = btrfs_root_last_snapshot(&root->root_item);
  1715. for (i = 0; i < *level; i++) {
  1716. free_extent_buffer(path->nodes[i]);
  1717. path->nodes[i] = NULL;
  1718. }
  1719. for (i = *level; i < BTRFS_MAX_LEVEL && path->nodes[i]; i++) {
  1720. eb = path->nodes[i];
  1721. nritems = btrfs_header_nritems(eb);
  1722. while (path->slots[i] + 1 < nritems) {
  1723. path->slots[i]++;
  1724. if (btrfs_node_ptr_generation(eb, path->slots[i]) <=
  1725. last_snapshot)
  1726. continue;
  1727. *level = i;
  1728. return 0;
  1729. }
  1730. free_extent_buffer(path->nodes[i]);
  1731. path->nodes[i] = NULL;
  1732. }
  1733. return 1;
  1734. }
  1735. /*
1736. * walk down the reloc tree to find the relocated block of lowest level
  1737. */
  1738. static noinline_for_stack
  1739. int walk_down_reloc_tree(struct btrfs_root *root, struct btrfs_path *path,
  1740. int *level)
  1741. {
  1742. struct extent_buffer *eb = NULL;
  1743. int i;
  1744. u64 bytenr;
  1745. u64 ptr_gen = 0;
  1746. u64 last_snapshot;
  1747. u32 nritems;
  1748. last_snapshot = btrfs_root_last_snapshot(&root->root_item);
  1749. for (i = *level; i > 0; i--) {
  1750. eb = path->nodes[i];
  1751. nritems = btrfs_header_nritems(eb);
  1752. while (path->slots[i] < nritems) {
  1753. ptr_gen = btrfs_node_ptr_generation(eb, path->slots[i]);
  1754. if (ptr_gen > last_snapshot)
  1755. break;
  1756. path->slots[i]++;
  1757. }
  1758. if (path->slots[i] >= nritems) {
  1759. if (i == *level)
  1760. break;
  1761. *level = i + 1;
  1762. return 0;
  1763. }
  1764. if (i == 1) {
  1765. *level = i;
  1766. return 0;
  1767. }
  1768. bytenr = btrfs_node_blockptr(eb, path->slots[i]);
  1769. eb = read_tree_block(root, bytenr, ptr_gen);
  1770. if (!eb || !extent_buffer_uptodate(eb)) {
  1771. free_extent_buffer(eb);
  1772. return -EIO;
  1773. }
  1774. BUG_ON(btrfs_header_level(eb) != i - 1);
  1775. path->nodes[i - 1] = eb;
  1776. path->slots[i - 1] = 0;
  1777. }
  1778. return 1;
  1779. }
  1780. /*
1781. * invalidate extent cache for file extents whose key is in the range of
  1782. * [min_key, max_key)
  1783. */
  1784. static int invalidate_extent_cache(struct btrfs_root *root,
  1785. struct btrfs_key *min_key,
  1786. struct btrfs_key *max_key)
  1787. {
  1788. struct inode *inode = NULL;
  1789. u64 objectid;
  1790. u64 start, end;
  1791. u64 ino;
  1792. objectid = min_key->objectid;
  1793. while (1) {
  1794. cond_resched();
  1795. iput(inode);
  1796. if (objectid > max_key->objectid)
  1797. break;
  1798. inode = find_next_inode(root, objectid);
  1799. if (!inode)
  1800. break;
  1801. ino = btrfs_ino(inode);
  1802. if (ino > max_key->objectid) {
  1803. iput(inode);
  1804. break;
  1805. }
  1806. objectid = ino + 1;
  1807. if (!S_ISREG(inode->i_mode))
  1808. continue;
  1809. if (unlikely(min_key->objectid == ino)) {
  1810. if (min_key->type > BTRFS_EXTENT_DATA_KEY)
  1811. continue;
  1812. if (min_key->type < BTRFS_EXTENT_DATA_KEY)
  1813. start = 0;
  1814. else {
  1815. start = min_key->offset;
  1816. WARN_ON(!IS_ALIGNED(start, root->sectorsize));
  1817. }
  1818. } else {
  1819. start = 0;
  1820. }
  1821. if (unlikely(max_key->objectid == ino)) {
  1822. if (max_key->type < BTRFS_EXTENT_DATA_KEY)
  1823. continue;
  1824. if (max_key->type > BTRFS_EXTENT_DATA_KEY) {
  1825. end = (u64)-1;
  1826. } else {
  1827. if (max_key->offset == 0)
  1828. continue;
  1829. end = max_key->offset;
  1830. WARN_ON(!IS_ALIGNED(end, root->sectorsize));
  1831. end--;
  1832. }
  1833. } else {
  1834. end = (u64)-1;
  1835. }
  1836. /* the lock_extent waits for readpage to complete */
  1837. lock_extent(&BTRFS_I(inode)->io_tree, start, end);
  1838. btrfs_drop_extent_cache(inode, start, end, 1);
  1839. unlock_extent(&BTRFS_I(inode)->io_tree, start, end);
  1840. }
  1841. return 0;
  1842. }
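/*
 * starting at @level, walk up the path and fill @key with the key of the
 * next populated slot; returns 1 when the path has no further keys
 */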
  1843. static int find_next_key(struct btrfs_path *path, int level,
  1844. struct btrfs_key *key)
  1845. {
  1846. while (level < BTRFS_MAX_LEVEL) {
  1847. if (!path->nodes[level])
  1848. break;
  1849. if (path->slots[level] + 1 <
  1850. btrfs_header_nritems(path->nodes[level])) {
  1851. btrfs_node_key_to_cpu(path->nodes[level], key,
  1852. path->slots[level] + 1);
  1853. return 0;
  1854. }
  1855. level++;
  1856. }
  1857. return 1;
  1858. }
  1859. /*
1860. * merge the relocated tree blocks in the reloc tree with the corresponding
  1861. * fs tree.
  1862. */
  1863. static noinline_for_stack int merge_reloc_root(struct reloc_control *rc,
  1864. struct btrfs_root *root)
  1865. {
  1866. LIST_HEAD(inode_list);
  1867. struct btrfs_key key;
  1868. struct btrfs_key next_key;
  1869. struct btrfs_trans_handle *trans = NULL;
  1870. struct btrfs_root *reloc_root;
  1871. struct btrfs_root_item *root_item;
  1872. struct btrfs_path *path;
  1873. struct extent_buffer *leaf;
  1874. int level;
  1875. int max_level;
  1876. int replaced = 0;
  1877. int ret;
  1878. int err = 0;
  1879. u32 min_reserved;
  1880. path = btrfs_alloc_path();
  1881. if (!path)
  1882. return -ENOMEM;
  1883. path->reada = 1;
  1884. reloc_root = root->reloc_root;
  1885. root_item = &reloc_root->root_item;
  1886. if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
  1887. level = btrfs_root_level(root_item);
  1888. extent_buffer_get(reloc_root->node);
  1889. path->nodes[level] = reloc_root->node;
  1890. path->slots[level] = 0;
  1891. } else {
  1892. btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
  1893. level = root_item->drop_level;
  1894. BUG_ON(level == 0);
  1895. path->lowest_level = level;
  1896. ret = btrfs_search_slot(NULL, reloc_root, &key, path, 0, 0);
  1897. path->lowest_level = 0;
  1898. if (ret < 0) {
  1899. btrfs_free_path(path);
  1900. return ret;
  1901. }
  1902. btrfs_node_key_to_cpu(path->nodes[level], &next_key,
  1903. path->slots[level]);
  1904. WARN_ON(memcmp(&key, &next_key, sizeof(key)));
  1905. btrfs_unlock_up_safe(path, 0);
  1906. }
  1907. min_reserved = root->nodesize * (BTRFS_MAX_LEVEL - 1) * 2;
  1908. memset(&next_key, 0, sizeof(next_key));
  1909. while (1) {
  1910. ret = btrfs_block_rsv_refill(root, rc->block_rsv, min_reserved,
  1911. BTRFS_RESERVE_FLUSH_ALL);
  1912. if (ret) {
  1913. err = ret;
  1914. goto out;
  1915. }
  1916. trans = btrfs_start_transaction(root, 0);
  1917. if (IS_ERR(trans)) {
  1918. err = PTR_ERR(trans);
  1919. trans = NULL;
  1920. goto out;
  1921. }
  1922. trans->block_rsv = rc->block_rsv;
  1923. replaced = 0;
  1924. max_level = level;
  1925. ret = walk_down_reloc_tree(reloc_root, path, &level);
  1926. if (ret < 0) {
  1927. err = ret;
  1928. goto out;
  1929. }
  1930. if (ret > 0)
  1931. break;
  1932. if (!find_next_key(path, level, &key) &&
  1933. btrfs_comp_cpu_keys(&next_key, &key) >= 0) {
  1934. ret = 0;
  1935. } else {
  1936. ret = replace_path(trans, root, reloc_root, path,
  1937. &next_key, level, max_level);
  1938. }
  1939. if (ret < 0) {
  1940. err = ret;
  1941. goto out;
  1942. }
  1943. if (ret > 0) {
  1944. level = ret;
  1945. btrfs_node_key_to_cpu(path->nodes[level], &key,
  1946. path->slots[level]);
  1947. replaced = 1;
  1948. }
  1949. ret = walk_up_reloc_tree(reloc_root, path, &level);
  1950. if (ret > 0)
  1951. break;
  1952. BUG_ON(level == 0);
  1953. /*
  1954. * save the merging progress in the drop_progress.
  1955. * this is OK since root refs == 1 in this case.
  1956. */
  1957. btrfs_node_key(path->nodes[level], &root_item->drop_progress,
  1958. path->slots[level]);
  1959. root_item->drop_level = level;
  1960. btrfs_end_transaction_throttle(trans, root);
  1961. trans = NULL;
  1962. btrfs_btree_balance_dirty(root);
  1963. if (replaced && rc->stage == UPDATE_DATA_PTRS)
  1964. invalidate_extent_cache(root, &key, &next_key);
  1965. }
  1966. /*
1967. * handle the case where only one block in the fs tree needs to be
1968. * relocated and the block is the tree root.
  1969. */
  1970. leaf = btrfs_lock_root_node(root);
  1971. ret = btrfs_cow_block(trans, root, leaf, NULL, 0, &leaf);
  1972. btrfs_tree_unlock(leaf);
  1973. free_extent_buffer(leaf);
  1974. if (ret < 0)
  1975. err = ret;
  1976. out:
  1977. btrfs_free_path(path);
  1978. if (err == 0) {
  1979. memset(&root_item->drop_progress, 0,
  1980. sizeof(root_item->drop_progress));
  1981. root_item->drop_level = 0;
  1982. btrfs_set_root_refs(root_item, 0);
  1983. btrfs_update_reloc_root(trans, root);
  1984. }
  1985. if (trans)
  1986. btrfs_end_transaction_throttle(trans, root);
  1987. btrfs_btree_balance_dirty(root);
  1988. if (replaced && rc->stage == UPDATE_DATA_PTRS)
  1989. invalidate_extent_cache(root, &key, &next_key);
  1990. return err;
  1991. }
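/*
 * reserve space for merging the reloc trees, then set each reloc root's
 * reference count to 1 and write out its root item so that an
 * interrupted merge can be resumed by btrfs_recover_relocation()
 */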
  1992. static noinline_for_stack
  1993. int prepare_to_merge(struct reloc_control *rc, int err)
  1994. {
  1995. struct btrfs_root *root = rc->extent_root;
  1996. struct btrfs_root *reloc_root;
  1997. struct btrfs_trans_handle *trans;
  1998. LIST_HEAD(reloc_roots);
  1999. u64 num_bytes = 0;
  2000. int ret;
  2001. mutex_lock(&root->fs_info->reloc_mutex);
  2002. rc->merging_rsv_size += root->nodesize * (BTRFS_MAX_LEVEL - 1) * 2;
  2003. rc->merging_rsv_size += rc->nodes_relocated * 2;
  2004. mutex_unlock(&root->fs_info->reloc_mutex);
  2005. again:
  2006. if (!err) {
  2007. num_bytes = rc->merging_rsv_size;
  2008. ret = btrfs_block_rsv_add(root, rc->block_rsv, num_bytes,
  2009. BTRFS_RESERVE_FLUSH_ALL);
  2010. if (ret)
  2011. err = ret;
  2012. }
  2013. trans = btrfs_join_transaction(rc->extent_root);
  2014. if (IS_ERR(trans)) {
  2015. if (!err)
  2016. btrfs_block_rsv_release(rc->extent_root,
  2017. rc->block_rsv, num_bytes);
  2018. return PTR_ERR(trans);
  2019. }
  2020. if (!err) {
  2021. if (num_bytes != rc->merging_rsv_size) {
  2022. btrfs_end_transaction(trans, rc->extent_root);
  2023. btrfs_block_rsv_release(rc->extent_root,
  2024. rc->block_rsv, num_bytes);
  2025. goto again;
  2026. }
  2027. }
  2028. rc->merge_reloc_tree = 1;
  2029. while (!list_empty(&rc->reloc_roots)) {
  2030. reloc_root = list_entry(rc->reloc_roots.next,
  2031. struct btrfs_root, root_list);
  2032. list_del_init(&reloc_root->root_list);
  2033. root = read_fs_root(reloc_root->fs_info,
  2034. reloc_root->root_key.offset);
  2035. BUG_ON(IS_ERR(root));
  2036. BUG_ON(root->reloc_root != reloc_root);
  2037. /*
  2038. * set reference count to 1, so btrfs_recover_relocation
2039. * knows it should resume merging
  2040. */
  2041. if (!err)
  2042. btrfs_set_root_refs(&reloc_root->root_item, 1);
  2043. btrfs_update_reloc_root(trans, root);
  2044. list_add(&reloc_root->root_list, &reloc_roots);
  2045. }
  2046. list_splice(&reloc_roots, &rc->reloc_roots);
  2047. if (!err)
  2048. btrfs_commit_transaction(trans, rc->extent_root);
  2049. else
  2050. btrfs_end_transaction(trans, rc->extent_root);
  2051. return err;
  2052. }
  2053. static noinline_for_stack
  2054. void free_reloc_roots(struct list_head *list)
  2055. {
  2056. struct btrfs_root *reloc_root;
  2057. while (!list_empty(list)) {
  2058. reloc_root = list_entry(list->next, struct btrfs_root,
  2059. root_list);
  2060. __del_reloc_root(reloc_root);
  2061. }
  2062. }
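/*
 * merge every reloc tree back into its fs tree; reloc roots whose
 * reference count is already zero are skipped, and all reloc trees are
 * dropped once they have been processed
 */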
  2063. static noinline_for_stack
  2064. void merge_reloc_roots(struct reloc_control *rc)
  2065. {
  2066. struct btrfs_root *root;
  2067. struct btrfs_root *reloc_root;
  2068. u64 last_snap;
  2069. u64 otransid;
  2070. u64 objectid;
  2071. LIST_HEAD(reloc_roots);
  2072. int found = 0;
  2073. int ret = 0;
  2074. again:
  2075. root = rc->extent_root;
  2076. /*
  2077. * this serializes us with btrfs_record_root_in_transaction,
  2078. * we have to make sure nobody is in the middle of
  2079. * adding their roots to the list while we are
  2080. * doing this splice
  2081. */
  2082. mutex_lock(&root->fs_info->reloc_mutex);
  2083. list_splice_init(&rc->reloc_roots, &reloc_roots);
  2084. mutex_unlock(&root->fs_info->reloc_mutex);
  2085. while (!list_empty(&reloc_roots)) {
  2086. found = 1;
  2087. reloc_root = list_entry(reloc_roots.next,
  2088. struct btrfs_root, root_list);
  2089. if (btrfs_root_refs(&reloc_root->root_item) > 0) {
  2090. root = read_fs_root(reloc_root->fs_info,
  2091. reloc_root->root_key.offset);
  2092. BUG_ON(IS_ERR(root));
  2093. BUG_ON(root->reloc_root != reloc_root);
  2094. ret = merge_reloc_root(rc, root);
  2095. if (ret) {
  2096. if (list_empty(&reloc_root->root_list))
  2097. list_add_tail(&reloc_root->root_list,
  2098. &reloc_roots);
  2099. goto out;
  2100. }
  2101. } else {
  2102. list_del_init(&reloc_root->root_list);
  2103. }
  2104. /*
2105. * we keep the old last snapshot transid in rtransid when we
  2106. * created the relocation tree.
  2107. */
  2108. last_snap = btrfs_root_rtransid(&reloc_root->root_item);
  2109. otransid = btrfs_root_otransid(&reloc_root->root_item);
  2110. objectid = reloc_root->root_key.offset;
  2111. ret = btrfs_drop_snapshot(reloc_root, rc->block_rsv, 0, 1);
  2112. if (ret < 0) {
  2113. if (list_empty(&reloc_root->root_list))
  2114. list_add_tail(&reloc_root->root_list,
  2115. &reloc_roots);
  2116. goto out;
  2117. }
  2118. }
  2119. if (found) {
  2120. found = 0;
  2121. goto again;
  2122. }
  2123. out:
  2124. if (ret) {
  2125. btrfs_std_error(root->fs_info, ret);
  2126. if (!list_empty(&reloc_roots))
  2127. free_reloc_roots(&reloc_roots);
  2128. /* new reloc root may be added */
  2129. mutex_lock(&root->fs_info->reloc_mutex);
  2130. list_splice_init(&rc->reloc_roots, &reloc_roots);
  2131. mutex_unlock(&root->fs_info->reloc_mutex);
  2132. if (!list_empty(&reloc_roots))
  2133. free_reloc_roots(&reloc_roots);
  2134. }
  2135. BUG_ON(!RB_EMPTY_ROOT(&rc->reloc_root_tree.rb_root));
  2136. }
  2137. static void free_block_list(struct rb_root *blocks)
  2138. {
  2139. struct tree_block *block;
  2140. struct rb_node *rb_node;
  2141. while ((rb_node = rb_first(blocks))) {
  2142. block = rb_entry(rb_node, struct tree_block, rb_node);
  2143. rb_erase(rb_node, blocks);
  2144. kfree(block);
  2145. }
  2146. }
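/*
 * make sure the fs tree that owns @reloc_root has been recorded in the
 * current transaction, so the reloc tree stays up to date
 */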
  2147. static int record_reloc_root_in_trans(struct btrfs_trans_handle *trans,
  2148. struct btrfs_root *reloc_root)
  2149. {
  2150. struct btrfs_root *root;
  2151. if (reloc_root->last_trans == trans->transid)
  2152. return 0;
  2153. root = read_fs_root(reloc_root->fs_info, reloc_root->root_key.offset);
  2154. BUG_ON(IS_ERR(root));
  2155. BUG_ON(root->reloc_root != reloc_root);
  2156. return btrfs_record_root_in_trans(trans, root);
  2157. }
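/*
 * walk up the backref chain from @node to find a tree that references
 * the block, record that tree in the transaction and return its reloc
 * root. the chain of backref nodes that was walked is remembered in
 * rc->backref_cache.path for btrfs_reloc_cow_block().
 */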
  2158. static noinline_for_stack
  2159. struct btrfs_root *select_reloc_root(struct btrfs_trans_handle *trans,
  2160. struct reloc_control *rc,
  2161. struct backref_node *node,
  2162. struct backref_edge *edges[])
  2163. {
  2164. struct backref_node *next;
  2165. struct btrfs_root *root;
  2166. int index = 0;
  2167. next = node;
  2168. while (1) {
  2169. cond_resched();
  2170. next = walk_up_backref(next, edges, &index);
  2171. root = next->root;
  2172. BUG_ON(!root);
  2173. BUG_ON(!test_bit(BTRFS_ROOT_REF_COWS, &root->state));
  2174. if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
  2175. record_reloc_root_in_trans(trans, root);
  2176. break;
  2177. }
  2178. btrfs_record_root_in_trans(trans, root);
  2179. root = root->reloc_root;
  2180. if (next->new_bytenr != root->node->start) {
  2181. BUG_ON(next->new_bytenr);
  2182. BUG_ON(!list_empty(&next->list));
  2183. next->new_bytenr = root->node->start;
  2184. next->root = root;
  2185. list_add_tail(&next->list,
  2186. &rc->backref_cache.changed);
  2187. __mark_block_processed(rc, next);
  2188. break;
  2189. }
  2190. WARN_ON(1);
  2191. root = NULL;
  2192. next = walk_down_backref(edges, &index);
  2193. if (!next || next->level <= node->level)
  2194. break;
  2195. }
  2196. if (!root)
  2197. return NULL;
  2198. next = node;
  2199. /* setup backref node path for btrfs_reloc_cow_block */
  2200. while (1) {
  2201. rc->backref_cache.path[next->level] = next;
  2202. if (--index < 0)
  2203. break;
  2204. next = edges[index]->node[UPPER];
  2205. }
  2206. return root;
  2207. }
  2208. /*
  2209. * select a tree root for relocation. return NULL if the block
  2210. * is reference counted. we should use do_relocation() in this
  2211. * case. return a tree root pointer if the block isn't reference
2212. * counted. return -ENOENT if the block is the root of a reloc tree.
  2213. */
  2214. static noinline_for_stack
  2215. struct btrfs_root *select_one_root(struct btrfs_trans_handle *trans,
  2216. struct backref_node *node)
  2217. {
  2218. struct backref_node *next;
  2219. struct btrfs_root *root;
  2220. struct btrfs_root *fs_root = NULL;
  2221. struct backref_edge *edges[BTRFS_MAX_LEVEL - 1];
  2222. int index = 0;
  2223. next = node;
  2224. while (1) {
  2225. cond_resched();
  2226. next = walk_up_backref(next, edges, &index);
  2227. root = next->root;
  2228. BUG_ON(!root);
2229. /* no other choice for a non-reference counted tree */
  2230. if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state))
  2231. return root;
  2232. if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID)
  2233. fs_root = root;
  2234. if (next != node)
  2235. return NULL;
  2236. next = walk_down_backref(edges, &index);
  2237. if (!next || next->level <= node->level)
  2238. break;
  2239. }
  2240. if (!fs_root)
  2241. return ERR_PTR(-ENOENT);
  2242. return fs_root;
  2243. }
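/*
 * calculate how many bytes of metadata must be reserved to COW @node and
 * every not-yet-processed upper level block that references it
 */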
  2244. static noinline_for_stack
  2245. u64 calcu_metadata_size(struct reloc_control *rc,
  2246. struct backref_node *node, int reserve)
  2247. {
  2248. struct backref_node *next = node;
  2249. struct backref_edge *edge;
  2250. struct backref_edge *edges[BTRFS_MAX_LEVEL - 1];
  2251. u64 num_bytes = 0;
  2252. int index = 0;
  2253. BUG_ON(reserve && node->processed);
  2254. while (next) {
  2255. cond_resched();
  2256. while (1) {
  2257. if (next->processed && (reserve || next != node))
  2258. break;
  2259. num_bytes += rc->extent_root->nodesize;
  2260. if (list_empty(&next->upper))
  2261. break;
  2262. edge = list_entry(next->upper.next,
  2263. struct backref_edge, list[LOWER]);
  2264. edges[index++] = edge;
  2265. next = edge->node[UPPER];
  2266. }
  2267. next = walk_down_backref(edges, &index);
  2268. }
  2269. return num_bytes;
  2270. }
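/*
 * reserve metadata space for relocating @node. on -EAGAIN the size of
 * rc->block_rsv is enlarged so that a later retry has enough room.
 */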
  2271. static int reserve_metadata_space(struct btrfs_trans_handle *trans,
  2272. struct reloc_control *rc,
  2273. struct backref_node *node)
  2274. {
  2275. struct btrfs_root *root = rc->extent_root;
  2276. u64 num_bytes;
  2277. int ret;
  2278. u64 tmp;
  2279. num_bytes = calcu_metadata_size(rc, node, 1) * 2;
  2280. trans->block_rsv = rc->block_rsv;
  2281. rc->reserved_bytes += num_bytes;
  2282. ret = btrfs_block_rsv_refill(root, rc->block_rsv, num_bytes,
  2283. BTRFS_RESERVE_FLUSH_ALL);
  2284. if (ret) {
  2285. if (ret == -EAGAIN) {
  2286. tmp = rc->extent_root->nodesize *
  2287. RELOCATION_RESERVED_NODES;
  2288. while (tmp <= rc->reserved_bytes)
  2289. tmp <<= 1;
  2290. /*
  2291. * only one thread can access block_rsv at this point,
2292. * so we don't need to hold a lock to protect block_rsv.
2293. * we expand the reservation size here to allow enough
2294. * space for relocation, and we will return earlier in the
2295. * enospc case.
  2296. */
  2297. rc->block_rsv->size = tmp + rc->extent_root->nodesize *
  2298. RELOCATION_RESERVED_NODES;
  2299. }
  2300. return ret;
  2301. }
  2302. return 0;
  2303. }
  2304. /*
2305. * relocate a tree block, and then update pointers in upper level
  2306. * blocks that reference the block to point to the new location.
  2307. *
  2308. * if called by link_to_upper, the block has already been relocated.
  2309. * in that case this function just updates pointers.
  2310. */
  2311. static int do_relocation(struct btrfs_trans_handle *trans,
  2312. struct reloc_control *rc,
  2313. struct backref_node *node,
  2314. struct btrfs_key *key,
  2315. struct btrfs_path *path, int lowest)
  2316. {
  2317. struct backref_node *upper;
  2318. struct backref_edge *edge;
  2319. struct backref_edge *edges[BTRFS_MAX_LEVEL - 1];
  2320. struct btrfs_root *root;
  2321. struct extent_buffer *eb;
  2322. u32 blocksize;
  2323. u64 bytenr;
  2324. u64 generation;
  2325. int slot;
  2326. int ret;
  2327. int err = 0;
  2328. BUG_ON(lowest && node->eb);
  2329. path->lowest_level = node->level + 1;
  2330. rc->backref_cache.path[node->level] = node;
  2331. list_for_each_entry(edge, &node->upper, list[LOWER]) {
  2332. cond_resched();
  2333. upper = edge->node[UPPER];
  2334. root = select_reloc_root(trans, rc, upper, edges);
  2335. BUG_ON(!root);
  2336. if (upper->eb && !upper->locked) {
  2337. if (!lowest) {
  2338. ret = btrfs_bin_search(upper->eb, key,
  2339. upper->level, &slot);
  2340. BUG_ON(ret);
  2341. bytenr = btrfs_node_blockptr(upper->eb, slot);
  2342. if (node->eb->start == bytenr)
  2343. goto next;
  2344. }
  2345. drop_node_buffer(upper);
  2346. }
  2347. if (!upper->eb) {
  2348. ret = btrfs_search_slot(trans, root, key, path, 0, 1);
  2349. if (ret < 0) {
  2350. err = ret;
  2351. break;
  2352. }
  2353. BUG_ON(ret > 0);
  2354. if (!upper->eb) {
  2355. upper->eb = path->nodes[upper->level];
  2356. path->nodes[upper->level] = NULL;
  2357. } else {
  2358. BUG_ON(upper->eb != path->nodes[upper->level]);
  2359. }
  2360. upper->locked = 1;
  2361. path->locks[upper->level] = 0;
  2362. slot = path->slots[upper->level];
  2363. btrfs_release_path(path);
  2364. } else {
  2365. ret = btrfs_bin_search(upper->eb, key, upper->level,
  2366. &slot);
  2367. BUG_ON(ret);
  2368. }
  2369. bytenr = btrfs_node_blockptr(upper->eb, slot);
  2370. if (lowest) {
  2371. BUG_ON(bytenr != node->bytenr);
  2372. } else {
  2373. if (node->eb->start == bytenr)
  2374. goto next;
  2375. }
  2376. blocksize = root->nodesize;
  2377. generation = btrfs_node_ptr_generation(upper->eb, slot);
  2378. eb = read_tree_block(root, bytenr, generation);
  2379. if (!eb || !extent_buffer_uptodate(eb)) {
  2380. free_extent_buffer(eb);
  2381. err = -EIO;
  2382. goto next;
  2383. }
  2384. btrfs_tree_lock(eb);
  2385. btrfs_set_lock_blocking(eb);
  2386. if (!node->eb) {
  2387. ret = btrfs_cow_block(trans, root, eb, upper->eb,
  2388. slot, &eb);
  2389. btrfs_tree_unlock(eb);
  2390. free_extent_buffer(eb);
  2391. if (ret < 0) {
  2392. err = ret;
  2393. goto next;
  2394. }
  2395. BUG_ON(node->eb != eb);
  2396. } else {
  2397. btrfs_set_node_blockptr(upper->eb, slot,
  2398. node->eb->start);
  2399. btrfs_set_node_ptr_generation(upper->eb, slot,
  2400. trans->transid);
  2401. btrfs_mark_buffer_dirty(upper->eb);
  2402. ret = btrfs_inc_extent_ref(trans, root,
  2403. node->eb->start, blocksize,
  2404. upper->eb->start,
  2405. btrfs_header_owner(upper->eb),
  2406. node->level, 0, 1);
  2407. BUG_ON(ret);
  2408. ret = btrfs_drop_subtree(trans, root, eb, upper->eb);
  2409. BUG_ON(ret);
  2410. }
  2411. next:
  2412. if (!upper->pending)
  2413. drop_node_buffer(upper);
  2414. else
  2415. unlock_node_buffer(upper);
  2416. if (err)
  2417. break;
  2418. }
  2419. if (!err && node->pending) {
  2420. drop_node_buffer(node);
  2421. list_move_tail(&node->list, &rc->backref_cache.changed);
  2422. node->pending = 0;
  2423. }
  2424. path->lowest_level = 0;
  2425. BUG_ON(err == -ENOSPC);
  2426. return err;
  2427. }
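/*
 * @node has already been relocated, so only the pointers in the upper
 * level blocks that reference it need updating
 */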
  2428. static int link_to_upper(struct btrfs_trans_handle *trans,
  2429. struct reloc_control *rc,
  2430. struct backref_node *node,
  2431. struct btrfs_path *path)
  2432. {
  2433. struct btrfs_key key;
  2434. btrfs_node_key_to_cpu(node->eb, &key, 0);
  2435. return do_relocation(trans, rc, node, &key, path, 0);
  2436. }
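/*
 * link every pending backref node at each level to its upper blocks.
 * the first error encountered (or the error passed in) is returned.
 */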
  2437. static int finish_pending_nodes(struct btrfs_trans_handle *trans,
  2438. struct reloc_control *rc,
  2439. struct btrfs_path *path, int err)
  2440. {
  2441. LIST_HEAD(list);
  2442. struct backref_cache *cache = &rc->backref_cache;
  2443. struct backref_node *node;
  2444. int level;
  2445. int ret;
  2446. for (level = 0; level < BTRFS_MAX_LEVEL; level++) {
  2447. while (!list_empty(&cache->pending[level])) {
  2448. node = list_entry(cache->pending[level].next,
  2449. struct backref_node, list);
  2450. list_move_tail(&node->list, &list);
  2451. BUG_ON(!node->pending);
  2452. if (!err) {
  2453. ret = link_to_upper(trans, rc, node, path);
  2454. if (ret < 0)
  2455. err = ret;
  2456. }
  2457. }
  2458. list_splice_init(&list, &cache->pending[level]);
  2459. }
  2460. return err;
  2461. }
  2462. static void mark_block_processed(struct reloc_control *rc,
  2463. u64 bytenr, u32 blocksize)
  2464. {
  2465. set_extent_bits(&rc->processed_blocks, bytenr, bytenr + blocksize - 1,
  2466. EXTENT_DIRTY, GFP_NOFS);
  2467. }
  2468. static void __mark_block_processed(struct reloc_control *rc,
  2469. struct backref_node *node)
  2470. {
  2471. u32 blocksize;
  2472. if (node->level == 0 ||
  2473. in_block_group(node->bytenr, rc->block_group)) {
  2474. blocksize = rc->extent_root->nodesize;
  2475. mark_block_processed(rc, node->bytenr, blocksize);
  2476. }
  2477. node->processed = 1;
  2478. }
  2479. /*
2480. * mark a block and all blocks that directly/indirectly reference it
  2481. * as processed.
  2482. */
  2483. static void update_processed_blocks(struct reloc_control *rc,
  2484. struct backref_node *node)
  2485. {
  2486. struct backref_node *next = node;
  2487. struct backref_edge *edge;
  2488. struct backref_edge *edges[BTRFS_MAX_LEVEL - 1];
  2489. int index = 0;
  2490. while (next) {
  2491. cond_resched();
  2492. while (1) {
  2493. if (next->processed)
  2494. break;
  2495. __mark_block_processed(rc, next);
  2496. if (list_empty(&next->upper))
  2497. break;
  2498. edge = list_entry(next->upper.next,
  2499. struct backref_edge, list[LOWER]);
  2500. edges[index++] = edge;
  2501. next = edge->node[UPPER];
  2502. }
  2503. next = walk_down_backref(edges, &index);
  2504. }
  2505. }
  2506. static int tree_block_processed(u64 bytenr, struct reloc_control *rc)
  2507. {
  2508. u32 blocksize = rc->extent_root->nodesize;
  2509. if (test_range_bit(&rc->processed_blocks, bytenr,
  2510. bytenr + blocksize - 1, EXTENT_DIRTY, 1, NULL))
  2511. return 1;
  2512. return 0;
  2513. }
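/*
 * read the tree block and cache its first key in block->key. the
 * expected generation was stashed in block->key.offset by add_tree_block().
 */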
  2514. static int get_tree_block_key(struct reloc_control *rc,
  2515. struct tree_block *block)
  2516. {
  2517. struct extent_buffer *eb;
  2518. BUG_ON(block->key_ready);
  2519. eb = read_tree_block(rc->extent_root, block->bytenr,
  2520. block->key.offset);
  2521. if (!eb || !extent_buffer_uptodate(eb)) {
  2522. free_extent_buffer(eb);
  2523. return -EIO;
  2524. }
  2525. WARN_ON(btrfs_header_level(eb) != block->level);
  2526. if (block->level == 0)
  2527. btrfs_item_key_to_cpu(eb, &block->key, 0);
  2528. else
  2529. btrfs_node_key_to_cpu(eb, &block->key, 0);
  2530. free_extent_buffer(eb);
  2531. block->key_ready = 1;
  2532. return 0;
  2533. }
  2534. /*
  2535. * helper function to relocate a tree block
  2536. */
  2537. static int relocate_tree_block(struct btrfs_trans_handle *trans,
  2538. struct reloc_control *rc,
  2539. struct backref_node *node,
  2540. struct btrfs_key *key,
  2541. struct btrfs_path *path)
  2542. {
  2543. struct btrfs_root *root;
  2544. int ret = 0;
  2545. if (!node)
  2546. return 0;
  2547. BUG_ON(node->processed);
  2548. root = select_one_root(trans, node);
  2549. if (root == ERR_PTR(-ENOENT)) {
  2550. update_processed_blocks(rc, node);
  2551. goto out;
  2552. }
  2553. if (!root || test_bit(BTRFS_ROOT_REF_COWS, &root->state)) {
  2554. ret = reserve_metadata_space(trans, rc, node);
  2555. if (ret)
  2556. goto out;
  2557. }
  2558. if (root) {
  2559. if (test_bit(BTRFS_ROOT_REF_COWS, &root->state)) {
  2560. BUG_ON(node->new_bytenr);
  2561. BUG_ON(!list_empty(&node->list));
  2562. btrfs_record_root_in_trans(trans, root);
  2563. root = root->reloc_root;
  2564. node->new_bytenr = root->node->start;
  2565. node->root = root;
  2566. list_add_tail(&node->list, &rc->backref_cache.changed);
  2567. } else {
  2568. path->lowest_level = node->level;
  2569. ret = btrfs_search_slot(trans, root, key, path, 0, 1);
  2570. btrfs_release_path(path);
  2571. if (ret > 0)
  2572. ret = 0;
  2573. }
  2574. if (!ret)
  2575. update_processed_blocks(rc, node);
  2576. } else {
  2577. ret = do_relocation(trans, rc, node, key, path, 1);
  2578. }
  2579. out:
  2580. if (ret || node->level == 0 || node->cowonly)
  2581. remove_backref_node(&rc->backref_cache, node);
  2582. return ret;
  2583. }
  2584. /*
  2585. * relocate a list of blocks
  2586. */
  2587. static noinline_for_stack
  2588. int relocate_tree_blocks(struct btrfs_trans_handle *trans,
  2589. struct reloc_control *rc, struct rb_root *blocks)
  2590. {
  2591. struct backref_node *node;
  2592. struct btrfs_path *path;
  2593. struct tree_block *block;
  2594. struct rb_node *rb_node;
  2595. int ret;
  2596. int err = 0;
  2597. path = btrfs_alloc_path();
  2598. if (!path) {
  2599. err = -ENOMEM;
  2600. goto out_free_blocks;
  2601. }
  2602. rb_node = rb_first(blocks);
  2603. while (rb_node) {
  2604. block = rb_entry(rb_node, struct tree_block, rb_node);
  2605. if (!block->key_ready)
  2606. readahead_tree_block(rc->extent_root, block->bytenr);
  2607. rb_node = rb_next(rb_node);
  2608. }
  2609. rb_node = rb_first(blocks);
  2610. while (rb_node) {
  2611. block = rb_entry(rb_node, struct tree_block, rb_node);
  2612. if (!block->key_ready) {
  2613. err = get_tree_block_key(rc, block);
  2614. if (err)
  2615. goto out_free_path;
  2616. }
  2617. rb_node = rb_next(rb_node);
  2618. }
  2619. rb_node = rb_first(blocks);
  2620. while (rb_node) {
  2621. block = rb_entry(rb_node, struct tree_block, rb_node);
  2622. node = build_backref_tree(rc, &block->key,
  2623. block->level, block->bytenr);
  2624. if (IS_ERR(node)) {
  2625. err = PTR_ERR(node);
  2626. goto out;
  2627. }
  2628. ret = relocate_tree_block(trans, rc, node, &block->key,
  2629. path);
  2630. if (ret < 0) {
  2631. if (ret != -EAGAIN || rb_node == rb_first(blocks))
  2632. err = ret;
  2633. goto out;
  2634. }
  2635. rb_node = rb_next(rb_node);
  2636. }
  2637. out:
  2638. err = finish_pending_nodes(trans, rc, path, err);
  2639. out_free_path:
  2640. btrfs_free_path(path);
  2641. out_free_blocks:
  2642. free_block_list(blocks);
  2643. return err;
  2644. }
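/*
 * reserve data space and preallocate the ranges of the relocation data
 * inode that back each extent in the cluster
 */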
  2645. static noinline_for_stack
  2646. int prealloc_file_extent_cluster(struct inode *inode,
  2647. struct file_extent_cluster *cluster)
  2648. {
  2649. u64 alloc_hint = 0;
  2650. u64 start;
  2651. u64 end;
  2652. u64 offset = BTRFS_I(inode)->index_cnt;
  2653. u64 num_bytes;
  2654. int nr = 0;
  2655. int ret = 0;
  2656. BUG_ON(cluster->start != cluster->boundary[0]);
  2657. mutex_lock(&inode->i_mutex);
  2658. ret = btrfs_check_data_free_space(inode, cluster->end +
  2659. 1 - cluster->start);
  2660. if (ret)
  2661. goto out;
  2662. while (nr < cluster->nr) {
  2663. start = cluster->boundary[nr] - offset;
  2664. if (nr + 1 < cluster->nr)
  2665. end = cluster->boundary[nr + 1] - 1 - offset;
  2666. else
  2667. end = cluster->end - offset;
  2668. lock_extent(&BTRFS_I(inode)->io_tree, start, end);
  2669. num_bytes = end + 1 - start;
  2670. ret = btrfs_prealloc_file_range(inode, 0, start,
  2671. num_bytes, num_bytes,
  2672. end + 1, &alloc_hint);
  2673. unlock_extent(&BTRFS_I(inode)->io_tree, start, end);
  2674. if (ret)
  2675. break;
  2676. nr++;
  2677. }
  2678. btrfs_free_reserved_data_space(inode, cluster->end +
  2679. 1 - cluster->start);
  2680. out:
  2681. mutex_unlock(&inode->i_mutex);
  2682. return ret;
  2683. }
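/*
 * insert a pinned extent mapping for [start, end] that points at
 * @block_start, dropping any cached mappings that overlap the range
 */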
  2684. static noinline_for_stack
  2685. int setup_extent_mapping(struct inode *inode, u64 start, u64 end,
  2686. u64 block_start)
  2687. {
  2688. struct btrfs_root *root = BTRFS_I(inode)->root;
  2689. struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
  2690. struct extent_map *em;
  2691. int ret = 0;
  2692. em = alloc_extent_map();
  2693. if (!em)
  2694. return -ENOMEM;
  2695. em->start = start;
  2696. em->len = end + 1 - start;
  2697. em->block_len = em->len;
  2698. em->block_start = block_start;
  2699. em->bdev = root->fs_info->fs_devices->latest_bdev;
  2700. set_bit(EXTENT_FLAG_PINNED, &em->flags);
  2701. lock_extent(&BTRFS_I(inode)->io_tree, start, end);
  2702. while (1) {
  2703. write_lock(&em_tree->lock);
  2704. ret = add_extent_mapping(em_tree, em, 0);
  2705. write_unlock(&em_tree->lock);
  2706. if (ret != -EEXIST) {
  2707. free_extent_map(em);
  2708. break;
  2709. }
  2710. btrfs_drop_extent_cache(inode, start, end, 0);
  2711. }
  2712. unlock_extent(&BTRFS_I(inode)->io_tree, start, end);
  2713. return ret;
  2714. }
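/*
 * read every page of the cluster through the relocation data inode and
 * mark it delalloc/dirty so writeback copies the data to the
 * preallocated new location; cluster boundaries are tagged with
 * EXTENT_BOUNDARY
 */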
  2715. static int relocate_file_extent_cluster(struct inode *inode,
  2716. struct file_extent_cluster *cluster)
  2717. {
  2718. u64 page_start;
  2719. u64 page_end;
  2720. u64 offset = BTRFS_I(inode)->index_cnt;
  2721. unsigned long index;
  2722. unsigned long last_index;
  2723. struct page *page;
  2724. struct file_ra_state *ra;
  2725. gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
  2726. int nr = 0;
  2727. int ret = 0;
  2728. if (!cluster->nr)
  2729. return 0;
  2730. ra = kzalloc(sizeof(*ra), GFP_NOFS);
  2731. if (!ra)
  2732. return -ENOMEM;
  2733. ret = prealloc_file_extent_cluster(inode, cluster);
  2734. if (ret)
  2735. goto out;
  2736. file_ra_state_init(ra, inode->i_mapping);
  2737. ret = setup_extent_mapping(inode, cluster->start - offset,
  2738. cluster->end - offset, cluster->start);
  2739. if (ret)
  2740. goto out;
  2741. index = (cluster->start - offset) >> PAGE_CACHE_SHIFT;
  2742. last_index = (cluster->end - offset) >> PAGE_CACHE_SHIFT;
  2743. while (index <= last_index) {
  2744. ret = btrfs_delalloc_reserve_metadata(inode, PAGE_CACHE_SIZE);
  2745. if (ret)
  2746. goto out;
  2747. page = find_lock_page(inode->i_mapping, index);
  2748. if (!page) {
  2749. page_cache_sync_readahead(inode->i_mapping,
  2750. ra, NULL, index,
  2751. last_index + 1 - index);
  2752. page = find_or_create_page(inode->i_mapping, index,
  2753. mask);
  2754. if (!page) {
  2755. btrfs_delalloc_release_metadata(inode,
  2756. PAGE_CACHE_SIZE);
  2757. ret = -ENOMEM;
  2758. goto out;
  2759. }
  2760. }
  2761. if (PageReadahead(page)) {
  2762. page_cache_async_readahead(inode->i_mapping,
  2763. ra, NULL, page, index,
  2764. last_index + 1 - index);
  2765. }
  2766. if (!PageUptodate(page)) {
  2767. btrfs_readpage(NULL, page);
  2768. lock_page(page);
  2769. if (!PageUptodate(page)) {
  2770. unlock_page(page);
  2771. page_cache_release(page);
  2772. btrfs_delalloc_release_metadata(inode,
  2773. PAGE_CACHE_SIZE);
  2774. ret = -EIO;
  2775. goto out;
  2776. }
  2777. }
  2778. page_start = page_offset(page);
  2779. page_end = page_start + PAGE_CACHE_SIZE - 1;
  2780. lock_extent(&BTRFS_I(inode)->io_tree, page_start, page_end);
  2781. set_page_extent_mapped(page);
  2782. if (nr < cluster->nr &&
  2783. page_start + offset == cluster->boundary[nr]) {
  2784. set_extent_bits(&BTRFS_I(inode)->io_tree,
  2785. page_start, page_end,
  2786. EXTENT_BOUNDARY, GFP_NOFS);
  2787. nr++;
  2788. }
  2789. btrfs_set_extent_delalloc(inode, page_start, page_end, NULL);
  2790. set_page_dirty(page);
  2791. unlock_extent(&BTRFS_I(inode)->io_tree,
  2792. page_start, page_end);
  2793. unlock_page(page);
  2794. page_cache_release(page);
  2795. index++;
  2796. balance_dirty_pages_ratelimited(inode->i_mapping);
  2797. btrfs_throttle(BTRFS_I(inode)->root);
  2798. }
  2799. WARN_ON(nr != cluster->nr);
  2800. out:
  2801. kfree(ra);
  2802. return ret;
  2803. }
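/*
 * accumulate data extents into the current cluster; once the cluster is
 * full, or the next extent is not adjacent, flush it with
 * relocate_file_extent_cluster()
 */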
  2804. static noinline_for_stack
  2805. int relocate_data_extent(struct inode *inode, struct btrfs_key *extent_key,
  2806. struct file_extent_cluster *cluster)
  2807. {
  2808. int ret;
  2809. if (cluster->nr > 0 && extent_key->objectid != cluster->end + 1) {
  2810. ret = relocate_file_extent_cluster(inode, cluster);
  2811. if (ret)
  2812. return ret;
  2813. cluster->nr = 0;
  2814. }
  2815. if (!cluster->nr)
  2816. cluster->start = extent_key->objectid;
  2817. else
  2818. BUG_ON(cluster->nr >= MAX_EXTENTS);
  2819. cluster->end = extent_key->objectid + extent_key->offset - 1;
  2820. cluster->boundary[cluster->nr] = extent_key->objectid;
  2821. cluster->nr++;
  2822. if (cluster->nr >= MAX_EXTENTS) {
  2823. ret = relocate_file_extent_cluster(inode, cluster);
  2824. if (ret)
  2825. return ret;
  2826. cluster->nr = 0;
  2827. }
  2828. return 0;
  2829. }
  2830. #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
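/*
 * scan forward from the current slot for a v0 backref of the extent and
 * return the objectid of the tree that owns the reference
 */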
  2831. static int get_ref_objectid_v0(struct reloc_control *rc,
  2832. struct btrfs_path *path,
  2833. struct btrfs_key *extent_key,
  2834. u64 *ref_objectid, int *path_change)
  2835. {
  2836. struct btrfs_key key;
  2837. struct extent_buffer *leaf;
  2838. struct btrfs_extent_ref_v0 *ref0;
  2839. int ret;
  2840. int slot;
  2841. leaf = path->nodes[0];
  2842. slot = path->slots[0];
  2843. while (1) {
  2844. if (slot >= btrfs_header_nritems(leaf)) {
  2845. ret = btrfs_next_leaf(rc->extent_root, path);
  2846. if (ret < 0)
  2847. return ret;
  2848. BUG_ON(ret > 0);
  2849. leaf = path->nodes[0];
  2850. slot = path->slots[0];
  2851. if (path_change)
  2852. *path_change = 1;
  2853. }
  2854. btrfs_item_key_to_cpu(leaf, &key, slot);
  2855. if (key.objectid != extent_key->objectid)
  2856. return -ENOENT;
  2857. if (key.type != BTRFS_EXTENT_REF_V0_KEY) {
  2858. slot++;
  2859. continue;
  2860. }
  2861. ref0 = btrfs_item_ptr(leaf, slot,
  2862. struct btrfs_extent_ref_v0);
  2863. *ref_objectid = btrfs_ref_objectid_v0(leaf, ref0);
  2864. break;
  2865. }
  2866. return 0;
  2867. }
  2868. #endif
  2869. /*
  2870. * helper to add a tree block to the list.
  2871. * the major work is getting the generation and level of the block
  2872. */
  2873. static int add_tree_block(struct reloc_control *rc,
  2874. struct btrfs_key *extent_key,
  2875. struct btrfs_path *path,
  2876. struct rb_root *blocks)
  2877. {
  2878. struct extent_buffer *eb;
  2879. struct btrfs_extent_item *ei;
  2880. struct btrfs_tree_block_info *bi;
  2881. struct tree_block *block;
  2882. struct rb_node *rb_node;
  2883. u32 item_size;
  2884. int level = -1;
  2885. u64 generation;
  2886. eb = path->nodes[0];
  2887. item_size = btrfs_item_size_nr(eb, path->slots[0]);
  2888. if (extent_key->type == BTRFS_METADATA_ITEM_KEY ||
  2889. item_size >= sizeof(*ei) + sizeof(*bi)) {
  2890. ei = btrfs_item_ptr(eb, path->slots[0],
  2891. struct btrfs_extent_item);
  2892. if (extent_key->type == BTRFS_EXTENT_ITEM_KEY) {
  2893. bi = (struct btrfs_tree_block_info *)(ei + 1);
  2894. level = btrfs_tree_block_level(eb, bi);
  2895. } else {
  2896. level = (int)extent_key->offset;
  2897. }
  2898. generation = btrfs_extent_generation(eb, ei);
  2899. } else {
  2900. #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
  2901. u64 ref_owner;
  2902. int ret;
  2903. BUG_ON(item_size != sizeof(struct btrfs_extent_item_v0));
  2904. ret = get_ref_objectid_v0(rc, path, extent_key,
  2905. &ref_owner, NULL);
  2906. if (ret < 0)
  2907. return ret;
  2908. BUG_ON(ref_owner >= BTRFS_MAX_LEVEL);
  2909. level = (int)ref_owner;
  2910. /* FIXME: get real generation */
  2911. generation = 0;
  2912. #else
  2913. BUG();
  2914. #endif
  2915. }
  2916. btrfs_release_path(path);
  2917. BUG_ON(level == -1);
  2918. block = kmalloc(sizeof(*block), GFP_NOFS);
  2919. if (!block)
  2920. return -ENOMEM;
  2921. block->bytenr = extent_key->objectid;
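/*
 * key.objectid temporarily holds the block size and key.offset the
 * generation; the real key is presumably filled in later, once the
 * block has been read and key_ready is set.
 */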
  2922. block->key.objectid = rc->extent_root->nodesize;
  2923. block->key.offset = generation;
  2924. block->level = level;
  2925. block->key_ready = 0;
  2926. rb_node = tree_insert(blocks, block->bytenr, &block->rb_node);
  2927. if (rb_node)
  2928. backref_tree_panic(rb_node, -EEXIST, block->bytenr);
  2929. return 0;
  2930. }
  2931. /*
  2932. * helper to add tree blocks for backref of type BTRFS_SHARED_DATA_REF_KEY
  2933. */
  2934. static int __add_tree_block(struct reloc_control *rc,
  2935. u64 bytenr, u32 blocksize,
  2936. struct rb_root *blocks)
  2937. {
  2938. struct btrfs_path *path;
  2939. struct btrfs_key key;
  2940. int ret;
  2941. bool skinny = btrfs_fs_incompat(rc->extent_root->fs_info,
  2942. SKINNY_METADATA);
  2943. if (tree_block_processed(bytenr, rc))
  2944. return 0;
  2945. if (tree_search(blocks, bytenr))
  2946. return 0;
  2947. path = btrfs_alloc_path();
  2948. if (!path)
  2949. return -ENOMEM;
  2950. again:
  2951. key.objectid = bytenr;
  2952. if (skinny) {
  2953. key.type = BTRFS_METADATA_ITEM_KEY;
  2954. key.offset = (u64)-1;
  2955. } else {
  2956. key.type = BTRFS_EXTENT_ITEM_KEY;
  2957. key.offset = blocksize;
  2958. }
  2959. path->search_commit_root = 1;
  2960. path->skip_locking = 1;
  2961. ret = btrfs_search_slot(NULL, rc->extent_root, &key, path, 0, 0);
  2962. if (ret < 0)
  2963. goto out;
  2964. if (ret > 0 && skinny) {
  2965. if (path->slots[0]) {
  2966. path->slots[0]--;
  2967. btrfs_item_key_to_cpu(path->nodes[0], &key,
  2968. path->slots[0]);
  2969. if (key.objectid == bytenr &&
  2970. (key.type == BTRFS_METADATA_ITEM_KEY ||
  2971. (key.type == BTRFS_EXTENT_ITEM_KEY &&
  2972. key.offset == blocksize)))
  2973. ret = 0;
  2974. }
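/* the skinny METADATA_ITEM was not found, retry with the full EXTENT_ITEM key */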
  2975. if (ret) {
  2976. skinny = false;
  2977. btrfs_release_path(path);
  2978. goto again;
  2979. }
  2980. }
  2981. BUG_ON(ret);
  2982. ret = add_tree_block(rc, &key, path, blocks);
  2983. out:
  2984. btrfs_free_path(path);
  2985. return ret;
  2986. }
  2987. /*
2988. * helper to check whether the block uses full backrefs for the pointers in it
  2989. */
  2990. static int block_use_full_backref(struct reloc_control *rc,
  2991. struct extent_buffer *eb)
  2992. {
  2993. u64 flags;
  2994. int ret;
  2995. if (btrfs_header_flag(eb, BTRFS_HEADER_FLAG_RELOC) ||
  2996. btrfs_header_backref_rev(eb) < BTRFS_MIXED_BACKREF_REV)
  2997. return 1;
  2998. ret = btrfs_lookup_extent_info(NULL, rc->extent_root,
  2999. eb->start, btrfs_header_level(eb), 1,
  3000. NULL, &flags);
  3001. BUG_ON(ret);
  3002. if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)
  3003. ret = 1;
  3004. else
  3005. ret = 0;
  3006. return ret;
  3007. }
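/*
 * look up the free space cache inode by its inode number (unless one was
 * passed in) and truncate it, dropping the stale cache contents.
 */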
  3008. static int delete_block_group_cache(struct btrfs_fs_info *fs_info,
  3009. struct inode *inode, u64 ino)
  3010. {
  3011. struct btrfs_key key;
  3012. struct btrfs_root *root = fs_info->tree_root;
  3013. struct btrfs_trans_handle *trans;
  3014. int ret = 0;
  3015. if (inode)
  3016. goto truncate;
  3017. key.objectid = ino;
  3018. key.type = BTRFS_INODE_ITEM_KEY;
  3019. key.offset = 0;
  3020. inode = btrfs_iget(fs_info->sb, &key, root, NULL);
  3021. if (IS_ERR(inode) || is_bad_inode(inode)) {
  3022. if (!IS_ERR(inode))
  3023. iput(inode);
  3024. return -ENOENT;
  3025. }
  3026. truncate:
  3027. ret = btrfs_check_trunc_cache_free_space(root,
  3028. &fs_info->global_block_rsv);
  3029. if (ret)
  3030. goto out;
  3031. trans = btrfs_join_transaction(root);
  3032. if (IS_ERR(trans)) {
  3033. ret = PTR_ERR(trans);
  3034. goto out;
  3035. }
  3036. ret = btrfs_truncate_free_space_cache(root, trans, inode);
  3037. btrfs_end_transaction(trans, root);
  3038. btrfs_btree_balance_dirty(root);
  3039. out:
  3040. iput(inode);
  3041. return ret;
  3042. }
  3043. /*
  3044. * helper to add tree blocks for backref of type BTRFS_EXTENT_DATA_REF_KEY
3045. * this function scans the fs tree to find blocks that reference the data extent
  3046. */
  3047. static int find_data_references(struct reloc_control *rc,
  3048. struct btrfs_key *extent_key,
  3049. struct extent_buffer *leaf,
  3050. struct btrfs_extent_data_ref *ref,
  3051. struct rb_root *blocks)
  3052. {
  3053. struct btrfs_path *path;
  3054. struct tree_block *block;
  3055. struct btrfs_root *root;
  3056. struct btrfs_file_extent_item *fi;
  3057. struct rb_node *rb_node;
  3058. struct btrfs_key key;
  3059. u64 ref_root;
  3060. u64 ref_objectid;
  3061. u64 ref_offset;
  3062. u32 ref_count;
  3063. u32 nritems;
  3064. int err = 0;
  3065. int added = 0;
  3066. int counted;
  3067. int ret;
  3068. ref_root = btrfs_extent_data_ref_root(leaf, ref);
  3069. ref_objectid = btrfs_extent_data_ref_objectid(leaf, ref);
  3070. ref_offset = btrfs_extent_data_ref_offset(leaf, ref);
  3071. ref_count = btrfs_extent_data_ref_count(leaf, ref);
  3072. /*
3073. * This is an extent belonging to the free space cache, let's just delete
  3074. * it and redo the search.
  3075. */
  3076. if (ref_root == BTRFS_ROOT_TREE_OBJECTID) {
  3077. ret = delete_block_group_cache(rc->extent_root->fs_info,
  3078. NULL, ref_objectid);
  3079. if (ret != -ENOENT)
  3080. return ret;
  3081. ret = 0;
  3082. }
  3083. path = btrfs_alloc_path();
  3084. if (!path)
  3085. return -ENOMEM;
  3086. path->reada = 1;
  3087. root = read_fs_root(rc->extent_root->fs_info, ref_root);
  3088. if (IS_ERR(root)) {
  3089. err = PTR_ERR(root);
  3090. goto out;
  3091. }
  3092. key.objectid = ref_objectid;
  3093. key.type = BTRFS_EXTENT_DATA_KEY;
  3094. if (ref_offset > ((u64)-1 << 32))
  3095. key.offset = 0;
  3096. else
  3097. key.offset = ref_offset;
  3098. path->search_commit_root = 1;
  3099. path->skip_locking = 1;
  3100. ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
  3101. if (ret < 0) {
  3102. err = ret;
  3103. goto out;
  3104. }
  3105. leaf = path->nodes[0];
  3106. nritems = btrfs_header_nritems(leaf);
  3107. /*
3108. * the references in tree blocks that use full backrefs
3109. * are not counted against the ref count being matched here
  3110. */
  3111. if (block_use_full_backref(rc, leaf))
  3112. counted = 0;
  3113. else
  3114. counted = 1;
  3115. rb_node = tree_search(blocks, leaf->start);
  3116. if (rb_node) {
  3117. if (counted)
  3118. added = 1;
  3119. else
  3120. path->slots[0] = nritems;
  3121. }
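/*
 * walk the EXTENT_DATA items of the referencing inode until every counted
 * reference has been matched, queueing each leaf that points at the extent
 * being relocated.
 */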
  3122. while (ref_count > 0) {
  3123. while (path->slots[0] >= nritems) {
  3124. ret = btrfs_next_leaf(root, path);
  3125. if (ret < 0) {
  3126. err = ret;
  3127. goto out;
  3128. }
  3129. if (WARN_ON(ret > 0))
  3130. goto out;
  3131. leaf = path->nodes[0];
  3132. nritems = btrfs_header_nritems(leaf);
  3133. added = 0;
  3134. if (block_use_full_backref(rc, leaf))
  3135. counted = 0;
  3136. else
  3137. counted = 1;
  3138. rb_node = tree_search(blocks, leaf->start);
  3139. if (rb_node) {
  3140. if (counted)
  3141. added = 1;
  3142. else
  3143. path->slots[0] = nritems;
  3144. }
  3145. }
  3146. btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
  3147. if (WARN_ON(key.objectid != ref_objectid ||
  3148. key.type != BTRFS_EXTENT_DATA_KEY))
  3149. break;
  3150. fi = btrfs_item_ptr(leaf, path->slots[0],
  3151. struct btrfs_file_extent_item);
  3152. if (btrfs_file_extent_type(leaf, fi) ==
  3153. BTRFS_FILE_EXTENT_INLINE)
  3154. goto next;
  3155. if (btrfs_file_extent_disk_bytenr(leaf, fi) !=
  3156. extent_key->objectid)
  3157. goto next;
  3158. key.offset -= btrfs_file_extent_offset(leaf, fi);
  3159. if (key.offset != ref_offset)
  3160. goto next;
  3161. if (counted)
  3162. ref_count--;
  3163. if (added)
  3164. goto next;
  3165. if (!tree_block_processed(leaf->start, rc)) {
  3166. block = kmalloc(sizeof(*block), GFP_NOFS);
  3167. if (!block) {
  3168. err = -ENOMEM;
  3169. break;
  3170. }
  3171. block->bytenr = leaf->start;
  3172. btrfs_item_key_to_cpu(leaf, &block->key, 0);
  3173. block->level = 0;
  3174. block->key_ready = 1;
  3175. rb_node = tree_insert(blocks, block->bytenr,
  3176. &block->rb_node);
  3177. if (rb_node)
  3178. backref_tree_panic(rb_node, -EEXIST,
  3179. block->bytenr);
  3180. }
  3181. if (counted)
  3182. added = 1;
  3183. else
  3184. path->slots[0] = nritems;
  3185. next:
  3186. path->slots[0]++;
  3187. }
  3188. out:
  3189. btrfs_free_path(path);
  3190. return err;
  3191. }
  3192. /*
  3193. * helper to find all tree blocks that reference a given data extent
  3194. */
  3195. static noinline_for_stack
  3196. int add_data_references(struct reloc_control *rc,
  3197. struct btrfs_key *extent_key,
  3198. struct btrfs_path *path,
  3199. struct rb_root *blocks)
  3200. {
  3201. struct btrfs_key key;
  3202. struct extent_buffer *eb;
  3203. struct btrfs_extent_data_ref *dref;
  3204. struct btrfs_extent_inline_ref *iref;
  3205. unsigned long ptr;
  3206. unsigned long end;
  3207. u32 blocksize = rc->extent_root->nodesize;
  3208. int ret = 0;
  3209. int err = 0;
  3210. eb = path->nodes[0];
  3211. ptr = btrfs_item_ptr_offset(eb, path->slots[0]);
  3212. end = ptr + btrfs_item_size_nr(eb, path->slots[0]);
  3213. #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
  3214. if (ptr + sizeof(struct btrfs_extent_item_v0) == end)
  3215. ptr = end;
  3216. else
  3217. #endif
  3218. ptr += sizeof(struct btrfs_extent_item);
  3219. while (ptr < end) {
  3220. iref = (struct btrfs_extent_inline_ref *)ptr;
  3221. key.type = btrfs_extent_inline_ref_type(eb, iref);
  3222. if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
  3223. key.offset = btrfs_extent_inline_ref_offset(eb, iref);
  3224. ret = __add_tree_block(rc, key.offset, blocksize,
  3225. blocks);
  3226. } else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
  3227. dref = (struct btrfs_extent_data_ref *)(&iref->offset);
  3228. ret = find_data_references(rc, extent_key,
  3229. eb, dref, blocks);
  3230. } else {
  3231. BUG();
  3232. }
  3233. if (ret) {
  3234. err = ret;
  3235. goto out;
  3236. }
  3237. ptr += btrfs_extent_inline_ref_size(key.type);
  3238. }
  3239. WARN_ON(ptr > end);
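/* now walk the keyed (non-inline) backref items that follow the extent item */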
  3240. while (1) {
  3241. cond_resched();
  3242. eb = path->nodes[0];
  3243. if (path->slots[0] >= btrfs_header_nritems(eb)) {
  3244. ret = btrfs_next_leaf(rc->extent_root, path);
  3245. if (ret < 0) {
  3246. err = ret;
  3247. break;
  3248. }
  3249. if (ret > 0)
  3250. break;
  3251. eb = path->nodes[0];
  3252. }
  3253. btrfs_item_key_to_cpu(eb, &key, path->slots[0]);
  3254. if (key.objectid != extent_key->objectid)
  3255. break;
  3256. #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
  3257. if (key.type == BTRFS_SHARED_DATA_REF_KEY ||
  3258. key.type == BTRFS_EXTENT_REF_V0_KEY) {
  3259. #else
  3260. BUG_ON(key.type == BTRFS_EXTENT_REF_V0_KEY);
  3261. if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
  3262. #endif
  3263. ret = __add_tree_block(rc, key.offset, blocksize,
  3264. blocks);
  3265. } else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
  3266. dref = btrfs_item_ptr(eb, path->slots[0],
  3267. struct btrfs_extent_data_ref);
  3268. ret = find_data_references(rc, extent_key,
  3269. eb, dref, blocks);
  3270. } else {
  3271. ret = 0;
  3272. }
  3273. if (ret) {
  3274. err = ret;
  3275. break;
  3276. }
  3277. path->slots[0]++;
  3278. }
  3279. out:
  3280. btrfs_release_path(path);
  3281. if (err)
  3282. free_block_list(blocks);
  3283. return err;
  3284. }
  3285. /*
3286. * helper to find the next unprocessed extent
  3287. */
  3288. static noinline_for_stack
  3289. int find_next_extent(struct btrfs_trans_handle *trans,
  3290. struct reloc_control *rc, struct btrfs_path *path,
  3291. struct btrfs_key *extent_key)
  3292. {
  3293. struct btrfs_key key;
  3294. struct extent_buffer *leaf;
  3295. u64 start, end, last;
  3296. int ret;
  3297. last = rc->block_group->key.objectid + rc->block_group->key.offset;
  3298. while (1) {
  3299. cond_resched();
  3300. if (rc->search_start >= last) {
  3301. ret = 1;
  3302. break;
  3303. }
  3304. key.objectid = rc->search_start;
  3305. key.type = BTRFS_EXTENT_ITEM_KEY;
  3306. key.offset = 0;
  3307. path->search_commit_root = 1;
  3308. path->skip_locking = 1;
  3309. ret = btrfs_search_slot(NULL, rc->extent_root, &key, path,
  3310. 0, 0);
  3311. if (ret < 0)
  3312. break;
  3313. next:
  3314. leaf = path->nodes[0];
  3315. if (path->slots[0] >= btrfs_header_nritems(leaf)) {
  3316. ret = btrfs_next_leaf(rc->extent_root, path);
  3317. if (ret != 0)
  3318. break;
  3319. leaf = path->nodes[0];
  3320. }
  3321. btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
  3322. if (key.objectid >= last) {
  3323. ret = 1;
  3324. break;
  3325. }
  3326. if (key.type != BTRFS_EXTENT_ITEM_KEY &&
  3327. key.type != BTRFS_METADATA_ITEM_KEY) {
  3328. path->slots[0]++;
  3329. goto next;
  3330. }
  3331. if (key.type == BTRFS_EXTENT_ITEM_KEY &&
  3332. key.objectid + key.offset <= rc->search_start) {
  3333. path->slots[0]++;
  3334. goto next;
  3335. }
  3336. if (key.type == BTRFS_METADATA_ITEM_KEY &&
  3337. key.objectid + rc->extent_root->nodesize <=
  3338. rc->search_start) {
  3339. path->slots[0]++;
  3340. goto next;
  3341. }
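/* skip extents that were already processed in an earlier pass */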
  3342. ret = find_first_extent_bit(&rc->processed_blocks,
  3343. key.objectid, &start, &end,
  3344. EXTENT_DIRTY, NULL);
  3345. if (ret == 0 && start <= key.objectid) {
  3346. btrfs_release_path(path);
  3347. rc->search_start = end + 1;
  3348. } else {
  3349. if (key.type == BTRFS_EXTENT_ITEM_KEY)
  3350. rc->search_start = key.objectid + key.offset;
  3351. else
  3352. rc->search_start = key.objectid +
  3353. rc->extent_root->nodesize;
  3354. memcpy(extent_key, &key, sizeof(key));
  3355. return 0;
  3356. }
  3357. }
  3358. btrfs_release_path(path);
  3359. return ret;
  3360. }
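/* publish the active relocation control in fs_info->reloc_ctl */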
  3361. static void set_reloc_control(struct reloc_control *rc)
  3362. {
  3363. struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
  3364. mutex_lock(&fs_info->reloc_mutex);
  3365. fs_info->reloc_ctl = rc;
  3366. mutex_unlock(&fs_info->reloc_mutex);
  3367. }
  3368. static void unset_reloc_control(struct reloc_control *rc)
  3369. {
  3370. struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
  3371. mutex_lock(&fs_info->reloc_mutex);
  3372. fs_info->reloc_ctl = NULL;
  3373. mutex_unlock(&fs_info->reloc_mutex);
  3374. }
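/*
 * returns 1 for flag combinations that should never occur: an extent that is
 * both data and tree block, neither of the two, or data with FULL_BACKREF set.
 */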
  3375. static int check_extent_flags(u64 flags)
  3376. {
  3377. if ((flags & BTRFS_EXTENT_FLAG_DATA) &&
  3378. (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK))
  3379. return 1;
  3380. if (!(flags & BTRFS_EXTENT_FLAG_DATA) &&
  3381. !(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK))
  3382. return 1;
  3383. if ((flags & BTRFS_EXTENT_FLAG_DATA) &&
  3384. (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
  3385. return 1;
  3386. return 0;
  3387. }
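/*
 * set up state for a relocation pass: allocate the temporary block
 * reservation, reset the per-pass counters, make the reloc control visible
 * and commit a transaction, presumably so later commit-root searches start
 * from a consistent point.
 */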
  3388. static noinline_for_stack
  3389. int prepare_to_relocate(struct reloc_control *rc)
  3390. {
  3391. struct btrfs_trans_handle *trans;
  3392. rc->block_rsv = btrfs_alloc_block_rsv(rc->extent_root,
  3393. BTRFS_BLOCK_RSV_TEMP);
  3394. if (!rc->block_rsv)
  3395. return -ENOMEM;
  3396. memset(&rc->cluster, 0, sizeof(rc->cluster));
  3397. rc->search_start = rc->block_group->key.objectid;
  3398. rc->extents_found = 0;
  3399. rc->nodes_relocated = 0;
  3400. rc->merging_rsv_size = 0;
  3401. rc->reserved_bytes = 0;
  3402. rc->block_rsv->size = rc->extent_root->nodesize *
  3403. RELOCATION_RESERVED_NODES;
  3404. rc->create_reloc_tree = 1;
  3405. set_reloc_control(rc);
  3406. trans = btrfs_join_transaction(rc->extent_root);
  3407. if (IS_ERR(trans)) {
  3408. unset_reloc_control(rc);
  3409. /*
3410. * the extent tree is not a ref_cow tree and has no reloc_root to
3411. * clean up. And callers are responsible for freeing the above
3412. * block rsv.
  3413. */
  3414. return PTR_ERR(trans);
  3415. }
  3416. btrfs_commit_transaction(trans, rc->extent_root);
  3417. return 0;
  3418. }
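/*
 * main loop of a relocation pass: find the next unprocessed extent in the
 * block group, collect the tree blocks that reference it, relocate those
 * blocks and, in the MOVE_DATA_EXTENTS stage, cluster the data extents for
 * relocation; afterwards merge the reloc trees back into the fs trees.
 */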
  3419. static noinline_for_stack int relocate_block_group(struct reloc_control *rc)
  3420. {
  3421. struct rb_root blocks = RB_ROOT;
  3422. struct btrfs_key key;
  3423. struct btrfs_trans_handle *trans = NULL;
  3424. struct btrfs_path *path;
  3425. struct btrfs_extent_item *ei;
  3426. u64 flags;
  3427. u32 item_size;
  3428. int ret;
  3429. int err = 0;
  3430. int progress = 0;
  3431. path = btrfs_alloc_path();
  3432. if (!path)
  3433. return -ENOMEM;
  3434. path->reada = 1;
  3435. ret = prepare_to_relocate(rc);
  3436. if (ret) {
  3437. err = ret;
  3438. goto out_free;
  3439. }
  3440. while (1) {
  3441. rc->reserved_bytes = 0;
  3442. ret = btrfs_block_rsv_refill(rc->extent_root,
  3443. rc->block_rsv, rc->block_rsv->size,
  3444. BTRFS_RESERVE_FLUSH_ALL);
  3445. if (ret) {
  3446. err = ret;
  3447. break;
  3448. }
  3449. progress++;
  3450. trans = btrfs_start_transaction(rc->extent_root, 0);
  3451. if (IS_ERR(trans)) {
  3452. err = PTR_ERR(trans);
  3453. trans = NULL;
  3454. break;
  3455. }
  3456. restart:
  3457. if (update_backref_cache(trans, &rc->backref_cache)) {
  3458. btrfs_end_transaction(trans, rc->extent_root);
  3459. continue;
  3460. }
  3461. ret = find_next_extent(trans, rc, path, &key);
  3462. if (ret < 0)
  3463. err = ret;
  3464. if (ret != 0)
  3465. break;
  3466. rc->extents_found++;
  3467. ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
  3468. struct btrfs_extent_item);
  3469. item_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]);
  3470. if (item_size >= sizeof(*ei)) {
  3471. flags = btrfs_extent_flags(path->nodes[0], ei);
  3472. ret = check_extent_flags(flags);
  3473. BUG_ON(ret);
  3474. } else {
  3475. #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
  3476. u64 ref_owner;
  3477. int path_change = 0;
  3478. BUG_ON(item_size !=
  3479. sizeof(struct btrfs_extent_item_v0));
  3480. ret = get_ref_objectid_v0(rc, path, &key, &ref_owner,
  3481. &path_change);
  3482. if (ref_owner < BTRFS_FIRST_FREE_OBJECTID)
  3483. flags = BTRFS_EXTENT_FLAG_TREE_BLOCK;
  3484. else
  3485. flags = BTRFS_EXTENT_FLAG_DATA;
  3486. if (path_change) {
  3487. btrfs_release_path(path);
  3488. path->search_commit_root = 1;
  3489. path->skip_locking = 1;
  3490. ret = btrfs_search_slot(NULL, rc->extent_root,
  3491. &key, path, 0, 0);
  3492. if (ret < 0) {
  3493. err = ret;
  3494. break;
  3495. }
  3496. BUG_ON(ret > 0);
  3497. }
  3498. #else
  3499. BUG();
  3500. #endif
  3501. }
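/*
 * tree blocks are queued directly; data extents only need their referencing
 * tree blocks collected during the UPDATE_DATA_PTRS stage.
 */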
  3502. if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
  3503. ret = add_tree_block(rc, &key, path, &blocks);
  3504. } else if (rc->stage == UPDATE_DATA_PTRS &&
  3505. (flags & BTRFS_EXTENT_FLAG_DATA)) {
  3506. ret = add_data_references(rc, &key, path, &blocks);
  3507. } else {
  3508. btrfs_release_path(path);
  3509. ret = 0;
  3510. }
  3511. if (ret < 0) {
  3512. err = ret;
  3513. break;
  3514. }
  3515. if (!RB_EMPTY_ROOT(&blocks)) {
  3516. ret = relocate_tree_blocks(trans, rc, &blocks);
  3517. if (ret < 0) {
  3518. /*
  3519. * if we fail to relocate tree blocks, force to update
  3520. * backref cache when committing transaction.
  3521. */
  3522. rc->backref_cache.last_trans = trans->transid - 1;
  3523. if (ret != -EAGAIN) {
  3524. err = ret;
  3525. break;
  3526. }
  3527. rc->extents_found--;
  3528. rc->search_start = key.objectid;
  3529. }
  3530. }
  3531. btrfs_end_transaction_throttle(trans, rc->extent_root);
  3532. btrfs_btree_balance_dirty(rc->extent_root);
  3533. trans = NULL;
  3534. if (rc->stage == MOVE_DATA_EXTENTS &&
  3535. (flags & BTRFS_EXTENT_FLAG_DATA)) {
  3536. rc->found_file_extent = 1;
  3537. ret = relocate_data_extent(rc->data_inode,
  3538. &key, &rc->cluster);
  3539. if (ret < 0) {
  3540. err = ret;
  3541. break;
  3542. }
  3543. }
  3544. }
  3545. if (trans && progress && err == -ENOSPC) {
  3546. ret = btrfs_force_chunk_alloc(trans, rc->extent_root,
  3547. rc->block_group->flags);
  3548. if (ret == 0) {
  3549. err = 0;
  3550. progress = 0;
  3551. goto restart;
  3552. }
  3553. }
  3554. btrfs_release_path(path);
  3555. clear_extent_bits(&rc->processed_blocks, 0, (u64)-1, EXTENT_DIRTY,
  3556. GFP_NOFS);
  3557. if (trans) {
  3558. btrfs_end_transaction_throttle(trans, rc->extent_root);
  3559. btrfs_btree_balance_dirty(rc->extent_root);
  3560. }
  3561. if (!err) {
  3562. ret = relocate_file_extent_cluster(rc->data_inode,
  3563. &rc->cluster);
  3564. if (ret < 0)
  3565. err = ret;
  3566. }
  3567. rc->create_reloc_tree = 0;
  3568. set_reloc_control(rc);
  3569. backref_cache_cleanup(&rc->backref_cache);
  3570. btrfs_block_rsv_release(rc->extent_root, rc->block_rsv, (u64)-1);
  3571. err = prepare_to_merge(rc, err);
  3572. merge_reloc_roots(rc);
  3573. rc->merge_reloc_tree = 0;
  3574. unset_reloc_control(rc);
  3575. btrfs_block_rsv_release(rc->extent_root, rc->block_rsv, (u64)-1);
  3576. /* get rid of pinned extents */
  3577. trans = btrfs_join_transaction(rc->extent_root);
  3578. if (IS_ERR(trans))
  3579. err = PTR_ERR(trans);
  3580. else
  3581. btrfs_commit_transaction(trans, rc->extent_root);
  3582. out_free:
  3583. btrfs_free_block_rsv(rc->extent_root, rc->block_rsv);
  3584. btrfs_free_path(path);
  3585. return err;
  3586. }
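/*
 * insert a bare inode item (regular file, mode 0600, NOCOMPRESS|PREALLOC)
 * that will back the data relocation inode.
 */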
  3587. static int __insert_orphan_inode(struct btrfs_trans_handle *trans,
  3588. struct btrfs_root *root, u64 objectid)
  3589. {
  3590. struct btrfs_path *path;
  3591. struct btrfs_inode_item *item;
  3592. struct extent_buffer *leaf;
  3593. int ret;
  3594. path = btrfs_alloc_path();
  3595. if (!path)
  3596. return -ENOMEM;
  3597. ret = btrfs_insert_empty_inode(trans, root, path, objectid);
  3598. if (ret)
  3599. goto out;
  3600. leaf = path->nodes[0];
  3601. item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_inode_item);
  3602. memset_extent_buffer(leaf, 0, (unsigned long)item, sizeof(*item));
  3603. btrfs_set_inode_generation(leaf, item, 1);
  3604. btrfs_set_inode_size(leaf, item, 0);
  3605. btrfs_set_inode_mode(leaf, item, S_IFREG | 0600);
  3606. btrfs_set_inode_flags(leaf, item, BTRFS_INODE_NOCOMPRESS |
  3607. BTRFS_INODE_PREALLOC);
  3608. btrfs_mark_buffer_dirty(leaf);
  3609. out:
  3610. btrfs_free_path(path);
  3611. return ret;
  3612. }
  3613. /*
3614. * helper to create an inode for data relocation.
3615. * the inode is in the data relocation tree and its link count is 0
  3616. */
  3617. static noinline_for_stack
  3618. struct inode *create_reloc_inode(struct btrfs_fs_info *fs_info,
  3619. struct btrfs_block_group_cache *group)
  3620. {
  3621. struct inode *inode = NULL;
  3622. struct btrfs_trans_handle *trans;
  3623. struct btrfs_root *root;
  3624. struct btrfs_key key;
  3625. u64 objectid = BTRFS_FIRST_FREE_OBJECTID;
  3626. int err = 0;
  3627. root = read_fs_root(fs_info, BTRFS_DATA_RELOC_TREE_OBJECTID);
  3628. if (IS_ERR(root))
  3629. return ERR_CAST(root);
  3630. trans = btrfs_start_transaction(root, 6);
  3631. if (IS_ERR(trans))
  3632. return ERR_CAST(trans);
  3633. err = btrfs_find_free_objectid(root, &objectid);
  3634. if (err)
  3635. goto out;
  3636. err = __insert_orphan_inode(trans, root, objectid);
  3637. BUG_ON(err);
  3638. key.objectid = objectid;
  3639. key.type = BTRFS_INODE_ITEM_KEY;
  3640. key.offset = 0;
  3641. inode = btrfs_iget(root->fs_info->sb, &key, root, NULL);
  3642. BUG_ON(IS_ERR(inode) || is_bad_inode(inode));
  3643. BTRFS_I(inode)->index_cnt = group->key.objectid;
  3644. err = btrfs_orphan_add(trans, inode);
  3645. out:
  3646. btrfs_end_transaction(trans, root);
  3647. btrfs_btree_balance_dirty(root);
  3648. if (err) {
  3649. if (inode)
  3650. iput(inode);
  3651. inode = ERR_PTR(err);
  3652. }
  3653. return inode;
  3654. }
  3655. static struct reloc_control *alloc_reloc_control(struct btrfs_fs_info *fs_info)
  3656. {
  3657. struct reloc_control *rc;
  3658. rc = kzalloc(sizeof(*rc), GFP_NOFS);
  3659. if (!rc)
  3660. return NULL;
  3661. INIT_LIST_HEAD(&rc->reloc_roots);
  3662. backref_cache_init(&rc->backref_cache);
  3663. mapping_tree_init(&rc->reloc_root_tree);
  3664. extent_io_tree_init(&rc->processed_blocks,
  3665. fs_info->btree_inode->i_mapping);
  3666. return rc;
  3667. }
  3668. /*
  3669. * function to relocate all extents in a block group.
  3670. */
  3671. int btrfs_relocate_block_group(struct btrfs_root *extent_root, u64 group_start)
  3672. {
  3673. struct btrfs_fs_info *fs_info = extent_root->fs_info;
  3674. struct reloc_control *rc;
  3675. struct inode *inode;
  3676. struct btrfs_path *path;
  3677. int ret;
  3678. int rw = 0;
  3679. int err = 0;
  3680. rc = alloc_reloc_control(fs_info);
  3681. if (!rc)
  3682. return -ENOMEM;
  3683. rc->extent_root = extent_root;
  3684. rc->block_group = btrfs_lookup_block_group(fs_info, group_start);
  3685. BUG_ON(!rc->block_group);
  3686. if (!rc->block_group->ro) {
  3687. ret = btrfs_set_block_group_ro(extent_root, rc->block_group);
  3688. if (ret) {
  3689. err = ret;
  3690. goto out;
  3691. }
  3692. rw = 1;
  3693. }
  3694. path = btrfs_alloc_path();
  3695. if (!path) {
  3696. err = -ENOMEM;
  3697. goto out;
  3698. }
  3699. inode = lookup_free_space_inode(fs_info->tree_root, rc->block_group,
  3700. path);
  3701. btrfs_free_path(path);
  3702. if (!IS_ERR(inode))
  3703. ret = delete_block_group_cache(fs_info, inode, 0);
  3704. else
  3705. ret = PTR_ERR(inode);
  3706. if (ret && ret != -ENOENT) {
  3707. err = ret;
  3708. goto out;
  3709. }
  3710. rc->data_inode = create_reloc_inode(fs_info, rc->block_group);
  3711. if (IS_ERR(rc->data_inode)) {
  3712. err = PTR_ERR(rc->data_inode);
  3713. rc->data_inode = NULL;
  3714. goto out;
  3715. }
  3716. btrfs_info(extent_root->fs_info, "relocating block group %llu flags %llu",
  3717. rc->block_group->key.objectid, rc->block_group->flags);
  3718. ret = btrfs_start_delalloc_roots(fs_info, 0, -1);
  3719. if (ret < 0) {
  3720. err = ret;
  3721. goto out;
  3722. }
  3723. btrfs_wait_ordered_roots(fs_info, -1);
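/*
 * relocation runs in two stages: MOVE_DATA_EXTENTS copies the data out of
 * the block group, UPDATE_DATA_PTRS then rewrites the references; repeat
 * until no unprocessed extents remain.
 */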
  3724. while (1) {
  3725. mutex_lock(&fs_info->cleaner_mutex);
  3726. ret = relocate_block_group(rc);
  3727. mutex_unlock(&fs_info->cleaner_mutex);
  3728. if (ret < 0) {
  3729. err = ret;
  3730. goto out;
  3731. }
  3732. if (rc->extents_found == 0)
  3733. break;
  3734. btrfs_info(extent_root->fs_info, "found %llu extents",
  3735. rc->extents_found);
  3736. if (rc->stage == MOVE_DATA_EXTENTS && rc->found_file_extent) {
  3737. ret = btrfs_wait_ordered_range(rc->data_inode, 0,
  3738. (u64)-1);
  3739. if (ret) {
  3740. err = ret;
  3741. goto out;
  3742. }
  3743. invalidate_mapping_pages(rc->data_inode->i_mapping,
  3744. 0, -1);
  3745. rc->stage = UPDATE_DATA_PTRS;
  3746. }
  3747. }
  3748. WARN_ON(rc->block_group->pinned > 0);
  3749. WARN_ON(rc->block_group->reserved > 0);
  3750. WARN_ON(btrfs_block_group_used(&rc->block_group->item) > 0);
  3751. out:
  3752. if (err && rw)
  3753. btrfs_set_block_group_rw(extent_root, rc->block_group);
  3754. iput(rc->data_inode);
  3755. btrfs_put_block_group(rc->block_group);
  3756. kfree(rc);
  3757. return err;
  3758. }
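/*
 * a reloc root whose fs root no longer exists cannot be merged; reset its
 * drop progress and zero its refs to mark it as garbage for later deletion.
 */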
  3759. static noinline_for_stack int mark_garbage_root(struct btrfs_root *root)
  3760. {
  3761. struct btrfs_trans_handle *trans;
  3762. int ret, err;
  3763. trans = btrfs_start_transaction(root->fs_info->tree_root, 0);
  3764. if (IS_ERR(trans))
  3765. return PTR_ERR(trans);
  3766. memset(&root->root_item.drop_progress, 0,
  3767. sizeof(root->root_item.drop_progress));
  3768. root->root_item.drop_level = 0;
  3769. btrfs_set_root_refs(&root->root_item, 0);
  3770. ret = btrfs_update_root(trans, root->fs_info->tree_root,
  3771. &root->root_key, &root->root_item);
  3772. err = btrfs_end_transaction(trans, root->fs_info->tree_root);
  3773. if (err)
  3774. return err;
  3775. return ret;
  3776. }
  3777. /*
3778. * recover relocation interrupted by a system crash.
3779. *
3780. * this function resumes merging reloc trees with the corresponding fs trees.
3781. * this is important for preserving the sharing of tree blocks
  3782. */
  3783. int btrfs_recover_relocation(struct btrfs_root *root)
  3784. {
  3785. LIST_HEAD(reloc_roots);
  3786. struct btrfs_key key;
  3787. struct btrfs_root *fs_root;
  3788. struct btrfs_root *reloc_root;
  3789. struct btrfs_path *path;
  3790. struct extent_buffer *leaf;
  3791. struct reloc_control *rc = NULL;
  3792. struct btrfs_trans_handle *trans;
  3793. int ret;
  3794. int err = 0;
  3795. path = btrfs_alloc_path();
  3796. if (!path)
  3797. return -ENOMEM;
  3798. path->reada = -1;
  3799. key.objectid = BTRFS_TREE_RELOC_OBJECTID;
  3800. key.type = BTRFS_ROOT_ITEM_KEY;
  3801. key.offset = (u64)-1;
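/* walk all reloc tree root items in the tree root, highest key first */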
  3802. while (1) {
  3803. ret = btrfs_search_slot(NULL, root->fs_info->tree_root, &key,
  3804. path, 0, 0);
  3805. if (ret < 0) {
  3806. err = ret;
  3807. goto out;
  3808. }
  3809. if (ret > 0) {
  3810. if (path->slots[0] == 0)
  3811. break;
  3812. path->slots[0]--;
  3813. }
  3814. leaf = path->nodes[0];
  3815. btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
  3816. btrfs_release_path(path);
  3817. if (key.objectid != BTRFS_TREE_RELOC_OBJECTID ||
  3818. key.type != BTRFS_ROOT_ITEM_KEY)
  3819. break;
  3820. reloc_root = btrfs_read_fs_root(root, &key);
  3821. if (IS_ERR(reloc_root)) {
  3822. err = PTR_ERR(reloc_root);
  3823. goto out;
  3824. }
  3825. list_add(&reloc_root->root_list, &reloc_roots);
  3826. if (btrfs_root_refs(&reloc_root->root_item) > 0) {
  3827. fs_root = read_fs_root(root->fs_info,
  3828. reloc_root->root_key.offset);
  3829. if (IS_ERR(fs_root)) {
  3830. ret = PTR_ERR(fs_root);
  3831. if (ret != -ENOENT) {
  3832. err = ret;
  3833. goto out;
  3834. }
  3835. ret = mark_garbage_root(reloc_root);
  3836. if (ret < 0) {
  3837. err = ret;
  3838. goto out;
  3839. }
  3840. }
  3841. }
  3842. if (key.offset == 0)
  3843. break;
  3844. key.offset--;
  3845. }
  3846. btrfs_release_path(path);
  3847. if (list_empty(&reloc_roots))
  3848. goto out;
  3849. rc = alloc_reloc_control(root->fs_info);
  3850. if (!rc) {
  3851. err = -ENOMEM;
  3852. goto out;
  3853. }
  3854. rc->extent_root = root->fs_info->extent_root;
  3855. set_reloc_control(rc);
  3856. trans = btrfs_join_transaction(rc->extent_root);
  3857. if (IS_ERR(trans)) {
  3858. unset_reloc_control(rc);
  3859. err = PTR_ERR(trans);
  3860. goto out_free;
  3861. }
  3862. rc->merge_reloc_tree = 1;
  3863. while (!list_empty(&reloc_roots)) {
  3864. reloc_root = list_entry(reloc_roots.next,
  3865. struct btrfs_root, root_list);
  3866. list_del(&reloc_root->root_list);
  3867. if (btrfs_root_refs(&reloc_root->root_item) == 0) {
  3868. list_add_tail(&reloc_root->root_list,
  3869. &rc->reloc_roots);
  3870. continue;
  3871. }
  3872. fs_root = read_fs_root(root->fs_info,
  3873. reloc_root->root_key.offset);
  3874. if (IS_ERR(fs_root)) {
  3875. err = PTR_ERR(fs_root);
  3876. goto out_free;
  3877. }
  3878. err = __add_reloc_root(reloc_root);
  3879. BUG_ON(err < 0); /* -ENOMEM or logic error */
  3880. fs_root->reloc_root = reloc_root;
  3881. }
  3882. err = btrfs_commit_transaction(trans, rc->extent_root);
  3883. if (err)
  3884. goto out_free;
  3885. merge_reloc_roots(rc);
  3886. unset_reloc_control(rc);
  3887. trans = btrfs_join_transaction(rc->extent_root);
  3888. if (IS_ERR(trans))
  3889. err = PTR_ERR(trans);
  3890. else
  3891. err = btrfs_commit_transaction(trans, rc->extent_root);
  3892. out_free:
  3893. kfree(rc);
  3894. out:
  3895. if (!list_empty(&reloc_roots))
  3896. free_reloc_roots(&reloc_roots);
  3897. btrfs_free_path(path);
  3898. if (err == 0) {
  3899. /* cleanup orphan inode in data relocation tree */
  3900. fs_root = read_fs_root(root->fs_info,
  3901. BTRFS_DATA_RELOC_TREE_OBJECTID);
  3902. if (IS_ERR(fs_root))
  3903. err = PTR_ERR(fs_root);
  3904. else
  3905. err = btrfs_orphan_cleanup(fs_root);
  3906. }
  3907. return err;
  3908. }
  3909. /*
3910. * helper to add ordered checksums for data relocation.
3911. *
3912. * cloning the checksums properly handles the nodatasum extents.
3913. * it also saves the CPU time that would otherwise be spent re-calculating them.
  3914. */
  3915. int btrfs_reloc_clone_csums(struct inode *inode, u64 file_pos, u64 len)
  3916. {
  3917. struct btrfs_ordered_sum *sums;
  3918. struct btrfs_ordered_extent *ordered;
  3919. struct btrfs_root *root = BTRFS_I(inode)->root;
  3920. int ret;
  3921. u64 disk_bytenr;
  3922. u64 new_bytenr;
  3923. LIST_HEAD(list);
  3924. ordered = btrfs_lookup_ordered_extent(inode, file_pos);
  3925. BUG_ON(ordered->file_offset != file_pos || ordered->len != len);
  3926. disk_bytenr = file_pos + BTRFS_I(inode)->index_cnt;
  3927. ret = btrfs_lookup_csums_range(root->fs_info->csum_root, disk_bytenr,
  3928. disk_bytenr + len - 1, &list, 0);
  3929. if (ret)
  3930. goto out;
  3931. while (!list_empty(&list)) {
  3932. sums = list_entry(list.next, struct btrfs_ordered_sum, list);
  3933. list_del_init(&sums->list);
  3934. /*
  3935. * We need to offset the new_bytenr based on where the csum is.
  3936. * We need to do this because we will read in entire prealloc
  3937. * extents but we may have written to say the middle of the
  3938. * prealloc extent, so we need to make sure the csum goes with
  3939. * the right disk offset.
  3940. *
  3941. * We can do this because the data reloc inode refers strictly
  3942. * to the on disk bytes, so we don't have to worry about
  3943. * disk_len vs real len like with real inodes since it's all
  3944. * disk length.
  3945. */
  3946. new_bytenr = ordered->start + (sums->bytenr - disk_bytenr);
  3947. sums->bytenr = new_bytenr;
  3948. btrfs_add_ordered_sum(inode, ordered, sums);
  3949. }
  3950. out:
  3951. btrfs_put_ordered_extent(ordered);
  3952. return ret;
  3953. }
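/*
 * called when a tree block is COWed while relocation is active: keep the
 * backref cache pointing at the new copy of a reloc tree block and, for
 * leaves during UPDATE_DATA_PTRS, rewrite the file extent pointers in the
 * new block.
 */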
  3954. int btrfs_reloc_cow_block(struct btrfs_trans_handle *trans,
  3955. struct btrfs_root *root, struct extent_buffer *buf,
  3956. struct extent_buffer *cow)
  3957. {
  3958. struct reloc_control *rc;
  3959. struct backref_node *node;
  3960. int first_cow = 0;
  3961. int level;
  3962. int ret = 0;
  3963. rc = root->fs_info->reloc_ctl;
  3964. if (!rc)
  3965. return 0;
  3966. BUG_ON(rc->stage == UPDATE_DATA_PTRS &&
  3967. root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID);
  3968. if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
  3969. if (buf == root->node)
  3970. __update_reloc_root(root, cow->start);
  3971. }
  3972. level = btrfs_header_level(buf);
  3973. if (btrfs_header_generation(buf) <=
  3974. btrfs_root_last_snapshot(&root->root_item))
  3975. first_cow = 1;
  3976. if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID &&
  3977. rc->create_reloc_tree) {
  3978. WARN_ON(!first_cow && level == 0);
  3979. node = rc->backref_cache.path[level];
  3980. BUG_ON(node->bytenr != buf->start &&
  3981. node->new_bytenr != buf->start);
  3982. drop_node_buffer(node);
  3983. extent_buffer_get(cow);
  3984. node->eb = cow;
  3985. node->new_bytenr = cow->start;
  3986. if (!node->pending) {
  3987. list_move_tail(&node->list,
  3988. &rc->backref_cache.pending[level]);
  3989. node->pending = 1;
  3990. }
  3991. if (first_cow)
  3992. __mark_block_processed(rc, node);
  3993. if (first_cow && level > 0)
  3994. rc->nodes_relocated += buf->len;
  3995. }
  3996. if (level == 0 && first_cow && rc->stage == UPDATE_DATA_PTRS)
  3997. ret = replace_file_extents(trans, rc, root, cow);
  3998. return ret;
  3999. }
  4000. /*
4001. * called before creating a snapshot. it calculates the metadata reservation
4002. * required for relocating tree blocks in the snapshot
  4003. */
  4004. void btrfs_reloc_pre_snapshot(struct btrfs_trans_handle *trans,
  4005. struct btrfs_pending_snapshot *pending,
  4006. u64 *bytes_to_reserve)
  4007. {
  4008. struct btrfs_root *root;
  4009. struct reloc_control *rc;
  4010. root = pending->root;
  4011. if (!root->reloc_root)
  4012. return;
  4013. rc = root->fs_info->reloc_ctl;
  4014. if (!rc->merge_reloc_tree)
  4015. return;
  4016. root = root->reloc_root;
  4017. BUG_ON(btrfs_root_refs(&root->root_item) == 0);
  4018. /*
4019. * relocation is in the stage of merging trees. the space
4020. * used by merging a reloc tree is twice the size of the
4021. * relocated tree nodes in the worst case: half for cowing
4022. * the reloc tree, half for cowing the fs tree. the space
4023. * used by cowing the reloc tree will be freed after the
4024. * tree is dropped. if we create a snapshot, cowing the fs
4025. * tree may use more space than it frees, so we need to
4026. * reserve extra space.
  4027. */
  4028. *bytes_to_reserve += rc->nodes_relocated;
  4029. }
  4030. /*
4031. * called after the snapshot is created. migrate the block reservation
4032. * and create a reloc root for the newly created snapshot
  4033. */
  4034. int btrfs_reloc_post_snapshot(struct btrfs_trans_handle *trans,
  4035. struct btrfs_pending_snapshot *pending)
  4036. {
  4037. struct btrfs_root *root = pending->root;
  4038. struct btrfs_root *reloc_root;
  4039. struct btrfs_root *new_root;
  4040. struct reloc_control *rc;
  4041. int ret;
  4042. if (!root->reloc_root)
  4043. return 0;
  4044. rc = root->fs_info->reloc_ctl;
  4045. rc->merging_rsv_size += rc->nodes_relocated;
  4046. if (rc->merge_reloc_tree) {
  4047. ret = btrfs_block_rsv_migrate(&pending->block_rsv,
  4048. rc->block_rsv,
  4049. rc->nodes_relocated);
  4050. if (ret)
  4051. return ret;
  4052. }
  4053. new_root = pending->snap;
  4054. reloc_root = create_reloc_root(trans, root->reloc_root,
  4055. new_root->root_key.objectid);
  4056. if (IS_ERR(reloc_root))
  4057. return PTR_ERR(reloc_root);
  4058. ret = __add_reloc_root(reloc_root);
  4059. BUG_ON(ret < 0);
  4060. new_root->reloc_root = reloc_root;
  4061. if (rc->create_reloc_tree)
  4062. ret = clone_backref_node(trans, rc, root, reloc_root);
  4063. return ret;
  4064. }