/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/random.h>
#include <linux/iocontext.h>
#include <linux/capability.h>
#include <linux/kthread.h>
#include <asm/div64.h>
#include "compat.h"
#include "ctree.h"
#include "extent_map.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "volumes.h"
#include "async-thread.h"
#include "check-integrity.h"

static int init_first_rw_device(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_device *device);
static int btrfs_relocate_sys_chunks(struct btrfs_root *root);

static DEFINE_MUTEX(uuid_mutex);
static LIST_HEAD(fs_uuids);

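/* wrappers around the per-filesystem chunk_mutex */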
static void lock_chunks(struct btrfs_root *root)
{
	mutex_lock(&root->fs_info->chunk_mutex);
}

static void unlock_chunks(struct btrfs_root *root)
{
	mutex_unlock(&root->fs_info->chunk_mutex);
}

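/*
 * free an fs_devices struct and every btrfs_device hanging off of it;
 * callers are expected to have closed the devices first (hence the
 * WARN_ON below).
 */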
static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device;

	WARN_ON(fs_devices->opened);
	while (!list_empty(&fs_devices->devices)) {
		device = list_entry(fs_devices->devices.next,
				    struct btrfs_device, dev_list);
		list_del(&device->dev_list);
		kfree(device->name);
		kfree(device);
	}
	kfree(fs_devices);
}

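/*
 * empty the global fs_uuids list, freeing every fs_devices found while
 * scanning; only safe once nothing is mounted anymore.
 */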
int btrfs_cleanup_fs_uuids(void)
{
	struct btrfs_fs_devices *fs_devices;

	while (!list_empty(&fs_uuids)) {
		fs_devices = list_entry(fs_uuids.next,
					struct btrfs_fs_devices, list);
		list_del(&fs_devices->list);
		free_fs_devices(fs_devices);
	}
	return 0;
}

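/* search a device list for a matching devid and (optionally) device uuid */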
static noinline struct btrfs_device *__find_device(struct list_head *head,
						   u64 devid, u8 *uuid)
{
	struct btrfs_device *dev;

	list_for_each_entry(dev, head, dev_list) {
		if (dev->devid == devid &&
		    (!uuid || !memcmp(dev->uuid, uuid, BTRFS_UUID_SIZE))) {
			return dev;
		}
	}
	return NULL;
}

static noinline struct btrfs_fs_devices *find_fsid(u8 *fsid)
{
	struct btrfs_fs_devices *fs_devices;

	list_for_each_entry(fs_devices, &fs_uuids, list) {
		if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
			return fs_devices;
	}
	return NULL;
}

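/*
 * splice a chain of bios (head..tail) back onto the front of a pending
 * list, preserving their order so they are retried before newer bios.
 */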
static void requeue_list(struct btrfs_pending_bios *pending_bios,
			 struct bio *head, struct bio *tail)
{
	struct bio *old_head;

	old_head = pending_bios->head;
	pending_bios->head = head;
	if (pending_bios->tail)
		tail->bi_next = old_head;
	else
		pending_bios->tail = tail;
}

/*
 * we try to collect pending bios for a device so we don't get a large
 * number of procs sending bios down to the same device.  This greatly
 * improves the scheduler's ability to collect and merge the bios.
 *
 * But, it also turns into a long list of bios to process and that is sure
 * to eventually make the worker thread block.  The solution here is to
 * make some progress and then put this work struct back at the end of
 * the list if the block device is congested.  This way, multiple devices
 * can make progress from a single worker thread.
 */
static noinline int run_scheduled_bios(struct btrfs_device *device)
{
	struct bio *pending;
	struct backing_dev_info *bdi;
	struct btrfs_fs_info *fs_info;
	struct btrfs_pending_bios *pending_bios;
	struct bio *tail;
	struct bio *cur;
	int again = 0;
	unsigned long num_run;
	unsigned long batch_run = 0;
	unsigned long limit;
	unsigned long last_waited = 0;
	int force_reg = 0;
	int sync_pending = 0;
	struct blk_plug plug;

	/*
	 * this function runs all the bios we've collected for
	 * a particular device.  We don't want to wander off to
	 * another device without first sending all of these down.
	 * So, setup a plug here and finish it off before we return
	 */
	blk_start_plug(&plug);

	bdi = blk_get_backing_dev_info(device->bdev);
	fs_info = device->dev_root->fs_info;
	limit = btrfs_async_submit_limit(fs_info);
	limit = limit * 2 / 3;

loop:
	spin_lock(&device->io_lock);

loop_lock:
	num_run = 0;

	/* take all the bios off the list at once and process them
	 * later on (without the lock held).  But, remember the
	 * tail and other pointers so the bios can be properly reinserted
	 * into the list if we hit congestion
	 */
	if (!force_reg && device->pending_sync_bios.head) {
		pending_bios = &device->pending_sync_bios;
		force_reg = 1;
	} else {
		pending_bios = &device->pending_bios;
		force_reg = 0;
	}

	pending = pending_bios->head;
	tail = pending_bios->tail;
	WARN_ON(pending && !tail);

	/*
	 * if pending was null this time around, no bios need processing
	 * at all and we can stop.  Otherwise it'll loop back up again
	 * and do an additional check so no bios are missed.
	 *
	 * device->running_pending is used to synchronize with the
	 * schedule_bio code.
	 */
	if (device->pending_sync_bios.head == NULL &&
	    device->pending_bios.head == NULL) {
		again = 0;
		device->running_pending = 0;
	} else {
		again = 1;
		device->running_pending = 1;
	}

	pending_bios->head = NULL;
	pending_bios->tail = NULL;

	spin_unlock(&device->io_lock);

	while (pending) {
		rmb();
		/* we want to work on both lists, but do more bios on the
		 * sync list than the regular list
		 */
		if ((num_run > 32 &&
		     pending_bios != &device->pending_sync_bios &&
		     device->pending_sync_bios.head) ||
		    (num_run > 64 && pending_bios == &device->pending_sync_bios &&
		     device->pending_bios.head)) {
			spin_lock(&device->io_lock);
			requeue_list(pending_bios, pending, tail);
			goto loop_lock;
		}

		cur = pending;
		pending = pending->bi_next;
		cur->bi_next = NULL;
		atomic_dec(&fs_info->nr_async_bios);

		if (atomic_read(&fs_info->nr_async_bios) < limit &&
		    waitqueue_active(&fs_info->async_submit_wait))
			wake_up(&fs_info->async_submit_wait);

		BUG_ON(atomic_read(&cur->bi_cnt) == 0);

		/*
		 * if we're doing the sync list, record that our
		 * plug has some sync requests on it
		 *
		 * If we're doing the regular list and there are
		 * sync requests sitting around, unplug before
		 * we add more
		 */
		if (pending_bios == &device->pending_sync_bios) {
			sync_pending = 1;
		} else if (sync_pending) {
			blk_finish_plug(&plug);
			blk_start_plug(&plug);
			sync_pending = 0;
		}

		btrfsic_submit_bio(cur->bi_rw, cur);
		num_run++;
		batch_run++;
		if (need_resched())
			cond_resched();

		/*
		 * we made progress, there is more work to do and the bdi
		 * is now congested.  Back off and let other work structs
		 * run instead
		 */
		if (pending && bdi_write_congested(bdi) && batch_run > 8 &&
		    fs_info->fs_devices->open_devices > 1) {
			struct io_context *ioc;

			ioc = current->io_context;

			/*
			 * the main goal here is that we don't want to
			 * block if we're going to be able to submit
			 * more requests without blocking.
			 *
			 * This code does two great things, it pokes into
			 * the elevator code from a filesystem _and_
			 * it makes assumptions about how batching works.
			 */
			if (ioc && ioc->nr_batch_requests > 0 &&
			    time_before(jiffies, ioc->last_waited + HZ/50UL) &&
			    (last_waited == 0 ||
			     ioc->last_waited == last_waited)) {
				/*
				 * we want to go through our batch of
				 * requests and stop.  So, we copy out
				 * the ioc->last_waited time and test
				 * against it before looping
				 */
				last_waited = ioc->last_waited;
				if (need_resched())
					cond_resched();
				continue;
			}
			spin_lock(&device->io_lock);
			requeue_list(pending_bios, pending, tail);
			device->running_pending = 1;

			spin_unlock(&device->io_lock);
			btrfs_requeue_work(&device->work);
			goto done;
		}
		/* unplug every 64 requests just for good measure */
		if (batch_run % 64 == 0) {
			blk_finish_plug(&plug);
			blk_start_plug(&plug);
			sync_pending = 0;
		}
	}

	cond_resched();
	if (again)
		goto loop;

	spin_lock(&device->io_lock);
	if (device->pending_bios.head || device->pending_sync_bios.head)
		goto loop_lock;
	spin_unlock(&device->io_lock);

done:
	blk_finish_plug(&plug);
	return 0;
}

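/* worker callback: recover the device from the work struct and run its bios */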
static void pending_bios_fn(struct btrfs_work *work)
{
	struct btrfs_device *device;

	device = container_of(work, struct btrfs_device, work);
	run_scheduled_bios(device);
}

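/*
 * note a device found by a scan: find (or create) the in-memory fs_devices
 * matching the fsid in the super block, then find (or create) the
 * btrfs_device entry for this devid.  The newest generation seen wins the
 * latest_devid/latest_trans slots.
 */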
static noinline int device_list_add(const char *path,
			   struct btrfs_super_block *disk_super,
			   u64 devid, struct btrfs_fs_devices **fs_devices_ret)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices;
	u64 found_transid = btrfs_super_generation(disk_super);
	char *name;

	fs_devices = find_fsid(disk_super->fsid);
	if (!fs_devices) {
		fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
		if (!fs_devices)
			return -ENOMEM;
		INIT_LIST_HEAD(&fs_devices->devices);
		INIT_LIST_HEAD(&fs_devices->alloc_list);
		list_add(&fs_devices->list, &fs_uuids);
		memcpy(fs_devices->fsid, disk_super->fsid, BTRFS_FSID_SIZE);
		fs_devices->latest_devid = devid;
		fs_devices->latest_trans = found_transid;
		mutex_init(&fs_devices->device_list_mutex);
		device = NULL;
	} else {
		device = __find_device(&fs_devices->devices, devid,
				       disk_super->dev_item.uuid);
	}
	if (!device) {
		if (fs_devices->opened)
			return -EBUSY;

		device = kzalloc(sizeof(*device), GFP_NOFS);
		if (!device) {
			/* we can safely leave the fs_devices entry around */
			return -ENOMEM;
		}
		device->devid = devid;
		device->work.func = pending_bios_fn;
		memcpy(device->uuid, disk_super->dev_item.uuid,
		       BTRFS_UUID_SIZE);
		spin_lock_init(&device->io_lock);
		device->name = kstrdup(path, GFP_NOFS);
		if (!device->name) {
			kfree(device);
			return -ENOMEM;
		}
		INIT_LIST_HEAD(&device->dev_alloc_list);

		/* init readahead state */
		spin_lock_init(&device->reada_lock);
		device->reada_curr_zone = NULL;
		atomic_set(&device->reada_in_flight, 0);
		device->reada_next = 0;
		INIT_RADIX_TREE(&device->reada_zones, GFP_NOFS & ~__GFP_WAIT);
		INIT_RADIX_TREE(&device->reada_extents, GFP_NOFS & ~__GFP_WAIT);

		mutex_lock(&fs_devices->device_list_mutex);
		list_add_rcu(&device->dev_list, &fs_devices->devices);
		mutex_unlock(&fs_devices->device_list_mutex);

		device->fs_devices = fs_devices;
		fs_devices->num_devices++;
	} else if (!device->name || strcmp(device->name, path)) {
		name = kstrdup(path, GFP_NOFS);
		if (!name)
			return -ENOMEM;
		kfree(device->name);
		device->name = name;
		if (device->missing) {
			fs_devices->missing_devices--;
			device->missing = 0;
		}
	}

	if (found_transid > fs_devices->latest_trans) {
		fs_devices->latest_devid = devid;
		fs_devices->latest_trans = found_transid;
	}
	*fs_devices_ret = fs_devices;
	return 0;
}

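/*
 * build a private copy of an fs_devices and all of its devices; the copy
 * starts out closed, with no open block devices and an empty alloc list.
 */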
static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
{
	struct btrfs_fs_devices *fs_devices;
	struct btrfs_device *device;
	struct btrfs_device *orig_dev;

	fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
	if (!fs_devices)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&fs_devices->devices);
	INIT_LIST_HEAD(&fs_devices->alloc_list);
	INIT_LIST_HEAD(&fs_devices->list);
	mutex_init(&fs_devices->device_list_mutex);
	fs_devices->latest_devid = orig->latest_devid;
	fs_devices->latest_trans = orig->latest_trans;
	memcpy(fs_devices->fsid, orig->fsid, sizeof(fs_devices->fsid));

	/* We have held the volume lock, it is safe to get the devices. */
	list_for_each_entry(orig_dev, &orig->devices, dev_list) {
		device = kzalloc(sizeof(*device), GFP_NOFS);
		if (!device)
			goto error;

		device->name = kstrdup(orig_dev->name, GFP_NOFS);
		if (!device->name) {
			kfree(device);
			goto error;
		}

		device->devid = orig_dev->devid;
		device->work.func = pending_bios_fn;
		memcpy(device->uuid, orig_dev->uuid, sizeof(device->uuid));
		spin_lock_init(&device->io_lock);
		INIT_LIST_HEAD(&device->dev_list);
		INIT_LIST_HEAD(&device->dev_alloc_list);

		list_add(&device->dev_list, &fs_devices->devices);
		device->fs_devices = fs_devices;
		fs_devices->num_devices++;
	}
	return fs_devices;
error:
	free_fs_devices(fs_devices);
	return ERR_PTR(-ENOMEM);
}

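/*
 * drop devices that were scanned but are not part of the filesystem's
 * metadata, and remember the device with the newest generation as
 * latest_bdev.  Any seed device lists hanging off fs_devices are walked
 * the same way.
 */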
int btrfs_close_extra_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device, *next;
	struct block_device *latest_bdev = NULL;
	u64 latest_devid = 0;
	u64 latest_transid = 0;

	mutex_lock(&uuid_mutex);
again:
	/* This is the initialized path, it is safe to release the devices. */
	list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
		if (device->in_fs_metadata) {
			if (!latest_transid ||
			    device->generation > latest_transid) {
				latest_devid = device->devid;
				latest_transid = device->generation;
				latest_bdev = device->bdev;
			}
			continue;
		}

		if (device->bdev) {
			blkdev_put(device->bdev, device->mode);
			device->bdev = NULL;
			fs_devices->open_devices--;
		}
		if (device->writeable) {
			list_del_init(&device->dev_alloc_list);
			device->writeable = 0;
			fs_devices->rw_devices--;
		}
		list_del_init(&device->dev_list);
		fs_devices->num_devices--;
		kfree(device->name);
		kfree(device);
	}

	if (fs_devices->seed) {
		fs_devices = fs_devices->seed;
		goto again;
	}

	fs_devices->latest_bdev = latest_bdev;
	fs_devices->latest_devid = latest_devid;
	fs_devices->latest_trans = latest_transid;

	mutex_unlock(&uuid_mutex);
	return 0;
}

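/*
 * device structs are reachable under RCU, so freeing is deferred through
 * call_rcu(); the RCU callback then punts the real work to a workqueue,
 * since blkdev_put() can sleep and RCU callbacks may not.
 */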
static void __free_device(struct work_struct *work)
{
	struct btrfs_device *device;

	device = container_of(work, struct btrfs_device, rcu_work);

	if (device->bdev)
		blkdev_put(device->bdev, device->mode);

	kfree(device->name);
	kfree(device);
}

static void free_device(struct rcu_head *head)
{
	struct btrfs_device *device;

	device = container_of(head, struct btrfs_device, rcu);

	INIT_WORK(&device->rcu_work, __free_device);
	schedule_work(&device->rcu_work);
}

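/*
 * close every open device.  Readers may still be walking the list under
 * RCU, so each device struct is replaced with a cleaned-up copy and the
 * old one is freed through the RCU machinery above.
 */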
static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device;

	if (--fs_devices->opened > 0)
		return 0;

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		struct btrfs_device *new_device;

		if (device->bdev)
			fs_devices->open_devices--;

		if (device->writeable) {
			list_del_init(&device->dev_alloc_list);
			fs_devices->rw_devices--;
		}

		if (device->can_discard)
			fs_devices->num_can_discard--;

		new_device = kmalloc(sizeof(*new_device), GFP_NOFS);
		BUG_ON(!new_device);
		memcpy(new_device, device, sizeof(*new_device));
		new_device->name = kstrdup(device->name, GFP_NOFS);
		BUG_ON(device->name && !new_device->name);
		new_device->bdev = NULL;
		new_device->writeable = 0;
		new_device->in_fs_metadata = 0;
		new_device->can_discard = 0;
		list_replace_rcu(&device->dev_list, &new_device->dev_list);

		call_rcu(&device->rcu, free_device);
	}
	mutex_unlock(&fs_devices->device_list_mutex);

	WARN_ON(fs_devices->open_devices);
	WARN_ON(fs_devices->rw_devices);
	fs_devices->opened = 0;
	fs_devices->seeding = 0;

	return 0;
}

int btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_fs_devices *seed_devices = NULL;
	int ret;

	mutex_lock(&uuid_mutex);
	ret = __btrfs_close_devices(fs_devices);
	if (!fs_devices->opened) {
		seed_devices = fs_devices->seed;
		fs_devices->seed = NULL;
	}
	mutex_unlock(&uuid_mutex);

	while (seed_devices) {
		fs_devices = seed_devices;
		seed_devices = fs_devices->seed;
		__btrfs_close_devices(fs_devices);
		free_fs_devices(fs_devices);
	}
	return ret;
}

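/*
 * open every device in the list and read its super block, checking that
 * the devid and uuid still match what was scanned.  Devices that fail to
 * open are skipped; the open only fails if no device can be opened.
 */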
static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
				fmode_t flags, void *holder)
{
	struct request_queue *q;
	struct block_device *bdev;
	struct list_head *head = &fs_devices->devices;
	struct btrfs_device *device;
	struct block_device *latest_bdev = NULL;
	struct buffer_head *bh;
	struct btrfs_super_block *disk_super;
	u64 latest_devid = 0;
	u64 latest_transid = 0;
	u64 devid;
	int seeding = 1;
	int ret = 0;

	flags |= FMODE_EXCL;

	list_for_each_entry(device, head, dev_list) {
		if (device->bdev)
			continue;
		if (!device->name)
			continue;

		bdev = blkdev_get_by_path(device->name, flags, holder);
		if (IS_ERR(bdev)) {
			printk(KERN_INFO "open %s failed\n", device->name);
			goto error;
		}
		set_blocksize(bdev, 4096);

		bh = btrfs_read_dev_super(bdev);
		if (!bh)
			goto error_close;

		disk_super = (struct btrfs_super_block *)bh->b_data;
		devid = btrfs_stack_device_id(&disk_super->dev_item);
		if (devid != device->devid)
			goto error_brelse;

		if (memcmp(device->uuid, disk_super->dev_item.uuid,
			   BTRFS_UUID_SIZE))
			goto error_brelse;

		device->generation = btrfs_super_generation(disk_super);
		if (!latest_transid || device->generation > latest_transid) {
			latest_devid = devid;
			latest_transid = device->generation;
			latest_bdev = bdev;
		}

		if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) {
			device->writeable = 0;
		} else {
			device->writeable = !bdev_read_only(bdev);
			seeding = 0;
		}

		q = bdev_get_queue(bdev);
		if (blk_queue_discard(q)) {
			device->can_discard = 1;
			fs_devices->num_can_discard++;
		}

		device->bdev = bdev;
		device->in_fs_metadata = 0;
		device->mode = flags;

		if (!blk_queue_nonrot(bdev_get_queue(bdev)))
			fs_devices->rotating = 1;

		fs_devices->open_devices++;
		if (device->writeable) {
			fs_devices->rw_devices++;
			list_add(&device->dev_alloc_list,
				 &fs_devices->alloc_list);
		}
		brelse(bh);
		continue;

error_brelse:
		brelse(bh);
error_close:
		blkdev_put(bdev, flags);
error:
		continue;
	}
	if (fs_devices->open_devices == 0) {
		ret = -EINVAL;
		goto out;
	}
	fs_devices->seeding = seeding;
	fs_devices->opened = 1;
	fs_devices->latest_bdev = latest_bdev;
	fs_devices->latest_devid = latest_devid;
	fs_devices->latest_trans = latest_transid;
	fs_devices->total_rw_bytes = 0;
out:
	return ret;
}

int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
		       fmode_t flags, void *holder)
{
	int ret;

	mutex_lock(&uuid_mutex);
	if (fs_devices->opened) {
		fs_devices->opened++;
		ret = 0;
	} else {
		ret = __btrfs_open_devices(fs_devices, flags, holder);
	}
	mutex_unlock(&uuid_mutex);
	return ret;
}

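/*
 * read the btrfs super block from one block device and, if it is valid,
 * register the device in the global fs_uuids list via device_list_add().
 * The device is only held open long enough to read the super block.
 */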
int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
			  struct btrfs_fs_devices **fs_devices_ret)
{
	struct btrfs_super_block *disk_super;
	struct block_device *bdev;
	struct buffer_head *bh;
	int ret;
	u64 devid;
	u64 transid;

	flags |= FMODE_EXCL;
	bdev = blkdev_get_by_path(path, flags, holder);

	if (IS_ERR(bdev)) {
		ret = PTR_ERR(bdev);
		goto error;
	}

	mutex_lock(&uuid_mutex);
	ret = set_blocksize(bdev, 4096);
	if (ret)
		goto error_close;
	bh = btrfs_read_dev_super(bdev);
	if (!bh) {
		ret = -EINVAL;
		goto error_close;
	}
	disk_super = (struct btrfs_super_block *)bh->b_data;
	devid = btrfs_stack_device_id(&disk_super->dev_item);
	transid = btrfs_super_generation(disk_super);
	if (disk_super->label[0])
		printk(KERN_INFO "device label %s ", disk_super->label);
	else
		printk(KERN_INFO "device fsid %pU ", disk_super->fsid);
	printk(KERN_CONT "devid %llu transid %llu %s\n",
	       (unsigned long long)devid, (unsigned long long)transid, path);
	ret = device_list_add(path, disk_super, devid, fs_devices_ret);

	brelse(bh);
error_close:
	mutex_unlock(&uuid_mutex);
	blkdev_put(bdev, flags);
error:
	return ret;
}

/* helper to account the used device space in the range */
int btrfs_account_dev_extents_size(struct btrfs_device *device, u64 start,
				   u64 end, u64 *length)
{
	struct btrfs_key key;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *dev_extent;
	struct btrfs_path *path;
	u64 extent_end;
	int ret;
	int slot;
	struct extent_buffer *l;

	*length = 0;

	if (start >= device->total_bytes)
		return 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->reada = 2;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid, key.type);
		if (ret < 0)
			goto out;
	}

	while (1) {
		l = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto out;

			break;
		}
		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid < device->devid)
			goto next;

		if (key.objectid > device->devid)
			break;

		if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY)
			goto next;

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		extent_end = key.offset + btrfs_dev_extent_length(l,
								  dev_extent);
		if (key.offset <= start && extent_end > end) {
			*length = end - start + 1;
			break;
		} else if (key.offset <= start && extent_end > start)
			*length += extent_end - start;
		else if (key.offset > start && extent_end <= end)
			*length += extent_end - key.offset;
		else if (key.offset > start && key.offset <= end) {
			*length += end - key.offset + 1;
			break;
		} else if (key.offset > end)
			break;

next:
		path->slots[0]++;
	}
	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}

/*
 * find_free_dev_extent - find free space in the specified device
 * @device:	the device which we search the free space in
 * @num_bytes:	the size of the free space that we need
 * @start:	store the start of the free space.
 * @len:	the size of the free space that we find, or the size of the max
 *		free space if we don't find suitable free space
 *
 * this uses a pretty simple search, the expectation is that it is
 * called very infrequently and that a given device has a small number
 * of extents
 *
 * @start is used to store the start of the free space if we find one.  But if
 * we don't find suitable free space, it will be used to store the start
 * position of the max free space.
 *
 * @len is used to store the size of the free space that we find.
 * But if we don't find suitable free space, it is used to store the size of
 * the max free space.
 */
int find_free_dev_extent(struct btrfs_device *device, u64 num_bytes,
			 u64 *start, u64 *len)
{
	struct btrfs_key key;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *dev_extent;
	struct btrfs_path *path;
	u64 hole_size;
	u64 max_hole_start;
	u64 max_hole_size;
	u64 extent_end;
	u64 search_start;
	u64 search_end = device->total_bytes;
	int ret;
	int slot;
	struct extent_buffer *l;

	/* FIXME use last free of some kind */

	/* we don't want to overwrite the superblock on the drive,
	 * so we make sure to start at an offset of at least 1MB
	 */
	search_start = max(root->fs_info->alloc_start, 1024ull * 1024);

	max_hole_start = search_start;
	max_hole_size = 0;
	hole_size = 0;

	if (search_start >= search_end) {
		ret = -ENOSPC;
		goto error;
	}

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto error;
	}
	path->reada = 2;

	key.objectid = device->devid;
	key.offset = search_start;
	key.type = BTRFS_DEV_EXTENT_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid, key.type);
		if (ret < 0)
			goto out;
	}

	while (1) {
		l = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto out;

			break;
		}
		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid < device->devid)
			goto next;

		if (key.objectid > device->devid)
			break;

		if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY)
			goto next;

		if (key.offset > search_start) {
			hole_size = key.offset - search_start;

			if (hole_size > max_hole_size) {
				max_hole_start = search_start;
				max_hole_size = hole_size;
			}

			/*
			 * If this free space is greater than what we need,
			 * it must be the max free space that we have found
			 * until now, so max_hole_start must point to the start
			 * of this free space and the length of this free space
			 * is stored in max_hole_size.  Thus, we return
			 * max_hole_start and max_hole_size and go back to the
			 * caller.
			 */
			if (hole_size >= num_bytes) {
				ret = 0;
				goto out;
			}
		}

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		extent_end = key.offset + btrfs_dev_extent_length(l,
								  dev_extent);
		if (extent_end > search_start)
			search_start = extent_end;
next:
		path->slots[0]++;
		cond_resched();
	}

	/*
	 * At this point, search_start should be the end of
	 * allocated dev extents, and when shrinking the device,
	 * search_end may be smaller than search_start.
	 */
	if (search_end > search_start)
		hole_size = search_end - search_start;

	if (hole_size > max_hole_size) {
		max_hole_start = search_start;
		max_hole_size = hole_size;
	}

	/* See above. */
	if (hole_size < num_bytes)
		ret = -ENOSPC;
	else
		ret = 0;

out:
	btrfs_free_path(path);
error:
	*start = max_hole_start;
	if (len)
		*len = max_hole_size;
	return ret;
}

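/*
 * find the dev extent covering @start on @device, return its length to the
 * device's bytes_used / free_chunk_space accounting and delete the item
 * from the device tree.
 */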
static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
				 struct btrfs_device *device,
				 u64 start)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *leaf = NULL;
	struct btrfs_dev_extent *extent = NULL;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;
again:
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid,
					  BTRFS_DEV_EXTENT_KEY);
		if (ret)
			goto out;
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
		BUG_ON(found_key.offset > start || found_key.offset +
		       btrfs_dev_extent_length(leaf, extent) < start);
		key = found_key;
		btrfs_release_path(path);
		goto again;
	} else if (ret == 0) {
		leaf = path->nodes[0];
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
	}
	BUG_ON(ret);

	if (device->bytes_used > 0) {
		u64 len = btrfs_dev_extent_length(leaf, extent);
		device->bytes_used -= len;
		spin_lock(&root->fs_info->free_chunk_lock);
		root->fs_info->free_chunk_space += len;
		spin_unlock(&root->fs_info->free_chunk_lock);
	}
	ret = btrfs_del_item(trans, root, path);

out:
	btrfs_free_path(path);
	return ret;
}

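/*
 * insert a dev extent item mapping @num_bytes of the device, beginning at
 * @start, to the chunk described by the chunk_* arguments.
 */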
int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
			   struct btrfs_device *device,
			   u64 chunk_tree, u64 chunk_objectid,
			   u64 chunk_offset, u64 start, u64 num_bytes)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *extent;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	WARN_ON(!device->in_fs_metadata);
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;
	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*extent));
	BUG_ON(ret);

	leaf = path->nodes[0];
	extent = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_dev_extent);
	btrfs_set_dev_extent_chunk_tree(leaf, extent, chunk_tree);
	btrfs_set_dev_extent_chunk_objectid(leaf, extent, chunk_objectid);
	btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);

	write_extent_buffer(leaf, root->fs_info->chunk_tree_uuid,
		    (unsigned long)btrfs_dev_extent_chunk_tree_uuid(extent),
		    BTRFS_UUID_SIZE);

	btrfs_set_dev_extent_length(leaf, extent, num_bytes);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_free_path(path);
	return ret;
}

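/*
 * find the offset just past the last chunk item with the given objectid,
 * i.e. where the next chunk of that type can go; 0 if none exist yet.
 */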
static noinline int find_next_chunk(struct btrfs_root *root,
				    u64 objectid, u64 *offset)
{
	struct btrfs_path *path;
	int ret;
	struct btrfs_key key;
	struct btrfs_chunk *chunk;
	struct btrfs_key found_key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = objectid;
	key.offset = (u64)-1;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;

	BUG_ON(ret == 0);

	ret = btrfs_previous_item(root, path, 0, BTRFS_CHUNK_ITEM_KEY);
	if (ret) {
		*offset = 0;
	} else {
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		if (found_key.objectid != objectid)
			*offset = 0;
		else {
			chunk = btrfs_item_ptr(path->nodes[0], path->slots[0],
					       struct btrfs_chunk);
			*offset = found_key.offset +
				btrfs_chunk_length(path->nodes[0], chunk);
		}
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}

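/*
 * dev items are keyed by devid under BTRFS_DEV_ITEMS_OBJECTID, so the next
 * free devid is one past the highest key offset found.
 */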
static noinline int find_next_devid(struct btrfs_root *root, u64 *objectid)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_path *path;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;

	BUG_ON(ret == 0);

	ret = btrfs_previous_item(root, path, BTRFS_DEV_ITEMS_OBJECTID,
				  BTRFS_DEV_ITEM_KEY);
	if (ret) {
		*objectid = 1;
	} else {
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		*objectid = found_key.offset + 1;
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}

/*
 * the device information is stored in the chunk root
 * the btrfs_device struct should be fully filled in
 */
int btrfs_add_device(struct btrfs_trans_handle *trans,
		     struct btrfs_root *root,
		     struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	unsigned long ptr;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*dev_item));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_generation(leaf, dev_item, 0);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
	btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
	btrfs_set_device_group(leaf, dev_item, 0);
	btrfs_set_device_seek_speed(leaf, dev_item, 0);
	btrfs_set_device_bandwidth(leaf, dev_item, 0);
	btrfs_set_device_start_offset(leaf, dev_item, 0);

	ptr = (unsigned long)btrfs_device_uuid(dev_item);
	write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
	ptr = (unsigned long)btrfs_device_fsid(dev_item);
	write_extent_buffer(leaf, root->fs_info->fsid, ptr, BTRFS_UUID_SIZE);
	btrfs_mark_buffer_dirty(leaf);

	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}

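/*
 * delete the dev item for @device from the chunk root, in a transaction of
 * its own.
 */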
static int btrfs_rm_dev_item(struct btrfs_root *root,
			     struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_trans_handle *trans;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		btrfs_free_path(path);
		return PTR_ERR(trans);
	}
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;
	lock_chunks(root);

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, root, path);
	if (ret)
		goto out;
out:
	btrfs_free_path(path);
	unlock_chunks(root);
	btrfs_commit_transaction(trans, root);
	return ret;
}

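/*
 * remove a device from a mounted filesystem: enforce the redundancy
 * limits, migrate all of its chunks away via btrfs_shrink_device(), drop
 * its items and in-memory state, and finally wipe the super block magic
 * so the device is no longer detected as part of the FS.
 */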
int btrfs_rm_device(struct btrfs_root *root, char *device_path)
{
	struct btrfs_device *device;
	struct btrfs_device *next_device;
	struct block_device *bdev;
	struct buffer_head *bh = NULL;
	struct btrfs_super_block *disk_super;
	struct btrfs_fs_devices *cur_devices;
	u64 all_avail;
	u64 devid;
	u64 num_devices;
	u8 *dev_uuid;
	int ret = 0;
	bool clear_super = false;

	mutex_lock(&uuid_mutex);

	all_avail = root->fs_info->avail_data_alloc_bits |
		root->fs_info->avail_system_alloc_bits |
		root->fs_info->avail_metadata_alloc_bits;

	if ((all_avail & BTRFS_BLOCK_GROUP_RAID10) &&
	    root->fs_info->fs_devices->num_devices <= 4) {
		printk(KERN_ERR "btrfs: unable to go below four devices "
		       "on raid10\n");
		ret = -EINVAL;
		goto out;
	}

	if ((all_avail & BTRFS_BLOCK_GROUP_RAID1) &&
	    root->fs_info->fs_devices->num_devices <= 2) {
		printk(KERN_ERR "btrfs: unable to go below two "
		       "devices on raid1\n");
		ret = -EINVAL;
		goto out;
	}

	if (strcmp(device_path, "missing") == 0) {
		struct list_head *devices;
		struct btrfs_device *tmp;

		device = NULL;
		devices = &root->fs_info->fs_devices->devices;
		/*
		 * It is safe to read the devices since the volume_mutex
		 * is held.
		 */
		list_for_each_entry(tmp, devices, dev_list) {
			if (tmp->in_fs_metadata && !tmp->bdev) {
				device = tmp;
				break;
			}
		}
		bdev = NULL;
		bh = NULL;
		disk_super = NULL;
		if (!device) {
			printk(KERN_ERR "btrfs: no missing devices found to "
			       "remove\n");
			goto out;
		}
	} else {
		bdev = blkdev_get_by_path(device_path, FMODE_READ | FMODE_EXCL,
					  root->fs_info->bdev_holder);
		if (IS_ERR(bdev)) {
			ret = PTR_ERR(bdev);
			goto out;
		}

		set_blocksize(bdev, 4096);
		bh = btrfs_read_dev_super(bdev);
		if (!bh) {
			ret = -EINVAL;
			goto error_close;
		}
		disk_super = (struct btrfs_super_block *)bh->b_data;
		devid = btrfs_stack_device_id(&disk_super->dev_item);
		dev_uuid = disk_super->dev_item.uuid;
		device = btrfs_find_device(root, devid, dev_uuid,
					   disk_super->fsid);
		if (!device) {
			ret = -ENOENT;
			goto error_brelse;
		}
	}

	if (device->writeable && root->fs_info->fs_devices->rw_devices == 1) {
		printk(KERN_ERR "btrfs: unable to remove the only writeable "
		       "device\n");
		ret = -EINVAL;
		goto error_brelse;
	}

	if (device->writeable) {
		lock_chunks(root);
		list_del_init(&device->dev_alloc_list);
		unlock_chunks(root);
		root->fs_info->fs_devices->rw_devices--;
		clear_super = true;
	}

	ret = btrfs_shrink_device(device, 0);
	if (ret)
		goto error_undo;

	ret = btrfs_rm_dev_item(root->fs_info->chunk_root, device);
	if (ret)
		goto error_undo;

	spin_lock(&root->fs_info->free_chunk_lock);
	root->fs_info->free_chunk_space = device->total_bytes -
					  device->bytes_used;
	spin_unlock(&root->fs_info->free_chunk_lock);

	device->in_fs_metadata = 0;
	btrfs_scrub_cancel_dev(root, device);

	/*
	 * the device list mutex makes sure that we don't change
	 * the device list while someone else is writing out all
	 * the device supers.
	 */
	cur_devices = device->fs_devices;
	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
	list_del_rcu(&device->dev_list);

	device->fs_devices->num_devices--;

	if (device->missing)
		root->fs_info->fs_devices->missing_devices--;

	next_device = list_entry(root->fs_info->fs_devices->devices.next,
				 struct btrfs_device, dev_list);
	if (device->bdev == root->fs_info->sb->s_bdev)
		root->fs_info->sb->s_bdev = next_device->bdev;
	if (device->bdev == root->fs_info->fs_devices->latest_bdev)
		root->fs_info->fs_devices->latest_bdev = next_device->bdev;

	if (device->bdev)
		device->fs_devices->open_devices--;

	call_rcu(&device->rcu, free_device);
	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);

	num_devices = btrfs_super_num_devices(root->fs_info->super_copy) - 1;
	btrfs_set_super_num_devices(root->fs_info->super_copy, num_devices);

	if (cur_devices->open_devices == 0) {
		struct btrfs_fs_devices *fs_devices;
		fs_devices = root->fs_info->fs_devices;
		while (fs_devices) {
			if (fs_devices->seed == cur_devices)
				break;
			fs_devices = fs_devices->seed;
		}
		fs_devices->seed = cur_devices->seed;
		cur_devices->seed = NULL;
		lock_chunks(root);
		__btrfs_close_devices(cur_devices);
		unlock_chunks(root);
		free_fs_devices(cur_devices);
	}

	/*
	 * at this point, the device is zero sized. We want to
	 * remove it from the devices list and zero out the old super
	 */
	if (clear_super) {
		/* make sure this device isn't detected as part of
		 * the FS anymore
		 */
		memset(&disk_super->magic, 0, sizeof(disk_super->magic));
		set_buffer_dirty(bh);
		sync_dirty_buffer(bh);
	}

	ret = 0;

error_brelse:
	brelse(bh);
error_close:
	if (bdev)
		blkdev_put(bdev, FMODE_READ | FMODE_EXCL);
out:
	mutex_unlock(&uuid_mutex);
	return ret;
error_undo:
	if (device->writeable) {
		lock_chunks(root);
		list_add(&device->dev_alloc_list,
			 &root->fs_info->fs_devices->alloc_list);
		unlock_chunks(root);
		root->fs_info->fs_devices->rw_devices++;
	}
	goto error_brelse;
}

/*
 * does all the dirty work required for changing the file system's UUID.
 */
static int btrfs_prepare_sprout(struct btrfs_root *root)
{
	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
	struct btrfs_fs_devices *old_devices;
	struct btrfs_fs_devices *seed_devices;
	struct btrfs_super_block *disk_super = root->fs_info->super_copy;
	struct btrfs_device *device;
	u64 super_flags;

	BUG_ON(!mutex_is_locked(&uuid_mutex));
	if (!fs_devices->seeding)
		return -EINVAL;

	seed_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
	if (!seed_devices)
		return -ENOMEM;

	old_devices = clone_fs_devices(fs_devices);
	if (IS_ERR(old_devices)) {
		kfree(seed_devices);
		return PTR_ERR(old_devices);
	}

	list_add(&old_devices->list, &fs_uuids);

	memcpy(seed_devices, fs_devices, sizeof(*seed_devices));
	seed_devices->opened = 1;
	INIT_LIST_HEAD(&seed_devices->devices);
	INIT_LIST_HEAD(&seed_devices->alloc_list);
	mutex_init(&seed_devices->device_list_mutex);

	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
	list_splice_init_rcu(&fs_devices->devices, &seed_devices->devices,
			     synchronize_rcu);
	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);

	list_splice_init(&fs_devices->alloc_list, &seed_devices->alloc_list);
	list_for_each_entry(device, &seed_devices->devices, dev_list) {
		device->fs_devices = seed_devices;
	}

	fs_devices->seeding = 0;
	fs_devices->num_devices = 0;
	fs_devices->open_devices = 0;
	fs_devices->seed = seed_devices;

	generate_random_uuid(fs_devices->fsid);
	memcpy(root->fs_info->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
	memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
	super_flags = btrfs_super_flags(disk_super) &
		      ~BTRFS_SUPER_FLAG_SEEDING;
	btrfs_set_super_flags(disk_super, super_flags);

	return 0;
}

/*
 * store the expected generation for seed devices in device items.
 */
static int btrfs_finish_sprout(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_dev_item *dev_item;
	struct btrfs_device *device;
	struct btrfs_key key;
	u8 fs_uuid[BTRFS_UUID_SIZE];
	u8 dev_uuid[BTRFS_UUID_SIZE];
	u64 devid;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	root = root->fs_info->chunk_root;
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.offset = 0;
	key.type = BTRFS_DEV_ITEM_KEY;

	while (1) {
		ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
		if (ret < 0)
			goto error;

		leaf = path->nodes[0];
next_slot:
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret > 0)
				break;
			if (ret < 0)
				goto error;
			leaf = path->nodes[0];
			btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
			btrfs_release_path(path);
			continue;
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid != BTRFS_DEV_ITEMS_OBJECTID ||
		    key.type != BTRFS_DEV_ITEM_KEY)
			break;

		dev_item = btrfs_item_ptr(leaf, path->slots[0],
					  struct btrfs_dev_item);
		devid = btrfs_device_id(leaf, dev_item);
		read_extent_buffer(leaf, dev_uuid,
				   (unsigned long)btrfs_device_uuid(dev_item),
				   BTRFS_UUID_SIZE);
		read_extent_buffer(leaf, fs_uuid,
				   (unsigned long)btrfs_device_fsid(dev_item),
				   BTRFS_UUID_SIZE);
		device = btrfs_find_device(root, devid, dev_uuid, fs_uuid);
		BUG_ON(!device);

		if (device->fs_devices->seeding) {
			btrfs_set_device_generation(leaf, dev_item,
						    device->generation);
			btrfs_mark_buffer_dirty(leaf);
		}

		path->slots[0]++;
		goto next_slot;
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}

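/*
 * Add the device at @device_path to the filesystem.  If the existing
 * filesystem is a seed, this sprouts a new writable filesystem on top
 * of it.
 */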
int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
{
	struct request_queue *q;
	struct btrfs_trans_handle *trans;
	struct btrfs_device *device;
	struct block_device *bdev;
	struct list_head *devices;
	struct super_block *sb = root->fs_info->sb;
	u64 total_bytes;
	int seeding_dev = 0;
	int ret = 0;

	if ((sb->s_flags & MS_RDONLY) && !root->fs_info->fs_devices->seeding)
		return -EINVAL;

	bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL,
				  root->fs_info->bdev_holder);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);

	if (root->fs_info->fs_devices->seeding) {
		seeding_dev = 1;
		down_write(&sb->s_umount);
		mutex_lock(&uuid_mutex);
	}

	filemap_write_and_wait(bdev->bd_inode->i_mapping);

	devices = &root->fs_info->fs_devices->devices;
	/*
	 * we have the volume lock, so we don't need the extra
	 * device list mutex while reading the list here.
	 */
	list_for_each_entry(device, devices, dev_list) {
		if (device->bdev == bdev) {
			ret = -EEXIST;
			goto error;
		}
	}

	device = kzalloc(sizeof(*device), GFP_NOFS);
	if (!device) {
		/* we can safely leave the fs_devices entry around */
		ret = -ENOMEM;
		goto error;
	}

	device->name = kstrdup(device_path, GFP_NOFS);
	if (!device->name) {
		kfree(device);
		ret = -ENOMEM;
		goto error;
	}

	ret = find_next_devid(root, &device->devid);
	if (ret) {
		kfree(device->name);
		kfree(device);
		goto error;
	}

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		kfree(device->name);
		kfree(device);
		ret = PTR_ERR(trans);
		goto error;
	}

	lock_chunks(root);

	q = bdev_get_queue(bdev);
	if (blk_queue_discard(q))
		device->can_discard = 1;
	device->writeable = 1;
	device->work.func = pending_bios_fn;
	generate_random_uuid(device->uuid);
	spin_lock_init(&device->io_lock);
	device->generation = trans->transid;
	device->io_width = root->sectorsize;
	device->io_align = root->sectorsize;
	device->sector_size = root->sectorsize;
	device->total_bytes = i_size_read(bdev->bd_inode);
	device->disk_total_bytes = device->total_bytes;
	device->dev_root = root->fs_info->dev_root;
	device->bdev = bdev;
	device->in_fs_metadata = 1;
	device->mode = FMODE_EXCL;
	set_blocksize(device->bdev, 4096);

	if (seeding_dev) {
		sb->s_flags &= ~MS_RDONLY;
		ret = btrfs_prepare_sprout(root);
		BUG_ON(ret);
	}

	device->fs_devices = root->fs_info->fs_devices;

	/*
	 * we don't want write_supers to jump in here with our device
	 * half setup
	 */
	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
	list_add_rcu(&device->dev_list, &root->fs_info->fs_devices->devices);
	list_add(&device->dev_alloc_list,
		 &root->fs_info->fs_devices->alloc_list);
	root->fs_info->fs_devices->num_devices++;
	root->fs_info->fs_devices->open_devices++;
	root->fs_info->fs_devices->rw_devices++;
	if (device->can_discard)
		root->fs_info->fs_devices->num_can_discard++;
	root->fs_info->fs_devices->total_rw_bytes += device->total_bytes;

	spin_lock(&root->fs_info->free_chunk_lock);
	root->fs_info->free_chunk_space += device->total_bytes;
	spin_unlock(&root->fs_info->free_chunk_lock);

	if (!blk_queue_nonrot(bdev_get_queue(bdev)))
		root->fs_info->fs_devices->rotating = 1;

	total_bytes = btrfs_super_total_bytes(root->fs_info->super_copy);
	btrfs_set_super_total_bytes(root->fs_info->super_copy,
				    total_bytes + device->total_bytes);

	total_bytes = btrfs_super_num_devices(root->fs_info->super_copy);
	btrfs_set_super_num_devices(root->fs_info->super_copy,
				    total_bytes + 1);
	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);

	if (seeding_dev) {
		ret = init_first_rw_device(trans, root, device);
		BUG_ON(ret);
		ret = btrfs_finish_sprout(trans, root);
		BUG_ON(ret);
	} else {
		ret = btrfs_add_device(trans, root, device);
	}

	/*
	 * we've got more storage, clear any full flags on the space
	 * infos
	 */
	btrfs_clear_space_info_full(root->fs_info);

	unlock_chunks(root);
	btrfs_commit_transaction(trans, root);

	if (seeding_dev) {
		mutex_unlock(&uuid_mutex);
		up_write(&sb->s_umount);

		ret = btrfs_relocate_sys_chunks(root);
		BUG_ON(ret);
	}

	return ret;

error:
	blkdev_put(bdev, FMODE_EXCL);
	if (seeding_dev) {
		mutex_unlock(&uuid_mutex);
		up_write(&sb->s_umount);
	}
	return ret;
}

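/*
 * Write the in-memory state of @device back into its device item in
 * the chunk tree.
 */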
static noinline int btrfs_update_device(struct btrfs_trans_handle *trans,
					struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	root = device->dev_root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_total_bytes(leaf, dev_item, device->disk_total_bytes);
	btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
	btrfs_mark_buffer_dirty(leaf);

out:
	btrfs_free_path(path);
	return ret;
}

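/*
 * Grow @device to @new_size.  Expects the chunk mutex to be held;
 * btrfs_grow_device() below is the locked wrapper.
 */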
static int __btrfs_grow_device(struct btrfs_trans_handle *trans,
			       struct btrfs_device *device, u64 new_size)
{
	struct btrfs_super_block *super_copy =
		device->dev_root->fs_info->super_copy;
	u64 old_total = btrfs_super_total_bytes(super_copy);
	u64 diff = new_size - device->total_bytes;

	if (!device->writeable)
		return -EACCES;
	if (new_size <= device->total_bytes)
		return -EINVAL;

	btrfs_set_super_total_bytes(super_copy, old_total + diff);
	device->fs_devices->total_rw_bytes += diff;

	device->total_bytes = new_size;
	device->disk_total_bytes = new_size;
	btrfs_clear_space_info_full(device->dev_root->fs_info);

	return btrfs_update_device(trans, device);
}

int btrfs_grow_device(struct btrfs_trans_handle *trans,
		      struct btrfs_device *device, u64 new_size)
{
	int ret;
	lock_chunks(device->dev_root);
	ret = __btrfs_grow_device(trans, device, new_size);
	unlock_chunks(device->dev_root);
	return ret;
}

static int btrfs_free_chunk(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root,
			    u64 chunk_tree, u64 chunk_objectid,
			    u64 chunk_offset)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;

	root = root->fs_info->chunk_root;
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = chunk_objectid;
	key.offset = chunk_offset;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	BUG_ON(ret);

	ret = btrfs_del_item(trans, root, path);

	btrfs_free_path(path);
	return ret;
}

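/*
 * Remove the chunk keyed by (@chunk_objectid, @chunk_offset) from the
 * sys_chunk_array in the in-memory super block copy.
 */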
static int btrfs_del_sys_chunk(struct btrfs_root *root, u64 chunk_objectid,
			       u64 chunk_offset)
{
	struct btrfs_super_block *super_copy = root->fs_info->super_copy;
	struct btrfs_disk_key *disk_key;
	struct btrfs_chunk *chunk;
	u8 *ptr;
	int ret = 0;
	u32 num_stripes;
	u32 array_size;
	u32 len = 0;
	u32 cur;
	struct btrfs_key key;

	array_size = btrfs_super_sys_array_size(super_copy);

	ptr = super_copy->sys_chunk_array;
	cur = 0;

	while (cur < array_size) {
		disk_key = (struct btrfs_disk_key *)ptr;
		btrfs_disk_key_to_cpu(&key, disk_key);

		len = sizeof(*disk_key);

		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
			chunk = (struct btrfs_chunk *)(ptr + len);
			num_stripes = btrfs_stack_chunk_num_stripes(chunk);
			len += btrfs_chunk_item_size(num_stripes);
		} else {
			ret = -EIO;
			break;
		}
		if (key.objectid == chunk_objectid &&
		    key.offset == chunk_offset) {
			memmove(ptr, ptr + len, array_size - (cur + len));
			array_size -= len;
			btrfs_set_super_sys_array_size(super_copy, array_size);
		} else {
			ptr += len;
			cur += len;
		}
	}
	return ret;
}

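/*
 * Relocate everything stored in the chunk at @chunk_offset and delete
 * the chunk: its block group, device extents, chunk tree item and,
 * for system chunks, its sys_chunk_array entry.
 */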
static int btrfs_relocate_chunk(struct btrfs_root *root,
				u64 chunk_tree, u64 chunk_objectid,
				u64 chunk_offset)
{
	struct extent_map_tree *em_tree;
	struct btrfs_root *extent_root;
	struct btrfs_trans_handle *trans;
	struct extent_map *em;
	struct map_lookup *map;
	int ret;
	int i;

	root = root->fs_info->chunk_root;
	extent_root = root->fs_info->extent_root;
	em_tree = &root->fs_info->mapping_tree.map_tree;

	ret = btrfs_can_relocate(extent_root, chunk_offset);
	if (ret)
		return -ENOSPC;

	/* step one, relocate all the extents inside this chunk */
	ret = btrfs_relocate_block_group(extent_root, chunk_offset);
	if (ret)
		return ret;

	trans = btrfs_start_transaction(root, 0);
	BUG_ON(IS_ERR(trans));

	lock_chunks(root);

	/*
	 * step two, delete the device extents and the
	 * chunk tree entries
	 */
	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, chunk_offset, 1);
	read_unlock(&em_tree->lock);

	BUG_ON(!em || em->start > chunk_offset ||
	       em->start + em->len < chunk_offset);
	map = (struct map_lookup *)em->bdev;

	for (i = 0; i < map->num_stripes; i++) {
		ret = btrfs_free_dev_extent(trans, map->stripes[i].dev,
					    map->stripes[i].physical);
		BUG_ON(ret);

		if (map->stripes[i].dev) {
			ret = btrfs_update_device(trans, map->stripes[i].dev);
			BUG_ON(ret);
		}
	}
	ret = btrfs_free_chunk(trans, root, chunk_tree, chunk_objectid,
			       chunk_offset);
	BUG_ON(ret);

	trace_btrfs_chunk_free(root, map, chunk_offset, em->len);

	if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
		ret = btrfs_del_sys_chunk(root, chunk_objectid, chunk_offset);
		BUG_ON(ret);
	}

	ret = btrfs_remove_block_group(trans, extent_root, chunk_offset);
	BUG_ON(ret);

	write_lock(&em_tree->lock);
	remove_extent_mapping(em_tree, em);
	write_unlock(&em_tree->lock);

	kfree(map);
	em->bdev = NULL;

	/* once for the tree */
	free_extent_map(em);
	/* once for us */
	free_extent_map(em);

	unlock_chunks(root);
	btrfs_end_transaction(trans, root);
	return 0;
}

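/*
 * Walk the chunk tree back to front and relocate every SYSTEM chunk.
 * Chunks that fail with -ENOSPC are retried once before giving up.
 */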
static int btrfs_relocate_sys_chunks(struct btrfs_root *root)
{
	struct btrfs_root *chunk_root = root->fs_info->chunk_root;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_chunk *chunk;
	struct btrfs_key key;
	struct btrfs_key found_key;
	u64 chunk_tree = chunk_root->root_key.objectid;
	u64 chunk_type;
	bool retried = false;
	int failed = 0;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

again:
	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.offset = (u64)-1;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	while (1) {
		ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
		if (ret < 0)
			goto error;
		BUG_ON(ret == 0);

		ret = btrfs_previous_item(chunk_root, path, key.objectid,
					  key.type);
		if (ret < 0)
			goto error;
		if (ret > 0)
			break;

		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

		chunk = btrfs_item_ptr(leaf, path->slots[0],
				       struct btrfs_chunk);
		chunk_type = btrfs_chunk_type(leaf, chunk);
		btrfs_release_path(path);

		if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) {
			ret = btrfs_relocate_chunk(chunk_root, chunk_tree,
						   found_key.objectid,
						   found_key.offset);
			if (ret == -ENOSPC)
				failed++;
			else if (ret)
				BUG();
		}

		if (found_key.offset == 0)
			break;
		key.offset = found_key.offset - 1;
	}
	ret = 0;
	if (failed && !retried) {
		failed = 0;
		retried = true;
		goto again;
	} else if (failed && retried) {
		WARN_ON(1);
		ret = -ENOSPC;
	}
error:
	btrfs_free_path(path);
	return ret;
}

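/*
 * Persist the given balance control as the balance item, so that an
 * interrupted balance can be resumed after a remount.
 */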
static int insert_balance_item(struct btrfs_root *root,
			       struct btrfs_balance_control *bctl)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_balance_item *item;
	struct btrfs_disk_balance_args disk_bargs;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	int ret, err;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		btrfs_free_path(path);
		return PTR_ERR(trans);
	}

	key.objectid = BTRFS_BALANCE_OBJECTID;
	key.type = BTRFS_BALANCE_ITEM_KEY;
	key.offset = 0;

	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*item));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);

	memset_extent_buffer(leaf, 0, (unsigned long)item, sizeof(*item));

	btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->data);
	btrfs_set_balance_data(leaf, item, &disk_bargs);
	btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->meta);
	btrfs_set_balance_meta(leaf, item, &disk_bargs);
	btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->sys);
	btrfs_set_balance_sys(leaf, item, &disk_bargs);

	btrfs_set_balance_flags(leaf, item, bctl->flags);

	btrfs_mark_buffer_dirty(leaf);
out:
	btrfs_free_path(path);
	err = btrfs_commit_transaction(trans, root);
	if (err && !ret)
		ret = err;
	return ret;
}

static int del_balance_item(struct btrfs_root *root)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_path *path;
	struct btrfs_key key;
	int ret, err;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		btrfs_free_path(path);
		return PTR_ERR(trans);
	}

	key.objectid = BTRFS_BALANCE_OBJECTID;
	key.type = BTRFS_BALANCE_ITEM_KEY;
	key.offset = 0;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, root, path);
out:
	btrfs_free_path(path);
	err = btrfs_commit_transaction(trans, root);
	if (err && !ret)
		ret = err;
	return ret;
}

/*
 * This is a heuristic used to reduce the number of chunks balanced on
 * resume after balance was interrupted.
 */
static void update_balance_args(struct btrfs_balance_control *bctl)
{
	/*
	 * Turn on soft mode for chunk types that were being converted.
	 */
	if (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)
		bctl->data.flags |= BTRFS_BALANCE_ARGS_SOFT;
	if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)
		bctl->sys.flags |= BTRFS_BALANCE_ARGS_SOFT;
	if (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)
		bctl->meta.flags |= BTRFS_BALANCE_ARGS_SOFT;

	/*
	 * Turn on the usage filter if it is not already in use.  The
	 * idea is that chunks that we have already balanced should be
	 * reasonably full.  Don't do it for chunks that are being
	 * converted - that will keep us from relocating unconverted
	 * (albeit full) chunks.
	 */
	if (!(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE) &&
	    !(bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
		bctl->data.flags |= BTRFS_BALANCE_ARGS_USAGE;
		bctl->data.usage = 90;
	}
	if (!(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE) &&
	    !(bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
		bctl->sys.flags |= BTRFS_BALANCE_ARGS_USAGE;
		bctl->sys.usage = 90;
	}
	if (!(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE) &&
	    !(bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
		bctl->meta.flags |= BTRFS_BALANCE_ARGS_USAGE;
		bctl->meta.usage = 90;
	}
}

/*
 * Should be called with both balance and volume mutexes held to
 * serialize other volume operations (add_dev/rm_dev/resize) with
 * restriper.  Same goes for unset_balance_control.
 */
static void set_balance_control(struct btrfs_balance_control *bctl)
{
	struct btrfs_fs_info *fs_info = bctl->fs_info;

	BUG_ON(fs_info->balance_ctl);

	spin_lock(&fs_info->balance_lock);
	fs_info->balance_ctl = bctl;
	spin_unlock(&fs_info->balance_lock);
}

static void unset_balance_control(struct btrfs_fs_info *fs_info)
{
	struct btrfs_balance_control *bctl = fs_info->balance_ctl;

	BUG_ON(!fs_info->balance_ctl);

	spin_lock(&fs_info->balance_lock);
	fs_info->balance_ctl = NULL;
	spin_unlock(&fs_info->balance_lock);

	kfree(bctl);
}

/*
 * Balance filters.  Return 1 if chunk should be filtered out
 * (should not be balanced).
 */
static int chunk_profiles_filter(u64 chunk_type,
				 struct btrfs_balance_args *bargs)
{
	chunk_type = chunk_to_extended(chunk_type) &
		     BTRFS_EXTENDED_PROFILE_MASK;

	if (bargs->profiles & chunk_type)
		return 0;

	return 1;
}

static u64 div_factor_fine(u64 num, int factor)
{
	if (factor <= 0)
		return 0;
	if (factor >= 100)
		return num;

	num *= factor;
	do_div(num, 100);
	return num;
}

static int chunk_usage_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset,
			      struct btrfs_balance_args *bargs)
{
	struct btrfs_block_group_cache *cache;
	u64 chunk_used, user_thresh;
	int ret = 1;

	cache = btrfs_lookup_block_group(fs_info, chunk_offset);
	chunk_used = btrfs_block_group_used(&cache->item);

	user_thresh = div_factor_fine(cache->key.offset, bargs->usage);
	if (chunk_used < user_thresh)
		ret = 0;

	btrfs_put_block_group(cache);
	return ret;
}

static int chunk_devid_filter(struct extent_buffer *leaf,
			      struct btrfs_chunk *chunk,
			      struct btrfs_balance_args *bargs)
{
	struct btrfs_stripe *stripe;
	int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
	int i;

	for (i = 0; i < num_stripes; i++) {
		stripe = btrfs_stripe_nr(chunk, i);
		if (btrfs_stripe_devid(leaf, stripe) == bargs->devid)
			return 0;
	}

	return 1;
}

/* [pstart, pend) */
static int chunk_drange_filter(struct extent_buffer *leaf,
			       struct btrfs_chunk *chunk,
			       u64 chunk_offset,
			       struct btrfs_balance_args *bargs)
{
	struct btrfs_stripe *stripe;
	int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
	u64 stripe_offset;
	u64 stripe_length;
	int factor;
	int i;

	if (!(bargs->flags & BTRFS_BALANCE_ARGS_DEVID))
		return 0;

	if (btrfs_chunk_type(leaf, chunk) & (BTRFS_BLOCK_GROUP_DUP |
	     BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10))
		factor = 2;
	else
		factor = 1;
	factor = num_stripes / factor;

	for (i = 0; i < num_stripes; i++) {
		stripe = btrfs_stripe_nr(chunk, i);
		if (btrfs_stripe_devid(leaf, stripe) != bargs->devid)
			continue;

		stripe_offset = btrfs_stripe_offset(leaf, stripe);
		stripe_length = btrfs_chunk_length(leaf, chunk);
		do_div(stripe_length, factor);

		if (stripe_offset < bargs->pend &&
		    stripe_offset + stripe_length > bargs->pstart)
			return 0;
	}

	return 1;
}

/* [vstart, vend) */
static int chunk_vrange_filter(struct extent_buffer *leaf,
			       struct btrfs_chunk *chunk,
			       u64 chunk_offset,
			       struct btrfs_balance_args *bargs)
{
	if (chunk_offset < bargs->vend &&
	    chunk_offset + btrfs_chunk_length(leaf, chunk) > bargs->vstart)
		/* at least part of the chunk is inside this vrange */
		return 0;

	return 1;
}

static int chunk_soft_convert_filter(u64 chunk_type,
				     struct btrfs_balance_args *bargs)
{
	if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT))
		return 0;

	chunk_type = chunk_to_extended(chunk_type) &
		     BTRFS_EXTENDED_PROFILE_MASK;

	if (bargs->target == chunk_type)
		return 1;

	return 0;
}

static int should_balance_chunk(struct btrfs_root *root,
				struct extent_buffer *leaf,
				struct btrfs_chunk *chunk, u64 chunk_offset)
{
	struct btrfs_balance_control *bctl = root->fs_info->balance_ctl;
	struct btrfs_balance_args *bargs = NULL;
	u64 chunk_type = btrfs_chunk_type(leaf, chunk);

	/* type filter */
	if (!((chunk_type & BTRFS_BLOCK_GROUP_TYPE_MASK) &
	      (bctl->flags & BTRFS_BALANCE_TYPE_MASK))) {
		return 0;
	}

	if (chunk_type & BTRFS_BLOCK_GROUP_DATA)
		bargs = &bctl->data;
	else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM)
		bargs = &bctl->sys;
	else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA)
		bargs = &bctl->meta;

	/* profiles filter */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_PROFILES) &&
	    chunk_profiles_filter(chunk_type, bargs)) {
		return 0;
	}

	/* usage filter */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE) &&
	    chunk_usage_filter(bctl->fs_info, chunk_offset, bargs)) {
		return 0;
	}

	/* devid filter */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_DEVID) &&
	    chunk_devid_filter(leaf, chunk, bargs)) {
		return 0;
	}

	/* drange filter, makes sense only with devid filter */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_DRANGE) &&
	    chunk_drange_filter(leaf, chunk, chunk_offset, bargs)) {
		return 0;
	}

	/* vrange filter */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_VRANGE) &&
	    chunk_vrange_filter(leaf, chunk, chunk_offset, bargs)) {
		return 0;
	}

	/* soft profile changing mode */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_SOFT) &&
	    chunk_soft_convert_filter(chunk_type, bargs)) {
		return 0;
	}

	return 1;
}

static u64 div_factor(u64 num, int factor)
{
	if (factor == 10)
		return num;
	num *= factor;
	do_div(num, 10);
	return num;
}

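/*
 * The main balance loop: shrink and re-grow every writable device to
 * free some room, then walk the chunk tree back to front twice - once
 * counting the chunks that match the balance filters, once actually
 * relocating them.
 */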
static int __btrfs_balance(struct btrfs_fs_info *fs_info)
{
	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
	struct btrfs_root *chunk_root = fs_info->chunk_root;
	struct btrfs_root *dev_root = fs_info->dev_root;
	struct list_head *devices;
	struct btrfs_device *device;
	u64 old_size;
	u64 size_to_free;
	struct btrfs_chunk *chunk;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_trans_handle *trans;
	struct extent_buffer *leaf;
	int slot;
	int ret;
	int enospc_errors = 0;
	bool counting = true;

	/* step one, make some room on all the devices */
	devices = &fs_info->fs_devices->devices;
	list_for_each_entry(device, devices, dev_list) {
		old_size = device->total_bytes;
		size_to_free = div_factor(old_size, 1);
		size_to_free = min(size_to_free, (u64)1 * 1024 * 1024);
		if (!device->writeable ||
		    device->total_bytes - device->bytes_used > size_to_free)
			continue;

		ret = btrfs_shrink_device(device, old_size - size_to_free);
		if (ret == -ENOSPC)
			break;
		BUG_ON(ret);

		trans = btrfs_start_transaction(dev_root, 0);
		BUG_ON(IS_ERR(trans));

		ret = btrfs_grow_device(trans, device, old_size);
		BUG_ON(ret);

		btrfs_end_transaction(trans, dev_root);
	}

	/* step two, relocate all the chunks */
	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto error;
	}

	/* zero out stat counters */
	spin_lock(&fs_info->balance_lock);
	memset(&bctl->stat, 0, sizeof(bctl->stat));
	spin_unlock(&fs_info->balance_lock);
again:
	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.offset = (u64)-1;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	while (1) {
		if ((!counting && atomic_read(&fs_info->balance_pause_req)) ||
		    atomic_read(&fs_info->balance_cancel_req)) {
			ret = -ECANCELED;
			goto error;
		}

		ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
		if (ret < 0)
			goto error;

		/*
		 * this shouldn't happen, it means the last relocate
		 * failed
		 */
		if (ret == 0)
			BUG(); /* FIXME break ? */

		ret = btrfs_previous_item(chunk_root, path, 0,
					  BTRFS_CHUNK_ITEM_KEY);
		if (ret) {
			ret = 0;
			break;
		}

		leaf = path->nodes[0];
		slot = path->slots[0];
		btrfs_item_key_to_cpu(leaf, &found_key, slot);

		if (found_key.objectid != key.objectid)
			break;

		/* chunk zero is special */
		if (found_key.offset == 0)
			break;

		chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);

		if (!counting) {
			spin_lock(&fs_info->balance_lock);
			bctl->stat.considered++;
			spin_unlock(&fs_info->balance_lock);
		}

		ret = should_balance_chunk(chunk_root, leaf, chunk,
					   found_key.offset);
		btrfs_release_path(path);
		if (!ret)
			goto loop;

		if (counting) {
			spin_lock(&fs_info->balance_lock);
			bctl->stat.expected++;
			spin_unlock(&fs_info->balance_lock);
			goto loop;
		}

		ret = btrfs_relocate_chunk(chunk_root,
					   chunk_root->root_key.objectid,
					   found_key.objectid,
					   found_key.offset);
		if (ret && ret != -ENOSPC)
			goto error;
		if (ret == -ENOSPC) {
			enospc_errors++;
		} else {
			spin_lock(&fs_info->balance_lock);
			bctl->stat.completed++;
			spin_unlock(&fs_info->balance_lock);
		}
loop:
		key.offset = found_key.offset - 1;
	}

	if (counting) {
		btrfs_release_path(path);
		counting = false;
		goto again;
	}
error:
	btrfs_free_path(path);
	if (enospc_errors) {
		printk(KERN_INFO "btrfs: %d enospc errors during balance\n",
		       enospc_errors);
		if (!ret)
			ret = -ENOSPC;
	}

	return ret;
}

/**
 * alloc_profile_is_valid - see if a given profile is valid and reduced
 * @flags: profile to validate
 * @extended: if true @flags is treated as an extended profile
 */
static int alloc_profile_is_valid(u64 flags, int extended)
{
	u64 mask = (extended ? BTRFS_EXTENDED_PROFILE_MASK :
		    BTRFS_BLOCK_GROUP_PROFILE_MASK);

	flags &= ~BTRFS_BLOCK_GROUP_TYPE_MASK;

	/* 1) check that all other bits are zeroed */
	if (flags & ~mask)
		return 0;

	/* 2) see if profile is reduced */
	if (flags == 0)
		return !extended; /* "0" is valid for usual profiles */

	/* true if exactly one bit set */
	return (flags & (flags - 1)) == 0;
}

static inline int balance_need_close(struct btrfs_fs_info *fs_info)
{
	/* cancel requested || normal exit path */
	return atomic_read(&fs_info->balance_cancel_req) ||
		(atomic_read(&fs_info->balance_pause_req) == 0 &&
		 atomic_read(&fs_info->balance_cancel_req) == 0);
}

static void __cancel_balance(struct btrfs_fs_info *fs_info)
{
	int ret;

	unset_balance_control(fs_info);
	ret = del_balance_item(fs_info->tree_root);
	BUG_ON(ret);
}

void update_ioctl_balance_args(struct btrfs_fs_info *fs_info, int lock,
			       struct btrfs_ioctl_balance_args *bargs);

/*
 * Should be called with both balance and volume mutexes held
 */
int btrfs_balance(struct btrfs_balance_control *bctl,
		  struct btrfs_ioctl_balance_args *bargs)
{
	struct btrfs_fs_info *fs_info = bctl->fs_info;
	u64 allowed;
	int mixed = 0;
	int ret;

	if (btrfs_fs_closing(fs_info) ||
	    atomic_read(&fs_info->balance_pause_req) ||
	    atomic_read(&fs_info->balance_cancel_req)) {
		ret = -EINVAL;
		goto out;
	}

	allowed = btrfs_super_incompat_flags(fs_info->super_copy);
	if (allowed & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
		mixed = 1;

	/*
	 * In case of mixed groups both data and meta should be picked,
	 * and identical options should be given for both of them.
	 */
	allowed = BTRFS_BALANCE_DATA | BTRFS_BALANCE_METADATA;
	if (mixed && (bctl->flags & allowed)) {
		if (!(bctl->flags & BTRFS_BALANCE_DATA) ||
		    !(bctl->flags & BTRFS_BALANCE_METADATA) ||
		    memcmp(&bctl->data, &bctl->meta, sizeof(bctl->data))) {
			printk(KERN_ERR "btrfs: with mixed groups data and "
			       "metadata balance options must be the same\n");
			ret = -EINVAL;
			goto out;
		}
	}

	allowed = BTRFS_AVAIL_ALLOC_BIT_SINGLE;
	if (fs_info->fs_devices->num_devices == 1)
		allowed |= BTRFS_BLOCK_GROUP_DUP;
	else if (fs_info->fs_devices->num_devices < 4)
		allowed |= (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1);
	else
		allowed |= (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1 |
			    BTRFS_BLOCK_GROUP_RAID10);

	if ((bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
	    (!alloc_profile_is_valid(bctl->data.target, 1) ||
	     (bctl->data.target & ~allowed))) {
		printk(KERN_ERR "btrfs: unable to start balance with target "
		       "data profile %llu\n",
		       (unsigned long long)bctl->data.target);
		ret = -EINVAL;
		goto out;
	}
	if ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
	    (!alloc_profile_is_valid(bctl->meta.target, 1) ||
	     (bctl->meta.target & ~allowed))) {
		printk(KERN_ERR "btrfs: unable to start balance with target "
		       "metadata profile %llu\n",
		       (unsigned long long)bctl->meta.target);
		ret = -EINVAL;
		goto out;
	}
	if ((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
	    (!alloc_profile_is_valid(bctl->sys.target, 1) ||
	     (bctl->sys.target & ~allowed))) {
		printk(KERN_ERR "btrfs: unable to start balance with target "
		       "system profile %llu\n",
		       (unsigned long long)bctl->sys.target);
		ret = -EINVAL;
		goto out;
	}

	/* allow dup'ed data chunks only in mixed mode */
	if (!mixed && (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
	    (bctl->data.target & BTRFS_BLOCK_GROUP_DUP)) {
		printk(KERN_ERR "btrfs: dup for data is not allowed\n");
		ret = -EINVAL;
		goto out;
	}

	/* allow to reduce meta or sys integrity only if force set */
	allowed = BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
		  BTRFS_BLOCK_GROUP_RAID10;
	if (((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
	     (fs_info->avail_system_alloc_bits & allowed) &&
	     !(bctl->sys.target & allowed)) ||
	    ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
	     (fs_info->avail_metadata_alloc_bits & allowed) &&
	     !(bctl->meta.target & allowed))) {
		if (bctl->flags & BTRFS_BALANCE_FORCE) {
			printk(KERN_INFO "btrfs: force reducing metadata "
			       "integrity\n");
		} else {
			printk(KERN_ERR "btrfs: balance will reduce metadata "
			       "integrity, use force if you want this\n");
			ret = -EINVAL;
			goto out;
		}
	}

	ret = insert_balance_item(fs_info->tree_root, bctl);
	if (ret && ret != -EEXIST)
		goto out;

	if (!(bctl->flags & BTRFS_BALANCE_RESUME)) {
		BUG_ON(ret == -EEXIST);
		set_balance_control(bctl);
	} else {
		BUG_ON(ret != -EEXIST);
		spin_lock(&fs_info->balance_lock);
		update_balance_args(bctl);
		spin_unlock(&fs_info->balance_lock);
	}

	atomic_inc(&fs_info->balance_running);
	mutex_unlock(&fs_info->balance_mutex);

	ret = __btrfs_balance(fs_info);

	mutex_lock(&fs_info->balance_mutex);
	atomic_dec(&fs_info->balance_running);

	if (bargs) {
		memset(bargs, 0, sizeof(*bargs));
		update_ioctl_balance_args(fs_info, 0, bargs);
	}

	if ((ret && ret != -ECANCELED && ret != -ENOSPC) ||
	    balance_need_close(fs_info)) {
		__cancel_balance(fs_info);
	}

	wake_up(&fs_info->balance_wait_q);

	return ret;
out:
	if (bctl->flags & BTRFS_BALANCE_RESUME)
		__cancel_balance(fs_info);
	else
		kfree(bctl);
	return ret;
}

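/*
 * Kernel thread that resumes an interrupted balance, unless the
 * skip_balance mount option was given.
 */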
static int balance_kthread(void *data)
{
	struct btrfs_balance_control *bctl =
			(struct btrfs_balance_control *)data;
	struct btrfs_fs_info *fs_info = bctl->fs_info;
	int ret = 0;

	mutex_lock(&fs_info->volume_mutex);
	mutex_lock(&fs_info->balance_mutex);

	set_balance_control(bctl);

	if (btrfs_test_opt(fs_info->tree_root, SKIP_BALANCE)) {
		printk(KERN_INFO "btrfs: force skipping balance\n");
	} else {
		printk(KERN_INFO "btrfs: continuing balance\n");
		ret = btrfs_balance(bctl, NULL);
	}

	mutex_unlock(&fs_info->balance_mutex);
	mutex_unlock(&fs_info->volume_mutex);
	return ret;
}

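/*
 * Look for a balance item left over from an interrupted balance and,
 * if one is found, resume the balance in a kernel thread.
 */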
int btrfs_recover_balance(struct btrfs_root *tree_root)
{
	struct task_struct *tsk;
	struct btrfs_balance_control *bctl;
	struct btrfs_balance_item *item;
	struct btrfs_disk_balance_args disk_bargs;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	bctl = kzalloc(sizeof(*bctl), GFP_NOFS);
	if (!bctl) {
		ret = -ENOMEM;
		goto out;
	}

	key.objectid = BTRFS_BALANCE_OBJECTID;
	key.type = BTRFS_BALANCE_ITEM_KEY;
	key.offset = 0;

	ret = btrfs_search_slot(NULL, tree_root, &key, path, 0, 0);
	if (ret < 0)
		goto out_bctl;
	if (ret > 0) { /* ret = -ENOENT; */
		ret = 0;
		goto out_bctl;
	}

	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);

	bctl->fs_info = tree_root->fs_info;
	bctl->flags = btrfs_balance_flags(leaf, item) | BTRFS_BALANCE_RESUME;

	btrfs_balance_data(leaf, item, &disk_bargs);
	btrfs_disk_balance_args_to_cpu(&bctl->data, &disk_bargs);
	btrfs_balance_meta(leaf, item, &disk_bargs);
	btrfs_disk_balance_args_to_cpu(&bctl->meta, &disk_bargs);
	btrfs_balance_sys(leaf, item, &disk_bargs);
	btrfs_disk_balance_args_to_cpu(&bctl->sys, &disk_bargs);

	tsk = kthread_run(balance_kthread, bctl, "btrfs-balance");
	if (IS_ERR(tsk))
		ret = PTR_ERR(tsk);
	else
		goto out;

out_bctl:
	kfree(bctl);
out:
	btrfs_free_path(path);
	return ret;
}

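/*
 * Pause a running balance.  Returns -ENOTCONN if no balance is in
 * progress.
 */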
int btrfs_pause_balance(struct btrfs_fs_info *fs_info)
{
	int ret = 0;

	mutex_lock(&fs_info->balance_mutex);
	if (!fs_info->balance_ctl) {
		mutex_unlock(&fs_info->balance_mutex);
		return -ENOTCONN;
	}

	if (atomic_read(&fs_info->balance_running)) {
		atomic_inc(&fs_info->balance_pause_req);
		mutex_unlock(&fs_info->balance_mutex);

		wait_event(fs_info->balance_wait_q,
			   atomic_read(&fs_info->balance_running) == 0);

		mutex_lock(&fs_info->balance_mutex);
		/* we are good with balance_ctl ripped off from under us */
		BUG_ON(atomic_read(&fs_info->balance_running));
		atomic_dec(&fs_info->balance_pause_req);
	} else {
		ret = -ENOTCONN;
	}

	mutex_unlock(&fs_info->balance_mutex);
	return ret;
}

int btrfs_cancel_balance(struct btrfs_fs_info *fs_info)
{
	mutex_lock(&fs_info->balance_mutex);
	if (!fs_info->balance_ctl) {
		mutex_unlock(&fs_info->balance_mutex);
		return -ENOTCONN;
	}

	atomic_inc(&fs_info->balance_cancel_req);
	/*
	 * if we are running, just wait and return; the balance item is
	 * deleted in btrfs_balance in this case
	 */
	if (atomic_read(&fs_info->balance_running)) {
		mutex_unlock(&fs_info->balance_mutex);
		wait_event(fs_info->balance_wait_q,
			   atomic_read(&fs_info->balance_running) == 0);
		mutex_lock(&fs_info->balance_mutex);
	} else {
		/* __cancel_balance needs volume_mutex */
		mutex_unlock(&fs_info->balance_mutex);
		mutex_lock(&fs_info->volume_mutex);
		mutex_lock(&fs_info->balance_mutex);

		if (fs_info->balance_ctl)
			__cancel_balance(fs_info);

		mutex_unlock(&fs_info->volume_mutex);
	}

	BUG_ON(fs_info->balance_ctl || atomic_read(&fs_info->balance_running));
	atomic_dec(&fs_info->balance_cancel_req);
	mutex_unlock(&fs_info->balance_mutex);
	return 0;
}

/*
 * shrinking a device means finding all of the device extents past
 * the new size, and then following the back refs to the chunks.
 * The chunk relocation code actually frees the device extent
 */
int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *dev_extent = NULL;
	struct btrfs_path *path;
	u64 length;
	u64 chunk_tree;
	u64 chunk_objectid;
	u64 chunk_offset;
	int ret;
	int slot;
	int failed = 0;
	bool retried = false;
	struct extent_buffer *l;
	struct btrfs_key key;
	struct btrfs_super_block *super_copy = root->fs_info->super_copy;
	u64 old_total = btrfs_super_total_bytes(super_copy);
	u64 old_size = device->total_bytes;
	u64 diff = device->total_bytes - new_size;

	if (new_size >= device->total_bytes)
		return -EINVAL;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->reada = 2;

	lock_chunks(root);

	device->total_bytes = new_size;
	if (device->writeable) {
		device->fs_devices->total_rw_bytes -= diff;
		spin_lock(&root->fs_info->free_chunk_lock);
		root->fs_info->free_chunk_space -= diff;
		spin_unlock(&root->fs_info->free_chunk_lock);
	}
	unlock_chunks(root);

again:
	key.objectid = device->devid;
	key.offset = (u64)-1;
	key.type = BTRFS_DEV_EXTENT_KEY;

	do {
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			goto done;

		ret = btrfs_previous_item(root, path, 0, key.type);
		if (ret < 0)
			goto done;
		if (ret) {
			ret = 0;
			btrfs_release_path(path);
			break;
		}

		l = path->nodes[0];
		slot = path->slots[0];
		btrfs_item_key_to_cpu(l, &key, path->slots[0]);

		if (key.objectid != device->devid) {
			btrfs_release_path(path);
			break;
		}

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		length = btrfs_dev_extent_length(l, dev_extent);

		if (key.offset + length <= new_size) {
			btrfs_release_path(path);
			break;
		}

		chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent);
		chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent);
		chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
		btrfs_release_path(path);

		ret = btrfs_relocate_chunk(root, chunk_tree, chunk_objectid,
					   chunk_offset);
		if (ret && ret != -ENOSPC)
			goto done;
		if (ret == -ENOSPC)
			failed++;
	} while (key.offset-- > 0);

	if (failed && !retried) {
		failed = 0;
		retried = true;
		goto again;
	} else if (failed && retried) {
		ret = -ENOSPC;
		lock_chunks(root);

		device->total_bytes = old_size;
		if (device->writeable)
			device->fs_devices->total_rw_bytes += diff;
		spin_lock(&root->fs_info->free_chunk_lock);
		root->fs_info->free_chunk_space += diff;
		spin_unlock(&root->fs_info->free_chunk_lock);
		unlock_chunks(root);
		goto done;
	}

	/* Shrinking succeeded, else we would be at "done". */
	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto done;
	}

	lock_chunks(root);

	device->disk_total_bytes = new_size;
	/* Now btrfs_update_device() will change the on-disk size. */
	ret = btrfs_update_device(trans, device);
	if (ret) {
		unlock_chunks(root);
		btrfs_end_transaction(trans, root);
		goto done;
	}
	WARN_ON(diff > old_total);
	btrfs_set_super_total_bytes(super_copy, old_total - diff);
	unlock_chunks(root);
	btrfs_end_transaction(trans, root);
done:
	btrfs_free_path(path);
	return ret;
}

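/*
 * Append the chunk described by @key and @chunk to the sys_chunk_array
 * in the in-memory super block copy.
 */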
static int btrfs_add_system_chunk(struct btrfs_root *root,
				  struct btrfs_key *key,
				  struct btrfs_chunk *chunk, int item_size)
{
	struct btrfs_super_block *super_copy = root->fs_info->super_copy;
	struct btrfs_disk_key disk_key;
	u32 array_size;
	u8 *ptr;

	array_size = btrfs_super_sys_array_size(super_copy);
	if (array_size + item_size > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE)
		return -EFBIG;

	ptr = super_copy->sys_chunk_array + array_size;
	btrfs_cpu_key_to_disk(&disk_key, key);
	memcpy(ptr, &disk_key, sizeof(disk_key));
	ptr += sizeof(disk_key);
	memcpy(ptr, chunk, item_size);
	item_size += sizeof(disk_key);
	btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
	return 0;
}

/*
 * sort the devices in descending order by max_avail, total_avail
 */
static int btrfs_cmp_device_info(const void *a, const void *b)
{
	const struct btrfs_device_info *di_a = a;
	const struct btrfs_device_info *di_b = b;

	if (di_a->max_avail > di_b->max_avail)
		return -1;
	if (di_a->max_avail < di_b->max_avail)
		return 1;
	if (di_a->total_avail > di_b->total_avail)
		return -1;
	if (di_a->total_avail < di_b->total_avail)
		return 1;
	return 0;
}

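/*
 * Allocate a new chunk of type @type: pick the devices with the
 * largest free extents, compute the stripe layout for the requested
 * RAID profile and build the map_lookup that describes it.
 */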
  2658. static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
  2659. struct btrfs_root *extent_root,
  2660. struct map_lookup **map_ret,
  2661. u64 *num_bytes_out, u64 *stripe_size_out,
  2662. u64 start, u64 type)
  2663. {
  2664. struct btrfs_fs_info *info = extent_root->fs_info;
  2665. struct btrfs_fs_devices *fs_devices = info->fs_devices;
  2666. struct list_head *cur;
  2667. struct map_lookup *map = NULL;
  2668. struct extent_map_tree *em_tree;
  2669. struct extent_map *em;
  2670. struct btrfs_device_info *devices_info = NULL;
  2671. u64 total_avail;
  2672. int num_stripes; /* total number of stripes to allocate */
  2673. int sub_stripes; /* sub_stripes info for map */
  2674. int dev_stripes; /* stripes per dev */
  2675. int devs_max; /* max devs to use */
  2676. int devs_min; /* min devs needed */
  2677. int devs_increment; /* ndevs has to be a multiple of this */
  2678. int ncopies; /* how many copies to data has */
  2679. int ret;
  2680. u64 max_stripe_size;
  2681. u64 max_chunk_size;
  2682. u64 stripe_size;
  2683. u64 num_bytes;
  2684. int ndevs;
  2685. int i;
  2686. int j;
  2687. BUG_ON(!alloc_profile_is_valid(type, 0));
  2688. if (list_empty(&fs_devices->alloc_list))
  2689. return -ENOSPC;
  2690. sub_stripes = 1;
  2691. dev_stripes = 1;
  2692. devs_increment = 1;
  2693. ncopies = 1;
  2694. devs_max = 0; /* 0 == as many as possible */
  2695. devs_min = 1;
  2696. /*
  2697. * define the properties of each RAID type.
  2698. * FIXME: move this to a global table and use it in all RAID
  2699. * calculation code
  2700. */
  2701. if (type & (BTRFS_BLOCK_GROUP_DUP)) {
  2702. dev_stripes = 2;
  2703. ncopies = 2;
  2704. devs_max = 1;
  2705. } else if (type & (BTRFS_BLOCK_GROUP_RAID0)) {
  2706. devs_min = 2;
  2707. } else if (type & (BTRFS_BLOCK_GROUP_RAID1)) {
  2708. devs_increment = 2;
  2709. ncopies = 2;
  2710. devs_max = 2;
  2711. devs_min = 2;
  2712. } else if (type & (BTRFS_BLOCK_GROUP_RAID10)) {
  2713. sub_stripes = 2;
  2714. devs_increment = 2;
  2715. ncopies = 2;
  2716. devs_min = 4;
  2717. } else {
  2718. devs_max = 1;
  2719. }
	if (type & BTRFS_BLOCK_GROUP_DATA) {
		max_stripe_size = 1024 * 1024 * 1024;
		max_chunk_size = 10 * max_stripe_size;
	} else if (type & BTRFS_BLOCK_GROUP_METADATA) {
		/* for larger filesystems, use larger metadata chunks */
		if (fs_devices->total_rw_bytes > 50ULL * 1024 * 1024 * 1024)
			max_stripe_size = 1024 * 1024 * 1024;
		else
			max_stripe_size = 256 * 1024 * 1024;
		max_chunk_size = max_stripe_size;
	} else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
		max_stripe_size = 32 * 1024 * 1024;
		max_chunk_size = 2 * max_stripe_size;
	} else {
		printk(KERN_ERR "btrfs: invalid chunk type 0x%llx requested\n",
		       type);
		BUG_ON(1);
	}

	/* we don't want a chunk larger than 10% of writeable space */
	max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1),
			     max_chunk_size);

	devices_info = kzalloc(sizeof(*devices_info) * fs_devices->rw_devices,
			       GFP_NOFS);
	if (!devices_info)
		return -ENOMEM;
	cur = fs_devices->alloc_list.next;

	/*
	 * in the first pass through the devices list, we gather information
	 * about the available holes on each device.
	 */
	ndevs = 0;
	while (cur != &fs_devices->alloc_list) {
		struct btrfs_device *device;
		u64 max_avail;
		u64 dev_offset;

		device = list_entry(cur, struct btrfs_device, dev_alloc_list);

		cur = cur->next;

		if (!device->writeable) {
			printk(KERN_ERR
			       "btrfs: read-only device in alloc_list\n");
			WARN_ON(1);
			continue;
		}

		if (!device->in_fs_metadata)
			continue;

		if (device->total_bytes > device->bytes_used)
			total_avail = device->total_bytes - device->bytes_used;
		else
			total_avail = 0;

		/* If there is no space on this device, skip it. */
		if (total_avail == 0)
			continue;

		ret = find_free_dev_extent(device,
					   max_stripe_size * dev_stripes,
					   &dev_offset, &max_avail);
		if (ret && ret != -ENOSPC)
			goto error;

		if (ret == 0)
			max_avail = max_stripe_size * dev_stripes;

		if (max_avail < BTRFS_STRIPE_LEN * dev_stripes)
			continue;

		devices_info[ndevs].dev_offset = dev_offset;
		devices_info[ndevs].max_avail = max_avail;
		devices_info[ndevs].total_avail = total_avail;
		devices_info[ndevs].dev = device;
		++ndevs;
	}
	/*
	 * now sort the devices by hole size / available space
	 */
	sort(devices_info, ndevs, sizeof(struct btrfs_device_info),
	     btrfs_cmp_device_info, NULL);

	/* round down to number of usable stripes */
	ndevs -= ndevs % devs_increment;

	if (ndevs < devs_increment * sub_stripes || ndevs < devs_min) {
		ret = -ENOSPC;
		goto error;
	}

	if (devs_max && ndevs > devs_max)
		ndevs = devs_max;
	/*
	 * the primary goal is to maximize the number of stripes, so use as many
	 * devices as possible, even if the stripes are not maximum sized.
	 */
	stripe_size = devices_info[ndevs-1].max_avail;
	num_stripes = ndevs * dev_stripes;

	if (stripe_size * num_stripes > max_chunk_size * ncopies) {
		stripe_size = max_chunk_size * ncopies;
		do_div(stripe_size, num_stripes);
	}

	do_div(stripe_size, dev_stripes);
	do_div(stripe_size, BTRFS_STRIPE_LEN);
	stripe_size *= BTRFS_STRIPE_LEN;
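
	/*
	 * Worked example (illustrative numbers): a RAID1 data chunk on two
	 * devices whose smallest hole is 1300M.  ncopies = 2,
	 * dev_stripes = 1, num_stripes = 2, max_chunk_size = 10G, so the
	 * 10G * 2 / 2 cap does not bite and stripe_size stays 1300M,
	 * rounded down to a multiple of BTRFS_STRIPE_LEN (64K).  The chunk
	 * then provides num_bytes = stripe_size * (num_stripes / ncopies)
	 * = 1300M of logical space backed by two physical copies.
	 */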
	map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
	if (!map) {
		ret = -ENOMEM;
		goto error;
	}
	map->num_stripes = num_stripes;

	for (i = 0; i < ndevs; ++i) {
		for (j = 0; j < dev_stripes; ++j) {
			int s = i * dev_stripes + j;
			map->stripes[s].dev = devices_info[i].dev;
			map->stripes[s].physical = devices_info[i].dev_offset +
						   j * stripe_size;
		}
	}
	map->sector_size = extent_root->sectorsize;
	map->stripe_len = BTRFS_STRIPE_LEN;
	map->io_align = BTRFS_STRIPE_LEN;
	map->io_width = BTRFS_STRIPE_LEN;
	map->type = type;
	map->sub_stripes = sub_stripes;

	*map_ret = map;
	num_bytes = stripe_size * (num_stripes / ncopies);

	*stripe_size_out = stripe_size;
	*num_bytes_out = num_bytes;

	trace_btrfs_chunk_alloc(info->chunk_root, map, start, num_bytes);

	em = alloc_extent_map();
	if (!em) {
		ret = -ENOMEM;
		goto error;
	}
	em->bdev = (struct block_device *)map;
	em->start = start;
	em->len = num_bytes;
	em->block_start = 0;
	em->block_len = em->len;

	em_tree = &extent_root->fs_info->mapping_tree.map_tree;
	write_lock(&em_tree->lock);
	ret = add_extent_mapping(em_tree, em);
	write_unlock(&em_tree->lock);
	BUG_ON(ret);
	free_extent_map(em);

	ret = btrfs_make_block_group(trans, extent_root, 0, type,
				     BTRFS_FIRST_CHUNK_TREE_OBJECTID,
				     start, num_bytes);
	BUG_ON(ret);

	for (i = 0; i < map->num_stripes; ++i) {
		struct btrfs_device *device;
		u64 dev_offset;

		device = map->stripes[i].dev;
		dev_offset = map->stripes[i].physical;

		ret = btrfs_alloc_dev_extent(trans, device,
					     info->chunk_root->root_key.objectid,
					     BTRFS_FIRST_CHUNK_TREE_OBJECTID,
					     start, dev_offset, stripe_size);
		BUG_ON(ret);
	}

	kfree(devices_info);
	return 0;

error:
	kfree(map);
	kfree(devices_info);
	return ret;
}

static int __finish_chunk_alloc(struct btrfs_trans_handle *trans,
				struct btrfs_root *extent_root,
				struct map_lookup *map, u64 chunk_offset,
				u64 chunk_size, u64 stripe_size)
{
	u64 dev_offset;
	struct btrfs_key key;
	struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
	struct btrfs_device *device;
	struct btrfs_chunk *chunk;
	struct btrfs_stripe *stripe;
	size_t item_size = btrfs_chunk_item_size(map->num_stripes);
	int index = 0;
	int ret;

	chunk = kzalloc(item_size, GFP_NOFS);
	if (!chunk)
		return -ENOMEM;

	index = 0;
	while (index < map->num_stripes) {
		device = map->stripes[index].dev;
		device->bytes_used += stripe_size;
		ret = btrfs_update_device(trans, device);
		BUG_ON(ret);
		index++;
	}

	spin_lock(&extent_root->fs_info->free_chunk_lock);
	extent_root->fs_info->free_chunk_space -= (stripe_size *
						   map->num_stripes);
	spin_unlock(&extent_root->fs_info->free_chunk_lock);

	index = 0;
	stripe = &chunk->stripe;
	while (index < map->num_stripes) {
		device = map->stripes[index].dev;
		dev_offset = map->stripes[index].physical;

		btrfs_set_stack_stripe_devid(stripe, device->devid);
		btrfs_set_stack_stripe_offset(stripe, dev_offset);
		memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
		stripe++;
		index++;
	}

	btrfs_set_stack_chunk_length(chunk, chunk_size);
	btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
	btrfs_set_stack_chunk_stripe_len(chunk, map->stripe_len);
	btrfs_set_stack_chunk_type(chunk, map->type);
	btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes);
	btrfs_set_stack_chunk_io_align(chunk, map->stripe_len);
	btrfs_set_stack_chunk_io_width(chunk, map->stripe_len);
	btrfs_set_stack_chunk_sector_size(chunk, extent_root->sectorsize);
	btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes);

	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.type = BTRFS_CHUNK_ITEM_KEY;
	key.offset = chunk_offset;

	ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size);
	BUG_ON(ret);

	if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
		ret = btrfs_add_system_chunk(chunk_root, &key, chunk,
					     item_size);
		BUG_ON(ret);
	}

	kfree(chunk);
	return 0;
}

/*
 * Chunk allocation falls into two parts. The first part does the work
 * that makes the newly allocated chunk usable, but does not do any
 * operation that modifies the chunk tree. The second part does the work
 * that requires modifying the chunk tree. This division is important for
 * the bootstrap process of adding storage to a seed btrfs.
 */
int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
		      struct btrfs_root *extent_root, u64 type)
{
	u64 chunk_offset;
	u64 chunk_size;
	u64 stripe_size;
	struct map_lookup *map;
	struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
	int ret;

	ret = find_next_chunk(chunk_root, BTRFS_FIRST_CHUNK_TREE_OBJECTID,
			      &chunk_offset);
	if (ret)
		return ret;

	ret = __btrfs_alloc_chunk(trans, extent_root, &map, &chunk_size,
				  &stripe_size, chunk_offset, type);
	if (ret)
		return ret;

	ret = __finish_chunk_alloc(trans, extent_root, map, chunk_offset,
				   chunk_size, stripe_size);
	BUG_ON(ret);
	return 0;
}

static noinline int init_first_rw_device(struct btrfs_trans_handle *trans,
					 struct btrfs_root *root,
					 struct btrfs_device *device)
{
	u64 chunk_offset;
	u64 sys_chunk_offset;
	u64 chunk_size;
	u64 sys_chunk_size;
	u64 stripe_size;
	u64 sys_stripe_size;
	u64 alloc_profile;
	struct map_lookup *map;
	struct map_lookup *sys_map;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_root *extent_root = fs_info->extent_root;
	int ret;

	ret = find_next_chunk(fs_info->chunk_root,
			      BTRFS_FIRST_CHUNK_TREE_OBJECTID, &chunk_offset);
	if (ret)
		return ret;

	alloc_profile = BTRFS_BLOCK_GROUP_METADATA |
			fs_info->avail_metadata_alloc_bits;
	alloc_profile = btrfs_reduce_alloc_profile(root, alloc_profile);

	ret = __btrfs_alloc_chunk(trans, extent_root, &map, &chunk_size,
				  &stripe_size, chunk_offset, alloc_profile);
	BUG_ON(ret);

	sys_chunk_offset = chunk_offset + chunk_size;

	alloc_profile = BTRFS_BLOCK_GROUP_SYSTEM |
			fs_info->avail_system_alloc_bits;
	alloc_profile = btrfs_reduce_alloc_profile(root, alloc_profile);

	ret = __btrfs_alloc_chunk(trans, extent_root, &sys_map,
				  &sys_chunk_size, &sys_stripe_size,
				  sys_chunk_offset, alloc_profile);
	BUG_ON(ret);

	ret = btrfs_add_device(trans, fs_info->chunk_root, device);
	BUG_ON(ret);

	/*
	 * Modifying the chunk tree requires allocating new blocks from
	 * both the system block group and the metadata block group. So we
	 * can only perform operations that modify the chunk tree after
	 * both block groups have been created.
	 */
	ret = __finish_chunk_alloc(trans, extent_root, map, chunk_offset,
				   chunk_size, stripe_size);
	BUG_ON(ret);

	ret = __finish_chunk_alloc(trans, extent_root, sys_map,
				   sys_chunk_offset, sys_chunk_size,
				   sys_stripe_size);
	BUG_ON(ret);
	return 0;
}

int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset)
{
	struct extent_map *em;
	struct map_lookup *map;
	struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
	int readonly = 0;
	int i;

	read_lock(&map_tree->map_tree.lock);
	em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
	read_unlock(&map_tree->map_tree.lock);
	if (!em)
		return 1;

	if (btrfs_test_opt(root, DEGRADED)) {
		free_extent_map(em);
		return 0;
	}

	map = (struct map_lookup *)em->bdev;
	for (i = 0; i < map->num_stripes; i++) {
		if (!map->stripes[i].dev->writeable) {
			readonly = 1;
			break;
		}
	}
	free_extent_map(em);
	return readonly;
}

void btrfs_mapping_init(struct btrfs_mapping_tree *tree)
{
	extent_map_tree_init(&tree->map_tree);
}

void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree)
{
	struct extent_map *em;

	while (1) {
		write_lock(&tree->map_tree.lock);
		em = lookup_extent_mapping(&tree->map_tree, 0, (u64)-1);
		if (em)
			remove_extent_mapping(&tree->map_tree, em);
		write_unlock(&tree->map_tree.lock);
		if (!em)
			break;
		kfree(em->bdev);
		/* once for us */
		free_extent_map(em);
		/* once for the tree */
		free_extent_map(em);
	}
}

int btrfs_num_copies(struct btrfs_mapping_tree *map_tree, u64 logical, u64 len)
{
	struct extent_map *em;
	struct map_lookup *map;
	struct extent_map_tree *em_tree = &map_tree->map_tree;
	int ret;

	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, logical, len);
	read_unlock(&em_tree->lock);
	BUG_ON(!em);

	BUG_ON(em->start > logical || em->start + em->len < logical);
	map = (struct map_lookup *)em->bdev;
	if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1))
		ret = map->num_stripes;
	else if (map->type & BTRFS_BLOCK_GROUP_RAID10)
		ret = map->sub_stripes;
	else
		ret = 1;
	free_extent_map(em);
	return ret;
}

static int find_live_mirror(struct map_lookup *map, int first, int num,
			    int optimal)
{
	int i;

	if (map->stripes[optimal].dev->bdev)
		return optimal;
	for (i = first; i < first + num; i++) {
		if (map->stripes[i].dev->bdev)
			return i;
	}

	/* we couldn't find one that doesn't fail. Just return something
	 * and the io error handling code will clean up eventually
	 */
	return optimal;
}

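/*
 * Note: the callers below pass current->pid % num_stripes (or
 * % sub_stripes) as the "optimal" mirror, so different processes spread
 * their reads across the available copies while each single process
 * keeps hitting the same device.
 */
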
static int __btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
			     u64 logical, u64 *length,
			     struct btrfs_bio **bbio_ret,
			     int mirror_num)
{
	struct extent_map *em;
	struct map_lookup *map;
	struct extent_map_tree *em_tree = &map_tree->map_tree;
	u64 offset;
	u64 stripe_offset;
	u64 stripe_end_offset;
	u64 stripe_nr;
	u64 stripe_nr_orig;
	u64 stripe_nr_end;
	int stripe_index;
	int i;
	int ret = 0;
	int num_stripes;
	int max_errors = 0;
	struct btrfs_bio *bbio = NULL;

	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, logical, *length);
	read_unlock(&em_tree->lock);

	if (!em) {
		printk(KERN_CRIT "unable to find logical %llu len %llu\n",
		       (unsigned long long)logical,
		       (unsigned long long)*length);
		BUG();
	}

	BUG_ON(em->start > logical || em->start + em->len < logical);
	map = (struct map_lookup *)em->bdev;
	offset = logical - em->start;

	if (mirror_num > map->num_stripes)
		mirror_num = 0;

	stripe_nr = offset;
	/*
	 * stripe_nr counts the total number of stripes we have to stride
	 * to get to this block
	 */
	do_div(stripe_nr, map->stripe_len);

	stripe_offset = stripe_nr * map->stripe_len;
	BUG_ON(offset < stripe_offset);

	/* stripe_offset is the offset of this block in its stripe */
	stripe_offset = offset - stripe_offset;
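
	/*
	 * Worked example (illustrative numbers): with stripe_len = 64K and
	 * offset = 300K into the chunk, do_div leaves stripe_nr = 4 (the
	 * fifth stripe) and stripe_offset = 300K - 4 * 64K = 44K into that
	 * stripe.
	 */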
	if (rw & REQ_DISCARD)
		*length = min_t(u64, em->len - offset, *length);
	else if (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
		/* we limit the length of each bio to what fits in a stripe */
		*length = min_t(u64, em->len - offset,
				map->stripe_len - stripe_offset);
	} else {
		*length = em->len - offset;
	}

	if (!bbio_ret)
		goto out;

	num_stripes = 1;
	stripe_index = 0;
	stripe_nr_orig = stripe_nr;
	stripe_nr_end = (offset + *length + map->stripe_len - 1) &
			(~(map->stripe_len - 1));
	do_div(stripe_nr_end, map->stripe_len);
	stripe_end_offset = stripe_nr_end * map->stripe_len -
			    (offset + *length);

	if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
		if (rw & REQ_DISCARD)
			num_stripes = min_t(u64, map->num_stripes,
					    stripe_nr_end - stripe_nr_orig);
		stripe_index = do_div(stripe_nr, map->num_stripes);
	} else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
		if (rw & (REQ_WRITE | REQ_DISCARD))
			num_stripes = map->num_stripes;
		else if (mirror_num)
			stripe_index = mirror_num - 1;
		else {
			stripe_index = find_live_mirror(map, 0,
					    map->num_stripes,
					    current->pid % map->num_stripes);
			mirror_num = stripe_index + 1;
		}

	} else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
		if (rw & (REQ_WRITE | REQ_DISCARD)) {
			num_stripes = map->num_stripes;
		} else if (mirror_num) {
			stripe_index = mirror_num - 1;
		} else {
			mirror_num = 1;
		}

	} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
		int factor = map->num_stripes / map->sub_stripes;

		stripe_index = do_div(stripe_nr, factor);
		stripe_index *= map->sub_stripes;

		if (rw & REQ_WRITE)
			num_stripes = map->sub_stripes;
		else if (rw & REQ_DISCARD)
			num_stripes = min_t(u64, map->sub_stripes *
					    (stripe_nr_end - stripe_nr_orig),
					    map->num_stripes);
		else if (mirror_num)
			stripe_index += mirror_num - 1;
		else {
			stripe_index = find_live_mirror(map, stripe_index,
					      map->sub_stripes, stripe_index +
					      current->pid % map->sub_stripes);
			mirror_num = stripe_index + 1;
		}
	} else {
		/*
		 * after this do_div call, stripe_nr is the number of stripes
		 * on this device we have to walk to find the data, and
		 * stripe_index is the number of our device in the stripe array
		 */
		stripe_index = do_div(stripe_nr, map->num_stripes);
		mirror_num = stripe_index + 1;
	}
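
	/*
	 * Worked example for the RAID10 branch above (illustrative
	 * numbers): with num_stripes = 4 and sub_stripes = 2 there are
	 * factor = 2 mirrored pairs.  For stripe_nr = 5, do_div leaves
	 * stripe_nr = 2 (stripes to walk on each device) and a remainder
	 * of 1, so stripe_index = 1 * 2 = 2 selects the second pair; a
	 * write targets both members of the pair, a read picks one live
	 * mirror.
	 */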
	BUG_ON(stripe_index >= map->num_stripes);

	bbio = kzalloc(btrfs_bio_size(num_stripes), GFP_NOFS);
	if (!bbio) {
		ret = -ENOMEM;
		goto out;
	}
	atomic_set(&bbio->error, 0);

	if (rw & REQ_DISCARD) {
		int factor = 0;
		int sub_stripes = 0;
		u64 stripes_per_dev = 0;
		u32 remaining_stripes = 0;

		if (map->type &
		    (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID10)) {
			if (map->type & BTRFS_BLOCK_GROUP_RAID0)
				sub_stripes = 1;
			else
				sub_stripes = map->sub_stripes;

			factor = map->num_stripes / sub_stripes;
			stripes_per_dev = div_u64_rem(stripe_nr_end -
						      stripe_nr_orig,
						      factor,
						      &remaining_stripes);
		}
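
		/*
		 * Worked example (illustrative numbers): discarding 10
		 * stripes across a 4-device RAID0 gives factor = 4, so
		 * div_u64_rem(10, 4, ...) yields stripes_per_dev = 2 with
		 * remaining_stripes = 2; the first two devices in rotation
		 * get one extra stripe via the length adjustment below.
		 */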
		for (i = 0; i < num_stripes; i++) {
			bbio->stripes[i].physical =
				map->stripes[stripe_index].physical +
				stripe_offset + stripe_nr * map->stripe_len;
			bbio->stripes[i].dev = map->stripes[stripe_index].dev;

			if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
					 BTRFS_BLOCK_GROUP_RAID10)) {
				bbio->stripes[i].length = stripes_per_dev *
							  map->stripe_len;
				if (i / sub_stripes < remaining_stripes)
					bbio->stripes[i].length +=
						map->stripe_len;
				if (i < sub_stripes)
					bbio->stripes[i].length -=
						stripe_offset;
				if ((i / sub_stripes + 1) %
				    sub_stripes == remaining_stripes)
					bbio->stripes[i].length -=
						stripe_end_offset;
				if (i == sub_stripes - 1)
					stripe_offset = 0;
			} else
				bbio->stripes[i].length = *length;

			stripe_index++;
			if (stripe_index == map->num_stripes) {
				/* This could only happen for RAID0/10 */
				stripe_index = 0;
				stripe_nr++;
			}
		}
	} else {
		for (i = 0; i < num_stripes; i++) {
			bbio->stripes[i].physical =
				map->stripes[stripe_index].physical +
				stripe_offset +
				stripe_nr * map->stripe_len;
			bbio->stripes[i].dev =
				map->stripes[stripe_index].dev;
			stripe_index++;
		}
	}

	if (rw & REQ_WRITE) {
		if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
				 BTRFS_BLOCK_GROUP_RAID10 |
				 BTRFS_BLOCK_GROUP_DUP)) {
			max_errors = 1;
		}
	}

	*bbio_ret = bbio;
	bbio->num_stripes = num_stripes;
	bbio->max_errors = max_errors;
	bbio->mirror_num = mirror_num;
out:
	free_extent_map(em);
	return ret;
}

int btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
		    u64 logical, u64 *length,
		    struct btrfs_bio **bbio_ret, int mirror_num)
{
	return __btrfs_map_block(map_tree, rw, logical, length, bbio_ret,
				 mirror_num);
}

int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
		     u64 chunk_start, u64 physical, u64 devid,
		     u64 **logical, int *naddrs, int *stripe_len)
{
	struct extent_map_tree *em_tree = &map_tree->map_tree;
	struct extent_map *em;
	struct map_lookup *map;
	u64 *buf;
	u64 bytenr;
	u64 length;
	u64 stripe_nr;
	int i, j, nr = 0;

	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, chunk_start, 1);
	read_unlock(&em_tree->lock);

	BUG_ON(!em || em->start != chunk_start);
	map = (struct map_lookup *)em->bdev;

	length = em->len;
	if (map->type & BTRFS_BLOCK_GROUP_RAID10)
		do_div(length, map->num_stripes / map->sub_stripes);
	else if (map->type & BTRFS_BLOCK_GROUP_RAID0)
		do_div(length, map->num_stripes);

	buf = kzalloc(sizeof(u64) * map->num_stripes, GFP_NOFS);
	BUG_ON(!buf);

	for (i = 0; i < map->num_stripes; i++) {
		if (devid && map->stripes[i].dev->devid != devid)
			continue;
		if (map->stripes[i].physical > physical ||
		    map->stripes[i].physical + length <= physical)
			continue;

		stripe_nr = physical - map->stripes[i].physical;
		do_div(stripe_nr, map->stripe_len);

		if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
			stripe_nr = stripe_nr * map->num_stripes + i;
			do_div(stripe_nr, map->sub_stripes);
		} else if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
			stripe_nr = stripe_nr * map->num_stripes + i;
		}
		bytenr = chunk_start + stripe_nr * map->stripe_len;
		WARN_ON(nr >= map->num_stripes);
		for (j = 0; j < nr; j++) {
			if (buf[j] == bytenr)
				break;
		}
		if (j == nr) {
			WARN_ON(nr >= map->num_stripes);
			buf[nr++] = bytenr;
		}
	}

	*logical = buf;
	*naddrs = nr;
	*stripe_len = map->stripe_len;

	free_extent_map(em);
	return 0;
}

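/*
 * Worked example for btrfs_rmap_block (illustrative numbers): in a
 * two-device RAID0 chunk with stripe_len = 64K, a physical offset two
 * stripes into stripe array slot i = 1 gives stripe_nr = 2, then
 * stripe_nr * 2 + 1 = 5, i.e. logical byte chunk_start + 5 * 64K.
 * Duplicate logical addresses (as produced by mirrored profiles) are
 * filtered out by the buf[] scan above.
 */
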
static void btrfs_end_bio(struct bio *bio, int err)
{
	struct btrfs_bio *bbio = bio->bi_private;
	int is_orig_bio = 0;

	if (err)
		atomic_inc(&bbio->error);

	if (bio == bbio->orig_bio)
		is_orig_bio = 1;

	if (atomic_dec_and_test(&bbio->stripes_pending)) {
		if (!is_orig_bio) {
			bio_put(bio);
			bio = bbio->orig_bio;
		}
		bio->bi_private = bbio->private;
		bio->bi_end_io = bbio->end_io;
		bio->bi_bdev = (struct block_device *)
					(unsigned long)bbio->mirror_num;
		/* only send an error to the higher layers if it is
		 * beyond the tolerance of the multi-bio
		 */
		if (atomic_read(&bbio->error) > bbio->max_errors) {
			err = -EIO;
		} else {
			/*
			 * this bio is actually up to date, we didn't
			 * go over the max number of errors
			 */
			set_bit(BIO_UPTODATE, &bio->bi_flags);
			err = 0;
		}
		kfree(bbio);

		bio_endio(bio, err);
	} else if (!is_orig_bio) {
		bio_put(bio);
	}
}

struct async_sched {
	struct bio *bio;
	int rw;
	struct btrfs_fs_info *info;
	struct btrfs_work work;
};

/*
 * see run_scheduled_bios for a description of why bios are collected for
 * async submit.
 *
 * This will add one bio to the pending list for a device and make sure
 * the work struct is scheduled.
 */
static noinline int schedule_bio(struct btrfs_root *root,
				 struct btrfs_device *device,
				 int rw, struct bio *bio)
{
	int should_queue = 1;
	struct btrfs_pending_bios *pending_bios;

	/* don't bother with additional async steps for reads, right now */
	if (!(rw & REQ_WRITE)) {
		bio_get(bio);
		btrfsic_submit_bio(rw, bio);
		bio_put(bio);
		return 0;
	}

	/*
	 * nr_async_bios allows us to reliably return congestion to the
	 * higher layers. Otherwise, the async bio makes it appear we have
	 * made progress against dirty pages when we've really just put it
	 * on a queue for later
	 */
	atomic_inc(&root->fs_info->nr_async_bios);
	WARN_ON(bio->bi_next);
	bio->bi_next = NULL;
	bio->bi_rw |= rw;

	spin_lock(&device->io_lock);
	if (bio->bi_rw & REQ_SYNC)
		pending_bios = &device->pending_sync_bios;
	else
		pending_bios = &device->pending_bios;

	if (pending_bios->tail)
		pending_bios->tail->bi_next = bio;

	pending_bios->tail = bio;
	if (!pending_bios->head)
		pending_bios->head = bio;
	if (device->running_pending)
		should_queue = 0;

	spin_unlock(&device->io_lock);

	if (should_queue)
		btrfs_queue_worker(&root->fs_info->submit_workers,
				   &device->work);
	return 0;
}

int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
		  int mirror_num, int async_submit)
{
	struct btrfs_mapping_tree *map_tree;
	struct btrfs_device *dev;
	struct bio *first_bio = bio;
	u64 logical = (u64)bio->bi_sector << 9;
	u64 length = 0;
	u64 map_length;
	int ret;
	int dev_nr = 0;
	int total_devs = 1;
	struct btrfs_bio *bbio = NULL;

	length = bio->bi_size;
	map_tree = &root->fs_info->mapping_tree;
	map_length = length;

	ret = btrfs_map_block(map_tree, rw, logical, &map_length, &bbio,
			      mirror_num);
	BUG_ON(ret);

	total_devs = bbio->num_stripes;
	if (map_length < length) {
		printk(KERN_CRIT "mapping failed logical %llu bio len %llu "
		       "len %llu\n", (unsigned long long)logical,
		       (unsigned long long)length,
		       (unsigned long long)map_length);
		BUG();
	}

	bbio->orig_bio = first_bio;
	bbio->private = first_bio->bi_private;
	bbio->end_io = first_bio->bi_end_io;
	atomic_set(&bbio->stripes_pending, bbio->num_stripes);

	while (dev_nr < total_devs) {
		if (dev_nr < total_devs - 1) {
			bio = bio_clone(first_bio, GFP_NOFS);
			BUG_ON(!bio);
		} else {
			bio = first_bio;
		}
		bio->bi_private = bbio;
		bio->bi_end_io = btrfs_end_bio;
		bio->bi_sector = bbio->stripes[dev_nr].physical >> 9;
		dev = bbio->stripes[dev_nr].dev;
		if (dev && dev->bdev && (rw != WRITE || dev->writeable)) {
			pr_debug("btrfs_map_bio: rw %d, sector=%llu, dev=%lu "
				 "(%s id %llu), size=%u\n", rw,
				 (u64)bio->bi_sector, (u_long)dev->bdev->bd_dev,
				 dev->name, dev->devid, bio->bi_size);
			bio->bi_bdev = dev->bdev;
			if (async_submit)
				schedule_bio(root, dev, rw, bio);
			else
				btrfsic_submit_bio(rw, bio);
		} else {
			bio->bi_bdev = root->fs_info->fs_devices->latest_bdev;
			bio->bi_sector = logical >> 9;
			bio_endio(bio, -EIO);
		}
		dev_nr++;
	}
	return 0;
}

struct btrfs_device *btrfs_find_device(struct btrfs_root *root, u64 devid,
				       u8 *uuid, u8 *fsid)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *cur_devices;

	cur_devices = root->fs_info->fs_devices;
	while (cur_devices) {
		if (!fsid ||
		    !memcmp(cur_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
			device = __find_device(&cur_devices->devices,
					       devid, uuid);
			if (device)
				return device;
		}
		cur_devices = cur_devices->seed;
	}
	return NULL;
}

static struct btrfs_device *add_missing_dev(struct btrfs_root *root,
					    u64 devid, u8 *dev_uuid)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;

	device = kzalloc(sizeof(*device), GFP_NOFS);
	if (!device)
		return NULL;
	list_add(&device->dev_list,
		 &fs_devices->devices);
	device->dev_root = root->fs_info->dev_root;
	device->devid = devid;
	device->work.func = pending_bios_fn;
	device->fs_devices = fs_devices;
	device->missing = 1;
	fs_devices->num_devices++;
	fs_devices->missing_devices++;
	spin_lock_init(&device->io_lock);
	INIT_LIST_HEAD(&device->dev_alloc_list);
	memcpy(device->uuid, dev_uuid, BTRFS_UUID_SIZE);

	return device;
}

static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
			  struct extent_buffer *leaf,
			  struct btrfs_chunk *chunk)
{
	struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
	struct map_lookup *map;
	struct extent_map *em;
	u64 logical;
	u64 length;
	u64 devid;
	u8 uuid[BTRFS_UUID_SIZE];
	int num_stripes;
	int ret;
	int i;

	logical = key->offset;
	length = btrfs_chunk_length(leaf, chunk);

	read_lock(&map_tree->map_tree.lock);
	em = lookup_extent_mapping(&map_tree->map_tree, logical, 1);
	read_unlock(&map_tree->map_tree.lock);

	/* already mapped? */
	if (em && em->start <= logical && em->start + em->len > logical) {
		free_extent_map(em);
		return 0;
	} else if (em) {
		free_extent_map(em);
	}

	em = alloc_extent_map();
	if (!em)
		return -ENOMEM;
	num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
	map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
	if (!map) {
		free_extent_map(em);
		return -ENOMEM;
	}

	em->bdev = (struct block_device *)map;
	em->start = logical;
	em->len = length;
	em->block_start = 0;
	em->block_len = em->len;

	map->num_stripes = num_stripes;
	map->io_width = btrfs_chunk_io_width(leaf, chunk);
	map->io_align = btrfs_chunk_io_align(leaf, chunk);
	map->sector_size = btrfs_chunk_sector_size(leaf, chunk);
	map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
	map->type = btrfs_chunk_type(leaf, chunk);
	map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
	for (i = 0; i < num_stripes; i++) {
		map->stripes[i].physical =
			btrfs_stripe_offset_nr(leaf, chunk, i);
		devid = btrfs_stripe_devid_nr(leaf, chunk, i);
		read_extent_buffer(leaf, uuid, (unsigned long)
				   btrfs_stripe_dev_uuid_nr(chunk, i),
				   BTRFS_UUID_SIZE);
		map->stripes[i].dev = btrfs_find_device(root, devid, uuid,
							NULL);
		if (!map->stripes[i].dev && !btrfs_test_opt(root, DEGRADED)) {
			kfree(map);
			free_extent_map(em);
			return -EIO;
		}
		if (!map->stripes[i].dev) {
			map->stripes[i].dev =
				add_missing_dev(root, devid, uuid);
			if (!map->stripes[i].dev) {
				kfree(map);
				free_extent_map(em);
				return -EIO;
			}
		}
		map->stripes[i].dev->in_fs_metadata = 1;
	}

	write_lock(&map_tree->map_tree.lock);
	ret = add_extent_mapping(&map_tree->map_tree, em);
	write_unlock(&map_tree->map_tree.lock);
	BUG_ON(ret);
	free_extent_map(em);

	return 0;
}

static int fill_device_from_item(struct extent_buffer *leaf,
				 struct btrfs_dev_item *dev_item,
				 struct btrfs_device *device)
{
	unsigned long ptr;

	device->devid = btrfs_device_id(leaf, dev_item);
	device->disk_total_bytes = btrfs_device_total_bytes(leaf, dev_item);
	device->total_bytes = device->disk_total_bytes;
	device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
	device->type = btrfs_device_type(leaf, dev_item);
	device->io_align = btrfs_device_io_align(leaf, dev_item);
	device->io_width = btrfs_device_io_width(leaf, dev_item);
	device->sector_size = btrfs_device_sector_size(leaf, dev_item);

	ptr = (unsigned long)btrfs_device_uuid(dev_item);
	read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);

	return 0;
}

static int open_seed_devices(struct btrfs_root *root, u8 *fsid)
{
	struct btrfs_fs_devices *fs_devices;
	int ret;

	BUG_ON(!mutex_is_locked(&uuid_mutex));

	fs_devices = root->fs_info->fs_devices->seed;
	while (fs_devices) {
		if (!memcmp(fs_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
			ret = 0;
			goto out;
		}
		fs_devices = fs_devices->seed;
	}

	fs_devices = find_fsid(fsid);
	if (!fs_devices) {
		ret = -ENOENT;
		goto out;
	}

	fs_devices = clone_fs_devices(fs_devices);
	if (IS_ERR(fs_devices)) {
		ret = PTR_ERR(fs_devices);
		goto out;
	}

	ret = __btrfs_open_devices(fs_devices, FMODE_READ,
				   root->fs_info->bdev_holder);
	if (ret)
		goto out;

	if (!fs_devices->seeding) {
		__btrfs_close_devices(fs_devices);
		free_fs_devices(fs_devices);
		ret = -EINVAL;
		goto out;
	}

	fs_devices->seed = root->fs_info->fs_devices->seed;
	root->fs_info->fs_devices->seed = fs_devices;
out:
	return ret;
}

static int read_one_dev(struct btrfs_root *root,
			struct extent_buffer *leaf,
			struct btrfs_dev_item *dev_item)
{
	struct btrfs_device *device;
	u64 devid;
	int ret;
	u8 fs_uuid[BTRFS_UUID_SIZE];
	u8 dev_uuid[BTRFS_UUID_SIZE];

	devid = btrfs_device_id(leaf, dev_item);
	read_extent_buffer(leaf, dev_uuid,
			   (unsigned long)btrfs_device_uuid(dev_item),
			   BTRFS_UUID_SIZE);
	read_extent_buffer(leaf, fs_uuid,
			   (unsigned long)btrfs_device_fsid(dev_item),
			   BTRFS_UUID_SIZE);

	if (memcmp(fs_uuid, root->fs_info->fsid, BTRFS_UUID_SIZE)) {
		ret = open_seed_devices(root, fs_uuid);
		if (ret && !btrfs_test_opt(root, DEGRADED))
			return ret;
	}

	device = btrfs_find_device(root, devid, dev_uuid, fs_uuid);
	if (!device || !device->bdev) {
		if (!btrfs_test_opt(root, DEGRADED))
			return -EIO;

		if (!device) {
			printk(KERN_WARNING "warning devid %llu missing\n",
			       (unsigned long long)devid);
			device = add_missing_dev(root, devid, dev_uuid);
			if (!device)
				return -ENOMEM;
		} else if (!device->missing) {
			/*
			 * this happens when a device that was properly setup
			 * in the device info lists suddenly goes bad.
			 * device->bdev is NULL, and so we have to set
			 * device->missing to one here
			 */
			root->fs_info->fs_devices->missing_devices++;
			device->missing = 1;
		}
	}

	if (device->fs_devices != root->fs_info->fs_devices) {
		BUG_ON(device->writeable);
		if (device->generation !=
		    btrfs_device_generation(leaf, dev_item))
			return -EINVAL;
	}

	fill_device_from_item(leaf, dev_item, device);
	device->dev_root = root->fs_info->dev_root;
	device->in_fs_metadata = 1;
	if (device->writeable) {
		device->fs_devices->total_rw_bytes += device->total_bytes;
		spin_lock(&root->fs_info->free_chunk_lock);
		root->fs_info->free_chunk_space += device->total_bytes -
			device->bytes_used;
		spin_unlock(&root->fs_info->free_chunk_lock);
	}
	ret = 0;
	return ret;
}

int btrfs_read_sys_array(struct btrfs_root *root)
{
	struct btrfs_super_block *super_copy = root->fs_info->super_copy;
	struct extent_buffer *sb;
	struct btrfs_disk_key *disk_key;
	struct btrfs_chunk *chunk;
	u8 *ptr;
	unsigned long sb_ptr;
	int ret = 0;
	u32 num_stripes;
	u32 array_size;
	u32 len = 0;
	u32 cur;
	struct btrfs_key key;

	sb = btrfs_find_create_tree_block(root, BTRFS_SUPER_INFO_OFFSET,
					  BTRFS_SUPER_INFO_SIZE);
	if (!sb)
		return -ENOMEM;
	btrfs_set_buffer_uptodate(sb);
	btrfs_set_buffer_lockdep_class(root->root_key.objectid, sb, 0);
	/*
	 * The sb extent buffer is artificial and just used to read the
	 * system array.  The btrfs_set_buffer_uptodate() call does not
	 * properly mark all of its pages up-to-date when the page is
	 * larger: the extent does not cover the whole page and
	 * consequently check_page_uptodate does not find all the page's
	 * extents up-to-date (the hole beyond sb), and
	 * write_extent_buffer then triggers a WARN_ON.
	 *
	 * Regular short extents go through the
	 * mark_extent_buffer_dirty/writeback cycle, but sb spans only
	 * this function.  Add an explicit SetPageUptodate call to silence
	 * the warning, eg. on PowerPC 64.
	 */
	if (PAGE_CACHE_SIZE > BTRFS_SUPER_INFO_SIZE)
		SetPageUptodate(sb->pages[0]);

	write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
	array_size = btrfs_super_sys_array_size(super_copy);

	ptr = super_copy->sys_chunk_array;
	sb_ptr = offsetof(struct btrfs_super_block, sys_chunk_array);
	cur = 0;
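
	/*
	 * The array is a packed sequence of (disk key, chunk item) pairs,
	 * roughly:
	 *
	 *	[btrfs_disk_key][btrfs_chunk + stripes][btrfs_disk_key]...
	 *
	 * so each iteration below advances past one key and then past the
	 * variable-sized chunk item it describes.
	 */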
	while (cur < array_size) {
		disk_key = (struct btrfs_disk_key *)ptr;
		btrfs_disk_key_to_cpu(&key, disk_key);

		len = sizeof(*disk_key); ptr += len;
		sb_ptr += len;
		cur += len;

		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
			chunk = (struct btrfs_chunk *)sb_ptr;
			ret = read_one_chunk(root, &key, sb, chunk);
			if (ret)
				break;
			num_stripes = btrfs_chunk_num_stripes(sb, chunk);
			len = btrfs_chunk_item_size(num_stripes);
		} else {
			ret = -EIO;
			break;
		}
		ptr += len;
		sb_ptr += len;
		cur += len;
	}
	free_extent_buffer(sb);
	return ret;
}

int btrfs_read_chunk_tree(struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	int ret;
	int slot;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	mutex_lock(&uuid_mutex);
	lock_chunks(root);

	/* first we search for all of the device items, and then we
	 * read in all of the chunk items. This way we can create chunk
	 * mappings that reference all of the devices that are found
	 */
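	/*
	 * Concretely: the first pass starts the search at
	 * (BTRFS_DEV_ITEMS_OBJECTID, 0, 0) and handles DEV_ITEM entries;
	 * once that objectid is exhausted, key.objectid is reset to 0 and
	 * the "again" pass walks the tree once more for CHUNK_ITEM
	 * entries.
	 */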
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.offset = 0;
	key.type = 0;
again:
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;
	while (1) {
		leaf = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto error;
			break;
		}
		btrfs_item_key_to_cpu(leaf, &found_key, slot);
		if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
			if (found_key.objectid != BTRFS_DEV_ITEMS_OBJECTID)
				break;
			if (found_key.type == BTRFS_DEV_ITEM_KEY) {
				struct btrfs_dev_item *dev_item;
				dev_item = btrfs_item_ptr(leaf, slot,
						  struct btrfs_dev_item);
				ret = read_one_dev(root, leaf, dev_item);
				if (ret)
					goto error;
			}
		} else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
			struct btrfs_chunk *chunk;
			chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
			ret = read_one_chunk(root, &found_key, leaf, chunk);
			if (ret)
				goto error;
		}
		path->slots[0]++;
	}
	if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
		key.objectid = 0;
		btrfs_release_path(path);
		goto again;
	}
	ret = 0;
error:
	unlock_chunks(root);
	mutex_unlock(&uuid_mutex);
	btrfs_free_path(path);
	return ret;
}