disk-io.c
/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>
#include <linux/swap.h>
#include <linux/radix-tree.h>
#include <linux/writeback.h>
#include <linux/buffer_head.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/slab.h>
#include <linux/migrate.h>
#include <linux/ratelimit.h>
#include <linux/uuid.h>
#include <linux/semaphore.h>
#include <asm/unaligned.h>
#include "ctree.h"
#include "disk-io.h"
#include "hash.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "volumes.h"
#include "print-tree.h"
#include "locking.h"
#include "tree-log.h"
#include "free-space-cache.h"
#include "inode-map.h"
#include "check-integrity.h"
#include "rcu-string.h"
#include "dev-replace.h"
#include "raid56.h"
#include "sysfs.h"
#include "qgroup.h"

#ifdef CONFIG_X86
#include <asm/cpufeature.h>
#endif
static const struct extent_io_ops btree_extent_io_ops;
static void end_workqueue_fn(struct btrfs_work *work);
static void free_fs_root(struct btrfs_root *root);
static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
				   int read_only);
static void btrfs_destroy_ordered_extents(struct btrfs_root *root);
static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
				      struct btrfs_root *root);
static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root);
static int btrfs_destroy_marked_extents(struct btrfs_root *root,
					struct extent_io_tree *dirty_pages,
					int mark);
static int btrfs_destroy_pinned_extent(struct btrfs_root *root,
				       struct extent_io_tree *pinned_extents);
static int btrfs_cleanup_transaction(struct btrfs_root *root);
static void btrfs_error_commit_super(struct btrfs_root *root);
/*
 * btrfs_end_io_wq structs are used to do processing in task context when an IO
 * is complete. This is used during reads to verify checksums, and it is used
 * by writes to insert metadata for new file extents after IO is complete.
 */
struct btrfs_end_io_wq {
	struct bio *bio;
	bio_end_io_t *end_io;
	void *private;
	struct btrfs_fs_info *info;
	int error;
	enum btrfs_wq_endio_type metadata;
	struct list_head list;
	struct btrfs_work work;
};

static struct kmem_cache *btrfs_end_io_wq_cache;

int __init btrfs_end_io_wq_init(void)
{
	btrfs_end_io_wq_cache = kmem_cache_create("btrfs_end_io_wq",
					sizeof(struct btrfs_end_io_wq),
					0,
					SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
					NULL);
	if (!btrfs_end_io_wq_cache)
		return -ENOMEM;
	return 0;
}

void btrfs_end_io_wq_exit(void)
{
	if (btrfs_end_io_wq_cache)
		kmem_cache_destroy(btrfs_end_io_wq_cache);
}
/*
 * async submit bios are used to offload expensive checksumming
 * onto the worker threads. They checksum file and metadata bios
 * just before they are sent down the IO stack.
 */
struct async_submit_bio {
	struct inode *inode;
	struct bio *bio;
	struct list_head list;
	extent_submit_bio_hook_t *submit_bio_start;
	extent_submit_bio_hook_t *submit_bio_done;
	int rw;
	int mirror_num;
	unsigned long bio_flags;
	/*
	 * bio_offset is optional, can be used if the pages in the bio
	 * can't tell us where in the file the bio should go
	 */
	u64 bio_offset;
	struct btrfs_work work;
	int error;
};
/*
 * Lockdep class keys for extent_buffer->lock's in this root. For a given
 * eb, the lockdep key is determined by the btrfs_root it belongs to and
 * the level the eb occupies in the tree.
 *
 * Different roots are used for different purposes and may nest inside each
 * other and they require separate keysets. As lockdep keys should be
 * static, assign keysets according to the purpose of the root as indicated
 * by btrfs_root->objectid. This ensures that all special purpose roots
 * have separate keysets.
 *
 * Lock-nesting across peer nodes is always done with the immediate parent
 * node locked thus preventing deadlock. As lockdep doesn't know this, use
 * subclass to avoid triggering lockdep warning in such cases.
 *
 * The key is set by the readpage_end_io_hook after the buffer has passed
 * csum validation but before the pages are unlocked. It is also set by
 * btrfs_init_new_buffer on freshly allocated blocks.
 *
 * We also add a check to make sure the highest level of the tree is the
 * same as our lockdep setup here. If BTRFS_MAX_LEVEL changes, this code
 * needs update as well.
 */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# if BTRFS_MAX_LEVEL != 8
#  error
# endif

static struct btrfs_lockdep_keyset {
	u64			id;		/* root objectid */
	const char		*name_stem;	/* lock name stem */
	char			names[BTRFS_MAX_LEVEL + 1][20];
	struct lock_class_key	keys[BTRFS_MAX_LEVEL + 1];
} btrfs_lockdep_keysets[] = {
	{ .id = BTRFS_ROOT_TREE_OBJECTID,	.name_stem = "root"	},
	{ .id = BTRFS_EXTENT_TREE_OBJECTID,	.name_stem = "extent"	},
	{ .id = BTRFS_CHUNK_TREE_OBJECTID,	.name_stem = "chunk"	},
	{ .id = BTRFS_DEV_TREE_OBJECTID,	.name_stem = "dev"	},
	{ .id = BTRFS_FS_TREE_OBJECTID,		.name_stem = "fs"	},
	{ .id = BTRFS_CSUM_TREE_OBJECTID,	.name_stem = "csum"	},
	{ .id = BTRFS_QUOTA_TREE_OBJECTID,	.name_stem = "quota"	},
	{ .id = BTRFS_TREE_LOG_OBJECTID,	.name_stem = "log"	},
	{ .id = BTRFS_TREE_RELOC_OBJECTID,	.name_stem = "treloc"	},
	{ .id = BTRFS_DATA_RELOC_TREE_OBJECTID,	.name_stem = "dreloc"	},
	{ .id = BTRFS_UUID_TREE_OBJECTID,	.name_stem = "uuid"	},
	{ .id = 0,				.name_stem = "tree"	},
};

void __init btrfs_init_lockdep(void)
{
	int i, j;

	/* initialize lockdep class names */
	for (i = 0; i < ARRAY_SIZE(btrfs_lockdep_keysets); i++) {
		struct btrfs_lockdep_keyset *ks = &btrfs_lockdep_keysets[i];

		for (j = 0; j < ARRAY_SIZE(ks->names); j++)
			snprintf(ks->names[j], sizeof(ks->names[j]),
				 "btrfs-%s-%02d", ks->name_stem, j);
	}
}

void btrfs_set_buffer_lockdep_class(u64 objectid, struct extent_buffer *eb,
				    int level)
{
	struct btrfs_lockdep_keyset *ks;

	BUG_ON(level >= ARRAY_SIZE(ks->keys));

	/* find the matching keyset, id 0 is the default entry */
	for (ks = btrfs_lockdep_keysets; ks->id; ks++)
		if (ks->id == objectid)
			break;

	lockdep_set_class_and_name(&eb->lock,
				   &ks->keys[level], ks->names[level]);
}
#endif
/*
 * extents on the btree inode are pretty simple, there's one extent
 * that covers the entire device
 */
static struct extent_map *btree_get_extent(struct inode *inode,
		struct page *page, size_t pg_offset, u64 start, u64 len,
		int create)
{
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	struct extent_map *em;
	int ret;

	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, start, len);
	if (em) {
		em->bdev =
			BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
		read_unlock(&em_tree->lock);
		goto out;
	}
	read_unlock(&em_tree->lock);

	em = alloc_extent_map();
	if (!em) {
		em = ERR_PTR(-ENOMEM);
		goto out;
	}
	em->start = 0;
	em->len = (u64)-1;
	em->block_len = (u64)-1;
	em->block_start = 0;
	em->bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;

	write_lock(&em_tree->lock);
	ret = add_extent_mapping(em_tree, em, 0);
	if (ret == -EEXIST) {
		free_extent_map(em);
		em = lookup_extent_mapping(em_tree, start, len);
		if (!em)
			em = ERR_PTR(-EIO);
	} else if (ret) {
		free_extent_map(em);
		em = ERR_PTR(ret);
	}
	write_unlock(&em_tree->lock);

out:
	return em;
}
u32 btrfs_csum_data(char *data, u32 seed, size_t len)
{
	return btrfs_crc32c(seed, data, len);
}

void btrfs_csum_final(u32 crc, char *result)
{
	put_unaligned_le32(~crc, result);
}
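
/*
 * Usage sketch (illustrative only, not part of this file): checksumming a
 * raw metadata block when crc32c is the configured algorithm. 'buf' and
 * 'blocksize' are hypothetical locals used for the example:
 *
 *	u32 crc = ~(u32)0;
 *
 *	crc = btrfs_csum_data(buf + BTRFS_CSUM_SIZE, crc,
 *			      blocksize - BTRFS_CSUM_SIZE);
 *	btrfs_csum_final(crc, buf);
 *
 * btrfs_csum_final() inverts the running crc and stores it little-endian
 * in the first csum_size bytes of the block.
 */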
/*
 * compute the csum for a btree block, and either verify it or write it
 * into the csum field of the block.
 */
static int csum_tree_block(struct btrfs_fs_info *fs_info,
			   struct extent_buffer *buf,
			   int verify)
{
	u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
	char *result = NULL;
	unsigned long len;
	unsigned long cur_len;
	unsigned long offset = BTRFS_CSUM_SIZE;
	char *kaddr;
	unsigned long map_start;
	unsigned long map_len;
	int err;
	u32 crc = ~(u32)0;
	unsigned long inline_result;

	len = buf->len - offset;
	while (len > 0) {
		err = map_private_extent_buffer(buf, offset, 32,
					&kaddr, &map_start, &map_len);
		if (err)
			return 1;
		cur_len = min(len, map_len - (offset - map_start));
		crc = btrfs_csum_data(kaddr + offset - map_start,
				      crc, cur_len);
		len -= cur_len;
		offset += cur_len;
	}
	if (csum_size > sizeof(inline_result)) {
		result = kzalloc(csum_size, GFP_NOFS);
		if (!result)
			return 1;
	} else {
		result = (char *)&inline_result;
	}

	btrfs_csum_final(crc, result);

	if (verify) {
		if (memcmp_extent_buffer(buf, result, 0, csum_size)) {
			u32 val;
			u32 found = 0;

			memcpy(&found, result, csum_size);

			read_extent_buffer(buf, &val, 0, csum_size);
			btrfs_warn_rl(fs_info,
				"%s checksum verify failed on %llu wanted %X found %X level %d",
				fs_info->sb->s_id, buf->start,
				val, found, btrfs_header_level(buf));
			if (result != (char *)&inline_result)
				kfree(result);
			return 1;
		}
	} else {
		write_extent_buffer(buf, result, 0, csum_size);
	}
	if (result != (char *)&inline_result)
		kfree(result);
	return 0;
}
/*
 * we can't consider a given block up to date unless the transid of the
 * block matches the transid in the parent node's pointer. This is how we
 * detect blocks that either didn't get written at all or got written
 * in the wrong place.
 */
static int verify_parent_transid(struct extent_io_tree *io_tree,
				 struct extent_buffer *eb, u64 parent_transid,
				 int atomic)
{
	struct extent_state *cached_state = NULL;
	int ret;
	bool need_lock = (current->journal_info == BTRFS_SEND_TRANS_STUB);

	if (!parent_transid || btrfs_header_generation(eb) == parent_transid)
		return 0;

	if (atomic)
		return -EAGAIN;

	if (need_lock) {
		btrfs_tree_read_lock(eb);
		btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
	}

	lock_extent_bits(io_tree, eb->start, eb->start + eb->len - 1,
			 0, &cached_state);
	if (extent_buffer_uptodate(eb) &&
	    btrfs_header_generation(eb) == parent_transid) {
		ret = 0;
		goto out;
	}
	btrfs_err_rl(eb->fs_info,
		"parent transid verify failed on %llu wanted %llu found %llu",
			eb->start,
			parent_transid, btrfs_header_generation(eb));
	ret = 1;

	/*
	 * Things reading via commit roots that don't have normal protection,
	 * like send, can have a really old block in cache that may point at a
	 * block that has been free'd and re-allocated. So don't clear uptodate
	 * if we find an eb that is under IO (dirty/writeback) because we could
	 * end up reading in the stale data and then writing it back out and
	 * making everybody very sad.
	 */
	if (!extent_buffer_under_io(eb))
		clear_extent_buffer_uptodate(eb);
out:
	unlock_extent_cached(io_tree, eb->start, eb->start + eb->len - 1,
			     &cached_state, GFP_NOFS);
	if (need_lock)
		btrfs_tree_read_unlock_blocking(eb);
	return ret;
}
/*
 * Return 0 if the superblock checksum type matches the checksum value of that
 * algorithm. Pass the raw disk superblock data.
 */
static int btrfs_check_super_csum(char *raw_disk_sb)
{
	struct btrfs_super_block *disk_sb =
		(struct btrfs_super_block *)raw_disk_sb;
	u16 csum_type = btrfs_super_csum_type(disk_sb);
	int ret = 0;

	if (csum_type == BTRFS_CSUM_TYPE_CRC32) {
		u32 crc = ~(u32)0;
		const int csum_size = sizeof(crc);
		char result[csum_size];

		/*
		 * The super_block structure does not span the whole
		 * BTRFS_SUPER_INFO_SIZE range, we expect that the unused space
		 * is filled with zeros and is included in the checksum.
		 */
		crc = btrfs_csum_data(raw_disk_sb + BTRFS_CSUM_SIZE,
				crc, BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE);
		btrfs_csum_final(crc, result);

		if (memcmp(raw_disk_sb, result, csum_size))
			ret = 1;
	}

	if (csum_type >= ARRAY_SIZE(btrfs_csum_sizes)) {
		printk(KERN_ERR "BTRFS: unsupported checksum algorithm %u\n",
				csum_type);
		ret = 1;
	}

	return ret;
}
/*
 * helper to read a given tree block, doing retries as required when
 * the checksums don't match and we have alternate mirrors to try.
 */
static int btree_read_extent_buffer_pages(struct btrfs_root *root,
					  struct extent_buffer *eb,
					  u64 start, u64 parent_transid)
{
	struct extent_io_tree *io_tree;
	int failed = 0;
	int ret;
	int num_copies = 0;
	int mirror_num = 0;
	int failed_mirror = 0;

	clear_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
	io_tree = &BTRFS_I(root->fs_info->btree_inode)->io_tree;
	while (1) {
		ret = read_extent_buffer_pages(io_tree, eb, start,
					       WAIT_COMPLETE,
					       btree_get_extent, mirror_num);
		if (!ret) {
			if (!verify_parent_transid(io_tree, eb,
						   parent_transid, 0))
				break;
			else
				ret = -EIO;
		}

		/*
		 * This buffer's crc is fine, but its contents are corrupted, so
		 * there is no reason to read the other copies, they won't be
		 * any less wrong.
		 */
		if (test_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags))
			break;

		num_copies = btrfs_num_copies(root->fs_info,
					      eb->start, eb->len);
		if (num_copies == 1)
			break;

		if (!failed_mirror) {
			failed = 1;
			failed_mirror = eb->read_mirror;
		}

		mirror_num++;
		if (mirror_num == failed_mirror)
			mirror_num++;

		if (mirror_num > num_copies)
			break;
	}

	if (failed && !ret && failed_mirror)
		repair_eb_io_failure(root, eb, failed_mirror);

	return ret;
}
/*
 * checksum a dirty tree block before IO. This has extra checks to make sure
 * we only fill in the checksum field in the first page of a multi-page block
 */
static int csum_dirty_buffer(struct btrfs_fs_info *fs_info, struct page *page)
{
	u64 start = page_offset(page);
	u64 found_start;
	struct extent_buffer *eb;

	eb = (struct extent_buffer *)page->private;
	if (page != eb->pages[0])
		return 0;
	found_start = btrfs_header_bytenr(eb);
	if (WARN_ON(found_start != start || !PageUptodate(page)))
		return 0;
	csum_tree_block(fs_info, eb, 0);
	return 0;
}
static int check_tree_block_fsid(struct btrfs_fs_info *fs_info,
				 struct extent_buffer *eb)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	u8 fsid[BTRFS_UUID_SIZE];
	int ret = 1;

	read_extent_buffer(eb, fsid, btrfs_header_fsid(), BTRFS_FSID_SIZE);
	while (fs_devices) {
		if (!memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE)) {
			ret = 0;
			break;
		}
		fs_devices = fs_devices->seed;
	}
	return ret;
}
#define CORRUPT(reason, eb, root, slot)				\
	btrfs_crit(root->fs_info, "corrupt leaf, %s: block=%llu,"	\
		   "root=%llu, slot=%d", reason,			\
		   btrfs_header_bytenr(eb), root->objectid, slot)

static noinline int check_leaf(struct btrfs_root *root,
			       struct extent_buffer *leaf)
{
	struct btrfs_key key;
	struct btrfs_key leaf_key;
	u32 nritems = btrfs_header_nritems(leaf);
	int slot;

	if (nritems == 0)
		return 0;

	/* Check the 0 item */
	if (btrfs_item_offset_nr(leaf, 0) + btrfs_item_size_nr(leaf, 0) !=
	    BTRFS_LEAF_DATA_SIZE(root)) {
		CORRUPT("invalid item offset size pair", leaf, root, 0);
		return -EIO;
	}

	/*
	 * Check to make sure each item's keys are in the correct order and
	 * their offsets make sense. We only have to loop through nritems-1
	 * because we check the current slot against the next slot, which
	 * verifies that the next slot's offset+size makes sense and that the
	 * current slot's offset is correct.
	 */
	for (slot = 0; slot < nritems - 1; slot++) {
		btrfs_item_key_to_cpu(leaf, &leaf_key, slot);
		btrfs_item_key_to_cpu(leaf, &key, slot + 1);

		/* Make sure the keys are in the right order */
		if (btrfs_comp_cpu_keys(&leaf_key, &key) >= 0) {
			CORRUPT("bad key order", leaf, root, slot);
			return -EIO;
		}

		/*
		 * Make sure the offset and ends are right, remember that the
		 * item data starts at the end of the leaf and grows towards the
		 * front.
		 */
		if (btrfs_item_offset_nr(leaf, slot) !=
			btrfs_item_end_nr(leaf, slot + 1)) {
			CORRUPT("slot offset bad", leaf, root, slot);
			return -EIO;
		}

		/*
		 * Check to make sure that we don't point outside of the leaf,
		 * just in case all the items are consistent with each other,
		 * but all point outside of the leaf.
		 */
		if (btrfs_item_end_nr(leaf, slot) >
		    BTRFS_LEAF_DATA_SIZE(root)) {
			CORRUPT("slot end outside of leaf", leaf, root, slot);
			return -EIO;
		}
	}

	return 0;
}
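
/*
 * Illustration of the leaf layout the checks above rely on (a restatement,
 * no new logic): item headers grow from the front of the leaf while item
 * data grows back from the end, so for consecutive slots
 *
 *	item_offset(slot) == item_end(slot + 1)
 *
 * and for slot 0, item_offset(0) + item_size(0) must equal
 * BTRFS_LEAF_DATA_SIZE(root).
 */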
static int btree_readpage_end_io_hook(struct btrfs_io_bio *io_bio,
				      u64 phy_offset, struct page *page,
				      u64 start, u64 end, int mirror)
{
	u64 found_start;
	int found_level;
	struct extent_buffer *eb;
	struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
	int ret = 0;
	int reads_done;

	if (!page->private)
		goto out;

	eb = (struct extent_buffer *)page->private;

	/* the pending IO might have been the only thing that kept this buffer
	 * in memory. Make sure we have a ref for all these other checks
	 */
	extent_buffer_get(eb);

	reads_done = atomic_dec_and_test(&eb->io_pages);
	if (!reads_done)
		goto err;

	eb->read_mirror = mirror;
	if (test_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags)) {
		ret = -EIO;
		goto err;
	}

	found_start = btrfs_header_bytenr(eb);
	if (found_start != eb->start) {
		btrfs_err_rl(eb->fs_info, "bad tree block start %llu %llu",
			     found_start, eb->start);
		ret = -EIO;
		goto err;
	}
	if (check_tree_block_fsid(root->fs_info, eb)) {
		btrfs_err_rl(eb->fs_info, "bad fsid on block %llu",
			     eb->start);
		ret = -EIO;
		goto err;
	}
	found_level = btrfs_header_level(eb);
	if (found_level >= BTRFS_MAX_LEVEL) {
		btrfs_err(root->fs_info, "bad tree block level %d",
			  (int)btrfs_header_level(eb));
		ret = -EIO;
		goto err;
	}

	btrfs_set_buffer_lockdep_class(btrfs_header_owner(eb),
				       eb, found_level);

	ret = csum_tree_block(root->fs_info, eb, 1);
	if (ret) {
		ret = -EIO;
		goto err;
	}

	/*
	 * If this is a leaf block and it is corrupt, set the corrupt bit so
	 * that we don't try and read the other copies of this block, just
	 * return -EIO.
	 */
	if (found_level == 0 && check_leaf(root, eb)) {
		set_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
		ret = -EIO;
	}

	if (!ret)
		set_extent_buffer_uptodate(eb);
err:
	if (reads_done &&
	    test_and_clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags))
		btree_readahead_hook(root, eb, eb->start, ret);

	if (ret) {
		/*
		 * our io error hook is going to dec the io pages
		 * again, we have to make sure it has something
		 * to decrement
		 */
		atomic_inc(&eb->io_pages);
		clear_extent_buffer_uptodate(eb);
	}
	free_extent_buffer(eb);
out:
	return ret;
}

static int btree_io_failed_hook(struct page *page, int failed_mirror)
{
	struct extent_buffer *eb;
	struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;

	eb = (struct extent_buffer *)page->private;
	set_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags);
	eb->read_mirror = failed_mirror;
	atomic_dec(&eb->io_pages);
	if (test_and_clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags))
		btree_readahead_hook(root, eb, eb->start, -EIO);
	return -EIO;	/* we fixed nothing */
}
static void end_workqueue_bio(struct bio *bio)
{
	struct btrfs_end_io_wq *end_io_wq = bio->bi_private;
	struct btrfs_fs_info *fs_info;
	struct btrfs_workqueue *wq;
	btrfs_work_func_t func;

	fs_info = end_io_wq->info;
	end_io_wq->error = bio->bi_error;

	if (bio->bi_rw & REQ_WRITE) {
		if (end_io_wq->metadata == BTRFS_WQ_ENDIO_METADATA) {
			wq = fs_info->endio_meta_write_workers;
			func = btrfs_endio_meta_write_helper;
		} else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_FREE_SPACE) {
			wq = fs_info->endio_freespace_worker;
			func = btrfs_freespace_write_helper;
		} else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56) {
			wq = fs_info->endio_raid56_workers;
			func = btrfs_endio_raid56_helper;
		} else {
			wq = fs_info->endio_write_workers;
			func = btrfs_endio_write_helper;
		}
	} else {
		if (unlikely(end_io_wq->metadata ==
			     BTRFS_WQ_ENDIO_DIO_REPAIR)) {
			wq = fs_info->endio_repair_workers;
			func = btrfs_endio_repair_helper;
		} else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56) {
			wq = fs_info->endio_raid56_workers;
			func = btrfs_endio_raid56_helper;
		} else if (end_io_wq->metadata) {
			wq = fs_info->endio_meta_workers;
			func = btrfs_endio_meta_helper;
		} else {
			wq = fs_info->endio_workers;
			func = btrfs_endio_helper;
		}
	}

	btrfs_init_work(&end_io_wq->work, func, end_workqueue_fn, NULL, NULL);
	btrfs_queue_work(wq, &end_io_wq->work);
}

int btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
			enum btrfs_wq_endio_type metadata)
{
	struct btrfs_end_io_wq *end_io_wq;

	end_io_wq = kmem_cache_alloc(btrfs_end_io_wq_cache, GFP_NOFS);
	if (!end_io_wq)
		return -ENOMEM;

	end_io_wq->private = bio->bi_private;
	end_io_wq->end_io = bio->bi_end_io;
	end_io_wq->info = info;
	end_io_wq->error = 0;
	end_io_wq->bio = bio;
	end_io_wq->metadata = metadata;

	bio->bi_private = end_io_wq;
	bio->bi_end_io = end_workqueue_bio;
	return 0;
}
unsigned long btrfs_async_submit_limit(struct btrfs_fs_info *info)
{
	unsigned long limit = min_t(unsigned long,
				    info->thread_pool_size,
				    info->fs_devices->open_devices);
	return 256 * limit;
}
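
/*
 * Note: the value above acts as an in-flight async bio budget, 256 per
 * worker thread or open device, whichever is smaller. run_one_async_done()
 * below wakes throttled submitters once nr_async_submits drops under 2/3
 * of this limit.
 */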
static void run_one_async_start(struct btrfs_work *work)
{
	struct async_submit_bio *async;
	int ret;

	async = container_of(work, struct async_submit_bio, work);
	ret = async->submit_bio_start(async->inode, async->rw, async->bio,
				      async->mirror_num, async->bio_flags,
				      async->bio_offset);
	if (ret)
		async->error = ret;
}

static void run_one_async_done(struct btrfs_work *work)
{
	struct btrfs_fs_info *fs_info;
	struct async_submit_bio *async;
	int limit;

	async = container_of(work, struct async_submit_bio, work);
	fs_info = BTRFS_I(async->inode)->root->fs_info;

	limit = btrfs_async_submit_limit(fs_info);
	limit = limit * 2 / 3;

	/*
	 * atomic_dec_return implies a barrier for waitqueue_active
	 */
	if (atomic_dec_return(&fs_info->nr_async_submits) < limit &&
	    waitqueue_active(&fs_info->async_submit_wait))
		wake_up(&fs_info->async_submit_wait);

	/* If an error occurred we just want to clean up the bio and move on */
	if (async->error) {
		async->bio->bi_error = async->error;
		bio_endio(async->bio);
		return;
	}

	async->submit_bio_done(async->inode, async->rw, async->bio,
			       async->mirror_num, async->bio_flags,
			       async->bio_offset);
}

static void run_one_async_free(struct btrfs_work *work)
{
	struct async_submit_bio *async;

	async = container_of(work, struct async_submit_bio, work);
	kfree(async);
}

int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
			int rw, struct bio *bio, int mirror_num,
			unsigned long bio_flags,
			u64 bio_offset,
			extent_submit_bio_hook_t *submit_bio_start,
			extent_submit_bio_hook_t *submit_bio_done)
{
	struct async_submit_bio *async;

	async = kmalloc(sizeof(*async), GFP_NOFS);
	if (!async)
		return -ENOMEM;

	async->inode = inode;
	async->rw = rw;
	async->bio = bio;
	async->mirror_num = mirror_num;
	async->submit_bio_start = submit_bio_start;
	async->submit_bio_done = submit_bio_done;

	btrfs_init_work(&async->work, btrfs_worker_helper, run_one_async_start,
			run_one_async_done, run_one_async_free);

	async->bio_flags = bio_flags;
	async->bio_offset = bio_offset;

	async->error = 0;

	atomic_inc(&fs_info->nr_async_submits);

	if (rw & REQ_SYNC)
		btrfs_set_work_high_priority(&async->work);

	btrfs_queue_work(fs_info->workers, &async->work);

	while (atomic_read(&fs_info->async_submit_draining) &&
	       atomic_read(&fs_info->nr_async_submits)) {
		wait_event(fs_info->async_submit_wait,
			   (atomic_read(&fs_info->nr_async_submits) == 0));
	}

	return 0;
}
static int btree_csum_one_bio(struct bio *bio)
{
	struct bio_vec *bvec;
	struct btrfs_root *root;
	int i, ret = 0;

	bio_for_each_segment_all(bvec, bio, i) {
		root = BTRFS_I(bvec->bv_page->mapping->host)->root;
		ret = csum_dirty_buffer(root->fs_info, bvec->bv_page);
		if (ret)
			break;
	}

	return ret;
}

static int __btree_submit_bio_start(struct inode *inode, int rw,
				    struct bio *bio, int mirror_num,
				    unsigned long bio_flags,
				    u64 bio_offset)
{
	/*
	 * when we're called for a write, we're already in the async
	 * submission context. Just jump into btrfs_map_bio
	 */
	return btree_csum_one_bio(bio);
}

static int __btree_submit_bio_done(struct inode *inode, int rw, struct bio *bio,
				   int mirror_num, unsigned long bio_flags,
				   u64 bio_offset)
{
	int ret;

	/*
	 * when we're called for a write, we're already in the async
	 * submission context. Just jump into btrfs_map_bio
	 */
	ret = btrfs_map_bio(BTRFS_I(inode)->root, rw, bio, mirror_num, 1);
	if (ret) {
		bio->bi_error = ret;
		bio_endio(bio);
	}
	return ret;
}
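
/*
 * Descriptive note on the helper below: it decides whether a metadata write
 * should have its checksumming offloaded to the async worker pool. Tree-log
 * bios are submitted inline, and on x86 the offload is also skipped when the
 * CPU provides hardware crc32c (SSE4.2), since checksumming in the
 * submitting context is then cheap enough.
 */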
static int check_async_write(struct inode *inode, unsigned long bio_flags)
{
	if (bio_flags & EXTENT_BIO_TREE_LOG)
		return 0;
#ifdef CONFIG_X86
	if (cpu_has_xmm4_2)
		return 0;
#endif
	return 1;
}

static int btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
				 int mirror_num, unsigned long bio_flags,
				 u64 bio_offset)
{
	int async = check_async_write(inode, bio_flags);
	int ret;

	if (!(rw & REQ_WRITE)) {
		/*
		 * called for a read, do the setup so that checksum validation
		 * can happen in the async kernel threads
		 */
		ret = btrfs_bio_wq_end_io(BTRFS_I(inode)->root->fs_info,
					  bio, BTRFS_WQ_ENDIO_METADATA);
		if (ret)
			goto out_w_error;
		ret = btrfs_map_bio(BTRFS_I(inode)->root, rw, bio,
				    mirror_num, 0);
	} else if (!async) {
		ret = btree_csum_one_bio(bio);
		if (ret)
			goto out_w_error;
		ret = btrfs_map_bio(BTRFS_I(inode)->root, rw, bio,
				    mirror_num, 0);
	} else {
		/*
		 * kthread helpers are used to submit writes so that
		 * checksumming can happen in parallel across all CPUs
		 */
		ret = btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
					  inode, rw, bio, mirror_num, 0,
					  bio_offset,
					  __btree_submit_bio_start,
					  __btree_submit_bio_done);
	}

	if (ret)
		goto out_w_error;
	return 0;

out_w_error:
	bio->bi_error = ret;
	bio_endio(bio);
	return ret;
}
#ifdef CONFIG_MIGRATION
static int btree_migratepage(struct address_space *mapping,
			struct page *newpage, struct page *page,
			enum migrate_mode mode)
{
	/*
	 * we can't safely write a btree page from here,
	 * we haven't done the locking hook
	 */
	if (PageDirty(page))
		return -EAGAIN;
	/*
	 * Buffers may be managed in a filesystem specific way.
	 * We must have no buffers or drop them.
	 */
	if (page_has_private(page) &&
	    !try_to_release_page(page, GFP_KERNEL))
		return -EAGAIN;
	return migrate_page(mapping, newpage, page, mode);
}
#endif

static int btree_writepages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct btrfs_fs_info *fs_info;
	int ret;

	if (wbc->sync_mode == WB_SYNC_NONE) {

		if (wbc->for_kupdate)
			return 0;

		fs_info = BTRFS_I(mapping->host)->root->fs_info;
		/* this is a bit racy, but that's ok */
		ret = percpu_counter_compare(&fs_info->dirty_metadata_bytes,
					     BTRFS_DIRTY_METADATA_THRESH);
		if (ret < 0)
			return 0;
	}
	return btree_write_cache_pages(mapping, wbc);
}

static int btree_readpage(struct file *file, struct page *page)
{
	struct extent_io_tree *tree;
	tree = &BTRFS_I(page->mapping->host)->io_tree;
	return extent_read_full_page(tree, page, btree_get_extent, 0);
}

static int btree_releasepage(struct page *page, gfp_t gfp_flags)
{
	if (PageWriteback(page) || PageDirty(page))
		return 0;

	return try_release_extent_buffer(page);
}

static void btree_invalidatepage(struct page *page, unsigned int offset,
				 unsigned int length)
{
	struct extent_io_tree *tree;
	tree = &BTRFS_I(page->mapping->host)->io_tree;
	extent_invalidatepage(tree, page, offset);
	btree_releasepage(page, GFP_NOFS);
	if (PagePrivate(page)) {
		btrfs_warn(BTRFS_I(page->mapping->host)->root->fs_info,
			   "page private not zero on page %llu",
			   (unsigned long long)page_offset(page));
		ClearPagePrivate(page);
		set_page_private(page, 0);
		page_cache_release(page);
	}
}

static int btree_set_page_dirty(struct page *page)
{
#ifdef DEBUG
	struct extent_buffer *eb;

	BUG_ON(!PagePrivate(page));
	eb = (struct extent_buffer *)page->private;
	BUG_ON(!eb);
	BUG_ON(!test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
	BUG_ON(!atomic_read(&eb->refs));
	btrfs_assert_tree_locked(eb);
#endif
	return __set_page_dirty_nobuffers(page);
}

static const struct address_space_operations btree_aops = {
	.readpage	= btree_readpage,
	.writepages	= btree_writepages,
	.releasepage	= btree_releasepage,
	.invalidatepage = btree_invalidatepage,
#ifdef CONFIG_MIGRATION
	.migratepage	= btree_migratepage,
#endif
	.set_page_dirty = btree_set_page_dirty,
};
void readahead_tree_block(struct btrfs_root *root, u64 bytenr)
{
	struct extent_buffer *buf = NULL;
	struct inode *btree_inode = root->fs_info->btree_inode;

	buf = btrfs_find_create_tree_block(root, bytenr);
	if (!buf)
		return;
	read_extent_buffer_pages(&BTRFS_I(btree_inode)->io_tree,
				 buf, 0, WAIT_NONE, btree_get_extent, 0);
	free_extent_buffer(buf);
}

int reada_tree_block_flagged(struct btrfs_root *root, u64 bytenr,
			 int mirror_num, struct extent_buffer **eb)
{
	struct extent_buffer *buf = NULL;
	struct inode *btree_inode = root->fs_info->btree_inode;
	struct extent_io_tree *io_tree = &BTRFS_I(btree_inode)->io_tree;
	int ret;

	buf = btrfs_find_create_tree_block(root, bytenr);
	if (!buf)
		return 0;

	set_bit(EXTENT_BUFFER_READAHEAD, &buf->bflags);

	ret = read_extent_buffer_pages(io_tree, buf, 0, WAIT_PAGE_LOCK,
				       btree_get_extent, mirror_num);
	if (ret) {
		free_extent_buffer(buf);
		return ret;
	}

	if (test_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags)) {
		free_extent_buffer(buf);
		return -EIO;
	} else if (extent_buffer_uptodate(buf)) {
		*eb = buf;
	} else {
		free_extent_buffer(buf);
	}
	return 0;
}

struct extent_buffer *btrfs_find_tree_block(struct btrfs_fs_info *fs_info,
					    u64 bytenr)
{
	return find_extent_buffer(fs_info, bytenr);
}

struct extent_buffer *btrfs_find_create_tree_block(struct btrfs_root *root,
						   u64 bytenr)
{
	if (btrfs_test_is_dummy_root(root))
		return alloc_test_extent_buffer(root->fs_info, bytenr);
	return alloc_extent_buffer(root->fs_info, bytenr);
}
int btrfs_write_tree_block(struct extent_buffer *buf)
{
	return filemap_fdatawrite_range(buf->pages[0]->mapping, buf->start,
					buf->start + buf->len - 1);
}

int btrfs_wait_tree_block_writeback(struct extent_buffer *buf)
{
	return filemap_fdatawait_range(buf->pages[0]->mapping,
				       buf->start, buf->start + buf->len - 1);
}

struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr,
				      u64 parent_transid)
{
	struct extent_buffer *buf = NULL;
	int ret;

	buf = btrfs_find_create_tree_block(root, bytenr);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	ret = btree_read_extent_buffer_pages(root, buf, 0, parent_transid);
	if (ret) {
		free_extent_buffer(buf);
		return ERR_PTR(ret);
	}
	return buf;
}

void clean_tree_block(struct btrfs_trans_handle *trans,
		      struct btrfs_fs_info *fs_info,
		      struct extent_buffer *buf)
{
	if (btrfs_header_generation(buf) ==
	    fs_info->running_transaction->transid) {
		btrfs_assert_tree_locked(buf);

		if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &buf->bflags)) {
			__percpu_counter_add(&fs_info->dirty_metadata_bytes,
					     -buf->len,
					     fs_info->dirty_metadata_batch);
			/* ugh, clear_extent_buffer_dirty needs to lock the page */
			btrfs_set_lock_blocking(buf);
			clear_extent_buffer_dirty(buf);
		}
	}
}
static struct btrfs_subvolume_writers *btrfs_alloc_subvolume_writers(void)
{
	struct btrfs_subvolume_writers *writers;
	int ret;

	writers = kmalloc(sizeof(*writers), GFP_NOFS);
	if (!writers)
		return ERR_PTR(-ENOMEM);

	ret = percpu_counter_init(&writers->counter, 0, GFP_KERNEL);
	if (ret < 0) {
		kfree(writers);
		return ERR_PTR(ret);
	}

	init_waitqueue_head(&writers->wait);
	return writers;
}

static void
btrfs_free_subvolume_writers(struct btrfs_subvolume_writers *writers)
{
	percpu_counter_destroy(&writers->counter);
	kfree(writers);
}
static void __setup_root(u32 nodesize, u32 sectorsize, u32 stripesize,
			 struct btrfs_root *root, struct btrfs_fs_info *fs_info,
			 u64 objectid)
{
	root->node = NULL;
	root->commit_root = NULL;
	root->sectorsize = sectorsize;
	root->nodesize = nodesize;
	root->stripesize = stripesize;
	root->state = 0;
	root->orphan_cleanup_state = 0;

	root->objectid = objectid;
	root->last_trans = 0;
	root->highest_objectid = 0;
	root->nr_delalloc_inodes = 0;
	root->nr_ordered_extents = 0;
	root->name = NULL;
	root->inode_tree = RB_ROOT;
	INIT_RADIX_TREE(&root->delayed_nodes_tree, GFP_ATOMIC);
	root->block_rsv = NULL;
	root->orphan_block_rsv = NULL;

	INIT_LIST_HEAD(&root->dirty_list);
	INIT_LIST_HEAD(&root->root_list);
	INIT_LIST_HEAD(&root->delalloc_inodes);
	INIT_LIST_HEAD(&root->delalloc_root);
	INIT_LIST_HEAD(&root->ordered_extents);
	INIT_LIST_HEAD(&root->ordered_root);
	INIT_LIST_HEAD(&root->logged_list[0]);
	INIT_LIST_HEAD(&root->logged_list[1]);
	spin_lock_init(&root->orphan_lock);
	spin_lock_init(&root->inode_lock);
	spin_lock_init(&root->delalloc_lock);
	spin_lock_init(&root->ordered_extent_lock);
	spin_lock_init(&root->accounting_lock);
	spin_lock_init(&root->log_extents_lock[0]);
	spin_lock_init(&root->log_extents_lock[1]);
	mutex_init(&root->objectid_mutex);
	mutex_init(&root->log_mutex);
	mutex_init(&root->ordered_extent_mutex);
	mutex_init(&root->delalloc_mutex);
	init_waitqueue_head(&root->log_writer_wait);
	init_waitqueue_head(&root->log_commit_wait[0]);
	init_waitqueue_head(&root->log_commit_wait[1]);
	INIT_LIST_HEAD(&root->log_ctxs[0]);
	INIT_LIST_HEAD(&root->log_ctxs[1]);
	atomic_set(&root->log_commit[0], 0);
	atomic_set(&root->log_commit[1], 0);
	atomic_set(&root->log_writers, 0);
	atomic_set(&root->log_batch, 0);
	atomic_set(&root->orphan_inodes, 0);
	atomic_set(&root->refs, 1);
	atomic_set(&root->will_be_snapshoted, 0);
	atomic_set(&root->qgroup_meta_rsv, 0);
	root->log_transid = 0;
	root->log_transid_committed = -1;
	root->last_log_commit = 0;
	if (fs_info)
		extent_io_tree_init(&root->dirty_log_pages,
				    fs_info->btree_inode->i_mapping);

	memset(&root->root_key, 0, sizeof(root->root_key));
	memset(&root->root_item, 0, sizeof(root->root_item));
	memset(&root->defrag_progress, 0, sizeof(root->defrag_progress));
	if (fs_info)
		root->defrag_trans_start = fs_info->generation;
	else
		root->defrag_trans_start = 0;
	root->root_key.objectid = objectid;
	root->anon_dev = 0;

	spin_lock_init(&root->root_item_lock);
}
  1129. static struct btrfs_root *btrfs_alloc_root(struct btrfs_fs_info *fs_info)
  1130. {
  1131. struct btrfs_root *root = kzalloc(sizeof(*root), GFP_NOFS);
  1132. if (root)
  1133. root->fs_info = fs_info;
  1134. return root;
  1135. }
  1136. #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
  1137. /* Should only be used by the testing infrastructure */
  1138. struct btrfs_root *btrfs_alloc_dummy_root(void)
  1139. {
  1140. struct btrfs_root *root;
  1141. root = btrfs_alloc_root(NULL);
  1142. if (!root)
  1143. return ERR_PTR(-ENOMEM);
  1144. __setup_root(4096, 4096, 4096, root, NULL, 1);
  1145. set_bit(BTRFS_ROOT_DUMMY_ROOT, &root->state);
  1146. root->alloc_bytenr = 0;
  1147. return root;
  1148. }
  1149. #endif
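/*
 * Create a brand new tree: allocate an in-memory root, allocate and
 * initialize a single empty leaf as its node, fill in the root item and
 * insert it into the tree of tree roots. On failure the partially built
 * root is torn down and an ERR_PTR is returned.
 */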
  1150. struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans,
  1151. struct btrfs_fs_info *fs_info,
  1152. u64 objectid)
  1153. {
  1154. struct extent_buffer *leaf;
  1155. struct btrfs_root *tree_root = fs_info->tree_root;
  1156. struct btrfs_root *root;
  1157. struct btrfs_key key;
  1158. int ret = 0;
  1159. uuid_le uuid;
  1160. root = btrfs_alloc_root(fs_info);
  1161. if (!root)
  1162. return ERR_PTR(-ENOMEM);
  1163. __setup_root(tree_root->nodesize, tree_root->sectorsize,
  1164. tree_root->stripesize, root, fs_info, objectid);
  1165. root->root_key.objectid = objectid;
  1166. root->root_key.type = BTRFS_ROOT_ITEM_KEY;
  1167. root->root_key.offset = 0;
  1168. leaf = btrfs_alloc_tree_block(trans, root, 0, objectid, NULL, 0, 0, 0);
  1169. if (IS_ERR(leaf)) {
  1170. ret = PTR_ERR(leaf);
  1171. leaf = NULL;
  1172. goto fail;
  1173. }
  1174. memset_extent_buffer(leaf, 0, 0, sizeof(struct btrfs_header));
  1175. btrfs_set_header_bytenr(leaf, leaf->start);
  1176. btrfs_set_header_generation(leaf, trans->transid);
  1177. btrfs_set_header_backref_rev(leaf, BTRFS_MIXED_BACKREF_REV);
  1178. btrfs_set_header_owner(leaf, objectid);
  1179. root->node = leaf;
  1180. write_extent_buffer(leaf, fs_info->fsid, btrfs_header_fsid(),
  1181. BTRFS_FSID_SIZE);
  1182. write_extent_buffer(leaf, fs_info->chunk_tree_uuid,
  1183. btrfs_header_chunk_tree_uuid(leaf),
  1184. BTRFS_UUID_SIZE);
  1185. btrfs_mark_buffer_dirty(leaf);
  1186. root->commit_root = btrfs_root_node(root);
  1187. set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
  1188. root->root_item.flags = 0;
  1189. root->root_item.byte_limit = 0;
  1190. btrfs_set_root_bytenr(&root->root_item, leaf->start);
  1191. btrfs_set_root_generation(&root->root_item, trans->transid);
  1192. btrfs_set_root_level(&root->root_item, 0);
  1193. btrfs_set_root_refs(&root->root_item, 1);
  1194. btrfs_set_root_used(&root->root_item, leaf->len);
  1195. btrfs_set_root_last_snapshot(&root->root_item, 0);
  1196. btrfs_set_root_dirid(&root->root_item, 0);
  1197. uuid_le_gen(&uuid);
  1198. memcpy(root->root_item.uuid, uuid.b, BTRFS_UUID_SIZE);
  1199. root->root_item.drop_level = 0;
  1200. key.objectid = objectid;
  1201. key.type = BTRFS_ROOT_ITEM_KEY;
  1202. key.offset = 0;
  1203. ret = btrfs_insert_root(trans, tree_root, &key, &root->root_item);
  1204. if (ret)
  1205. goto fail;
  1206. btrfs_tree_unlock(leaf);
  1207. return root;
  1208. fail:
  1209. if (leaf) {
  1210. btrfs_tree_unlock(leaf);
  1211. free_extent_buffer(root->commit_root);
  1212. free_extent_buffer(leaf);
  1213. }
  1214. kfree(root);
  1215. return ERR_PTR(ret);
  1216. }
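/*
 * Allocate an in-memory log tree root with one empty leaf. Unlike
 * btrfs_create_tree(), no root item is inserted for it; see the comment
 * below about why log trees must not get the REF_COWS bit.
 */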
  1217. static struct btrfs_root *alloc_log_tree(struct btrfs_trans_handle *trans,
  1218. struct btrfs_fs_info *fs_info)
  1219. {
  1220. struct btrfs_root *root;
  1221. struct btrfs_root *tree_root = fs_info->tree_root;
  1222. struct extent_buffer *leaf;
  1223. root = btrfs_alloc_root(fs_info);
  1224. if (!root)
  1225. return ERR_PTR(-ENOMEM);
  1226. __setup_root(tree_root->nodesize, tree_root->sectorsize,
  1227. tree_root->stripesize, root, fs_info,
  1228. BTRFS_TREE_LOG_OBJECTID);
  1229. root->root_key.objectid = BTRFS_TREE_LOG_OBJECTID;
  1230. root->root_key.type = BTRFS_ROOT_ITEM_KEY;
  1231. root->root_key.offset = BTRFS_TREE_LOG_OBJECTID;
  1232. /*
  1233. * DON'T set REF_COWS for log trees
  1234. *
  1235. * log trees do not get reference counted because they go away
  1236. * before a real commit is actually done. They do store pointers
  1237. * to file data extents, and those reference counts still get
  1238. * updated (along with back refs to the log tree).
  1239. */
  1240. leaf = btrfs_alloc_tree_block(trans, root, 0, BTRFS_TREE_LOG_OBJECTID,
  1241. NULL, 0, 0, 0);
  1242. if (IS_ERR(leaf)) {
  1243. kfree(root);
  1244. return ERR_CAST(leaf);
  1245. }
  1246. memset_extent_buffer(leaf, 0, 0, sizeof(struct btrfs_header));
  1247. btrfs_set_header_bytenr(leaf, leaf->start);
  1248. btrfs_set_header_generation(leaf, trans->transid);
  1249. btrfs_set_header_backref_rev(leaf, BTRFS_MIXED_BACKREF_REV);
  1250. btrfs_set_header_owner(leaf, BTRFS_TREE_LOG_OBJECTID);
  1251. root->node = leaf;
  1252. write_extent_buffer(root->node, root->fs_info->fsid,
  1253. btrfs_header_fsid(), BTRFS_FSID_SIZE);
  1254. btrfs_mark_buffer_dirty(root->node);
  1255. btrfs_tree_unlock(root->node);
  1256. return root;
  1257. }
  1258. int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans,
  1259. struct btrfs_fs_info *fs_info)
  1260. {
  1261. struct btrfs_root *log_root;
  1262. log_root = alloc_log_tree(trans, fs_info);
  1263. if (IS_ERR(log_root))
  1264. return PTR_ERR(log_root);
  1265. WARN_ON(fs_info->log_root_tree);
  1266. fs_info->log_root_tree = log_root;
  1267. return 0;
  1268. }
  1269. int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
  1270. struct btrfs_root *root)
  1271. {
  1272. struct btrfs_root *log_root;
  1273. struct btrfs_inode_item *inode_item;
  1274. log_root = alloc_log_tree(trans, root->fs_info);
  1275. if (IS_ERR(log_root))
  1276. return PTR_ERR(log_root);
  1277. log_root->last_trans = trans->transid;
  1278. log_root->root_key.offset = root->root_key.objectid;
  1279. inode_item = &log_root->root_item.inode;
  1280. btrfs_set_stack_inode_generation(inode_item, 1);
  1281. btrfs_set_stack_inode_size(inode_item, 3);
  1282. btrfs_set_stack_inode_nlink(inode_item, 1);
  1283. btrfs_set_stack_inode_nbytes(inode_item, root->nodesize);
  1284. btrfs_set_stack_inode_mode(inode_item, S_IFDIR | 0755);
  1285. btrfs_set_root_node(&log_root->root_item, log_root->node);
  1286. WARN_ON(root->log_root);
  1287. root->log_root = log_root;
  1288. root->log_transid = 0;
  1289. root->log_transid_committed = -1;
  1290. root->last_log_commit = 0;
  1291. return 0;
  1292. }
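/*
 * Look up a root item in the tree of tree roots by @key and read the
 * tree block it references. Returns the populated in-memory root, or an
 * ERR_PTR if the item is missing or its node cannot be read.
 */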
  1293. static struct btrfs_root *btrfs_read_tree_root(struct btrfs_root *tree_root,
  1294. struct btrfs_key *key)
  1295. {
  1296. struct btrfs_root *root;
  1297. struct btrfs_fs_info *fs_info = tree_root->fs_info;
  1298. struct btrfs_path *path;
  1299. u64 generation;
  1300. int ret;
  1301. path = btrfs_alloc_path();
  1302. if (!path)
  1303. return ERR_PTR(-ENOMEM);
  1304. root = btrfs_alloc_root(fs_info);
  1305. if (!root) {
  1306. ret = -ENOMEM;
  1307. goto alloc_fail;
  1308. }
  1309. __setup_root(tree_root->nodesize, tree_root->sectorsize,
  1310. tree_root->stripesize, root, fs_info, key->objectid);
  1311. ret = btrfs_find_root(tree_root, key, path,
  1312. &root->root_item, &root->root_key);
  1313. if (ret) {
  1314. if (ret > 0)
  1315. ret = -ENOENT;
  1316. goto find_fail;
  1317. }
  1318. generation = btrfs_root_generation(&root->root_item);
  1319. root->node = read_tree_block(root, btrfs_root_bytenr(&root->root_item),
  1320. generation);
  1321. if (IS_ERR(root->node)) {
  1322. ret = PTR_ERR(root->node);
  1323. goto find_fail;
  1324. } else if (!btrfs_buffer_uptodate(root->node, generation, 0)) {
  1325. ret = -EIO;
  1326. free_extent_buffer(root->node);
  1327. goto find_fail;
  1328. }
  1329. root->commit_root = btrfs_root_node(root);
  1330. out:
  1331. btrfs_free_path(path);
  1332. return root;
  1333. find_fail:
  1334. kfree(root);
  1335. alloc_fail:
  1336. root = ERR_PTR(ret);
  1337. goto out;
  1338. }
  1339. struct btrfs_root *btrfs_read_fs_root(struct btrfs_root *tree_root,
  1340. struct btrfs_key *location)
  1341. {
  1342. struct btrfs_root *root;
  1343. root = btrfs_read_tree_root(tree_root, location);
  1344. if (IS_ERR(root))
  1345. return root;
  1346. if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
  1347. set_bit(BTRFS_ROOT_REF_COWS, &root->state);
  1348. btrfs_check_and_init_root_item(&root->root_item);
  1349. }
  1350. return root;
  1351. }
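/*
 * Finish setting up a subvolume root after it has been read from disk:
 * allocate the free-inode caches and the subvolume writers structure,
 * and assign an anonymous block device number for the subvolume.
 */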
  1352. int btrfs_init_fs_root(struct btrfs_root *root)
  1353. {
  1354. int ret;
  1355. struct btrfs_subvolume_writers *writers;
  1356. root->free_ino_ctl = kzalloc(sizeof(*root->free_ino_ctl), GFP_NOFS);
  1357. root->free_ino_pinned = kzalloc(sizeof(*root->free_ino_pinned),
  1358. GFP_NOFS);
  1359. if (!root->free_ino_pinned || !root->free_ino_ctl) {
  1360. ret = -ENOMEM;
  1361. goto fail;
  1362. }
  1363. writers = btrfs_alloc_subvolume_writers();
  1364. if (IS_ERR(writers)) {
  1365. ret = PTR_ERR(writers);
  1366. goto fail;
  1367. }
  1368. root->subv_writers = writers;
  1369. btrfs_init_free_ino_ctl(root);
  1370. spin_lock_init(&root->ino_cache_lock);
  1371. init_waitqueue_head(&root->ino_cache_wait);
  1372. ret = get_anon_bdev(&root->anon_dev);
  1373. if (ret)
  1374. goto free_writers;
  1375. return 0;
  1376. free_writers:
  1377. btrfs_free_subvolume_writers(root->subv_writers);
  1378. fail:
  1379. kfree(root->free_ino_ctl);
  1380. kfree(root->free_ino_pinned);
  1381. return ret;
  1382. }
  1383. static struct btrfs_root *btrfs_lookup_fs_root(struct btrfs_fs_info *fs_info,
  1384. u64 root_id)
  1385. {
  1386. struct btrfs_root *root;
  1387. spin_lock(&fs_info->fs_roots_radix_lock);
  1388. root = radix_tree_lookup(&fs_info->fs_roots_radix,
  1389. (unsigned long)root_id);
  1390. spin_unlock(&fs_info->fs_roots_radix_lock);
  1391. return root;
  1392. }
  1393. int btrfs_insert_fs_root(struct btrfs_fs_info *fs_info,
  1394. struct btrfs_root *root)
  1395. {
  1396. int ret;
  1397. ret = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM);
  1398. if (ret)
  1399. return ret;
  1400. spin_lock(&fs_info->fs_roots_radix_lock);
  1401. ret = radix_tree_insert(&fs_info->fs_roots_radix,
  1402. (unsigned long)root->root_key.objectid,
  1403. root);
  1404. if (ret == 0)
  1405. set_bit(BTRFS_ROOT_IN_RADIX, &root->state);
  1406. spin_unlock(&fs_info->fs_roots_radix_lock);
  1407. radix_tree_preload_end();
  1408. return ret;
  1409. }
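/*
 * Main lookup path for roots. The well known roots (tree, extent,
 * chunk, dev, csum, quota, uuid) are returned straight from fs_info.
 * Anything else is first looked up in the fs_roots radix tree; on a miss
 * the root is read from disk, initialized, checked for a pending orphan
 * item and inserted into the radix tree. A racing insertion (-EEXIST)
 * simply frees our copy and retries the lookup.
 */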
  1410. struct btrfs_root *btrfs_get_fs_root(struct btrfs_fs_info *fs_info,
  1411. struct btrfs_key *location,
  1412. bool check_ref)
  1413. {
  1414. struct btrfs_root *root;
  1415. struct btrfs_path *path;
  1416. struct btrfs_key key;
  1417. int ret;
  1418. if (location->objectid == BTRFS_ROOT_TREE_OBJECTID)
  1419. return fs_info->tree_root;
  1420. if (location->objectid == BTRFS_EXTENT_TREE_OBJECTID)
  1421. return fs_info->extent_root;
  1422. if (location->objectid == BTRFS_CHUNK_TREE_OBJECTID)
  1423. return fs_info->chunk_root;
  1424. if (location->objectid == BTRFS_DEV_TREE_OBJECTID)
  1425. return fs_info->dev_root;
  1426. if (location->objectid == BTRFS_CSUM_TREE_OBJECTID)
  1427. return fs_info->csum_root;
  1428. if (location->objectid == BTRFS_QUOTA_TREE_OBJECTID)
  1429. return fs_info->quota_root ? fs_info->quota_root :
  1430. ERR_PTR(-ENOENT);
  1431. if (location->objectid == BTRFS_UUID_TREE_OBJECTID)
  1432. return fs_info->uuid_root ? fs_info->uuid_root :
  1433. ERR_PTR(-ENOENT);
  1434. again:
  1435. root = btrfs_lookup_fs_root(fs_info, location->objectid);
  1436. if (root) {
  1437. if (check_ref && btrfs_root_refs(&root->root_item) == 0)
  1438. return ERR_PTR(-ENOENT);
  1439. return root;
  1440. }
  1441. root = btrfs_read_fs_root(fs_info->tree_root, location);
  1442. if (IS_ERR(root))
  1443. return root;
  1444. if (check_ref && btrfs_root_refs(&root->root_item) == 0) {
  1445. ret = -ENOENT;
  1446. goto fail;
  1447. }
  1448. ret = btrfs_init_fs_root(root);
  1449. if (ret)
  1450. goto fail;
  1451. path = btrfs_alloc_path();
  1452. if (!path) {
  1453. ret = -ENOMEM;
  1454. goto fail;
  1455. }
  1456. key.objectid = BTRFS_ORPHAN_OBJECTID;
  1457. key.type = BTRFS_ORPHAN_ITEM_KEY;
  1458. key.offset = location->objectid;
  1459. ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
  1460. btrfs_free_path(path);
  1461. if (ret < 0)
  1462. goto fail;
  1463. if (ret == 0)
  1464. set_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state);
  1465. ret = btrfs_insert_fs_root(fs_info, root);
  1466. if (ret) {
  1467. if (ret == -EEXIST) {
  1468. free_fs_root(root);
  1469. goto again;
  1470. }
  1471. goto fail;
  1472. }
  1473. return root;
  1474. fail:
  1475. free_fs_root(root);
  1476. return ERR_PTR(ret);
  1477. }
  1478. static int btrfs_congested_fn(void *congested_data, int bdi_bits)
  1479. {
  1480. struct btrfs_fs_info *info = (struct btrfs_fs_info *)congested_data;
  1481. int ret = 0;
  1482. struct btrfs_device *device;
  1483. struct backing_dev_info *bdi;
  1484. rcu_read_lock();
  1485. list_for_each_entry_rcu(device, &info->fs_devices->devices, dev_list) {
  1486. if (!device->bdev)
  1487. continue;
  1488. bdi = blk_get_backing_dev_info(device->bdev);
  1489. if (bdi_congested(bdi, bdi_bits)) {
  1490. ret = 1;
  1491. break;
  1492. }
  1493. }
  1494. rcu_read_unlock();
  1495. return ret;
  1496. }
  1497. static int setup_bdi(struct btrfs_fs_info *info, struct backing_dev_info *bdi)
  1498. {
  1499. int err;
  1500. err = bdi_setup_and_register(bdi, "btrfs");
  1501. if (err)
  1502. return err;
  1503. bdi->ra_pages = VM_MAX_READAHEAD * 1024 / PAGE_CACHE_SIZE;
  1504. bdi->congested_fn = btrfs_congested_fn;
  1505. bdi->congested_data = info;
  1506. bdi->capabilities |= BDI_CAP_CGROUP_WRITEBACK;
  1507. return 0;
  1508. }
  1509. /*
  1510. * called by the kthread helper functions to finally call the bio end_io
  1511. * functions. This is where read checksum verification actually happens
  1512. */
  1513. static void end_workqueue_fn(struct btrfs_work *work)
  1514. {
  1515. struct bio *bio;
  1516. struct btrfs_end_io_wq *end_io_wq;
  1517. end_io_wq = container_of(work, struct btrfs_end_io_wq, work);
  1518. bio = end_io_wq->bio;
  1519. bio->bi_error = end_io_wq->error;
  1520. bio->bi_private = end_io_wq->private;
  1521. bio->bi_end_io = end_io_wq->end_io;
  1522. kmem_cache_free(btrfs_end_io_wq_cache, end_io_wq);
  1523. bio_endio(bio);
  1524. }
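/*
 * Background cleaner thread. Each iteration, unless the fs wants the
 * cleaner to sleep, runs delayed iputs, cleans one deleted snapshot,
 * defrags inodes and deletes unused block groups, then sleeps until it
 * is woken (normally by the transaction kthread). On exit it commits
 * any transaction it may have started so nothing is left dirty for the
 * final btree inode eviction.
 */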
  1525. static int cleaner_kthread(void *arg)
  1526. {
  1527. struct btrfs_root *root = arg;
  1528. int again;
  1529. struct btrfs_trans_handle *trans;
  1530. do {
  1531. again = 0;
  1532. /* Make the cleaner go to sleep early. */
  1533. if (btrfs_need_cleaner_sleep(root))
  1534. goto sleep;
  1535. if (!mutex_trylock(&root->fs_info->cleaner_mutex))
  1536. goto sleep;
  1537. /*
1538. * Avoid the case where the fs status changes between the check
1539. * above and the trylock.
  1540. */
  1541. if (btrfs_need_cleaner_sleep(root)) {
  1542. mutex_unlock(&root->fs_info->cleaner_mutex);
  1543. goto sleep;
  1544. }
  1545. btrfs_run_delayed_iputs(root);
  1546. again = btrfs_clean_one_deleted_snapshot(root);
  1547. mutex_unlock(&root->fs_info->cleaner_mutex);
  1548. /*
  1549. * The defragger has dealt with the R/O remount and umount,
  1550. * needn't do anything special here.
  1551. */
  1552. btrfs_run_defrag_inodes(root->fs_info);
  1553. /*
  1554. * Acquires fs_info->delete_unused_bgs_mutex to avoid racing
  1555. * with relocation (btrfs_relocate_chunk) and relocation
  1556. * acquires fs_info->cleaner_mutex (btrfs_relocate_block_group)
  1557. * after acquiring fs_info->delete_unused_bgs_mutex. So we
  1558. * can't hold, nor need to, fs_info->cleaner_mutex when deleting
  1559. * unused block groups.
  1560. */
  1561. btrfs_delete_unused_bgs(root->fs_info);
  1562. sleep:
  1563. if (!try_to_freeze() && !again) {
  1564. set_current_state(TASK_INTERRUPTIBLE);
  1565. if (!kthread_should_stop())
  1566. schedule();
  1567. __set_current_state(TASK_RUNNING);
  1568. }
  1569. } while (!kthread_should_stop());
  1570. /*
  1571. * Transaction kthread is stopped before us and wakes us up.
  1572. * However we might have started a new transaction and COWed some
  1573. * tree blocks when deleting unused block groups for example. So
  1574. * make sure we commit the transaction we started to have a clean
  1575. * shutdown when evicting the btree inode - if it has dirty pages
  1576. * when we do the final iput() on it, eviction will trigger a
  1577. * writeback for it which will fail with null pointer dereferences
  1578. * since work queues and other resources were already released and
  1579. * destroyed by the time the iput/eviction/writeback is made.
  1580. */
  1581. trans = btrfs_attach_transaction(root);
  1582. if (IS_ERR(trans)) {
  1583. if (PTR_ERR(trans) != -ENOENT)
  1584. btrfs_err(root->fs_info,
  1585. "cleaner transaction attach returned %ld",
  1586. PTR_ERR(trans));
  1587. } else {
  1588. int ret;
  1589. ret = btrfs_commit_transaction(trans, root);
  1590. if (ret)
  1591. btrfs_err(root->fs_info,
  1592. "cleaner open transaction commit returned %d",
  1593. ret);
  1594. }
  1595. return 0;
  1596. }
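/*
 * Background transaction thread. Wakes up periodically, and commits the
 * running transaction once it has either been open longer than
 * commit_interval or has already reached the blocked state; otherwise it
 * just pokes the cleaner and goes back to sleep.
 */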
  1597. static int transaction_kthread(void *arg)
  1598. {
  1599. struct btrfs_root *root = arg;
  1600. struct btrfs_trans_handle *trans;
  1601. struct btrfs_transaction *cur;
  1602. u64 transid;
  1603. unsigned long now;
  1604. unsigned long delay;
  1605. bool cannot_commit;
  1606. do {
  1607. cannot_commit = false;
  1608. delay = HZ * root->fs_info->commit_interval;
  1609. mutex_lock(&root->fs_info->transaction_kthread_mutex);
  1610. spin_lock(&root->fs_info->trans_lock);
  1611. cur = root->fs_info->running_transaction;
  1612. if (!cur) {
  1613. spin_unlock(&root->fs_info->trans_lock);
  1614. goto sleep;
  1615. }
  1616. now = get_seconds();
  1617. if (cur->state < TRANS_STATE_BLOCKED &&
  1618. (now < cur->start_time ||
  1619. now - cur->start_time < root->fs_info->commit_interval)) {
  1620. spin_unlock(&root->fs_info->trans_lock);
  1621. delay = HZ * 5;
  1622. goto sleep;
  1623. }
  1624. transid = cur->transid;
  1625. spin_unlock(&root->fs_info->trans_lock);
  1626. /* If the file system is aborted, this will always fail. */
  1627. trans = btrfs_attach_transaction(root);
  1628. if (IS_ERR(trans)) {
  1629. if (PTR_ERR(trans) != -ENOENT)
  1630. cannot_commit = true;
  1631. goto sleep;
  1632. }
  1633. if (transid == trans->transid) {
  1634. btrfs_commit_transaction(trans, root);
  1635. } else {
  1636. btrfs_end_transaction(trans, root);
  1637. }
  1638. sleep:
  1639. wake_up_process(root->fs_info->cleaner_kthread);
  1640. mutex_unlock(&root->fs_info->transaction_kthread_mutex);
  1641. if (unlikely(test_bit(BTRFS_FS_STATE_ERROR,
  1642. &root->fs_info->fs_state)))
  1643. btrfs_cleanup_transaction(root);
  1644. if (!try_to_freeze()) {
  1645. set_current_state(TASK_INTERRUPTIBLE);
  1646. if (!kthread_should_stop() &&
  1647. (!btrfs_transaction_blocked(root->fs_info) ||
  1648. cannot_commit))
  1649. schedule_timeout(delay);
  1650. __set_current_state(TASK_RUNNING);
  1651. }
  1652. } while (!kthread_should_stop());
  1653. return 0;
  1654. }
  1655. /*
  1656. * this will find the highest generation in the array of
1657. * root backups. The index of the newest backup is returned,
  1658. * or -1 if we can't find anything.
  1659. *
  1660. * We check to make sure the array is valid by comparing the
  1661. * generation of the latest root in the array with the generation
  1662. * in the super block. If they don't match we pitch it.
  1663. */
  1664. static int find_newest_super_backup(struct btrfs_fs_info *info, u64 newest_gen)
  1665. {
  1666. u64 cur;
  1667. int newest_index = -1;
  1668. struct btrfs_root_backup *root_backup;
  1669. int i;
  1670. for (i = 0; i < BTRFS_NUM_BACKUP_ROOTS; i++) {
  1671. root_backup = info->super_copy->super_roots + i;
  1672. cur = btrfs_backup_tree_root_gen(root_backup);
  1673. if (cur == newest_gen)
  1674. newest_index = i;
  1675. }
  1676. /* check to see if we actually wrapped around */
  1677. if (newest_index == BTRFS_NUM_BACKUP_ROOTS - 1) {
  1678. root_backup = info->super_copy->super_roots;
  1679. cur = btrfs_backup_tree_root_gen(root_backup);
  1680. if (cur == newest_gen)
  1681. newest_index = 0;
  1682. }
  1683. return newest_index;
  1684. }
  1685. /*
  1686. * find the oldest backup so we know where to store new entries
  1687. * in the backup array. This will set the backup_root_index
  1688. * field in the fs_info struct
  1689. */
  1690. static void find_oldest_super_backup(struct btrfs_fs_info *info,
  1691. u64 newest_gen)
  1692. {
  1693. int newest_index = -1;
  1694. newest_index = find_newest_super_backup(info, newest_gen);
  1695. /* if there was garbage in there, just move along */
  1696. if (newest_index == -1) {
  1697. info->backup_root_index = 0;
  1698. } else {
  1699. info->backup_root_index = (newest_index + 1) % BTRFS_NUM_BACKUP_ROOTS;
  1700. }
  1701. }
  1702. /*
  1703. * copy all the root pointers into the super backup array.
  1704. * this will bump the backup pointer by one when it is
  1705. * done
  1706. */
  1707. static void backup_super_roots(struct btrfs_fs_info *info)
  1708. {
  1709. int next_backup;
  1710. struct btrfs_root_backup *root_backup;
  1711. int last_backup;
  1712. next_backup = info->backup_root_index;
  1713. last_backup = (next_backup + BTRFS_NUM_BACKUP_ROOTS - 1) %
  1714. BTRFS_NUM_BACKUP_ROOTS;
  1715. /*
1716. * just overwrite the last backup if we're at the same generation;
1717. * this happens only at umount
  1718. */
  1719. root_backup = info->super_for_commit->super_roots + last_backup;
  1720. if (btrfs_backup_tree_root_gen(root_backup) ==
  1721. btrfs_header_generation(info->tree_root->node))
  1722. next_backup = last_backup;
  1723. root_backup = info->super_for_commit->super_roots + next_backup;
  1724. /*
  1725. * make sure all of our padding and empty slots get zero filled
  1726. * regardless of which ones we use today
  1727. */
  1728. memset(root_backup, 0, sizeof(*root_backup));
  1729. info->backup_root_index = (next_backup + 1) % BTRFS_NUM_BACKUP_ROOTS;
  1730. btrfs_set_backup_tree_root(root_backup, info->tree_root->node->start);
  1731. btrfs_set_backup_tree_root_gen(root_backup,
  1732. btrfs_header_generation(info->tree_root->node));
  1733. btrfs_set_backup_tree_root_level(root_backup,
  1734. btrfs_header_level(info->tree_root->node));
  1735. btrfs_set_backup_chunk_root(root_backup, info->chunk_root->node->start);
  1736. btrfs_set_backup_chunk_root_gen(root_backup,
  1737. btrfs_header_generation(info->chunk_root->node));
  1738. btrfs_set_backup_chunk_root_level(root_backup,
  1739. btrfs_header_level(info->chunk_root->node));
  1740. btrfs_set_backup_extent_root(root_backup, info->extent_root->node->start);
  1741. btrfs_set_backup_extent_root_gen(root_backup,
  1742. btrfs_header_generation(info->extent_root->node));
  1743. btrfs_set_backup_extent_root_level(root_backup,
  1744. btrfs_header_level(info->extent_root->node));
  1745. /*
  1746. * we might commit during log recovery, which happens before we set
  1747. * the fs_root. Make sure it is valid before we fill it in.
  1748. */
  1749. if (info->fs_root && info->fs_root->node) {
  1750. btrfs_set_backup_fs_root(root_backup,
  1751. info->fs_root->node->start);
  1752. btrfs_set_backup_fs_root_gen(root_backup,
  1753. btrfs_header_generation(info->fs_root->node));
  1754. btrfs_set_backup_fs_root_level(root_backup,
  1755. btrfs_header_level(info->fs_root->node));
  1756. }
  1757. btrfs_set_backup_dev_root(root_backup, info->dev_root->node->start);
  1758. btrfs_set_backup_dev_root_gen(root_backup,
  1759. btrfs_header_generation(info->dev_root->node));
  1760. btrfs_set_backup_dev_root_level(root_backup,
  1761. btrfs_header_level(info->dev_root->node));
  1762. btrfs_set_backup_csum_root(root_backup, info->csum_root->node->start);
  1763. btrfs_set_backup_csum_root_gen(root_backup,
  1764. btrfs_header_generation(info->csum_root->node));
  1765. btrfs_set_backup_csum_root_level(root_backup,
  1766. btrfs_header_level(info->csum_root->node));
  1767. btrfs_set_backup_total_bytes(root_backup,
  1768. btrfs_super_total_bytes(info->super_copy));
  1769. btrfs_set_backup_bytes_used(root_backup,
  1770. btrfs_super_bytes_used(info->super_copy));
  1771. btrfs_set_backup_num_devices(root_backup,
  1772. btrfs_super_num_devices(info->super_copy));
  1773. /*
  1774. * if we don't copy this out to the super_copy, it won't get remembered
  1775. * for the next commit
  1776. */
  1777. memcpy(&info->super_copy->super_roots,
  1778. &info->super_for_commit->super_roots,
  1779. sizeof(*root_backup) * BTRFS_NUM_BACKUP_ROOTS);
  1780. }
  1781. /*
  1782. * this copies info out of the root backup array and back into
  1783. * the in-memory super block. It is meant to help iterate through
  1784. * the array, so you send it the number of backups you've already
  1785. * tried and the last backup index you used.
  1786. *
  1787. * this returns -1 when it has tried all the backups
  1788. */
  1789. static noinline int next_root_backup(struct btrfs_fs_info *info,
  1790. struct btrfs_super_block *super,
  1791. int *num_backups_tried, int *backup_index)
  1792. {
  1793. struct btrfs_root_backup *root_backup;
  1794. int newest = *backup_index;
  1795. if (*num_backups_tried == 0) {
  1796. u64 gen = btrfs_super_generation(super);
  1797. newest = find_newest_super_backup(info, gen);
  1798. if (newest == -1)
  1799. return -1;
  1800. *backup_index = newest;
  1801. *num_backups_tried = 1;
  1802. } else if (*num_backups_tried == BTRFS_NUM_BACKUP_ROOTS) {
  1803. /* we've tried all the backups, all done */
  1804. return -1;
  1805. } else {
  1806. /* jump to the next oldest backup */
  1807. newest = (*backup_index + BTRFS_NUM_BACKUP_ROOTS - 1) %
  1808. BTRFS_NUM_BACKUP_ROOTS;
  1809. *backup_index = newest;
  1810. *num_backups_tried += 1;
  1811. }
  1812. root_backup = super->super_roots + newest;
  1813. btrfs_set_super_generation(super,
  1814. btrfs_backup_tree_root_gen(root_backup));
  1815. btrfs_set_super_root(super, btrfs_backup_tree_root(root_backup));
  1816. btrfs_set_super_root_level(super,
  1817. btrfs_backup_tree_root_level(root_backup));
  1818. btrfs_set_super_bytes_used(super, btrfs_backup_bytes_used(root_backup));
  1819. /*
1820. * fixme: the total bytes and num_devices need to match or we should
1821. * require a fsck
  1822. */
  1823. btrfs_set_super_total_bytes(super, btrfs_backup_total_bytes(root_backup));
  1824. btrfs_set_super_num_devices(super, btrfs_backup_num_devices(root_backup));
  1825. return 0;
  1826. }
  1827. /* helper to cleanup workers */
  1828. static void btrfs_stop_all_workers(struct btrfs_fs_info *fs_info)
  1829. {
  1830. btrfs_destroy_workqueue(fs_info->fixup_workers);
  1831. btrfs_destroy_workqueue(fs_info->delalloc_workers);
  1832. btrfs_destroy_workqueue(fs_info->workers);
  1833. btrfs_destroy_workqueue(fs_info->endio_workers);
  1834. btrfs_destroy_workqueue(fs_info->endio_meta_workers);
  1835. btrfs_destroy_workqueue(fs_info->endio_raid56_workers);
  1836. btrfs_destroy_workqueue(fs_info->endio_repair_workers);
  1837. btrfs_destroy_workqueue(fs_info->rmw_workers);
  1838. btrfs_destroy_workqueue(fs_info->endio_meta_write_workers);
  1839. btrfs_destroy_workqueue(fs_info->endio_write_workers);
  1840. btrfs_destroy_workqueue(fs_info->endio_freespace_worker);
  1841. btrfs_destroy_workqueue(fs_info->submit_workers);
  1842. btrfs_destroy_workqueue(fs_info->delayed_workers);
  1843. btrfs_destroy_workqueue(fs_info->caching_workers);
  1844. btrfs_destroy_workqueue(fs_info->readahead_workers);
  1845. btrfs_destroy_workqueue(fs_info->flush_workers);
  1846. btrfs_destroy_workqueue(fs_info->qgroup_rescan_workers);
  1847. btrfs_destroy_workqueue(fs_info->extent_workers);
  1848. }
  1849. static void free_root_extent_buffers(struct btrfs_root *root)
  1850. {
  1851. if (root) {
  1852. free_extent_buffer(root->node);
  1853. free_extent_buffer(root->commit_root);
  1854. root->node = NULL;
  1855. root->commit_root = NULL;
  1856. }
  1857. }
  1858. /* helper to cleanup tree roots */
  1859. static void free_root_pointers(struct btrfs_fs_info *info, int chunk_root)
  1860. {
  1861. free_root_extent_buffers(info->tree_root);
  1862. free_root_extent_buffers(info->dev_root);
  1863. free_root_extent_buffers(info->extent_root);
  1864. free_root_extent_buffers(info->csum_root);
  1865. free_root_extent_buffers(info->quota_root);
  1866. free_root_extent_buffers(info->uuid_root);
  1867. if (chunk_root)
  1868. free_root_extent_buffers(info->chunk_root);
  1869. }
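/*
 * Drop every fs root still known to fs_info: first everything on the
 * dead_roots list, then whatever remains in the fs_roots radix tree.
 * If the fs is in an error state, the log root tree and pinned extents
 * are cleaned up here as well.
 */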
  1870. void btrfs_free_fs_roots(struct btrfs_fs_info *fs_info)
  1871. {
  1872. int ret;
  1873. struct btrfs_root *gang[8];
  1874. int i;
  1875. while (!list_empty(&fs_info->dead_roots)) {
  1876. gang[0] = list_entry(fs_info->dead_roots.next,
  1877. struct btrfs_root, root_list);
  1878. list_del(&gang[0]->root_list);
  1879. if (test_bit(BTRFS_ROOT_IN_RADIX, &gang[0]->state)) {
  1880. btrfs_drop_and_free_fs_root(fs_info, gang[0]);
  1881. } else {
  1882. free_extent_buffer(gang[0]->node);
  1883. free_extent_buffer(gang[0]->commit_root);
  1884. btrfs_put_fs_root(gang[0]);
  1885. }
  1886. }
  1887. while (1) {
  1888. ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
  1889. (void **)gang, 0,
  1890. ARRAY_SIZE(gang));
  1891. if (!ret)
  1892. break;
  1893. for (i = 0; i < ret; i++)
  1894. btrfs_drop_and_free_fs_root(fs_info, gang[i]);
  1895. }
  1896. if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
  1897. btrfs_free_log_root_tree(NULL, fs_info);
  1898. btrfs_destroy_pinned_extent(fs_info->tree_root,
  1899. fs_info->pinned_extents);
  1900. }
  1901. }
  1902. static void btrfs_init_scrub(struct btrfs_fs_info *fs_info)
  1903. {
  1904. mutex_init(&fs_info->scrub_lock);
  1905. atomic_set(&fs_info->scrubs_running, 0);
  1906. atomic_set(&fs_info->scrub_pause_req, 0);
  1907. atomic_set(&fs_info->scrubs_paused, 0);
  1908. atomic_set(&fs_info->scrub_cancel_req, 0);
  1909. init_waitqueue_head(&fs_info->scrub_pause_wait);
  1910. fs_info->scrub_workers_refcnt = 0;
  1911. }
  1912. static void btrfs_init_balance(struct btrfs_fs_info *fs_info)
  1913. {
  1914. spin_lock_init(&fs_info->balance_lock);
  1915. mutex_init(&fs_info->balance_mutex);
  1916. atomic_set(&fs_info->balance_running, 0);
  1917. atomic_set(&fs_info->balance_pause_req, 0);
  1918. atomic_set(&fs_info->balance_cancel_req, 0);
  1919. fs_info->balance_ctl = NULL;
  1920. init_waitqueue_head(&fs_info->balance_wait_q);
  1921. }
  1922. static void btrfs_init_btree_inode(struct btrfs_fs_info *fs_info,
  1923. struct btrfs_root *tree_root)
  1924. {
  1925. fs_info->btree_inode->i_ino = BTRFS_BTREE_INODE_OBJECTID;
  1926. set_nlink(fs_info->btree_inode, 1);
  1927. /*
1928. * we set the i_size on the btree inode to the max possible offset.
  1929. * the real end of the address space is determined by all of
  1930. * the devices in the system
  1931. */
  1932. fs_info->btree_inode->i_size = OFFSET_MAX;
  1933. fs_info->btree_inode->i_mapping->a_ops = &btree_aops;
  1934. RB_CLEAR_NODE(&BTRFS_I(fs_info->btree_inode)->rb_node);
  1935. extent_io_tree_init(&BTRFS_I(fs_info->btree_inode)->io_tree,
  1936. fs_info->btree_inode->i_mapping);
  1937. BTRFS_I(fs_info->btree_inode)->io_tree.track_uptodate = 0;
  1938. extent_map_tree_init(&BTRFS_I(fs_info->btree_inode)->extent_tree);
  1939. BTRFS_I(fs_info->btree_inode)->io_tree.ops = &btree_extent_io_ops;
  1940. BTRFS_I(fs_info->btree_inode)->root = tree_root;
  1941. memset(&BTRFS_I(fs_info->btree_inode)->location, 0,
  1942. sizeof(struct btrfs_key));
  1943. set_bit(BTRFS_INODE_DUMMY,
  1944. &BTRFS_I(fs_info->btree_inode)->runtime_flags);
  1945. btrfs_insert_inode_hash(fs_info->btree_inode);
  1946. }
  1947. static void btrfs_init_dev_replace_locks(struct btrfs_fs_info *fs_info)
  1948. {
  1949. fs_info->dev_replace.lock_owner = 0;
  1950. atomic_set(&fs_info->dev_replace.nesting_level, 0);
  1951. mutex_init(&fs_info->dev_replace.lock_finishing_cancel_unmount);
  1952. mutex_init(&fs_info->dev_replace.lock_management_lock);
  1953. mutex_init(&fs_info->dev_replace.lock);
  1954. init_waitqueue_head(&fs_info->replace_wait);
  1955. }
  1956. static void btrfs_init_qgroup(struct btrfs_fs_info *fs_info)
  1957. {
  1958. spin_lock_init(&fs_info->qgroup_lock);
  1959. mutex_init(&fs_info->qgroup_ioctl_lock);
  1960. fs_info->qgroup_tree = RB_ROOT;
  1961. fs_info->qgroup_op_tree = RB_ROOT;
  1962. INIT_LIST_HEAD(&fs_info->dirty_qgroups);
  1963. fs_info->qgroup_seq = 1;
  1964. fs_info->quota_enabled = 0;
  1965. fs_info->pending_quota_state = 0;
  1966. fs_info->qgroup_ulist = NULL;
  1967. mutex_init(&fs_info->qgroup_rescan_lock);
  1968. }
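/*
 * Allocate all of the workqueues used by the filesystem. max_active is
 * bounded by thread_pool_size, and the idle thresholds are tuned per
 * queue (see the comments below). Returns -ENOMEM if any of the
 * allocations fails.
 */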
  1969. static int btrfs_init_workqueues(struct btrfs_fs_info *fs_info,
  1970. struct btrfs_fs_devices *fs_devices)
  1971. {
  1972. int max_active = fs_info->thread_pool_size;
  1973. unsigned int flags = WQ_MEM_RECLAIM | WQ_FREEZABLE | WQ_UNBOUND;
  1974. fs_info->workers =
  1975. btrfs_alloc_workqueue("worker", flags | WQ_HIGHPRI,
  1976. max_active, 16);
  1977. fs_info->delalloc_workers =
  1978. btrfs_alloc_workqueue("delalloc", flags, max_active, 2);
  1979. fs_info->flush_workers =
  1980. btrfs_alloc_workqueue("flush_delalloc", flags, max_active, 0);
  1981. fs_info->caching_workers =
  1982. btrfs_alloc_workqueue("cache", flags, max_active, 0);
  1983. /*
  1984. * a higher idle thresh on the submit workers makes it much more
1985. * likely that bios will be sent down in a sane order to the
  1986. * devices
  1987. */
  1988. fs_info->submit_workers =
  1989. btrfs_alloc_workqueue("submit", flags,
  1990. min_t(u64, fs_devices->num_devices,
  1991. max_active), 64);
  1992. fs_info->fixup_workers =
  1993. btrfs_alloc_workqueue("fixup", flags, 1, 0);
  1994. /*
  1995. * endios are largely parallel and should have a very
  1996. * low idle thresh
  1997. */
  1998. fs_info->endio_workers =
  1999. btrfs_alloc_workqueue("endio", flags, max_active, 4);
  2000. fs_info->endio_meta_workers =
  2001. btrfs_alloc_workqueue("endio-meta", flags, max_active, 4);
  2002. fs_info->endio_meta_write_workers =
  2003. btrfs_alloc_workqueue("endio-meta-write", flags, max_active, 2);
  2004. fs_info->endio_raid56_workers =
  2005. btrfs_alloc_workqueue("endio-raid56", flags, max_active, 4);
  2006. fs_info->endio_repair_workers =
  2007. btrfs_alloc_workqueue("endio-repair", flags, 1, 0);
  2008. fs_info->rmw_workers =
  2009. btrfs_alloc_workqueue("rmw", flags, max_active, 2);
  2010. fs_info->endio_write_workers =
  2011. btrfs_alloc_workqueue("endio-write", flags, max_active, 2);
  2012. fs_info->endio_freespace_worker =
  2013. btrfs_alloc_workqueue("freespace-write", flags, max_active, 0);
  2014. fs_info->delayed_workers =
  2015. btrfs_alloc_workqueue("delayed-meta", flags, max_active, 0);
  2016. fs_info->readahead_workers =
  2017. btrfs_alloc_workqueue("readahead", flags, max_active, 2);
  2018. fs_info->qgroup_rescan_workers =
  2019. btrfs_alloc_workqueue("qgroup-rescan", flags, 1, 0);
  2020. fs_info->extent_workers =
  2021. btrfs_alloc_workqueue("extent-refs", flags,
  2022. min_t(u64, fs_devices->num_devices,
  2023. max_active), 8);
  2024. if (!(fs_info->workers && fs_info->delalloc_workers &&
  2025. fs_info->submit_workers && fs_info->flush_workers &&
  2026. fs_info->endio_workers && fs_info->endio_meta_workers &&
  2027. fs_info->endio_meta_write_workers &&
  2028. fs_info->endio_repair_workers &&
  2029. fs_info->endio_write_workers && fs_info->endio_raid56_workers &&
  2030. fs_info->endio_freespace_worker && fs_info->rmw_workers &&
  2031. fs_info->caching_workers && fs_info->readahead_workers &&
  2032. fs_info->fixup_workers && fs_info->delayed_workers &&
  2033. fs_info->extent_workers &&
  2034. fs_info->qgroup_rescan_workers)) {
  2035. return -ENOMEM;
  2036. }
  2037. return 0;
  2038. }
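/*
 * Replay the tree log referenced by the super block. This needs at
 * least one writable device; the log tree root is read at the bytenr
 * recorded in the super block and handed to btrfs_recover_log_trees().
 * For read-only mounts the super block is committed afterwards.
 */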
  2039. static int btrfs_replay_log(struct btrfs_fs_info *fs_info,
  2040. struct btrfs_fs_devices *fs_devices)
  2041. {
  2042. int ret;
  2043. struct btrfs_root *tree_root = fs_info->tree_root;
  2044. struct btrfs_root *log_tree_root;
  2045. struct btrfs_super_block *disk_super = fs_info->super_copy;
  2046. u64 bytenr = btrfs_super_log_root(disk_super);
  2047. if (fs_devices->rw_devices == 0) {
  2048. btrfs_warn(fs_info, "log replay required on RO media");
  2049. return -EIO;
  2050. }
  2051. log_tree_root = btrfs_alloc_root(fs_info);
  2052. if (!log_tree_root)
  2053. return -ENOMEM;
  2054. __setup_root(tree_root->nodesize, tree_root->sectorsize,
  2055. tree_root->stripesize, log_tree_root, fs_info,
  2056. BTRFS_TREE_LOG_OBJECTID);
  2057. log_tree_root->node = read_tree_block(tree_root, bytenr,
  2058. fs_info->generation + 1);
  2059. if (IS_ERR(log_tree_root->node)) {
  2060. btrfs_warn(fs_info, "failed to read log tree");
  2061. ret = PTR_ERR(log_tree_root->node);
  2062. kfree(log_tree_root);
  2063. return ret;
  2064. } else if (!extent_buffer_uptodate(log_tree_root->node)) {
  2065. btrfs_err(fs_info, "failed to read log tree");
  2066. free_extent_buffer(log_tree_root->node);
  2067. kfree(log_tree_root);
  2068. return -EIO;
  2069. }
  2070. /* returns with log_tree_root freed on success */
  2071. ret = btrfs_recover_log_trees(log_tree_root);
  2072. if (ret) {
  2073. btrfs_std_error(tree_root->fs_info, ret,
  2074. "Failed to recover log tree");
  2075. free_extent_buffer(log_tree_root->node);
  2076. kfree(log_tree_root);
  2077. return ret;
  2078. }
  2079. if (fs_info->sb->s_flags & MS_RDONLY) {
  2080. ret = btrfs_commit_super(tree_root);
  2081. if (ret)
  2082. return ret;
  2083. }
  2084. return 0;
  2085. }
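/*
 * Read the remaining well known trees once the tree root is available:
 * the extent, dev and csum trees are mandatory, while the quota and uuid
 * trees are optional (a present quota tree enables quotas, a missing
 * uuid tree is tolerated).
 */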
  2086. static int btrfs_read_roots(struct btrfs_fs_info *fs_info,
  2087. struct btrfs_root *tree_root)
  2088. {
  2089. struct btrfs_root *root;
  2090. struct btrfs_key location;
  2091. int ret;
  2092. location.objectid = BTRFS_EXTENT_TREE_OBJECTID;
  2093. location.type = BTRFS_ROOT_ITEM_KEY;
  2094. location.offset = 0;
  2095. root = btrfs_read_tree_root(tree_root, &location);
  2096. if (IS_ERR(root))
  2097. return PTR_ERR(root);
  2098. set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
  2099. fs_info->extent_root = root;
  2100. location.objectid = BTRFS_DEV_TREE_OBJECTID;
  2101. root = btrfs_read_tree_root(tree_root, &location);
  2102. if (IS_ERR(root))
  2103. return PTR_ERR(root);
  2104. set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
  2105. fs_info->dev_root = root;
  2106. btrfs_init_devices_late(fs_info);
  2107. location.objectid = BTRFS_CSUM_TREE_OBJECTID;
  2108. root = btrfs_read_tree_root(tree_root, &location);
  2109. if (IS_ERR(root))
  2110. return PTR_ERR(root);
  2111. set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
  2112. fs_info->csum_root = root;
  2113. location.objectid = BTRFS_QUOTA_TREE_OBJECTID;
  2114. root = btrfs_read_tree_root(tree_root, &location);
  2115. if (!IS_ERR(root)) {
  2116. set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
  2117. fs_info->quota_enabled = 1;
  2118. fs_info->pending_quota_state = 1;
  2119. fs_info->quota_root = root;
  2120. }
  2121. location.objectid = BTRFS_UUID_TREE_OBJECTID;
  2122. root = btrfs_read_tree_root(tree_root, &location);
  2123. if (IS_ERR(root)) {
  2124. ret = PTR_ERR(root);
  2125. if (ret != -ENOENT)
  2126. return ret;
  2127. } else {
  2128. set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
  2129. fs_info->uuid_root = root;
  2130. }
  2131. return 0;
  2132. }
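/*
 * Mount-time entry point. Allocates and initializes the fs_info
 * infrastructure (counters, locks, workqueues, the btree inode), reads
 * and validates the super block, loads the chunk and tree roots (falling
 * back to the backup roots if needed), reads the remaining trees, starts
 * the cleaner and transaction kthreads, and replays the log tree if one
 * is present.
 */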
  2133. int open_ctree(struct super_block *sb,
  2134. struct btrfs_fs_devices *fs_devices,
  2135. char *options)
  2136. {
  2137. u32 sectorsize;
  2138. u32 nodesize;
  2139. u32 stripesize;
  2140. u64 generation;
  2141. u64 features;
  2142. struct btrfs_key location;
  2143. struct buffer_head *bh;
  2144. struct btrfs_super_block *disk_super;
  2145. struct btrfs_fs_info *fs_info = btrfs_sb(sb);
  2146. struct btrfs_root *tree_root;
  2147. struct btrfs_root *chunk_root;
  2148. int ret;
  2149. int err = -EINVAL;
  2150. int num_backups_tried = 0;
  2151. int backup_index = 0;
  2152. int max_active;
  2153. tree_root = fs_info->tree_root = btrfs_alloc_root(fs_info);
  2154. chunk_root = fs_info->chunk_root = btrfs_alloc_root(fs_info);
  2155. if (!tree_root || !chunk_root) {
  2156. err = -ENOMEM;
  2157. goto fail;
  2158. }
  2159. ret = init_srcu_struct(&fs_info->subvol_srcu);
  2160. if (ret) {
  2161. err = ret;
  2162. goto fail;
  2163. }
  2164. ret = setup_bdi(fs_info, &fs_info->bdi);
  2165. if (ret) {
  2166. err = ret;
  2167. goto fail_srcu;
  2168. }
  2169. ret = percpu_counter_init(&fs_info->dirty_metadata_bytes, 0, GFP_KERNEL);
  2170. if (ret) {
  2171. err = ret;
  2172. goto fail_bdi;
  2173. }
  2174. fs_info->dirty_metadata_batch = PAGE_CACHE_SIZE *
  2175. (1 + ilog2(nr_cpu_ids));
  2176. ret = percpu_counter_init(&fs_info->delalloc_bytes, 0, GFP_KERNEL);
  2177. if (ret) {
  2178. err = ret;
  2179. goto fail_dirty_metadata_bytes;
  2180. }
  2181. ret = percpu_counter_init(&fs_info->bio_counter, 0, GFP_KERNEL);
  2182. if (ret) {
  2183. err = ret;
  2184. goto fail_delalloc_bytes;
  2185. }
  2186. fs_info->btree_inode = new_inode(sb);
  2187. if (!fs_info->btree_inode) {
  2188. err = -ENOMEM;
  2189. goto fail_bio_counter;
  2190. }
  2191. mapping_set_gfp_mask(fs_info->btree_inode->i_mapping, GFP_NOFS);
  2192. INIT_RADIX_TREE(&fs_info->fs_roots_radix, GFP_ATOMIC);
  2193. INIT_RADIX_TREE(&fs_info->buffer_radix, GFP_ATOMIC);
  2194. INIT_LIST_HEAD(&fs_info->trans_list);
  2195. INIT_LIST_HEAD(&fs_info->dead_roots);
  2196. INIT_LIST_HEAD(&fs_info->delayed_iputs);
  2197. INIT_LIST_HEAD(&fs_info->delalloc_roots);
  2198. INIT_LIST_HEAD(&fs_info->caching_block_groups);
  2199. spin_lock_init(&fs_info->delalloc_root_lock);
  2200. spin_lock_init(&fs_info->trans_lock);
  2201. spin_lock_init(&fs_info->fs_roots_radix_lock);
  2202. spin_lock_init(&fs_info->delayed_iput_lock);
  2203. spin_lock_init(&fs_info->defrag_inodes_lock);
  2204. spin_lock_init(&fs_info->free_chunk_lock);
  2205. spin_lock_init(&fs_info->tree_mod_seq_lock);
  2206. spin_lock_init(&fs_info->super_lock);
  2207. spin_lock_init(&fs_info->qgroup_op_lock);
  2208. spin_lock_init(&fs_info->buffer_lock);
  2209. spin_lock_init(&fs_info->unused_bgs_lock);
  2210. rwlock_init(&fs_info->tree_mod_log_lock);
  2211. mutex_init(&fs_info->unused_bg_unpin_mutex);
  2212. mutex_init(&fs_info->delete_unused_bgs_mutex);
  2213. mutex_init(&fs_info->reloc_mutex);
  2214. mutex_init(&fs_info->delalloc_root_mutex);
  2215. seqlock_init(&fs_info->profiles_lock);
  2216. init_rwsem(&fs_info->delayed_iput_sem);
  2217. INIT_LIST_HEAD(&fs_info->dirty_cowonly_roots);
  2218. INIT_LIST_HEAD(&fs_info->space_info);
  2219. INIT_LIST_HEAD(&fs_info->tree_mod_seq_list);
  2220. INIT_LIST_HEAD(&fs_info->unused_bgs);
  2221. btrfs_mapping_init(&fs_info->mapping_tree);
  2222. btrfs_init_block_rsv(&fs_info->global_block_rsv,
  2223. BTRFS_BLOCK_RSV_GLOBAL);
  2224. btrfs_init_block_rsv(&fs_info->delalloc_block_rsv,
  2225. BTRFS_BLOCK_RSV_DELALLOC);
  2226. btrfs_init_block_rsv(&fs_info->trans_block_rsv, BTRFS_BLOCK_RSV_TRANS);
  2227. btrfs_init_block_rsv(&fs_info->chunk_block_rsv, BTRFS_BLOCK_RSV_CHUNK);
  2228. btrfs_init_block_rsv(&fs_info->empty_block_rsv, BTRFS_BLOCK_RSV_EMPTY);
  2229. btrfs_init_block_rsv(&fs_info->delayed_block_rsv,
  2230. BTRFS_BLOCK_RSV_DELOPS);
  2231. atomic_set(&fs_info->nr_async_submits, 0);
  2232. atomic_set(&fs_info->async_delalloc_pages, 0);
  2233. atomic_set(&fs_info->async_submit_draining, 0);
  2234. atomic_set(&fs_info->nr_async_bios, 0);
  2235. atomic_set(&fs_info->defrag_running, 0);
  2236. atomic_set(&fs_info->qgroup_op_seq, 0);
  2237. atomic64_set(&fs_info->tree_mod_seq, 0);
  2238. fs_info->sb = sb;
  2239. fs_info->max_inline = BTRFS_DEFAULT_MAX_INLINE;
  2240. fs_info->metadata_ratio = 0;
  2241. fs_info->defrag_inodes = RB_ROOT;
  2242. fs_info->free_chunk_space = 0;
  2243. fs_info->tree_mod_log = RB_ROOT;
  2244. fs_info->commit_interval = BTRFS_DEFAULT_COMMIT_INTERVAL;
  2245. fs_info->avg_delayed_ref_runtime = NSEC_PER_SEC >> 6; /* div by 64 */
  2246. /* readahead state */
  2247. INIT_RADIX_TREE(&fs_info->reada_tree, GFP_NOFS & ~__GFP_WAIT);
  2248. spin_lock_init(&fs_info->reada_lock);
  2249. fs_info->thread_pool_size = min_t(unsigned long,
  2250. num_online_cpus() + 2, 8);
  2251. INIT_LIST_HEAD(&fs_info->ordered_roots);
  2252. spin_lock_init(&fs_info->ordered_root_lock);
  2253. fs_info->delayed_root = kmalloc(sizeof(struct btrfs_delayed_root),
  2254. GFP_NOFS);
  2255. if (!fs_info->delayed_root) {
  2256. err = -ENOMEM;
  2257. goto fail_iput;
  2258. }
  2259. btrfs_init_delayed_root(fs_info->delayed_root);
  2260. btrfs_init_scrub(fs_info);
  2261. #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
  2262. fs_info->check_integrity_print_mask = 0;
  2263. #endif
  2264. btrfs_init_balance(fs_info);
  2265. btrfs_init_async_reclaim_work(&fs_info->async_reclaim_work);
  2266. sb->s_blocksize = 4096;
  2267. sb->s_blocksize_bits = blksize_bits(4096);
  2268. sb->s_bdi = &fs_info->bdi;
  2269. btrfs_init_btree_inode(fs_info, tree_root);
  2270. spin_lock_init(&fs_info->block_group_cache_lock);
  2271. fs_info->block_group_cache_tree = RB_ROOT;
  2272. fs_info->first_logical_byte = (u64)-1;
  2273. extent_io_tree_init(&fs_info->freed_extents[0],
  2274. fs_info->btree_inode->i_mapping);
  2275. extent_io_tree_init(&fs_info->freed_extents[1],
  2276. fs_info->btree_inode->i_mapping);
  2277. fs_info->pinned_extents = &fs_info->freed_extents[0];
  2278. fs_info->do_barriers = 1;
  2279. mutex_init(&fs_info->ordered_operations_mutex);
  2280. mutex_init(&fs_info->tree_log_mutex);
  2281. mutex_init(&fs_info->chunk_mutex);
  2282. mutex_init(&fs_info->transaction_kthread_mutex);
  2283. mutex_init(&fs_info->cleaner_mutex);
  2284. mutex_init(&fs_info->volume_mutex);
  2285. mutex_init(&fs_info->ro_block_group_mutex);
  2286. init_rwsem(&fs_info->commit_root_sem);
  2287. init_rwsem(&fs_info->cleanup_work_sem);
  2288. init_rwsem(&fs_info->subvol_sem);
  2289. sema_init(&fs_info->uuid_tree_rescan_sem, 1);
  2290. btrfs_init_dev_replace_locks(fs_info);
  2291. btrfs_init_qgroup(fs_info);
  2292. btrfs_init_free_cluster(&fs_info->meta_alloc_cluster);
  2293. btrfs_init_free_cluster(&fs_info->data_alloc_cluster);
  2294. init_waitqueue_head(&fs_info->transaction_throttle);
  2295. init_waitqueue_head(&fs_info->transaction_wait);
  2296. init_waitqueue_head(&fs_info->transaction_blocked_wait);
  2297. init_waitqueue_head(&fs_info->async_submit_wait);
  2298. INIT_LIST_HEAD(&fs_info->pinned_chunks);
  2299. ret = btrfs_alloc_stripe_hash_table(fs_info);
  2300. if (ret) {
  2301. err = ret;
  2302. goto fail_alloc;
  2303. }
  2304. __setup_root(4096, 4096, 4096, tree_root,
  2305. fs_info, BTRFS_ROOT_TREE_OBJECTID);
  2306. invalidate_bdev(fs_devices->latest_bdev);
  2307. /*
  2308. * Read super block and check the signature bytes only
  2309. */
  2310. bh = btrfs_read_dev_super(fs_devices->latest_bdev);
  2311. if (IS_ERR(bh)) {
  2312. err = PTR_ERR(bh);
  2313. goto fail_alloc;
  2314. }
  2315. /*
  2316. * We want to check superblock checksum, the type is stored inside.
  2317. * Pass the whole disk block of size BTRFS_SUPER_INFO_SIZE (4k).
  2318. */
  2319. if (btrfs_check_super_csum(bh->b_data)) {
  2320. printk(KERN_ERR "BTRFS: superblock checksum mismatch\n");
  2321. err = -EINVAL;
  2322. goto fail_alloc;
  2323. }
  2324. /*
  2325. * super_copy is zeroed at allocation time and we never touch the
2326. * following bytes up to INFO_SIZE; the checksum is calculated from
  2327. * the whole block of INFO_SIZE
  2328. */
  2329. memcpy(fs_info->super_copy, bh->b_data, sizeof(*fs_info->super_copy));
  2330. memcpy(fs_info->super_for_commit, fs_info->super_copy,
  2331. sizeof(*fs_info->super_for_commit));
  2332. brelse(bh);
  2333. memcpy(fs_info->fsid, fs_info->super_copy->fsid, BTRFS_FSID_SIZE);
  2334. ret = btrfs_check_super_valid(fs_info, sb->s_flags & MS_RDONLY);
  2335. if (ret) {
  2336. printk(KERN_ERR "BTRFS: superblock contains fatal errors\n");
  2337. err = -EINVAL;
  2338. goto fail_alloc;
  2339. }
  2340. disk_super = fs_info->super_copy;
  2341. if (!btrfs_super_root(disk_super))
  2342. goto fail_alloc;
  2343. /* check FS state, whether FS is broken. */
  2344. if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_ERROR)
  2345. set_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state);
  2346. /*
  2347. * run through our array of backup supers and setup
  2348. * our ring pointer to the oldest one
  2349. */
  2350. generation = btrfs_super_generation(disk_super);
  2351. find_oldest_super_backup(fs_info, generation);
  2352. /*
  2353. * In the long term, we'll store the compression type in the super
  2354. * block, and it'll be used for per file compression control.
  2355. */
  2356. fs_info->compress_type = BTRFS_COMPRESS_ZLIB;
  2357. ret = btrfs_parse_options(tree_root, options);
  2358. if (ret) {
  2359. err = ret;
  2360. goto fail_alloc;
  2361. }
  2362. features = btrfs_super_incompat_flags(disk_super) &
  2363. ~BTRFS_FEATURE_INCOMPAT_SUPP;
  2364. if (features) {
  2365. printk(KERN_ERR "BTRFS: couldn't mount because of "
  2366. "unsupported optional features (%Lx).\n",
  2367. features);
  2368. err = -EINVAL;
  2369. goto fail_alloc;
  2370. }
  2371. /*
2372. * Leafsize and nodesize were always equal; this is only a sanity check.
  2373. */
  2374. if (le32_to_cpu(disk_super->__unused_leafsize) !=
  2375. btrfs_super_nodesize(disk_super)) {
  2376. printk(KERN_ERR "BTRFS: couldn't mount because metadata "
  2377. "blocksizes don't match. node %d leaf %d\n",
  2378. btrfs_super_nodesize(disk_super),
  2379. le32_to_cpu(disk_super->__unused_leafsize));
  2380. err = -EINVAL;
  2381. goto fail_alloc;
  2382. }
  2383. if (btrfs_super_nodesize(disk_super) > BTRFS_MAX_METADATA_BLOCKSIZE) {
  2384. printk(KERN_ERR "BTRFS: couldn't mount because metadata "
  2385. "blocksize (%d) was too large\n",
  2386. btrfs_super_nodesize(disk_super));
  2387. err = -EINVAL;
  2388. goto fail_alloc;
  2389. }
  2390. features = btrfs_super_incompat_flags(disk_super);
  2391. features |= BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF;
  2392. if (tree_root->fs_info->compress_type == BTRFS_COMPRESS_LZO)
  2393. features |= BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO;
  2394. if (features & BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA)
  2395. printk(KERN_INFO "BTRFS: has skinny extents\n");
  2396. /*
  2397. * flag our filesystem as having big metadata blocks if
  2398. * they are bigger than the page size
  2399. */
  2400. if (btrfs_super_nodesize(disk_super) > PAGE_CACHE_SIZE) {
  2401. if (!(features & BTRFS_FEATURE_INCOMPAT_BIG_METADATA))
  2402. printk(KERN_INFO "BTRFS: flagging fs with big metadata feature\n");
  2403. features |= BTRFS_FEATURE_INCOMPAT_BIG_METADATA;
  2404. }
  2405. nodesize = btrfs_super_nodesize(disk_super);
  2406. sectorsize = btrfs_super_sectorsize(disk_super);
  2407. stripesize = btrfs_super_stripesize(disk_super);
  2408. fs_info->dirty_metadata_batch = nodesize * (1 + ilog2(nr_cpu_ids));
  2409. fs_info->delalloc_batch = sectorsize * 512 * (1 + ilog2(nr_cpu_ids));
  2410. /*
  2411. * mixed block groups end up with duplicate but slightly offset
2412. * extent buffers for the same range. This leads to corruption.
  2413. */
  2414. if ((features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS) &&
  2415. (sectorsize != nodesize)) {
  2416. printk(KERN_ERR "BTRFS: unequal leaf/node/sector sizes "
  2417. "are not allowed for mixed block groups on %s\n",
  2418. sb->s_id);
  2419. goto fail_alloc;
  2420. }
  2421. /*
  2422. * Needn't use the lock because there is no other task which will
  2423. * update the flag.
  2424. */
  2425. btrfs_set_super_incompat_flags(disk_super, features);
  2426. features = btrfs_super_compat_ro_flags(disk_super) &
  2427. ~BTRFS_FEATURE_COMPAT_RO_SUPP;
  2428. if (!(sb->s_flags & MS_RDONLY) && features) {
  2429. printk(KERN_ERR "BTRFS: couldn't mount RDWR because of "
  2430. "unsupported option features (%Lx).\n",
  2431. features);
  2432. err = -EINVAL;
  2433. goto fail_alloc;
  2434. }
  2435. max_active = fs_info->thread_pool_size;
  2436. ret = btrfs_init_workqueues(fs_info, fs_devices);
  2437. if (ret) {
  2438. err = ret;
  2439. goto fail_sb_buffer;
  2440. }
  2441. fs_info->bdi.ra_pages *= btrfs_super_num_devices(disk_super);
  2442. fs_info->bdi.ra_pages = max(fs_info->bdi.ra_pages,
  2443. 4 * 1024 * 1024 / PAGE_CACHE_SIZE);
  2444. tree_root->nodesize = nodesize;
  2445. tree_root->sectorsize = sectorsize;
  2446. tree_root->stripesize = stripesize;
  2447. sb->s_blocksize = sectorsize;
  2448. sb->s_blocksize_bits = blksize_bits(sectorsize);
  2449. if (btrfs_super_magic(disk_super) != BTRFS_MAGIC) {
  2450. printk(KERN_ERR "BTRFS: valid FS not found on %s\n", sb->s_id);
  2451. goto fail_sb_buffer;
  2452. }
  2453. if (sectorsize != PAGE_SIZE) {
  2454. printk(KERN_ERR "BTRFS: incompatible sector size (%lu) "
  2455. "found on %s\n", (unsigned long)sectorsize, sb->s_id);
  2456. goto fail_sb_buffer;
  2457. }
  2458. mutex_lock(&fs_info->chunk_mutex);
  2459. ret = btrfs_read_sys_array(tree_root);
  2460. mutex_unlock(&fs_info->chunk_mutex);
  2461. if (ret) {
  2462. printk(KERN_ERR "BTRFS: failed to read the system "
  2463. "array on %s\n", sb->s_id);
  2464. goto fail_sb_buffer;
  2465. }
  2466. generation = btrfs_super_chunk_root_generation(disk_super);
  2467. __setup_root(nodesize, sectorsize, stripesize, chunk_root,
  2468. fs_info, BTRFS_CHUNK_TREE_OBJECTID);
  2469. chunk_root->node = read_tree_block(chunk_root,
  2470. btrfs_super_chunk_root(disk_super),
  2471. generation);
  2472. if (IS_ERR(chunk_root->node) ||
  2473. !extent_buffer_uptodate(chunk_root->node)) {
  2474. printk(KERN_ERR "BTRFS: failed to read chunk root on %s\n",
  2475. sb->s_id);
  2476. if (!IS_ERR(chunk_root->node))
  2477. free_extent_buffer(chunk_root->node);
  2478. chunk_root->node = NULL;
  2479. goto fail_tree_roots;
  2480. }
  2481. btrfs_set_root_node(&chunk_root->root_item, chunk_root->node);
  2482. chunk_root->commit_root = btrfs_root_node(chunk_root);
  2483. read_extent_buffer(chunk_root->node, fs_info->chunk_tree_uuid,
  2484. btrfs_header_chunk_tree_uuid(chunk_root->node), BTRFS_UUID_SIZE);
  2485. ret = btrfs_read_chunk_tree(chunk_root);
  2486. if (ret) {
  2487. printk(KERN_ERR "BTRFS: failed to read chunk tree on %s\n",
  2488. sb->s_id);
  2489. goto fail_tree_roots;
  2490. }
  2491. /*
  2492. * keep the device that is marked to be the target device for the
  2493. * dev_replace procedure
  2494. */
  2495. btrfs_close_extra_devices(fs_devices, 0);
  2496. if (!fs_devices->latest_bdev) {
  2497. printk(KERN_ERR "BTRFS: failed to read devices on %s\n",
  2498. sb->s_id);
  2499. goto fail_tree_roots;
  2500. }
retry_root_backup:
	generation = btrfs_super_generation(disk_super);

	tree_root->node = read_tree_block(tree_root,
					  btrfs_super_root(disk_super),
					  generation);
	if (IS_ERR(tree_root->node) ||
	    !extent_buffer_uptodate(tree_root->node)) {
		printk(KERN_WARNING "BTRFS: failed to read tree root on %s\n",
		       sb->s_id);
		if (!IS_ERR(tree_root->node))
			free_extent_buffer(tree_root->node);
		tree_root->node = NULL;
		goto recovery_tree_root;
	}

	btrfs_set_root_node(&tree_root->root_item, tree_root->node);
	tree_root->commit_root = btrfs_root_node(tree_root);
	btrfs_set_root_refs(&tree_root->root_item, 1);

	ret = btrfs_read_roots(fs_info, tree_root);
	if (ret)
		goto recovery_tree_root;

	fs_info->generation = generation;
	fs_info->last_trans_committed = generation;

	ret = btrfs_recover_balance(fs_info);
	if (ret) {
		printk(KERN_ERR "BTRFS: failed to recover balance\n");
		goto fail_block_groups;
	}

	ret = btrfs_init_dev_stats(fs_info);
	if (ret) {
		printk(KERN_ERR "BTRFS: failed to init dev_stats: %d\n",
		       ret);
		goto fail_block_groups;
	}

	ret = btrfs_init_dev_replace(fs_info);
	if (ret) {
		pr_err("BTRFS: failed to init dev_replace: %d\n", ret);
		goto fail_block_groups;
	}

	btrfs_close_extra_devices(fs_devices, 1);

	ret = btrfs_sysfs_add_fsid(fs_devices, NULL);
	if (ret) {
		pr_err("BTRFS: failed to init sysfs fsid interface: %d\n", ret);
		goto fail_block_groups;
	}

	ret = btrfs_sysfs_add_device(fs_devices);
	if (ret) {
		pr_err("BTRFS: failed to init sysfs device interface: %d\n", ret);
		goto fail_fsdev_sysfs;
	}

	ret = btrfs_sysfs_add_mounted(fs_info);
	if (ret) {
		pr_err("BTRFS: failed to init sysfs interface: %d\n", ret);
		goto fail_fsdev_sysfs;
	}

	ret = btrfs_init_space_info(fs_info);
	if (ret) {
		printk(KERN_ERR "BTRFS: failed to initialize space info: %d\n", ret);
		goto fail_sysfs;
	}

	ret = btrfs_read_block_groups(fs_info->extent_root);
	if (ret) {
		printk(KERN_ERR "BTRFS: failed to read block groups: %d\n", ret);
		goto fail_sysfs;
	}
	fs_info->num_tolerated_disk_barrier_failures =
		btrfs_calc_num_tolerated_disk_barrier_failures(fs_info);
	if (fs_info->fs_devices->missing_devices >
	     fs_info->num_tolerated_disk_barrier_failures &&
	    !(sb->s_flags & MS_RDONLY)) {
		pr_warn("BTRFS: missing devices(%llu) exceeds the limit(%d), writeable mount is not allowed\n",
			fs_info->fs_devices->missing_devices,
			fs_info->num_tolerated_disk_barrier_failures);
		goto fail_sysfs;
	}
	fs_info->cleaner_kthread = kthread_run(cleaner_kthread, tree_root,
					       "btrfs-cleaner");
	if (IS_ERR(fs_info->cleaner_kthread))
		goto fail_sysfs;

	fs_info->transaction_kthread = kthread_run(transaction_kthread,
						   tree_root,
						   "btrfs-transaction");
	if (IS_ERR(fs_info->transaction_kthread))
		goto fail_cleaner;

	if (!btrfs_test_opt(tree_root, SSD) &&
	    !btrfs_test_opt(tree_root, NOSSD) &&
	    !fs_info->fs_devices->rotating) {
		printk(KERN_INFO "BTRFS: detected SSD devices, enabling SSD "
		       "mode\n");
		btrfs_set_opt(fs_info->mount_opt, SSD);
	}

	/*
	 * Mount does not set all options immediately, we can do it now and do
	 * not have to wait for transaction commit
	 */
	btrfs_apply_pending_changes(fs_info);

#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
	if (btrfs_test_opt(tree_root, CHECK_INTEGRITY)) {
		ret = btrfsic_mount(tree_root, fs_devices,
				    btrfs_test_opt(tree_root,
					CHECK_INTEGRITY_INCLUDING_EXTENT_DATA) ?
				    1 : 0,
				    fs_info->check_integrity_print_mask);
		if (ret)
			printk(KERN_WARNING "BTRFS: failed to initialize"
			       " integrity check module %s\n", sb->s_id);
	}
#endif
	ret = btrfs_read_qgroup_config(fs_info);
	if (ret)
		goto fail_trans_kthread;

	/* do not make disk changes in broken FS */
	if (btrfs_super_log_root(disk_super) != 0) {
		ret = btrfs_replay_log(fs_info, fs_devices);
		if (ret) {
			err = ret;
			goto fail_qgroup;
		}
	}

	ret = btrfs_find_orphan_roots(tree_root);
	if (ret)
		goto fail_qgroup;

	if (!(sb->s_flags & MS_RDONLY)) {
		ret = btrfs_cleanup_fs_roots(fs_info);
		if (ret)
			goto fail_qgroup;

		mutex_lock(&fs_info->cleaner_mutex);
		ret = btrfs_recover_relocation(tree_root);
		mutex_unlock(&fs_info->cleaner_mutex);
		if (ret < 0) {
			printk(KERN_WARNING
			       "BTRFS: failed to recover relocation\n");
			err = -EINVAL;
			goto fail_qgroup;
		}
	}
	location.objectid = BTRFS_FS_TREE_OBJECTID;
	location.type = BTRFS_ROOT_ITEM_KEY;
	location.offset = 0;

	fs_info->fs_root = btrfs_read_fs_root_no_name(fs_info, &location);
	if (IS_ERR(fs_info->fs_root)) {
		err = PTR_ERR(fs_info->fs_root);
		goto fail_qgroup;
	}

	if (sb->s_flags & MS_RDONLY)
		return 0;

	down_read(&fs_info->cleanup_work_sem);
	if ((ret = btrfs_orphan_cleanup(fs_info->fs_root)) ||
	    (ret = btrfs_orphan_cleanup(fs_info->tree_root))) {
		up_read(&fs_info->cleanup_work_sem);
		close_ctree(tree_root);
		return ret;
	}
	up_read(&fs_info->cleanup_work_sem);

	ret = btrfs_resume_balance_async(fs_info);
	if (ret) {
		printk(KERN_WARNING "BTRFS: failed to resume balance\n");
		close_ctree(tree_root);
		return ret;
	}

	ret = btrfs_resume_dev_replace_async(fs_info);
	if (ret) {
		pr_warn("BTRFS: failed to resume dev_replace\n");
		close_ctree(tree_root);
		return ret;
	}

	btrfs_qgroup_rescan_resume(fs_info);

	if (!fs_info->uuid_root) {
		pr_info("BTRFS: creating UUID tree\n");
		ret = btrfs_create_uuid_tree(fs_info);
		if (ret) {
			pr_warn("BTRFS: failed to create the UUID tree %d\n",
				ret);
			close_ctree(tree_root);
			return ret;
		}
	} else if (btrfs_test_opt(tree_root, RESCAN_UUID_TREE) ||
		   fs_info->generation !=
				btrfs_super_uuid_tree_generation(disk_super)) {
		pr_info("BTRFS: checking UUID tree\n");
		ret = btrfs_check_uuid_tree(fs_info);
		if (ret) {
			pr_warn("BTRFS: failed to check the UUID tree %d\n",
				ret);
			close_ctree(tree_root);
			return ret;
		}
	} else {
		fs_info->update_uuid_tree_gen = 1;
	}

	fs_info->open = 1;

	return 0;
fail_qgroup:
	btrfs_free_qgroup_config(fs_info);
fail_trans_kthread:
	kthread_stop(fs_info->transaction_kthread);
	btrfs_cleanup_transaction(fs_info->tree_root);
	btrfs_free_fs_roots(fs_info);
fail_cleaner:
	kthread_stop(fs_info->cleaner_kthread);

	/*
	 * make sure we're done with the btree inode before we stop our
	 * kthreads
	 */
	filemap_write_and_wait(fs_info->btree_inode->i_mapping);

fail_sysfs:
	btrfs_sysfs_remove_mounted(fs_info);

fail_fsdev_sysfs:
	btrfs_sysfs_remove_fsid(fs_info->fs_devices);

fail_block_groups:
	btrfs_put_block_group_cache(fs_info);
	btrfs_free_block_groups(fs_info);

fail_tree_roots:
	free_root_pointers(fs_info, 1);
	invalidate_inode_pages2(fs_info->btree_inode->i_mapping);

fail_sb_buffer:
	btrfs_stop_all_workers(fs_info);
fail_alloc:
fail_iput:
	btrfs_mapping_tree_free(&fs_info->mapping_tree);

	iput(fs_info->btree_inode);
fail_bio_counter:
	percpu_counter_destroy(&fs_info->bio_counter);
fail_delalloc_bytes:
	percpu_counter_destroy(&fs_info->delalloc_bytes);
fail_dirty_metadata_bytes:
	percpu_counter_destroy(&fs_info->dirty_metadata_bytes);
fail_bdi:
	bdi_destroy(&fs_info->bdi);
fail_srcu:
	cleanup_srcu_struct(&fs_info->subvol_srcu);
fail:
	btrfs_free_stripe_hash_table(fs_info);
	btrfs_close_devices(fs_info->fs_devices);
	return err;
recovery_tree_root:
	if (!btrfs_test_opt(tree_root, RECOVERY))
		goto fail_tree_roots;

	free_root_pointers(fs_info, 0);

	/* don't use the log in recovery mode, it won't be valid */
	btrfs_set_super_log_root(disk_super, 0);

	/* we can't trust the free space cache either */
	btrfs_set_opt(fs_info->mount_opt, CLEAR_CACHE);

	ret = next_root_backup(fs_info, fs_info->super_copy,
			       &num_backups_tried, &backup_index);
	if (ret == -1)
		goto fail_block_groups;
	goto retry_root_backup;
}
static void btrfs_end_buffer_write_sync(struct buffer_head *bh, int uptodate)
{
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		struct btrfs_device *device = (struct btrfs_device *)
			bh->b_private;

		btrfs_warn_rl_in_rcu(device->dev_root->fs_info,
				     "lost page write due to IO error on %s",
				     rcu_str_deref(device->name));
		/* note, we don't set_buffer_write_io_error because we have
		 * our own ways of dealing with the IO errors
		 */
		clear_buffer_uptodate(bh);
		btrfs_dev_stat_inc_and_print(device, BTRFS_DEV_STAT_WRITE_ERRS);
	}
	unlock_buffer(bh);
	put_bh(bh);
}
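/*
 * Read superblock copy @copy_num from @bdev and do basic validation on it
 * (stored bytenr and magic). On success the buffer_head is returned via
 * @bh_ret and the caller is responsible for releasing it with brelse().
 */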
int btrfs_read_dev_one_super(struct block_device *bdev, int copy_num,
			     struct buffer_head **bh_ret)
{
	struct buffer_head *bh;
	struct btrfs_super_block *super;
	u64 bytenr;

	bytenr = btrfs_sb_offset(copy_num);
	if (bytenr + BTRFS_SUPER_INFO_SIZE >= i_size_read(bdev->bd_inode))
		return -EINVAL;

	bh = __bread(bdev, bytenr / 4096, BTRFS_SUPER_INFO_SIZE);
	/*
	 * If we fail to read from the underlying devices, as of now
	 * the best option we have is to mark it EIO.
	 */
	if (!bh)
		return -EIO;

	super = (struct btrfs_super_block *)bh->b_data;
	if (btrfs_super_bytenr(super) != bytenr ||
	    btrfs_super_magic(super) != BTRFS_MAGIC) {
		brelse(bh);
		return -EINVAL;
	}

	*bh_ret = bh;
	return 0;
}
struct buffer_head *btrfs_read_dev_super(struct block_device *bdev)
{
	struct buffer_head *bh;
	struct buffer_head *latest = NULL;
	struct btrfs_super_block *super;
	int i;
	u64 transid = 0;
	int ret = -EINVAL;

	/* we would like to check all the supers, but that would make
	 * a btrfs mount succeed after a mkfs from a different FS.
	 * So, we need to add a special mount option to scan for
	 * later supers, using BTRFS_SUPER_MIRROR_MAX instead
	 */
	for (i = 0; i < 1; i++) {
		ret = btrfs_read_dev_one_super(bdev, i, &bh);
		if (ret)
			continue;

		super = (struct btrfs_super_block *)bh->b_data;

		if (!latest || btrfs_super_generation(super) > transid) {
			brelse(latest);
			latest = bh;
			transid = btrfs_super_generation(super);
		} else {
			brelse(bh);
		}
	}

	if (!latest)
		return ERR_PTR(ret);

	return latest;
}
/*
 * this should be called twice, once with wait == 0 and
 * once with wait == 1. When wait == 0 is done, all the buffer heads
 * we write are pinned.
 *
 * They are released when wait == 1 is done.
 * max_mirrors must be the same for both runs, and it indicates how
 * many supers on this one device should be written.
 *
 * max_mirrors == 0 means to write them all.
 */
static int write_dev_supers(struct btrfs_device *device,
			    struct btrfs_super_block *sb,
			    int do_barriers, int wait, int max_mirrors)
{
	struct buffer_head *bh;
	int i;
	int ret;
	int errors = 0;
	u32 crc;
	u64 bytenr;

	if (max_mirrors == 0)
		max_mirrors = BTRFS_SUPER_MIRROR_MAX;

	for (i = 0; i < max_mirrors; i++) {
		bytenr = btrfs_sb_offset(i);
		if (bytenr + BTRFS_SUPER_INFO_SIZE >=
		    device->commit_total_bytes)
			break;

		if (wait) {
			bh = __find_get_block(device->bdev, bytenr / 4096,
					      BTRFS_SUPER_INFO_SIZE);
			if (!bh) {
				errors++;
				continue;
			}
			wait_on_buffer(bh);
			if (!buffer_uptodate(bh))
				errors++;

			/* drop our reference */
			brelse(bh);

			/* drop the reference from the wait == 0 run */
			brelse(bh);
			continue;
		} else {
			btrfs_set_super_bytenr(sb, bytenr);

			crc = ~(u32)0;
			crc = btrfs_csum_data((char *)sb +
					      BTRFS_CSUM_SIZE, crc,
					      BTRFS_SUPER_INFO_SIZE -
					      BTRFS_CSUM_SIZE);
			btrfs_csum_final(crc, sb->csum);

			/*
			 * one reference for us, and we leave it for the
			 * caller
			 */
			bh = __getblk(device->bdev, bytenr / 4096,
				      BTRFS_SUPER_INFO_SIZE);
			if (!bh) {
				btrfs_err(device->dev_root->fs_info,
					  "couldn't get super buffer head for bytenr %llu",
					  bytenr);
				errors++;
				continue;
			}

			memcpy(bh->b_data, sb, BTRFS_SUPER_INFO_SIZE);

			/* one reference for submit_bh */
			get_bh(bh);

			set_buffer_uptodate(bh);
			lock_buffer(bh);
			bh->b_end_io = btrfs_end_buffer_write_sync;
			bh->b_private = device;
		}

		/*
		 * we fua the first super.  The others we allow
		 * to go down lazy.
		 */
		if (i == 0)
			ret = btrfsic_submit_bh(WRITE_FUA, bh);
		else
			ret = btrfsic_submit_bh(WRITE_SYNC, bh);
		if (ret)
			errors++;
	}
	return errors < i ? 0 : -1;
}
/*
 * endio for the write_dev_flush, this will wake anyone waiting
 * for the barrier when it is done
 */
static void btrfs_end_empty_barrier(struct bio *bio)
{
	if (bio->bi_private)
		complete(bio->bi_private);
	bio_put(bio);
}
/*
 * trigger flushes for one of the devices.  If you pass wait == 0, the flushes
 * are sent down.  With wait == 1, it waits for the previous flush.
 *
 * any device where the flush fails with eopnotsupp is flagged as not-barrier
 * capable
 */
static int write_dev_flush(struct btrfs_device *device, int wait)
{
	struct bio *bio;
	int ret = 0;

	if (device->nobarriers)
		return 0;

	if (wait) {
		bio = device->flush_bio;
		if (!bio)
			return 0;

		wait_for_completion(&device->flush_wait);

		if (bio->bi_error) {
			ret = bio->bi_error;
			btrfs_dev_stat_inc_and_print(device,
				BTRFS_DEV_STAT_FLUSH_ERRS);
		}

		/* drop the reference from the wait == 0 run */
		bio_put(bio);
		device->flush_bio = NULL;

		return ret;
	}

	/*
	 * one reference for us, and we leave it for the
	 * caller
	 */
	device->flush_bio = NULL;
	bio = btrfs_io_bio_alloc(GFP_NOFS, 0);
	if (!bio)
		return -ENOMEM;

	bio->bi_end_io = btrfs_end_empty_barrier;
	bio->bi_bdev = device->bdev;
	init_completion(&device->flush_wait);
	bio->bi_private = &device->flush_wait;
	device->flush_bio = bio;

	bio_get(bio);
	btrfsic_submit_bio(WRITE_FLUSH, bio);

	return 0;
}
/*
 * send an empty flush down to each device in parallel,
 * then wait for them
 */
static int barrier_all_devices(struct btrfs_fs_info *info)
{
	struct list_head *head;
	struct btrfs_device *dev;
	int errors_send = 0;
	int errors_wait = 0;
	int ret;

	/* send down all the barriers */
	head = &info->fs_devices->devices;
	list_for_each_entry_rcu(dev, head, dev_list) {
		if (dev->missing)
			continue;
		if (!dev->bdev) {
			errors_send++;
			continue;
		}
		if (!dev->in_fs_metadata || !dev->writeable)
			continue;

		ret = write_dev_flush(dev, 0);
		if (ret)
			errors_send++;
	}

	/* wait for all the barriers */
	list_for_each_entry_rcu(dev, head, dev_list) {
		if (dev->missing)
			continue;
		if (!dev->bdev) {
			errors_wait++;
			continue;
		}
		if (!dev->in_fs_metadata || !dev->writeable)
			continue;

		ret = write_dev_flush(dev, 1);
		if (ret)
			errors_wait++;
	}
	if (errors_send > info->num_tolerated_disk_barrier_failures ||
	    errors_wait > info->num_tolerated_disk_barrier_failures)
		return -EIO;
	return 0;
}
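/*
 * Return the smallest number of device failures tolerated by any of the
 * raid profiles set in @flags (a block group profile mask).  Unknown
 * profile bits fall back to 0, i.e. no tolerated failures.
 */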
int btrfs_get_num_tolerated_disk_barrier_failures(u64 flags)
{
	int raid_type;
	int min_tolerated = INT_MAX;

	if ((flags & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0 ||
	    (flags & BTRFS_AVAIL_ALLOC_BIT_SINGLE))
		min_tolerated = min(min_tolerated,
				    btrfs_raid_array[BTRFS_RAID_SINGLE].
				    tolerated_failures);

	for (raid_type = 0; raid_type < BTRFS_NR_RAID_TYPES; raid_type++) {
		if (raid_type == BTRFS_RAID_SINGLE)
			continue;
		if (!(flags & btrfs_raid_group[raid_type]))
			continue;
		min_tolerated = min(min_tolerated,
				    btrfs_raid_array[raid_type].
				    tolerated_failures);
	}

	if (min_tolerated == INT_MAX) {
		pr_warn("BTRFS: unknown raid flag: %llu\n", flags);
		min_tolerated = 0;
	}

	return min_tolerated;
}
int btrfs_calc_num_tolerated_disk_barrier_failures(
	struct btrfs_fs_info *fs_info)
{
	struct btrfs_ioctl_space_info space;
	struct btrfs_space_info *sinfo;
	u64 types[] = {BTRFS_BLOCK_GROUP_DATA,
		       BTRFS_BLOCK_GROUP_SYSTEM,
		       BTRFS_BLOCK_GROUP_METADATA,
		       BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA};
	int i;
	int c;
	int num_tolerated_disk_barrier_failures =
		(int)fs_info->fs_devices->num_devices;

	for (i = 0; i < ARRAY_SIZE(types); i++) {
		struct btrfs_space_info *tmp;

		sinfo = NULL;
		rcu_read_lock();
		list_for_each_entry_rcu(tmp, &fs_info->space_info, list) {
			if (tmp->flags == types[i]) {
				sinfo = tmp;
				break;
			}
		}
		rcu_read_unlock();

		if (!sinfo)
			continue;

		down_read(&sinfo->groups_sem);
		for (c = 0; c < BTRFS_NR_RAID_TYPES; c++) {
			u64 flags;

			if (list_empty(&sinfo->block_groups[c]))
				continue;

			btrfs_get_block_group_info(&sinfo->block_groups[c],
						   &space);
			if (space.total_bytes == 0 || space.used_bytes == 0)
				continue;
			flags = space.flags;

			num_tolerated_disk_barrier_failures = min(
				num_tolerated_disk_barrier_failures,
				btrfs_get_num_tolerated_disk_barrier_failures(
					flags));
		}
		up_read(&sinfo->groups_sem);
	}

	return num_tolerated_disk_barrier_failures;
}
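/*
 * Write the superblock (and its mirrors, up to @max_mirrors) to every
 * writeable device of the filesystem, optionally issuing flush/FUA
 * barriers first.  Up to btrfs_super_num_devices() - 1 failing devices
 * are tolerated; anything beyond that is reported through
 * btrfs_std_error() and -EIO is returned.
 */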
static int write_all_supers(struct btrfs_root *root, int max_mirrors)
{
	struct list_head *head;
	struct btrfs_device *dev;
	struct btrfs_super_block *sb;
	struct btrfs_dev_item *dev_item;
	int ret;
	int do_barriers;
	int max_errors;
	int total_errors = 0;
	u64 flags;

	do_barriers = !btrfs_test_opt(root, NOBARRIER);
	backup_super_roots(root->fs_info);

	sb = root->fs_info->super_for_commit;
	dev_item = &sb->dev_item;

	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
	head = &root->fs_info->fs_devices->devices;
	max_errors = btrfs_super_num_devices(root->fs_info->super_copy) - 1;

	if (do_barriers) {
		ret = barrier_all_devices(root->fs_info);
		if (ret) {
			mutex_unlock(
				&root->fs_info->fs_devices->device_list_mutex);
			btrfs_std_error(root->fs_info, ret,
				"errors while submitting device barriers.");
			return ret;
		}
	}

	list_for_each_entry_rcu(dev, head, dev_list) {
		if (!dev->bdev) {
			total_errors++;
			continue;
		}
		if (!dev->in_fs_metadata || !dev->writeable)
			continue;

		btrfs_set_stack_device_generation(dev_item, 0);
		btrfs_set_stack_device_type(dev_item, dev->type);
		btrfs_set_stack_device_id(dev_item, dev->devid);
		btrfs_set_stack_device_total_bytes(dev_item,
						   dev->commit_total_bytes);
		btrfs_set_stack_device_bytes_used(dev_item,
						  dev->commit_bytes_used);
		btrfs_set_stack_device_io_align(dev_item, dev->io_align);
		btrfs_set_stack_device_io_width(dev_item, dev->io_width);
		btrfs_set_stack_device_sector_size(dev_item, dev->sector_size);
		memcpy(dev_item->uuid, dev->uuid, BTRFS_UUID_SIZE);
		memcpy(dev_item->fsid, dev->fs_devices->fsid, BTRFS_UUID_SIZE);

		flags = btrfs_super_flags(sb);
		btrfs_set_super_flags(sb, flags | BTRFS_HEADER_FLAG_WRITTEN);

		ret = write_dev_supers(dev, sb, do_barriers, 0, max_mirrors);
		if (ret)
			total_errors++;
	}
	if (total_errors > max_errors) {
		btrfs_err(root->fs_info, "%d errors while writing supers",
			  total_errors);
		mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);

		/* FUA is masked off if unsupported and can't be the reason */
		btrfs_std_error(root->fs_info, -EIO,
			"%d errors while writing supers", total_errors);
		return -EIO;
	}

	total_errors = 0;
	list_for_each_entry_rcu(dev, head, dev_list) {
		if (!dev->bdev)
			continue;
		if (!dev->in_fs_metadata || !dev->writeable)
			continue;

		ret = write_dev_supers(dev, sb, do_barriers, 1, max_mirrors);
		if (ret)
			total_errors++;
	}
	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
	if (total_errors > max_errors) {
		btrfs_std_error(root->fs_info, -EIO,
			"%d errors while writing supers", total_errors);
		return -EIO;
	}
	return 0;
}
int write_ctree_super(struct btrfs_trans_handle *trans,
		      struct btrfs_root *root, int max_mirrors)
{
	return write_all_supers(root, max_mirrors);
}
/* Drop a fs root from the radix tree and free it. */
void btrfs_drop_and_free_fs_root(struct btrfs_fs_info *fs_info,
				 struct btrfs_root *root)
{
	spin_lock(&fs_info->fs_roots_radix_lock);
	radix_tree_delete(&fs_info->fs_roots_radix,
			  (unsigned long)root->root_key.objectid);
	spin_unlock(&fs_info->fs_roots_radix_lock);

	if (btrfs_root_refs(&root->root_item) == 0)
		synchronize_srcu(&fs_info->subvol_srcu);

	if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
		btrfs_free_log(NULL, root);

	if (root->free_ino_pinned)
		__btrfs_remove_free_space_cache(root->free_ino_pinned);
	if (root->free_ino_ctl)
		__btrfs_remove_free_space_cache(root->free_ino_ctl);
	free_fs_root(root);
}
static void free_fs_root(struct btrfs_root *root)
{
	iput(root->ino_cache_inode);
	WARN_ON(!RB_EMPTY_ROOT(&root->inode_tree));
	btrfs_free_block_rsv(root, root->orphan_block_rsv);
	root->orphan_block_rsv = NULL;
	if (root->anon_dev)
		free_anon_bdev(root->anon_dev);
	if (root->subv_writers)
		btrfs_free_subvolume_writers(root->subv_writers);
	free_extent_buffer(root->node);
	free_extent_buffer(root->commit_root);
	kfree(root->free_ino_ctl);
	kfree(root->free_ino_pinned);
	kfree(root->name);
	btrfs_put_fs_root(root);
}

void btrfs_free_fs_root(struct btrfs_root *root)
{
	free_fs_root(root);
}
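/*
 * Walk all fs roots in the radix tree and run orphan cleanup on each one.
 * Roots are looked up in batches under SRCU and grabbed before the lock
 * is dropped; roots that are already on dead_roots (refs == 0) are
 * skipped.
 */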
int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info)
{
	u64 root_objectid = 0;
	struct btrfs_root *gang[8];
	int i = 0;
	int err = 0;
	unsigned int ret = 0;
	int index;

	while (1) {
		index = srcu_read_lock(&fs_info->subvol_srcu);
		ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
					     (void **)gang, root_objectid,
					     ARRAY_SIZE(gang));
		if (!ret) {
			srcu_read_unlock(&fs_info->subvol_srcu, index);
			break;
		}
		root_objectid = gang[ret - 1]->root_key.objectid + 1;

		for (i = 0; i < ret; i++) {
			/* Avoid grabbing roots that are on dead_roots */
			if (btrfs_root_refs(&gang[i]->root_item) == 0) {
				gang[i] = NULL;
				continue;
			}
			/* grab all the search results for later use */
			gang[i] = btrfs_grab_fs_root(gang[i]);
		}
		srcu_read_unlock(&fs_info->subvol_srcu, index);

		for (i = 0; i < ret; i++) {
			if (!gang[i])
				continue;
			root_objectid = gang[i]->root_key.objectid;
			err = btrfs_orphan_cleanup(gang[i]);
			if (err)
				break;
			btrfs_put_fs_root(gang[i]);
		}
		root_objectid++;
	}

	/* release the uncleaned roots due to error */
	for (; i < ret; i++) {
		if (gang[i])
			btrfs_put_fs_root(gang[i]);
	}
	return err;
}
int btrfs_commit_super(struct btrfs_root *root)
{
	struct btrfs_trans_handle *trans;

	mutex_lock(&root->fs_info->cleaner_mutex);
	btrfs_run_delayed_iputs(root);
	mutex_unlock(&root->fs_info->cleaner_mutex);
	wake_up_process(root->fs_info->cleaner_kthread);

	/* wait until ongoing cleanup work is done */
	down_write(&root->fs_info->cleanup_work_sem);
	up_write(&root->fs_info->cleanup_work_sem);

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans))
		return PTR_ERR(trans);
	return btrfs_commit_transaction(trans, root);
}
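/*
 * Unmount path: stop background work (uuid scan, balance, dev-replace,
 * scrub, defrag), commit the super on a writeable mount, stop the cleaner
 * and transaction kthreads, and then release all in-memory state.
 */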
void close_ctree(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret;

	fs_info->closing = 1;
	smp_mb();

	/* wait for the uuid_scan task to finish */
	down(&fs_info->uuid_tree_rescan_sem);
	/* avoid complaints from lockdep et al., set sem back to initial state */
	up(&fs_info->uuid_tree_rescan_sem);

	/* pause restriper - we want to resume on mount */
	btrfs_pause_balance(fs_info);

	btrfs_dev_replace_suspend_for_unmount(fs_info);

	btrfs_scrub_cancel(fs_info);

	/* wait for any defraggers to finish */
	wait_event(fs_info->transaction_wait,
		   (atomic_read(&fs_info->defrag_running) == 0));

	/* clear out the rbtree of defraggable inodes */
	btrfs_cleanup_defrag_inodes(fs_info);

	cancel_work_sync(&fs_info->async_reclaim_work);

	if (!(fs_info->sb->s_flags & MS_RDONLY)) {
		/*
		 * If the cleaner thread is stopped and there are
		 * block groups queued for removal, the deletion will be
		 * skipped when we quit the cleaner thread.
		 */
		btrfs_delete_unused_bgs(root->fs_info);

		ret = btrfs_commit_super(root);
		if (ret)
			btrfs_err(fs_info, "commit super ret %d", ret);
	}

	if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
		btrfs_error_commit_super(root);

	kthread_stop(fs_info->transaction_kthread);
	kthread_stop(fs_info->cleaner_kthread);

	fs_info->closing = 2;
	smp_mb();

	btrfs_free_qgroup_config(fs_info);

	if (percpu_counter_sum(&fs_info->delalloc_bytes)) {
		btrfs_info(fs_info, "at unmount delalloc count %lld",
			   percpu_counter_sum(&fs_info->delalloc_bytes));
	}

	btrfs_sysfs_remove_mounted(fs_info);
	btrfs_sysfs_remove_fsid(fs_info->fs_devices);

	btrfs_free_fs_roots(fs_info);

	btrfs_put_block_group_cache(fs_info);

	btrfs_free_block_groups(fs_info);

	/*
	 * we must make sure there are no read requests submitted after we
	 * stop all the workers.
	 */
	invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
	btrfs_stop_all_workers(fs_info);

	fs_info->open = 0;
	free_root_pointers(fs_info, 1);

	iput(fs_info->btree_inode);

#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
	if (btrfs_test_opt(root, CHECK_INTEGRITY))
		btrfsic_unmount(root, fs_info->fs_devices);
#endif

	btrfs_close_devices(fs_info->fs_devices);
	btrfs_mapping_tree_free(&fs_info->mapping_tree);

	percpu_counter_destroy(&fs_info->dirty_metadata_bytes);
	percpu_counter_destroy(&fs_info->delalloc_bytes);
	percpu_counter_destroy(&fs_info->bio_counter);
	bdi_destroy(&fs_info->bdi);
	cleanup_srcu_struct(&fs_info->subvol_srcu);

	btrfs_free_stripe_hash_table(fs_info);

	__btrfs_free_block_rsv(root->orphan_block_rsv);
	root->orphan_block_rsv = NULL;

	lock_chunks(root);
	while (!list_empty(&fs_info->pinned_chunks)) {
		struct extent_map *em;

		em = list_first_entry(&fs_info->pinned_chunks,
				      struct extent_map, list);
		list_del_init(&em->list);
		free_extent_map(em);
	}
	unlock_chunks(root);
}
int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid,
			  int atomic)
{
	int ret;
	struct inode *btree_inode = buf->pages[0]->mapping->host;

	ret = extent_buffer_uptodate(buf);
	if (!ret)
		return ret;

	ret = verify_parent_transid(&BTRFS_I(btree_inode)->io_tree, buf,
				    parent_transid, atomic);
	if (ret == -EAGAIN)
		return ret;
	return !ret;
}

int btrfs_set_buffer_uptodate(struct extent_buffer *buf)
{
	return set_extent_buffer_uptodate(buf);
}
void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
{
	struct btrfs_root *root;
	u64 transid = btrfs_header_generation(buf);
	int was_dirty;

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
	/*
	 * This is a fast path so only do this check if we have sanity tests
	 * enabled.  Normal people shouldn't be marking dummy buffers as dirty
	 * outside of the sanity tests.
	 */
	if (unlikely(test_bit(EXTENT_BUFFER_DUMMY, &buf->bflags)))
		return;
#endif
	root = BTRFS_I(buf->pages[0]->mapping->host)->root;
	btrfs_assert_tree_locked(buf);
	if (transid != root->fs_info->generation)
		WARN(1, KERN_CRIT "btrfs transid mismatch buffer %llu, "
		       "found %llu running %llu\n",
			buf->start, transid, root->fs_info->generation);
	was_dirty = set_extent_buffer_dirty(buf);
	if (!was_dirty)
		__percpu_counter_add(&root->fs_info->dirty_metadata_bytes,
				     buf->len,
				     root->fs_info->dirty_metadata_batch);
#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
	if (btrfs_header_level(buf) == 0 && check_leaf(root, buf)) {
		btrfs_print_leaf(root, buf);
		ASSERT(0);
	}
#endif
}
static void __btrfs_btree_balance_dirty(struct btrfs_root *root,
					int flush_delayed)
{
	/*
	 * looks as though older kernels can get into trouble with
	 * this code, they end up stuck in balance_dirty_pages forever
	 */
	int ret;

	if (current->flags & PF_MEMALLOC)
		return;

	if (flush_delayed)
		btrfs_balance_delayed_items(root);

	ret = percpu_counter_compare(&root->fs_info->dirty_metadata_bytes,
				     BTRFS_DIRTY_METADATA_THRESH);
	if (ret > 0) {
		balance_dirty_pages_ratelimited(
				   root->fs_info->btree_inode->i_mapping);
	}
	return;
}

void btrfs_btree_balance_dirty(struct btrfs_root *root)
{
	__btrfs_btree_balance_dirty(root, 1);
}

void btrfs_btree_balance_dirty_nodelay(struct btrfs_root *root)
{
	__btrfs_btree_balance_dirty(root, 0);
}

int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid)
{
	struct btrfs_root *root = BTRFS_I(buf->pages[0]->mapping->host)->root;
	return btree_read_extent_buffer_pages(root, buf, 0, parent_transid);
}
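/*
 * Basic sanity checks on the superblock copy held in fs_info->super_copy,
 * run once at mount time.  Clearly invalid values (bad tree levels, sizes,
 * fsid mismatch, out-of-bounds sys_chunk_array) fail with -EINVAL; merely
 * suspicious values only produce a warning.
 */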
static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
				   int read_only)
{
	struct btrfs_super_block *sb = fs_info->super_copy;
	int ret = 0;

	if (btrfs_super_root_level(sb) >= BTRFS_MAX_LEVEL) {
		printk(KERN_ERR "BTRFS: tree_root level too big: %d >= %d\n",
				btrfs_super_root_level(sb), BTRFS_MAX_LEVEL);
		ret = -EINVAL;
	}
	if (btrfs_super_chunk_root_level(sb) >= BTRFS_MAX_LEVEL) {
		printk(KERN_ERR "BTRFS: chunk_root level too big: %d >= %d\n",
				btrfs_super_chunk_root_level(sb), BTRFS_MAX_LEVEL);
		ret = -EINVAL;
	}
	if (btrfs_super_log_root_level(sb) >= BTRFS_MAX_LEVEL) {
		printk(KERN_ERR "BTRFS: log_root level too big: %d >= %d\n",
				btrfs_super_log_root_level(sb), BTRFS_MAX_LEVEL);
		ret = -EINVAL;
	}

	/*
	 * The common minimum.  We don't know if we can trust the nodesize and
	 * sectorsize items yet, they'll be verified later.  Issue just a
	 * warning.
	 */
	if (!IS_ALIGNED(btrfs_super_root(sb), 4096))
		printk(KERN_WARNING "BTRFS: tree_root block unaligned: %llu\n",
				btrfs_super_root(sb));
	if (!IS_ALIGNED(btrfs_super_chunk_root(sb), 4096))
		printk(KERN_WARNING "BTRFS: chunk_root block unaligned: %llu\n",
				btrfs_super_chunk_root(sb));
	if (!IS_ALIGNED(btrfs_super_log_root(sb), 4096))
		printk(KERN_WARNING "BTRFS: log_root block unaligned: %llu\n",
				btrfs_super_log_root(sb));

	/*
	 * Check the lower bound, the alignment and other constraints are
	 * checked later.
	 */
	if (btrfs_super_nodesize(sb) < 4096) {
		printk(KERN_ERR "BTRFS: nodesize too small: %u < 4096\n",
				btrfs_super_nodesize(sb));
		ret = -EINVAL;
	}
	if (btrfs_super_sectorsize(sb) < 4096) {
		printk(KERN_ERR "BTRFS: sectorsize too small: %u < 4096\n",
				btrfs_super_sectorsize(sb));
		ret = -EINVAL;
	}

	if (memcmp(fs_info->fsid, sb->dev_item.fsid, BTRFS_UUID_SIZE) != 0) {
		printk(KERN_ERR "BTRFS: dev_item UUID does not match fsid: %pU != %pU\n",
				fs_info->fsid, sb->dev_item.fsid);
		ret = -EINVAL;
	}

	/*
	 * Hint to catch really bogus numbers, bitflips or so.  More exact
	 * checks are done later.
	 */
	if (btrfs_super_num_devices(sb) > (1UL << 31))
		printk(KERN_WARNING "BTRFS: suspicious number of devices: %llu\n",
				btrfs_super_num_devices(sb));
	if (btrfs_super_num_devices(sb) == 0) {
		printk(KERN_ERR "BTRFS: number of devices is 0\n");
		ret = -EINVAL;
	}

	if (btrfs_super_bytenr(sb) != BTRFS_SUPER_INFO_OFFSET) {
		printk(KERN_ERR "BTRFS: super offset mismatch %llu != %u\n",
				btrfs_super_bytenr(sb), BTRFS_SUPER_INFO_OFFSET);
		ret = -EINVAL;
	}

	/*
	 * Obvious sys_chunk_array corruptions, it must hold at least one key
	 * and one chunk
	 */
	if (btrfs_super_sys_array_size(sb) > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE) {
		printk(KERN_ERR "BTRFS: system chunk array too big %u > %u\n",
				btrfs_super_sys_array_size(sb),
				BTRFS_SYSTEM_CHUNK_ARRAY_SIZE);
		ret = -EINVAL;
	}
	if (btrfs_super_sys_array_size(sb) < sizeof(struct btrfs_disk_key)
			+ sizeof(struct btrfs_chunk)) {
		printk(KERN_ERR "BTRFS: system chunk array too small %u < %zu\n",
				btrfs_super_sys_array_size(sb),
				sizeof(struct btrfs_disk_key)
				+ sizeof(struct btrfs_chunk));
		ret = -EINVAL;
	}

	/*
	 * The generation is a global counter, we'll trust it more than the
	 * others but it's still possible that it's the one that's wrong.
	 */
	if (btrfs_super_generation(sb) < btrfs_super_chunk_root_generation(sb))
		printk(KERN_WARNING
			"BTRFS: suspicious: generation < chunk_root_generation: %llu < %llu\n",
			btrfs_super_generation(sb),
			btrfs_super_chunk_root_generation(sb));
	if (btrfs_super_generation(sb) < btrfs_super_cache_generation(sb)
	    && btrfs_super_cache_generation(sb) != (u64)-1)
		printk(KERN_WARNING
			"BTRFS: suspicious: generation < cache_generation: %llu < %llu\n",
			btrfs_super_generation(sb),
			btrfs_super_cache_generation(sb));

	return ret;
}
static void btrfs_error_commit_super(struct btrfs_root *root)
{
	mutex_lock(&root->fs_info->cleaner_mutex);
	btrfs_run_delayed_iputs(root);
	mutex_unlock(&root->fs_info->cleaner_mutex);

	down_write(&root->fs_info->cleanup_work_sem);
	up_write(&root->fs_info->cleanup_work_sem);

	/* cleanup FS via transaction */
	btrfs_cleanup_transaction(root);
}
static void btrfs_destroy_ordered_extents(struct btrfs_root *root)
{
	struct btrfs_ordered_extent *ordered;

	spin_lock(&root->ordered_extent_lock);
	/*
	 * This will just short circuit the ordered completion stuff which will
	 * make sure the ordered extent gets properly cleaned up.
	 */
	list_for_each_entry(ordered, &root->ordered_extents,
			    root_extent_list)
		set_bit(BTRFS_ORDERED_IOERR, &ordered->flags);
	spin_unlock(&root->ordered_extent_lock);
}
static void btrfs_destroy_all_ordered_extents(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root;
	struct list_head splice;

	INIT_LIST_HEAD(&splice);

	spin_lock(&fs_info->ordered_root_lock);
	list_splice_init(&fs_info->ordered_roots, &splice);
	while (!list_empty(&splice)) {
		root = list_first_entry(&splice, struct btrfs_root,
					ordered_root);
		list_move_tail(&root->ordered_root,
			       &fs_info->ordered_roots);
		spin_unlock(&fs_info->ordered_root_lock);
		btrfs_destroy_ordered_extents(root);

		cond_resched();
		spin_lock(&fs_info->ordered_root_lock);
	}
	spin_unlock(&fs_info->ordered_root_lock);
}
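/*
 * Throw away every delayed ref still queued on @trans without running it.
 * Heads with must_insert_reserved set get their extent range pinned so the
 * reserved space can still be reclaimed later.
 */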
static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
				      struct btrfs_root *root)
{
	struct rb_node *node;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_delayed_ref_node *ref;
	int ret = 0;

	delayed_refs = &trans->delayed_refs;

	spin_lock(&delayed_refs->lock);
	if (atomic_read(&delayed_refs->num_entries) == 0) {
		spin_unlock(&delayed_refs->lock);
		btrfs_info(root->fs_info, "delayed_refs has NO entry");
		return ret;
	}

	while ((node = rb_first(&delayed_refs->href_root)) != NULL) {
		struct btrfs_delayed_ref_head *head;
		struct btrfs_delayed_ref_node *tmp;
		bool pin_bytes = false;

		head = rb_entry(node, struct btrfs_delayed_ref_head,
				href_node);
		if (!mutex_trylock(&head->mutex)) {
			atomic_inc(&head->node.refs);
			spin_unlock(&delayed_refs->lock);

			mutex_lock(&head->mutex);
			mutex_unlock(&head->mutex);
			btrfs_put_delayed_ref(&head->node);
			spin_lock(&delayed_refs->lock);
			continue;
		}
		spin_lock(&head->lock);
		list_for_each_entry_safe_reverse(ref, tmp, &head->ref_list,
						 list) {
			ref->in_tree = 0;
			list_del(&ref->list);
			atomic_dec(&delayed_refs->num_entries);
			btrfs_put_delayed_ref(ref);
		}
		if (head->must_insert_reserved)
			pin_bytes = true;
		btrfs_free_delayed_extent_op(head->extent_op);
		delayed_refs->num_heads--;
		if (head->processing == 0)
			delayed_refs->num_heads_ready--;
		atomic_dec(&delayed_refs->num_entries);
		head->node.in_tree = 0;
		rb_erase(&head->href_node, &delayed_refs->href_root);
		spin_unlock(&head->lock);
		spin_unlock(&delayed_refs->lock);
		mutex_unlock(&head->mutex);

		if (pin_bytes)
			btrfs_pin_extent(root, head->node.bytenr,
					 head->node.num_bytes, 1);
		btrfs_put_delayed_ref(&head->node);
		cond_resched();
		spin_lock(&delayed_refs->lock);
	}

	spin_unlock(&delayed_refs->lock);

	return ret;
}
static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root)
{
	struct btrfs_inode *btrfs_inode;
	struct list_head splice;

	INIT_LIST_HEAD(&splice);

	spin_lock(&root->delalloc_lock);
	list_splice_init(&root->delalloc_inodes, &splice);

	while (!list_empty(&splice)) {
		btrfs_inode = list_first_entry(&splice, struct btrfs_inode,
					       delalloc_inodes);

		list_del_init(&btrfs_inode->delalloc_inodes);
		clear_bit(BTRFS_INODE_IN_DELALLOC_LIST,
			  &btrfs_inode->runtime_flags);
		spin_unlock(&root->delalloc_lock);

		btrfs_invalidate_inodes(btrfs_inode->root);

		spin_lock(&root->delalloc_lock);
	}

	spin_unlock(&root->delalloc_lock);
}
static void btrfs_destroy_all_delalloc_inodes(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root;
	struct list_head splice;

	INIT_LIST_HEAD(&splice);

	spin_lock(&fs_info->delalloc_root_lock);
	list_splice_init(&fs_info->delalloc_roots, &splice);
	while (!list_empty(&splice)) {
		root = list_first_entry(&splice, struct btrfs_root,
					delalloc_root);
		list_del_init(&root->delalloc_root);
		root = btrfs_grab_fs_root(root);
		BUG_ON(!root);
		spin_unlock(&fs_info->delalloc_root_lock);

		btrfs_destroy_delalloc_inodes(root);
		btrfs_put_fs_root(root);

		spin_lock(&fs_info->delalloc_root_lock);
	}
	spin_unlock(&fs_info->delalloc_root_lock);
}
static int btrfs_destroy_marked_extents(struct btrfs_root *root,
					struct extent_io_tree *dirty_pages,
					int mark)
{
	int ret;
	struct extent_buffer *eb;
	u64 start = 0;
	u64 end;

	while (1) {
		ret = find_first_extent_bit(dirty_pages, start, &start, &end,
					    mark, NULL);
		if (ret)
			break;

		clear_extent_bits(dirty_pages, start, end, mark, GFP_NOFS);
		while (start <= end) {
			eb = btrfs_find_tree_block(root->fs_info, start);
			start += root->nodesize;
			if (!eb)
				continue;
			wait_on_extent_buffer_writeback(eb);

			if (test_and_clear_bit(EXTENT_BUFFER_DIRTY,
					       &eb->bflags))
				clear_extent_buffer_dirty(eb);
			free_extent_buffer_stale(eb);
		}
	}

	return ret;
}
static int btrfs_destroy_pinned_extent(struct btrfs_root *root,
				       struct extent_io_tree *pinned_extents)
{
	struct extent_io_tree *unpin;
	u64 start;
	u64 end;
	int ret;
	bool loop = true;

	unpin = pinned_extents;
again:
	while (1) {
		ret = find_first_extent_bit(unpin, 0, &start, &end,
					    EXTENT_DIRTY, NULL);
		if (ret)
			break;

		clear_extent_dirty(unpin, start, end, GFP_NOFS);
		btrfs_error_unpin_extent_range(root, start, end);
		cond_resched();
	}

	if (loop) {
		if (unpin == &root->fs_info->freed_extents[0])
			unpin = &root->fs_info->freed_extents[1];
		else
			unpin = &root->fs_info->freed_extents[0];
		loop = false;
		goto again;
	}

	return 0;
}
void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans,
				   struct btrfs_root *root)
{
	btrfs_destroy_delayed_refs(cur_trans, root);

	cur_trans->state = TRANS_STATE_COMMIT_START;
	wake_up(&root->fs_info->transaction_blocked_wait);

	cur_trans->state = TRANS_STATE_UNBLOCKED;
	wake_up(&root->fs_info->transaction_wait);

	btrfs_destroy_delayed_inodes(root);
	btrfs_assert_delayed_root_empty(root);

	btrfs_destroy_marked_extents(root, &cur_trans->dirty_pages,
				     EXTENT_DIRTY);
	btrfs_destroy_pinned_extent(root,
				    root->fs_info->pinned_extents);

	cur_trans->state = TRANS_STATE_COMPLETED;
	wake_up(&cur_trans->commit_wait);

	/*
	memset(cur_trans, 0, sizeof(*cur_trans));
	kmem_cache_free(btrfs_transaction_cachep, cur_trans);
	*/
}
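/*
 * Abort-time cleanup: walk fs_info->trans_list and tear each transaction
 * down.  Transactions that already reached COMMIT_START are simply waited
 * for; the running transaction is marked COMMIT_DOING and drained of
 * writers before btrfs_cleanup_one_transaction() is called on it.
 */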
static int btrfs_cleanup_transaction(struct btrfs_root *root)
{
	struct btrfs_transaction *t;

	mutex_lock(&root->fs_info->transaction_kthread_mutex);

	spin_lock(&root->fs_info->trans_lock);
	while (!list_empty(&root->fs_info->trans_list)) {
		t = list_first_entry(&root->fs_info->trans_list,
				     struct btrfs_transaction, list);
		if (t->state >= TRANS_STATE_COMMIT_START) {
			atomic_inc(&t->use_count);
			spin_unlock(&root->fs_info->trans_lock);
			btrfs_wait_for_commit(root, t->transid);
			btrfs_put_transaction(t);
			spin_lock(&root->fs_info->trans_lock);
			continue;
		}
		if (t == root->fs_info->running_transaction) {
			t->state = TRANS_STATE_COMMIT_DOING;
			spin_unlock(&root->fs_info->trans_lock);
			/*
			 * We wait for 0 num_writers since we don't hold a trans
			 * handle open currently for this transaction.
			 */
			wait_event(t->writer_wait,
				   atomic_read(&t->num_writers) == 0);
		} else {
			spin_unlock(&root->fs_info->trans_lock);
		}
		btrfs_cleanup_one_transaction(t, root);

		spin_lock(&root->fs_info->trans_lock);
		if (t == root->fs_info->running_transaction)
			root->fs_info->running_transaction = NULL;
		list_del_init(&t->list);
		spin_unlock(&root->fs_info->trans_lock);

		btrfs_put_transaction(t);
		trace_btrfs_transaction_commit(root);
		spin_lock(&root->fs_info->trans_lock);
	}
	spin_unlock(&root->fs_info->trans_lock);
	btrfs_destroy_all_ordered_extents(root->fs_info);
	btrfs_destroy_delayed_inodes(root);
	btrfs_assert_delayed_root_empty(root);
	btrfs_destroy_pinned_extent(root, root->fs_info->pinned_extents);
	btrfs_destroy_all_delalloc_inodes(root->fs_info);
	mutex_unlock(&root->fs_info->transaction_kthread_mutex);

	return 0;
}
static const struct extent_io_ops btree_extent_io_ops = {
	.readpage_end_io_hook = btree_readpage_end_io_hook,
	.readpage_io_failed_hook = btree_io_failed_hook,
	.submit_bio_hook = btree_submit_bio_hook,
	/* note we're sharing with inode.c for the merge bio hook */
	.merge_bio_hook = btrfs_merge_bio_hook,
};