extent-tree.c

  1. /*
  2. * Copyright (C) 2007 Oracle. All rights reserved.
  3. *
  4. * This program is free software; you can redistribute it and/or
  5. * modify it under the terms of the GNU General Public
  6. * License v2 as published by the Free Software Foundation.
  7. *
  8. * This program is distributed in the hope that it will be useful,
  9. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  10. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  11. * General Public License for more details.
  12. *
  13. * You should have received a copy of the GNU General Public
  14. * License along with this program; if not, write to the
  15. * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
  16. * Boston, MA 021110-1307, USA.
  17. */
  18. #include <linux/sched.h>
  19. #include <linux/pagemap.h>
  20. #include <linux/writeback.h>
  21. #include <linux/blkdev.h>
  22. #include "hash.h"
  23. #include "crc32c.h"
  24. #include "ctree.h"
  25. #include "disk-io.h"
  26. #include "print-tree.h"
  27. #include "transaction.h"
  28. #include "volumes.h"
  29. #include "locking.h"
  30. #include "ref-cache.h"
  31. static int finish_current_insert(struct btrfs_trans_handle *trans, struct
  32. btrfs_root *extent_root);
  33. static int del_pending_extents(struct btrfs_trans_handle *trans, struct
  34. btrfs_root *extent_root);
  35. static struct btrfs_block_group_cache *
  36. __btrfs_find_block_group(struct btrfs_root *root,
  37. struct btrfs_block_group_cache *hint,
  38. u64 search_start, int data, int owner);
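/*
 * take fs_info->alloc_mutex for roots that don't already run under it.
 * The extent, chunk and dev roots are skipped, since the allocator paths
 * that modify those trees are expected to hold the mutex already.
 */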
  39. void maybe_lock_mutex(struct btrfs_root *root)
  40. {
  41. if (root != root->fs_info->extent_root &&
  42. root != root->fs_info->chunk_root &&
  43. root != root->fs_info->dev_root) {
  44. mutex_lock(&root->fs_info->alloc_mutex);
  45. }
  46. }
  47. void maybe_unlock_mutex(struct btrfs_root *root)
  48. {
  49. if (root != root->fs_info->extent_root &&
  50. root != root->fs_info->chunk_root &&
  51. root != root->fs_info->dev_root) {
  52. mutex_unlock(&root->fs_info->alloc_mutex);
  53. }
  54. }
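/* return true if the block group's flags contain every bit set in 'bits' */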
  55. static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
  56. {
  57. return (cache->flags & bits) == bits;
  58. }
  59. /*
  60. * this adds the block group to the fs_info rb tree for the block group
  61. * cache
  62. */
  63. int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
  64. struct btrfs_block_group_cache *block_group)
  65. {
  66. struct rb_node **p;
  67. struct rb_node *parent = NULL;
  68. struct btrfs_block_group_cache *cache;
  69. spin_lock(&info->block_group_cache_lock);
  70. p = &info->block_group_cache_tree.rb_node;
  71. while (*p) {
  72. parent = *p;
  73. cache = rb_entry(parent, struct btrfs_block_group_cache,
  74. cache_node);
  75. if (block_group->key.objectid < cache->key.objectid) {
  76. p = &(*p)->rb_left;
  77. } else if (block_group->key.objectid > cache->key.objectid) {
  78. p = &(*p)->rb_right;
  79. } else {
  80. spin_unlock(&info->block_group_cache_lock);
  81. return -EEXIST;
  82. }
  83. }
  84. rb_link_node(&block_group->cache_node, parent, p);
  85. rb_insert_color(&block_group->cache_node,
  86. &info->block_group_cache_tree);
  87. spin_unlock(&info->block_group_cache_lock);
  88. return 0;
  89. }
  90. /*
  91. * This will return the block group at or after bytenr if contains is 0, else
  92. * it will return the block group that contains the bytenr
  93. */
  94. static struct btrfs_block_group_cache *
  95. block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
  96. int contains)
  97. {
  98. struct btrfs_block_group_cache *cache, *ret = NULL;
  99. struct rb_node *n;
  100. u64 end, start;
  101. spin_lock(&info->block_group_cache_lock);
  102. n = info->block_group_cache_tree.rb_node;
  103. while (n) {
  104. cache = rb_entry(n, struct btrfs_block_group_cache,
  105. cache_node);
  106. end = cache->key.objectid + cache->key.offset - 1;
  107. start = cache->key.objectid;
  108. if (bytenr < start) {
  109. if (!contains && (!ret || start < ret->key.objectid))
  110. ret = cache;
  111. n = n->rb_left;
  112. } else if (bytenr > start) {
  113. if (contains && bytenr <= end) {
  114. ret = cache;
  115. break;
  116. }
  117. n = n->rb_right;
  118. } else {
  119. ret = cache;
  120. break;
  121. }
  122. }
  123. spin_unlock(&info->block_group_cache_lock);
  124. return ret;
  125. }
  126. /*
  127. * this is only called by cache_block_group.  Since we could have freed extents,
  128. * we need to check the pinned_extents for any extents that can't be used yet,
  129. * since their free space won't be released until the transaction commits.
  130. */
  131. static int add_new_free_space(struct btrfs_block_group_cache *block_group,
  132. struct btrfs_fs_info *info, u64 start, u64 end)
  133. {
  134. u64 extent_start, extent_end, size;
  135. int ret;
  136. while (start < end) {
  137. ret = find_first_extent_bit(&info->pinned_extents, start,
  138. &extent_start, &extent_end,
  139. EXTENT_DIRTY);
  140. if (ret)
  141. break;
  142. if (extent_start == start) {
  143. start = extent_end + 1;
  144. } else if (extent_start > start && extent_start < end) {
  145. size = extent_start - start;
  146. ret = btrfs_add_free_space(block_group, start, size);
  147. BUG_ON(ret);
  148. start = extent_end + 1;
  149. } else {
  150. break;
  151. }
  152. }
  153. if (start < end) {
  154. size = end - start;
  155. ret = btrfs_add_free_space(block_group, start, size);
  156. BUG_ON(ret);
  157. }
  158. return 0;
  159. }
  160. static int cache_block_group(struct btrfs_root *root,
  161. struct btrfs_block_group_cache *block_group)
  162. {
  163. struct btrfs_path *path;
  164. int ret = 0;
  165. struct btrfs_key key;
  166. struct extent_buffer *leaf;
  167. int slot;
  168. u64 last = 0;
  169. u64 first_free;
  170. int found = 0;
  171. if (!block_group)
  172. return 0;
  173. root = root->fs_info->extent_root;
  174. if (block_group->cached)
  175. return 0;
  176. path = btrfs_alloc_path();
  177. if (!path)
  178. return -ENOMEM;
  179. path->reada = 2;
  180. /*
  181. * we get into deadlocks with paths held by callers of this function.
  182. * since the alloc_mutex is protecting things right now, just
  183. * skip the locking here
  184. */
  185. path->skip_locking = 1;
  186. first_free = max_t(u64, block_group->key.objectid,
  187. BTRFS_SUPER_INFO_OFFSET + BTRFS_SUPER_INFO_SIZE);
  188. key.objectid = block_group->key.objectid;
  189. key.offset = 0;
  190. btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
  191. ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
  192. if (ret < 0)
  193. goto err;
  194. ret = btrfs_previous_item(root, path, 0, BTRFS_EXTENT_ITEM_KEY);
  195. if (ret < 0)
  196. goto err;
  197. if (ret == 0) {
  198. leaf = path->nodes[0];
  199. btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
  200. if (key.objectid + key.offset > first_free)
  201. first_free = key.objectid + key.offset;
  202. }
  203. while(1) {
  204. leaf = path->nodes[0];
  205. slot = path->slots[0];
  206. if (slot >= btrfs_header_nritems(leaf)) {
  207. ret = btrfs_next_leaf(root, path);
  208. if (ret < 0)
  209. goto err;
  210. if (ret == 0)
  211. continue;
  212. else
  213. break;
  214. }
  215. btrfs_item_key_to_cpu(leaf, &key, slot);
  216. if (key.objectid < block_group->key.objectid)
  217. goto next;
  218. if (key.objectid >= block_group->key.objectid +
  219. block_group->key.offset)
  220. break;
  221. if (btrfs_key_type(&key) == BTRFS_EXTENT_ITEM_KEY) {
  222. if (!found) {
  223. last = first_free;
  224. found = 1;
  225. }
  226. add_new_free_space(block_group, root->fs_info, last,
  227. key.objectid);
  228. last = key.objectid + key.offset;
  229. }
  230. next:
  231. path->slots[0]++;
  232. }
  233. if (!found)
  234. last = first_free;
  235. add_new_free_space(block_group, root->fs_info, last,
  236. block_group->key.objectid +
  237. block_group->key.offset);
  238. block_group->cached = 1;
  239. ret = 0;
  240. err:
  241. btrfs_free_path(path);
  242. return ret;
  243. }
  244. /*
  245. * return the block group that starts at or after bytenr
  246. */
  247. struct btrfs_block_group_cache *btrfs_lookup_first_block_group(struct
  248. btrfs_fs_info *info,
  249. u64 bytenr)
  250. {
  251. struct btrfs_block_group_cache *cache;
  252. cache = block_group_cache_tree_search(info, bytenr, 0);
  253. return cache;
  254. }
  255. /*
  256. * return the block group that contains the given bytenr
  257. */
  258. struct btrfs_block_group_cache *btrfs_lookup_block_group(struct
  259. btrfs_fs_info *info,
  260. u64 bytenr)
  261. {
  262. struct btrfs_block_group_cache *cache;
  263. cache = block_group_cache_tree_search(info, bytenr, 1);
  264. return cache;
  265. }
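/*
 * walk the block groups starting with *cache_ret looking for 'num' free
 * bytes at or after 'search_start'.  On success *start_ret is set to the
 * start of the free space and 0 is returned, otherwise -ENOSPC.
 */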
  266. static int noinline find_free_space(struct btrfs_root *root,
  267. struct btrfs_block_group_cache **cache_ret,
  268. u64 *start_ret, u64 num, int data)
  269. {
  270. int ret;
  271. struct btrfs_block_group_cache *cache = *cache_ret;
  272. struct btrfs_free_space *info = NULL;
  273. u64 last;
  274. u64 total_fs_bytes;
  275. u64 search_start = *start_ret;
  276. WARN_ON(!mutex_is_locked(&root->fs_info->alloc_mutex));
  277. total_fs_bytes = btrfs_super_total_bytes(&root->fs_info->super_copy);
  278. if (!cache)
  279. goto out;
  280. last = max(search_start, cache->key.objectid);
  281. again:
  282. ret = cache_block_group(root, cache);
  283. if (ret)
  284. goto out;
  285. if (cache->ro || !block_group_bits(cache, data))
  286. goto new_group;
  287. info = btrfs_find_free_space(cache, last, num);
  288. if (info) {
  289. *start_ret = info->offset;
  290. return 0;
  291. }
  292. new_group:
  293. last = cache->key.objectid + cache->key.offset;
  294. cache = btrfs_lookup_first_block_group(root->fs_info, last);
  295. if (!cache || cache->key.objectid >= total_fs_bytes)
  296. goto out;
  297. *cache_ret = cache;
  298. goto again;
  299. out:
  300. return -ENOSPC;
  301. }
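/* scale 'num' by factor/10; a factor of 10 returns num unchanged */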
  302. static u64 div_factor(u64 num, int factor)
  303. {
  304. if (factor == 10)
  305. return num;
  306. num *= factor;
  307. do_div(num, 10);
  308. return num;
  309. }
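/* find the space_info whose flags match exactly, or NULL if none exists */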
  310. static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
  311. u64 flags)
  312. {
  313. struct list_head *head = &info->space_info;
  314. struct list_head *cur;
  315. struct btrfs_space_info *found;
  316. list_for_each(cur, head) {
  317. found = list_entry(cur, struct btrfs_space_info, list);
  318. if (found->flags == flags)
  319. return found;
  320. }
  321. return NULL;
  322. }
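/*
 * pick a block group for an allocation.  The group at search_start and the
 * hint group are tried first; otherwise the groups of the matching
 * space_info are scanned in bytenr order for one with enough room (under
 * 90% full on the first metadata pass), wrapping around and finally
 * retrying with the relaxed threshold before giving up.
 */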
  323. static struct btrfs_block_group_cache *
  324. __btrfs_find_block_group(struct btrfs_root *root,
  325. struct btrfs_block_group_cache *hint,
  326. u64 search_start, int data, int owner)
  327. {
  328. struct btrfs_block_group_cache *cache;
  329. struct btrfs_block_group_cache *found_group = NULL;
  330. struct btrfs_fs_info *info = root->fs_info;
  331. struct btrfs_space_info *sinfo;
  332. u64 used;
  333. u64 last = 0;
  334. u64 free_check;
  335. int full_search = 0;
  336. int factor = 10;
  337. int wrapped = 0;
  338. if (data & BTRFS_BLOCK_GROUP_METADATA)
  339. factor = 9;
  340. if (search_start) {
  341. struct btrfs_block_group_cache *shint;
  342. shint = btrfs_lookup_first_block_group(info, search_start);
  343. if (shint && block_group_bits(shint, data) && !shint->ro) {
  344. spin_lock(&shint->lock);
  345. used = btrfs_block_group_used(&shint->item);
  346. if (used + shint->pinned <
  347. div_factor(shint->key.offset, factor)) {
  348. spin_unlock(&shint->lock);
  349. return shint;
  350. }
  351. spin_unlock(&shint->lock);
  352. }
  353. }
  354. if (hint && !hint->ro && block_group_bits(hint, data)) {
  355. spin_lock(&hint->lock);
  356. used = btrfs_block_group_used(&hint->item);
  357. if (used + hint->pinned <
  358. div_factor(hint->key.offset, factor)) {
  359. spin_unlock(&hint->lock);
  360. return hint;
  361. }
  362. spin_unlock(&hint->lock);
  363. last = hint->key.objectid + hint->key.offset;
  364. } else {
  365. if (hint)
  366. last = max(hint->key.objectid, search_start);
  367. else
  368. last = search_start;
  369. }
  370. sinfo = __find_space_info(root->fs_info, data);
  371. if (!sinfo)
  372. goto found;
  373. again:
  374. while(1) {
  375. struct list_head *l;
  376. cache = NULL;
  377. spin_lock(&sinfo->lock);
  378. list_for_each(l, &sinfo->block_groups) {
  379. struct btrfs_block_group_cache *entry;
  380. entry = list_entry(l, struct btrfs_block_group_cache,
  381. list);
  382. if ((entry->key.objectid >= last) &&
  383. (!cache || (entry->key.objectid <
  384. cache->key.objectid)))
  385. cache = entry;
  386. }
  387. spin_unlock(&sinfo->lock);
  388. if (!cache)
  389. break;
  390. spin_lock(&cache->lock);
  391. last = cache->key.objectid + cache->key.offset;
  392. used = btrfs_block_group_used(&cache->item);
  393. if (!cache->ro && block_group_bits(cache, data)) {
  394. free_check = div_factor(cache->key.offset, factor);
  395. if (used + cache->pinned < free_check) {
  396. found_group = cache;
  397. spin_unlock(&cache->lock);
  398. goto found;
  399. }
  400. }
  401. spin_unlock(&cache->lock);
  402. cond_resched();
  403. }
  404. if (!wrapped) {
  405. last = search_start;
  406. wrapped = 1;
  407. goto again;
  408. }
  409. if (!full_search && factor < 10) {
  410. last = search_start;
  411. full_search = 1;
  412. factor = 10;
  413. goto again;
  414. }
  415. found:
  416. return found_group;
  417. }
  418. struct btrfs_block_group_cache *btrfs_find_block_group(struct btrfs_root *root,
  419. struct btrfs_block_group_cache
  420. *hint, u64 search_start,
  421. int data, int owner)
  422. {
  423. struct btrfs_block_group_cache *ret;
  424. ret = __btrfs_find_block_group(root, hint, search_start, data, owner);
  425. return ret;
  426. }
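/*
 * hash (root_objectid, ref_generation, owner, owner_offset) into the 64 bit
 * value stored in the offset of BTRFS_EXTENT_REF_KEY keys.  owner and
 * owner_offset only contribute for file extents (owner >=
 * BTRFS_FIRST_FREE_OBJECTID); tree block refs hash only root and generation.
 */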
  427. static u64 hash_extent_ref(u64 root_objectid, u64 ref_generation,
  428. u64 owner, u64 owner_offset)
  429. {
  430. u32 high_crc = ~(u32)0;
  431. u32 low_crc = ~(u32)0;
  432. __le64 lenum;
  433. lenum = cpu_to_le64(root_objectid);
  434. high_crc = btrfs_crc32c(high_crc, &lenum, sizeof(lenum));
  435. lenum = cpu_to_le64(ref_generation);
  436. low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));
  437. if (owner >= BTRFS_FIRST_FREE_OBJECTID) {
  438. lenum = cpu_to_le64(owner);
  439. low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));
  440. lenum = cpu_to_le64(owner_offset);
  441. low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));
  442. }
  443. return ((u64)high_crc << 32) | (u64)low_crc;
  444. }
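/*
 * compare an on-disk btrfs_extent_ref with the stack copy we are looking
 * for.  Tree block refs (objectid == 0) only compare the first two u64
 * fields (root and generation); file extent refs compare the whole struct.
 */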
  445. static int match_extent_ref(struct extent_buffer *leaf,
  446. struct btrfs_extent_ref *disk_ref,
  447. struct btrfs_extent_ref *cpu_ref)
  448. {
  449. int ret;
  450. int len;
  451. if (cpu_ref->objectid)
  452. len = sizeof(*cpu_ref);
  453. else
  454. len = 2 * sizeof(u64);
  455. ret = memcmp_extent_buffer(leaf, cpu_ref, (unsigned long)disk_ref,
  456. len);
  457. return ret == 0;
  458. }
  459. /* simple helper to search for an existing extent at a given offset */
  460. int btrfs_lookup_extent(struct btrfs_root *root, struct btrfs_path *path,
  461. u64 start, u64 len)
  462. {
  463. int ret;
  464. struct btrfs_key key;
  465. maybe_lock_mutex(root);
  466. key.objectid = start;
  467. key.offset = len;
  468. btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
  469. ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, path,
  470. 0, 0);
  471. maybe_unlock_mutex(root);
  472. return ret;
  473. }
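/*
 * find the backref item on extent 'bytenr' matching the given (root,
 * generation, owner, offset) tuple, walking forward through hash
 * collisions.  With 'del' set the search is done with the path prepared
 * for deleting the item once it is found.
 */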
  474. static int noinline lookup_extent_backref(struct btrfs_trans_handle *trans,
  475. struct btrfs_root *root,
  476. struct btrfs_path *path, u64 bytenr,
  477. u64 root_objectid,
  478. u64 ref_generation, u64 owner,
  479. u64 owner_offset, int del)
  480. {
  481. u64 hash;
  482. struct btrfs_key key;
  483. struct btrfs_key found_key;
  484. struct btrfs_extent_ref ref;
  485. struct extent_buffer *leaf;
  486. struct btrfs_extent_ref *disk_ref;
  487. int ret;
  488. int ret2;
  489. btrfs_set_stack_ref_root(&ref, root_objectid);
  490. btrfs_set_stack_ref_generation(&ref, ref_generation);
  491. btrfs_set_stack_ref_objectid(&ref, owner);
  492. btrfs_set_stack_ref_offset(&ref, owner_offset);
  493. hash = hash_extent_ref(root_objectid, ref_generation, owner,
  494. owner_offset);
  495. key.offset = hash;
  496. key.objectid = bytenr;
  497. key.type = BTRFS_EXTENT_REF_KEY;
  498. while (1) {
  499. ret = btrfs_search_slot(trans, root, &key, path,
  500. del ? -1 : 0, del);
  501. if (ret < 0)
  502. goto out;
  503. leaf = path->nodes[0];
  504. if (ret != 0) {
  505. u32 nritems = btrfs_header_nritems(leaf);
  506. if (path->slots[0] >= nritems) {
  507. ret2 = btrfs_next_leaf(root, path);
  508. if (ret2)
  509. goto out;
  510. leaf = path->nodes[0];
  511. }
  512. btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
  513. if (found_key.objectid != bytenr ||
  514. found_key.type != BTRFS_EXTENT_REF_KEY)
  515. goto out;
  516. key.offset = found_key.offset;
  517. if (del) {
  518. btrfs_release_path(root, path);
  519. continue;
  520. }
  521. }
  522. disk_ref = btrfs_item_ptr(path->nodes[0],
  523. path->slots[0],
  524. struct btrfs_extent_ref);
  525. if (match_extent_ref(path->nodes[0], disk_ref, &ref)) {
  526. ret = 0;
  527. goto out;
  528. }
  529. btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
  530. key.offset = found_key.offset + 1;
  531. btrfs_release_path(root, path);
  532. }
  533. out:
  534. return ret;
  535. }
  536. /*
  537. * Back reference rules. Back refs have three main goals:
  538. *
  539. * 1) differentiate between all holders of references to an extent so that
  540. * when a reference is dropped we can make sure it was a valid reference
  541. * before freeing the extent.
  542. *
  543. * 2) Provide enough information to quickly find the holders of an extent
  544. * if we notice a given block is corrupted or bad.
  545. *
  546. * 3) Make it easy to migrate blocks for FS shrinking or storage pool
  547. * maintenance. This is actually the same as #2, but with a slightly
  548. * different use case.
  549. *
  550. * File extents can be referenced by:
  551. *
  552. * - multiple snapshots, subvolumes, or different generations in one subvol
  553. * - different files inside a single subvolume (in theory, not implemented yet)
  554. * - different offsets inside a file (bookend extents in file.c)
  555. *
  556. * The extent ref structure has fields for:
  557. *
  558. * - Objectid of the subvolume root
  559. * - Generation number of the tree holding the reference
  560. * - objectid of the file holding the reference
  561. * - offset in the file corresponding to the key holding the reference
  562. *
  563. * When a file extent is allocated the fields are filled in:
  564. * (root_key.objectid, trans->transid, inode objectid, offset in file)
  565. *
  566. * When a leaf is cow'd new references are added for every file extent found
  567. * in the leaf. It looks the same as the create case, but trans->transid
  568. * will be different when the block is cow'd.
  569. *
  570. * (root_key.objectid, trans->transid, inode objectid, offset in file)
  571. *
  572. * When a file extent is removed either during snapshot deletion or file
  573. * truncation, the corresponding back reference is found
  574. * by searching for:
  575. *
  576. * (btrfs_header_owner(leaf), btrfs_header_generation(leaf),
  577. * inode objectid, offset in file)
  578. *
  579. * Btree extents can be referenced by:
  580. *
  581. * - Different subvolumes
  582. * - Different generations of the same subvolume
  583. *
  584. * Storing sufficient information for a full reverse mapping of a btree
  585. * block would require storing the lowest key of the block in the backref,
  586. * and it would require updating that lowest key either before write out or
  587. * every time it changed. Instead, the objectid of the lowest key is stored
  588. * along with the level of the tree block. This provides a hint
  589. * about where in the btree the block can be found. Searches through the
  590. * btree only need to look for a pointer to that block, so they stop one
  591. * level higher than the level recorded in the backref.
  592. *
  593. * Some btrees do not do reference counting on their extents. These
  594. * include the extent tree and the tree of tree roots. Backrefs for these
  595. * trees always have a generation of zero.
  596. *
  597. * When a tree block is created, back references are inserted:
  598. *
  599. * (root->root_key.objectid, trans->transid or zero, level, lowest_key_objectid)
  600. *
  601. * When a tree block is cow'd in a reference counted root,
  602. * new back references are added for all the blocks it points to.
  603. * These are of the form (trans->transid will have increased since creation):
  604. *
  605. * (root->root_key.objectid, trans->transid, level, lowest_key_objectid)
  606. *
  607. * Because the lowest_key_objectid and the level are just hints
  608. * they are not used when backrefs are deleted. When a backref is deleted:
  609. *
  610. * if backref was for a tree root:
  611. * root_objectid = root->root_key.objectid
  612. * else
  613. * root_objectid = btrfs_header_owner(parent)
  614. *
  615. * (root_objectid, btrfs_header_generation(parent) or zero, 0, 0)
  616. *
  617. * Back Reference Key hashing:
  618. *
  619. * Back references have four fields, each 64 bits long. Unfortunately,
  620. * this is hashed into a single 64 bit number and placed into the key offset.
  621. * The key objectid corresponds to the first byte in the extent, and the
  622. * key type is set to BTRFS_EXTENT_REF_KEY
  623. */
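/*
 * A purely illustrative example (hypothetical values): a file extent at
 * bytenr 12582912, referenced by inode 257 at file offset 0 in subvolume 5
 * and created in transaction 10, gets a backref item with the key
 *
 *   (12582912, BTRFS_EXTENT_REF_KEY, hash_extent_ref(5, 10, 257, 0))
 *
 * and a btrfs_extent_ref body of (root 5, generation 10, objectid 257,
 * offset 0), exactly as filled in by btrfs_insert_extent_backref() below.
 */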
  624. int btrfs_insert_extent_backref(struct btrfs_trans_handle *trans,
  625. struct btrfs_root *root,
  626. struct btrfs_path *path, u64 bytenr,
  627. u64 root_objectid, u64 ref_generation,
  628. u64 owner, u64 owner_offset)
  629. {
  630. u64 hash;
  631. struct btrfs_key key;
  632. struct btrfs_extent_ref ref;
  633. struct btrfs_extent_ref *disk_ref;
  634. int ret;
  635. btrfs_set_stack_ref_root(&ref, root_objectid);
  636. btrfs_set_stack_ref_generation(&ref, ref_generation);
  637. btrfs_set_stack_ref_objectid(&ref, owner);
  638. btrfs_set_stack_ref_offset(&ref, owner_offset);
  639. hash = hash_extent_ref(root_objectid, ref_generation, owner,
  640. owner_offset);
  641. key.offset = hash;
  642. key.objectid = bytenr;
  643. key.type = BTRFS_EXTENT_REF_KEY;
  644. ret = btrfs_insert_empty_item(trans, root, path, &key, sizeof(ref));
  645. while (ret == -EEXIST) {
  646. disk_ref = btrfs_item_ptr(path->nodes[0], path->slots[0],
  647. struct btrfs_extent_ref);
  648. if (match_extent_ref(path->nodes[0], disk_ref, &ref))
  649. goto out;
  650. key.offset++;
  651. btrfs_release_path(root, path);
  652. ret = btrfs_insert_empty_item(trans, root, path, &key,
  653. sizeof(ref));
  654. }
  655. if (ret)
  656. goto out;
  657. disk_ref = btrfs_item_ptr(path->nodes[0], path->slots[0],
  658. struct btrfs_extent_ref);
  659. write_extent_buffer(path->nodes[0], &ref, (unsigned long)disk_ref,
  660. sizeof(ref));
  661. btrfs_mark_buffer_dirty(path->nodes[0]);
  662. out:
  663. btrfs_release_path(root, path);
  664. return ret;
  665. }
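/*
 * bump the reference count in the extent item for 'bytenr' and insert a
 * backref for the new holder.  Callers take fs_info->alloc_mutex around
 * this; btrfs_inc_extent_ref() below is the locked wrapper.
 */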
  666. static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
  667. struct btrfs_root *root,
  668. u64 bytenr, u64 num_bytes,
  669. u64 root_objectid, u64 ref_generation,
  670. u64 owner, u64 owner_offset)
  671. {
  672. struct btrfs_path *path;
  673. int ret;
  674. struct btrfs_key key;
  675. struct extent_buffer *l;
  676. struct btrfs_extent_item *item;
  677. u32 refs;
  678. WARN_ON(num_bytes < root->sectorsize);
  679. path = btrfs_alloc_path();
  680. if (!path)
  681. return -ENOMEM;
  682. path->reada = 1;
  683. key.objectid = bytenr;
  684. btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
  685. key.offset = num_bytes;
  686. ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key, path,
  687. 0, 1);
  688. if (ret < 0)
  689. return ret;
  690. if (ret != 0) {
  691. BUG();
  692. }
  693. BUG_ON(ret != 0);
  694. l = path->nodes[0];
  695. item = btrfs_item_ptr(l, path->slots[0], struct btrfs_extent_item);
  696. refs = btrfs_extent_refs(l, item);
  697. btrfs_set_extent_refs(l, item, refs + 1);
  698. btrfs_mark_buffer_dirty(path->nodes[0]);
  699. btrfs_release_path(root->fs_info->extent_root, path);
  700. path->reada = 1;
  701. ret = btrfs_insert_extent_backref(trans, root->fs_info->extent_root,
  702. path, bytenr, root_objectid,
  703. ref_generation, owner, owner_offset);
  704. BUG_ON(ret);
  705. finish_current_insert(trans, root->fs_info->extent_root);
  706. del_pending_extents(trans, root->fs_info->extent_root);
  707. btrfs_free_path(path);
  708. return 0;
  709. }
  710. int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
  711. struct btrfs_root *root,
  712. u64 bytenr, u64 num_bytes,
  713. u64 root_objectid, u64 ref_generation,
  714. u64 owner, u64 owner_offset)
  715. {
  716. int ret;
  717. mutex_lock(&root->fs_info->alloc_mutex);
  718. ret = __btrfs_inc_extent_ref(trans, root, bytenr, num_bytes,
  719. root_objectid, ref_generation,
  720. owner, owner_offset);
  721. mutex_unlock(&root->fs_info->alloc_mutex);
  722. return ret;
  723. }
  724. int btrfs_extent_post_op(struct btrfs_trans_handle *trans,
  725. struct btrfs_root *root)
  726. {
  727. finish_current_insert(trans, root->fs_info->extent_root);
  728. del_pending_extents(trans, root->fs_info->extent_root);
  729. return 0;
  730. }
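/*
 * read the reference count from the extent item covering
 * [bytenr, bytenr + num_bytes) into *refs
 */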
  731. static int lookup_extent_ref(struct btrfs_trans_handle *trans,
  732. struct btrfs_root *root, u64 bytenr,
  733. u64 num_bytes, u32 *refs)
  734. {
  735. struct btrfs_path *path;
  736. int ret;
  737. struct btrfs_key key;
  738. struct extent_buffer *l;
  739. struct btrfs_extent_item *item;
  740. WARN_ON(num_bytes < root->sectorsize);
  741. path = btrfs_alloc_path();
  742. path->reada = 1;
  743. key.objectid = bytenr;
  744. key.offset = num_bytes;
  745. btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
  746. ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key, path,
  747. 0, 0);
  748. if (ret < 0)
  749. goto out;
  750. if (ret != 0) {
  751. btrfs_print_leaf(root, path->nodes[0]);
  752. printk("failed to find block number %Lu\n", bytenr);
  753. BUG();
  754. }
  755. l = path->nodes[0];
  756. item = btrfs_item_ptr(l, path->slots[0], struct btrfs_extent_item);
  757. *refs = btrfs_extent_refs(l, item);
  758. out:
  759. btrfs_free_path(path);
  760. return 0;
  761. }
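/*
 * scan the backrefs of the extent at 'bytenr'.  *ref_count is set to 1 if
 * every reference comes from this root (and, for file data, from
 * 'ref_objectid') and is no older than 'parent_gen', with *min_generation
 * tracking the oldest such generation; otherwise *ref_count is set to 2.
 */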
  762. static int get_reference_status(struct btrfs_root *root, u64 bytenr,
  763. u64 parent_gen, u64 ref_objectid,
  764. u64 *min_generation, u32 *ref_count)
  765. {
  766. struct btrfs_root *extent_root = root->fs_info->extent_root;
  767. struct btrfs_path *path;
  768. struct extent_buffer *leaf;
  769. struct btrfs_extent_ref *ref_item;
  770. struct btrfs_key key;
  771. struct btrfs_key found_key;
  772. u64 root_objectid = root->root_key.objectid;
  773. u64 ref_generation;
  774. u32 nritems;
  775. int ret;
  776. key.objectid = bytenr;
  777. key.offset = 0;
  778. key.type = BTRFS_EXTENT_ITEM_KEY;
  779. path = btrfs_alloc_path();
  780. mutex_lock(&root->fs_info->alloc_mutex);
  781. ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
  782. if (ret < 0)
  783. goto out;
  784. BUG_ON(ret == 0);
  785. leaf = path->nodes[0];
  786. btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
  787. if (found_key.objectid != bytenr ||
  788. found_key.type != BTRFS_EXTENT_ITEM_KEY) {
  789. ret = 1;
  790. goto out;
  791. }
  792. *ref_count = 0;
  793. *min_generation = (u64)-1;
  794. while (1) {
  795. leaf = path->nodes[0];
  796. nritems = btrfs_header_nritems(leaf);
  797. if (path->slots[0] >= nritems) {
  798. ret = btrfs_next_leaf(extent_root, path);
  799. if (ret < 0)
  800. goto out;
  801. if (ret == 0)
  802. continue;
  803. break;
  804. }
  805. btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
  806. if (found_key.objectid != bytenr)
  807. break;
  808. if (found_key.type != BTRFS_EXTENT_REF_KEY) {
  809. path->slots[0]++;
  810. continue;
  811. }
  812. ref_item = btrfs_item_ptr(leaf, path->slots[0],
  813. struct btrfs_extent_ref);
  814. ref_generation = btrfs_ref_generation(leaf, ref_item);
  815. /*
  816. * For (parent_gen > 0 && parent_gen > ref_gen):
  817. *
  818. * we reach here through the oldest root, therefore
  819. * all other references from the same snapshot should have
  820. * a larger generation.
  821. */
  822. if ((root_objectid != btrfs_ref_root(leaf, ref_item)) ||
  823. (parent_gen > 0 && parent_gen > ref_generation) ||
  824. (ref_objectid >= BTRFS_FIRST_FREE_OBJECTID &&
  825. ref_objectid != btrfs_ref_objectid(leaf, ref_item))) {
  826. if (ref_count)
  827. *ref_count = 2;
  828. break;
  829. }
  830. *ref_count = 1;
  831. if (*min_generation > ref_generation)
  832. *min_generation = ref_generation;
  833. path->slots[0]++;
  834. }
  835. ret = 0;
  836. out:
  837. mutex_unlock(&root->fs_info->alloc_mutex);
  838. btrfs_free_path(path);
  839. return ret;
  840. }
  841. int btrfs_cross_ref_exists(struct btrfs_trans_handle *trans,
  842. struct btrfs_root *root,
  843. struct btrfs_key *key, u64 bytenr)
  844. {
  845. struct btrfs_root *old_root;
  846. struct btrfs_path *path = NULL;
  847. struct extent_buffer *eb;
  848. struct btrfs_file_extent_item *item;
  849. u64 ref_generation;
  850. u64 min_generation;
  851. u64 extent_start;
  852. u32 ref_count;
  853. int level;
  854. int ret;
  855. BUG_ON(trans == NULL);
  856. BUG_ON(key->type != BTRFS_EXTENT_DATA_KEY);
  857. ret = get_reference_status(root, bytenr, 0, key->objectid,
  858. &min_generation, &ref_count);
  859. if (ret)
  860. return ret;
  861. if (ref_count != 1)
  862. return 1;
  863. old_root = root->dirty_root->root;
  864. ref_generation = old_root->root_key.offset;
  865. /* all references were created in the running transaction */
  866. if (min_generation > ref_generation) {
  867. ret = 0;
  868. goto out;
  869. }
  870. path = btrfs_alloc_path();
  871. if (!path) {
  872. ret = -ENOMEM;
  873. goto out;
  874. }
  875. path->skip_locking = 1;
  876. /* if no item is found, the extent is referenced by another snapshot */
  877. ret = btrfs_search_slot(NULL, old_root, key, path, 0, 0);
  878. if (ret)
  879. goto out;
  880. eb = path->nodes[0];
  881. item = btrfs_item_ptr(eb, path->slots[0],
  882. struct btrfs_file_extent_item);
  883. if (btrfs_file_extent_type(eb, item) != BTRFS_FILE_EXTENT_REG ||
  884. btrfs_file_extent_disk_bytenr(eb, item) != bytenr) {
  885. ret = 1;
  886. goto out;
  887. }
  888. for (level = BTRFS_MAX_LEVEL - 1; level >= -1; level--) {
  889. if (level >= 0) {
  890. eb = path->nodes[level];
  891. if (!eb)
  892. continue;
  893. extent_start = eb->start;
  894. } else
  895. extent_start = bytenr;
  896. ret = get_reference_status(root, extent_start, ref_generation,
  897. 0, &min_generation, &ref_count);
  898. if (ret)
  899. goto out;
  900. if (ref_count != 1) {
  901. ret = 1;
  902. goto out;
  903. }
  904. if (level >= 0)
  905. ref_generation = btrfs_header_generation(eb);
  906. }
  907. ret = 0;
  908. out:
  909. if (path)
  910. btrfs_free_path(path);
  911. return ret;
  912. }
  913. int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
  914. struct extent_buffer *buf, int cache_ref)
  915. {
  916. u64 bytenr;
  917. u32 nritems;
  918. struct btrfs_key key;
  919. struct btrfs_file_extent_item *fi;
  920. int i;
  921. int level;
  922. int ret;
  923. int faili;
  924. int nr_file_extents = 0;
  925. if (!root->ref_cows)
  926. return 0;
  927. level = btrfs_header_level(buf);
  928. nritems = btrfs_header_nritems(buf);
  929. for (i = 0; i < nritems; i++) {
  930. cond_resched();
  931. if (level == 0) {
  932. u64 disk_bytenr;
  933. btrfs_item_key_to_cpu(buf, &key, i);
  934. if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
  935. continue;
  936. fi = btrfs_item_ptr(buf, i,
  937. struct btrfs_file_extent_item);
  938. if (btrfs_file_extent_type(buf, fi) ==
  939. BTRFS_FILE_EXTENT_INLINE)
  940. continue;
  941. disk_bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
  942. if (disk_bytenr == 0)
  943. continue;
  944. if (buf != root->commit_root)
  945. nr_file_extents++;
  946. mutex_lock(&root->fs_info->alloc_mutex);
  947. ret = __btrfs_inc_extent_ref(trans, root, disk_bytenr,
  948. btrfs_file_extent_disk_num_bytes(buf, fi),
  949. root->root_key.objectid, trans->transid,
  950. key.objectid, key.offset);
  951. mutex_unlock(&root->fs_info->alloc_mutex);
  952. if (ret) {
  953. faili = i;
  954. WARN_ON(1);
  955. goto fail;
  956. }
  957. } else {
  958. bytenr = btrfs_node_blockptr(buf, i);
  959. btrfs_node_key_to_cpu(buf, &key, i);
  960. mutex_lock(&root->fs_info->alloc_mutex);
  961. ret = __btrfs_inc_extent_ref(trans, root, bytenr,
  962. btrfs_level_size(root, level - 1),
  963. root->root_key.objectid,
  964. trans->transid,
  965. level - 1, key.objectid);
  966. mutex_unlock(&root->fs_info->alloc_mutex);
  967. if (ret) {
  968. faili = i;
  969. WARN_ON(1);
  970. goto fail;
  971. }
  972. }
  973. }
  974. /* cache the original leaf block's references */
  975. if (level == 0 && cache_ref && buf != root->commit_root) {
  976. struct btrfs_leaf_ref *ref;
  977. struct btrfs_extent_info *info;
  978. ref = btrfs_alloc_leaf_ref(root, nr_file_extents);
  979. if (!ref) {
  980. WARN_ON(1);
  981. goto out;
  982. }
  983. ref->root_gen = root->root_key.offset;
  984. ref->bytenr = buf->start;
  985. ref->owner = btrfs_header_owner(buf);
  986. ref->generation = btrfs_header_generation(buf);
  987. ref->nritems = nr_file_extents;
  988. info = ref->extents;
  989. for (i = 0; nr_file_extents > 0 && i < nritems; i++) {
  990. u64 disk_bytenr;
  991. btrfs_item_key_to_cpu(buf, &key, i);
  992. if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
  993. continue;
  994. fi = btrfs_item_ptr(buf, i,
  995. struct btrfs_file_extent_item);
  996. if (btrfs_file_extent_type(buf, fi) ==
  997. BTRFS_FILE_EXTENT_INLINE)
  998. continue;
  999. disk_bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
  1000. if (disk_bytenr == 0)
  1001. continue;
  1002. info->bytenr = disk_bytenr;
  1003. info->num_bytes =
  1004. btrfs_file_extent_disk_num_bytes(buf, fi);
  1005. info->objectid = key.objectid;
  1006. info->offset = key.offset;
  1007. info++;
  1008. }
  1009. BUG_ON(!root->ref_tree);
  1010. ret = btrfs_add_leaf_ref(root, ref);
  1011. WARN_ON(ret);
  1012. btrfs_free_leaf_ref(root, ref);
  1013. }
  1014. out:
  1015. return 0;
  1016. fail:
  1017. WARN_ON(1);
  1018. #if 0
  1019. for (i = 0; i < faili; i++) {
  1020. if (level == 0) {
  1021. u64 disk_bytenr;
  1022. btrfs_item_key_to_cpu(buf, &key, i);
  1023. if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
  1024. continue;
  1025. fi = btrfs_item_ptr(buf, i,
  1026. struct btrfs_file_extent_item);
  1027. if (btrfs_file_extent_type(buf, fi) ==
  1028. BTRFS_FILE_EXTENT_INLINE)
  1029. continue;
  1030. disk_bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
  1031. if (disk_bytenr == 0)
  1032. continue;
  1033. err = btrfs_free_extent(trans, root, disk_bytenr,
  1034. btrfs_file_extent_disk_num_bytes(buf,
  1035. fi), 0);
  1036. BUG_ON(err);
  1037. } else {
  1038. bytenr = btrfs_node_blockptr(buf, i);
  1039. err = btrfs_free_extent(trans, root, bytenr,
  1040. btrfs_level_size(root, level - 1), 0);
  1041. BUG_ON(err);
  1042. }
  1043. }
  1044. #endif
  1045. return ret;
  1046. }
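/*
 * write the in-memory block group item for 'cache' back into its slot in
 * the extent tree, then run any pending extent insertions and deletions.
 */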
  1047. static int write_one_cache_group(struct btrfs_trans_handle *trans,
  1048. struct btrfs_root *root,
  1049. struct btrfs_path *path,
  1050. struct btrfs_block_group_cache *cache)
  1051. {
  1052. int ret;
  1053. int pending_ret;
  1054. struct btrfs_root *extent_root = root->fs_info->extent_root;
  1055. unsigned long bi;
  1056. struct extent_buffer *leaf;
  1057. ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
  1058. if (ret < 0)
  1059. goto fail;
  1060. BUG_ON(ret);
  1061. leaf = path->nodes[0];
  1062. bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
  1063. write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
  1064. btrfs_mark_buffer_dirty(leaf);
  1065. btrfs_release_path(extent_root, path);
  1066. fail:
  1067. finish_current_insert(trans, extent_root);
  1068. pending_ret = del_pending_extents(trans, extent_root);
  1069. if (ret)
  1070. return ret;
  1071. if (pending_ret)
  1072. return pending_ret;
  1073. return 0;
  1074. }
  1075. int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
  1076. struct btrfs_root *root)
  1077. {
  1078. struct btrfs_block_group_cache *cache, *entry;
  1079. struct rb_node *n;
  1080. int err = 0;
  1081. int werr = 0;
  1082. struct btrfs_path *path;
  1083. u64 last = 0;
  1084. path = btrfs_alloc_path();
  1085. if (!path)
  1086. return -ENOMEM;
  1087. mutex_lock(&root->fs_info->alloc_mutex);
  1088. while(1) {
  1089. cache = NULL;
  1090. spin_lock(&root->fs_info->block_group_cache_lock);
  1091. for (n = rb_first(&root->fs_info->block_group_cache_tree);
  1092. n; n = rb_next(n)) {
  1093. entry = rb_entry(n, struct btrfs_block_group_cache,
  1094. cache_node);
  1095. if (entry->dirty) {
  1096. cache = entry;
  1097. break;
  1098. }
  1099. }
  1100. spin_unlock(&root->fs_info->block_group_cache_lock);
  1101. if (!cache)
  1102. break;
  1103. last += cache->key.offset;
  1104. err = write_one_cache_group(trans, root,
  1105. path, cache);
  1106. /*
  1107. * if we fail to write the cache group, we want
  1108. * to keep it marked dirty in hopes that a later
  1109. * write will work
  1110. */
  1111. if (err) {
  1112. werr = err;
  1113. continue;
  1114. }
  1115. cache->dirty = 0;
  1116. }
  1117. btrfs_free_path(path);
  1118. mutex_unlock(&root->fs_info->alloc_mutex);
  1119. return werr;
  1120. }
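/*
 * add total_bytes/bytes_used to the space_info matching 'flags', creating
 * a new space_info if this is the first block group with these flags.
 */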
  1121. static int update_space_info(struct btrfs_fs_info *info, u64 flags,
  1122. u64 total_bytes, u64 bytes_used,
  1123. struct btrfs_space_info **space_info)
  1124. {
  1125. struct btrfs_space_info *found;
  1126. found = __find_space_info(info, flags);
  1127. if (found) {
  1128. found->total_bytes += total_bytes;
  1129. found->bytes_used += bytes_used;
  1130. found->full = 0;
  1131. *space_info = found;
  1132. return 0;
  1133. }
  1134. found = kmalloc(sizeof(*found), GFP_NOFS);
  1135. if (!found)
  1136. return -ENOMEM;
  1137. list_add(&found->list, &info->space_info);
  1138. INIT_LIST_HEAD(&found->block_groups);
  1139. spin_lock_init(&found->lock);
  1140. found->flags = flags;
  1141. found->total_bytes = total_bytes;
  1142. found->bytes_used = bytes_used;
  1143. found->bytes_pinned = 0;
  1144. found->full = 0;
  1145. found->force_alloc = 0;
  1146. *space_info = found;
  1147. return 0;
  1148. }
  1149. static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
  1150. {
  1151. u64 extra_flags = flags & (BTRFS_BLOCK_GROUP_RAID0 |
  1152. BTRFS_BLOCK_GROUP_RAID1 |
  1153. BTRFS_BLOCK_GROUP_RAID10 |
  1154. BTRFS_BLOCK_GROUP_DUP);
  1155. if (extra_flags) {
  1156. if (flags & BTRFS_BLOCK_GROUP_DATA)
  1157. fs_info->avail_data_alloc_bits |= extra_flags;
  1158. if (flags & BTRFS_BLOCK_GROUP_METADATA)
  1159. fs_info->avail_metadata_alloc_bits |= extra_flags;
  1160. if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
  1161. fs_info->avail_system_alloc_bits |= extra_flags;
  1162. }
  1163. }
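/*
 * strip allocation profile bits that can't be honoured: RAID0/RAID1 need
 * more than one device and RAID10 needs at least four.  When conflicting
 * profiles are requested the more redundant one wins (RAID10 over RAID1
 * over DUP), and RAID0 is dropped alongside any of those.
 */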
  1164. static u64 reduce_alloc_profile(struct btrfs_root *root, u64 flags)
  1165. {
  1166. u64 num_devices = root->fs_info->fs_devices->num_devices;
  1167. if (num_devices == 1)
  1168. flags &= ~(BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID0);
  1169. if (num_devices < 4)
  1170. flags &= ~BTRFS_BLOCK_GROUP_RAID10;
  1171. if ((flags & BTRFS_BLOCK_GROUP_DUP) &&
  1172. (flags & (BTRFS_BLOCK_GROUP_RAID1 |
  1173. BTRFS_BLOCK_GROUP_RAID10))) {
  1174. flags &= ~BTRFS_BLOCK_GROUP_DUP;
  1175. }
  1176. if ((flags & BTRFS_BLOCK_GROUP_RAID1) &&
  1177. (flags & BTRFS_BLOCK_GROUP_RAID10)) {
  1178. flags &= ~BTRFS_BLOCK_GROUP_RAID1;
  1179. }
  1180. if ((flags & BTRFS_BLOCK_GROUP_RAID0) &&
  1181. ((flags & BTRFS_BLOCK_GROUP_RAID1) |
  1182. (flags & BTRFS_BLOCK_GROUP_RAID10) |
  1183. (flags & BTRFS_BLOCK_GROUP_DUP)))
  1184. flags &= ~BTRFS_BLOCK_GROUP_RAID0;
  1185. return flags;
  1186. }
  1187. static int do_chunk_alloc(struct btrfs_trans_handle *trans,
  1188. struct btrfs_root *extent_root, u64 alloc_bytes,
  1189. u64 flags, int force)
  1190. {
  1191. struct btrfs_space_info *space_info;
  1192. u64 thresh;
  1193. u64 start;
  1194. u64 num_bytes;
  1195. int ret = 0;
  1196. flags = reduce_alloc_profile(extent_root, flags);
  1197. space_info = __find_space_info(extent_root->fs_info, flags);
  1198. if (!space_info) {
  1199. ret = update_space_info(extent_root->fs_info, flags,
  1200. 0, 0, &space_info);
  1201. BUG_ON(ret);
  1202. }
  1203. BUG_ON(!space_info);
  1204. if (space_info->force_alloc) {
  1205. force = 1;
  1206. space_info->force_alloc = 0;
  1207. }
  1208. if (space_info->full)
  1209. goto out;
  1210. thresh = div_factor(space_info->total_bytes, 6);
  1211. if (!force &&
  1212. (space_info->bytes_used + space_info->bytes_pinned + alloc_bytes) <
  1213. thresh)
  1214. goto out;
  1215. mutex_lock(&extent_root->fs_info->chunk_mutex);
  1216. ret = btrfs_alloc_chunk(trans, extent_root, &start, &num_bytes, flags);
  1217. if (ret == -ENOSPC) {
  1218. printk("space info full %Lu\n", flags);
  1219. space_info->full = 1;
  1220. goto out_unlock;
  1221. }
  1222. BUG_ON(ret);
  1223. ret = btrfs_make_block_group(trans, extent_root, 0, flags,
  1224. BTRFS_FIRST_CHUNK_TREE_OBJECTID, start, num_bytes);
  1225. BUG_ON(ret);
  1226. out_unlock:
  1227. mutex_unlock(&extent_root->fs_info->chunk_mutex);
  1228. out:
  1229. return ret;
  1230. }
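/*
 * add or subtract 'num_bytes' from the used counters of every block group
 * covered by [bytenr, bytenr + num_bytes).  When freeing with 'mark_free'
 * set, the range is also returned to the block group's free space cache.
 */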
  1231. static int update_block_group(struct btrfs_trans_handle *trans,
  1232. struct btrfs_root *root,
  1233. u64 bytenr, u64 num_bytes, int alloc,
  1234. int mark_free)
  1235. {
  1236. struct btrfs_block_group_cache *cache;
  1237. struct btrfs_fs_info *info = root->fs_info;
  1238. u64 total = num_bytes;
  1239. u64 old_val;
  1240. u64 byte_in_group;
  1241. WARN_ON(!mutex_is_locked(&root->fs_info->alloc_mutex));
  1242. while(total) {
  1243. cache = btrfs_lookup_block_group(info, bytenr);
  1244. if (!cache) {
  1245. return -1;
  1246. }
  1247. byte_in_group = bytenr - cache->key.objectid;
  1248. WARN_ON(byte_in_group > cache->key.offset);
  1249. spin_lock(&cache->lock);
  1250. cache->dirty = 1;
  1251. old_val = btrfs_block_group_used(&cache->item);
  1252. num_bytes = min(total, cache->key.offset - byte_in_group);
  1253. if (alloc) {
  1254. old_val += num_bytes;
  1255. cache->space_info->bytes_used += num_bytes;
  1256. btrfs_set_block_group_used(&cache->item, old_val);
  1257. spin_unlock(&cache->lock);
  1258. } else {
  1259. old_val -= num_bytes;
  1260. cache->space_info->bytes_used -= num_bytes;
  1261. btrfs_set_block_group_used(&cache->item, old_val);
  1262. spin_unlock(&cache->lock);
  1263. if (mark_free) {
  1264. int ret;
  1265. ret = btrfs_add_free_space(cache, bytenr,
  1266. num_bytes);
  1267. if (ret)
  1268. return -1;
  1269. }
  1270. }
  1271. total -= num_bytes;
  1272. bytenr += num_bytes;
  1273. }
  1274. return 0;
  1275. }
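/*
 * first_logical_byte returns the logical start of the first block group at
 * or after search_start, or 0 if no block group is found.
 */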
  1276. static u64 first_logical_byte(struct btrfs_root *root, u64 search_start)
  1277. {
  1278. struct btrfs_block_group_cache *cache;
  1279. cache = btrfs_lookup_first_block_group(root->fs_info, search_start);
  1280. if (!cache)
  1281. return 0;
  1282. return cache->key.objectid;
  1283. }
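/*
 * btrfs_update_pinned_extents marks or clears [bytenr, bytenr + num) in
 * the pinned_extents tree and keeps the per-block-group pinned counts, the
 * space_info bytes_pinned and fs_info->total_pinned in sync.  The
 * alloc_mutex must be held.
 */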
  1284. int btrfs_update_pinned_extents(struct btrfs_root *root,
  1285. u64 bytenr, u64 num, int pin)
  1286. {
  1287. u64 len;
  1288. struct btrfs_block_group_cache *cache;
  1289. struct btrfs_fs_info *fs_info = root->fs_info;
  1290. WARN_ON(!mutex_is_locked(&root->fs_info->alloc_mutex));
  1291. if (pin) {
  1292. set_extent_dirty(&fs_info->pinned_extents,
  1293. bytenr, bytenr + num - 1, GFP_NOFS);
  1294. } else {
  1295. clear_extent_dirty(&fs_info->pinned_extents,
  1296. bytenr, bytenr + num - 1, GFP_NOFS);
  1297. }
  1298. while (num > 0) {
  1299. cache = btrfs_lookup_block_group(fs_info, bytenr);
  1300. if (!cache) {
  1301. u64 first = first_logical_byte(root, bytenr);
  1302. WARN_ON(first < bytenr);
  1303. len = min(first - bytenr, num);
  1304. } else {
  1305. len = min(num, cache->key.offset -
  1306. (bytenr - cache->key.objectid));
  1307. }
  1308. if (pin) {
  1309. if (cache) {
  1310. spin_lock(&cache->lock);
  1311. cache->pinned += len;
  1312. cache->space_info->bytes_pinned += len;
  1313. spin_unlock(&cache->lock);
  1314. }
  1315. fs_info->total_pinned += len;
  1316. } else {
  1317. if (cache) {
  1318. spin_lock(&cache->lock);
  1319. cache->pinned -= len;
  1320. cache->space_info->bytes_pinned -= len;
  1321. spin_unlock(&cache->lock);
  1322. }
  1323. fs_info->total_pinned -= len;
  1324. }
  1325. bytenr += len;
  1326. num -= len;
  1327. }
  1328. return 0;
  1329. }
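/*
 * btrfs_copy_pinned duplicates the currently pinned ranges into another
 * extent_io tree, typically the per-transaction unpin set that
 * btrfs_finish_extent_commit processes once the commit is on disk.
 */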
  1330. int btrfs_copy_pinned(struct btrfs_root *root, struct extent_io_tree *copy)
  1331. {
  1332. u64 last = 0;
  1333. u64 start;
  1334. u64 end;
  1335. struct extent_io_tree *pinned_extents = &root->fs_info->pinned_extents;
  1336. int ret;
  1337. while(1) {
  1338. ret = find_first_extent_bit(pinned_extents, last,
  1339. &start, &end, EXTENT_DIRTY);
  1340. if (ret)
  1341. break;
  1342. set_extent_dirty(copy, start, end, GFP_NOFS);
  1343. last = end + 1;
  1344. }
  1345. return 0;
  1346. }
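/*
 * btrfs_finish_extent_commit unpins every range recorded in 'unpin' and,
 * for block groups whose free space cache has already been loaded, returns
 * the bytes to that cache.  The alloc_mutex is dropped and retaken around
 * rescheduling points.
 */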
  1347. int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
  1348. struct btrfs_root *root,
  1349. struct extent_io_tree *unpin)
  1350. {
  1351. u64 start;
  1352. u64 end;
  1353. int ret;
  1354. struct btrfs_block_group_cache *cache;
  1355. mutex_lock(&root->fs_info->alloc_mutex);
  1356. while(1) {
  1357. ret = find_first_extent_bit(unpin, 0, &start, &end,
  1358. EXTENT_DIRTY);
  1359. if (ret)
  1360. break;
  1361. btrfs_update_pinned_extents(root, start, end + 1 - start, 0);
  1362. clear_extent_dirty(unpin, start, end, GFP_NOFS);
  1363. cache = btrfs_lookup_block_group(root->fs_info, start);
  1364. if (cache->cached)
  1365. btrfs_add_free_space(cache, start, end - start + 1);
  1366. if (need_resched()) {
  1367. mutex_unlock(&root->fs_info->alloc_mutex);
  1368. cond_resched();
  1369. mutex_lock(&root->fs_info->alloc_mutex);
  1370. }
  1371. }
  1372. mutex_unlock(&root->fs_info->alloc_mutex);
  1373. return 0;
  1374. }
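/*
 * finish_current_insert creates the extent items and backrefs for tree
 * blocks whose allocation could not be recorded immediately because the
 * extent root itself was being modified.  Those ranges are kept
 * EXTENT_LOCKED in fs_info->extent_ins until they are processed here.
 */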
  1375. static int finish_current_insert(struct btrfs_trans_handle *trans,
  1376. struct btrfs_root *extent_root)
  1377. {
  1378. u64 start;
  1379. u64 end;
  1380. struct btrfs_fs_info *info = extent_root->fs_info;
  1381. struct extent_buffer *eb;
  1382. struct btrfs_path *path;
  1383. struct btrfs_key ins;
  1384. struct btrfs_disk_key first;
  1385. struct btrfs_extent_item extent_item;
  1386. int ret;
  1387. int level;
  1388. int err = 0;
  1389. WARN_ON(!mutex_is_locked(&extent_root->fs_info->alloc_mutex));
  1390. btrfs_set_stack_extent_refs(&extent_item, 1);
  1391. btrfs_set_key_type(&ins, BTRFS_EXTENT_ITEM_KEY);
  1392. path = btrfs_alloc_path();
  1393. while(1) {
  1394. ret = find_first_extent_bit(&info->extent_ins, 0, &start,
  1395. &end, EXTENT_LOCKED);
  1396. if (ret)
  1397. break;
  1398. ins.objectid = start;
  1399. ins.offset = end + 1 - start;
  1400. err = btrfs_insert_item(trans, extent_root, &ins,
  1401. &extent_item, sizeof(extent_item));
  1402. clear_extent_bits(&info->extent_ins, start, end, EXTENT_LOCKED,
  1403. GFP_NOFS);
  1404. eb = btrfs_find_create_tree_block(extent_root, ins.objectid,
  1405. ins.offset);
  1406. if (!btrfs_buffer_uptodate(eb, trans->transid))
  1407. btrfs_read_buffer(eb, trans->transid);
  1408. btrfs_tree_lock(eb);
  1409. level = btrfs_header_level(eb);
  1410. if (level == 0) {
  1411. btrfs_item_key(eb, &first, 0);
  1412. } else {
  1413. btrfs_node_key(eb, &first, 0);
  1414. }
  1415. btrfs_tree_unlock(eb);
  1416. free_extent_buffer(eb);
  1417. /*
  1418. * the first key is just a hint, so the race we've created
  1419. * against reading it is fine
  1420. */
  1421. err = btrfs_insert_extent_backref(trans, extent_root, path,
  1422. start, extent_root->root_key.objectid,
  1423. 0, level,
  1424. btrfs_disk_key_objectid(&first));
  1425. BUG_ON(err);
  1426. if (need_resched()) {
  1427. mutex_unlock(&extent_root->fs_info->alloc_mutex);
  1428. cond_resched();
  1429. mutex_lock(&extent_root->fs_info->alloc_mutex);
  1430. }
  1431. }
  1432. btrfs_free_path(path);
  1433. return 0;
  1434. }
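/*
 * pin_down_bytes decides how a freed extent is handled.  A tree block that
 * was allocated in the running transaction and never written can be
 * cleaned and reused right away (return value 1).  Otherwise the range is
 * pinned, or, when 'pending' is set, parked in pending_del for
 * del_pending_extents to deal with later.
 */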
  1435. static int pin_down_bytes(struct btrfs_root *root, u64 bytenr, u32 num_bytes,
  1436. int is_data, int pending)
  1437. {
  1438. int err = 0;
  1439. WARN_ON(!mutex_is_locked(&root->fs_info->alloc_mutex));
  1440. if (!pending) {
  1441. struct extent_buffer *buf;
  1442. if (is_data)
  1443. goto pinit;
  1444. buf = btrfs_find_tree_block(root, bytenr, num_bytes);
  1445. if (buf) {
  1446. /* we can reuse a block if it hasn't been written
  1447. * and it is from this transaction. We can't
  1448. * reuse anything from the tree log root because
  1449. * it has tiny sub-transactions.
  1450. */
  1451. if (btrfs_buffer_uptodate(buf, 0) &&
  1452. btrfs_try_tree_lock(buf)) {
  1453. u64 transid =
  1454. root->fs_info->running_transaction->transid;
  1455. u64 header_transid =
  1456. btrfs_header_generation(buf);
  1457. if (btrfs_header_owner(buf) !=
  1458. BTRFS_TREE_LOG_OBJECTID &&
  1459. header_transid == transid &&
  1460. !btrfs_header_flag(buf,
  1461. BTRFS_HEADER_FLAG_WRITTEN)) {
  1462. clean_tree_block(NULL, root, buf);
  1463. btrfs_tree_unlock(buf);
  1464. free_extent_buffer(buf);
  1465. return 1;
  1466. }
  1467. btrfs_tree_unlock(buf);
  1468. }
  1469. free_extent_buffer(buf);
  1470. }
  1471. pinit:
  1472. btrfs_update_pinned_extents(root, bytenr, num_bytes, 1);
  1473. } else {
  1474. set_extent_bits(&root->fs_info->pending_del,
  1475. bytenr, bytenr + num_bytes - 1,
  1476. EXTENT_LOCKED, GFP_NOFS);
  1477. }
  1478. BUG_ON(err < 0);
  1479. return 0;
  1480. }
  1481. /*
  1482. * remove an extent from the root, returns 0 on success
  1483. */
  1484. static int __free_extent(struct btrfs_trans_handle *trans, struct btrfs_root
  1485. *root, u64 bytenr, u64 num_bytes,
  1486. u64 root_objectid, u64 ref_generation,
  1487. u64 owner_objectid, u64 owner_offset, int pin,
  1488. int mark_free)
  1489. {
  1490. struct btrfs_path *path;
  1491. struct btrfs_key key;
  1492. struct btrfs_fs_info *info = root->fs_info;
  1493. struct btrfs_root *extent_root = info->extent_root;
  1494. struct extent_buffer *leaf;
  1495. int ret;
  1496. int extent_slot = 0;
  1497. int found_extent = 0;
  1498. int num_to_del = 1;
  1499. struct btrfs_extent_item *ei;
  1500. u32 refs;
  1501. WARN_ON(!mutex_is_locked(&root->fs_info->alloc_mutex));
  1502. key.objectid = bytenr;
  1503. btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
  1504. key.offset = num_bytes;
  1505. path = btrfs_alloc_path();
  1506. if (!path)
  1507. return -ENOMEM;
  1508. path->reada = 1;
  1509. ret = lookup_extent_backref(trans, extent_root, path,
  1510. bytenr, root_objectid,
  1511. ref_generation,
  1512. owner_objectid, owner_offset, 1);
  1513. if (ret == 0) {
  1514. struct btrfs_key found_key;
  1515. extent_slot = path->slots[0];
  1516. while(extent_slot > 0) {
  1517. extent_slot--;
  1518. btrfs_item_key_to_cpu(path->nodes[0], &found_key,
  1519. extent_slot);
  1520. if (found_key.objectid != bytenr)
  1521. break;
  1522. if (found_key.type == BTRFS_EXTENT_ITEM_KEY &&
  1523. found_key.offset == num_bytes) {
  1524. found_extent = 1;
  1525. break;
  1526. }
  1527. if (path->slots[0] - extent_slot > 5)
  1528. break;
  1529. }
  1530. if (!found_extent)
  1531. ret = btrfs_del_item(trans, extent_root, path);
  1532. } else {
  1533. btrfs_print_leaf(extent_root, path->nodes[0]);
  1534. WARN_ON(1);
  1535. printk("Unable to find ref byte nr %Lu root %Lu "
  1536. " gen %Lu owner %Lu offset %Lu\n", bytenr,
  1537. root_objectid, ref_generation, owner_objectid,
  1538. owner_offset);
  1539. }
  1540. if (!found_extent) {
  1541. btrfs_release_path(extent_root, path);
  1542. ret = btrfs_search_slot(trans, extent_root, &key, path, -1, 1);
  1543. if (ret < 0)
  1544. return ret;
  1545. BUG_ON(ret);
  1546. extent_slot = path->slots[0];
  1547. }
  1548. leaf = path->nodes[0];
  1549. ei = btrfs_item_ptr(leaf, extent_slot,
  1550. struct btrfs_extent_item);
  1551. refs = btrfs_extent_refs(leaf, ei);
  1552. BUG_ON(refs == 0);
  1553. refs -= 1;
  1554. btrfs_set_extent_refs(leaf, ei, refs);
  1555. btrfs_mark_buffer_dirty(leaf);
  1556. if (refs == 0 && found_extent && path->slots[0] == extent_slot + 1) {
  1557. /* if the back ref and the extent are next to each other
  1558. * they get deleted below in one shot
  1559. */
  1560. path->slots[0] = extent_slot;
  1561. num_to_del = 2;
  1562. } else if (found_extent) {
  1563. /* otherwise delete the extent back ref */
  1564. ret = btrfs_del_item(trans, extent_root, path);
  1565. BUG_ON(ret);
  1566. /* if refs are 0, we need to setup the path for deletion */
  1567. if (refs == 0) {
  1568. btrfs_release_path(extent_root, path);
  1569. ret = btrfs_search_slot(trans, extent_root, &key, path,
  1570. -1, 1);
  1571. if (ret < 0)
  1572. return ret;
  1573. BUG_ON(ret);
  1574. }
  1575. }
  1576. if (refs == 0) {
  1577. u64 super_used;
  1578. u64 root_used;
  1579. #ifdef BIO_RW_DISCARD
  1580. u64 map_length = num_bytes;
  1581. struct btrfs_multi_bio *multi = NULL;
  1582. #endif
  1583. if (pin) {
  1584. ret = pin_down_bytes(root, bytenr, num_bytes,
  1585. owner_objectid >= BTRFS_FIRST_FREE_OBJECTID, 0);
  1586. if (ret > 0)
  1587. mark_free = 1;
  1588. BUG_ON(ret < 0);
  1589. }
  1590. /* block accounting for super block */
  1591. spin_lock_irq(&info->delalloc_lock);
  1592. super_used = btrfs_super_bytes_used(&info->super_copy);
  1593. btrfs_set_super_bytes_used(&info->super_copy,
  1594. super_used - num_bytes);
  1595. spin_unlock_irq(&info->delalloc_lock);
  1596. /* block accounting for root item */
  1597. root_used = btrfs_root_used(&root->root_item);
  1598. btrfs_set_root_used(&root->root_item,
  1599. root_used - num_bytes);
  1600. ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
  1601. num_to_del);
  1602. if (ret) {
  1603. return ret;
  1604. }
  1605. ret = update_block_group(trans, root, bytenr, num_bytes, 0,
  1606. mark_free);
  1607. BUG_ON(ret);
  1608. #ifdef BIO_RW_DISCARD
  1609. /* Tell the block device(s) that the sectors can be discarded */
  1610. ret = btrfs_map_block(&root->fs_info->mapping_tree, READ,
  1611. bytenr, &map_length, &multi, 0);
  1612. if (!ret) {
  1613. struct btrfs_bio_stripe *stripe = multi->stripes;
  1614. int i;
  1615. if (map_length > num_bytes)
  1616. map_length = num_bytes;
  1617. for (i = 0; i < multi->num_stripes; i++, stripe++) {
  1618. blkdev_issue_discard(stripe->dev->bdev,
  1619. stripe->physical >> 9,
  1620. map_length >> 9);
  1621. }
  1622. kfree(multi);
  1623. }
  1624. #endif
  1625. }
  1626. btrfs_free_path(path);
  1627. finish_current_insert(trans, extent_root);
  1628. return ret;
  1629. }
  1630. /*
  1631. * find all the blocks marked as pending in the radix tree and remove
  1632. * them from the extent map
  1633. */
  1634. static int del_pending_extents(struct btrfs_trans_handle *trans, struct
  1635. btrfs_root *extent_root)
  1636. {
  1637. int ret;
  1638. int err = 0;
  1639. u64 start;
  1640. u64 end;
  1641. struct extent_io_tree *pending_del;
  1642. struct extent_io_tree *pinned_extents;
  1643. WARN_ON(!mutex_is_locked(&extent_root->fs_info->alloc_mutex));
  1644. pending_del = &extent_root->fs_info->pending_del;
  1645. pinned_extents = &extent_root->fs_info->pinned_extents;
  1646. while(1) {
  1647. ret = find_first_extent_bit(pending_del, 0, &start, &end,
  1648. EXTENT_LOCKED);
  1649. if (ret)
  1650. break;
  1651. clear_extent_bits(pending_del, start, end, EXTENT_LOCKED,
  1652. GFP_NOFS);
  1653. if (!test_range_bit(&extent_root->fs_info->extent_ins,
  1654. start, end, EXTENT_LOCKED, 0)) {
  1655. btrfs_update_pinned_extents(extent_root, start,
  1656. end + 1 - start, 1);
  1657. ret = __free_extent(trans, extent_root,
  1658. start, end + 1 - start,
  1659. extent_root->root_key.objectid,
  1660. 0, 0, 0, 0, 0);
  1661. } else {
  1662. clear_extent_bits(&extent_root->fs_info->extent_ins,
  1663. start, end, EXTENT_LOCKED, GFP_NOFS);
  1664. }
  1665. if (ret)
  1666. err = ret;
  1667. if (need_resched()) {
  1668. mutex_unlock(&extent_root->fs_info->alloc_mutex);
  1669. cond_resched();
  1670. mutex_lock(&extent_root->fs_info->alloc_mutex);
  1671. }
  1672. }
  1673. return err;
  1674. }
  1675. /*
  1676. * remove an extent from the root, returns 0 on success
  1677. */
  1678. static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
  1679. struct btrfs_root *root, u64 bytenr,
  1680. u64 num_bytes, u64 root_objectid,
  1681. u64 ref_generation, u64 owner_objectid,
  1682. u64 owner_offset, int pin)
  1683. {
  1684. struct btrfs_root *extent_root = root->fs_info->extent_root;
  1685. int pending_ret;
  1686. int ret;
  1687. WARN_ON(num_bytes < root->sectorsize);
  1688. if (!root->ref_cows)
  1689. ref_generation = 0;
  1690. if (root == extent_root) {
  1691. pin_down_bytes(root, bytenr, num_bytes, 0, 1);
  1692. return 0;
  1693. }
  1694. /* if metadata always pin */
  1695. if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID) {
  1696. if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
  1697. struct btrfs_block_group_cache *cache;
  1698. /* btrfs_free_reserved_extent */
  1699. cache = btrfs_lookup_block_group(root->fs_info, bytenr);
  1700. BUG_ON(!cache);
  1701. btrfs_add_free_space(cache, bytenr, num_bytes);
  1702. return 0;
  1703. }
  1704. pin = 1;
  1705. }
  1706. /* if data pin when any transaction has committed this */
  1707. if (ref_generation != trans->transid)
  1708. pin = 1;
  1709. ret = __free_extent(trans, root, bytenr, num_bytes, root_objectid,
  1710. ref_generation, owner_objectid, owner_offset,
  1711. pin, pin == 0);
  1712. finish_current_insert(trans, root->fs_info->extent_root);
  1713. pending_ret = del_pending_extents(trans, root->fs_info->extent_root);
  1714. return ret ? ret : pending_ret;
  1715. }
  1716. int btrfs_free_extent(struct btrfs_trans_handle *trans,
  1717. struct btrfs_root *root, u64 bytenr,
  1718. u64 num_bytes, u64 root_objectid,
  1719. u64 ref_generation, u64 owner_objectid,
  1720. u64 owner_offset, int pin)
  1721. {
  1722. int ret;
  1723. maybe_lock_mutex(root);
  1724. ret = __btrfs_free_extent(trans, root, bytenr, num_bytes,
  1725. root_objectid, ref_generation,
  1726. owner_objectid, owner_offset, pin);
  1727. maybe_unlock_mutex(root);
  1728. return ret;
  1729. }
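/* round val up to the next stripesize boundary */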
  1730. static u64 stripe_align(struct btrfs_root *root, u64 val)
  1731. {
  1732. u64 mask = ((u64)root->stripesize - 1);
  1733. u64 ret = (val + mask) & ~mask;
  1734. return ret;
  1735. }
  1736. /*
1737. * walks the btree of allocated extents and finds a hole of a given size.
  1738. * The key ins is changed to record the hole:
  1739. * ins->objectid == block start
  1740. * ins->flags = BTRFS_EXTENT_ITEM_KEY
  1741. * ins->offset == number of blocks
  1742. * Any available blocks before search_start are skipped.
  1743. */
  1744. static int noinline find_free_extent(struct btrfs_trans_handle *trans,
  1745. struct btrfs_root *orig_root,
  1746. u64 num_bytes, u64 empty_size,
  1747. u64 search_start, u64 search_end,
  1748. u64 hint_byte, struct btrfs_key *ins,
  1749. u64 exclude_start, u64 exclude_nr,
  1750. int data)
  1751. {
  1752. int ret;
  1753. u64 orig_search_start;
  1754. struct btrfs_root * root = orig_root->fs_info->extent_root;
  1755. struct btrfs_fs_info *info = root->fs_info;
  1756. u64 total_needed = num_bytes;
  1757. u64 *last_ptr = NULL;
  1758. struct btrfs_block_group_cache *block_group;
  1759. int chunk_alloc_done = 0;
  1760. int empty_cluster = 2 * 1024 * 1024;
  1761. int allowed_chunk_alloc = 0;
  1762. WARN_ON(num_bytes < root->sectorsize);
  1763. btrfs_set_key_type(ins, BTRFS_EXTENT_ITEM_KEY);
  1764. if (orig_root->ref_cows || empty_size)
  1765. allowed_chunk_alloc = 1;
  1766. if (data & BTRFS_BLOCK_GROUP_METADATA) {
  1767. last_ptr = &root->fs_info->last_alloc;
  1768. empty_cluster = 256 * 1024;
  1769. }
  1770. if ((data & BTRFS_BLOCK_GROUP_DATA) && btrfs_test_opt(root, SSD))
  1771. last_ptr = &root->fs_info->last_data_alloc;
  1772. if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
  1773. last_ptr = &root->fs_info->last_log_alloc;
1774. if (*last_ptr == 0 && root->fs_info->last_alloc) {
  1775. *last_ptr = root->fs_info->last_alloc + empty_cluster;
  1776. }
  1777. }
  1778. if (last_ptr) {
  1779. if (*last_ptr)
  1780. hint_byte = *last_ptr;
  1781. else
  1782. empty_size += empty_cluster;
  1783. }
  1784. search_start = max(search_start, first_logical_byte(root, 0));
  1785. orig_search_start = search_start;
  1786. if (search_end == (u64)-1)
  1787. search_end = btrfs_super_total_bytes(&info->super_copy);
  1788. search_start = max(search_start, hint_byte);
  1789. total_needed += empty_size;
  1790. new_group:
  1791. block_group = btrfs_lookup_block_group(info, search_start);
  1792. /*
1793. * Ok, this looks a little tricky, but it's really simple.  First, if we
1794. * didn't find a block group, we obviously want to start over.
1795. * Secondly, if the block group we found does not match the type we
1796. * need, and we have a last_ptr and it's not 0, chances are the last
1797. * allocation we made was at the end of the block group, so go ahead
1798. * and skip looking through the rest of the block groups and start
1799. * again at the beginning.  This helps with metadata allocations,
1800. * since you are likely to have a bunch of data block groups to search
1801. * through first before you realize that you need to start over, so
1802. * starting over right away saves time.
  1803. */
  1804. if (!block_group || (!block_group_bits(block_group, data) &&
  1805. last_ptr && *last_ptr)) {
  1806. if (search_start != orig_search_start) {
  1807. if (last_ptr && *last_ptr)
  1808. *last_ptr = 0;
  1809. search_start = orig_search_start;
  1810. goto new_group;
  1811. } else if (!chunk_alloc_done && allowed_chunk_alloc) {
  1812. ret = do_chunk_alloc(trans, root,
  1813. num_bytes + 2 * 1024 * 1024,
  1814. data, 1);
  1815. if (ret < 0) {
  1816. struct btrfs_space_info *info;
  1817. info = __find_space_info(root->fs_info, data);
  1818. goto error;
  1819. }
  1820. BUG_ON(ret);
  1821. chunk_alloc_done = 1;
  1822. search_start = orig_search_start;
  1823. goto new_group;
  1824. } else {
  1825. ret = -ENOSPC;
  1826. goto error;
  1827. }
  1828. }
  1829. /*
1830. * this is going to search through all of the existing block groups it
  1831. * can find, so if we don't find something we need to see if we can
  1832. * allocate what we need.
  1833. */
  1834. ret = find_free_space(root, &block_group, &search_start,
  1835. total_needed, data);
  1836. if (ret == -ENOSPC) {
  1837. /*
  1838. * instead of allocating, start at the original search start
1839. * and see if there is something to be found; if not, then we
1840. * allocate
  1841. */
  1842. if (search_start != orig_search_start) {
  1843. if (last_ptr && *last_ptr) {
  1844. *last_ptr = 0;
  1845. total_needed += empty_cluster;
  1846. }
  1847. search_start = orig_search_start;
  1848. goto new_group;
  1849. }
  1850. /*
  1851. * we've already allocated, we're pretty screwed
  1852. */
  1853. if (chunk_alloc_done) {
  1854. goto error;
  1855. } else if (!allowed_chunk_alloc && block_group &&
  1856. block_group_bits(block_group, data)) {
  1857. block_group->space_info->force_alloc = 1;
  1858. goto error;
  1859. } else if (!allowed_chunk_alloc) {
  1860. goto error;
  1861. }
  1862. ret = do_chunk_alloc(trans, root, num_bytes + 2 * 1024 * 1024,
  1863. data, 1);
  1864. if (ret < 0)
  1865. goto error;
  1866. BUG_ON(ret);
  1867. chunk_alloc_done = 1;
  1868. if (block_group)
  1869. search_start = block_group->key.objectid +
  1870. block_group->key.offset;
  1871. else
  1872. search_start = orig_search_start;
  1873. goto new_group;
  1874. }
  1875. if (ret)
  1876. goto error;
  1877. search_start = stripe_align(root, search_start);
  1878. ins->objectid = search_start;
  1879. ins->offset = num_bytes;
  1880. if (ins->objectid + num_bytes >= search_end) {
  1881. search_start = orig_search_start;
  1882. if (chunk_alloc_done) {
  1883. ret = -ENOSPC;
  1884. goto error;
  1885. }
  1886. goto new_group;
  1887. }
  1888. if (ins->objectid + num_bytes >
  1889. block_group->key.objectid + block_group->key.offset) {
  1890. if (search_start == orig_search_start && chunk_alloc_done) {
  1891. ret = -ENOSPC;
  1892. goto error;
  1893. }
  1894. search_start = block_group->key.objectid +
  1895. block_group->key.offset;
  1896. goto new_group;
  1897. }
  1898. if (exclude_nr > 0 && (ins->objectid + num_bytes > exclude_start &&
  1899. ins->objectid < exclude_start + exclude_nr)) {
  1900. search_start = exclude_start + exclude_nr;
  1901. goto new_group;
  1902. }
  1903. if (!(data & BTRFS_BLOCK_GROUP_DATA))
  1904. trans->block_group = block_group;
  1905. ins->offset = num_bytes;
  1906. if (last_ptr) {
  1907. *last_ptr = ins->objectid + ins->offset;
  1908. if (*last_ptr ==
  1909. btrfs_super_total_bytes(&root->fs_info->super_copy))
  1910. *last_ptr = 0;
  1911. }
  1912. ret = 0;
  1913. error:
  1914. return ret;
  1915. }
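/*
 * dump_space_info prints the totals for a space_info and each of its block
 * groups; it is called just before the BUG() when an allocation that
 * should have succeeded fails.
 */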
  1916. static void dump_space_info(struct btrfs_space_info *info, u64 bytes)
  1917. {
  1918. struct btrfs_block_group_cache *cache;
  1919. struct list_head *l;
  1920. printk(KERN_INFO "space_info has %Lu free, is %sfull\n",
  1921. info->total_bytes - info->bytes_used - info->bytes_pinned,
  1922. (info->full) ? "" : "not ");
  1923. spin_lock(&info->lock);
  1924. list_for_each(l, &info->block_groups) {
  1925. cache = list_entry(l, struct btrfs_block_group_cache, list);
  1926. spin_lock(&cache->lock);
  1927. printk(KERN_INFO "block group %Lu has %Lu bytes, %Lu used "
  1928. "%Lu pinned\n",
  1929. cache->key.objectid, cache->key.offset,
  1930. btrfs_block_group_used(&cache->item), cache->pinned);
  1931. btrfs_dump_free_space(cache, bytes);
  1932. spin_unlock(&cache->lock);
  1933. }
  1934. spin_unlock(&info->lock);
  1935. }
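/*
 * __btrfs_reserve_extent picks the final allocation profile for the
 * request (data, system or metadata), pre-allocates chunks where allowed,
 * and calls find_free_extent.  On -ENOSPC the size is halved (down to
 * min_alloc_size) and the search retried; the winning range is removed
 * from the block group's free space cache before returning.
 */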
  1936. static int __btrfs_reserve_extent(struct btrfs_trans_handle *trans,
  1937. struct btrfs_root *root,
  1938. u64 num_bytes, u64 min_alloc_size,
  1939. u64 empty_size, u64 hint_byte,
  1940. u64 search_end, struct btrfs_key *ins,
  1941. u64 data)
  1942. {
  1943. int ret;
  1944. u64 search_start = 0;
  1945. u64 alloc_profile;
  1946. struct btrfs_fs_info *info = root->fs_info;
  1947. struct btrfs_block_group_cache *cache;
  1948. if (data) {
  1949. alloc_profile = info->avail_data_alloc_bits &
  1950. info->data_alloc_profile;
  1951. data = BTRFS_BLOCK_GROUP_DATA | alloc_profile;
  1952. } else if (root == root->fs_info->chunk_root) {
  1953. alloc_profile = info->avail_system_alloc_bits &
  1954. info->system_alloc_profile;
  1955. data = BTRFS_BLOCK_GROUP_SYSTEM | alloc_profile;
  1956. } else {
  1957. alloc_profile = info->avail_metadata_alloc_bits &
  1958. info->metadata_alloc_profile;
  1959. data = BTRFS_BLOCK_GROUP_METADATA | alloc_profile;
  1960. }
  1961. again:
  1962. data = reduce_alloc_profile(root, data);
  1963. /*
  1964. * the only place that sets empty_size is btrfs_realloc_node, which
  1965. * is not called recursively on allocations
  1966. */
  1967. if (empty_size || root->ref_cows) {
  1968. if (!(data & BTRFS_BLOCK_GROUP_METADATA)) {
  1969. ret = do_chunk_alloc(trans, root->fs_info->extent_root,
  1970. 2 * 1024 * 1024,
  1971. BTRFS_BLOCK_GROUP_METADATA |
  1972. (info->metadata_alloc_profile &
  1973. info->avail_metadata_alloc_bits), 0);
  1974. }
  1975. ret = do_chunk_alloc(trans, root->fs_info->extent_root,
  1976. num_bytes + 2 * 1024 * 1024, data, 0);
  1977. }
  1978. WARN_ON(num_bytes < root->sectorsize);
  1979. ret = find_free_extent(trans, root, num_bytes, empty_size,
  1980. search_start, search_end, hint_byte, ins,
  1981. trans->alloc_exclude_start,
  1982. trans->alloc_exclude_nr, data);
  1983. if (ret == -ENOSPC && num_bytes > min_alloc_size) {
  1984. num_bytes = num_bytes >> 1;
  1985. num_bytes = num_bytes & ~(root->sectorsize - 1);
  1986. num_bytes = max(num_bytes, min_alloc_size);
  1987. do_chunk_alloc(trans, root->fs_info->extent_root,
  1988. num_bytes, data, 1);
  1989. goto again;
  1990. }
  1991. if (ret) {
  1992. struct btrfs_space_info *sinfo;
  1993. sinfo = __find_space_info(root->fs_info, data);
  1994. printk("allocation failed flags %Lu, wanted %Lu\n",
  1995. data, num_bytes);
  1996. dump_space_info(sinfo, num_bytes);
  1997. BUG();
  1998. }
  1999. cache = btrfs_lookup_block_group(root->fs_info, ins->objectid);
  2000. if (!cache) {
  2001. printk(KERN_ERR "Unable to find block group for %Lu\n", ins->objectid);
  2002. return -ENOSPC;
  2003. }
  2004. ret = btrfs_remove_free_space(cache, ins->objectid, ins->offset);
  2005. return ret;
  2006. }
  2007. int btrfs_free_reserved_extent(struct btrfs_root *root, u64 start, u64 len)
  2008. {
  2009. struct btrfs_block_group_cache *cache;
  2010. maybe_lock_mutex(root);
  2011. cache = btrfs_lookup_block_group(root->fs_info, start);
  2012. if (!cache) {
  2013. printk(KERN_ERR "Unable to find block group for %Lu\n", start);
  2014. maybe_unlock_mutex(root);
  2015. return -ENOSPC;
  2016. }
  2017. btrfs_add_free_space(cache, start, len);
  2018. maybe_unlock_mutex(root);
  2019. return 0;
  2020. }
  2021. int btrfs_reserve_extent(struct btrfs_trans_handle *trans,
  2022. struct btrfs_root *root,
  2023. u64 num_bytes, u64 min_alloc_size,
  2024. u64 empty_size, u64 hint_byte,
  2025. u64 search_end, struct btrfs_key *ins,
  2026. u64 data)
  2027. {
  2028. int ret;
  2029. maybe_lock_mutex(root);
  2030. ret = __btrfs_reserve_extent(trans, root, num_bytes, min_alloc_size,
  2031. empty_size, hint_byte, search_end, ins,
  2032. data);
  2033. maybe_unlock_mutex(root);
  2034. return ret;
  2035. }
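/*
 * __btrfs_alloc_reserved_extent inserts the extent item plus its first
 * backref for a range that has already been reserved, and updates the
 * byte accounting in the super block, the root item and the block group.
 * Allocations made for the extent root itself are only recorded in
 * fs_info->extent_ins here and inserted later by finish_current_insert.
 */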
  2036. static int __btrfs_alloc_reserved_extent(struct btrfs_trans_handle *trans,
  2037. struct btrfs_root *root,
  2038. u64 root_objectid, u64 ref_generation,
  2039. u64 owner, u64 owner_offset,
  2040. struct btrfs_key *ins)
  2041. {
  2042. int ret;
  2043. int pending_ret;
  2044. u64 super_used;
  2045. u64 root_used;
  2046. u64 num_bytes = ins->offset;
  2047. u32 sizes[2];
  2048. struct btrfs_fs_info *info = root->fs_info;
  2049. struct btrfs_root *extent_root = info->extent_root;
  2050. struct btrfs_extent_item *extent_item;
  2051. struct btrfs_extent_ref *ref;
  2052. struct btrfs_path *path;
  2053. struct btrfs_key keys[2];
  2054. /* block accounting for super block */
  2055. spin_lock_irq(&info->delalloc_lock);
  2056. super_used = btrfs_super_bytes_used(&info->super_copy);
  2057. btrfs_set_super_bytes_used(&info->super_copy, super_used + num_bytes);
  2058. spin_unlock_irq(&info->delalloc_lock);
  2059. /* block accounting for root item */
  2060. root_used = btrfs_root_used(&root->root_item);
  2061. btrfs_set_root_used(&root->root_item, root_used + num_bytes);
  2062. if (root == extent_root) {
  2063. set_extent_bits(&root->fs_info->extent_ins, ins->objectid,
  2064. ins->objectid + ins->offset - 1,
  2065. EXTENT_LOCKED, GFP_NOFS);
  2066. goto update_block;
  2067. }
  2068. memcpy(&keys[0], ins, sizeof(*ins));
  2069. keys[1].offset = hash_extent_ref(root_objectid, ref_generation,
  2070. owner, owner_offset);
  2071. keys[1].objectid = ins->objectid;
  2072. keys[1].type = BTRFS_EXTENT_REF_KEY;
  2073. sizes[0] = sizeof(*extent_item);
  2074. sizes[1] = sizeof(*ref);
  2075. path = btrfs_alloc_path();
  2076. BUG_ON(!path);
  2077. ret = btrfs_insert_empty_items(trans, extent_root, path, keys,
  2078. sizes, 2);
  2079. BUG_ON(ret);
  2080. extent_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
  2081. struct btrfs_extent_item);
  2082. btrfs_set_extent_refs(path->nodes[0], extent_item, 1);
  2083. ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1,
  2084. struct btrfs_extent_ref);
  2085. btrfs_set_ref_root(path->nodes[0], ref, root_objectid);
  2086. btrfs_set_ref_generation(path->nodes[0], ref, ref_generation);
  2087. btrfs_set_ref_objectid(path->nodes[0], ref, owner);
  2088. btrfs_set_ref_offset(path->nodes[0], ref, owner_offset);
  2089. btrfs_mark_buffer_dirty(path->nodes[0]);
  2090. trans->alloc_exclude_start = 0;
  2091. trans->alloc_exclude_nr = 0;
  2092. btrfs_free_path(path);
  2093. finish_current_insert(trans, extent_root);
  2094. pending_ret = del_pending_extents(trans, extent_root);
  2095. if (ret)
  2096. goto out;
  2097. if (pending_ret) {
  2098. ret = pending_ret;
  2099. goto out;
  2100. }
  2101. update_block:
  2102. ret = update_block_group(trans, root, ins->objectid, ins->offset, 1, 0);
  2103. if (ret) {
  2104. printk("update block group failed for %Lu %Lu\n",
  2105. ins->objectid, ins->offset);
  2106. BUG();
  2107. }
  2108. out:
  2109. return ret;
  2110. }
  2111. int btrfs_alloc_reserved_extent(struct btrfs_trans_handle *trans,
  2112. struct btrfs_root *root,
  2113. u64 root_objectid, u64 ref_generation,
  2114. u64 owner, u64 owner_offset,
  2115. struct btrfs_key *ins)
  2116. {
  2117. int ret;
  2118. maybe_lock_mutex(root);
  2119. ret = __btrfs_alloc_reserved_extent(trans, root, root_objectid,
  2120. ref_generation, owner,
  2121. owner_offset, ins);
  2122. maybe_unlock_mutex(root);
  2123. return ret;
  2124. }
  2125. /*
  2126. * this is used by the tree logging recovery code. It records that
  2127. * an extent has been allocated and makes sure to clear the free
  2128. * space cache bits as well
  2129. */
  2130. int btrfs_alloc_logged_extent(struct btrfs_trans_handle *trans,
  2131. struct btrfs_root *root,
  2132. u64 root_objectid, u64 ref_generation,
  2133. u64 owner, u64 owner_offset,
  2134. struct btrfs_key *ins)
  2135. {
  2136. int ret;
  2137. struct btrfs_block_group_cache *block_group;
  2138. maybe_lock_mutex(root);
  2139. block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid);
  2140. cache_block_group(root, block_group);
  2141. ret = btrfs_remove_free_space(block_group, ins->objectid, ins->offset);
  2142. BUG_ON(ret);
  2143. ret = __btrfs_alloc_reserved_extent(trans, root, root_objectid,
  2144. ref_generation, owner,
  2145. owner_offset, ins);
  2146. maybe_unlock_mutex(root);
  2147. return ret;
  2148. }
  2149. /*
2150. * finds a free extent and does all the dirty work required for allocation.
2151. * The key for the extent is returned through ins, and a tree buffer for
2152. * the first block of the extent through buf.
  2153. *
  2154. * returns 0 if everything worked, non-zero otherwise.
  2155. */
  2156. int btrfs_alloc_extent(struct btrfs_trans_handle *trans,
  2157. struct btrfs_root *root,
  2158. u64 num_bytes, u64 min_alloc_size,
  2159. u64 root_objectid, u64 ref_generation,
  2160. u64 owner, u64 owner_offset,
  2161. u64 empty_size, u64 hint_byte,
  2162. u64 search_end, struct btrfs_key *ins, u64 data)
  2163. {
  2164. int ret;
  2165. maybe_lock_mutex(root);
  2166. ret = __btrfs_reserve_extent(trans, root, num_bytes,
  2167. min_alloc_size, empty_size, hint_byte,
  2168. search_end, ins, data);
  2169. BUG_ON(ret);
  2170. if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {
  2171. ret = __btrfs_alloc_reserved_extent(trans, root, root_objectid,
  2172. ref_generation, owner,
  2173. owner_offset, ins);
  2174. BUG_ON(ret);
  2175. }
  2176. maybe_unlock_mutex(root);
  2177. return ret;
  2178. }
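/*
 * btrfs_init_new_buffer sets up the in-memory buffer for a freshly
 * allocated tree block: the buffer is locked, cleaned, marked uptodate and
 * added to the dirty pages of either the log tree or the running
 * transaction.  It is returned still locked.
 */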
  2179. struct extent_buffer *btrfs_init_new_buffer(struct btrfs_trans_handle *trans,
  2180. struct btrfs_root *root,
  2181. u64 bytenr, u32 blocksize)
  2182. {
  2183. struct extent_buffer *buf;
  2184. buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
  2185. if (!buf)
  2186. return ERR_PTR(-ENOMEM);
  2187. btrfs_set_header_generation(buf, trans->transid);
  2188. btrfs_tree_lock(buf);
  2189. clean_tree_block(trans, root, buf);
  2190. btrfs_set_buffer_uptodate(buf);
  2191. if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
  2192. set_extent_dirty(&root->dirty_log_pages, buf->start,
  2193. buf->start + buf->len - 1, GFP_NOFS);
  2194. } else {
  2195. set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
  2196. buf->start + buf->len - 1, GFP_NOFS);
  2197. }
  2198. trans->blocks_used++;
  2199. return buf;
  2200. }
  2201. /*
  2202. * helper function to allocate a block for a given tree
2203. * returns the tree buffer or an ERR_PTR on failure.
  2204. */
  2205. struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
  2206. struct btrfs_root *root,
  2207. u32 blocksize,
  2208. u64 root_objectid,
  2209. u64 ref_generation,
  2210. u64 first_objectid,
  2211. int level,
  2212. u64 hint,
  2213. u64 empty_size)
  2214. {
  2215. struct btrfs_key ins;
  2216. int ret;
  2217. struct extent_buffer *buf;
  2218. ret = btrfs_alloc_extent(trans, root, blocksize, blocksize,
  2219. root_objectid, ref_generation,
  2220. level, first_objectid, empty_size, hint,
  2221. (u64)-1, &ins, 0);
  2222. if (ret) {
  2223. BUG_ON(ret > 0);
  2224. return ERR_PTR(ret);
  2225. }
  2226. buf = btrfs_init_new_buffer(trans, root, ins.objectid, blocksize);
  2227. return buf;
  2228. }
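/*
 * btrfs_drop_leaf_ref drops one reference on every file extent pointed to
 * by the items in 'leaf', using the leaf's owner and generation for the
 * backrefs.  It is used when an entire leaf is freed, e.g. while walking
 * down a dead snapshot.
 */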
  2229. int btrfs_drop_leaf_ref(struct btrfs_trans_handle *trans,
  2230. struct btrfs_root *root, struct extent_buffer *leaf)
  2231. {
  2232. u64 leaf_owner;
  2233. u64 leaf_generation;
  2234. struct btrfs_key key;
  2235. struct btrfs_file_extent_item *fi;
  2236. int i;
  2237. int nritems;
  2238. int ret;
  2239. BUG_ON(!btrfs_is_leaf(leaf));
  2240. nritems = btrfs_header_nritems(leaf);
  2241. leaf_owner = btrfs_header_owner(leaf);
  2242. leaf_generation = btrfs_header_generation(leaf);
  2243. for (i = 0; i < nritems; i++) {
  2244. u64 disk_bytenr;
  2245. cond_resched();
  2246. btrfs_item_key_to_cpu(leaf, &key, i);
  2247. if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
  2248. continue;
  2249. fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
  2250. if (btrfs_file_extent_type(leaf, fi) ==
  2251. BTRFS_FILE_EXTENT_INLINE)
  2252. continue;
  2253. /*
  2254. * FIXME make sure to insert a trans record that
  2255. * repeats the snapshot del on crash
  2256. */
  2257. disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
  2258. if (disk_bytenr == 0)
  2259. continue;
  2260. mutex_lock(&root->fs_info->alloc_mutex);
  2261. ret = __btrfs_free_extent(trans, root, disk_bytenr,
  2262. btrfs_file_extent_disk_num_bytes(leaf, fi),
  2263. leaf_owner, leaf_generation,
  2264. key.objectid, key.offset, 0);
  2265. mutex_unlock(&root->fs_info->alloc_mutex);
  2266. atomic_inc(&root->fs_info->throttle_gen);
  2267. wake_up(&root->fs_info->transaction_throttle);
  2268. cond_resched();
  2269. BUG_ON(ret);
  2270. }
  2271. return 0;
  2272. }
  2273. static int noinline cache_drop_leaf_ref(struct btrfs_trans_handle *trans,
  2274. struct btrfs_root *root,
  2275. struct btrfs_leaf_ref *ref)
  2276. {
  2277. int i;
  2278. int ret;
  2279. struct btrfs_extent_info *info = ref->extents;
  2280. for (i = 0; i < ref->nritems; i++) {
  2281. mutex_lock(&root->fs_info->alloc_mutex);
  2282. ret = __btrfs_free_extent(trans, root,
  2283. info->bytenr, info->num_bytes,
  2284. ref->owner, ref->generation,
  2285. info->objectid, info->offset, 0);
  2286. mutex_unlock(&root->fs_info->alloc_mutex);
  2287. atomic_inc(&root->fs_info->throttle_gen);
  2288. wake_up(&root->fs_info->transaction_throttle);
  2289. cond_resched();
  2290. BUG_ON(ret);
  2291. info++;
  2292. }
  2293. return 0;
  2294. }
  2295. int drop_snap_lookup_refcount(struct btrfs_root *root, u64 start, u64 len,
  2296. u32 *refs)
  2297. {
  2298. int ret;
  2299. ret = lookup_extent_ref(NULL, root, start, len, refs);
  2300. BUG_ON(ret);
  2301. #if 0 // some debugging code in case we see problems here
  2302. /* if the refs count is one, it won't get increased again. But
  2303. * if the ref count is > 1, someone may be decreasing it at
  2304. * the same time we are.
  2305. */
  2306. if (*refs != 1) {
  2307. struct extent_buffer *eb = NULL;
  2308. eb = btrfs_find_create_tree_block(root, start, len);
  2309. if (eb)
  2310. btrfs_tree_lock(eb);
  2311. mutex_lock(&root->fs_info->alloc_mutex);
  2312. ret = lookup_extent_ref(NULL, root, start, len, refs);
  2313. BUG_ON(ret);
  2314. mutex_unlock(&root->fs_info->alloc_mutex);
  2315. if (eb) {
  2316. btrfs_tree_unlock(eb);
  2317. free_extent_buffer(eb);
  2318. }
  2319. if (*refs == 1) {
  2320. printk("block %llu went down to one during drop_snap\n",
  2321. (unsigned long long)start);
  2322. }
  2323. }
  2324. #endif
  2325. cond_resched();
  2326. return ret;
  2327. }
  2328. /*
  2329. * helper function for drop_snapshot, this walks down the tree dropping ref
  2330. * counts as it goes.
  2331. */
  2332. static int noinline walk_down_tree(struct btrfs_trans_handle *trans,
  2333. struct btrfs_root *root,
  2334. struct btrfs_path *path, int *level)
  2335. {
  2336. u64 root_owner;
  2337. u64 root_gen;
  2338. u64 bytenr;
  2339. u64 ptr_gen;
  2340. struct extent_buffer *next;
  2341. struct extent_buffer *cur;
  2342. struct extent_buffer *parent;
  2343. struct btrfs_leaf_ref *ref;
  2344. u32 blocksize;
  2345. int ret;
  2346. u32 refs;
  2347. WARN_ON(*level < 0);
  2348. WARN_ON(*level >= BTRFS_MAX_LEVEL);
  2349. ret = drop_snap_lookup_refcount(root, path->nodes[*level]->start,
  2350. path->nodes[*level]->len, &refs);
  2351. BUG_ON(ret);
  2352. if (refs > 1)
  2353. goto out;
  2354. /*
  2355. * walk down to the last node level and free all the leaves
  2356. */
  2357. while(*level >= 0) {
  2358. WARN_ON(*level < 0);
  2359. WARN_ON(*level >= BTRFS_MAX_LEVEL);
  2360. cur = path->nodes[*level];
  2361. if (btrfs_header_level(cur) != *level)
  2362. WARN_ON(1);
  2363. if (path->slots[*level] >=
  2364. btrfs_header_nritems(cur))
  2365. break;
  2366. if (*level == 0) {
  2367. ret = btrfs_drop_leaf_ref(trans, root, cur);
  2368. BUG_ON(ret);
  2369. break;
  2370. }
  2371. bytenr = btrfs_node_blockptr(cur, path->slots[*level]);
  2372. ptr_gen = btrfs_node_ptr_generation(cur, path->slots[*level]);
  2373. blocksize = btrfs_level_size(root, *level - 1);
  2374. ret = drop_snap_lookup_refcount(root, bytenr, blocksize, &refs);
  2375. BUG_ON(ret);
  2376. if (refs != 1) {
  2377. parent = path->nodes[*level];
  2378. root_owner = btrfs_header_owner(parent);
  2379. root_gen = btrfs_header_generation(parent);
  2380. path->slots[*level]++;
  2381. mutex_lock(&root->fs_info->alloc_mutex);
  2382. ret = __btrfs_free_extent(trans, root, bytenr,
  2383. blocksize, root_owner,
  2384. root_gen, 0, 0, 1);
  2385. BUG_ON(ret);
  2386. mutex_unlock(&root->fs_info->alloc_mutex);
  2387. atomic_inc(&root->fs_info->throttle_gen);
  2388. wake_up(&root->fs_info->transaction_throttle);
  2389. cond_resched();
  2390. continue;
  2391. }
  2392. /*
  2393. * at this point, we have a single ref, and since the
2394. * only place referencing this extent is a dead root,
2395. * the reference count should never go higher.
2396. * So, we don't need to check it again.
  2397. */
  2398. if (*level == 1) {
  2399. struct btrfs_key key;
  2400. btrfs_node_key_to_cpu(cur, &key, path->slots[*level]);
  2401. ref = btrfs_lookup_leaf_ref(root, bytenr);
  2402. if (ref) {
  2403. ret = cache_drop_leaf_ref(trans, root, ref);
  2404. BUG_ON(ret);
  2405. btrfs_remove_leaf_ref(root, ref);
  2406. btrfs_free_leaf_ref(root, ref);
  2407. *level = 0;
  2408. break;
  2409. }
  2410. if (printk_ratelimit())
  2411. printk("leaf ref miss for bytenr %llu\n",
  2412. (unsigned long long)bytenr);
  2413. }
  2414. next = btrfs_find_tree_block(root, bytenr, blocksize);
  2415. if (!next || !btrfs_buffer_uptodate(next, ptr_gen)) {
  2416. free_extent_buffer(next);
  2417. next = read_tree_block(root, bytenr, blocksize,
  2418. ptr_gen);
  2419. cond_resched();
  2420. #if 0
  2421. /*
  2422. * this is a debugging check and can go away
  2423. * the ref should never go all the way down to 1
  2424. * at this point
  2425. */
  2426. ret = lookup_extent_ref(NULL, root, bytenr, blocksize,
  2427. &refs);
  2428. BUG_ON(ret);
  2429. WARN_ON(refs != 1);
  2430. #endif
  2431. }
  2432. WARN_ON(*level <= 0);
  2433. if (path->nodes[*level-1])
  2434. free_extent_buffer(path->nodes[*level-1]);
  2435. path->nodes[*level-1] = next;
  2436. *level = btrfs_header_level(next);
  2437. path->slots[*level] = 0;
  2438. cond_resched();
  2439. }
  2440. out:
  2441. WARN_ON(*level < 0);
  2442. WARN_ON(*level >= BTRFS_MAX_LEVEL);
  2443. if (path->nodes[*level] == root->node) {
  2444. parent = path->nodes[*level];
  2445. bytenr = path->nodes[*level]->start;
  2446. } else {
  2447. parent = path->nodes[*level + 1];
  2448. bytenr = btrfs_node_blockptr(parent, path->slots[*level + 1]);
  2449. }
  2450. blocksize = btrfs_level_size(root, *level);
  2451. root_owner = btrfs_header_owner(parent);
  2452. root_gen = btrfs_header_generation(parent);
  2453. mutex_lock(&root->fs_info->alloc_mutex);
  2454. ret = __btrfs_free_extent(trans, root, bytenr, blocksize,
  2455. root_owner, root_gen, 0, 0, 1);
  2456. free_extent_buffer(path->nodes[*level]);
  2457. path->nodes[*level] = NULL;
  2458. *level += 1;
  2459. BUG_ON(ret);
  2460. mutex_unlock(&root->fs_info->alloc_mutex);
  2461. cond_resched();
  2462. return 0;
  2463. }
  2464. /*
  2465. * helper for dropping snapshots. This walks back up the tree in the path
  2466. * to find the first node higher up where we haven't yet gone through
  2467. * all the slots
  2468. */
  2469. static int noinline walk_up_tree(struct btrfs_trans_handle *trans,
  2470. struct btrfs_root *root,
  2471. struct btrfs_path *path, int *level)
  2472. {
  2473. u64 root_owner;
  2474. u64 root_gen;
  2475. struct btrfs_root_item *root_item = &root->root_item;
  2476. int i;
  2477. int slot;
  2478. int ret;
  2479. for(i = *level; i < BTRFS_MAX_LEVEL - 1 && path->nodes[i]; i++) {
  2480. slot = path->slots[i];
  2481. if (slot < btrfs_header_nritems(path->nodes[i]) - 1) {
  2482. struct extent_buffer *node;
  2483. struct btrfs_disk_key disk_key;
  2484. node = path->nodes[i];
  2485. path->slots[i]++;
  2486. *level = i;
  2487. WARN_ON(*level == 0);
  2488. btrfs_node_key(node, &disk_key, path->slots[i]);
  2489. memcpy(&root_item->drop_progress,
  2490. &disk_key, sizeof(disk_key));
  2491. root_item->drop_level = i;
  2492. return 0;
  2493. } else {
  2494. if (path->nodes[*level] == root->node) {
  2495. root_owner = root->root_key.objectid;
  2496. root_gen =
  2497. btrfs_header_generation(path->nodes[*level]);
  2498. } else {
  2499. struct extent_buffer *node;
  2500. node = path->nodes[*level + 1];
  2501. root_owner = btrfs_header_owner(node);
  2502. root_gen = btrfs_header_generation(node);
  2503. }
  2504. ret = btrfs_free_extent(trans, root,
  2505. path->nodes[*level]->start,
  2506. path->nodes[*level]->len,
  2507. root_owner, root_gen, 0, 0, 1);
  2508. BUG_ON(ret);
  2509. free_extent_buffer(path->nodes[*level]);
  2510. path->nodes[*level] = NULL;
  2511. *level = i + 1;
  2512. }
  2513. }
  2514. return 1;
  2515. }
  2516. /*
  2517. * drop the reference count on the tree rooted at 'snap'. This traverses
  2518. * the tree freeing any blocks that have a ref count of zero after being
  2519. * decremented.
  2520. */
  2521. int btrfs_drop_snapshot(struct btrfs_trans_handle *trans, struct btrfs_root
  2522. *root)
  2523. {
  2524. int ret = 0;
  2525. int wret;
  2526. int level;
  2527. struct btrfs_path *path;
  2528. int i;
  2529. int orig_level;
  2530. struct btrfs_root_item *root_item = &root->root_item;
  2531. WARN_ON(!mutex_is_locked(&root->fs_info->drop_mutex));
  2532. path = btrfs_alloc_path();
  2533. BUG_ON(!path);
  2534. level = btrfs_header_level(root->node);
  2535. orig_level = level;
  2536. if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
  2537. path->nodes[level] = root->node;
  2538. extent_buffer_get(root->node);
  2539. path->slots[level] = 0;
  2540. } else {
  2541. struct btrfs_key key;
  2542. struct btrfs_disk_key found_key;
  2543. struct extent_buffer *node;
  2544. btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
  2545. level = root_item->drop_level;
  2546. path->lowest_level = level;
  2547. wret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
  2548. if (wret < 0) {
  2549. ret = wret;
  2550. goto out;
  2551. }
  2552. node = path->nodes[level];
  2553. btrfs_node_key(node, &found_key, path->slots[level]);
  2554. WARN_ON(memcmp(&found_key, &root_item->drop_progress,
  2555. sizeof(found_key)));
  2556. /*
  2557. * unlock our path, this is safe because only this
  2558. * function is allowed to delete this snapshot
  2559. */
  2560. for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
  2561. if (path->nodes[i] && path->locks[i]) {
  2562. path->locks[i] = 0;
  2563. btrfs_tree_unlock(path->nodes[i]);
  2564. }
  2565. }
  2566. }
  2567. while(1) {
  2568. wret = walk_down_tree(trans, root, path, &level);
  2569. if (wret > 0)
  2570. break;
  2571. if (wret < 0)
  2572. ret = wret;
  2573. wret = walk_up_tree(trans, root, path, &level);
  2574. if (wret > 0)
  2575. break;
  2576. if (wret < 0)
  2577. ret = wret;
  2578. if (trans->transaction->in_commit) {
  2579. ret = -EAGAIN;
  2580. break;
  2581. }
  2582. atomic_inc(&root->fs_info->throttle_gen);
  2583. wake_up(&root->fs_info->transaction_throttle);
  2584. }
  2585. for (i = 0; i <= orig_level; i++) {
  2586. if (path->nodes[i]) {
  2587. free_extent_buffer(path->nodes[i]);
  2588. path->nodes[i] = NULL;
  2589. }
  2590. }
  2591. out:
  2592. btrfs_free_path(path);
  2593. return ret;
  2594. }
  2595. int btrfs_free_block_groups(struct btrfs_fs_info *info)
  2596. {
  2597. struct btrfs_block_group_cache *block_group;
  2598. struct rb_node *n;
  2599. mutex_lock(&info->alloc_mutex);
  2600. spin_lock(&info->block_group_cache_lock);
  2601. while ((n = rb_last(&info->block_group_cache_tree)) != NULL) {
  2602. block_group = rb_entry(n, struct btrfs_block_group_cache,
  2603. cache_node);
  2604. btrfs_remove_free_space_cache(block_group);
  2605. rb_erase(&block_group->cache_node,
  2606. &info->block_group_cache_tree);
  2607. spin_lock(&block_group->space_info->lock);
  2608. list_del(&block_group->list);
  2609. spin_unlock(&block_group->space_info->lock);
  2610. kfree(block_group);
  2611. }
  2612. spin_unlock(&info->block_group_cache_lock);
  2613. mutex_unlock(&info->alloc_mutex);
  2614. return 0;
  2615. }
  2616. static unsigned long calc_ra(unsigned long start, unsigned long last,
  2617. unsigned long nr)
  2618. {
  2619. return min(last, start + nr - 1);
  2620. }
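/*
 * relocate_inode_pages pulls the pages covering [start, start + len) into
 * the page cache, marks them delalloc and dirty, and starts writeback so
 * that the data is rewritten (and thereby relocated) through the normal
 * COW path.
 */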
  2621. static int noinline relocate_inode_pages(struct inode *inode, u64 start,
  2622. u64 len)
  2623. {
  2624. u64 page_start;
  2625. u64 page_end;
  2626. unsigned long last_index;
  2627. unsigned long i;
  2628. struct page *page;
  2629. struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
  2630. struct file_ra_state *ra;
  2631. unsigned long total_read = 0;
  2632. unsigned long ra_pages;
  2633. struct btrfs_ordered_extent *ordered;
  2634. struct btrfs_trans_handle *trans;
  2635. ra = kzalloc(sizeof(*ra), GFP_NOFS);
  2636. mutex_lock(&inode->i_mutex);
  2637. i = start >> PAGE_CACHE_SHIFT;
  2638. last_index = (start + len - 1) >> PAGE_CACHE_SHIFT;
  2639. ra_pages = BTRFS_I(inode)->root->fs_info->bdi.ra_pages;
  2640. file_ra_state_init(ra, inode->i_mapping);
  2641. for (; i <= last_index; i++) {
  2642. if (total_read % ra_pages == 0) {
  2643. btrfs_force_ra(inode->i_mapping, ra, NULL, i,
  2644. calc_ra(i, last_index, ra_pages));
  2645. }
  2646. total_read++;
  2647. again:
  2648. if (((u64)i << PAGE_CACHE_SHIFT) > i_size_read(inode))
  2649. goto truncate_racing;
  2650. page = grab_cache_page(inode->i_mapping, i);
  2651. if (!page) {
  2652. goto out_unlock;
  2653. }
  2654. if (!PageUptodate(page)) {
  2655. btrfs_readpage(NULL, page);
  2656. lock_page(page);
  2657. if (!PageUptodate(page)) {
  2658. unlock_page(page);
  2659. page_cache_release(page);
  2660. goto out_unlock;
  2661. }
  2662. }
  2663. wait_on_page_writeback(page);
  2664. page_start = (u64)page->index << PAGE_CACHE_SHIFT;
  2665. page_end = page_start + PAGE_CACHE_SIZE - 1;
  2666. lock_extent(io_tree, page_start, page_end, GFP_NOFS);
  2667. ordered = btrfs_lookup_ordered_extent(inode, page_start);
  2668. if (ordered) {
  2669. unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
  2670. unlock_page(page);
  2671. page_cache_release(page);
  2672. btrfs_start_ordered_extent(inode, ordered, 1);
  2673. btrfs_put_ordered_extent(ordered);
  2674. goto again;
  2675. }
  2676. set_page_extent_mapped(page);
  2677. /*
  2678. * make sure page_mkwrite is called for this page if userland
  2679. * wants to change it from mmap
  2680. */
  2681. clear_page_dirty_for_io(page);
  2682. btrfs_set_extent_delalloc(inode, page_start, page_end);
  2683. set_page_dirty(page);
  2684. unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
  2685. unlock_page(page);
  2686. page_cache_release(page);
  2687. }
  2688. out_unlock:
  2689. /* we have to start the IO in order to get the ordered extents
2690. * instantiated. This allows the relocation code to wait
  2691. * for all the ordered extents to hit the disk.
  2692. *
  2693. * Otherwise, it would constantly loop over the same extents
  2694. * because the old ones don't get deleted until the IO is
  2695. * started
  2696. */
  2697. btrfs_fdatawrite_range(inode->i_mapping, start, start + len - 1,
  2698. WB_SYNC_NONE);
  2699. kfree(ra);
  2700. trans = btrfs_start_transaction(BTRFS_I(inode)->root, 1);
  2701. if (trans) {
  2702. btrfs_end_transaction(trans, BTRFS_I(inode)->root);
  2703. mark_inode_dirty(inode);
  2704. }
  2705. mutex_unlock(&inode->i_mutex);
  2706. return 0;
  2707. truncate_racing:
  2708. vmtruncate(inode, inode->i_size);
  2709. balance_dirty_pages_ratelimited_nr(inode->i_mapping,
  2710. total_read);
  2711. goto out_unlock;
  2712. }
  2713. /*
  2714. * The back references tell us which tree holds a ref on a block,
  2715. * but it is possible for the tree root field in the reference to
  2716. * reflect the original root before a snapshot was made. In this
  2717. * case we should search through all the children of a given root
  2718. * to find potential holders of references on a block.
  2719. *
  2720. * Instead, we do something a little less fancy and just search
  2721. * all the roots for a given key/block combination.
  2722. */
  2723. static int find_root_for_ref(struct btrfs_root *root,
  2724. struct btrfs_path *path,
  2725. struct btrfs_key *key0,
  2726. int level,
  2727. int file_key,
  2728. struct btrfs_root **found_root,
  2729. u64 bytenr)
  2730. {
  2731. struct btrfs_key root_location;
  2732. struct btrfs_root *cur_root = *found_root;
  2733. struct btrfs_file_extent_item *file_extent;
  2734. u64 root_search_start = BTRFS_FS_TREE_OBJECTID;
  2735. u64 found_bytenr;
  2736. int ret;
  2737. root_location.offset = (u64)-1;
  2738. root_location.type = BTRFS_ROOT_ITEM_KEY;
  2739. path->lowest_level = level;
  2740. path->reada = 0;
  2741. while(1) {
  2742. ret = btrfs_search_slot(NULL, cur_root, key0, path, 0, 0);
  2743. found_bytenr = 0;
  2744. if (ret == 0 && file_key) {
  2745. struct extent_buffer *leaf = path->nodes[0];
  2746. file_extent = btrfs_item_ptr(leaf, path->slots[0],
  2747. struct btrfs_file_extent_item);
  2748. if (btrfs_file_extent_type(leaf, file_extent) ==
  2749. BTRFS_FILE_EXTENT_REG) {
  2750. found_bytenr =
  2751. btrfs_file_extent_disk_bytenr(leaf,
  2752. file_extent);
  2753. }
  2754. } else if (!file_key) {
  2755. if (path->nodes[level])
  2756. found_bytenr = path->nodes[level]->start;
  2757. }
  2758. btrfs_release_path(cur_root, path);
  2759. if (found_bytenr == bytenr) {
  2760. *found_root = cur_root;
  2761. ret = 0;
  2762. goto out;
  2763. }
  2764. ret = btrfs_search_root(root->fs_info->tree_root,
  2765. root_search_start, &root_search_start);
  2766. if (ret)
  2767. break;
  2768. root_location.objectid = root_search_start;
  2769. cur_root = btrfs_read_fs_root_no_name(root->fs_info,
  2770. &root_location);
  2771. if (!cur_root) {
  2772. ret = 1;
  2773. break;
  2774. }
  2775. }
  2776. out:
  2777. path->lowest_level = 0;
  2778. return ret;
  2779. }
/*
 * note, this releases the path
 */
static int noinline relocate_one_reference(struct btrfs_root *extent_root,
				  struct btrfs_path *path,
				  struct btrfs_key *extent_key,
				  u64 *last_file_objectid,
				  u64 *last_file_offset,
				  u64 *last_file_root,
				  u64 last_extent)
{
	struct inode *inode;
	struct btrfs_root *found_root;
	struct btrfs_key root_location;
	struct btrfs_key found_key;
	struct btrfs_extent_ref *ref;
	u64 ref_root;
	u64 ref_gen;
	u64 ref_objectid;
	u64 ref_offset;
	int ret;
	int level;

	WARN_ON(!mutex_is_locked(&extent_root->fs_info->alloc_mutex));

	ref = btrfs_item_ptr(path->nodes[0], path->slots[0],
			     struct btrfs_extent_ref);
	ref_root = btrfs_ref_root(path->nodes[0], ref);
	ref_gen = btrfs_ref_generation(path->nodes[0], ref);
	ref_objectid = btrfs_ref_objectid(path->nodes[0], ref);
	ref_offset = btrfs_ref_offset(path->nodes[0], ref);
	btrfs_release_path(extent_root, path);

	root_location.objectid = ref_root;
	if (ref_gen == 0)
		root_location.offset = 0;
	else
		root_location.offset = (u64)-1;
	root_location.type = BTRFS_ROOT_ITEM_KEY;

	found_root = btrfs_read_fs_root_no_name(extent_root->fs_info,
						&root_location);
	BUG_ON(!found_root);
	mutex_unlock(&extent_root->fs_info->alloc_mutex);

	if (ref_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
		found_key.objectid = ref_objectid;
		found_key.type = BTRFS_EXTENT_DATA_KEY;
		found_key.offset = ref_offset;
		level = 0;

		if (last_extent == extent_key->objectid &&
		    *last_file_objectid == ref_objectid &&
		    *last_file_offset == ref_offset &&
		    *last_file_root == ref_root)
			goto out;

		ret = find_root_for_ref(extent_root, path, &found_key,
					level, 1, &found_root,
					extent_key->objectid);
		if (ret)
			goto out;

		if (last_extent == extent_key->objectid &&
		    *last_file_objectid == ref_objectid &&
		    *last_file_offset == ref_offset &&
		    *last_file_root == ref_root)
			goto out;

		inode = btrfs_iget_locked(extent_root->fs_info->sb,
					  ref_objectid, found_root);
		if (inode->i_state & I_NEW) {
			/* the inode and parent dir are two different roots */
			BTRFS_I(inode)->root = found_root;
			BTRFS_I(inode)->location.objectid = ref_objectid;
			BTRFS_I(inode)->location.type = BTRFS_INODE_ITEM_KEY;
			BTRFS_I(inode)->location.offset = 0;
			btrfs_read_locked_inode(inode);
			unlock_new_inode(inode);
		}
		/* this can happen if the reference is not against
		 * the latest version of the tree root
		 */
		if (is_bad_inode(inode))
			goto out;

		*last_file_objectid = inode->i_ino;
		*last_file_root = found_root->root_key.objectid;
		*last_file_offset = ref_offset;

		relocate_inode_pages(inode, ref_offset, extent_key->offset);
		iput(inode);
	} else {
		struct btrfs_trans_handle *trans;
		struct extent_buffer *eb;
		int needs_lock = 0;

		eb = read_tree_block(found_root, extent_key->objectid,
				     extent_key->offset, 0);
		btrfs_tree_lock(eb);
		level = btrfs_header_level(eb);

		if (level == 0)
			btrfs_item_key_to_cpu(eb, &found_key, 0);
		else
			btrfs_node_key_to_cpu(eb, &found_key, 0);

		btrfs_tree_unlock(eb);
		free_extent_buffer(eb);

		ret = find_root_for_ref(extent_root, path, &found_key,
					level, 0, &found_root,
					extent_key->objectid);
		if (ret)
			goto out;

		/*
		 * right here almost anything could happen to our key,
		 * but that's ok.  The cow below will either relocate it
		 * or someone else will have relocated it.  Either way,
		 * it is in a different spot than it was before and
		 * we're happy.
		 */

		trans = btrfs_start_transaction(found_root, 1);

		if (found_root == extent_root->fs_info->extent_root ||
		    found_root == extent_root->fs_info->chunk_root ||
		    found_root == extent_root->fs_info->dev_root) {
			needs_lock = 1;
			mutex_lock(&extent_root->fs_info->alloc_mutex);
		}

		path->lowest_level = level;
		path->reada = 2;
		ret = btrfs_search_slot(trans, found_root, &found_key, path,
					0, 1);
		path->lowest_level = 0;
		btrfs_release_path(found_root, path);

		if (found_root == found_root->fs_info->extent_root)
			btrfs_extent_post_op(trans, found_root);
		if (needs_lock)
			mutex_unlock(&extent_root->fs_info->alloc_mutex);

		btrfs_end_transaction(trans, found_root);
	}
out:
	mutex_lock(&extent_root->fs_info->alloc_mutex);
	return 0;
}
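
/*
 * relocate_one_extent() does not relocate the extent item at objectid 0;
 * that item is simply deleted here, inside a short transaction of its own.
 */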
static int noinline del_extent_zero(struct btrfs_root *extent_root,
				    struct btrfs_path *path,
				    struct btrfs_key *extent_key)
{
	int ret;
	struct btrfs_trans_handle *trans;

	trans = btrfs_start_transaction(extent_root, 1);
	ret = btrfs_search_slot(trans, extent_root, extent_key, path, -1, 1);
	if (ret > 0) {
		ret = -EIO;
		goto out;
	}
	if (ret < 0)
		goto out;
	ret = btrfs_del_item(trans, extent_root, path);
out:
	btrfs_end_transaction(trans, extent_root);
	return ret;
}
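
/*
 * Walk every BTRFS_EXTENT_REF_KEY item recorded for the extent at
 * extent_key->objectid and hand each backref to relocate_one_reference().
 * The search key offset is bumped past the reference just processed, so the
 * loop makes forward progress even though relocate_one_reference() releases
 * the path.
 */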
static int noinline relocate_one_extent(struct btrfs_root *extent_root,
					struct btrfs_path *path,
					struct btrfs_key *extent_key)
{
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *leaf;
	u64 last_file_objectid = 0;
	u64 last_file_root = 0;
	u64 last_file_offset = (u64)-1;
	u64 last_extent = 0;
	u32 nritems;
	u32 item_size;
	int ret = 0;

	if (extent_key->objectid == 0) {
		ret = del_extent_zero(extent_root, path, extent_key);
		goto out;
	}
	key.objectid = extent_key->objectid;
	key.type = BTRFS_EXTENT_REF_KEY;
	key.offset = 0;

	while(1) {
		ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
		if (ret < 0)
			goto out;

		ret = 0;
		leaf = path->nodes[0];
		nritems = btrfs_header_nritems(leaf);
		if (path->slots[0] == nritems) {
			ret = btrfs_next_leaf(extent_root, path);
			if (ret > 0) {
				ret = 0;
				goto out;
			}
			if (ret < 0)
				goto out;
			leaf = path->nodes[0];
		}

		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		if (found_key.objectid != extent_key->objectid)
			break;

		if (found_key.type != BTRFS_EXTENT_REF_KEY)
			break;

		key.offset = found_key.offset + 1;
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);

		ret = relocate_one_reference(extent_root, path, extent_key,
					     &last_file_objectid,
					     &last_file_offset,
					     &last_file_root, last_extent);
		if (ret)
			goto out;
		last_extent = extent_key->objectid;
	}
	ret = 0;
out:
	btrfs_release_path(extent_root, path);
	return ret;
}
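
/*
 * Pick the allocation profile a relocated chunk should be rewritten with,
 * based on how many devices are present.  For example, on a single-device
 * filesystem RAID0 becomes plain single-device chunks and RAID1/RAID10
 * become DUP; with several devices DUP is promoted to RAID1 and unstriped
 * chunks to RAID0, while chunks that already carry a RAID profile are
 * returned unchanged.
 */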
static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
{
	u64 num_devices;
	u64 stripped = BTRFS_BLOCK_GROUP_RAID0 |
		BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;

	num_devices = root->fs_info->fs_devices->num_devices;
	if (num_devices == 1) {
		stripped |= BTRFS_BLOCK_GROUP_DUP;
		stripped = flags & ~stripped;

		/* turn raid0 into single device chunks */
		if (flags & BTRFS_BLOCK_GROUP_RAID0)
			return stripped;

		/* turn mirroring into duplication */
		if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
			     BTRFS_BLOCK_GROUP_RAID10))
			return stripped | BTRFS_BLOCK_GROUP_DUP;
		return flags;
	} else {
		/* they already had raid on here, just return */
		if (flags & stripped)
			return flags;

		stripped |= BTRFS_BLOCK_GROUP_DUP;
		stripped = flags & ~stripped;

		/* switch duplicated blocks with raid1 */
		if (flags & BTRFS_BLOCK_GROUP_DUP)
			return stripped | BTRFS_BLOCK_GROUP_RAID1;

		/* turn single device chunks into raid0 */
		return stripped | BTRFS_BLOCK_GROUP_RAID0;
	}
	return flags;
}
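
/*
 * Make sure there is somewhere for the contents of the shrinking block group
 * to go: if the group still has bytes in use, allocate a new chunk large
 * enough to hold them (plus 2MB of slack), using the profile chosen by
 * update_block_group_flags().  alloc_mutex is held by the caller; it is
 * dropped around the transaction start and end calls and re-taken before
 * returning.
 */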
int __alloc_chunk_for_shrink(struct btrfs_root *root,
			     struct btrfs_block_group_cache *shrink_block_group,
			     int force)
{
	struct btrfs_trans_handle *trans;
	u64 new_alloc_flags;
	u64 calc;

	spin_lock(&shrink_block_group->lock);
	if (btrfs_block_group_used(&shrink_block_group->item) > 0) {
		spin_unlock(&shrink_block_group->lock);
		mutex_unlock(&root->fs_info->alloc_mutex);

		trans = btrfs_start_transaction(root, 1);
		mutex_lock(&root->fs_info->alloc_mutex);
		spin_lock(&shrink_block_group->lock);

		new_alloc_flags = update_block_group_flags(root,
						   shrink_block_group->flags);
		if (new_alloc_flags != shrink_block_group->flags) {
			calc =
			     btrfs_block_group_used(&shrink_block_group->item);
		} else {
			calc = shrink_block_group->key.offset;
		}
		spin_unlock(&shrink_block_group->lock);

		do_chunk_alloc(trans, root->fs_info->extent_root,
			       calc + 2 * 1024 * 1024, new_alloc_flags, force);

		mutex_unlock(&root->fs_info->alloc_mutex);
		btrfs_end_transaction(trans, root);
		mutex_lock(&root->fs_info->alloc_mutex);
	} else
		spin_unlock(&shrink_block_group->lock);
	return 0;
}
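
/*
 * Empty the block group that starts at shrink_start so it can be removed:
 * mark it read only, relocate every extent item that still falls inside it,
 * committing transactions and cleaning old snapshots between passes until a
 * pass finds nothing, then delete its block group item from the extent tree
 * and drop it from the block group cache and the free space cache.
 */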
int btrfs_shrink_extent_tree(struct btrfs_root *root, u64 shrink_start)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *tree_root = root->fs_info->tree_root;
	struct btrfs_path *path;
	u64 cur_byte;
	u64 total_found;
	u64 shrink_last_byte;
	struct btrfs_block_group_cache *shrink_block_group;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *leaf;
	u32 nritems;
	int ret;
	int progress;

	mutex_lock(&root->fs_info->alloc_mutex);
	shrink_block_group = btrfs_lookup_block_group(root->fs_info,
						      shrink_start);
	BUG_ON(!shrink_block_group);

	shrink_last_byte = shrink_block_group->key.objectid +
		shrink_block_group->key.offset;

	shrink_block_group->space_info->total_bytes -=
		shrink_block_group->key.offset;
	path = btrfs_alloc_path();
	root = root->fs_info->extent_root;
	path->reada = 2;

	printk("btrfs relocating block group %llu flags %llu\n",
	       (unsigned long long)shrink_start,
	       (unsigned long long)shrink_block_group->flags);

	__alloc_chunk_for_shrink(root, shrink_block_group, 1);

again:
	shrink_block_group->ro = 1;

	total_found = 0;
	progress = 0;
	key.objectid = shrink_start;
	key.offset = 0;
	key.type = 0;
	cur_byte = key.objectid;

	mutex_unlock(&root->fs_info->alloc_mutex);

	btrfs_start_delalloc_inodes(root);
	btrfs_wait_ordered_extents(tree_root, 0);

	mutex_lock(&root->fs_info->alloc_mutex);

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	ret = btrfs_previous_item(root, path, 0, BTRFS_EXTENT_ITEM_KEY);
	if (ret < 0)
		goto out;

	if (ret == 0) {
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		if (found_key.objectid + found_key.offset > shrink_start &&
		    found_key.objectid < shrink_last_byte) {
			cur_byte = found_key.objectid;
			key.objectid = cur_byte;
		}
	}
	btrfs_release_path(root, path);

	while(1) {
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			goto out;

next:
		leaf = path->nodes[0];
		nritems = btrfs_header_nritems(leaf);
		if (path->slots[0] >= nritems) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto out;
			if (ret == 1) {
				ret = 0;
				break;
			}
			leaf = path->nodes[0];
			nritems = btrfs_header_nritems(leaf);
		}

		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

		if (found_key.objectid >= shrink_last_byte)
			break;

		if (progress && need_resched()) {
			memcpy(&key, &found_key, sizeof(key));
			cond_resched();
			btrfs_release_path(root, path);
			btrfs_search_slot(NULL, root, &key, path, 0, 0);
			progress = 0;
			goto next;
		}
		progress = 1;

		if (btrfs_key_type(&found_key) != BTRFS_EXTENT_ITEM_KEY ||
		    found_key.objectid + found_key.offset <= cur_byte) {
			memcpy(&key, &found_key, sizeof(key));
			key.offset++;
			path->slots[0]++;
			goto next;
		}

		total_found++;
		cur_byte = found_key.objectid + found_key.offset;
		key.objectid = cur_byte;
		btrfs_release_path(root, path);
		ret = relocate_one_extent(root, path, &found_key);
		__alloc_chunk_for_shrink(root, shrink_block_group, 0);
	}

	btrfs_release_path(root, path);

	if (total_found > 0) {
		printk("btrfs relocate found %llu last extent was %llu\n",
		       (unsigned long long)total_found,
		       (unsigned long long)found_key.objectid);
		mutex_unlock(&root->fs_info->alloc_mutex);
		trans = btrfs_start_transaction(tree_root, 1);
		btrfs_commit_transaction(trans, tree_root);

		btrfs_clean_old_snapshots(tree_root);

		btrfs_start_delalloc_inodes(root);
		btrfs_wait_ordered_extents(tree_root, 0);

		trans = btrfs_start_transaction(tree_root, 1);
		btrfs_commit_transaction(trans, tree_root);
		mutex_lock(&root->fs_info->alloc_mutex);
		goto again;
	}

	/*
	 * we've freed all the extents, now remove the block
	 * group item from the tree
	 */
	mutex_unlock(&root->fs_info->alloc_mutex);

	trans = btrfs_start_transaction(root, 1);

	mutex_lock(&root->fs_info->alloc_mutex);
	memcpy(&key, &shrink_block_group->key, sizeof(key));

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0)
		ret = -EIO;
	if (ret < 0) {
		btrfs_end_transaction(trans, root);
		goto out;
	}

	spin_lock(&root->fs_info->block_group_cache_lock);
	rb_erase(&shrink_block_group->cache_node,
		 &root->fs_info->block_group_cache_tree);
	spin_unlock(&root->fs_info->block_group_cache_lock);

	ret = btrfs_remove_free_space(shrink_block_group, key.objectid,
				      key.offset);
	if (ret) {
		btrfs_end_transaction(trans, root);
		goto out;
	}
	/*
	memset(shrink_block_group, 0, sizeof(*shrink_block_group));
	kfree(shrink_block_group);
	*/

	btrfs_del_item(trans, root, path);
	btrfs_release_path(root, path);
	mutex_unlock(&root->fs_info->alloc_mutex);
	btrfs_commit_transaction(trans, root);

	mutex_lock(&root->fs_info->alloc_mutex);

	/* the code to unpin extents might set a few bits in the free
	 * space cache for this range again
	 */
	/* XXX? */
	ret = btrfs_remove_free_space(shrink_block_group, key.objectid,
				      key.offset);
out:
	btrfs_free_path(path);
	mutex_unlock(&root->fs_info->alloc_mutex);
	return ret;
}
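
/*
 * Position the path at the first BTRFS_BLOCK_GROUP_ITEM_KEY item with
 * objectid >= key->objectid.  Returns 0 with the path pointing at the item,
 * -ENOENT when the search runs off the end of the tree, or a negative error
 * from the btree search itself.
 */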
int find_first_block_group(struct btrfs_root *root, struct btrfs_path *path,
			   struct btrfs_key *key)
{
	int ret = 0;
	struct btrfs_key found_key;
	struct extent_buffer *leaf;
	int slot;

	ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
	if (ret < 0)
		goto out;

	while(1) {
		slot = path->slots[0];
		leaf = path->nodes[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto out;
			break;
		}
		btrfs_item_key_to_cpu(leaf, &found_key, slot);

		if (found_key.objectid >= key->objectid &&
		    found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
			ret = 0;
			goto out;
		}
		path->slots[0]++;
	}
	ret = -ENOENT;
out:
	return ret;
}
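
/*
 * Scan the extent tree for every block group item (typically at mount time),
 * build an in-memory btrfs_block_group_cache for each one, account it in the
 * matching space_info and insert it into the block group cache tree.
 */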
int btrfs_read_block_groups(struct btrfs_root *root)
{
	struct btrfs_path *path;
	int ret;
	struct btrfs_block_group_cache *cache;
	struct btrfs_fs_info *info = root->fs_info;
	struct btrfs_space_info *space_info;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *leaf;

	root = info->extent_root;
	key.objectid = 0;
	key.offset = 0;
	btrfs_set_key_type(&key, BTRFS_BLOCK_GROUP_ITEM_KEY);
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	mutex_lock(&root->fs_info->alloc_mutex);
	while(1) {
		ret = find_first_block_group(root, path, &key);
		if (ret > 0) {
			ret = 0;
			goto error;
		}
		if (ret != 0)
			goto error;

		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		cache = kzalloc(sizeof(*cache), GFP_NOFS);
		if (!cache) {
			ret = -ENOMEM;
			break;
		}

		spin_lock_init(&cache->lock);
		INIT_LIST_HEAD(&cache->list);
		read_extent_buffer(leaf, &cache->item,
				   btrfs_item_ptr_offset(leaf, path->slots[0]),
				   sizeof(cache->item));
		memcpy(&cache->key, &found_key, sizeof(found_key));

		key.objectid = found_key.objectid + found_key.offset;
		btrfs_release_path(root, path);
		cache->flags = btrfs_block_group_flags(&cache->item);

		ret = update_space_info(info, cache->flags, found_key.offset,
					btrfs_block_group_used(&cache->item),
					&space_info);
		BUG_ON(ret);
		cache->space_info = space_info;
		spin_lock(&space_info->lock);
		list_add(&cache->list, &space_info->block_groups);
		spin_unlock(&space_info->lock);

		ret = btrfs_add_block_group_cache(root->fs_info, cache);
		BUG_ON(ret);

		if (key.objectid >=
		    btrfs_super_total_bytes(&info->super_copy))
			break;
	}
	ret = 0;
error:
	btrfs_free_path(path);
	mutex_unlock(&root->fs_info->alloc_mutex);
	return ret;
}
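
/*
 * Create the in-memory and on-disk state for a brand new block group backing
 * the chunk at chunk_offset: set up the cache entry, account the space in the
 * matching space_info, and insert the block group item into the extent tree.
 * The caller must already hold alloc_mutex (see the WARN_ON below).
 */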
int btrfs_make_block_group(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root, u64 bytes_used,
			   u64 type, u64 chunk_objectid, u64 chunk_offset,
			   u64 size)
{
	int ret;
	struct btrfs_root *extent_root;
	struct btrfs_block_group_cache *cache;

	WARN_ON(!mutex_is_locked(&root->fs_info->alloc_mutex));
	extent_root = root->fs_info->extent_root;

	root->fs_info->last_trans_new_blockgroup = trans->transid;

	cache = kzalloc(sizeof(*cache), GFP_NOFS);
	if (!cache)
		return -ENOMEM;

	cache->key.objectid = chunk_offset;
	cache->key.offset = size;
	spin_lock_init(&cache->lock);
	INIT_LIST_HEAD(&cache->list);
	btrfs_set_key_type(&cache->key, BTRFS_BLOCK_GROUP_ITEM_KEY);

	btrfs_set_block_group_used(&cache->item, bytes_used);
	btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid);
	cache->flags = type;
	btrfs_set_block_group_flags(&cache->item, type);
	ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
				&cache->space_info);
	BUG_ON(ret);

	spin_lock(&cache->space_info->lock);
	list_add(&cache->list, &cache->space_info->block_groups);
	spin_unlock(&cache->space_info->lock);

	ret = btrfs_add_block_group_cache(root->fs_info, cache);
	BUG_ON(ret);

	ret = btrfs_insert_item(trans, extent_root, &cache->key, &cache->item,
				sizeof(cache->item));
	BUG_ON(ret);

	finish_current_insert(trans, extent_root);
	ret = del_pending_extents(trans, extent_root);
	BUG_ON(ret);
	set_avail_alloc_bits(extent_root->fs_info, type);

	return 0;
}