free-space-cache.c

/*
 * Copyright (C) 2008 Red Hat.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/math64.h>
#include <linux/ratelimit.h>
#include "ctree.h"
#include "free-space-cache.h"
#include "transaction.h"
#include "disk-io.h"
#include "extent_io.h"
#include "inode-map.h"
#include "volumes.h"

#define BITS_PER_BITMAP		(PAGE_CACHE_SIZE * 8)
#define MAX_CACHE_BYTES_PER_GIG	(32 * 1024)
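/*
 * Worked example (a sketch, assuming the common 4K PAGE_CACHE_SIZE):
 * BITS_PER_BITMAP = 4096 * 8 = 32768 bits, so with a 4K sectorsize unit
 * a single bitmap entry covers 32768 * 4096 bytes = 128MiB of a block
 * group, while MAX_CACHE_BYTES_PER_GIG caps the in-memory tracking
 * structures at 32KiB per 1GiB of block group space.
 */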
struct btrfs_trim_range {
	u64 start;
	u64 bytes;
	struct list_head list;
};

static int link_free_space(struct btrfs_free_space_ctl *ctl,
			   struct btrfs_free_space *info);
static void unlink_free_space(struct btrfs_free_space_ctl *ctl,
			      struct btrfs_free_space *info);

static struct inode *__lookup_free_space_inode(struct btrfs_root *root,
					       struct btrfs_path *path,
					       u64 offset)
{
	struct btrfs_key key;
	struct btrfs_key location;
	struct btrfs_disk_key disk_key;
	struct btrfs_free_space_header *header;
	struct extent_buffer *leaf;
	struct inode *inode = NULL;
	int ret;

	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.offset = offset;
	key.type = 0;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		return ERR_PTR(ret);
	if (ret > 0) {
		btrfs_release_path(path);
		return ERR_PTR(-ENOENT);
	}

	leaf = path->nodes[0];
	header = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_free_space_header);
	btrfs_free_space_key(leaf, header, &disk_key);
	btrfs_disk_key_to_cpu(&location, &disk_key);
	btrfs_release_path(path);

	inode = btrfs_iget(root->fs_info->sb, &location, root, NULL);
	if (!inode)
		return ERR_PTR(-ENOENT);
	if (IS_ERR(inode))
		return inode;
	if (is_bad_inode(inode)) {
		iput(inode);
		return ERR_PTR(-ENOENT);
	}

	mapping_set_gfp_mask(inode->i_mapping,
			mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS);

	return inode;
}

struct inode *lookup_free_space_inode(struct btrfs_root *root,
				      struct btrfs_block_group_cache
				      *block_group, struct btrfs_path *path)
{
	struct inode *inode = NULL;
	u32 flags = BTRFS_INODE_NODATASUM | BTRFS_INODE_NODATACOW;

	spin_lock(&block_group->lock);
	if (block_group->inode)
		inode = igrab(block_group->inode);
	spin_unlock(&block_group->lock);
	if (inode)
		return inode;

	inode = __lookup_free_space_inode(root, path,
					  block_group->key.objectid);
	if (IS_ERR(inode))
		return inode;

	spin_lock(&block_group->lock);
	if (!((BTRFS_I(inode)->flags & flags) == flags)) {
		btrfs_info(root->fs_info,
			"Old style space inode found, converting.");
		BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM |
			BTRFS_INODE_NODATACOW;
		block_group->disk_cache_state = BTRFS_DC_CLEAR;
	}

	if (!block_group->iref) {
		block_group->inode = igrab(inode);
		block_group->iref = 1;
	}
	spin_unlock(&block_group->lock);

	return inode;
}

static int __create_free_space_inode(struct btrfs_root *root,
				     struct btrfs_trans_handle *trans,
				     struct btrfs_path *path,
				     u64 ino, u64 offset)
{
	struct btrfs_key key;
	struct btrfs_disk_key disk_key;
	struct btrfs_free_space_header *header;
	struct btrfs_inode_item *inode_item;
	struct extent_buffer *leaf;
	u64 flags = BTRFS_INODE_NOCOMPRESS | BTRFS_INODE_PREALLOC;
	int ret;

	ret = btrfs_insert_empty_inode(trans, root, path, ino);
	if (ret)
		return ret;

	/* We inline crc's for the free disk space cache */
	if (ino != BTRFS_FREE_INO_OBJECTID)
		flags |= BTRFS_INODE_NODATASUM | BTRFS_INODE_NODATACOW;

	leaf = path->nodes[0];
	inode_item = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_inode_item);
	btrfs_item_key(leaf, &disk_key, path->slots[0]);
	memset_extent_buffer(leaf, 0, (unsigned long)inode_item,
			     sizeof(*inode_item));
	btrfs_set_inode_generation(leaf, inode_item, trans->transid);
	btrfs_set_inode_size(leaf, inode_item, 0);
	btrfs_set_inode_nbytes(leaf, inode_item, 0);
	btrfs_set_inode_uid(leaf, inode_item, 0);
	btrfs_set_inode_gid(leaf, inode_item, 0);
	btrfs_set_inode_mode(leaf, inode_item, S_IFREG | 0600);
	btrfs_set_inode_flags(leaf, inode_item, flags);
	btrfs_set_inode_nlink(leaf, inode_item, 1);
	btrfs_set_inode_transid(leaf, inode_item, trans->transid);
	btrfs_set_inode_block_group(leaf, inode_item, offset);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.offset = offset;
	key.type = 0;
	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(struct btrfs_free_space_header));
	if (ret < 0) {
		btrfs_release_path(path);
		return ret;
	}

	leaf = path->nodes[0];
	header = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_free_space_header);
	memset_extent_buffer(leaf, 0, (unsigned long)header, sizeof(*header));
	btrfs_set_free_space_key(leaf, header, &disk_key);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	return 0;
}

int create_free_space_inode(struct btrfs_root *root,
			    struct btrfs_trans_handle *trans,
			    struct btrfs_block_group_cache *block_group,
			    struct btrfs_path *path)
{
	int ret;
	u64 ino;

	ret = btrfs_find_free_objectid(root, &ino);
	if (ret < 0)
		return ret;

	return __create_free_space_inode(root, trans, path, ino,
					 block_group->key.objectid);
}

int btrfs_check_trunc_cache_free_space(struct btrfs_root *root,
				       struct btrfs_block_rsv *rsv)
{
	u64 needed_bytes;
	int ret;

	/* 1 for slack space, 1 for updating the inode */
	needed_bytes = btrfs_calc_trunc_metadata_size(root, 1) +
		btrfs_calc_trans_metadata_size(root, 1);

	spin_lock(&rsv->lock);
	if (rsv->reserved < needed_bytes)
		ret = -ENOSPC;
	else
		ret = 0;
	spin_unlock(&rsv->lock);
	return ret;
}

int btrfs_truncate_free_space_cache(struct btrfs_root *root,
				    struct btrfs_trans_handle *trans,
				    struct inode *inode)
{
	int ret = 0;

	btrfs_i_size_write(inode, 0);
	truncate_pagecache(inode, 0);

	/*
	 * We don't need an orphan item because truncating the free space cache
	 * will never be split across transactions.
	 */
	ret = btrfs_truncate_inode_items(trans, root, inode,
					 0, BTRFS_EXTENT_DATA_KEY);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		return ret;
	}

	ret = btrfs_update_inode(trans, root, inode);
	if (ret)
		btrfs_abort_transaction(trans, root, ret);

	return ret;
}

static int readahead_cache(struct inode *inode)
{
	struct file_ra_state *ra;
	unsigned long last_index;

	ra = kzalloc(sizeof(*ra), GFP_NOFS);
	if (!ra)
		return -ENOMEM;

	file_ra_state_init(ra, inode->i_mapping);
	last_index = (i_size_read(inode) - 1) >> PAGE_CACHE_SHIFT;

	page_cache_sync_readahead(inode->i_mapping, ra, NULL, 0, last_index);

	kfree(ra);

	return 0;
}

struct io_ctl {
	void *cur, *orig;
	struct page *page;
	struct page **pages;
	struct btrfs_root *root;
	unsigned long size;
	int index;
	int num_pages;
	unsigned check_crcs:1;
};
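/*
 * Rough sketch of the cache file layout that the io_ctl helpers below
 * imply (a reading of this code, not an authoritative format spec):
 * page 0 begins with one u32 crc per page (when check_crcs is set),
 * followed by a u64 generation; btrfs_free_space_entry records are then
 * packed in, continuing across pages, and each bitmap occupies a full
 * page of its own, written after all of the entry records.
 */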
static int io_ctl_init(struct io_ctl *io_ctl, struct inode *inode,
		       struct btrfs_root *root, int write)
{
	int num_pages;
	int check_crcs = 0;

	num_pages = DIV_ROUND_UP(i_size_read(inode), PAGE_CACHE_SIZE);

	if (btrfs_ino(inode) != BTRFS_FREE_INO_OBJECTID)
		check_crcs = 1;

	/* Make sure we can fit our crcs into the first page */
	if (write && check_crcs &&
	    (num_pages * sizeof(u32)) >= PAGE_CACHE_SIZE)
		return -ENOSPC;

	memset(io_ctl, 0, sizeof(struct io_ctl));

	io_ctl->pages = kzalloc(sizeof(struct page *) * num_pages, GFP_NOFS);
	if (!io_ctl->pages)
		return -ENOMEM;

	io_ctl->num_pages = num_pages;
	io_ctl->root = root;
	io_ctl->check_crcs = check_crcs;

	return 0;
}

static void io_ctl_free(struct io_ctl *io_ctl)
{
	kfree(io_ctl->pages);
}

static void io_ctl_unmap_page(struct io_ctl *io_ctl)
{
	if (io_ctl->cur) {
		kunmap(io_ctl->page);
		io_ctl->cur = NULL;
		io_ctl->orig = NULL;
	}
}

static void io_ctl_map_page(struct io_ctl *io_ctl, int clear)
{
	ASSERT(io_ctl->index < io_ctl->num_pages);
	io_ctl->page = io_ctl->pages[io_ctl->index++];
	io_ctl->cur = kmap(io_ctl->page);
	io_ctl->orig = io_ctl->cur;
	io_ctl->size = PAGE_CACHE_SIZE;
	if (clear)
		memset(io_ctl->cur, 0, PAGE_CACHE_SIZE);
}

static void io_ctl_drop_pages(struct io_ctl *io_ctl)
{
	int i;

	io_ctl_unmap_page(io_ctl);

	for (i = 0; i < io_ctl->num_pages; i++) {
		if (io_ctl->pages[i]) {
			ClearPageChecked(io_ctl->pages[i]);
			unlock_page(io_ctl->pages[i]);
			page_cache_release(io_ctl->pages[i]);
		}
	}
}

static int io_ctl_prepare_pages(struct io_ctl *io_ctl, struct inode *inode,
				int uptodate)
{
	struct page *page;
	gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
	int i;

	for (i = 0; i < io_ctl->num_pages; i++) {
		page = find_or_create_page(inode->i_mapping, i, mask);
		if (!page) {
			io_ctl_drop_pages(io_ctl);
			return -ENOMEM;
		}
		io_ctl->pages[i] = page;
		if (uptodate && !PageUptodate(page)) {
			btrfs_readpage(NULL, page);
			lock_page(page);
			if (!PageUptodate(page)) {
				btrfs_err(BTRFS_I(inode)->root->fs_info,
					  "error reading free space cache");
				io_ctl_drop_pages(io_ctl);
				return -EIO;
			}
		}
	}

	for (i = 0; i < io_ctl->num_pages; i++) {
		clear_page_dirty_for_io(io_ctl->pages[i]);
		set_page_extent_mapped(io_ctl->pages[i]);
	}

	return 0;
}

static void io_ctl_set_generation(struct io_ctl *io_ctl, u64 generation)
{
	__le64 *val;

	io_ctl_map_page(io_ctl, 1);

	/*
	 * Skip the csum areas.  If we don't check crcs then we just have a
	 * 64bit chunk at the front of the first page.
	 */
	if (io_ctl->check_crcs) {
		io_ctl->cur += (sizeof(u32) * io_ctl->num_pages);
		io_ctl->size -= sizeof(u64) + (sizeof(u32) * io_ctl->num_pages);
	} else {
		io_ctl->cur += sizeof(u64);
		io_ctl->size -= sizeof(u64) * 2;
	}

	val = io_ctl->cur;
	*val = cpu_to_le64(generation);
	io_ctl->cur += sizeof(u64);
}

static int io_ctl_check_generation(struct io_ctl *io_ctl, u64 generation)
{
	__le64 *gen;

	/*
	 * Skip the crc area.  If we don't check crcs then we just have a 64bit
	 * chunk at the front of the first page.
	 */
	if (io_ctl->check_crcs) {
		io_ctl->cur += sizeof(u32) * io_ctl->num_pages;
		io_ctl->size -= sizeof(u64) +
			(sizeof(u32) * io_ctl->num_pages);
	} else {
		io_ctl->cur += sizeof(u64);
		io_ctl->size -= sizeof(u64) * 2;
	}

	gen = io_ctl->cur;
	if (le64_to_cpu(*gen) != generation) {
		printk_ratelimited(KERN_ERR "BTRFS: space cache generation "
				   "(%Lu) does not match inode (%Lu)\n", *gen,
				   generation);
		io_ctl_unmap_page(io_ctl);
		return -EIO;
	}
	io_ctl->cur += sizeof(u64);
	return 0;
}

static void io_ctl_set_crc(struct io_ctl *io_ctl, int index)
{
	u32 *tmp;
	u32 crc = ~(u32)0;
	unsigned offset = 0;

	if (!io_ctl->check_crcs) {
		io_ctl_unmap_page(io_ctl);
		return;
	}

	if (index == 0)
		offset = sizeof(u32) * io_ctl->num_pages;

	crc = btrfs_csum_data(io_ctl->orig + offset, crc,
			      PAGE_CACHE_SIZE - offset);
	btrfs_csum_final(crc, (char *)&crc);
	io_ctl_unmap_page(io_ctl);
	tmp = kmap(io_ctl->pages[0]);
	tmp += index;
	*tmp = crc;
	kunmap(io_ctl->pages[0]);
}

static int io_ctl_check_crc(struct io_ctl *io_ctl, int index)
{
	u32 *tmp, val;
	u32 crc = ~(u32)0;
	unsigned offset = 0;

	if (!io_ctl->check_crcs) {
		io_ctl_map_page(io_ctl, 0);
		return 0;
	}

	if (index == 0)
		offset = sizeof(u32) * io_ctl->num_pages;

	tmp = kmap(io_ctl->pages[0]);
	tmp += index;
	val = *tmp;
	kunmap(io_ctl->pages[0]);

	io_ctl_map_page(io_ctl, 0);
	crc = btrfs_csum_data(io_ctl->orig + offset, crc,
			      PAGE_CACHE_SIZE - offset);
	btrfs_csum_final(crc, (char *)&crc);
	if (val != crc) {
		printk_ratelimited(KERN_ERR "BTRFS: csum mismatch on free "
				   "space cache\n");
		io_ctl_unmap_page(io_ctl);
		return -EIO;
	}

	return 0;
}

static int io_ctl_add_entry(struct io_ctl *io_ctl, u64 offset, u64 bytes,
			    void *bitmap)
{
	struct btrfs_free_space_entry *entry;

	if (!io_ctl->cur)
		return -ENOSPC;

	entry = io_ctl->cur;
	entry->offset = cpu_to_le64(offset);
	entry->bytes = cpu_to_le64(bytes);
	entry->type = (bitmap) ? BTRFS_FREE_SPACE_BITMAP :
		BTRFS_FREE_SPACE_EXTENT;
	io_ctl->cur += sizeof(struct btrfs_free_space_entry);
	io_ctl->size -= sizeof(struct btrfs_free_space_entry);

	if (io_ctl->size >= sizeof(struct btrfs_free_space_entry))
		return 0;

	io_ctl_set_crc(io_ctl, io_ctl->index - 1);

	/* No more pages to map */
	if (io_ctl->index >= io_ctl->num_pages)
		return 0;

	/* map the next page */
	io_ctl_map_page(io_ctl, 1);
	return 0;
}

static int io_ctl_add_bitmap(struct io_ctl *io_ctl, void *bitmap)
{
	if (!io_ctl->cur)
		return -ENOSPC;

	/*
	 * If we aren't at the start of the current page, unmap this one and
	 * map the next one if there is any left.
	 */
	if (io_ctl->cur != io_ctl->orig) {
		io_ctl_set_crc(io_ctl, io_ctl->index - 1);
		if (io_ctl->index >= io_ctl->num_pages)
			return -ENOSPC;
		io_ctl_map_page(io_ctl, 0);
	}

	memcpy(io_ctl->cur, bitmap, PAGE_CACHE_SIZE);
	io_ctl_set_crc(io_ctl, io_ctl->index - 1);
	if (io_ctl->index < io_ctl->num_pages)
		io_ctl_map_page(io_ctl, 0);
	return 0;
}

static void io_ctl_zero_remaining_pages(struct io_ctl *io_ctl)
{
	/*
	 * If we're not on the boundary we know we've modified the page and we
	 * need to crc the page.
	 */
	if (io_ctl->cur != io_ctl->orig)
		io_ctl_set_crc(io_ctl, io_ctl->index - 1);
	else
		io_ctl_unmap_page(io_ctl);

	while (io_ctl->index < io_ctl->num_pages) {
		io_ctl_map_page(io_ctl, 1);
		io_ctl_set_crc(io_ctl, io_ctl->index - 1);
	}
}

static int io_ctl_read_entry(struct io_ctl *io_ctl,
			     struct btrfs_free_space *entry, u8 *type)
{
	struct btrfs_free_space_entry *e;
	int ret;

	if (!io_ctl->cur) {
		ret = io_ctl_check_crc(io_ctl, io_ctl->index);
		if (ret)
			return ret;
	}

	e = io_ctl->cur;
	entry->offset = le64_to_cpu(e->offset);
	entry->bytes = le64_to_cpu(e->bytes);
	*type = e->type;
	io_ctl->cur += sizeof(struct btrfs_free_space_entry);
	io_ctl->size -= sizeof(struct btrfs_free_space_entry);

	if (io_ctl->size >= sizeof(struct btrfs_free_space_entry))
		return 0;

	io_ctl_unmap_page(io_ctl);

	return 0;
}

static int io_ctl_read_bitmap(struct io_ctl *io_ctl,
			      struct btrfs_free_space *entry)
{
	int ret;

	ret = io_ctl_check_crc(io_ctl, io_ctl->index);
	if (ret)
		return ret;

	memcpy(entry->bitmap, io_ctl->cur, PAGE_CACHE_SIZE);
	io_ctl_unmap_page(io_ctl);

	return 0;
}
/*
 * Since we attach pinned extents after the fact, we can end up with
 * contiguous sections of free space that are split up into separate entries.
 * This is a problem for the tree logging code: log replay could have
 * allocated across what now looks like two entries, because the entries
 * would have been merged when the pinned extents were added back to the
 * free space cache.  So run through the space cache that we just loaded
 * and merge contiguous entries.  This keeps log replay from blowing up
 * and makes for nicer allocator behavior.
 */
static void merge_space_tree(struct btrfs_free_space_ctl *ctl)
{
	struct btrfs_free_space *e, *prev = NULL;
	struct rb_node *n;

again:
	spin_lock(&ctl->tree_lock);
	for (n = rb_first(&ctl->free_space_offset); n; n = rb_next(n)) {
		e = rb_entry(n, struct btrfs_free_space, offset_index);
		if (!prev)
			goto next;
		if (e->bitmap || prev->bitmap)
			goto next;
		if (prev->offset + prev->bytes == e->offset) {
			unlink_free_space(ctl, prev);
			unlink_free_space(ctl, e);
			prev->bytes += e->bytes;
			kmem_cache_free(btrfs_free_space_cachep, e);
			link_free_space(ctl, prev);
			prev = NULL;
			spin_unlock(&ctl->tree_lock);
			goto again;
		}
next:
		prev = e;
	}
	spin_unlock(&ctl->tree_lock);
}
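/*
 * Example of the merge pass (hypothetical numbers, not from the source):
 * with extent entries [0, 4K) and [4K, 12K) loaded from disk, the walk
 * above unlinks both, grows the first to [0, 12K), re-links it and
 * restarts the scan, so one contiguous entry is what the allocator and
 * log replay finally see.
 */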
static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
				   struct btrfs_free_space_ctl *ctl,
				   struct btrfs_path *path, u64 offset)
{
	struct btrfs_free_space_header *header;
	struct extent_buffer *leaf;
	struct io_ctl io_ctl;
	struct btrfs_key key;
	struct btrfs_free_space *e, *n;
	LIST_HEAD(bitmaps);
	u64 num_entries;
	u64 num_bitmaps;
	u64 generation;
	u8 type;
	int ret = 0;

	/* Nothing in the space cache, goodbye */
	if (!i_size_read(inode))
		return 0;

	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.offset = offset;
	key.type = 0;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		return 0;
	else if (ret > 0) {
		btrfs_release_path(path);
		return 0;
	}

	ret = -1;

	leaf = path->nodes[0];
	header = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_free_space_header);
	num_entries = btrfs_free_space_entries(leaf, header);
	num_bitmaps = btrfs_free_space_bitmaps(leaf, header);
	generation = btrfs_free_space_generation(leaf, header);
	btrfs_release_path(path);
	if (!BTRFS_I(inode)->generation) {
		btrfs_info(root->fs_info,
			   "The free space cache file (%llu) is invalid, skipping it\n",
			   offset);
		return 0;
	}
	if (BTRFS_I(inode)->generation != generation) {
		btrfs_err(root->fs_info,
			"free space inode generation (%llu) "
			"did not match free space cache generation (%llu)",
			BTRFS_I(inode)->generation, generation);
		return 0;
	}

	if (!num_entries)
		return 0;

	ret = io_ctl_init(&io_ctl, inode, root, 0);
	if (ret)
		return ret;

	ret = readahead_cache(inode);
	if (ret)
		goto out;

	ret = io_ctl_prepare_pages(&io_ctl, inode, 1);
	if (ret)
		goto out;

	ret = io_ctl_check_crc(&io_ctl, 0);
	if (ret)
		goto free_cache;

	ret = io_ctl_check_generation(&io_ctl, generation);
	if (ret)
		goto free_cache;

	while (num_entries) {
		e = kmem_cache_zalloc(btrfs_free_space_cachep,
				      GFP_NOFS);
		if (!e)
			goto free_cache;

		ret = io_ctl_read_entry(&io_ctl, e, &type);
		if (ret) {
			kmem_cache_free(btrfs_free_space_cachep, e);
			goto free_cache;
		}

		if (!e->bytes) {
			kmem_cache_free(btrfs_free_space_cachep, e);
			goto free_cache;
		}

		if (type == BTRFS_FREE_SPACE_EXTENT) {
			spin_lock(&ctl->tree_lock);
			ret = link_free_space(ctl, e);
			spin_unlock(&ctl->tree_lock);
			if (ret) {
				btrfs_err(root->fs_info,
					"Duplicate entries in free space cache, dumping");
				kmem_cache_free(btrfs_free_space_cachep, e);
				goto free_cache;
			}
		} else {
			ASSERT(num_bitmaps);
			num_bitmaps--;
			e->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS);
			if (!e->bitmap) {
				kmem_cache_free(
					btrfs_free_space_cachep, e);
				goto free_cache;
			}
			spin_lock(&ctl->tree_lock);
			ret = link_free_space(ctl, e);
			ctl->total_bitmaps++;
			ctl->op->recalc_thresholds(ctl);
			spin_unlock(&ctl->tree_lock);
			if (ret) {
				btrfs_err(root->fs_info,
					"Duplicate entries in free space cache, dumping");
				kmem_cache_free(btrfs_free_space_cachep, e);
				goto free_cache;
			}
			list_add_tail(&e->list, &bitmaps);
		}

		num_entries--;
	}

	io_ctl_unmap_page(&io_ctl);
	/*
	 * The bitmap pages are stored after all the entry records, in the
	 * same order the bitmap entries were added to the cache, so read
	 * them back in that order.
	 */
	list_for_each_entry_safe(e, n, &bitmaps, list) {
		list_del_init(&e->list);
		ret = io_ctl_read_bitmap(&io_ctl, e);
		if (ret)
			goto free_cache;
	}

	io_ctl_drop_pages(&io_ctl);
	merge_space_tree(ctl);
	ret = 1;
out:
	io_ctl_free(&io_ctl);
	return ret;
free_cache:
	io_ctl_drop_pages(&io_ctl);
	__btrfs_remove_free_space_cache(ctl);
	goto out;
}
int load_free_space_cache(struct btrfs_fs_info *fs_info,
			  struct btrfs_block_group_cache *block_group)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_root *root = fs_info->tree_root;
	struct inode *inode;
	struct btrfs_path *path;
	int ret = 0;
	bool matched;
	u64 used = btrfs_block_group_used(&block_group->item);

	/*
	 * If this block group has been marked to be cleared for one reason or
	 * another then we can't trust the on disk cache, so just return.
	 */
	spin_lock(&block_group->lock);
	if (block_group->disk_cache_state != BTRFS_DC_WRITTEN) {
		spin_unlock(&block_group->lock);
		return 0;
	}
	spin_unlock(&block_group->lock);

	path = btrfs_alloc_path();
	if (!path)
		return 0;
	path->search_commit_root = 1;
	path->skip_locking = 1;

	inode = lookup_free_space_inode(root, block_group, path);
	if (IS_ERR(inode)) {
		btrfs_free_path(path);
		return 0;
	}

	/* We may have converted the inode and made the cache invalid. */
	spin_lock(&block_group->lock);
	if (block_group->disk_cache_state != BTRFS_DC_WRITTEN) {
		spin_unlock(&block_group->lock);
		btrfs_free_path(path);
		goto out;
	}
	spin_unlock(&block_group->lock);

	ret = __load_free_space_cache(fs_info->tree_root, inode, ctl,
				      path, block_group->key.objectid);
	btrfs_free_path(path);
	if (ret <= 0)
		goto out;

	spin_lock(&ctl->tree_lock);
	matched = (ctl->free_space == (block_group->key.offset - used -
				       block_group->bytes_super));
	spin_unlock(&ctl->tree_lock);

	if (!matched) {
		__btrfs_remove_free_space_cache(ctl);
		btrfs_warn(fs_info, "block group %llu has wrong amount of free space",
			block_group->key.objectid);
		ret = -1;
	}
out:
	if (ret < 0) {
		/* This cache is bogus, make sure it gets cleared */
		spin_lock(&block_group->lock);
		block_group->disk_cache_state = BTRFS_DC_CLEAR;
		spin_unlock(&block_group->lock);
		ret = 0;

		btrfs_warn(fs_info, "failed to load free space cache for block group %llu, rebuilding it now",
			block_group->key.objectid);
	}

	iput(inode);
	return ret;
}
static noinline_for_stack
int write_cache_extent_entries(struct io_ctl *io_ctl,
			       struct btrfs_free_space_ctl *ctl,
			       struct btrfs_block_group_cache *block_group,
			       int *entries, int *bitmaps,
			       struct list_head *bitmap_list)
{
	int ret;
	struct btrfs_free_cluster *cluster = NULL;
	struct rb_node *node = rb_first(&ctl->free_space_offset);
	struct btrfs_trim_range *trim_entry;

	/* Get the cluster for this block_group if it exists */
	if (block_group && !list_empty(&block_group->cluster_list)) {
		cluster = list_entry(block_group->cluster_list.next,
				     struct btrfs_free_cluster,
				     block_group_list);
	}

	if (!node && cluster) {
		node = rb_first(&cluster->root);
		cluster = NULL;
	}

	/* Write out the extent entries */
	while (node) {
		struct btrfs_free_space *e;

		e = rb_entry(node, struct btrfs_free_space, offset_index);
		*entries += 1;

		ret = io_ctl_add_entry(io_ctl, e->offset, e->bytes,
				       e->bitmap);
		if (ret)
			goto fail;

		if (e->bitmap) {
			list_add_tail(&e->list, bitmap_list);
			*bitmaps += 1;
		}
		node = rb_next(node);
		if (!node && cluster) {
			node = rb_first(&cluster->root);
			cluster = NULL;
		}
	}

	/*
	 * Make sure we don't miss any range that was removed from our rbtree
	 * because trimming is running. Otherwise after a umount+mount (or crash
	 * after committing the transaction) we would leak free space and get
	 * an inconsistent free space cache report from fsck.
	 */
	list_for_each_entry(trim_entry, &ctl->trimming_ranges, list) {
		ret = io_ctl_add_entry(io_ctl, trim_entry->start,
				       trim_entry->bytes, NULL);
		if (ret)
			goto fail;
		*entries += 1;
	}

	return 0;
fail:
	return -ENOSPC;
}
static noinline_for_stack int
update_cache_item(struct btrfs_trans_handle *trans,
		  struct btrfs_root *root,
		  struct inode *inode,
		  struct btrfs_path *path, u64 offset,
		  int entries, int bitmaps)
{
	struct btrfs_key key;
	struct btrfs_free_space_header *header;
	struct extent_buffer *leaf;
	int ret;

	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.offset = offset;
	key.type = 0;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	if (ret < 0) {
		clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, inode->i_size - 1,
				 EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, NULL,
				 GFP_NOFS);
		goto fail;
	}
	leaf = path->nodes[0];
	if (ret > 0) {
		struct btrfs_key found_key;
		ASSERT(path->slots[0]);
		path->slots[0]--;
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		if (found_key.objectid != BTRFS_FREE_SPACE_OBJECTID ||
		    found_key.offset != offset) {
			clear_extent_bit(&BTRFS_I(inode)->io_tree, 0,
					 inode->i_size - 1,
					 EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0,
					 NULL, GFP_NOFS);
			btrfs_release_path(path);
			goto fail;
		}
	}

	BTRFS_I(inode)->generation = trans->transid;
	header = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_free_space_header);
	btrfs_set_free_space_entries(leaf, header, entries);
	btrfs_set_free_space_bitmaps(leaf, header, bitmaps);
	btrfs_set_free_space_generation(leaf, header, trans->transid);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	return 0;

fail:
	return -1;
}

static noinline_for_stack int
write_pinned_extent_entries(struct btrfs_root *root,
			    struct btrfs_block_group_cache *block_group,
			    struct io_ctl *io_ctl,
			    int *entries)
{
	u64 start, extent_start, extent_end, len;
	struct extent_io_tree *unpin = NULL;
	int ret;

	if (!block_group)
		return 0;

	/*
	 * We want to add any pinned extents to our free space cache
	 * so we don't leak the space
	 *
	 * We shouldn't have switched the pinned extents yet so this is the
	 * right one
	 */
	unpin = root->fs_info->pinned_extents;

	start = block_group->key.objectid;

	while (start < block_group->key.objectid + block_group->key.offset) {
		ret = find_first_extent_bit(unpin, start,
					    &extent_start, &extent_end,
					    EXTENT_DIRTY, NULL);
		if (ret)
			return 0;

		/* This pinned extent is out of our range */
		if (extent_start >= block_group->key.objectid +
		    block_group->key.offset)
			return 0;

		extent_start = max(extent_start, start);
		extent_end = min(block_group->key.objectid +
				 block_group->key.offset, extent_end + 1);
		len = extent_end - extent_start;

		*entries += 1;
		ret = io_ctl_add_entry(io_ctl, extent_start, len, NULL);
		if (ret)
			return -ENOSPC;

		start = extent_end;
	}

	return 0;
}

static noinline_for_stack int
write_bitmap_entries(struct io_ctl *io_ctl, struct list_head *bitmap_list)
{
	struct list_head *pos, *n;
	int ret;

	/* Write out the bitmaps */
	list_for_each_safe(pos, n, bitmap_list) {
		struct btrfs_free_space *entry =
			list_entry(pos, struct btrfs_free_space, list);

		ret = io_ctl_add_bitmap(io_ctl, entry->bitmap);
		if (ret)
			return -ENOSPC;
		list_del_init(&entry->list);
	}

	return 0;
}

static int flush_dirty_cache(struct inode *inode)
{
	int ret;

	ret = btrfs_wait_ordered_range(inode, 0, (u64)-1);
	if (ret)
		clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, inode->i_size - 1,
				 EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, NULL,
				 GFP_NOFS);

	return ret;
}

static void noinline_for_stack
cleanup_write_cache_enospc(struct inode *inode,
			   struct io_ctl *io_ctl,
			   struct extent_state **cached_state,
			   struct list_head *bitmap_list)
{
	struct list_head *pos, *n;

	list_for_each_safe(pos, n, bitmap_list) {
		struct btrfs_free_space *entry =
			list_entry(pos, struct btrfs_free_space, list);
		list_del_init(&entry->list);
	}
	io_ctl_drop_pages(io_ctl);
	unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0,
			     i_size_read(inode) - 1, cached_state,
			     GFP_NOFS);
}
/**
 * __btrfs_write_out_cache - write out cached info to an inode
 * @root - the root the inode belongs to
 * @ctl - the free space cache we are going to write out
 * @block_group - the block_group for this cache if it belongs to a block_group
 * @trans - the trans handle
 * @path - the path to use
 * @offset - the offset for the key we'll insert
 *
 * This function writes out a free space cache struct to disk for quick recovery
 * on mount.  This will return 0 if it was successful in writing the cache out,
 * and -1 if it was not.
 */
static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
				   struct btrfs_free_space_ctl *ctl,
				   struct btrfs_block_group_cache *block_group,
				   struct btrfs_trans_handle *trans,
				   struct btrfs_path *path, u64 offset)
{
	struct extent_state *cached_state = NULL;
	struct io_ctl io_ctl;
	LIST_HEAD(bitmap_list);
	int entries = 0;
	int bitmaps = 0;
	int ret;

	if (!i_size_read(inode))
		return -1;

	ret = io_ctl_init(&io_ctl, inode, root, 1);
	if (ret)
		return -1;

	if (block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA)) {
		down_write(&block_group->data_rwsem);
		spin_lock(&block_group->lock);
		if (block_group->delalloc_bytes) {
			block_group->disk_cache_state = BTRFS_DC_WRITTEN;
			spin_unlock(&block_group->lock);
			up_write(&block_group->data_rwsem);
			BTRFS_I(inode)->generation = 0;
			ret = 0;
			goto out;
		}
		spin_unlock(&block_group->lock);
	}

	/* Lock all pages first so we can lock the extent safely. */
	io_ctl_prepare_pages(&io_ctl, inode, 0);

	lock_extent_bits(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1,
			 0, &cached_state);

	io_ctl_set_generation(&io_ctl, trans->transid);

	mutex_lock(&ctl->cache_writeout_mutex);
	/* Write out the extent entries in the free space cache */
	ret = write_cache_extent_entries(&io_ctl, ctl,
					 block_group, &entries, &bitmaps,
					 &bitmap_list);
	if (ret) {
		mutex_unlock(&ctl->cache_writeout_mutex);
		goto out_nospc;
	}

	/*
	 * Some spaces that are freed in the current transaction are pinned,
	 * they will be added into free space cache after the transaction is
	 * committed, we shouldn't lose them.
	 */
	ret = write_pinned_extent_entries(root, block_group, &io_ctl, &entries);
	if (ret) {
		mutex_unlock(&ctl->cache_writeout_mutex);
		goto out_nospc;
	}

	/*
	 * At last, we write out all the bitmaps and keep cache_writeout_mutex
	 * locked while doing it because a concurrent trim can be manipulating
	 * or freeing the bitmap.
	 */
	ret = write_bitmap_entries(&io_ctl, &bitmap_list);
	mutex_unlock(&ctl->cache_writeout_mutex);
	if (ret)
		goto out_nospc;

	/* Zero out the rest of the pages just to make sure */
	io_ctl_zero_remaining_pages(&io_ctl);

	/* Everything is written out, now we dirty the pages in the file. */
	ret = btrfs_dirty_pages(root, inode, io_ctl.pages, io_ctl.num_pages,
				0, i_size_read(inode), &cached_state);
	if (ret)
		goto out_nospc;

	if (block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA))
		up_write(&block_group->data_rwsem);
	/*
	 * Release the pages and unlock the extent, we will flush
	 * them out later
	 */
	io_ctl_drop_pages(&io_ctl);

	unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0,
			     i_size_read(inode) - 1, &cached_state, GFP_NOFS);

	/* Flush the dirty pages in the cache file. */
	ret = flush_dirty_cache(inode);
	if (ret)
		goto out;

	/* Update the cache item to tell everyone this cache file is valid. */
	ret = update_cache_item(trans, root, inode, path, offset,
				entries, bitmaps);
out:
	io_ctl_free(&io_ctl);
	if (ret) {
		invalidate_inode_pages2(inode->i_mapping);
		BTRFS_I(inode)->generation = 0;
	}
	btrfs_update_inode(trans, root, inode);
	return ret;

out_nospc:
	cleanup_write_cache_enospc(inode, &io_ctl, &cached_state, &bitmap_list);

	if (block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA))
		up_write(&block_group->data_rwsem);

	goto out;
}
int btrfs_write_out_cache(struct btrfs_root *root,
			  struct btrfs_trans_handle *trans,
			  struct btrfs_block_group_cache *block_group,
			  struct btrfs_path *path)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct inode *inode;
	int ret = 0;
	enum btrfs_disk_cache_state dcs = BTRFS_DC_WRITTEN;

	root = root->fs_info->tree_root;

	spin_lock(&block_group->lock);
	if (block_group->disk_cache_state < BTRFS_DC_SETUP) {
		spin_unlock(&block_group->lock);
		return 0;
	}

	if (block_group->delalloc_bytes) {
		block_group->disk_cache_state = BTRFS_DC_WRITTEN;
		spin_unlock(&block_group->lock);
		return 0;
	}
	spin_unlock(&block_group->lock);

	inode = lookup_free_space_inode(root, block_group, path);
	if (IS_ERR(inode))
		return 0;

	ret = __btrfs_write_out_cache(root, inode, ctl, block_group, trans,
				      path, block_group->key.objectid);
	if (ret) {
		dcs = BTRFS_DC_ERROR;
		ret = 0;
#ifdef DEBUG
		btrfs_err(root->fs_info,
			"failed to write free space cache for block group %llu",
			block_group->key.objectid);
#endif
	}

	spin_lock(&block_group->lock);
	block_group->disk_cache_state = dcs;
	spin_unlock(&block_group->lock);
	iput(inode);
	return ret;
}

static inline unsigned long offset_to_bit(u64 bitmap_start, u32 unit,
					  u64 offset)
{
	ASSERT(offset >= bitmap_start);
	offset -= bitmap_start;
	return (unsigned long)(div_u64(offset, unit));
}

static inline unsigned long bytes_to_bits(u64 bytes, u32 unit)
{
	return (unsigned long)(div_u64(bytes, unit));
}

static inline u64 offset_to_bitmap(struct btrfs_free_space_ctl *ctl,
				   u64 offset)
{
	u64 bitmap_start;
	u64 bytes_per_bitmap;

	bytes_per_bitmap = BITS_PER_BITMAP * ctl->unit;
	bitmap_start = offset - ctl->start;
	bitmap_start = div64_u64(bitmap_start, bytes_per_bitmap);
	bitmap_start *= bytes_per_bitmap;
	bitmap_start += ctl->start;

	return bitmap_start;
}
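/*
 * Worked example for the helpers above (hypothetical numbers, assuming
 * 4K pages and a 4K sectorsize unit): bytes_per_bitmap = 32768 * 4096 =
 * 128MiB.  With ctl->start = 1GiB, an offset of 1GiB + 200MiB rounds
 * down to bitmap_start = 1GiB + 128MiB, and offset_to_bit() then places
 * that offset at bit 72MiB / 4096 = 18432 within that bitmap.
 */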
static int tree_insert_offset(struct rb_root *root, u64 offset,
			      struct rb_node *node, int bitmap)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct btrfs_free_space *info;

	while (*p) {
		parent = *p;
		info = rb_entry(parent, struct btrfs_free_space, offset_index);

		if (offset < info->offset) {
			p = &(*p)->rb_left;
		} else if (offset > info->offset) {
			p = &(*p)->rb_right;
		} else {
			/*
			 * We could have a bitmap entry and an extent entry
			 * that share the same offset.  If this is the case, we
			 * want the extent entry to always be found first if we
			 * do a linear search through the tree, since we want
			 * to have the quickest allocation time, and allocating
			 * from an extent is faster than allocating from a
			 * bitmap.  So if we're inserting a bitmap and we find
			 * an entry at this offset, we want to go right, or
			 * after this entry logically.  If we are inserting an
			 * extent and we've found a bitmap, we want to go left,
			 * or before logically.
			 */
			if (bitmap) {
				if (info->bitmap) {
					WARN_ON_ONCE(1);
					return -EEXIST;
				}
				p = &(*p)->rb_right;
			} else {
				if (!info->bitmap) {
					WARN_ON_ONCE(1);
					return -EEXIST;
				}
				p = &(*p)->rb_left;
			}
		}
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);

	return 0;
}
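/*
 * Illustration of the tie-break above (hypothetical, not from the
 * source): if an extent entry already sits at offset X and a bitmap
 * entry is inserted at the same X, the bitmap is linked to the right of
 * the extent, so an in-order walk yields the extent first and the
 * cheaper extent allocation path wins ties.
 */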
/*
 * searches the tree for the given offset.
 *
 * fuzzy - If this is set, then we are trying to make an allocation, and we just
 * want a section that has at least 'bytes' in size and comes at or after the
 * given offset.
 */
static struct btrfs_free_space *
tree_search_offset(struct btrfs_free_space_ctl *ctl,
		   u64 offset, int bitmap_only, int fuzzy)
{
	struct rb_node *n = ctl->free_space_offset.rb_node;
	struct btrfs_free_space *entry, *prev = NULL;

	/* find entry that is closest to the 'offset' */
	while (1) {
		if (!n) {
			entry = NULL;
			break;
		}

		entry = rb_entry(n, struct btrfs_free_space, offset_index);
		prev = entry;

		if (offset < entry->offset)
			n = n->rb_left;
		else if (offset > entry->offset)
			n = n->rb_right;
		else
			break;
	}

	if (bitmap_only) {
		if (!entry)
			return NULL;
		if (entry->bitmap)
			return entry;

		/*
		 * bitmap entry and extent entry may share same offset,
		 * in that case, bitmap entry comes after extent entry.
		 */
		n = rb_next(n);
		if (!n)
			return NULL;
		entry = rb_entry(n, struct btrfs_free_space, offset_index);
		if (entry->offset != offset)
			return NULL;

		WARN_ON(!entry->bitmap);
		return entry;
	} else if (entry) {
		if (entry->bitmap) {
			/*
			 * if previous extent entry covers the offset,
			 * we should return it instead of the bitmap entry
			 */
			n = rb_prev(&entry->offset_index);
			if (n) {
				prev = rb_entry(n, struct btrfs_free_space,
						offset_index);
				if (!prev->bitmap &&
				    prev->offset + prev->bytes > offset)
					entry = prev;
			}
		}
		return entry;
	}

	if (!prev)
		return NULL;

	/* find last entry before the 'offset' */
	entry = prev;
	if (entry->offset > offset) {
		n = rb_prev(&entry->offset_index);
		if (n) {
			entry = rb_entry(n, struct btrfs_free_space,
					 offset_index);
			ASSERT(entry->offset <= offset);
		} else {
			if (fuzzy)
				return entry;
			else
				return NULL;
		}
	}

	if (entry->bitmap) {
		n = rb_prev(&entry->offset_index);
		if (n) {
			prev = rb_entry(n, struct btrfs_free_space,
					offset_index);
			if (!prev->bitmap &&
			    prev->offset + prev->bytes > offset)
				return prev;
		}
		if (entry->offset + BITS_PER_BITMAP * ctl->unit > offset)
			return entry;
	} else if (entry->offset + entry->bytes > offset)
		return entry;

	if (!fuzzy)
		return NULL;

	while (1) {
		if (entry->bitmap) {
			if (entry->offset + BITS_PER_BITMAP *
			    ctl->unit > offset)
				break;
		} else {
			if (entry->offset + entry->bytes > offset)
				break;
		}

		n = rb_next(&entry->offset_index);
		if (!n)
			return NULL;
		entry = rb_entry(n, struct btrfs_free_space, offset_index);
	}
	return entry;
}
static inline void
__unlink_free_space(struct btrfs_free_space_ctl *ctl,
		    struct btrfs_free_space *info)
{
	rb_erase(&info->offset_index, &ctl->free_space_offset);
	ctl->free_extents--;
}

static void unlink_free_space(struct btrfs_free_space_ctl *ctl,
			      struct btrfs_free_space *info)
{
	__unlink_free_space(ctl, info);
	ctl->free_space -= info->bytes;
}

static int link_free_space(struct btrfs_free_space_ctl *ctl,
			   struct btrfs_free_space *info)
{
	int ret = 0;

	ASSERT(info->bytes || info->bitmap);
	ret = tree_insert_offset(&ctl->free_space_offset, info->offset,
				 &info->offset_index, (info->bitmap != NULL));
	if (ret)
		return ret;

	ctl->free_space += info->bytes;
	ctl->free_extents++;
	return ret;
}
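
/*
 * Recompute ctl->extents_thresh so that the memory used to track free space
 * in this block group stays within the MAX_CACHE_BYTES_PER_GIG budget; once
 * the bitmaps alone would consume the whole budget, the threshold drops to
 * zero and new space goes straight into bitmaps.
 */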
static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl)
{
	struct btrfs_block_group_cache *block_group = ctl->private;
	u64 max_bytes;
	u64 bitmap_bytes;
	u64 extent_bytes;
	u64 size = block_group->key.offset;
	u64 bytes_per_bg = BITS_PER_BITMAP * ctl->unit;
	int max_bitmaps = div64_u64(size + bytes_per_bg - 1, bytes_per_bg);

	max_bitmaps = max(max_bitmaps, 1);

	ASSERT(ctl->total_bitmaps <= max_bitmaps);

	/*
	 * The goal is to keep the total amount of memory used per 1gb of space
	 * at or below 32k, so we need to adjust how much memory we allow to be
	 * used by extent based free space tracking
	 */
	if (size < 1024 * 1024 * 1024)
		max_bytes = MAX_CACHE_BYTES_PER_GIG;
	else
		max_bytes = MAX_CACHE_BYTES_PER_GIG *
			div64_u64(size, 1024 * 1024 * 1024);

	/*
	 * we want to account for 1 more bitmap than what we have so we can make
	 * sure we don't go over our overall goal of MAX_CACHE_BYTES_PER_GIG as
	 * we add more bitmaps.
	 */
	bitmap_bytes = (ctl->total_bitmaps + 1) * PAGE_CACHE_SIZE;

	if (bitmap_bytes >= max_bytes) {
		ctl->extents_thresh = 0;
		return;
	}

	/*
	 * we want the extent entry threshold to always be at most 1/2 the max
	 * bytes we can have, or whatever is less than that.
	 */
	extent_bytes = max_bytes - bitmap_bytes;
	extent_bytes = min_t(u64, extent_bytes, div64_u64(max_bytes, 2));

	ctl->extents_thresh =
		div64_u64(extent_bytes, (sizeof(struct btrfs_free_space)));
}
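
/*
 * Set or clear a run of bits in a bitmap entry and adjust the byte counters
 * to match; the __ variant of clear leaves ctl->free_space alone for callers
 * that do their own accounting.
 */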
static inline void __bitmap_clear_bits(struct btrfs_free_space_ctl *ctl,
				       struct btrfs_free_space *info,
				       u64 offset, u64 bytes)
{
	unsigned long start, count;

	start = offset_to_bit(info->offset, ctl->unit, offset);
	count = bytes_to_bits(bytes, ctl->unit);
	ASSERT(start + count <= BITS_PER_BITMAP);

	bitmap_clear(info->bitmap, start, count);

	info->bytes -= bytes;
}

static void bitmap_clear_bits(struct btrfs_free_space_ctl *ctl,
			      struct btrfs_free_space *info, u64 offset,
			      u64 bytes)
{
	__bitmap_clear_bits(ctl, info, offset, bytes);
	ctl->free_space -= bytes;
}

static void bitmap_set_bits(struct btrfs_free_space_ctl *ctl,
			    struct btrfs_free_space *info, u64 offset,
			    u64 bytes)
{
	unsigned long start, count;

	start = offset_to_bit(info->offset, ctl->unit, offset);
	count = bytes_to_bits(bytes, ctl->unit);
	ASSERT(start + count <= BITS_PER_BITMAP);

	bitmap_set(info->bitmap, start, count);

	info->bytes += bytes;
	ctl->free_space += bytes;
}

/*
 * If we can not find a suitable extent, we will use *bytes to record
 * the size of the largest free extent we did find.
 */
static int search_bitmap(struct btrfs_free_space_ctl *ctl,
			 struct btrfs_free_space *bitmap_info, u64 *offset,
			 u64 *bytes)
{
	unsigned long found_bits = 0;
	unsigned long max_bits = 0;
	unsigned long bits, i;
	unsigned long next_zero;
	unsigned long extent_bits;

	i = offset_to_bit(bitmap_info->offset, ctl->unit,
			  max_t(u64, *offset, bitmap_info->offset));
	bits = bytes_to_bits(*bytes, ctl->unit);

	for_each_set_bit_from(i, bitmap_info->bitmap, BITS_PER_BITMAP) {
		next_zero = find_next_zero_bit(bitmap_info->bitmap,
					       BITS_PER_BITMAP, i);
		extent_bits = next_zero - i;
		if (extent_bits >= bits) {
			found_bits = extent_bits;
			break;
		} else if (extent_bits > max_bits) {
			max_bits = extent_bits;
		}
		i = next_zero;
	}

	if (found_bits) {
		*offset = (u64)(i * ctl->unit) + bitmap_info->offset;
		*bytes = (u64)(found_bits) * ctl->unit;
		return 0;
	}

	*bytes = (u64)(max_bits) * ctl->unit;
	return -1;
}

/* Cache the size of the max extent in bytes */
static struct btrfs_free_space *
find_free_space(struct btrfs_free_space_ctl *ctl, u64 *offset, u64 *bytes,
		unsigned long align, u64 *max_extent_size)
{
	struct btrfs_free_space *entry;
	struct rb_node *node;
	u64 tmp;
	u64 align_off;
	int ret;

	if (!ctl->free_space_offset.rb_node)
		goto out;

	entry = tree_search_offset(ctl, offset_to_bitmap(ctl, *offset), 0, 1);
	if (!entry)
		goto out;

	for (node = &entry->offset_index; node; node = rb_next(node)) {
		entry = rb_entry(node, struct btrfs_free_space, offset_index);
		if (entry->bytes < *bytes) {
			if (entry->bytes > *max_extent_size)
				*max_extent_size = entry->bytes;
			continue;
		}

		/* make sure the space returned is big enough
		 * to match our requested alignment
		 */
		if (*bytes >= align) {
			tmp = entry->offset - ctl->start + align - 1;
			do_div(tmp, align);
			tmp = tmp * align + ctl->start;
			align_off = tmp - entry->offset;
		} else {
			align_off = 0;
			tmp = entry->offset;
		}

		if (entry->bytes < *bytes + align_off) {
			if (entry->bytes > *max_extent_size)
				*max_extent_size = entry->bytes;
			continue;
		}

		if (entry->bitmap) {
			u64 size = *bytes;

			ret = search_bitmap(ctl, entry, &tmp, &size);
			if (!ret) {
				*offset = tmp;
				*bytes = size;
				return entry;
			} else if (size > *max_extent_size) {
				*max_extent_size = size;
			}
			continue;
		}

		*offset = tmp;
		*bytes = entry->bytes - align_off;
		return entry;
	}
out:
	return NULL;
}
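
/*
 * Turn @info into an empty bitmap entry covering the bitmap-aligned region
 * around @offset, or tear a bitmap entry down again, keeping total_bitmaps
 * and the extent threshold current.
 */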
static void add_new_bitmap(struct btrfs_free_space_ctl *ctl,
			   struct btrfs_free_space *info, u64 offset)
{
	info->offset = offset_to_bitmap(ctl, offset);
	info->bytes = 0;
	INIT_LIST_HEAD(&info->list);
	link_free_space(ctl, info);
	ctl->total_bitmaps++;

	ctl->op->recalc_thresholds(ctl);
}

static void free_bitmap(struct btrfs_free_space_ctl *ctl,
			struct btrfs_free_space *bitmap_info)
{
	unlink_free_space(ctl, bitmap_info);
	kfree(bitmap_info->bitmap);
	kmem_cache_free(btrfs_free_space_cachep, bitmap_info);
	ctl->total_bitmaps--;
	ctl->op->recalc_thresholds(ctl);
}
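
/*
 * Clear *bytes starting at *offset out of one or more bitmap entries.
 * Returns -EAGAIN when the remainder of the range is tracked by a following
 * entry and the caller must retry, or -EINVAL when the space we were asked
 * to remove isn't actually present.
 */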
static noinline int remove_from_bitmap(struct btrfs_free_space_ctl *ctl,
				       struct btrfs_free_space *bitmap_info,
				       u64 *offset, u64 *bytes)
{
	u64 end;
	u64 search_start, search_bytes;
	int ret;

again:
	end = bitmap_info->offset + (u64)(BITS_PER_BITMAP * ctl->unit) - 1;

	/*
	 * We need to search for bits in this bitmap. We could only cover some
	 * of the extent in this bitmap thanks to how we add space, so we need
	 * to search for as much of it as we can and clear that amount, and
	 * then go searching for the next bit.
	 */
	search_start = *offset;
	search_bytes = ctl->unit;
	search_bytes = min(search_bytes, end - search_start + 1);
	ret = search_bitmap(ctl, bitmap_info, &search_start, &search_bytes);
	if (ret < 0 || search_start != *offset)
		return -EINVAL;

	/* We may have found more bits than what we need */
	search_bytes = min(search_bytes, *bytes);

	/* Cannot clear past the end of the bitmap */
	search_bytes = min(search_bytes, end - search_start + 1);

	bitmap_clear_bits(ctl, bitmap_info, search_start, search_bytes);
	*offset += search_bytes;
	*bytes -= search_bytes;

	if (*bytes) {
		struct rb_node *next = rb_next(&bitmap_info->offset_index);
		if (!bitmap_info->bytes)
			free_bitmap(ctl, bitmap_info);

		/*
		 * no entry after this bitmap, but we still have bytes to
		 * remove, so something has gone wrong.
		 */
		if (!next)
			return -EINVAL;

		bitmap_info = rb_entry(next, struct btrfs_free_space,
				       offset_index);

		/*
		 * if the next entry isn't a bitmap we need to return to let the
		 * extent stuff do its work.
		 */
		if (!bitmap_info->bitmap)
			return -EAGAIN;

		/*
		 * Ok the next item is a bitmap, but it may not actually hold
		 * the information for the rest of this free space stuff, so
		 * look for it, and if we don't find it return so we can try
		 * everything over again.
		 */
		search_start = *offset;
		search_bytes = ctl->unit;
		ret = search_bitmap(ctl, bitmap_info, &search_start,
				    &search_bytes);
		if (ret < 0 || search_start != *offset)
			return -EAGAIN;

		goto again;
	} else if (!bitmap_info->bytes)
		free_bitmap(ctl, bitmap_info);

	return 0;
}
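
/*
 * Set as much of [offset, offset + bytes) as fits inside this bitmap entry
 * and return the number of bytes actually added.
 */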
static u64 add_bytes_to_bitmap(struct btrfs_free_space_ctl *ctl,
			       struct btrfs_free_space *info, u64 offset,
			       u64 bytes)
{
	u64 bytes_to_set = 0;
	u64 end;

	end = info->offset + (u64)(BITS_PER_BITMAP * ctl->unit);

	bytes_to_set = min(end - offset, bytes);

	bitmap_set_bits(ctl, info, offset, bytes_to_set);

	return bytes_to_set;
}

static bool use_bitmap(struct btrfs_free_space_ctl *ctl,
		       struct btrfs_free_space *info)
{
	struct btrfs_block_group_cache *block_group = ctl->private;

	/*
	 * If we are below the extents threshold then we can add this as an
	 * extent, and don't have to deal with the bitmap
	 */
	if (ctl->free_extents < ctl->extents_thresh) {
		/*
		 * If this block group has some small extents we don't want to
		 * use up all of our free slots in the cache with them, we want
		 * to reserve them to larger extents, however if we have plenty
		 * of cache left then go ahead and add them, no sense in adding
		 * the overhead of a bitmap if we don't have to.
		 */
		if (info->bytes <= block_group->sectorsize * 4) {
			if (ctl->free_extents * 2 <= ctl->extents_thresh)
				return false;
		} else {
			return false;
		}
	}

	/*
	 * The original block groups from mkfs can be really small, like 8
	 * megabytes, so don't bother with a bitmap for those entries. However
	 * some block groups can be smaller than what a bitmap would cover but
	 * are still large enough that they could overflow the 32k memory limit,
	 * so allow those block groups to still be allowed to have a bitmap
	 * entry.
	 */
	if (((BITS_PER_BITMAP * ctl->unit) >> 1) > block_group->key.offset)
		return false;

	return true;
}

static struct btrfs_free_space_op free_space_op = {
	.recalc_thresholds	= recalculate_thresholds,
	.use_bitmap		= use_bitmap,
};
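
/*
 * Push the free space described by @info into bitmap entries, allocating a
 * new bitmap when none covers the range. Returns 1 when all of the space was
 * added to bitmaps, 0 when the caller should keep it as an extent entry, or
 * a negative errno on allocation failure.
 */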
static int insert_into_bitmap(struct btrfs_free_space_ctl *ctl,
			      struct btrfs_free_space *info)
{
	struct btrfs_free_space *bitmap_info;
	struct btrfs_block_group_cache *block_group = NULL;
	int added = 0;
	u64 bytes, offset, bytes_added;
	int ret;

	bytes = info->bytes;
	offset = info->offset;

	if (!ctl->op->use_bitmap(ctl, info))
		return 0;

	if (ctl->op == &free_space_op)
		block_group = ctl->private;
again:
	/*
	 * Since we link bitmaps right into the cluster we need to see if we
	 * have a cluster here, and if so and it has our bitmap we need to add
	 * the free space to that bitmap.
	 */
	if (block_group && !list_empty(&block_group->cluster_list)) {
		struct btrfs_free_cluster *cluster;
		struct rb_node *node;
		struct btrfs_free_space *entry;

		cluster = list_entry(block_group->cluster_list.next,
				     struct btrfs_free_cluster,
				     block_group_list);
		spin_lock(&cluster->lock);
		node = rb_first(&cluster->root);
		if (!node) {
			spin_unlock(&cluster->lock);
			goto no_cluster_bitmap;
		}

		entry = rb_entry(node, struct btrfs_free_space, offset_index);
		if (!entry->bitmap) {
			spin_unlock(&cluster->lock);
			goto no_cluster_bitmap;
		}

		if (entry->offset == offset_to_bitmap(ctl, offset)) {
			bytes_added = add_bytes_to_bitmap(ctl, entry,
							  offset, bytes);
			bytes -= bytes_added;
			offset += bytes_added;
		}
		spin_unlock(&cluster->lock);
		if (!bytes) {
			ret = 1;
			goto out;
		}
	}

no_cluster_bitmap:
	bitmap_info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
					 1, 0);
	if (!bitmap_info) {
		ASSERT(added == 0);
		goto new_bitmap;
	}

	bytes_added = add_bytes_to_bitmap(ctl, bitmap_info, offset, bytes);
	bytes -= bytes_added;
	offset += bytes_added;
	added = 0;

	if (!bytes) {
		ret = 1;
		goto out;
	} else
		goto again;

new_bitmap:
	if (info && info->bitmap) {
		add_new_bitmap(ctl, info, offset);
		added = 1;
		info = NULL;
		goto again;
	} else {
		spin_unlock(&ctl->tree_lock);

		/* no pre-allocated info, allocate a new one */
		if (!info) {
			info = kmem_cache_zalloc(btrfs_free_space_cachep,
						 GFP_NOFS);
			if (!info) {
				spin_lock(&ctl->tree_lock);
				ret = -ENOMEM;
				goto out;
			}
		}

		/* allocate the bitmap */
		info->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS);
		spin_lock(&ctl->tree_lock);
		if (!info->bitmap) {
			ret = -ENOMEM;
			goto out;
		}
		goto again;
	}

out:
	if (info) {
		if (info->bitmap)
			kfree(info->bitmap);
		kmem_cache_free(btrfs_free_space_cachep, info);
	}

	return ret;
}
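
/*
 * See if the new extent described by @info is directly adjacent to existing
 * extent entries and, if so, absorb those entries into @info. Returns true
 * when at least one merge happened.
 */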
static bool try_merge_free_space(struct btrfs_free_space_ctl *ctl,
			  struct btrfs_free_space *info, bool update_stat)
{
	struct btrfs_free_space *left_info;
	struct btrfs_free_space *right_info;
	bool merged = false;
	u64 offset = info->offset;
	u64 bytes = info->bytes;

	/*
	 * first we want to see if there is free space adjacent to the range we
	 * are adding; if there is, remove that struct and add a new one to
	 * cover the entire range
	 */
	right_info = tree_search_offset(ctl, offset + bytes, 0, 0);
	if (right_info && rb_prev(&right_info->offset_index))
		left_info = rb_entry(rb_prev(&right_info->offset_index),
				     struct btrfs_free_space, offset_index);
	else
		left_info = tree_search_offset(ctl, offset - 1, 0, 0);

	if (right_info && !right_info->bitmap) {
		if (update_stat)
			unlink_free_space(ctl, right_info);
		else
			__unlink_free_space(ctl, right_info);
		info->bytes += right_info->bytes;
		kmem_cache_free(btrfs_free_space_cachep, right_info);
		merged = true;
	}

	if (left_info && !left_info->bitmap &&
	    left_info->offset + left_info->bytes == offset) {
		if (update_stat)
			unlink_free_space(ctl, left_info);
		else
			__unlink_free_space(ctl, left_info);
		info->offset = left_info->offset;
		info->bytes += left_info->bytes;
		kmem_cache_free(btrfs_free_space_cachep, left_info);
		merged = true;
	}

	return merged;
}
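
/*
 * Try to grow @info to the right by stealing free bits from the bitmap that
 * starts where @info ends; returns true if any bits were claimed.
 */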
static bool steal_from_bitmap_to_end(struct btrfs_free_space_ctl *ctl,
				     struct btrfs_free_space *info,
				     bool update_stat)
{
	struct btrfs_free_space *bitmap;
	unsigned long i;
	unsigned long j;
	const u64 end = info->offset + info->bytes;
	const u64 bitmap_offset = offset_to_bitmap(ctl, end);
	u64 bytes;

	bitmap = tree_search_offset(ctl, bitmap_offset, 1, 0);
	if (!bitmap)
		return false;

	i = offset_to_bit(bitmap->offset, ctl->unit, end);
	j = find_next_zero_bit(bitmap->bitmap, BITS_PER_BITMAP, i);
	if (j == i)
		return false;
	bytes = (j - i) * ctl->unit;
	info->bytes += bytes;

	if (update_stat)
		bitmap_clear_bits(ctl, bitmap, end, bytes);
	else
		__bitmap_clear_bits(ctl, bitmap, end, bytes);

	if (!bitmap->bytes)
		free_bitmap(ctl, bitmap);

	return true;
}
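
/*
 * Try to grow @info to the left by stealing the free bits that immediately
 * precede it in the covering bitmap; returns true if any bits were claimed.
 */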
static bool steal_from_bitmap_to_front(struct btrfs_free_space_ctl *ctl,
				       struct btrfs_free_space *info,
				       bool update_stat)
{
	struct btrfs_free_space *bitmap;
	u64 bitmap_offset;
	unsigned long i;
	unsigned long j;
	unsigned long prev_j;
	u64 bytes;

	bitmap_offset = offset_to_bitmap(ctl, info->offset);
	/* If we're on a boundary, try the previous logical bitmap. */
	if (bitmap_offset == info->offset) {
		if (info->offset == 0)
			return false;
		bitmap_offset = offset_to_bitmap(ctl, info->offset - 1);
	}

	bitmap = tree_search_offset(ctl, bitmap_offset, 1, 0);
	if (!bitmap)
		return false;

	i = offset_to_bit(bitmap->offset, ctl->unit, info->offset) - 1;
	j = 0;
	prev_j = (unsigned long)-1;
	for_each_clear_bit_from(j, bitmap->bitmap, BITS_PER_BITMAP) {
		if (j > i)
			break;
		prev_j = j;
	}
	if (prev_j == i)
		return false;

	if (prev_j == (unsigned long)-1)
		bytes = (i + 1) * ctl->unit;
	else
		bytes = (i - prev_j) * ctl->unit;

	info->offset -= bytes;
	info->bytes += bytes;

	if (update_stat)
		bitmap_clear_bits(ctl, bitmap, info->offset, bytes);
	else
		__bitmap_clear_bits(ctl, bitmap, info->offset, bytes);

	if (!bitmap->bytes)
		free_bitmap(ctl, bitmap);

	return true;
}

/*
 * We prefer always to allocate from extent entries, both for clustered and
 * non-clustered allocation requests. So when attempting to add a new extent
 * entry, try to see if there's adjacent free space in bitmap entries, and if
 * there is, migrate that space from the bitmaps to the extent.
 * Like this we get better chances of satisfying space allocation requests
 * because we attempt to satisfy them based on a single cache entry, and never
 * on 2 or more entries - even if the entries represent a contiguous free space
 * region (e.g. 1 extent entry + 1 bitmap entry starting where the extent entry
 * ends).
 */
static void steal_from_bitmap(struct btrfs_free_space_ctl *ctl,
			      struct btrfs_free_space *info,
			      bool update_stat)
{
	/*
	 * Only work with disconnected entries, as we can change their offset,
	 * and must be extent entries.
	 */
	ASSERT(!info->bitmap);
	ASSERT(RB_EMPTY_NODE(&info->offset_index));

	if (ctl->total_bitmaps > 0) {
		bool stole_end;
		bool stole_front = false;

		stole_end = steal_from_bitmap_to_end(ctl, info, update_stat);
		if (ctl->total_bitmaps > 0)
			stole_front = steal_from_bitmap_to_front(ctl, info,
								 update_stat);

		if (stole_end || stole_front)
			try_merge_free_space(ctl, info, update_stat);
	}
}

int __btrfs_add_free_space(struct btrfs_free_space_ctl *ctl,
			   u64 offset, u64 bytes)
{
	struct btrfs_free_space *info;
	int ret = 0;

	info = kmem_cache_zalloc(btrfs_free_space_cachep, GFP_NOFS);
	if (!info)
		return -ENOMEM;

	info->offset = offset;
	info->bytes = bytes;
	RB_CLEAR_NODE(&info->offset_index);

	spin_lock(&ctl->tree_lock);

	if (try_merge_free_space(ctl, info, true))
		goto link;

	/*
	 * There was no extent directly to the left or right of this new
	 * extent, so we know we're going to have to allocate a new extent;
	 * before we do that, see if we need to drop this into a bitmap
	 */
	ret = insert_into_bitmap(ctl, info);
	if (ret < 0) {
		goto out;
	} else if (ret) {
		ret = 0;
		goto out;
	}
link:
	/*
	 * Only steal free space from adjacent bitmaps if we're sure we're not
	 * going to add the new free space to existing bitmap entries - because
	 * that would mean unnecessary work that would be reverted. Therefore
	 * attempt to steal space from bitmaps if we're adding an extent entry.
	 */
	steal_from_bitmap(ctl, info, true);

	ret = link_free_space(ctl, info);
	if (ret)
		kmem_cache_free(btrfs_free_space_cachep, info);
out:
	spin_unlock(&ctl->tree_lock);

	if (ret) {
		printk(KERN_CRIT "BTRFS: unable to add free space: %d\n", ret);
		ASSERT(ret != -EEXIST);
	}

	return ret;
}
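
/*
 * Remove [offset, offset + bytes) from the free space cache of @block_group,
 * splitting extent entries and clearing bitmap bits as needed.
 */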
int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
			    u64 offset, u64 bytes)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *info;
	int ret;
	bool re_search = false;

	spin_lock(&ctl->tree_lock);

again:
	ret = 0;
	if (!bytes)
		goto out_lock;

	info = tree_search_offset(ctl, offset, 0, 0);
	if (!info) {
		/*
		 * oops didn't find an extent that matched the space we wanted
		 * to remove, look for a bitmap instead
		 */
		info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
					  1, 0);
		if (!info) {
			/*
			 * If we found a partial bit of our free space in a
			 * bitmap but then couldn't find the other part this may
			 * be a problem, so WARN about it.
			 */
			WARN_ON(re_search);
			goto out_lock;
		}
	}

	re_search = false;
	if (!info->bitmap) {
		unlink_free_space(ctl, info);
		if (offset == info->offset) {
			u64 to_free = min(bytes, info->bytes);

			info->bytes -= to_free;
			info->offset += to_free;
			if (info->bytes) {
				ret = link_free_space(ctl, info);
				WARN_ON(ret);
			} else {
				kmem_cache_free(btrfs_free_space_cachep, info);
			}

			offset += to_free;
			bytes -= to_free;
			goto again;
		} else {
			u64 old_end = info->bytes + info->offset;

			info->bytes = offset - info->offset;
			ret = link_free_space(ctl, info);
			WARN_ON(ret);
			if (ret)
				goto out_lock;

			/* Not enough bytes in this entry to satisfy us */
			if (old_end < offset + bytes) {
				bytes -= old_end - offset;
				offset = old_end;
				goto again;
			} else if (old_end == offset + bytes) {
				/* all done */
				goto out_lock;
			}
			spin_unlock(&ctl->tree_lock);

			ret = btrfs_add_free_space(block_group, offset + bytes,
						   old_end - (offset + bytes));
			WARN_ON(ret);
			goto out;
		}
	}

	ret = remove_from_bitmap(ctl, info, &offset, &bytes);
	if (ret == -EAGAIN) {
		re_search = true;
		goto again;
	}
out_lock:
	spin_unlock(&ctl->tree_lock);
out:
	return ret;
}

void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group,
			   u64 bytes)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *info;
	struct rb_node *n;
	int count = 0;

	for (n = rb_first(&ctl->free_space_offset); n; n = rb_next(n)) {
		info = rb_entry(n, struct btrfs_free_space, offset_index);
		if (info->bytes >= bytes && !block_group->ro)
			count++;
		btrfs_crit(block_group->fs_info,
			   "entry offset %llu, bytes %llu, bitmap %s",
			   info->offset, info->bytes,
			   (info->bitmap) ? "yes" : "no");
	}
	btrfs_info(block_group->fs_info, "block group has cluster?: %s",
		   list_empty(&block_group->cluster_list) ? "no" : "yes");
	btrfs_info(block_group->fs_info,
		   "%d blocks of free space at or bigger than bytes is", count);
}

void btrfs_init_free_space_ctl(struct btrfs_block_group_cache *block_group)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;

	spin_lock_init(&ctl->tree_lock);
	ctl->unit = block_group->sectorsize;
	ctl->start = block_group->key.objectid;
	ctl->private = block_group;
	ctl->op = &free_space_op;
	INIT_LIST_HEAD(&ctl->trimming_ranges);
	mutex_init(&ctl->cache_writeout_mutex);

	/*
	 * we only want to have 32k of ram per block group for keeping
	 * track of free space, and if we pass 1/2 of that we want to
	 * start converting things over to using bitmaps
	 */
	ctl->extents_thresh = ((1024 * 32) / 2) /
				sizeof(struct btrfs_free_space);
}

/*
 * for a given cluster, put all of its extents back into the free
 * space cache. If the block group passed doesn't match the block group
 * pointed to by the cluster, someone else raced in and freed the
 * cluster already. In that case, we just return without changing anything
 */
static int
__btrfs_return_cluster_to_free_space(
			     struct btrfs_block_group_cache *block_group,
			     struct btrfs_free_cluster *cluster)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *entry;
	struct rb_node *node;

	spin_lock(&cluster->lock);
	if (cluster->block_group != block_group)
		goto out;

	cluster->block_group = NULL;
	cluster->window_start = 0;
	list_del_init(&cluster->block_group_list);

	node = rb_first(&cluster->root);
	while (node) {
		bool bitmap;

		entry = rb_entry(node, struct btrfs_free_space, offset_index);
		node = rb_next(&entry->offset_index);
		rb_erase(&entry->offset_index, &cluster->root);
		RB_CLEAR_NODE(&entry->offset_index);

		bitmap = (entry->bitmap != NULL);
		if (!bitmap) {
			try_merge_free_space(ctl, entry, false);
			steal_from_bitmap(ctl, entry, false);
		}
		tree_insert_offset(&ctl->free_space_offset,
				   entry->offset, &entry->offset_index, bitmap);
	}
	cluster->root = RB_ROOT;

out:
	spin_unlock(&cluster->lock);
	btrfs_put_block_group(block_group);
	return 0;
}

static void __btrfs_remove_free_space_cache_locked(
				struct btrfs_free_space_ctl *ctl)
{
	struct btrfs_free_space *info;
	struct rb_node *node;

	while ((node = rb_last(&ctl->free_space_offset)) != NULL) {
		info = rb_entry(node, struct btrfs_free_space, offset_index);
		if (!info->bitmap) {
			unlink_free_space(ctl, info);
			kmem_cache_free(btrfs_free_space_cachep, info);
		} else {
			free_bitmap(ctl, info);
		}
		if (need_resched()) {
			spin_unlock(&ctl->tree_lock);
			cond_resched();
			spin_lock(&ctl->tree_lock);
		}
	}
}

void __btrfs_remove_free_space_cache(struct btrfs_free_space_ctl *ctl)
{
	spin_lock(&ctl->tree_lock);
	__btrfs_remove_free_space_cache_locked(ctl);
	spin_unlock(&ctl->tree_lock);
}

void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_cluster *cluster;
	struct list_head *head;

	spin_lock(&ctl->tree_lock);
	while ((head = block_group->cluster_list.next) !=
	       &block_group->cluster_list) {
		cluster = list_entry(head, struct btrfs_free_cluster,
				     block_group_list);

		WARN_ON(cluster->block_group != block_group);
		__btrfs_return_cluster_to_free_space(block_group, cluster);
		if (need_resched()) {
			spin_unlock(&ctl->tree_lock);
			cond_resched();
			spin_lock(&ctl->tree_lock);
		}
	}
	__btrfs_remove_free_space_cache_locked(ctl);
	spin_unlock(&ctl->tree_lock);
}
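
/*
 * Find and remove @bytes (plus @empty_size) of free space, honouring the
 * block group's full stripe alignment. Returns the chosen logical offset or
 * 0 on failure; any gap created in front of the allocation to satisfy the
 * alignment is handed back to the free space cache.
 */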
u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
			       u64 offset, u64 bytes, u64 empty_size,
			       u64 *max_extent_size)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *entry = NULL;
	u64 bytes_search = bytes + empty_size;
	u64 ret = 0;
	u64 align_gap = 0;
	u64 align_gap_len = 0;

	spin_lock(&ctl->tree_lock);
	entry = find_free_space(ctl, &offset, &bytes_search,
				block_group->full_stripe_len, max_extent_size);
	if (!entry)
		goto out;

	ret = offset;
	if (entry->bitmap) {
		bitmap_clear_bits(ctl, entry, offset, bytes);
		if (!entry->bytes)
			free_bitmap(ctl, entry);
	} else {
		unlink_free_space(ctl, entry);
		align_gap_len = offset - entry->offset;
		align_gap = entry->offset;

		entry->offset = offset + bytes;
		WARN_ON(entry->bytes < bytes + align_gap_len);

		entry->bytes -= bytes + align_gap_len;
		if (!entry->bytes)
			kmem_cache_free(btrfs_free_space_cachep, entry);
		else
			link_free_space(ctl, entry);
	}
out:
	spin_unlock(&ctl->tree_lock);

	if (align_gap_len)
		__btrfs_add_free_space(ctl, align_gap, align_gap_len);
	return ret;
}

/*
 * given a cluster, put all of its extents back into the free space
 * cache. If a block group is passed, this function will only free
 * a cluster that belongs to the passed block group.
 *
 * Otherwise, it'll get a reference on the block group pointed to by the
 * cluster and remove the cluster from it.
 */
int btrfs_return_cluster_to_free_space(
			       struct btrfs_block_group_cache *block_group,
			       struct btrfs_free_cluster *cluster)
{
	struct btrfs_free_space_ctl *ctl;
	int ret;

	/* first, get a safe pointer to the block group */
	spin_lock(&cluster->lock);
	if (!block_group) {
		block_group = cluster->block_group;
		if (!block_group) {
			spin_unlock(&cluster->lock);
			return 0;
		}
	} else if (cluster->block_group != block_group) {
		/* someone else has already freed it don't redo their work */
		spin_unlock(&cluster->lock);
		return 0;
	}
	atomic_inc(&block_group->count);
	spin_unlock(&cluster->lock);

	ctl = block_group->free_space_ctl;

	/* now return any extents the cluster had on it */
	spin_lock(&ctl->tree_lock);
	ret = __btrfs_return_cluster_to_free_space(block_group, cluster);
	spin_unlock(&ctl->tree_lock);

	/* finally drop our ref */
	btrfs_put_block_group(block_group);
	return ret;
}
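
/*
 * Allocate @bytes from a bitmap entry that belongs to a cluster; returns the
 * start offset on success or 0 when nothing large enough was found, updating
 * *max_extent_size in the latter case.
 */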
static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group,
				   struct btrfs_free_cluster *cluster,
				   struct btrfs_free_space *entry,
				   u64 bytes, u64 min_start,
				   u64 *max_extent_size)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	int err;
	u64 search_start = cluster->window_start;
	u64 search_bytes = bytes;
	u64 ret = 0;

	search_start = min_start;
	search_bytes = bytes;

	err = search_bitmap(ctl, entry, &search_start, &search_bytes);
	if (err) {
		if (search_bytes > *max_extent_size)
			*max_extent_size = search_bytes;
		return 0;
	}

	ret = search_start;
	__bitmap_clear_bits(ctl, entry, ret, bytes);

	return ret;
}

/*
 * given a cluster, try to allocate 'bytes' from it, returns 0
 * if it couldn't find anything suitably large, or a logical disk offset
 * if things worked out
 */
u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
			     struct btrfs_free_cluster *cluster, u64 bytes,
			     u64 min_start, u64 *max_extent_size)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *entry = NULL;
	struct rb_node *node;
	u64 ret = 0;

	spin_lock(&cluster->lock);
	if (bytes > cluster->max_size)
		goto out;

	if (cluster->block_group != block_group)
		goto out;

	node = rb_first(&cluster->root);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_free_space, offset_index);
	while (1) {
		if (entry->bytes < bytes && entry->bytes > *max_extent_size)
			*max_extent_size = entry->bytes;

		if (entry->bytes < bytes ||
		    (!entry->bitmap && entry->offset < min_start)) {
			node = rb_next(&entry->offset_index);
			if (!node)
				break;
			entry = rb_entry(node, struct btrfs_free_space,
					 offset_index);
			continue;
		}

		if (entry->bitmap) {
			ret = btrfs_alloc_from_bitmap(block_group,
						      cluster, entry, bytes,
						      cluster->window_start,
						      max_extent_size);
			if (ret == 0) {
				node = rb_next(&entry->offset_index);
				if (!node)
					break;
				entry = rb_entry(node, struct btrfs_free_space,
						 offset_index);
				continue;
			}
			cluster->window_start += bytes;
		} else {
			ret = entry->offset;

			entry->offset += bytes;
			entry->bytes -= bytes;
		}

		if (entry->bytes == 0)
			rb_erase(&entry->offset_index, &cluster->root);
		break;
	}
out:
	spin_unlock(&cluster->lock);

	if (!ret)
		return 0;

	spin_lock(&ctl->tree_lock);

	ctl->free_space -= bytes;
	if (entry->bytes == 0) {
		ctl->free_extents--;
		if (entry->bitmap) {
			kfree(entry->bitmap);
			ctl->total_bitmaps--;
			ctl->op->recalc_thresholds(ctl);
		}
		kmem_cache_free(btrfs_free_space_cachep, entry);
	}

	spin_unlock(&ctl->tree_lock);

	return ret;
}
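
/*
 * Build a cluster out of a single bitmap entry: find a window with at least
 * @bytes set in total and one run of at least @cont1_bytes, then move the
 * entry from the free space tree into the cluster's rbtree.
 */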
static int btrfs_bitmap_cluster(struct btrfs_block_group_cache *block_group,
				struct btrfs_free_space *entry,
				struct btrfs_free_cluster *cluster,
				u64 offset, u64 bytes,
				u64 cont1_bytes, u64 min_bytes)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	unsigned long next_zero;
	unsigned long i;
	unsigned long want_bits;
	unsigned long min_bits;
	unsigned long found_bits;
	unsigned long start = 0;
	unsigned long total_found = 0;
	int ret;

	i = offset_to_bit(entry->offset, ctl->unit,
			  max_t(u64, offset, entry->offset));
	want_bits = bytes_to_bits(bytes, ctl->unit);
	min_bits = bytes_to_bits(min_bytes, ctl->unit);

again:
	found_bits = 0;
	for_each_set_bit_from(i, entry->bitmap, BITS_PER_BITMAP) {
		next_zero = find_next_zero_bit(entry->bitmap,
					       BITS_PER_BITMAP, i);
		if (next_zero - i >= min_bits) {
			found_bits = next_zero - i;
			break;
		}
		i = next_zero;
	}

	if (!found_bits)
		return -ENOSPC;

	if (!total_found) {
		start = i;
		cluster->max_size = 0;
	}

	total_found += found_bits;

	if (cluster->max_size < found_bits * ctl->unit)
		cluster->max_size = found_bits * ctl->unit;

	if (total_found < want_bits || cluster->max_size < cont1_bytes) {
		i = next_zero + 1;
		goto again;
	}

	cluster->window_start = start * ctl->unit + entry->offset;
	rb_erase(&entry->offset_index, &ctl->free_space_offset);
	ret = tree_insert_offset(&cluster->root, entry->offset,
				 &entry->offset_index, 1);
	ASSERT(!ret); /* -EEXIST; Logic error */

	trace_btrfs_setup_cluster(block_group, cluster,
				  total_found * ctl->unit, 1);
	return 0;
}

/*
 * This searches the block group for just extents to fill the cluster with.
 * Try to find a cluster with at least bytes total bytes, at least one
 * extent of cont1_bytes, and other clusters of at least min_bytes.
 */
static noinline int
setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group,
			struct btrfs_free_cluster *cluster,
			struct list_head *bitmaps, u64 offset, u64 bytes,
			u64 cont1_bytes, u64 min_bytes)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *first = NULL;
	struct btrfs_free_space *entry = NULL;
	struct btrfs_free_space *last;
	struct rb_node *node;
	u64 window_free;
	u64 max_extent;
	u64 total_size = 0;

	entry = tree_search_offset(ctl, offset, 0, 1);
	if (!entry)
		return -ENOSPC;

	/*
	 * We don't want bitmaps, so just move along until we find a normal
	 * extent entry.
	 */
	while (entry->bitmap || entry->bytes < min_bytes) {
		if (entry->bitmap && list_empty(&entry->list))
			list_add_tail(&entry->list, bitmaps);
		node = rb_next(&entry->offset_index);
		if (!node)
			return -ENOSPC;
		entry = rb_entry(node, struct btrfs_free_space, offset_index);
	}

	window_free = entry->bytes;
	max_extent = entry->bytes;
	first = entry;
	last = entry;

	for (node = rb_next(&entry->offset_index); node;
	     node = rb_next(&entry->offset_index)) {
		entry = rb_entry(node, struct btrfs_free_space, offset_index);

		if (entry->bitmap) {
			if (list_empty(&entry->list))
				list_add_tail(&entry->list, bitmaps);
			continue;
		}

		if (entry->bytes < min_bytes)
			continue;

		last = entry;
		window_free += entry->bytes;
		if (entry->bytes > max_extent)
			max_extent = entry->bytes;
	}

	if (window_free < bytes || max_extent < cont1_bytes)
		return -ENOSPC;

	cluster->window_start = first->offset;

	node = &first->offset_index;

	/*
	 * now we've found our entries, pull them out of the free space
	 * cache and put them into the cluster rbtree
	 */
	do {
		int ret;

		entry = rb_entry(node, struct btrfs_free_space, offset_index);
		node = rb_next(&entry->offset_index);
		if (entry->bitmap || entry->bytes < min_bytes)
			continue;

		rb_erase(&entry->offset_index, &ctl->free_space_offset);
		ret = tree_insert_offset(&cluster->root, entry->offset,
					 &entry->offset_index, 0);
		total_size += entry->bytes;
		ASSERT(!ret); /* -EEXIST; Logic error */
	} while (node && entry != last);

	cluster->max_size = max_extent;
	trace_btrfs_setup_cluster(block_group, cluster, total_size, 0);
	return 0;
}

/*
 * This specifically looks for bitmaps that may work in the cluster, we assume
 * that we have already failed to find extents that will work.
 */
static noinline int
setup_cluster_bitmap(struct btrfs_block_group_cache *block_group,
		     struct btrfs_free_cluster *cluster,
		     struct list_head *bitmaps, u64 offset, u64 bytes,
		     u64 cont1_bytes, u64 min_bytes)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *entry;
	int ret = -ENOSPC;
	u64 bitmap_offset = offset_to_bitmap(ctl, offset);

	if (ctl->total_bitmaps == 0)
		return -ENOSPC;

	/*
	 * The bitmap that covers offset won't be in the list unless offset
	 * is just its start offset.
	 */
	entry = list_first_entry(bitmaps, struct btrfs_free_space, list);
	if (entry->offset != bitmap_offset) {
		entry = tree_search_offset(ctl, bitmap_offset, 1, 0);
		if (entry && list_empty(&entry->list))
			list_add(&entry->list, bitmaps);
	}

	list_for_each_entry(entry, bitmaps, list) {
		if (entry->bytes < bytes)
			continue;
		ret = btrfs_bitmap_cluster(block_group, entry, cluster, offset,
					   bytes, cont1_bytes, min_bytes);
		if (!ret)
			return 0;
	}

	/*
	 * The bitmaps list has all the bitmaps that record free space
	 * starting after offset, so no more search is required.
	 */
	return -ENOSPC;
}

/*
 * here we try to find a cluster of blocks in a block group. The goal
 * is to find at least bytes+empty_size.
 * We might not find them all in one contiguous area.
 *
 * returns zero and sets up cluster if things worked out, otherwise
 * it returns -enospc
 */
int btrfs_find_space_cluster(struct btrfs_root *root,
			     struct btrfs_block_group_cache *block_group,
			     struct btrfs_free_cluster *cluster,
			     u64 offset, u64 bytes, u64 empty_size)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *entry, *tmp;
	LIST_HEAD(bitmaps);
	u64 min_bytes;
	u64 cont1_bytes;
	int ret;

	/*
	 * Choose the minimum extent size we'll require for this
	 * cluster. For SSD_SPREAD, don't allow any fragmentation.
	 * For metadata, allow allocates with smaller extents. For
	 * data, keep it dense.
	 */
	if (btrfs_test_opt(root, SSD_SPREAD)) {
		cont1_bytes = min_bytes = bytes + empty_size;
	} else if (block_group->flags & BTRFS_BLOCK_GROUP_METADATA) {
		cont1_bytes = bytes;
		min_bytes = block_group->sectorsize;
	} else {
		cont1_bytes = max(bytes, (bytes + empty_size) >> 2);
		min_bytes = block_group->sectorsize;
	}

	spin_lock(&ctl->tree_lock);

	/*
	 * If we know we don't have enough space to make a cluster don't even
	 * bother doing all the work to try and find one.
	 */
	if (ctl->free_space < bytes) {
		spin_unlock(&ctl->tree_lock);
		return -ENOSPC;
	}

	spin_lock(&cluster->lock);

	/* someone already found a cluster, hooray */
	if (cluster->block_group) {
		ret = 0;
		goto out;
	}

	trace_btrfs_find_cluster(block_group, offset, bytes, empty_size,
				 min_bytes);

	ret = setup_cluster_no_bitmap(block_group, cluster, &bitmaps, offset,
				      bytes + empty_size,
				      cont1_bytes, min_bytes);
	if (ret)
		ret = setup_cluster_bitmap(block_group, cluster, &bitmaps,
					   offset, bytes + empty_size,
					   cont1_bytes, min_bytes);

	/* Clear our temporary list */
	list_for_each_entry_safe(entry, tmp, &bitmaps, list)
		list_del_init(&entry->list);

	if (!ret) {
		atomic_inc(&block_group->count);
		list_add_tail(&cluster->block_group_list,
			      &block_group->cluster_list);
		cluster->block_group = block_group;
	} else {
		trace_btrfs_failed_cluster_setup(block_group);
	}
out:
	spin_unlock(&cluster->lock);
	spin_unlock(&ctl->tree_lock);

	return ret;
}

/*
 * simple code to zero out a cluster
 */
void btrfs_init_free_cluster(struct btrfs_free_cluster *cluster)
{
	spin_lock_init(&cluster->lock);
	spin_lock_init(&cluster->refill_lock);
	cluster->root = RB_ROOT;
	cluster->max_size = 0;
	INIT_LIST_HEAD(&cluster->block_group_list);
	cluster->block_group = NULL;
}
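
/*
 * Issue the actual discard for [start, start + bytes) while the range is
 * held out of the free space cache as reserved, then hand the reserved
 * range back to the cache and drop the trim entry from trimming_ranges.
 */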
static int do_trimming(struct btrfs_block_group_cache *block_group,
		       u64 *total_trimmed, u64 start, u64 bytes,
		       u64 reserved_start, u64 reserved_bytes,
		       struct btrfs_trim_range *trim_entry)
{
	struct btrfs_space_info *space_info = block_group->space_info;
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	int ret;
	int update = 0;
	u64 trimmed = 0;

	spin_lock(&space_info->lock);
	spin_lock(&block_group->lock);
	if (!block_group->ro) {
		block_group->reserved += reserved_bytes;
		space_info->bytes_reserved += reserved_bytes;
		update = 1;
	}
	spin_unlock(&block_group->lock);
	spin_unlock(&space_info->lock);

	ret = btrfs_discard_extent(fs_info->extent_root,
				   start, bytes, &trimmed);
	if (!ret)
		*total_trimmed += trimmed;

	mutex_lock(&ctl->cache_writeout_mutex);
	btrfs_add_free_space(block_group, reserved_start, reserved_bytes);
	list_del(&trim_entry->list);
	mutex_unlock(&ctl->cache_writeout_mutex);

	if (update) {
		spin_lock(&space_info->lock);
		spin_lock(&block_group->lock);
		if (block_group->ro)
			space_info->bytes_readonly += reserved_bytes;
		block_group->reserved -= reserved_bytes;
		space_info->bytes_reserved -= reserved_bytes;
		spin_unlock(&space_info->lock);
		spin_unlock(&block_group->lock);
	}

	return ret;
}
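
/*
 * Trim free space tracked by extent entries within [start, end), skipping
 * bitmaps; each candidate range is pulled out of the cache while it is
 * being discarded.
 */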
static int trim_no_bitmap(struct btrfs_block_group_cache *block_group,
			  u64 *total_trimmed, u64 start, u64 end, u64 minlen)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *entry;
	struct rb_node *node;
	int ret = 0;
	u64 extent_start;
	u64 extent_bytes;
	u64 bytes;

	while (start < end) {
		struct btrfs_trim_range trim_entry;

		mutex_lock(&ctl->cache_writeout_mutex);
		spin_lock(&ctl->tree_lock);

		if (ctl->free_space < minlen) {
			spin_unlock(&ctl->tree_lock);
			mutex_unlock(&ctl->cache_writeout_mutex);
			break;
		}

		entry = tree_search_offset(ctl, start, 0, 1);
		if (!entry) {
			spin_unlock(&ctl->tree_lock);
			mutex_unlock(&ctl->cache_writeout_mutex);
			break;
		}

		/* skip bitmaps */
		while (entry->bitmap) {
			node = rb_next(&entry->offset_index);
			if (!node) {
				spin_unlock(&ctl->tree_lock);
				mutex_unlock(&ctl->cache_writeout_mutex);
				goto out;
			}
			entry = rb_entry(node, struct btrfs_free_space,
					 offset_index);
		}

		if (entry->offset >= end) {
			spin_unlock(&ctl->tree_lock);
			mutex_unlock(&ctl->cache_writeout_mutex);
			break;
		}

		extent_start = entry->offset;
		extent_bytes = entry->bytes;
		start = max(start, extent_start);
		bytes = min(extent_start + extent_bytes, end) - start;
		if (bytes < minlen) {
			spin_unlock(&ctl->tree_lock);
			mutex_unlock(&ctl->cache_writeout_mutex);
			goto next;
		}

		unlink_free_space(ctl, entry);
		kmem_cache_free(btrfs_free_space_cachep, entry);

		spin_unlock(&ctl->tree_lock);
		trim_entry.start = extent_start;
		trim_entry.bytes = extent_bytes;
		list_add_tail(&trim_entry.list, &ctl->trimming_ranges);
		mutex_unlock(&ctl->cache_writeout_mutex);

		ret = do_trimming(block_group, total_trimmed, start, bytes,
				  extent_start, extent_bytes, &trim_entry);
		if (ret)
			break;
next:
		start += bytes;

		if (fatal_signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}

		cond_resched();
	}
out:
	return ret;
}
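
/*
 * Trim free space tracked by bitmap entries within [start, end), working
 * through one bitmap-sized window at a time.
 */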
static int trim_bitmaps(struct btrfs_block_group_cache *block_group,
			u64 *total_trimmed, u64 start, u64 end, u64 minlen)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *entry;
	int ret = 0;
	int ret2;
	u64 bytes;
	u64 offset = offset_to_bitmap(ctl, start);

	while (offset < end) {
		bool next_bitmap = false;
		struct btrfs_trim_range trim_entry;

		mutex_lock(&ctl->cache_writeout_mutex);
		spin_lock(&ctl->tree_lock);

		if (ctl->free_space < minlen) {
			spin_unlock(&ctl->tree_lock);
			mutex_unlock(&ctl->cache_writeout_mutex);
			break;
		}

		entry = tree_search_offset(ctl, offset, 1, 0);
		if (!entry) {
			spin_unlock(&ctl->tree_lock);
			mutex_unlock(&ctl->cache_writeout_mutex);
			next_bitmap = true;
			goto next;
		}

		bytes = minlen;
		ret2 = search_bitmap(ctl, entry, &start, &bytes);
		if (ret2 || start >= end) {
			spin_unlock(&ctl->tree_lock);
			mutex_unlock(&ctl->cache_writeout_mutex);
			next_bitmap = true;
			goto next;
		}

		bytes = min(bytes, end - start);
		if (bytes < minlen) {
			spin_unlock(&ctl->tree_lock);
			mutex_unlock(&ctl->cache_writeout_mutex);
			goto next;
		}

		bitmap_clear_bits(ctl, entry, start, bytes);
		if (entry->bytes == 0)
			free_bitmap(ctl, entry);

		spin_unlock(&ctl->tree_lock);
		trim_entry.start = start;
		trim_entry.bytes = bytes;
		list_add_tail(&trim_entry.list, &ctl->trimming_ranges);
		mutex_unlock(&ctl->cache_writeout_mutex);

		ret = do_trimming(block_group, total_trimmed, start, bytes,
				  start, bytes, &trim_entry);
		if (ret)
			break;
next:
		if (next_bitmap) {
			offset += BITS_PER_BITMAP * ctl->unit;
		} else {
			start += bytes;
			if (start >= offset + BITS_PER_BITMAP * ctl->unit)
				offset += BITS_PER_BITMAP * ctl->unit;
		}

		if (fatal_signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}

		cond_resched();
	}

	return ret;
}
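
/*
 * Trim all free space in [start, end) of the block group that is at least
 * @minlen long, first from extent entries and then from bitmaps. If the
 * block group was removed while we held the trimming reference, this also
 * finishes tearing down its extent mapping and free space cache.
 */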
int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group,
			   u64 *trimmed, u64 start, u64 end, u64 minlen)
{
	int ret;

	*trimmed = 0;

	spin_lock(&block_group->lock);
	if (block_group->removed) {
		spin_unlock(&block_group->lock);
		return 0;
	}
	atomic_inc(&block_group->trimming);
	spin_unlock(&block_group->lock);

	ret = trim_no_bitmap(block_group, trimmed, start, end, minlen);
	if (ret)
		goto out;

	ret = trim_bitmaps(block_group, trimmed, start, end, minlen);
out:
	spin_lock(&block_group->lock);
	if (atomic_dec_and_test(&block_group->trimming) &&
	    block_group->removed) {
		struct extent_map_tree *em_tree;
		struct extent_map *em;

		spin_unlock(&block_group->lock);
		lock_chunks(block_group->fs_info->chunk_root);
		em_tree = &block_group->fs_info->mapping_tree.map_tree;
		write_lock(&em_tree->lock);
		em = lookup_extent_mapping(em_tree, block_group->key.objectid,
					   1);
		BUG_ON(!em); /* logic error, can't happen */
		/*
		 * remove_extent_mapping() will delete us from the pinned_chunks
		 * list, which is protected by the chunk mutex.
		 */
		remove_extent_mapping(em_tree, em);
		write_unlock(&em_tree->lock);
		unlock_chunks(block_group->fs_info->chunk_root);

		/* once for us and once for the tree */
		free_extent_map(em);
		free_extent_map(em);

		/*
		 * We've left one free space entry and other tasks trimming
		 * this block group have left 1 entry each one. Free them.
		 */
		__btrfs_remove_free_space_cache(block_group->free_space_ctl);
	} else {
		spin_unlock(&block_group->lock);
	}

	return ret;
}

/*
 * Find the left-most item in the cache tree, and then return the
 * smallest inode number in the item.
 *
 * Note: the returned inode number may not be the smallest one in
 * the tree, if the left-most item is a bitmap.
 */
u64 btrfs_find_ino_for_alloc(struct btrfs_root *fs_root)
{
	struct btrfs_free_space_ctl *ctl = fs_root->free_ino_ctl;
	struct btrfs_free_space *entry = NULL;
	u64 ino = 0;

	spin_lock(&ctl->tree_lock);

	if (RB_EMPTY_ROOT(&ctl->free_space_offset))
		goto out;

	entry = rb_entry(rb_first(&ctl->free_space_offset),
			 struct btrfs_free_space, offset_index);

	if (!entry->bitmap) {
		ino = entry->offset;

		unlink_free_space(ctl, entry);
		entry->offset++;
		entry->bytes--;
		if (!entry->bytes)
			kmem_cache_free(btrfs_free_space_cachep, entry);
		else
			link_free_space(ctl, entry);
	} else {
		u64 offset = 0;
		u64 count = 1;
		int ret;

		ret = search_bitmap(ctl, entry, &offset, &count);
		/* Logic error; Should be empty if it can't find anything */
		ASSERT(!ret);

		ino = offset;
		bitmap_clear_bits(ctl, entry, offset, 1);
		if (entry->bytes == 0)
			free_bitmap(ctl, entry);
	}
out:
	spin_unlock(&ctl->tree_lock);

	return ino;
}

struct inode *lookup_free_ino_inode(struct btrfs_root *root,
				    struct btrfs_path *path)
{
	struct inode *inode = NULL;

	spin_lock(&root->ino_cache_lock);
	if (root->ino_cache_inode)
		inode = igrab(root->ino_cache_inode);
	spin_unlock(&root->ino_cache_lock);
	if (inode)
		return inode;

	inode = __lookup_free_space_inode(root, path, 0);
	if (IS_ERR(inode))
		return inode;

	spin_lock(&root->ino_cache_lock);
	if (!btrfs_fs_closing(root->fs_info))
		root->ino_cache_inode = igrab(inode);
	spin_unlock(&root->ino_cache_lock);

	return inode;
}

int create_free_ino_inode(struct btrfs_root *root,
			  struct btrfs_trans_handle *trans,
			  struct btrfs_path *path)
{
	return __create_free_space_inode(root, trans, path,
					 BTRFS_FREE_INO_OBJECTID, 0);
}

int load_free_ino_cache(struct btrfs_fs_info *fs_info, struct btrfs_root *root)
{
	struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
	struct btrfs_path *path;
	struct inode *inode;
	int ret = 0;
	u64 root_gen = btrfs_root_generation(&root->root_item);

	if (!btrfs_test_opt(root, INODE_MAP_CACHE))
		return 0;

	/*
	 * If we're unmounting then just return, since this does a search on the
	 * normal root and not the commit root and we could deadlock.
	 */
	if (btrfs_fs_closing(fs_info))
		return 0;

	path = btrfs_alloc_path();
	if (!path)
		return 0;

	inode = lookup_free_ino_inode(root, path);
	if (IS_ERR(inode))
		goto out;

	if (root_gen != BTRFS_I(inode)->generation)
		goto out_put;

	ret = __load_free_space_cache(root, inode, ctl, path, 0);

	if (ret < 0)
		btrfs_err(fs_info,
			"failed to load free ino cache for root %llu",
			root->root_key.objectid);
out_put:
	iput(inode);
out:
	btrfs_free_path(path);
	return ret;
}

int btrfs_write_out_ino_cache(struct btrfs_root *root,
			      struct btrfs_trans_handle *trans,
			      struct btrfs_path *path,
			      struct inode *inode)
{
	struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
	int ret;

	if (!btrfs_test_opt(root, INODE_MAP_CACHE))
		return 0;

	ret = __btrfs_write_out_cache(root, inode, ctl, NULL, trans, path, 0);
	if (ret) {
		btrfs_delalloc_release_metadata(inode, inode->i_size);
#ifdef DEBUG
		btrfs_err(root->fs_info,
			"failed to write free ino cache for root %llu",
			root->root_key.objectid);
#endif
	}

	return ret;
}

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
/*
 * Use this if you need to make a bitmap or extent entry specifically.  It
 * doesn't do any of the merging that add_free_space() does; instead it acts
 * much like the free space cache loading code, so you can build really
 * weird configurations.
 */
int test_add_free_space_entry(struct btrfs_block_group_cache *cache,
			      u64 offset, u64 bytes, bool bitmap)
{
	struct btrfs_free_space_ctl *ctl = cache->free_space_ctl;
	struct btrfs_free_space *info = NULL, *bitmap_info;
	void *map = NULL;
	u64 bytes_added;
	int ret;

again:
	if (!info) {
		info = kmem_cache_zalloc(btrfs_free_space_cachep, GFP_NOFS);
		if (!info)
			return -ENOMEM;
	}

	if (!bitmap) {
		spin_lock(&ctl->tree_lock);
		info->offset = offset;
		info->bytes = bytes;
		ret = link_free_space(ctl, info);
		spin_unlock(&ctl->tree_lock);
		if (ret)
			kmem_cache_free(btrfs_free_space_cachep, info);
		return ret;
	}

	if (!map) {
		map = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS);
		if (!map) {
			kmem_cache_free(btrfs_free_space_cachep, info);
			return -ENOMEM;
		}
	}

	spin_lock(&ctl->tree_lock);
	bitmap_info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
					 1, 0);
	if (!bitmap_info) {
		info->bitmap = map;
		map = NULL;
		add_new_bitmap(ctl, info, offset);
		bitmap_info = info;
		info = NULL;
	}

	bytes_added = add_bytes_to_bitmap(ctl, bitmap_info, offset, bytes);
	bytes -= bytes_added;
	offset += bytes_added;
	spin_unlock(&ctl->tree_lock);

	if (bytes)
		goto again;

	if (info)
		kmem_cache_free(btrfs_free_space_cachep, info);
	if (map)
		kfree(map);
	return 0;
}
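
/*
 * Editor's sketch, not part of the original file: a sanity test might use
 * test_add_free_space_entry() to build a layout that normal merging would
 * never produce, e.g. an extent entry butted directly against a bitmap
 * entry.  example_build_mixed_layout() is hypothetical; the sizes are
 * arbitrary.
 */
static inline int example_build_mixed_layout(struct btrfs_block_group_cache *cache)
{
	int ret;

	/* 4MiB extent entry at offset 0. */
	ret = test_add_free_space_entry(cache, 0, 4 * 1024 * 1024, 0);
	if (ret)
		return ret;

	/* 1MiB tracked in a bitmap, immediately after the extent. */
	return test_add_free_space_entry(cache, 4 * 1024 * 1024,
					 1 * 1024 * 1024, 1);
}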

/*
 * Checks whether the given range is in the free space cache.  This is
 * really just used to check for the absence of space, so if there is any
 * free space in the range at all we will return 1.
 */
int test_check_exists(struct btrfs_block_group_cache *cache,
		      u64 offset, u64 bytes)
{
	struct btrfs_free_space_ctl *ctl = cache->free_space_ctl;
	struct btrfs_free_space *info;
	int ret = 0;

	spin_lock(&ctl->tree_lock);
	info = tree_search_offset(ctl, offset, 0, 0);
	if (!info) {
		info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
					  1, 0);
		if (!info)
			goto out;
	}

have_info:
	if (info->bitmap) {
		u64 bit_off, bit_bytes;
		struct rb_node *n;
		struct btrfs_free_space *tmp;

		bit_off = offset;
		bit_bytes = ctl->unit;
		ret = search_bitmap(ctl, info, &bit_off, &bit_bytes);
		if (!ret) {
			if (bit_off == offset) {
				ret = 1;
				goto out;
			} else if (bit_off > offset &&
				   offset + bytes > bit_off) {
				ret = 1;
				goto out;
			}
		}

		n = rb_prev(&info->offset_index);
		while (n) {
			tmp = rb_entry(n, struct btrfs_free_space,
				       offset_index);
			if (tmp->offset + tmp->bytes < offset)
				break;
			if (offset + bytes < tmp->offset) {
				/* Advance via tmp, not info, or we loop forever. */
				n = rb_prev(&tmp->offset_index);
				continue;
			}
			info = tmp;
			goto have_info;
		}

		n = rb_next(&info->offset_index);
		while (n) {
			tmp = rb_entry(n, struct btrfs_free_space,
				       offset_index);
			if (offset + bytes < tmp->offset)
				break;
			if (tmp->offset + tmp->bytes < offset) {
				/* Advance via tmp, not info, or we loop forever. */
				n = rb_next(&tmp->offset_index);
				continue;
			}
			info = tmp;
			goto have_info;
		}

		ret = 0;
		goto out;
	}

	if (info->offset == offset) {
		ret = 1;
		goto out;
	}

	if (offset > info->offset && offset < info->offset + info->bytes)
		ret = 1;
out:
	spin_unlock(&ctl->tree_lock);
	return ret;
}
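
/*
 * Editor's sketch, not part of the original file: pairing the two helpers,
 * in the style of fs/btrfs/tests/free-space-tests.c.  After punching a hole
 * in the middle of an extent, test_check_exists() should report the hole as
 * absent and the remaining flank as present.  example_check_hole() is
 * hypothetical.
 */
static inline int example_check_hole(struct btrfs_block_group_cache *cache)
{
	int ret;

	ret = test_add_free_space_entry(cache, 0, 4 * 1024 * 1024, 0);
	if (ret)
		return ret;

	/* Remove the second 1MiB of the extent. */
	ret = btrfs_remove_free_space(cache, 1 * 1024 * 1024, 1 * 1024 * 1024);
	if (ret)
		return ret;

	if (test_check_exists(cache, 1 * 1024 * 1024, 1 * 1024 * 1024))
		return -EINVAL;	/* the hole should be gone */
	if (!test_check_exists(cache, 0, 1 * 1024 * 1024))
		return -EINVAL;	/* the first flank should remain */
	return 0;
}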

#endif /* CONFIG_BTRFS_FS_RUN_SANITY_TESTS */