/*
 * linux/fs/ext4/resize.c
 *
 * Support for resizing an ext4 filesystem while it is mounted.
 *
 * Copyright (C) 2001, 2002 Andreas Dilger <adilger@clusterfs.com>
 *
 * This could probably be made into a module, because it is not often in use.
 */

#define EXT4FS_DEBUG

#include <linux/errno.h>
#include <linux/slab.h>

#include "ext4_jbd2.h"
int ext4_resize_begin(struct super_block *sb)
{
	int ret = 0;

	if (!capable(CAP_SYS_RESOURCE))
		return -EPERM;

	/*
	 * If we are not using the primary superblock/GDT copy don't resize,
	 * because the user tools have no way of handling this.  Probably a
	 * bad time to do it anyways.
	 */
	if (EXT4_SB(sb)->s_sbh->b_blocknr !=
	    le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block)) {
		ext4_warning(sb, "won't resize using backup superblock at %llu",
			(unsigned long long)EXT4_SB(sb)->s_sbh->b_blocknr);
		return -EPERM;
	}

	/*
	 * We are not allowed to do online-resizing on a filesystem mounted
	 * with error, because it can destroy the filesystem easily.
	 */
	if (EXT4_SB(sb)->s_mount_state & EXT4_ERROR_FS) {
		ext4_warning(sb, "There are errors in the filesystem, "
			     "so online resizing is not allowed\n");
		return -EPERM;
	}

	if (test_and_set_bit_lock(EXT4_RESIZING, &EXT4_SB(sb)->s_resize_flags))
		ret = -EBUSY;

	return ret;
}

void ext4_resize_end(struct super_block *sb)
{
	clear_bit_unlock(EXT4_RESIZING, &EXT4_SB(sb)->s_resize_flags);
	smp_mb__after_atomic();
}
static ext4_group_t ext4_meta_bg_first_group(struct super_block *sb,
					     ext4_group_t group) {
	return (group >> EXT4_DESC_PER_BLOCK_BITS(sb)) <<
	       EXT4_DESC_PER_BLOCK_BITS(sb);
}

static ext4_fsblk_t ext4_meta_bg_first_block_no(struct super_block *sb,
					ext4_group_t group) {
	group = ext4_meta_bg_first_group(sb, group);
	return ext4_group_first_block_no(sb, group);
}
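
/*
 * Number of blocks at the start of @group consumed by group metadata:
 * the group descriptor blocks, plus the backup superblock and reserved
 * GDT blocks when this group carries a superblock/GDT backup.
 */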
static ext4_grpblk_t ext4_group_overhead_blocks(struct super_block *sb,
						ext4_group_t group) {
	ext4_grpblk_t overhead;

	overhead = ext4_bg_num_gdb(sb, group);
	if (ext4_bg_has_super(sb, group))
		overhead += 1 +
			le16_to_cpu(EXT4_SB(sb)->s_es->s_reserved_gdt_blocks);
	return overhead;
}
#define outside(b, first, last)	((b) < (first) || (b) >= (last))
#define inside(b, first, last)	((b) >= (first) && (b) < (last))

static int verify_group_input(struct super_block *sb,
			      struct ext4_new_group_data *input)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	ext4_fsblk_t start = ext4_blocks_count(es);
	ext4_fsblk_t end = start + input->blocks_count;
	ext4_group_t group = input->group;
	ext4_fsblk_t itend = input->inode_table + sbi->s_itb_per_group;
	unsigned overhead;
	ext4_fsblk_t metaend;
	struct buffer_head *bh = NULL;
	ext4_grpblk_t free_blocks_count, offset;
	int err = -EINVAL;

	if (group != sbi->s_groups_count) {
		ext4_warning(sb, "Cannot add at group %u (only %u groups)",
			     input->group, sbi->s_groups_count);
		return -EINVAL;
	}

	overhead = ext4_group_overhead_blocks(sb, group);
	metaend = start + overhead;
	input->free_blocks_count = free_blocks_count =
		input->blocks_count - 2 - overhead - sbi->s_itb_per_group;

	if (test_opt(sb, DEBUG))
		printk(KERN_DEBUG "EXT4-fs: adding %s group %u: %u blocks "
		       "(%d free, %u reserved)\n",
		       ext4_bg_has_super(sb, input->group) ? "normal" :
		       "no-super", input->group, input->blocks_count,
		       free_blocks_count, input->reserved_blocks);

	ext4_get_group_no_and_offset(sb, start, NULL, &offset);
	if (offset != 0)
		ext4_warning(sb, "Last group not full");
	else if (input->reserved_blocks > input->blocks_count / 5)
		ext4_warning(sb, "Reserved blocks too high (%u)",
			     input->reserved_blocks);
	else if (free_blocks_count < 0)
		ext4_warning(sb, "Bad blocks count %u",
			     input->blocks_count);
	else if (!(bh = sb_bread(sb, end - 1)))
		ext4_warning(sb, "Cannot read last block (%llu)",
			     end - 1);
	else if (outside(input->block_bitmap, start, end))
		ext4_warning(sb, "Block bitmap not in group (block %llu)",
			     (unsigned long long)input->block_bitmap);
	else if (outside(input->inode_bitmap, start, end))
		ext4_warning(sb, "Inode bitmap not in group (block %llu)",
			     (unsigned long long)input->inode_bitmap);
	else if (outside(input->inode_table, start, end) ||
		 outside(itend - 1, start, end))
		ext4_warning(sb, "Inode table not in group (blocks %llu-%llu)",
			     (unsigned long long)input->inode_table, itend - 1);
	else if (input->inode_bitmap == input->block_bitmap)
		ext4_warning(sb, "Block bitmap same as inode bitmap (%llu)",
			     (unsigned long long)input->block_bitmap);
	else if (inside(input->block_bitmap, input->inode_table, itend))
		ext4_warning(sb, "Block bitmap (%llu) in inode table "
			     "(%llu-%llu)",
			     (unsigned long long)input->block_bitmap,
			     (unsigned long long)input->inode_table, itend - 1);
	else if (inside(input->inode_bitmap, input->inode_table, itend))
		ext4_warning(sb, "Inode bitmap (%llu) in inode table "
			     "(%llu-%llu)",
			     (unsigned long long)input->inode_bitmap,
			     (unsigned long long)input->inode_table, itend - 1);
	else if (inside(input->block_bitmap, start, metaend))
		ext4_warning(sb, "Block bitmap (%llu) in GDT table (%llu-%llu)",
			     (unsigned long long)input->block_bitmap,
			     start, metaend - 1);
	else if (inside(input->inode_bitmap, start, metaend))
		ext4_warning(sb, "Inode bitmap (%llu) in GDT table (%llu-%llu)",
			     (unsigned long long)input->inode_bitmap,
			     start, metaend - 1);
	else if (inside(input->inode_table, start, metaend) ||
		 inside(itend - 1, start, metaend))
		ext4_warning(sb, "Inode table (%llu-%llu) overlaps GDT table "
			     "(%llu-%llu)",
			     (unsigned long long)input->inode_table,
			     itend - 1, start, metaend - 1);
	else
		err = 0;
	brelse(bh);

	return err;
}
/*
 * ext4_new_flex_group_data is used by the 64bit-resize interface to add one
 * flex group at a time.
 */
struct ext4_new_flex_group_data {
	struct ext4_new_group_data *groups;	/* new_group_data for groups
						   in the flex group */
	__u16 *bg_flags;			/* block group flags of groups
						   in @groups */
	ext4_group_t count;			/* number of groups in @groups
						 */
};
/*
 * alloc_flex_gd() allocates an ext4_new_flex_group_data structure large
 * enough to describe @flexbg_size groups.
 *
 * Returns NULL on failure, otherwise the address of the allocated structure.
 */
static struct ext4_new_flex_group_data *alloc_flex_gd(unsigned long flexbg_size)
{
	struct ext4_new_flex_group_data *flex_gd;

	flex_gd = kmalloc(sizeof(*flex_gd), GFP_NOFS);
	if (flex_gd == NULL)
		goto out3;

	if (flexbg_size >= UINT_MAX / sizeof(struct ext4_new_flex_group_data))
		goto out2;
	flex_gd->count = flexbg_size;

	flex_gd->groups = kmalloc(sizeof(struct ext4_new_group_data) *
				  flexbg_size, GFP_NOFS);
	if (flex_gd->groups == NULL)
		goto out2;

	flex_gd->bg_flags = kmalloc(flexbg_size * sizeof(__u16), GFP_NOFS);
	if (flex_gd->bg_flags == NULL)
		goto out1;

	return flex_gd;

out1:
	kfree(flex_gd->groups);
out2:
	kfree(flex_gd);
out3:
	return NULL;
}
static void free_flex_gd(struct ext4_new_flex_group_data *flex_gd)
{
	kfree(flex_gd->bg_flags);
	kfree(flex_gd->groups);
	kfree(flex_gd);
}
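
/*
 * Indexes for the three per-group table locations (block bitmap, inode
 * bitmap, inode table).  GROUP_TABLE_COUNT is referenced below when the
 * group tables are marked in the block bitmaps; the definition here follows
 * the one used in mainline fs/ext4/resize.c.
 */
enum {
	BLOCK_BITMAP = 0,	/* block bitmap */
	INODE_BITMAP,		/* inode bitmap */
	INODE_TABLE,		/* inode tables */
	GROUP_TABLE_COUNT,
};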
/*
 * ext4_alloc_group_tables() allocates block bitmaps, inode bitmaps
 * and inode tables for a flex group.
 *
 * This function is used by 64bit-resize.  Note that this function allocates
 * group tables from the 1st group of groups contained by @flexgd, which may
 * be only part of a flex group.
 *
 * @sb: super block of fs to which the groups belongs
 *
 * Returns 0 on a successful allocation of the metadata blocks in the
 * block group.
 */
static int ext4_alloc_group_tables(struct super_block *sb,
				struct ext4_new_flex_group_data *flex_gd,
				int flexbg_size)
{
	struct ext4_new_group_data *group_data = flex_gd->groups;
	ext4_fsblk_t start_blk;
	ext4_fsblk_t last_blk;
	ext4_group_t src_group;
	ext4_group_t bb_index = 0;
	ext4_group_t ib_index = 0;
	ext4_group_t it_index = 0;
	ext4_group_t group;
	ext4_group_t last_group;
	unsigned overhead;
	__u16 uninit_mask = (flexbg_size > 1) ? ~EXT4_BG_BLOCK_UNINIT : ~0;

	BUG_ON(flex_gd->count == 0 || group_data == NULL);

	src_group = group_data[0].group;
	last_group = src_group + flex_gd->count - 1;

	BUG_ON((flexbg_size > 1) && ((src_group & ~(flexbg_size - 1)) !=
	       (last_group & ~(flexbg_size - 1))));
next_group:
	group = group_data[0].group;
	if (src_group >= group_data[0].group + flex_gd->count)
		return -ENOSPC;
	start_blk = ext4_group_first_block_no(sb, src_group);
	last_blk = start_blk + group_data[src_group - group].blocks_count;

	overhead = ext4_group_overhead_blocks(sb, src_group);

	start_blk += overhead;

	/* We collect contiguous blocks as much as possible. */
	src_group++;
	for (; src_group <= last_group; src_group++) {
		overhead = ext4_group_overhead_blocks(sb, src_group);
		if (overhead == 0)
			last_blk += group_data[src_group - group].blocks_count;
		else
			break;
	}

	/* Allocate block bitmaps */
	for (; bb_index < flex_gd->count; bb_index++) {
		if (start_blk >= last_blk)
			goto next_group;
		group_data[bb_index].block_bitmap = start_blk++;
		group = ext4_get_group_number(sb, start_blk - 1);
		group -= group_data[0].group;
		group_data[group].free_blocks_count--;
		flex_gd->bg_flags[group] &= uninit_mask;
	}

	/* Allocate inode bitmaps */
	for (; ib_index < flex_gd->count; ib_index++) {
		if (start_blk >= last_blk)
			goto next_group;
		group_data[ib_index].inode_bitmap = start_blk++;
		group = ext4_get_group_number(sb, start_blk - 1);
		group -= group_data[0].group;
		group_data[group].free_blocks_count--;
		flex_gd->bg_flags[group] &= uninit_mask;
	}

	/* Allocate inode tables */
	for (; it_index < flex_gd->count; it_index++) {
		unsigned int itb = EXT4_SB(sb)->s_itb_per_group;
		ext4_fsblk_t next_group_start;

		if (start_blk + itb > last_blk)
			goto next_group;
		group_data[it_index].inode_table = start_blk;
		group = ext4_get_group_number(sb, start_blk);
		next_group_start = ext4_group_first_block_no(sb, group + 1);
		group -= group_data[0].group;

		if (start_blk + itb > next_group_start) {
			flex_gd->bg_flags[group + 1] &= uninit_mask;
			overhead = start_blk + itb - next_group_start;
			group_data[group + 1].free_blocks_count -= overhead;
			itb -= overhead;
		}

		group_data[group].free_blocks_count -= itb;
		flex_gd->bg_flags[group] &= uninit_mask;
		start_blk += EXT4_SB(sb)->s_itb_per_group;
	}

	if (test_opt(sb, DEBUG)) {
		int i;
		group = group_data[0].group;

		printk(KERN_DEBUG "EXT4-fs: adding a flex group with "
		       "%d groups, flexbg size is %d:\n", flex_gd->count,
		       flexbg_size);

		for (i = 0; i < flex_gd->count; i++) {
			printk(KERN_DEBUG "adding %s group %u: %u "
			       "blocks (%d free)\n",
			       ext4_bg_has_super(sb, group + i) ? "normal" :
			       "no-super", group + i,
			       group_data[i].blocks_count,
			       group_data[i].free_blocks_count);
		}
	}
	return 0;
}
static struct buffer_head *bclean(handle_t *handle, struct super_block *sb,
				  ext4_fsblk_t blk)
{
	struct buffer_head *bh;
	int err;

	bh = sb_getblk(sb, blk);
	if (unlikely(!bh))
		return ERR_PTR(-ENOMEM);
	BUFFER_TRACE(bh, "get_write_access");
	if ((err = ext4_journal_get_write_access(handle, bh))) {
		brelse(bh);
		bh = ERR_PTR(err);
	} else {
		memset(bh->b_data, 0, sb->s_blocksize);
		set_buffer_uptodate(bh);
	}

	return bh;
}
/*
 * If we have fewer than thresh credits, extend by EXT4_MAX_TRANS_DATA.
 * If that fails, restart the transaction & regain write access for the
 * buffer head which is used for block_bitmap modifications.
 */
static int extend_or_restart_transaction(handle_t *handle, int thresh)
{
	int err;

	if (ext4_handle_has_enough_credits(handle, thresh))
		return 0;

	err = ext4_journal_extend(handle, EXT4_MAX_TRANS_DATA);
	if (err < 0)
		return err;
	if (err) {
		err = ext4_journal_restart(handle, EXT4_MAX_TRANS_DATA);
		if (err)
			return err;
	}

	return 0;
}
/*
 * set_flexbg_block_bitmap() marks @count blocks starting from @block as used.
 *
 * Helper function for ext4_setup_new_group_blocks() which sets the bits in
 * the block bitmaps of the new groups.
 *
 * @sb: super block
 * @handle: journal handle
 * @flex_gd: flex group data
 */
static int set_flexbg_block_bitmap(struct super_block *sb, handle_t *handle,
			struct ext4_new_flex_group_data *flex_gd,
			ext4_fsblk_t block, ext4_group_t count)
{
	ext4_group_t count2;

	ext4_debug("mark blocks [%llu/%u] used\n", block, count);
	for (count2 = count; count > 0; count -= count2, block += count2) {
		ext4_fsblk_t start;
		struct buffer_head *bh;
		ext4_group_t group;
		int err;

		group = ext4_get_group_number(sb, block);
		start = ext4_group_first_block_no(sb, group);
		group -= flex_gd->groups[0].group;

		count2 = EXT4_BLOCKS_PER_GROUP(sb) - (block - start);
		if (count2 > count)
			count2 = count;

		if (flex_gd->bg_flags[group] & EXT4_BG_BLOCK_UNINIT) {
			BUG_ON(flex_gd->count > 1);
			continue;
		}

		err = extend_or_restart_transaction(handle, 1);
		if (err)
			return err;

		bh = sb_getblk(sb, flex_gd->groups[group].block_bitmap);
		if (unlikely(!bh))
			return -ENOMEM;

		BUFFER_TRACE(bh, "get_write_access");
		err = ext4_journal_get_write_access(handle, bh);
		if (err)
			return err;
		ext4_debug("mark block bitmap %#04llx (+%llu/%u)\n", block,
			   block - start, count2);
		ext4_set_bits(bh->b_data, block - start, count2);

		err = ext4_handle_dirty_metadata(handle, NULL, bh);
		if (unlikely(err))
			return err;
		brelse(bh);
	}

	return 0;
}
/*
 * Set up the block and inode bitmaps, and the inode table for the new groups.
 * This doesn't need to be part of the main transaction, since we are only
 * changing blocks outside the actual filesystem.  We still do journaling to
 * ensure the recovery is correct in case of a failure just after resize.
 * If any part of this fails, we simply abort the resize.
 *
 * setup_new_flex_group_blocks handles a flex group as follows:
 * 1. copy super block and GDT, and initialize group tables if necessary.
 *    In this step, we only set bits in block bitmaps for blocks taken by
 *    super block and GDT.
 * 2. allocate group tables in block bitmaps, that is, set bits in block
 *    bitmap for blocks taken by group tables.
 */
static int setup_new_flex_group_blocks(struct super_block *sb,
				struct ext4_new_flex_group_data *flex_gd)
{
	int group_table_count[] = {1, 1, EXT4_SB(sb)->s_itb_per_group};
	ext4_fsblk_t start;
	ext4_fsblk_t block;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	struct ext4_new_group_data *group_data = flex_gd->groups;
	__u16 *bg_flags = flex_gd->bg_flags;
	handle_t *handle;
	ext4_group_t group, count;
	struct buffer_head *bh = NULL;
	int reserved_gdb, i, j, err = 0, err2;
	int meta_bg;

	BUG_ON(!flex_gd->count || !group_data ||
	       group_data[0].group != sbi->s_groups_count);

	reserved_gdb = le16_to_cpu(es->s_reserved_gdt_blocks);
	meta_bg = EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_META_BG);

	/* This transaction may be extended/restarted along the way */
	handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, EXT4_MAX_TRANS_DATA);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	group = group_data[0].group;
	for (i = 0; i < flex_gd->count; i++, group++) {
		unsigned long gdblocks;
		ext4_grpblk_t overhead;

		gdblocks = ext4_bg_num_gdb(sb, group);
		start = ext4_group_first_block_no(sb, group);

		if (meta_bg == 0 && !ext4_bg_has_super(sb, group))
			goto handle_itb;

		if (meta_bg == 1) {
			ext4_group_t first_group;
			first_group = ext4_meta_bg_first_group(sb, group);
			if (first_group != group + 1 &&
			    first_group != group + EXT4_DESC_PER_BLOCK(sb) - 1)
				goto handle_itb;
		}

		block = start + ext4_bg_has_super(sb, group);
		/* Copy all of the GDT blocks into the backup in this group */
		for (j = 0; j < gdblocks; j++, block++) {
			struct buffer_head *gdb;

			ext4_debug("update backup group %#04llx\n", block);
			err = extend_or_restart_transaction(handle, 1);
			if (err)
				goto out;

			gdb = sb_getblk(sb, block);
			if (unlikely(!gdb)) {
				err = -ENOMEM;
				goto out;
			}

			BUFFER_TRACE(gdb, "get_write_access");
			err = ext4_journal_get_write_access(handle, gdb);
			if (err) {
				brelse(gdb);
				goto out;
			}
			memcpy(gdb->b_data, sbi->s_group_desc[j]->b_data,
			       gdb->b_size);
			set_buffer_uptodate(gdb);

			err = ext4_handle_dirty_metadata(handle, NULL, gdb);
			if (unlikely(err)) {
				brelse(gdb);
				goto out;
			}
			brelse(gdb);
		}

		/* Zero out all of the reserved backup group descriptor
		 * table blocks
		 */
		if (ext4_bg_has_super(sb, group)) {
			err = sb_issue_zeroout(sb, gdblocks + start + 1,
					       reserved_gdb, GFP_NOFS);
			if (err)
				goto out;
		}

handle_itb:
		/* Initialize group tables of the group @group */
		if (!(bg_flags[i] & EXT4_BG_INODE_ZEROED))
			goto handle_bb;

		/* Zero out all of the inode table blocks */
		block = group_data[i].inode_table;
		ext4_debug("clear inode table blocks %#04llx -> %#04lx\n",
			   block, sbi->s_itb_per_group);
		err = sb_issue_zeroout(sb, block, sbi->s_itb_per_group,
				       GFP_NOFS);
		if (err)
			goto out;

handle_bb:
		if (bg_flags[i] & EXT4_BG_BLOCK_UNINIT)
			goto handle_ib;

		/* Initialize block bitmap of the @group */
		block = group_data[i].block_bitmap;
		err = extend_or_restart_transaction(handle, 1);
		if (err)
			goto out;

		bh = bclean(handle, sb, block);
		if (IS_ERR(bh)) {
			err = PTR_ERR(bh);
			bh = NULL;
			goto out;
		}
		overhead = ext4_group_overhead_blocks(sb, group);
		if (overhead != 0) {
			ext4_debug("mark backup superblock %#04llx (+0)\n",
				   start);
			ext4_set_bits(bh->b_data, 0, overhead);
		}
		ext4_mark_bitmap_end(group_data[i].blocks_count,
				     sb->s_blocksize * 8, bh->b_data);
		err = ext4_handle_dirty_metadata(handle, NULL, bh);
		if (err)
			goto out;
		brelse(bh);

handle_ib:
		if (bg_flags[i] & EXT4_BG_INODE_UNINIT)
			continue;

		/* Initialize inode bitmap of the @group */
		block = group_data[i].inode_bitmap;
		err = extend_or_restart_transaction(handle, 1);
		if (err)
			goto out;
		/* Mark unused entries in inode bitmap used */
		bh = bclean(handle, sb, block);
		if (IS_ERR(bh)) {
			err = PTR_ERR(bh);
			bh = NULL;
			goto out;
		}

		ext4_mark_bitmap_end(EXT4_INODES_PER_GROUP(sb),
				     sb->s_blocksize * 8, bh->b_data);
		err = ext4_handle_dirty_metadata(handle, NULL, bh);
		if (err)
			goto out;
		brelse(bh);
	}
	bh = NULL;

	/* Mark group tables in block bitmap */
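	/*
	 * The loop below walks the block bitmap, inode bitmap and inode
	 * table locations in turn: the three fields are laid out
	 * consecutively in struct ext4_new_group_data, so
	 * (&group_data[i].block_bitmap)[j] selects one of them by index and
	 * group_table_count[j] is the number of blocks each occupies.  Runs
	 * that stay contiguous across groups are merged into a single
	 * set_flexbg_block_bitmap() call.
	 */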
	for (j = 0; j < GROUP_TABLE_COUNT; j++) {
		count = group_table_count[j];
		start = (&group_data[0].block_bitmap)[j];
		block = start;
		for (i = 1; i < flex_gd->count; i++) {
			block += group_table_count[j];
			if (block == (&group_data[i].block_bitmap)[j]) {
				count += group_table_count[j];
				continue;
			}
			err = set_flexbg_block_bitmap(sb, handle,
						flex_gd, start, count);
			if (err)
				goto out;
			count = group_table_count[j];
			start = (&group_data[i].block_bitmap)[j];
			block = start;
		}

		if (count) {
			err = set_flexbg_block_bitmap(sb, handle,
						flex_gd, start, count);
			if (err)
				goto out;
		}
	}

out:
	brelse(bh);
	err2 = ext4_journal_stop(handle);
	if (err2 && !err)
		err = err2;

	return err;
}
/*
 * Iterate through the groups which hold BACKUP superblock/GDT copies in an
 * ext4 filesystem.  The counters should be initialized to 1, 5, and 7 before
 * calling this for the first time.  In a sparse filesystem it will be the
 * sequence of powers of 3, 5, and 7: 1, 3, 5, 7, 9, 25, 27, 49, 81, ...
 * For a non-sparse filesystem it will be every group: 1, 2, 3, 4, ...
 */
static unsigned ext4_list_backups(struct super_block *sb, unsigned *three,
				  unsigned *five, unsigned *seven)
{
	unsigned *min = three;
	int mult = 3;
	unsigned ret;

	if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
					EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER)) {
		ret = *min;
		*min += 1;
		return ret;
	}

	if (*five < *min) {
		min = five;
		mult = 5;
	}
	if (*seven < *min) {
		min = seven;
		mult = 7;
	}

	ret = *min;
	*min *= mult;

	return ret;
}
/*
 * Check that all of the backup GDT blocks are held in the primary GDT block.
 * It is assumed that they are stored in group order.  Returns the number of
 * groups in current filesystem that have BACKUPS, or -ve error code.
 */
static int verify_reserved_gdb(struct super_block *sb,
			       ext4_group_t end,
			       struct buffer_head *primary)
{
	const ext4_fsblk_t blk = primary->b_blocknr;
	unsigned three = 1;
	unsigned five = 5;
	unsigned seven = 7;
	unsigned grp;
	__le32 *p = (__le32 *)primary->b_data;
	int gdbackups = 0;

	while ((grp = ext4_list_backups(sb, &three, &five, &seven)) < end) {
		if (le32_to_cpu(*p++) !=
		    grp * EXT4_BLOCKS_PER_GROUP(sb) + blk){
			ext4_warning(sb, "reserved GDT %llu"
				     " missing grp %d (%llu)",
				     blk, grp,
				     grp *
				     (ext4_fsblk_t)EXT4_BLOCKS_PER_GROUP(sb) +
				     blk);
			return -EINVAL;
		}
		if (++gdbackups > EXT4_ADDR_PER_BLOCK(sb))
			return -EFBIG;
	}

	return gdbackups;
}
/*
 * Called when we need to bring a reserved group descriptor table block into
 * use from the resize inode.  The primary copy of the new GDT block currently
 * is an indirect block (under the double indirect block in the resize inode).
 * The new backup GDT blocks will be stored as leaf blocks in this indirect
 * block, in group order.  Even though we know all the block numbers we need,
 * we check to ensure that the resize inode has actually reserved these blocks.
 *
 * Don't need to update the block bitmaps because the blocks are still in use.
 *
 * We get all of the error cases out of the way, so that we are sure to not
 * fail once we start modifying the data on disk, because JBD has no rollback.
 */
static int add_new_gdb(handle_t *handle, struct inode *inode,
		       ext4_group_t group)
{
	struct super_block *sb = inode->i_sb;
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;
	unsigned long gdb_num = group / EXT4_DESC_PER_BLOCK(sb);
	ext4_fsblk_t gdblock = EXT4_SB(sb)->s_sbh->b_blocknr + 1 + gdb_num;
	struct buffer_head **o_group_desc, **n_group_desc;
	struct buffer_head *dind;
	struct buffer_head *gdb_bh;
	int gdbackups;
	struct ext4_iloc iloc;
	__le32 *data;
	int err;

	if (test_opt(sb, DEBUG))
		printk(KERN_DEBUG
		       "EXT4-fs: ext4_add_new_gdb: adding group block %lu\n",
		       gdb_num);

	gdb_bh = sb_bread(sb, gdblock);
	if (!gdb_bh)
		return -EIO;

	gdbackups = verify_reserved_gdb(sb, group, gdb_bh);
	if (gdbackups < 0) {
		err = gdbackups;
		goto exit_bh;
	}

	data = EXT4_I(inode)->i_data + EXT4_DIND_BLOCK;
	dind = sb_bread(sb, le32_to_cpu(*data));
	if (!dind) {
		err = -EIO;
		goto exit_bh;
	}

	data = (__le32 *)dind->b_data;
	if (le32_to_cpu(data[gdb_num % EXT4_ADDR_PER_BLOCK(sb)]) != gdblock) {
		ext4_warning(sb, "new group %u GDT block %llu not reserved",
			     group, gdblock);
		err = -EINVAL;
		goto exit_dind;
	}

	BUFFER_TRACE(EXT4_SB(sb)->s_sbh, "get_write_access");
	err = ext4_journal_get_write_access(handle, EXT4_SB(sb)->s_sbh);
	if (unlikely(err))
		goto exit_dind;

	BUFFER_TRACE(gdb_bh, "get_write_access");
	err = ext4_journal_get_write_access(handle, gdb_bh);
	if (unlikely(err))
		goto exit_dind;

	BUFFER_TRACE(dind, "get_write_access");
	err = ext4_journal_get_write_access(handle, dind);
	if (unlikely(err))
		ext4_std_error(sb, err);

	/* ext4_reserve_inode_write() gets a reference on the iloc */
	err = ext4_reserve_inode_write(handle, inode, &iloc);
	if (unlikely(err))
		goto exit_dind;

	n_group_desc = ext4_kvmalloc((gdb_num + 1) *
				     sizeof(struct buffer_head *),
				     GFP_NOFS);
	if (!n_group_desc) {
		err = -ENOMEM;
		ext4_warning(sb, "not enough memory for %lu groups",
			     gdb_num + 1);
		goto exit_inode;
	}

	/*
	 * Finally, we have all of the possible failures behind us...
	 *
	 * Remove new GDT block from inode double-indirect block and clear out
	 * the new GDT block for use (which also "frees" the backup GDT blocks
	 * from the reserved inode).  We don't need to change the bitmaps for
	 * these blocks, because they are marked as in-use from being in the
	 * reserved inode, and will become GDT blocks (primary and backup).
	 */
	data[gdb_num % EXT4_ADDR_PER_BLOCK(sb)] = 0;
	err = ext4_handle_dirty_metadata(handle, NULL, dind);
	if (unlikely(err)) {
		ext4_std_error(sb, err);
		goto exit_inode;
	}
	inode->i_blocks -= (gdbackups + 1) * sb->s_blocksize >> 9;
	ext4_mark_iloc_dirty(handle, inode, &iloc);
	memset(gdb_bh->b_data, 0, sb->s_blocksize);
	err = ext4_handle_dirty_metadata(handle, NULL, gdb_bh);
	if (unlikely(err)) {
		ext4_std_error(sb, err);
		goto exit_inode;
	}
	brelse(dind);

	o_group_desc = EXT4_SB(sb)->s_group_desc;
	memcpy(n_group_desc, o_group_desc,
	       EXT4_SB(sb)->s_gdb_count * sizeof(struct buffer_head *));
	n_group_desc[gdb_num] = gdb_bh;
	EXT4_SB(sb)->s_group_desc = n_group_desc;
	EXT4_SB(sb)->s_gdb_count++;
	kvfree(o_group_desc);

	le16_add_cpu(&es->s_reserved_gdt_blocks, -1);
	err = ext4_handle_dirty_super(handle, sb);
	if (err)
		ext4_std_error(sb, err);

	return err;

exit_inode:
	kvfree(n_group_desc);
	brelse(iloc.bh);
exit_dind:
	brelse(dind);
exit_bh:
	brelse(gdb_bh);

	ext4_debug("leaving with error %d\n", err);
	return err;
}
/*
 * add_new_gdb_meta_bg is the sister of add_new_gdb.
 */
static int add_new_gdb_meta_bg(struct super_block *sb,
			       handle_t *handle, ext4_group_t group) {
	ext4_fsblk_t gdblock;
	struct buffer_head *gdb_bh;
	struct buffer_head **o_group_desc, **n_group_desc;
	unsigned long gdb_num = group / EXT4_DESC_PER_BLOCK(sb);
	int err;

	gdblock = ext4_meta_bg_first_block_no(sb, group) +
		   ext4_bg_has_super(sb, group);
	gdb_bh = sb_bread(sb, gdblock);
	if (!gdb_bh)
		return -EIO;
	n_group_desc = ext4_kvmalloc((gdb_num + 1) *
				     sizeof(struct buffer_head *),
				     GFP_NOFS);
	if (!n_group_desc) {
		err = -ENOMEM;
		ext4_warning(sb, "not enough memory for %lu groups",
			     gdb_num + 1);
		return err;
	}

	o_group_desc = EXT4_SB(sb)->s_group_desc;
	memcpy(n_group_desc, o_group_desc,
	       EXT4_SB(sb)->s_gdb_count * sizeof(struct buffer_head *));
	n_group_desc[gdb_num] = gdb_bh;
	EXT4_SB(sb)->s_group_desc = n_group_desc;
	EXT4_SB(sb)->s_gdb_count++;
	kvfree(o_group_desc);
	BUFFER_TRACE(gdb_bh, "get_write_access");
	err = ext4_journal_get_write_access(handle, gdb_bh);
	if (unlikely(err))
		brelse(gdb_bh);
	return err;
}
/*
 * Called when we are adding a new group which has a backup copy of each of
 * the GDT blocks (i.e. sparse group) and there are reserved GDT blocks.
 * We need to add these reserved backup GDT blocks to the resize inode, so
 * that they are kept for future resizing and not allocated to files.
 *
 * Each reserved backup GDT block will go into a different indirect block.
 * The indirect blocks are actually the primary reserved GDT blocks,
 * so we know in advance what their block numbers are.  We only get the
 * double-indirect block to verify it is pointing to the primary reserved
 * GDT blocks so we don't overwrite a data block by accident.  The reserved
 * backup GDT blocks are stored in their reserved primary GDT block.
 */
static int reserve_backup_gdb(handle_t *handle, struct inode *inode,
			      ext4_group_t group)
{
	struct super_block *sb = inode->i_sb;
	int reserved_gdb = le16_to_cpu(EXT4_SB(sb)->s_es->s_reserved_gdt_blocks);
	struct buffer_head **primary;
	struct buffer_head *dind;
	struct ext4_iloc iloc;
	ext4_fsblk_t blk;
	__le32 *data, *end;
	int gdbackups = 0;
	int res, i;
	int err;

	primary = kmalloc(reserved_gdb * sizeof(*primary), GFP_NOFS);
	if (!primary)
		return -ENOMEM;

	data = EXT4_I(inode)->i_data + EXT4_DIND_BLOCK;
	dind = sb_bread(sb, le32_to_cpu(*data));
	if (!dind) {
		err = -EIO;
		goto exit_free;
	}

	blk = EXT4_SB(sb)->s_sbh->b_blocknr + 1 + EXT4_SB(sb)->s_gdb_count;
	data = (__le32 *)dind->b_data + (EXT4_SB(sb)->s_gdb_count %
					 EXT4_ADDR_PER_BLOCK(sb));
	end = (__le32 *)dind->b_data + EXT4_ADDR_PER_BLOCK(sb);

	/* Get each reserved primary GDT block and verify it holds backups */
	for (res = 0; res < reserved_gdb; res++, blk++) {
		if (le32_to_cpu(*data) != blk) {
			ext4_warning(sb, "reserved block %llu"
				     " not at offset %ld",
				     blk,
				     (long)(data - (__le32 *)dind->b_data));
			err = -EINVAL;
			goto exit_bh;
		}
		primary[res] = sb_bread(sb, blk);
		if (!primary[res]) {
			err = -EIO;
			goto exit_bh;
		}
		gdbackups = verify_reserved_gdb(sb, group, primary[res]);
		if (gdbackups < 0) {
			brelse(primary[res]);
			err = gdbackups;
			goto exit_bh;
		}
		if (++data >= end)
			data = (__le32 *)dind->b_data;
	}

	for (i = 0; i < reserved_gdb; i++) {
		BUFFER_TRACE(primary[i], "get_write_access");
		if ((err = ext4_journal_get_write_access(handle, primary[i])))
			goto exit_bh;
	}

	if ((err = ext4_reserve_inode_write(handle, inode, &iloc)))
		goto exit_bh;

	/*
	 * Finally we can add each of the reserved backup GDT blocks from
	 * the new group to its reserved primary GDT block.
	 */
	blk = group * EXT4_BLOCKS_PER_GROUP(sb);
	for (i = 0; i < reserved_gdb; i++) {
		int err2;
		data = (__le32 *)primary[i]->b_data;
		/* printk("reserving backup %lu[%u] = %lu\n",
		       primary[i]->b_blocknr, gdbackups,
		       blk + primary[i]->b_blocknr); */
		data[gdbackups] = cpu_to_le32(blk + primary[i]->b_blocknr);
		err2 = ext4_handle_dirty_metadata(handle, NULL, primary[i]);
		if (!err)
			err = err2;
	}
	inode->i_blocks += reserved_gdb * sb->s_blocksize >> 9;
	ext4_mark_iloc_dirty(handle, inode, &iloc);

exit_bh:
	while (--res >= 0)
		brelse(primary[res]);
	brelse(dind);

exit_free:
	kfree(primary);

	return err;
}
/*
 * Update the backup copies of the ext4 metadata.  These don't need to be part
 * of the main resize transaction, because e2fsck will re-write them if there
 * is a problem (basically only OOM will cause a problem).  However, we
 * _should_ update the backups if possible, in case the primary gets trashed
 * for some reason and we need to run e2fsck from a backup superblock.  The
 * important part is that the new block and inode counts are in the backup
 * superblocks, and the location of the new group metadata in the GDT backups.
 *
 * We do not need to take the s_resize_lock for this, because these
 * blocks are not otherwise touched by the filesystem code when it is
 * mounted.  We don't need to worry about last changing from
 * sbi->s_groups_count, because the worst that can happen is that we
 * do not copy the full number of backups at this time.  The resize
 * which changed s_groups_count will backup again.
 */
static void update_backups(struct super_block *sb, int blk_off, char *data,
			   int size, int meta_bg)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	ext4_group_t last;
	const int bpg = EXT4_BLOCKS_PER_GROUP(sb);
	unsigned three = 1;
	unsigned five = 5;
	unsigned seven = 7;
	ext4_group_t group = 0;
	int rest = sb->s_blocksize - size;
	handle_t *handle;
	int err = 0, err2;

	handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, EXT4_MAX_TRANS_DATA);
	if (IS_ERR(handle)) {
		group = 1;
		err = PTR_ERR(handle);
		goto exit_err;
	}

	if (meta_bg == 0) {
		group = ext4_list_backups(sb, &three, &five, &seven);
		last = sbi->s_groups_count;
	} else {
		group = ext4_meta_bg_first_group(sb, group) + 1;
		last = (ext4_group_t)(group + EXT4_DESC_PER_BLOCK(sb) - 2);
	}
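
	/*
	 * With meta_bg, a descriptor block is backed up only in the second
	 * and last groups of its meta block group, so @group and @last
	 * computed above bound the walk; otherwise ext4_list_backups()
	 * produces the usual sparse-super sequence of backup groups.
	 */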
	while (group < sbi->s_groups_count) {
		struct buffer_head *bh;
		ext4_fsblk_t backup_block;

		/* Out of journal space, and can't get more - abort - so sad */
		if (ext4_handle_valid(handle) &&
		    handle->h_buffer_credits == 0 &&
		    ext4_journal_extend(handle, EXT4_MAX_TRANS_DATA) &&
		    (err = ext4_journal_restart(handle, EXT4_MAX_TRANS_DATA)))
			break;

		if (meta_bg == 0)
			backup_block = ((ext4_fsblk_t)group) * bpg + blk_off;
		else
			backup_block = (ext4_group_first_block_no(sb, group) +
					ext4_bg_has_super(sb, group));

		bh = sb_getblk(sb, backup_block);
		if (unlikely(!bh)) {
			err = -ENOMEM;
			break;
		}
		ext4_debug("update metadata backup %llu(+%llu)\n",
			   backup_block, backup_block -
			   ext4_group_first_block_no(sb, group));
		BUFFER_TRACE(bh, "get_write_access");
		if ((err = ext4_journal_get_write_access(handle, bh)))
			break;
		lock_buffer(bh);
		memcpy(bh->b_data, data, size);
		if (rest)
			memset(bh->b_data + size, 0, rest);
		set_buffer_uptodate(bh);
		unlock_buffer(bh);
		err = ext4_handle_dirty_metadata(handle, NULL, bh);
		if (unlikely(err))
			ext4_std_error(sb, err);
		brelse(bh);

		if (meta_bg == 0)
			group = ext4_list_backups(sb, &three, &five, &seven);
		else if (group == last)
			break;
		else
			group = last;
	}
	if ((err2 = ext4_journal_stop(handle)) && !err)
		err = err2;

	/*
	 * Ugh! Need to have e2fsck write the backup copies.  It is too
	 * late to revert the resize, we shouldn't fail just because of
	 * the backup copies (they are only needed in case of corruption).
	 *
	 * However, if we got here we have a journal problem too, so we
	 * can't really start a transaction to mark the superblock.
	 * Chicken out and just set the flag on the hope it will be written
	 * to disk, and if not - we will simply wait until next fsck.
	 */
exit_err:
	if (err) {
		ext4_warning(sb, "can't update backup for group %u (err %d), "
			     "forcing fsck on next reboot", group, err);
		sbi->s_mount_state &= ~EXT4_VALID_FS;
		sbi->s_es->s_state &= cpu_to_le16(~EXT4_VALID_FS);
		mark_buffer_dirty(sbi->s_sbh);
	}
}
/*
 * ext4_add_new_descs() adds @count group descriptors of groups
 * starting at @group
 *
 * @handle: journal handle
 * @sb: super block
 * @group: the group no. of the first group desc to be added
 * @resize_inode: the resize inode
 * @count: number of group descriptors to be added
 */
static int ext4_add_new_descs(handle_t *handle, struct super_block *sb,
			      ext4_group_t group, struct inode *resize_inode,
			      ext4_group_t count)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	struct buffer_head *gdb_bh;
	int i, gdb_off, gdb_num, err = 0;
	int meta_bg;

	meta_bg = EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_META_BG);
	for (i = 0; i < count; i++, group++) {
		int reserved_gdb = ext4_bg_has_super(sb, group) ?
			le16_to_cpu(es->s_reserved_gdt_blocks) : 0;

		gdb_off = group % EXT4_DESC_PER_BLOCK(sb);
		gdb_num = group / EXT4_DESC_PER_BLOCK(sb);
		/*
		 * We will only either add reserved group blocks to a backup group
		 * or remove reserved blocks for the first group in a new group block.
		 * Doing both would mean more complex code, and sane people don't
		 * use non-sparse filesystems anymore.  This is already checked above.
		 */
		if (gdb_off) {
			gdb_bh = sbi->s_group_desc[gdb_num];
			BUFFER_TRACE(gdb_bh, "get_write_access");
			err = ext4_journal_get_write_access(handle, gdb_bh);

			if (!err && reserved_gdb && ext4_bg_num_gdb(sb, group))
				err = reserve_backup_gdb(handle, resize_inode, group);
		} else if (meta_bg != 0) {
			err = add_new_gdb_meta_bg(sb, handle, group);
		} else {
			err = add_new_gdb(handle, resize_inode, group);
		}
		if (err)
			break;
	}
	return err;
}
static struct buffer_head *ext4_get_bitmap(struct super_block *sb, __u64 block)
{
	struct buffer_head *bh = sb_getblk(sb, block);
	if (unlikely(!bh))
		return NULL;
	if (!bh_uptodate_or_lock(bh)) {
		if (bh_submit_read(bh) < 0) {
			brelse(bh);
			return NULL;
		}
	}

	return bh;
}
static int ext4_set_bitmap_checksums(struct super_block *sb,
				     ext4_group_t group,
				     struct ext4_group_desc *gdp,
				     struct ext4_new_group_data *group_data)
{
	struct buffer_head *bh;

	if (!ext4_has_metadata_csum(sb))
		return 0;

	bh = ext4_get_bitmap(sb, group_data->inode_bitmap);
	if (!bh)
		return -EIO;
	ext4_inode_bitmap_csum_set(sb, group, gdp, bh,
				   EXT4_INODES_PER_GROUP(sb) / 8);
	brelse(bh);

	bh = ext4_get_bitmap(sb, group_data->block_bitmap);
	if (!bh)
		return -EIO;
	ext4_block_bitmap_csum_set(sb, group, gdp, bh);
	brelse(bh);

	return 0;
}
/*
 * ext4_setup_new_descs() will set up the group descriptors of a flex bg
 */
static int ext4_setup_new_descs(handle_t *handle, struct super_block *sb,
				struct ext4_new_flex_group_data *flex_gd)
{
	struct ext4_new_group_data *group_data = flex_gd->groups;
	struct ext4_group_desc *gdp;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct buffer_head *gdb_bh;
	ext4_group_t group;
	__u16 *bg_flags = flex_gd->bg_flags;
	int i, gdb_off, gdb_num, err = 0;

	for (i = 0; i < flex_gd->count; i++, group_data++, bg_flags++) {
		group = group_data->group;

		gdb_off = group % EXT4_DESC_PER_BLOCK(sb);
		gdb_num = group / EXT4_DESC_PER_BLOCK(sb);

		/*
		 * get_write_access() has been called on gdb_bh by
		 * ext4_add_new_descs().
		 */
		gdb_bh = sbi->s_group_desc[gdb_num];
		/* Update group descriptor block for new group */
		gdp = (struct ext4_group_desc *)(gdb_bh->b_data +
						 gdb_off * EXT4_DESC_SIZE(sb));

		memset(gdp, 0, EXT4_DESC_SIZE(sb));
		ext4_block_bitmap_set(sb, gdp, group_data->block_bitmap);
		ext4_inode_bitmap_set(sb, gdp, group_data->inode_bitmap);
		err = ext4_set_bitmap_checksums(sb, group, gdp, group_data);
		if (err) {
			ext4_std_error(sb, err);
			break;
		}

		ext4_inode_table_set(sb, gdp, group_data->inode_table);
		ext4_free_group_clusters_set(sb, gdp,
			EXT4_NUM_B2C(sbi, group_data->free_blocks_count));
		ext4_free_inodes_set(sb, gdp, EXT4_INODES_PER_GROUP(sb));
		if (ext4_has_group_desc_csum(sb))
			ext4_itable_unused_set(sb, gdp,
					       EXT4_INODES_PER_GROUP(sb));
		gdp->bg_flags = cpu_to_le16(*bg_flags);
		ext4_group_desc_csum_set(sb, group, gdp);

		err = ext4_handle_dirty_metadata(handle, NULL, gdb_bh);
		if (unlikely(err)) {
			ext4_std_error(sb, err);
			break;
		}

		/*
		 * We can allocate memory for mb_alloc based on the new group
		 * descriptor
		 */
		err = ext4_mb_add_groupinfo(sb, group, gdp);
		if (err)
			break;
	}
	return err;
}
/*
 * ext4_update_super() updates the super block so that the newly added
 * groups can be seen by the filesystem.
 *
 * @sb: super block
 * @flex_gd: new added groups
 */
static void ext4_update_super(struct super_block *sb,
			      struct ext4_new_flex_group_data *flex_gd)
{
	ext4_fsblk_t blocks_count = 0;
	ext4_fsblk_t free_blocks = 0;
	ext4_fsblk_t reserved_blocks = 0;
	struct ext4_new_group_data *group_data = flex_gd->groups;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	int i;

	BUG_ON(flex_gd->count == 0 || group_data == NULL);
	/*
	 * Make the new blocks and inodes valid next.  We do this before
	 * increasing the group count so that once the group is enabled,
	 * all of its blocks and inodes are already valid.
	 *
	 * We always allocate group-by-group, then block-by-block or
	 * inode-by-inode within a group, so enabling these
	 * blocks/inodes before the group is live won't actually let us
	 * allocate the new space yet.
	 */
	for (i = 0; i < flex_gd->count; i++) {
		blocks_count += group_data[i].blocks_count;
		free_blocks += group_data[i].free_blocks_count;
	}
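
	/*
	 * Carry the existing reserved-block percentage over to the newly
	 * added blocks: reserved_blocks below ends up as
	 * (s_r_blocks_count / s_blocks_count) * blocks_count, computed with
	 * integer arithmetic.
	 */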
	reserved_blocks = ext4_r_blocks_count(es) * 100;
	reserved_blocks = div64_u64(reserved_blocks, ext4_blocks_count(es));
	reserved_blocks *= blocks_count;
	do_div(reserved_blocks, 100);

	ext4_blocks_count_set(es, ext4_blocks_count(es) + blocks_count);
	ext4_free_blocks_count_set(es, ext4_free_blocks_count(es) + free_blocks);
	le32_add_cpu(&es->s_inodes_count, EXT4_INODES_PER_GROUP(sb) *
		     flex_gd->count);
	le32_add_cpu(&es->s_free_inodes_count, EXT4_INODES_PER_GROUP(sb) *
		     flex_gd->count);

	ext4_debug("free blocks count %llu", ext4_free_blocks_count(es));
	/*
	 * We need to protect s_groups_count against other CPUs seeing
	 * inconsistent state in the superblock.
	 *
	 * The precise rules we use are:
	 *
	 * * Writers must perform a smp_wmb() after updating all
	 *   dependent data and before modifying the groups count
	 *
	 * * Readers must perform an smp_rmb() after reading the groups
	 *   count and before reading any dependent data.
	 *
	 * NB. These rules can be relaxed when checking the group count
	 * while freeing data, as we can only allocate from a block
	 * group after serialising against the group count, and we can
	 * only then free after serialising in turn against that
	 * allocation.
	 */
	smp_wmb();

	/* Update the global fs size fields */
	sbi->s_groups_count += flex_gd->count;
	sbi->s_blockfile_groups = min_t(ext4_group_t, sbi->s_groups_count,
			(EXT4_MAX_BLOCK_FILE_PHYS / EXT4_BLOCKS_PER_GROUP(sb)));

	/* Update the reserved block counts only once the new group is
	 * active. */
	ext4_r_blocks_count_set(es, ext4_r_blocks_count(es) +
				reserved_blocks);

	/* Update the free space counts */
	percpu_counter_add(&sbi->s_freeclusters_counter,
			   EXT4_NUM_B2C(sbi, free_blocks));
	percpu_counter_add(&sbi->s_freeinodes_counter,
			   EXT4_INODES_PER_GROUP(sb) * flex_gd->count);

	ext4_debug("free blocks count %llu",
		   percpu_counter_read(&sbi->s_freeclusters_counter));
	if (EXT4_HAS_INCOMPAT_FEATURE(sb,
				      EXT4_FEATURE_INCOMPAT_FLEX_BG) &&
	    sbi->s_log_groups_per_flex) {
		ext4_group_t flex_group;
		flex_group = ext4_flex_group(sbi, group_data[0].group);
		atomic64_add(EXT4_NUM_B2C(sbi, free_blocks),
			     &sbi->s_flex_groups[flex_group].free_clusters);
		atomic_add(EXT4_INODES_PER_GROUP(sb) * flex_gd->count,
			   &sbi->s_flex_groups[flex_group].free_inodes);
	}

	/*
	 * Update the fs overhead information
	 */
	ext4_calculate_overhead(sb);

	if (test_opt(sb, DEBUG))
		printk(KERN_DEBUG "EXT4-fs: added group %u:"
		       "%llu blocks(%llu free %llu reserved)\n", flex_gd->count,
		       blocks_count, free_blocks, reserved_blocks);
}

/* Add a flex group to an fs. Ensure we handle all possible error conditions
 * _before_ we start modifying the filesystem, because we cannot abort the
 * transaction and not have it write the data to disk.
 */
static int ext4_flex_group_add(struct super_block *sb,
			       struct inode *resize_inode,
			       struct ext4_new_flex_group_data *flex_gd)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	ext4_fsblk_t o_blocks_count;
	ext4_grpblk_t last;
	ext4_group_t group;
	handle_t *handle;
	unsigned reserved_gdb;
	int err = 0, err2 = 0, credit;

	BUG_ON(!flex_gd->count || !flex_gd->groups || !flex_gd->bg_flags);

	reserved_gdb = le16_to_cpu(es->s_reserved_gdt_blocks);
	o_blocks_count = ext4_blocks_count(es);
	ext4_get_group_no_and_offset(sb, o_blocks_count, &group, &last);
	BUG_ON(last);

	err = setup_new_flex_group_blocks(sb, flex_gd);
	if (err)
		goto exit;
	/*
	 * We will always be modifying at least the superblock and GDT
	 * blocks.  If we are adding a group past the last current GDT block,
	 * we will also modify the inode and the dindirect block.  If we
	 * are adding a group with superblock/GDT backups we will also
	 * modify each of the reserved GDT dindirect blocks.
	 */
	credit = 3;	/* sb, resize inode, resize inode dindirect */
	/* GDT blocks */
	credit += 1 + DIV_ROUND_UP(flex_gd->count, EXT4_DESC_PER_BLOCK(sb));
	credit += reserved_gdb;	/* Reserved GDT dindirect blocks */
	handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, credit);
	if (IS_ERR(handle)) {
		err = PTR_ERR(handle);
		goto exit;
	}

	BUFFER_TRACE(sbi->s_sbh, "get_write_access");
	err = ext4_journal_get_write_access(handle, sbi->s_sbh);
	if (err)
		goto exit_journal;

	group = flex_gd->groups[0].group;
	BUG_ON(group != EXT4_SB(sb)->s_groups_count);
	err = ext4_add_new_descs(handle, sb, group,
				 resize_inode, flex_gd->count);
	if (err)
		goto exit_journal;

	err = ext4_setup_new_descs(handle, sb, flex_gd);
	if (err)
		goto exit_journal;

	ext4_update_super(sb, flex_gd);

	err = ext4_handle_dirty_super(handle, sb);

exit_journal:
	err2 = ext4_journal_stop(handle);
	if (!err)
		err = err2;
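
	/*
	 * If the transaction went through, propagate the changes to the
	 * backup copies of the superblock and of every group descriptor
	 * block touched by this resize.
	 */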
	if (!err) {
		int gdb_num = group / EXT4_DESC_PER_BLOCK(sb);
		int gdb_num_end = ((group + flex_gd->count - 1) /
				   EXT4_DESC_PER_BLOCK(sb));
		int meta_bg = EXT4_HAS_INCOMPAT_FEATURE(sb,
				EXT4_FEATURE_INCOMPAT_META_BG);
		sector_t old_gdb = 0;

		update_backups(sb, sbi->s_sbh->b_blocknr, (char *)es,
			       sizeof(struct ext4_super_block), 0);
		for (; gdb_num <= gdb_num_end; gdb_num++) {
			struct buffer_head *gdb_bh;

			gdb_bh = sbi->s_group_desc[gdb_num];
			if (old_gdb == gdb_bh->b_blocknr)
				continue;
			update_backups(sb, gdb_bh->b_blocknr, gdb_bh->b_data,
				       gdb_bh->b_size, meta_bg);
			old_gdb = gdb_bh->b_blocknr;
		}
	}
exit:
	return err;
}

static int ext4_setup_next_flex_gd(struct super_block *sb,
				   struct ext4_new_flex_group_data *flex_gd,
				   ext4_fsblk_t n_blocks_count,
				   unsigned long flexbg_size)
{
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;
	struct ext4_new_group_data *group_data = flex_gd->groups;
	ext4_fsblk_t o_blocks_count;
	ext4_group_t n_group;
	ext4_group_t group;
	ext4_group_t last_group;
	ext4_grpblk_t last;
	ext4_grpblk_t blocks_per_group;
	unsigned long i;

	blocks_per_group = EXT4_BLOCKS_PER_GROUP(sb);
	o_blocks_count = ext4_blocks_count(es);

	if (o_blocks_count == n_blocks_count)
		return 0;

	ext4_get_group_no_and_offset(sb, o_blocks_count, &group, &last);
	BUG_ON(last);
	ext4_get_group_no_and_offset(sb, n_blocks_count - 1, &n_group, &last);
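
	/*
	 * flexbg_size is a power of two, so this rounds the first new group
	 * up to the last group of its flex group, capped at the last group
	 * implied by the new size.
	 */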
	last_group = group | (flexbg_size - 1);
	if (last_group > n_group)
		last_group = n_group;

	flex_gd->count = last_group - group + 1;

	for (i = 0; i < flex_gd->count; i++) {
		int overhead;

		group_data[i].group = group + i;
		group_data[i].blocks_count = blocks_per_group;
		overhead = ext4_group_overhead_blocks(sb, group + i);
		group_data[i].free_blocks_count = blocks_per_group - overhead;
		if (ext4_has_group_desc_csum(sb)) {
			flex_gd->bg_flags[i] = EXT4_BG_BLOCK_UNINIT |
					       EXT4_BG_INODE_UNINIT;
			if (!test_opt(sb, INIT_INODE_TABLE))
				flex_gd->bg_flags[i] |= EXT4_BG_INODE_ZEROED;
		} else
			flex_gd->bg_flags[i] = EXT4_BG_INODE_ZEROED;
	}

	if (last_group == n_group && ext4_has_group_desc_csum(sb))
		/* We need to initialize block bitmap of last group. */
		flex_gd->bg_flags[i - 1] &= ~EXT4_BG_BLOCK_UNINIT;
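
	/*
	 * If the new size does not end exactly on a group boundary, the
	 * last group only gets the remaining blocks, so trim its totals.
	 */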
	if ((last_group == n_group) && (last != blocks_per_group - 1)) {
		group_data[i - 1].blocks_count = last + 1;
		group_data[i - 1].free_blocks_count -= blocks_per_group -
							last - 1;
	}

	return 1;
}

/* Add group descriptor data to an existing or new group descriptor block.
 * Ensure we handle all possible error conditions _before_ we start modifying
 * the filesystem, because we cannot abort the transaction and not have it
 * write the data to disk.
 *
 * If we are on a GDT block boundary, we need to get the reserved GDT block.
 * Otherwise, we may need to add backup GDT blocks for a sparse group.
 *
 * We only need to hold the superblock lock while we are actually adding
 * in the new group's counts to the superblock.  Prior to that we have
 * not really "added" the group at all.  We re-check that we are still
 * adding in the last group in case things have changed since verifying.
 */
int ext4_group_add(struct super_block *sb, struct ext4_new_group_data *input)
{
	struct ext4_new_flex_group_data flex_gd;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	int reserved_gdb = ext4_bg_has_super(sb, input->group) ?
		le16_to_cpu(es->s_reserved_gdt_blocks) : 0;
	struct inode *inode = NULL;
	int gdb_off;
	int err;
	__u16 bg_flags = 0;

	gdb_off = input->group % EXT4_DESC_PER_BLOCK(sb);

	if (gdb_off == 0 && !EXT4_HAS_RO_COMPAT_FEATURE(sb,
					EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER)) {
		ext4_warning(sb, "Can't resize non-sparse filesystem further");
		return -EPERM;
	}

	if (ext4_blocks_count(es) + input->blocks_count <
	    ext4_blocks_count(es)) {
		ext4_warning(sb, "blocks_count overflow");
		return -EINVAL;
	}

	if (le32_to_cpu(es->s_inodes_count) + EXT4_INODES_PER_GROUP(sb) <
	    le32_to_cpu(es->s_inodes_count)) {
		ext4_warning(sb, "inodes_count overflow");
		return -EINVAL;
	}

	if (reserved_gdb || gdb_off == 0) {
		if (!EXT4_HAS_COMPAT_FEATURE(sb,
					     EXT4_FEATURE_COMPAT_RESIZE_INODE)
		    || !le16_to_cpu(es->s_reserved_gdt_blocks)) {
			ext4_warning(sb,
				     "No reserved GDT blocks, can't resize");
			return -EPERM;
		}
		inode = ext4_iget(sb, EXT4_RESIZE_INO);
		if (IS_ERR(inode)) {
			ext4_warning(sb, "Error opening resize inode");
			return PTR_ERR(inode);
		}
	}

	err = verify_group_input(sb, input);
	if (err)
		goto out;

	err = ext4_alloc_flex_bg_array(sb, input->group + 1);
	if (err)
		goto out;

	err = ext4_mb_alloc_groupinfo(sb, input->group + 1);
	if (err)
		goto out;
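
	/*
	 * A single new group is added as a degenerate flex group that
	 * contains exactly one group.
	 */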
	flex_gd.count = 1;
	flex_gd.groups = input;
	flex_gd.bg_flags = &bg_flags;
	err = ext4_flex_group_add(sb, inode, &flex_gd);
out:
	iput(inode);
	return err;
} /* ext4_group_add */

/*
 * Extend a group without checking, assuming that the caller has already
 * verified the parameters.
 */
static int ext4_group_extend_no_check(struct super_block *sb,
				      ext4_fsblk_t o_blocks_count, ext4_grpblk_t add)
{
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;
	handle_t *handle;
	int err = 0, err2;

	/* We will update the superblock, one block bitmap, and
	 * one group descriptor via ext4_group_add_blocks().
	 */
	handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, 3);
	if (IS_ERR(handle)) {
		err = PTR_ERR(handle);
		ext4_warning(sb, "error %d on journal start", err);
		return err;
	}

	BUFFER_TRACE(EXT4_SB(sb)->s_sbh, "get_write_access");
	err = ext4_journal_get_write_access(handle, EXT4_SB(sb)->s_sbh);
	if (err) {
		ext4_warning(sb, "error %d on journal write access", err);
		goto errout;
	}
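
	/*
	 * Bump the total and free block counts in the superblock first,
	 * then hand the new blocks to ext4_group_add_blocks() so they get
	 * marked free in the last group's block bitmap.
	 */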
	ext4_blocks_count_set(es, o_blocks_count + add);
	ext4_free_blocks_count_set(es, ext4_free_blocks_count(es) + add);
	ext4_debug("freeing blocks %llu through %llu\n", o_blocks_count,
		   o_blocks_count + add);
	/* We add the blocks to the bitmap and set the group need init bit */
	err = ext4_group_add_blocks(handle, sb, o_blocks_count, add);
	if (err)
		goto errout;
	ext4_handle_dirty_super(handle, sb);
	ext4_debug("freed blocks %llu through %llu\n", o_blocks_count,
		   o_blocks_count + add);
errout:
	err2 = ext4_journal_stop(handle);
	if (err2 && !err)
		err = err2;

	if (!err) {
		if (test_opt(sb, DEBUG))
			printk(KERN_DEBUG "EXT4-fs: extended group to %llu "
			       "blocks\n", ext4_blocks_count(es));
		update_backups(sb, EXT4_SB(sb)->s_sbh->b_blocknr,
			       (char *)es, sizeof(struct ext4_super_block), 0);
	}
	return err;
}

/*
 * Extend the filesystem to the new number of blocks specified.  This entry
 * point is only used to extend the current filesystem to the end of the last
 * existing group.  It can be accessed via ioctl, or by "remount,resize=<size>"
 * for emergencies (because it has no dependencies on reserved blocks).
 *
 * If we _really_ wanted, we could use default values to call ext4_group_add()
 * and allow the "remount" trick to work for arbitrary resizing, assuming enough
 * GDT blocks are reserved to grow to the desired size.
 */
int ext4_group_extend(struct super_block *sb, struct ext4_super_block *es,
		      ext4_fsblk_t n_blocks_count)
{
	ext4_fsblk_t o_blocks_count;
	ext4_grpblk_t last;
	ext4_grpblk_t add;
	struct buffer_head *bh;
	int err;
	ext4_group_t group;

	o_blocks_count = ext4_blocks_count(es);

	if (test_opt(sb, DEBUG))
		ext4_msg(sb, KERN_DEBUG,
			 "extending last group from %llu to %llu blocks",
			 o_blocks_count, n_blocks_count);

	if (n_blocks_count == 0 || n_blocks_count == o_blocks_count)
		return 0;

	if (n_blocks_count > (sector_t)(~0ULL) >> (sb->s_blocksize_bits - 9)) {
		ext4_msg(sb, KERN_ERR,
			 "filesystem too large to resize to %llu blocks safely",
			 n_blocks_count);
		if (sizeof(sector_t) < 8)
			ext4_warning(sb, "CONFIG_LBDAF not enabled");
		return -EINVAL;
	}

	if (n_blocks_count < o_blocks_count) {
		ext4_warning(sb, "can't shrink FS - resize aborted");
		return -EINVAL;
	}

	/* Handle the remaining blocks in the last group only. */
	ext4_get_group_no_and_offset(sb, o_blocks_count, &group, &last);

	if (last == 0) {
		ext4_warning(sb, "need to use ext2online to resize further");
		return -EPERM;
	}

	add = EXT4_BLOCKS_PER_GROUP(sb) - last;

	if (o_blocks_count + add < o_blocks_count) {
		ext4_warning(sb, "blocks_count overflow");
		return -EINVAL;
	}

	if (o_blocks_count + add > n_blocks_count)
		add = n_blocks_count - o_blocks_count;

	if (o_blocks_count + add < n_blocks_count)
		ext4_warning(sb, "will only finish group (%llu blocks, %u new)",
			     o_blocks_count + add, add);

	/* See if the device is actually as big as what was requested */
	bh = sb_bread(sb, o_blocks_count + add - 1);
	if (!bh) {
		ext4_warning(sb, "can't read last block, resize aborted");
		return -ENOSPC;
	}
	brelse(bh);

	err = ext4_group_extend_no_check(sb, o_blocks_count, add);
	return err;
} /* ext4_group_extend */

static int num_desc_blocks(struct super_block *sb, ext4_group_t groups)
{
	return (groups + EXT4_DESC_PER_BLOCK(sb) - 1) / EXT4_DESC_PER_BLOCK(sb);
}

/*
 * Release the resize inode and drop the resize_inode feature if there
 * are no more reserved gdt blocks, and then convert the file system
 * to enable meta_bg
 */
static int ext4_convert_meta_bg(struct super_block *sb, struct inode *inode)
{
	handle_t *handle;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	struct ext4_inode_info *ei = EXT4_I(inode);
	ext4_fsblk_t nr;
	int i, ret, err = 0;
	int credits = 1;

	ext4_msg(sb, KERN_INFO, "Converting file system to meta_bg");
	if (inode) {
		if (es->s_reserved_gdt_blocks) {
			ext4_error(sb, "Unexpected non-zero "
				   "s_reserved_gdt_blocks");
			return -EPERM;
		}

		/* Do a quick sanity check of the resize inode */
		if (inode->i_blocks != 1 << (inode->i_blkbits - 9))
			goto invalid_resize_inode;
		for (i = 0; i < EXT4_N_BLOCKS; i++) {
			if (i == EXT4_DIND_BLOCK) {
				if (ei->i_data[i])
					continue;
				else
					goto invalid_resize_inode;
			}
			if (ei->i_data[i])
				goto invalid_resize_inode;
		}
		credits += 3;	/* block bitmap, bg descriptor, resize inode */
	}

	handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, credits);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	BUFFER_TRACE(sbi->s_sbh, "get_write_access");
	err = ext4_journal_get_write_access(handle, sbi->s_sbh);
	if (err)
		goto errout;
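
	/*
	 * Switch the filesystem over: drop the resize_inode compat feature,
	 * set the meta_bg incompat feature, and record the first meta
	 * block group in the superblock.
	 */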
	EXT4_CLEAR_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_RESIZE_INODE);
	EXT4_SET_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_META_BG);
	sbi->s_es->s_first_meta_bg =
		cpu_to_le32(num_desc_blocks(sb, sbi->s_groups_count));

	err = ext4_handle_dirty_super(handle, sb);
	if (err) {
		ext4_std_error(sb, err);
		goto errout;
	}

	if (inode) {
		nr = le32_to_cpu(ei->i_data[EXT4_DIND_BLOCK]);
		ext4_free_blocks(handle, inode, NULL, nr, 1,
				 EXT4_FREE_BLOCKS_METADATA |
				 EXT4_FREE_BLOCKS_FORGET);
		ei->i_data[EXT4_DIND_BLOCK] = 0;
		inode->i_blocks = 0;

		err = ext4_mark_inode_dirty(handle, inode);
		if (err)
			ext4_std_error(sb, err);
	}

errout:
	ret = ext4_journal_stop(handle);
	if (!err)
		err = ret;
	/* Return the first error hit, not just the journal stop result. */
	return err;

invalid_resize_inode:
	ext4_error(sb, "corrupted/inconsistent resize inode");
	return -EINVAL;
}

/*
 * ext4_resize_fs() resizes a fs to new size specified by @n_blocks_count
 *
 * @sb: super block of the fs to be resized
 * @n_blocks_count: the number of blocks the resized fs will contain
 */
int ext4_resize_fs(struct super_block *sb, ext4_fsblk_t n_blocks_count)
{
	struct ext4_new_flex_group_data *flex_gd = NULL;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	struct buffer_head *bh;
	struct inode *resize_inode = NULL;
	ext4_grpblk_t add, offset;
	unsigned long n_desc_blocks;
	unsigned long o_desc_blocks;
	ext4_group_t o_group;
	ext4_group_t n_group;
	ext4_fsblk_t o_blocks_count;
	ext4_fsblk_t n_blocks_count_retry = 0;
	unsigned long last_update_time = 0;
	int err = 0, flexbg_size = 1 << sbi->s_log_groups_per_flex;
	int meta_bg;

	/* See if the device is actually as big as what was requested */
	bh = sb_bread(sb, n_blocks_count - 1);
	if (!bh) {
		ext4_warning(sb, "can't read last block, resize aborted");
		return -ENOSPC;
	}
	brelse(bh);

retry:
	o_blocks_count = ext4_blocks_count(es);

	ext4_msg(sb, KERN_INFO, "resizing filesystem from %llu "
		 "to %llu blocks", o_blocks_count, n_blocks_count);

	if (n_blocks_count < o_blocks_count) {
		/* On-line shrinking not supported */
		ext4_warning(sb, "can't shrink FS - resize aborted");
		return -EINVAL;
	}

	if (n_blocks_count == o_blocks_count)
		/* Nothing to do */
		return 0;

	n_group = ext4_get_group_number(sb, n_blocks_count - 1);
	if (n_group > (0xFFFFFFFFUL / EXT4_INODES_PER_GROUP(sb))) {
		ext4_warning(sb, "resize would cause inodes_count overflow");
		return -EINVAL;
	}
	ext4_get_group_no_and_offset(sb, o_blocks_count - 1, &o_group, &offset);

	n_desc_blocks = num_desc_blocks(sb, n_group + 1);
	o_desc_blocks = num_desc_blocks(sb, sbi->s_groups_count);

	meta_bg = EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_META_BG);

	if (EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_RESIZE_INODE)) {
		if (meta_bg) {
			ext4_error(sb, "resize_inode and meta_bg enabled "
				   "simultaneously");
			return -EINVAL;
		}
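
		/*
		 * With the resize_inode scheme we can only grow as far as the
		 * reserved GDT blocks allow.  If the requested size needs
		 * more descriptor blocks than that, clamp this pass to the
		 * largest reachable size and remember the original target in
		 * n_blocks_count_retry so a later pass can finish the job.
		 */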
		if (n_desc_blocks > o_desc_blocks +
		    le16_to_cpu(es->s_reserved_gdt_blocks)) {
			n_blocks_count_retry = n_blocks_count;
			n_desc_blocks = o_desc_blocks +
				le16_to_cpu(es->s_reserved_gdt_blocks);
			n_group = n_desc_blocks * EXT4_DESC_PER_BLOCK(sb);
			n_blocks_count = n_group * EXT4_BLOCKS_PER_GROUP(sb);
			n_group--; /* set to last group number */
		}

		if (!resize_inode)
			resize_inode = ext4_iget(sb, EXT4_RESIZE_INO);
		if (IS_ERR(resize_inode)) {
			ext4_warning(sb, "Error opening resize inode");
			return PTR_ERR(resize_inode);
		}
	}
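
	/*
	 * Convert to meta_bg when there is neither a resize inode nor
	 * meta_bg support, or when the clamp above left nothing to grow
	 * into without the conversion.
	 */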
	if ((!resize_inode && !meta_bg) || n_blocks_count == o_blocks_count) {
		err = ext4_convert_meta_bg(sb, resize_inode);
		if (err)
			goto out;
		if (resize_inode) {
			iput(resize_inode);
			resize_inode = NULL;
		}
		if (n_blocks_count_retry) {
			n_blocks_count = n_blocks_count_retry;
			n_blocks_count_retry = 0;
			goto retry;
		}
	}

	/* extend the last group */
	if (n_group == o_group)
		add = n_blocks_count - o_blocks_count;
	else
		add = EXT4_BLOCKS_PER_GROUP(sb) - (offset + 1);
	if (add > 0) {
		err = ext4_group_extend_no_check(sb, o_blocks_count, add);
		if (err)
			goto out;
	}

	if (ext4_blocks_count(es) == n_blocks_count)
		goto out;

	err = ext4_alloc_flex_bg_array(sb, n_group + 1);
	if (err)
		goto out;	/* drop the resize_inode reference on failure */

	err = ext4_mb_alloc_groupinfo(sb, n_group + 1);
	if (err)
		goto out;

	flex_gd = alloc_flex_gd(flexbg_size);
	if (flex_gd == NULL) {
		err = -ENOMEM;
		goto out;
	}

	/* Add flex groups. Note that a regular group is a
	 * flex group with 1 group.
	 */
	while (ext4_setup_next_flex_gd(sb, flex_gd, n_blocks_count,
				       flexbg_size)) {
		if (jiffies - last_update_time > HZ * 10) {
			if (last_update_time)
				ext4_msg(sb, KERN_INFO,
					 "resized to %llu blocks",
					 ext4_blocks_count(es));
			last_update_time = jiffies;
		}
		if (ext4_alloc_group_tables(sb, flex_gd, flexbg_size) != 0)
			break;
		err = ext4_flex_group_add(sb, resize_inode, flex_gd);
		if (unlikely(err))
			break;
	}

	if (!err && n_blocks_count_retry) {
		n_blocks_count = n_blocks_count_retry;
		n_blocks_count_retry = 0;
		free_flex_gd(flex_gd);
		flex_gd = NULL;
		goto retry;
	}

out:
	if (flex_gd)
		free_flex_gd(flex_gd);
	if (resize_inode != NULL)
		iput(resize_inode);
	ext4_msg(sb, KERN_INFO, "resized filesystem to %llu", n_blocks_count);
	return err;
}